repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
PX4/pyulog | pyulog/core.py | ULog._check_file_corruption | python | def _check_file_corruption(self, header):
# We need to handle 2 cases:
# - corrupt file (we do our best to read the rest of the file)
# - new ULog message type got added (we just want to skip the message)
if header.msg_type == 0 or header.msg_size == 0 or header.msg_size > 10000:
if not self._file_corrupt and self._debug:
print('File corruption detected')
self._file_corrupt = True
return self._file_corrupt | check for file corruption based on an unknown message type in the header | train | https://github.com/PX4/pyulog/blob/3bc4f9338d30e2e0a0dfbed58f54d200967e5056/pyulog/core.py#L602-L612 | null | class ULog(object):
"""
This class parses an ulog file
"""
## constants ##
HEADER_BYTES = b'\x55\x4c\x6f\x67\x01\x12\x35'
# message types
MSG_TYPE_FORMAT = ord('F')
MSG_TYPE_DATA = ord('D')
MSG_TYPE_INFO = ord('I')
MSG_TYPE_INFO_MULTIPLE = ord('M')
MSG_TYPE_PARAMETER = ord('P')
MSG_TYPE_ADD_LOGGED_MSG = ord('A')
MSG_TYPE_REMOVE_LOGGED_MSG = ord('R')
MSG_TYPE_SYNC = ord('S')
MSG_TYPE_DROPOUT = ord('O')
MSG_TYPE_LOGGING = ord('L')
MSG_TYPE_FLAG_BITS = ord('B')
_UNPACK_TYPES = {
'int8_t': ['b', 1, np.int8],
'uint8_t': ['B', 1, np.uint8],
'int16_t': ['h', 2, np.int16],
'uint16_t': ['H', 2, np.uint16],
'int32_t': ['i', 4, np.int32],
'uint32_t': ['I', 4, np.uint32],
'int64_t': ['q', 8, np.int64],
'uint64_t': ['Q', 8, np.uint64],
'float': ['f', 4, np.float32],
'double': ['d', 8, np.float64],
'bool': ['?', 1, np.int8],
'char': ['c', 1, np.int8]
}
@staticmethod
def get_field_size(type_str):
"""
get the field size in bytes.
:param type_str: type string, eg. 'int8_t'
"""
return ULog._UNPACK_TYPES[type_str][1]
# pre-init unpack structs for quicker use
_unpack_ushort_byte = struct.Struct('<HB').unpack
_unpack_ushort = struct.Struct('<H').unpack
_unpack_uint64 = struct.Struct('<Q').unpack
def __init__(self, log_file, message_name_filter_list=None):
"""
Initialize the object & load the file.
:param log_file: a file name (str) or a readable file object
:param message_name_filter_list: list of strings, to only load messages
with the given names. If None, load everything.
"""
self._debug = False
self._file_corrupt = False
self._start_timestamp = 0
self._last_timestamp = 0
self._msg_info_dict = {}
self._msg_info_multiple_dict = {}
self._initial_parameters = {}
self._changed_parameters = []
self._message_formats = {}
self._logged_messages = []
self._dropouts = []
self._data_list = []
self._subscriptions = {} # dict of key=msg_id, value=_MessageAddLogged
self._filtered_message_ids = set() # _MessageAddLogged id's that are filtered
self._missing_message_ids = set() # _MessageAddLogged id's that could not be found
self._file_version = 0
self._compat_flags = [0] * 8
self._incompat_flags = [0] * 8
self._appended_offsets = [] # file offsets for appended data
self._load_file(log_file, message_name_filter_list)
## parsed data
@property
def start_timestamp(self):
""" timestamp of file start """
return self._start_timestamp
@property
def last_timestamp(self):
""" timestamp of last message """
return self._last_timestamp
@property
def msg_info_dict(self):
""" dictionary of all information messages (key is a string, value
depends on the type, usually string or int) """
return self._msg_info_dict
@property
def msg_info_multiple_dict(self):
""" dictionary of all information multiple messages (key is a string, value
is a list of lists that contains the messages) """
return self._msg_info_multiple_dict
@property
def initial_parameters(self):
""" dictionary of all initially set parameters (key=param name) """
return self._initial_parameters
@property
def changed_parameters(self):
""" list of all changed parameters (tuple of (timestamp, name, value))"""
return self._changed_parameters
@property
def message_formats(self):
""" dictionary with key = format name (MessageFormat.name),
value = MessageFormat object """
return self._message_formats
@property
def logged_messages(self):
""" list of MessageLogging objects """
return self._logged_messages
@property
def dropouts(self):
""" list of MessageDropout objects """
return self._dropouts
@property
def data_list(self):
""" extracted data: list of Data objects """
return self._data_list
@property
def has_data_appended(self):
""" returns True if the log has data appended, False otherwise """
return self._incompat_flags[0] & 0x1
@property
def file_corruption(self):
""" True if a file corruption got detected """
return self._file_corrupt
def get_dataset(self, name, multi_instance=0):
""" get a specific dataset.
example:
try:
gyro_data = ulog.get_dataset('sensor_gyro')
except (KeyError, IndexError, ValueError) as error:
print(type(error), "(sensor_gyro):", error)
:param name: name of the dataset
:param multi_instance: the multi_id, defaults to the first
:raises KeyError, IndexError, ValueError: if name or instance not found
"""
return [elem for elem in self._data_list
if elem.name == name and elem.multi_id == multi_instance][0]
class Data(object):
""" contains the final topic data for a single topic and instance """
def __init__(self, message_add_logged_obj):
self.multi_id = message_add_logged_obj.multi_id
self.name = message_add_logged_obj.message_name
self.field_data = message_add_logged_obj.field_data
self.timestamp_idx = message_add_logged_obj.timestamp_idx
# get data as numpy.ndarray
np_array = np.frombuffer(message_add_logged_obj.buffer,
dtype=message_add_logged_obj.dtype)
# convert into dict of np.array (which is easier to handle)
self.data = {}
for name in np_array.dtype.names:
self.data[name] = np_array[name]
def list_value_changes(self, field_name):
""" get a list of (timestamp, value) tuples, whenever the value
changes. The first data point with non-zero timestamp is always
included, messages with timestamp = 0 are ignored """
t = self.data['timestamp']
x = self.data[field_name]
indices = t != 0 # filter out 0 values
t = t[indices]
x = x[indices]
if len(t) == 0: return []
ret = [(t[0], x[0])]
indices = np.where(x[:-1] != x[1:])[0] + 1
ret.extend(zip(t[indices], x[indices]))
return ret
## Representations of the messages from the log file ##
class _MessageHeader(object):
""" 3 bytes ULog message header """
def __init__(self):
self.msg_size = 0
self.msg_type = 0
def initialize(self, data):
self.msg_size, self.msg_type = ULog._unpack_ushort_byte(data)
class _MessageInfo(object):
""" ULog info message representation """
def __init__(self, data, header, is_info_multiple=False):
if is_info_multiple: # INFO_MULTIPLE message
self.is_continued, = struct.unpack('<B', data[0:1])
data = data[1:]
key_len, = struct.unpack('<B', data[0:1])
type_key = _parse_string(data[1:1+key_len])
type_key_split = type_key.split(' ')
self.type = type_key_split[0]
self.key = type_key_split[1]
if self.type.startswith('char['): # it's a string
self.value = _parse_string(data[1+key_len:])
elif self.type in ULog._UNPACK_TYPES:
unpack_type = ULog._UNPACK_TYPES[self.type]
self.value, = struct.unpack('<'+unpack_type[0], data[1+key_len:])
else: # probably an array (or non-basic type)
self.value = data[1+key_len:]
class _MessageFlagBits(object):
""" ULog message flag bits """
def __init__(self, data, header):
if header.msg_size > 8 + 8 + 3*8:
# we can still parse it but might miss some information
print('Warning: Flags Bit message is longer than expected')
self.compat_flags = list(struct.unpack('<'+'B'*8, data[0:8]))
self.incompat_flags = list(struct.unpack('<'+'B'*8, data[8:16]))
self.appended_offsets = list(struct.unpack('<'+'Q'*3, data[16:16+3*8]))
# remove the 0's at the end
while len(self.appended_offsets) > 0 and self.appended_offsets[-1] == 0:
self.appended_offsets.pop()
class MessageFormat(object):
""" ULog message format representation """
def __init__(self, data, header):
format_arr = _parse_string(data).split(':')
self.name = format_arr[0]
types_str = format_arr[1].split(';')
self.fields = [] # list of tuples (type, array_size, name)
for t in types_str:
if len(t) > 0:
self.fields.append(self._extract_type(t))
@staticmethod
def _extract_type(field_str):
field_str_split = field_str.split(' ')
type_str = field_str_split[0]
name_str = field_str_split[1]
a_pos = type_str.find('[')
if a_pos == -1:
array_size = 1
type_name = type_str
else:
b_pos = type_str.find(']')
array_size = int(type_str[a_pos+1:b_pos])
type_name = type_str[:a_pos]
return type_name, array_size, name_str
class MessageLogging(object):
""" ULog logged string message representation """
def __init__(self, data, header):
self.log_level, = struct.unpack('<B', data[0:1])
self.timestamp, = struct.unpack('<Q', data[1:9])
self.message = _parse_string(data[9:])
def log_level_str(self):
return {ord('0'): 'EMERGENCY',
ord('1'): 'ALERT',
ord('2'): 'CRITICAL',
ord('3'): 'ERROR',
ord('4'): 'WARNING',
ord('5'): 'NOTICE',
ord('6'): 'INFO',
ord('7'): 'DEBUG'}.get(self.log_level, 'UNKNOWN')
class MessageDropout(object):
""" ULog dropout message representation """
def __init__(self, data, header, timestamp):
self.duration, = struct.unpack('<H', data)
self.timestamp = timestamp
class _FieldData(object):
""" Type and name of a single ULog data field """
def __init__(self, field_name, type_str):
self.field_name = field_name
self.type_str = type_str
class _MessageAddLogged(object):
""" ULog add logging data message representation """
def __init__(self, data, header, message_formats):
self.multi_id, = struct.unpack('<B', data[0:1])
self.msg_id, = struct.unpack('<H', data[1:3])
self.message_name = _parse_string(data[3:])
self.field_data = [] # list of _FieldData
self.timestamp_idx = -1
self._parse_format(message_formats)
self.timestamp_offset = 0
for field in self.field_data:
if field.field_name == 'timestamp':
break
self.timestamp_offset += ULog._UNPACK_TYPES[field.type_str][1]
self.buffer = bytearray() # accumulate all message data here
# construct types for numpy
dtype_list = []
for field in self.field_data:
numpy_type = ULog._UNPACK_TYPES[field.type_str][2]
dtype_list.append((field.field_name, numpy_type))
self.dtype = np.dtype(dtype_list).newbyteorder('<')
def _parse_format(self, message_formats):
self._parse_nested_type('', self.message_name, message_formats)
# remove padding fields at the end
while (len(self.field_data) > 0 and
self.field_data[-1].field_name.startswith('_padding')):
self.field_data.pop()
def _parse_nested_type(self, prefix_str, type_name, message_formats):
# we flatten nested types
message_format = message_formats[type_name]
for (type_name_fmt, array_size, field_name) in message_format.fields:
if type_name_fmt in ULog._UNPACK_TYPES:
if array_size > 1:
for i in range(array_size):
self.field_data.append(ULog._FieldData(
prefix_str+field_name+'['+str(i)+']', type_name_fmt))
else:
self.field_data.append(ULog._FieldData(
prefix_str+field_name, type_name_fmt))
if prefix_str+field_name == 'timestamp':
self.timestamp_idx = len(self.field_data) - 1
else: # nested type
if array_size > 1:
for i in range(array_size):
self._parse_nested_type(prefix_str+field_name+'['+str(i)+'].',
type_name_fmt, message_formats)
else:
self._parse_nested_type(prefix_str+field_name+'.',
type_name_fmt, message_formats)
class _MessageData(object):
def __init__(self):
self.timestamp = 0
def initialize(self, data, header, subscriptions, ulog_object):
msg_id, = ULog._unpack_ushort(data[:2])
if msg_id in subscriptions:
subscription = subscriptions[msg_id]
# accumulate data to a buffer, will be parsed later
subscription.buffer += data[2:]
t_off = subscription.timestamp_offset
# TODO: the timestamp can have another size than uint64
self.timestamp, = ULog._unpack_uint64(data[t_off+2:t_off+10])
else:
if not msg_id in ulog_object._filtered_message_ids:
# this is an error, but make it non-fatal
if not msg_id in ulog_object._missing_message_ids:
ulog_object._missing_message_ids.add(msg_id)
if ulog_object._debug:
print(ulog_object._file_handle.tell())
print('Warning: no subscription found for message id {:}. Continuing,'
' but file is most likely corrupt'.format(msg_id))
self.timestamp = 0
def _add_message_info_multiple(self, msg_info):
""" add a message info multiple to self._msg_info_multiple_dict """
if msg_info.key in self._msg_info_multiple_dict:
if msg_info.is_continued:
self._msg_info_multiple_dict[msg_info.key][-1].append(msg_info.value)
else:
self._msg_info_multiple_dict[msg_info.key].append([msg_info.value])
else:
self._msg_info_multiple_dict[msg_info.key] = [[msg_info.value]]
def _load_file(self, log_file, message_name_filter_list):
""" load and parse an ULog file into memory """
if isinstance(log_file, str):
self._file_handle = open(log_file, "rb")
else:
self._file_handle = log_file
# parse the whole file
self._read_file_header()
self._last_timestamp = self._start_timestamp
self._read_file_definitions()
if self.has_data_appended and len(self._appended_offsets) > 0:
if self._debug:
print('This file has data appended')
for offset in self._appended_offsets:
self._read_file_data(message_name_filter_list, read_until=offset)
self._file_handle.seek(offset)
# read the whole file, or the rest if data appended
self._read_file_data(message_name_filter_list)
self._file_handle.close()
del self._file_handle
def _read_file_header(self):
header_data = self._file_handle.read(16)
if len(header_data) != 16:
raise Exception("Invalid file format (Header too short)")
if header_data[:7] != self.HEADER_BYTES:
raise Exception("Invalid file format (Failed to parse header)")
self._file_version, = struct.unpack('B', header_data[7:8])
if self._file_version > 1:
print("Warning: unknown file version. Will attempt to read it anyway")
# read timestamp
self._start_timestamp, = ULog._unpack_uint64(header_data[8:])
def _read_file_definitions(self):
header = self._MessageHeader()
while True:
data = self._file_handle.read(3)
if not data:
break
header.initialize(data)
data = self._file_handle.read(header.msg_size)
if header.msg_type == self.MSG_TYPE_INFO:
msg_info = self._MessageInfo(data, header)
self._msg_info_dict[msg_info.key] = msg_info.value
elif header.msg_type == self.MSG_TYPE_INFO_MULTIPLE:
msg_info = self._MessageInfo(data, header, is_info_multiple=True)
self._add_message_info_multiple(msg_info)
elif header.msg_type == self.MSG_TYPE_FORMAT:
msg_format = self.MessageFormat(data, header)
self._message_formats[msg_format.name] = msg_format
elif header.msg_type == self.MSG_TYPE_PARAMETER:
msg_info = self._MessageInfo(data, header)
self._initial_parameters[msg_info.key] = msg_info.value
elif (header.msg_type == self.MSG_TYPE_ADD_LOGGED_MSG or
header.msg_type == self.MSG_TYPE_LOGGING):
self._file_handle.seek(-(3+header.msg_size), 1)
break # end of section
elif header.msg_type == self.MSG_TYPE_FLAG_BITS:
# make sure this is the first message in the log
if self._file_handle.tell() != 16 + 3 + header.msg_size:
print('Error: FLAGS_BITS message must be first message. Offset:',
self._file_handle.tell())
msg_flag_bits = self._MessageFlagBits(data, header)
self._compat_flags = msg_flag_bits.compat_flags
self._incompat_flags = msg_flag_bits.incompat_flags
self._appended_offsets = msg_flag_bits.appended_offsets
if self._debug:
print('compat flags: ', self._compat_flags)
print('incompat flags:', self._incompat_flags)
print('appended offsets:', self._appended_offsets)
# check if there are bits set that we don't know
unknown_incompat_flag_msg = "Unknown incompatible flag set: cannot parse the log"
if self._incompat_flags[0] & ~1:
raise Exception(unknown_incompat_flag_msg)
for i in range(1, 8):
if self._incompat_flags[i]:
raise Exception(unknown_incompat_flag_msg)
else:
if self._debug:
print('read_file_definitions: unknown message type: %i (%s)' %
(header.msg_type, chr(header.msg_type)))
file_position = self._file_handle.tell()
print('file position: %i (0x%x) msg size: %i' % (
file_position, file_position, header.msg_size))
if self._check_file_corruption(header):
# seek back to advance only by a single byte instead of
# skipping the message
self._file_handle.seek(-2-header.msg_size, 1)
def _read_file_data(self, message_name_filter_list, read_until=None):
"""
read the file data section
:param read_until: an optional file offset: if set, parse only up to
this offset (smaller than)
"""
if read_until is None:
read_until = 1 << 50 # make it larger than any possible log file
try:
# pre-init reusable objects
header = self._MessageHeader()
msg_data = self._MessageData()
while True:
data = self._file_handle.read(3)
header.initialize(data)
data = self._file_handle.read(header.msg_size)
if len(data) < header.msg_size:
break # less data than expected. File is most likely cut
if self._file_handle.tell() > read_until:
if self._debug:
print('read until offset=%i done, current pos=%i' %
(read_until, self._file_handle.tell()))
break
if header.msg_type == self.MSG_TYPE_INFO:
msg_info = self._MessageInfo(data, header)
self._msg_info_dict[msg_info.key] = msg_info.value
elif header.msg_type == self.MSG_TYPE_INFO_MULTIPLE:
msg_info = self._MessageInfo(data, header, is_info_multiple=True)
self._add_message_info_multiple(msg_info)
elif header.msg_type == self.MSG_TYPE_PARAMETER:
msg_info = self._MessageInfo(data, header)
self._changed_parameters.append((self._last_timestamp,
msg_info.key, msg_info.value))
elif header.msg_type == self.MSG_TYPE_ADD_LOGGED_MSG:
msg_add_logged = self._MessageAddLogged(data, header,
self._message_formats)
if (message_name_filter_list is None or
msg_add_logged.message_name in message_name_filter_list):
self._subscriptions[msg_add_logged.msg_id] = msg_add_logged
else:
self._filtered_message_ids.add(msg_add_logged.msg_id)
elif header.msg_type == self.MSG_TYPE_LOGGING:
msg_logging = self.MessageLogging(data, header)
self._logged_messages.append(msg_logging)
elif header.msg_type == self.MSG_TYPE_DATA:
msg_data.initialize(data, header, self._subscriptions, self)
if msg_data.timestamp != 0 and msg_data.timestamp > self._last_timestamp:
self._last_timestamp = msg_data.timestamp
elif header.msg_type == self.MSG_TYPE_DROPOUT:
msg_dropout = self.MessageDropout(data, header,
self._last_timestamp)
self._dropouts.append(msg_dropout)
else:
if self._debug:
print('_read_file_data: unknown message type: %i (%s)' %
(header.msg_type, chr(header.msg_type)))
file_position = self._file_handle.tell()
print('file position: %i (0x%x) msg size: %i' % (
file_position, file_position, header.msg_size))
if self._check_file_corruption(header):
# seek back to advance only by a single byte instead of
# skipping the message
self._file_handle.seek(-2-header.msg_size, 1)
except struct.error:
pass #we read past the end of the file
# convert into final representation
while self._subscriptions:
_, value = self._subscriptions.popitem()
if len(value.buffer) > 0: # only add if we have data
data_item = ULog.Data(value)
self._data_list.append(data_item)
def get_version_info(self, key_name='ver_sw_release'):
"""
get the (major, minor, patch, type) version information as tuple.
Returns None if not found
definition of type is:
>= 0: development
>= 64: alpha version
>= 128: beta version
>= 192: RC version
== 255: release version
"""
if key_name in self._msg_info_dict:
val = self._msg_info_dict[key_name]
return ((val >> 24) & 0xff, (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff)
return None
def get_version_info_str(self, key_name='ver_sw_release'):
"""
get version information in the form 'v1.2.3 (RC)', or None if version
tag either not found or it's a development version
"""
version = self.get_version_info(key_name)
if not version is None and version[3] >= 64:
type_str = ''
if version[3] < 128: type_str = ' (alpha)'
elif version[3] < 192: type_str = ' (beta)'
elif version[3] < 255: type_str = ' (RC)'
return 'v{}.{}.{}{}'.format(version[0], version[1], version[2], type_str)
return None
|
PX4/pyulog | pyulog/core.py | ULog.get_version_info | python | def get_version_info(self, key_name='ver_sw_release'):
if key_name in self._msg_info_dict:
val = self._msg_info_dict[key_name]
return ((val >> 24) & 0xff, (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff)
return None | get the (major, minor, patch, type) version information as tuple.
Returns None if not found
definition of type is:
>= 0: development
>= 64: alpha version
>= 128: beta version
>= 192: RC version
== 255: release version | train | https://github.com/PX4/pyulog/blob/3bc4f9338d30e2e0a0dfbed58f54d200967e5056/pyulog/core.py#L614-L628 | null | class ULog(object):
"""
This class parses an ulog file
"""
## constants ##
HEADER_BYTES = b'\x55\x4c\x6f\x67\x01\x12\x35'
# message types
MSG_TYPE_FORMAT = ord('F')
MSG_TYPE_DATA = ord('D')
MSG_TYPE_INFO = ord('I')
MSG_TYPE_INFO_MULTIPLE = ord('M')
MSG_TYPE_PARAMETER = ord('P')
MSG_TYPE_ADD_LOGGED_MSG = ord('A')
MSG_TYPE_REMOVE_LOGGED_MSG = ord('R')
MSG_TYPE_SYNC = ord('S')
MSG_TYPE_DROPOUT = ord('O')
MSG_TYPE_LOGGING = ord('L')
MSG_TYPE_FLAG_BITS = ord('B')
_UNPACK_TYPES = {
'int8_t': ['b', 1, np.int8],
'uint8_t': ['B', 1, np.uint8],
'int16_t': ['h', 2, np.int16],
'uint16_t': ['H', 2, np.uint16],
'int32_t': ['i', 4, np.int32],
'uint32_t': ['I', 4, np.uint32],
'int64_t': ['q', 8, np.int64],
'uint64_t': ['Q', 8, np.uint64],
'float': ['f', 4, np.float32],
'double': ['d', 8, np.float64],
'bool': ['?', 1, np.int8],
'char': ['c', 1, np.int8]
}
@staticmethod
def get_field_size(type_str):
"""
get the field size in bytes.
:param type_str: type string, eg. 'int8_t'
"""
return ULog._UNPACK_TYPES[type_str][1]
# pre-init unpack structs for quicker use
_unpack_ushort_byte = struct.Struct('<HB').unpack
_unpack_ushort = struct.Struct('<H').unpack
_unpack_uint64 = struct.Struct('<Q').unpack
def __init__(self, log_file, message_name_filter_list=None):
"""
Initialize the object & load the file.
:param log_file: a file name (str) or a readable file object
:param message_name_filter_list: list of strings, to only load messages
with the given names. If None, load everything.
"""
self._debug = False
self._file_corrupt = False
self._start_timestamp = 0
self._last_timestamp = 0
self._msg_info_dict = {}
self._msg_info_multiple_dict = {}
self._initial_parameters = {}
self._changed_parameters = []
self._message_formats = {}
self._logged_messages = []
self._dropouts = []
self._data_list = []
self._subscriptions = {} # dict of key=msg_id, value=_MessageAddLogged
self._filtered_message_ids = set() # _MessageAddLogged id's that are filtered
self._missing_message_ids = set() # _MessageAddLogged id's that could not be found
self._file_version = 0
self._compat_flags = [0] * 8
self._incompat_flags = [0] * 8
self._appended_offsets = [] # file offsets for appended data
self._load_file(log_file, message_name_filter_list)
## parsed data
@property
def start_timestamp(self):
""" timestamp of file start """
return self._start_timestamp
@property
def last_timestamp(self):
""" timestamp of last message """
return self._last_timestamp
@property
def msg_info_dict(self):
""" dictionary of all information messages (key is a string, value
depends on the type, usually string or int) """
return self._msg_info_dict
@property
def msg_info_multiple_dict(self):
""" dictionary of all information multiple messages (key is a string, value
is a list of lists that contains the messages) """
return self._msg_info_multiple_dict
@property
def initial_parameters(self):
""" dictionary of all initially set parameters (key=param name) """
return self._initial_parameters
@property
def changed_parameters(self):
""" list of all changed parameters (tuple of (timestamp, name, value))"""
return self._changed_parameters
@property
def message_formats(self):
""" dictionary with key = format name (MessageFormat.name),
value = MessageFormat object """
return self._message_formats
@property
def logged_messages(self):
""" list of MessageLogging objects """
return self._logged_messages
@property
def dropouts(self):
""" list of MessageDropout objects """
return self._dropouts
@property
def data_list(self):
""" extracted data: list of Data objects """
return self._data_list
@property
def has_data_appended(self):
""" returns True if the log has data appended, False otherwise """
return self._incompat_flags[0] & 0x1
@property
def file_corruption(self):
""" True if a file corruption got detected """
return self._file_corrupt
def get_dataset(self, name, multi_instance=0):
""" get a specific dataset.
example:
try:
gyro_data = ulog.get_dataset('sensor_gyro')
except (KeyError, IndexError, ValueError) as error:
print(type(error), "(sensor_gyro):", error)
:param name: name of the dataset
:param multi_instance: the multi_id, defaults to the first
:raises KeyError, IndexError, ValueError: if name or instance not found
"""
return [elem for elem in self._data_list
if elem.name == name and elem.multi_id == multi_instance][0]
class Data(object):
""" contains the final topic data for a single topic and instance """
def __init__(self, message_add_logged_obj):
self.multi_id = message_add_logged_obj.multi_id
self.name = message_add_logged_obj.message_name
self.field_data = message_add_logged_obj.field_data
self.timestamp_idx = message_add_logged_obj.timestamp_idx
# get data as numpy.ndarray
np_array = np.frombuffer(message_add_logged_obj.buffer,
dtype=message_add_logged_obj.dtype)
# convert into dict of np.array (which is easier to handle)
self.data = {}
for name in np_array.dtype.names:
self.data[name] = np_array[name]
def list_value_changes(self, field_name):
""" get a list of (timestamp, value) tuples, whenever the value
changes. The first data point with non-zero timestamp is always
included, messages with timestamp = 0 are ignored """
t = self.data['timestamp']
x = self.data[field_name]
indices = t != 0 # filter out 0 values
t = t[indices]
x = x[indices]
if len(t) == 0: return []
ret = [(t[0], x[0])]
indices = np.where(x[:-1] != x[1:])[0] + 1
ret.extend(zip(t[indices], x[indices]))
return ret
## Representations of the messages from the log file ##
class _MessageHeader(object):
""" 3 bytes ULog message header """
def __init__(self):
self.msg_size = 0
self.msg_type = 0
def initialize(self, data):
self.msg_size, self.msg_type = ULog._unpack_ushort_byte(data)
class _MessageInfo(object):
""" ULog info message representation """
def __init__(self, data, header, is_info_multiple=False):
if is_info_multiple: # INFO_MULTIPLE message
self.is_continued, = struct.unpack('<B', data[0:1])
data = data[1:]
key_len, = struct.unpack('<B', data[0:1])
type_key = _parse_string(data[1:1+key_len])
type_key_split = type_key.split(' ')
self.type = type_key_split[0]
self.key = type_key_split[1]
if self.type.startswith('char['): # it's a string
self.value = _parse_string(data[1+key_len:])
elif self.type in ULog._UNPACK_TYPES:
unpack_type = ULog._UNPACK_TYPES[self.type]
self.value, = struct.unpack('<'+unpack_type[0], data[1+key_len:])
else: # probably an array (or non-basic type)
self.value = data[1+key_len:]
class _MessageFlagBits(object):
""" ULog message flag bits """
def __init__(self, data, header):
if header.msg_size > 8 + 8 + 3*8:
# we can still parse it but might miss some information
print('Warning: Flags Bit message is longer than expected')
self.compat_flags = list(struct.unpack('<'+'B'*8, data[0:8]))
self.incompat_flags = list(struct.unpack('<'+'B'*8, data[8:16]))
self.appended_offsets = list(struct.unpack('<'+'Q'*3, data[16:16+3*8]))
# remove the 0's at the end
while len(self.appended_offsets) > 0 and self.appended_offsets[-1] == 0:
self.appended_offsets.pop()
class MessageFormat(object):
""" ULog message format representation """
def __init__(self, data, header):
format_arr = _parse_string(data).split(':')
self.name = format_arr[0]
types_str = format_arr[1].split(';')
self.fields = [] # list of tuples (type, array_size, name)
for t in types_str:
if len(t) > 0:
self.fields.append(self._extract_type(t))
@staticmethod
def _extract_type(field_str):
field_str_split = field_str.split(' ')
type_str = field_str_split[0]
name_str = field_str_split[1]
a_pos = type_str.find('[')
if a_pos == -1:
array_size = 1
type_name = type_str
else:
b_pos = type_str.find(']')
array_size = int(type_str[a_pos+1:b_pos])
type_name = type_str[:a_pos]
return type_name, array_size, name_str
class MessageLogging(object):
""" ULog logged string message representation """
def __init__(self, data, header):
self.log_level, = struct.unpack('<B', data[0:1])
self.timestamp, = struct.unpack('<Q', data[1:9])
self.message = _parse_string(data[9:])
def log_level_str(self):
return {ord('0'): 'EMERGENCY',
ord('1'): 'ALERT',
ord('2'): 'CRITICAL',
ord('3'): 'ERROR',
ord('4'): 'WARNING',
ord('5'): 'NOTICE',
ord('6'): 'INFO',
ord('7'): 'DEBUG'}.get(self.log_level, 'UNKNOWN')
class MessageDropout(object):
""" ULog dropout message representation """
def __init__(self, data, header, timestamp):
self.duration, = struct.unpack('<H', data)
self.timestamp = timestamp
class _FieldData(object):
""" Type and name of a single ULog data field """
def __init__(self, field_name, type_str):
self.field_name = field_name
self.type_str = type_str
class _MessageAddLogged(object):
""" ULog add logging data message representation """
def __init__(self, data, header, message_formats):
self.multi_id, = struct.unpack('<B', data[0:1])
self.msg_id, = struct.unpack('<H', data[1:3])
self.message_name = _parse_string(data[3:])
self.field_data = [] # list of _FieldData
self.timestamp_idx = -1
self._parse_format(message_formats)
self.timestamp_offset = 0
for field in self.field_data:
if field.field_name == 'timestamp':
break
self.timestamp_offset += ULog._UNPACK_TYPES[field.type_str][1]
self.buffer = bytearray() # accumulate all message data here
# construct types for numpy
dtype_list = []
for field in self.field_data:
numpy_type = ULog._UNPACK_TYPES[field.type_str][2]
dtype_list.append((field.field_name, numpy_type))
self.dtype = np.dtype(dtype_list).newbyteorder('<')
def _parse_format(self, message_formats):
self._parse_nested_type('', self.message_name, message_formats)
# remove padding fields at the end
while (len(self.field_data) > 0 and
self.field_data[-1].field_name.startswith('_padding')):
self.field_data.pop()
def _parse_nested_type(self, prefix_str, type_name, message_formats):
# we flatten nested types
message_format = message_formats[type_name]
for (type_name_fmt, array_size, field_name) in message_format.fields:
if type_name_fmt in ULog._UNPACK_TYPES:
if array_size > 1:
for i in range(array_size):
self.field_data.append(ULog._FieldData(
prefix_str+field_name+'['+str(i)+']', type_name_fmt))
else:
self.field_data.append(ULog._FieldData(
prefix_str+field_name, type_name_fmt))
if prefix_str+field_name == 'timestamp':
self.timestamp_idx = len(self.field_data) - 1
else: # nested type
if array_size > 1:
for i in range(array_size):
self._parse_nested_type(prefix_str+field_name+'['+str(i)+'].',
type_name_fmt, message_formats)
else:
self._parse_nested_type(prefix_str+field_name+'.',
type_name_fmt, message_formats)
class _MessageData(object):
def __init__(self):
self.timestamp = 0
def initialize(self, data, header, subscriptions, ulog_object):
msg_id, = ULog._unpack_ushort(data[:2])
if msg_id in subscriptions:
subscription = subscriptions[msg_id]
# accumulate data to a buffer, will be parsed later
subscription.buffer += data[2:]
t_off = subscription.timestamp_offset
# TODO: the timestamp can have another size than uint64
self.timestamp, = ULog._unpack_uint64(data[t_off+2:t_off+10])
else:
if not msg_id in ulog_object._filtered_message_ids:
# this is an error, but make it non-fatal
if not msg_id in ulog_object._missing_message_ids:
ulog_object._missing_message_ids.add(msg_id)
if ulog_object._debug:
print(ulog_object._file_handle.tell())
print('Warning: no subscription found for message id {:}. Continuing,'
' but file is most likely corrupt'.format(msg_id))
self.timestamp = 0
def _add_message_info_multiple(self, msg_info):
""" add a message info multiple to self._msg_info_multiple_dict """
if msg_info.key in self._msg_info_multiple_dict:
if msg_info.is_continued:
self._msg_info_multiple_dict[msg_info.key][-1].append(msg_info.value)
else:
self._msg_info_multiple_dict[msg_info.key].append([msg_info.value])
else:
self._msg_info_multiple_dict[msg_info.key] = [[msg_info.value]]
def _load_file(self, log_file, message_name_filter_list):
""" load and parse an ULog file into memory """
if isinstance(log_file, str):
self._file_handle = open(log_file, "rb")
else:
self._file_handle = log_file
# parse the whole file
self._read_file_header()
self._last_timestamp = self._start_timestamp
self._read_file_definitions()
if self.has_data_appended and len(self._appended_offsets) > 0:
if self._debug:
print('This file has data appended')
for offset in self._appended_offsets:
self._read_file_data(message_name_filter_list, read_until=offset)
self._file_handle.seek(offset)
# read the whole file, or the rest if data appended
self._read_file_data(message_name_filter_list)
self._file_handle.close()
del self._file_handle
def _read_file_header(self):
header_data = self._file_handle.read(16)
if len(header_data) != 16:
raise Exception("Invalid file format (Header too short)")
if header_data[:7] != self.HEADER_BYTES:
raise Exception("Invalid file format (Failed to parse header)")
self._file_version, = struct.unpack('B', header_data[7:8])
if self._file_version > 1:
print("Warning: unknown file version. Will attempt to read it anyway")
# read timestamp
self._start_timestamp, = ULog._unpack_uint64(header_data[8:])
def _read_file_definitions(self):
    """Parse the definitions section of the log.

    Reads INFO, INFO_MULTIPLE, FORMAT, PARAMETER and FLAG_BITS messages
    until the first ADD_LOGGED_MSG or LOGGING message, which marks the
    start of the data section (the handle is rewound so the data parser
    sees that message again).
    """
    header = self._MessageHeader()
    while True:
        data = self._file_handle.read(3)
        if not data:
            break  # end of file reached while still in the definitions section
        header.initialize(data)
        data = self._file_handle.read(header.msg_size)
        if header.msg_type == self.MSG_TYPE_INFO:
            msg_info = self._MessageInfo(data, header)
            self._msg_info_dict[msg_info.key] = msg_info.value
        elif header.msg_type == self.MSG_TYPE_INFO_MULTIPLE:
            msg_info = self._MessageInfo(data, header, is_info_multiple=True)
            self._add_message_info_multiple(msg_info)
        elif header.msg_type == self.MSG_TYPE_FORMAT:
            msg_format = self.MessageFormat(data, header)
            self._message_formats[msg_format.name] = msg_format
        elif header.msg_type == self.MSG_TYPE_PARAMETER:
            msg_info = self._MessageInfo(data, header)
            self._initial_parameters[msg_info.key] = msg_info.value
        elif (header.msg_type == self.MSG_TYPE_ADD_LOGGED_MSG or
              header.msg_type == self.MSG_TYPE_LOGGING):
            # first message of the data section: rewind over header + payload
            # so _read_file_data parses it
            self._file_handle.seek(-(3+header.msg_size), 1)
            break  # end of section
        elif header.msg_type == self.MSG_TYPE_FLAG_BITS:
            # make sure this is the first message in the log
            # (file header is 16 bytes, message header 3 bytes)
            if self._file_handle.tell() != 16 + 3 + header.msg_size:
                print('Error: FLAGS_BITS message must be first message. Offset:',
                      self._file_handle.tell())
            msg_flag_bits = self._MessageFlagBits(data, header)

            self._compat_flags = msg_flag_bits.compat_flags
            self._incompat_flags = msg_flag_bits.incompat_flags
            self._appended_offsets = msg_flag_bits.appended_offsets
            if self._debug:
                print('compat flags: ', self._compat_flags)
                print('incompat flags:', self._incompat_flags)
                print('appended offsets:', self._appended_offsets)

            # check if there are bits set that we don't know
            # (only bit 0 of byte 0 -- "data appended" -- is understood)
            unknown_incompat_flag_msg = "Unknown incompatible flag set: cannot parse the log"
            if self._incompat_flags[0] & ~1:
                raise Exception(unknown_incompat_flag_msg)
            for i in range(1, 8):
                if self._incompat_flags[i]:
                    raise Exception(unknown_incompat_flag_msg)
        else:
            # unknown message type: either a corrupt file or a newer ULog
            # message type we simply skip
            if self._debug:
                print('read_file_definitions: unknown message type: %i (%s)' %
                      (header.msg_type, chr(header.msg_type)))
                file_position = self._file_handle.tell()
                print('file position: %i (0x%x) msg size: %i' % (
                    file_position, file_position, header.msg_size))
            if self._check_file_corruption(header):
                # seek back to advance only by a single byte instead of
                # skipping the message
                self._file_handle.seek(-2-header.msg_size, 1)
def _read_file_data(self, message_name_filter_list, read_until=None):
    """
    read the file data section

    :param message_name_filter_list: list of message names to subscribe to,
                                     or None to subscribe to all
    :param read_until: an optional file offset: if set, parse only up to
                       this offset (smaller than)
    """
    if read_until is None:
        read_until = 1 << 50  # make it larger than any possible log file
    try:
        # pre-init reusable objects to avoid per-message allocations
        header = self._MessageHeader()
        msg_data = self._MessageData()
        while True:
            data = self._file_handle.read(3)
            header.initialize(data)
            data = self._file_handle.read(header.msg_size)
            if len(data) < header.msg_size:
                break  # less data than expected. File is most likely cut

            if self._file_handle.tell() > read_until:
                if self._debug:
                    print('read until offset=%i done, current pos=%i' %
                          (read_until, self._file_handle.tell()))
                break

            if header.msg_type == self.MSG_TYPE_INFO:
                msg_info = self._MessageInfo(data, header)
                self._msg_info_dict[msg_info.key] = msg_info.value
            elif header.msg_type == self.MSG_TYPE_INFO_MULTIPLE:
                msg_info = self._MessageInfo(data, header, is_info_multiple=True)
                self._add_message_info_multiple(msg_info)
            elif header.msg_type == self.MSG_TYPE_PARAMETER:
                # parameter changed mid-log; remember when it changed
                msg_info = self._MessageInfo(data, header)
                self._changed_parameters.append((self._last_timestamp,
                                                 msg_info.key, msg_info.value))
            elif header.msg_type == self.MSG_TYPE_ADD_LOGGED_MSG:
                # new topic subscription: keep it unless filtered out by name
                msg_add_logged = self._MessageAddLogged(data, header,
                                                        self._message_formats)
                if (message_name_filter_list is None or
                        msg_add_logged.message_name in message_name_filter_list):
                    self._subscriptions[msg_add_logged.msg_id] = msg_add_logged
                else:
                    self._filtered_message_ids.add(msg_add_logged.msg_id)
            elif header.msg_type == self.MSG_TYPE_LOGGING:
                msg_logging = self.MessageLogging(data, header)
                self._logged_messages.append(msg_logging)
            elif header.msg_type == self.MSG_TYPE_DATA:
                # accumulate payload into its subscription buffer; track the
                # monotonically increasing last timestamp (0 means unknown)
                msg_data.initialize(data, header, self._subscriptions, self)
                if msg_data.timestamp != 0 and msg_data.timestamp > self._last_timestamp:
                    self._last_timestamp = msg_data.timestamp
            elif header.msg_type == self.MSG_TYPE_DROPOUT:
                msg_dropout = self.MessageDropout(data, header,
                                                  self._last_timestamp)
                self._dropouts.append(msg_dropout)
            else:
                # unknown message type: corrupt file or a newer ULog message
                if self._debug:
                    print('_read_file_data: unknown message type: %i (%s)' %
                          (header.msg_type, chr(header.msg_type)))
                    file_position = self._file_handle.tell()
                    print('file position: %i (0x%x) msg size: %i' % (
                        file_position, file_position, header.msg_size))
                if self._check_file_corruption(header):
                    # seek back to advance only by a single byte instead of
                    # skipping the message
                    self._file_handle.seek(-2-header.msg_size, 1)

    except struct.error:
        pass  # we read past the end of the file: stop parsing gracefully

    # convert into final representation: one Data object per non-empty
    # subscription (consumes self._subscriptions)
    while self._subscriptions:
        _, value = self._subscriptions.popitem()
        if len(value.buffer) > 0:  # only add if we have data
            data_item = ULog.Data(value)
            self._data_list.append(data_item)
def _check_file_corruption(self, header):
""" check for file corruption based on an unknown message type in the header """
# We need to handle 2 cases:
# - corrupt file (we do our best to read the rest of the file)
# - new ULog message type got added (we just want to skip the message)
if header.msg_type == 0 or header.msg_size == 0 or header.msg_size > 10000:
if not self._file_corrupt and self._debug:
print('File corruption detected')
self._file_corrupt = True
return self._file_corrupt
def get_version_info_str(self, key_name='ver_sw_release'):
    """
    get version information in the form 'v1.2.3 (RC)', or None if version
    tag either not found or it's a development version
    """
    info = self.get_version_info(key_name)
    if info is None:
        return None
    major, minor, patch, release_type = info
    if release_type < 64:
        return None  # development version: not reported
    # map the release-type byte onto a human-readable suffix
    suffix = ''
    for upper_bound, label in ((128, ' (alpha)'), (192, ' (beta)'), (255, ' (RC)')):
        if release_type < upper_bound:
            suffix = label
            break
    return 'v{}.{}.{}{}'.format(major, minor, patch, suffix)
|
PX4/pyulog | pyulog/core.py | ULog.get_version_info_str | python | def get_version_info_str(self, key_name='ver_sw_release'):
version = self.get_version_info(key_name)
if not version is None and version[3] >= 64:
type_str = ''
if version[3] < 128: type_str = ' (alpha)'
elif version[3] < 192: type_str = ' (beta)'
elif version[3] < 255: type_str = ' (RC)'
return 'v{}.{}.{}{}'.format(version[0], version[1], version[2], type_str)
return None | get version information in the form 'v1.2.3 (RC)', or None if version
tag either not found or it's a development version | train | https://github.com/PX4/pyulog/blob/3bc4f9338d30e2e0a0dfbed58f54d200967e5056/pyulog/core.py#L630-L642 | [
"def get_version_info(self, key_name='ver_sw_release'):\n \"\"\"\n get the (major, minor, patch, type) version information as tuple.\n Returns None if not found\n definition of type is:\n >= 0: development\n >= 64: alpha version\n >= 128: beta version\n >= 192: RC version\n == 255: release version\n \"\"\"\n if key_name in self._msg_info_dict:\n val = self._msg_info_dict[key_name]\n return ((val >> 24) & 0xff, (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff)\n return None\n"
] | class ULog(object):
"""
This class parses an ulog file
"""
## constants ##
HEADER_BYTES = b'\x55\x4c\x6f\x67\x01\x12\x35'
# message types
MSG_TYPE_FORMAT = ord('F')
MSG_TYPE_DATA = ord('D')
MSG_TYPE_INFO = ord('I')
MSG_TYPE_INFO_MULTIPLE = ord('M')
MSG_TYPE_PARAMETER = ord('P')
MSG_TYPE_ADD_LOGGED_MSG = ord('A')
MSG_TYPE_REMOVE_LOGGED_MSG = ord('R')
MSG_TYPE_SYNC = ord('S')
MSG_TYPE_DROPOUT = ord('O')
MSG_TYPE_LOGGING = ord('L')
MSG_TYPE_FLAG_BITS = ord('B')
_UNPACK_TYPES = {
'int8_t': ['b', 1, np.int8],
'uint8_t': ['B', 1, np.uint8],
'int16_t': ['h', 2, np.int16],
'uint16_t': ['H', 2, np.uint16],
'int32_t': ['i', 4, np.int32],
'uint32_t': ['I', 4, np.uint32],
'int64_t': ['q', 8, np.int64],
'uint64_t': ['Q', 8, np.uint64],
'float': ['f', 4, np.float32],
'double': ['d', 8, np.float64],
'bool': ['?', 1, np.int8],
'char': ['c', 1, np.int8]
}
@staticmethod
def get_field_size(type_str):
"""
get the field size in bytes.
:param type_str: type string, eg. 'int8_t'
"""
return ULog._UNPACK_TYPES[type_str][1]
# pre-init unpack structs for quicker use
_unpack_ushort_byte = struct.Struct('<HB').unpack
_unpack_ushort = struct.Struct('<H').unpack
_unpack_uint64 = struct.Struct('<Q').unpack
def __init__(self, log_file, message_name_filter_list=None):
"""
Initialize the object & load the file.
:param log_file: a file name (str) or a readable file object
:param message_name_filter_list: list of strings, to only load messages
with the given names. If None, load everything.
"""
self._debug = False
self._file_corrupt = False
self._start_timestamp = 0
self._last_timestamp = 0
self._msg_info_dict = {}
self._msg_info_multiple_dict = {}
self._initial_parameters = {}
self._changed_parameters = []
self._message_formats = {}
self._logged_messages = []
self._dropouts = []
self._data_list = []
self._subscriptions = {} # dict of key=msg_id, value=_MessageAddLogged
self._filtered_message_ids = set() # _MessageAddLogged id's that are filtered
self._missing_message_ids = set() # _MessageAddLogged id's that could not be found
self._file_version = 0
self._compat_flags = [0] * 8
self._incompat_flags = [0] * 8
self._appended_offsets = [] # file offsets for appended data
self._load_file(log_file, message_name_filter_list)
## parsed data
@property
def start_timestamp(self):
""" timestamp of file start """
return self._start_timestamp
@property
def last_timestamp(self):
""" timestamp of last message """
return self._last_timestamp
@property
def msg_info_dict(self):
""" dictionary of all information messages (key is a string, value
depends on the type, usually string or int) """
return self._msg_info_dict
@property
def msg_info_multiple_dict(self):
""" dictionary of all information multiple messages (key is a string, value
is a list of lists that contains the messages) """
return self._msg_info_multiple_dict
@property
def initial_parameters(self):
""" dictionary of all initially set parameters (key=param name) """
return self._initial_parameters
@property
def changed_parameters(self):
""" list of all changed parameters (tuple of (timestamp, name, value))"""
return self._changed_parameters
@property
def message_formats(self):
""" dictionary with key = format name (MessageFormat.name),
value = MessageFormat object """
return self._message_formats
@property
def logged_messages(self):
""" list of MessageLogging objects """
return self._logged_messages
@property
def dropouts(self):
""" list of MessageDropout objects """
return self._dropouts
@property
def data_list(self):
""" extracted data: list of Data objects """
return self._data_list
@property
def has_data_appended(self):
""" returns True if the log has data appended, False otherwise """
return self._incompat_flags[0] & 0x1
@property
def file_corruption(self):
""" True if a file corruption got detected """
return self._file_corrupt
def get_dataset(self, name, multi_instance=0):
""" get a specific dataset.
example:
try:
gyro_data = ulog.get_dataset('sensor_gyro')
except (KeyError, IndexError, ValueError) as error:
print(type(error), "(sensor_gyro):", error)
:param name: name of the dataset
:param multi_instance: the multi_id, defaults to the first
:raises KeyError, IndexError, ValueError: if name or instance not found
"""
return [elem for elem in self._data_list
if elem.name == name and elem.multi_id == multi_instance][0]
class Data(object):
""" contains the final topic data for a single topic and instance """
def __init__(self, message_add_logged_obj):
self.multi_id = message_add_logged_obj.multi_id
self.name = message_add_logged_obj.message_name
self.field_data = message_add_logged_obj.field_data
self.timestamp_idx = message_add_logged_obj.timestamp_idx
# get data as numpy.ndarray
np_array = np.frombuffer(message_add_logged_obj.buffer,
dtype=message_add_logged_obj.dtype)
# convert into dict of np.array (which is easier to handle)
self.data = {}
for name in np_array.dtype.names:
self.data[name] = np_array[name]
def list_value_changes(self, field_name):
""" get a list of (timestamp, value) tuples, whenever the value
changes. The first data point with non-zero timestamp is always
included, messages with timestamp = 0 are ignored """
t = self.data['timestamp']
x = self.data[field_name]
indices = t != 0 # filter out 0 values
t = t[indices]
x = x[indices]
if len(t) == 0: return []
ret = [(t[0], x[0])]
indices = np.where(x[:-1] != x[1:])[0] + 1
ret.extend(zip(t[indices], x[indices]))
return ret
## Representations of the messages from the log file ##
class _MessageHeader(object):
""" 3 bytes ULog message header """
def __init__(self):
self.msg_size = 0
self.msg_type = 0
def initialize(self, data):
self.msg_size, self.msg_type = ULog._unpack_ushort_byte(data)
class _MessageInfo(object):
""" ULog info message representation """
def __init__(self, data, header, is_info_multiple=False):
if is_info_multiple: # INFO_MULTIPLE message
self.is_continued, = struct.unpack('<B', data[0:1])
data = data[1:]
key_len, = struct.unpack('<B', data[0:1])
type_key = _parse_string(data[1:1+key_len])
type_key_split = type_key.split(' ')
self.type = type_key_split[0]
self.key = type_key_split[1]
if self.type.startswith('char['): # it's a string
self.value = _parse_string(data[1+key_len:])
elif self.type in ULog._UNPACK_TYPES:
unpack_type = ULog._UNPACK_TYPES[self.type]
self.value, = struct.unpack('<'+unpack_type[0], data[1+key_len:])
else: # probably an array (or non-basic type)
self.value = data[1+key_len:]
class _MessageFlagBits(object):
""" ULog message flag bits """
def __init__(self, data, header):
if header.msg_size > 8 + 8 + 3*8:
# we can still parse it but might miss some information
print('Warning: Flags Bit message is longer than expected')
self.compat_flags = list(struct.unpack('<'+'B'*8, data[0:8]))
self.incompat_flags = list(struct.unpack('<'+'B'*8, data[8:16]))
self.appended_offsets = list(struct.unpack('<'+'Q'*3, data[16:16+3*8]))
# remove the 0's at the end
while len(self.appended_offsets) > 0 and self.appended_offsets[-1] == 0:
self.appended_offsets.pop()
class MessageFormat(object):
""" ULog message format representation """
def __init__(self, data, header):
format_arr = _parse_string(data).split(':')
self.name = format_arr[0]
types_str = format_arr[1].split(';')
self.fields = [] # list of tuples (type, array_size, name)
for t in types_str:
if len(t) > 0:
self.fields.append(self._extract_type(t))
@staticmethod
def _extract_type(field_str):
field_str_split = field_str.split(' ')
type_str = field_str_split[0]
name_str = field_str_split[1]
a_pos = type_str.find('[')
if a_pos == -1:
array_size = 1
type_name = type_str
else:
b_pos = type_str.find(']')
array_size = int(type_str[a_pos+1:b_pos])
type_name = type_str[:a_pos]
return type_name, array_size, name_str
class MessageLogging(object):
""" ULog logged string message representation """
def __init__(self, data, header):
self.log_level, = struct.unpack('<B', data[0:1])
self.timestamp, = struct.unpack('<Q', data[1:9])
self.message = _parse_string(data[9:])
def log_level_str(self):
return {ord('0'): 'EMERGENCY',
ord('1'): 'ALERT',
ord('2'): 'CRITICAL',
ord('3'): 'ERROR',
ord('4'): 'WARNING',
ord('5'): 'NOTICE',
ord('6'): 'INFO',
ord('7'): 'DEBUG'}.get(self.log_level, 'UNKNOWN')
class MessageDropout(object):
""" ULog dropout message representation """
def __init__(self, data, header, timestamp):
self.duration, = struct.unpack('<H', data)
self.timestamp = timestamp
class _FieldData(object):
""" Type and name of a single ULog data field """
def __init__(self, field_name, type_str):
self.field_name = field_name
self.type_str = type_str
class _MessageAddLogged(object):
""" ULog add logging data message representation """
def __init__(self, data, header, message_formats):
self.multi_id, = struct.unpack('<B', data[0:1])
self.msg_id, = struct.unpack('<H', data[1:3])
self.message_name = _parse_string(data[3:])
self.field_data = [] # list of _FieldData
self.timestamp_idx = -1
self._parse_format(message_formats)
self.timestamp_offset = 0
for field in self.field_data:
if field.field_name == 'timestamp':
break
self.timestamp_offset += ULog._UNPACK_TYPES[field.type_str][1]
self.buffer = bytearray() # accumulate all message data here
# construct types for numpy
dtype_list = []
for field in self.field_data:
numpy_type = ULog._UNPACK_TYPES[field.type_str][2]
dtype_list.append((field.field_name, numpy_type))
self.dtype = np.dtype(dtype_list).newbyteorder('<')
def _parse_format(self, message_formats):
self._parse_nested_type('', self.message_name, message_formats)
# remove padding fields at the end
while (len(self.field_data) > 0 and
self.field_data[-1].field_name.startswith('_padding')):
self.field_data.pop()
def _parse_nested_type(self, prefix_str, type_name, message_formats):
# we flatten nested types
message_format = message_formats[type_name]
for (type_name_fmt, array_size, field_name) in message_format.fields:
if type_name_fmt in ULog._UNPACK_TYPES:
if array_size > 1:
for i in range(array_size):
self.field_data.append(ULog._FieldData(
prefix_str+field_name+'['+str(i)+']', type_name_fmt))
else:
self.field_data.append(ULog._FieldData(
prefix_str+field_name, type_name_fmt))
if prefix_str+field_name == 'timestamp':
self.timestamp_idx = len(self.field_data) - 1
else: # nested type
if array_size > 1:
for i in range(array_size):
self._parse_nested_type(prefix_str+field_name+'['+str(i)+'].',
type_name_fmt, message_formats)
else:
self._parse_nested_type(prefix_str+field_name+'.',
type_name_fmt, message_formats)
class _MessageData(object):
def __init__(self):
self.timestamp = 0
def initialize(self, data, header, subscriptions, ulog_object):
msg_id, = ULog._unpack_ushort(data[:2])
if msg_id in subscriptions:
subscription = subscriptions[msg_id]
# accumulate data to a buffer, will be parsed later
subscription.buffer += data[2:]
t_off = subscription.timestamp_offset
# TODO: the timestamp can have another size than uint64
self.timestamp, = ULog._unpack_uint64(data[t_off+2:t_off+10])
else:
if not msg_id in ulog_object._filtered_message_ids:
# this is an error, but make it non-fatal
if not msg_id in ulog_object._missing_message_ids:
ulog_object._missing_message_ids.add(msg_id)
if ulog_object._debug:
print(ulog_object._file_handle.tell())
print('Warning: no subscription found for message id {:}. Continuing,'
' but file is most likely corrupt'.format(msg_id))
self.timestamp = 0
def _add_message_info_multiple(self, msg_info):
""" add a message info multiple to self._msg_info_multiple_dict """
if msg_info.key in self._msg_info_multiple_dict:
if msg_info.is_continued:
self._msg_info_multiple_dict[msg_info.key][-1].append(msg_info.value)
else:
self._msg_info_multiple_dict[msg_info.key].append([msg_info.value])
else:
self._msg_info_multiple_dict[msg_info.key] = [[msg_info.value]]
def _load_file(self, log_file, message_name_filter_list):
""" load and parse an ULog file into memory """
if isinstance(log_file, str):
self._file_handle = open(log_file, "rb")
else:
self._file_handle = log_file
# parse the whole file
self._read_file_header()
self._last_timestamp = self._start_timestamp
self._read_file_definitions()
if self.has_data_appended and len(self._appended_offsets) > 0:
if self._debug:
print('This file has data appended')
for offset in self._appended_offsets:
self._read_file_data(message_name_filter_list, read_until=offset)
self._file_handle.seek(offset)
# read the whole file, or the rest if data appended
self._read_file_data(message_name_filter_list)
self._file_handle.close()
del self._file_handle
def _read_file_header(self):
header_data = self._file_handle.read(16)
if len(header_data) != 16:
raise Exception("Invalid file format (Header too short)")
if header_data[:7] != self.HEADER_BYTES:
raise Exception("Invalid file format (Failed to parse header)")
self._file_version, = struct.unpack('B', header_data[7:8])
if self._file_version > 1:
print("Warning: unknown file version. Will attempt to read it anyway")
# read timestamp
self._start_timestamp, = ULog._unpack_uint64(header_data[8:])
def _read_file_definitions(self):
header = self._MessageHeader()
while True:
data = self._file_handle.read(3)
if not data:
break
header.initialize(data)
data = self._file_handle.read(header.msg_size)
if header.msg_type == self.MSG_TYPE_INFO:
msg_info = self._MessageInfo(data, header)
self._msg_info_dict[msg_info.key] = msg_info.value
elif header.msg_type == self.MSG_TYPE_INFO_MULTIPLE:
msg_info = self._MessageInfo(data, header, is_info_multiple=True)
self._add_message_info_multiple(msg_info)
elif header.msg_type == self.MSG_TYPE_FORMAT:
msg_format = self.MessageFormat(data, header)
self._message_formats[msg_format.name] = msg_format
elif header.msg_type == self.MSG_TYPE_PARAMETER:
msg_info = self._MessageInfo(data, header)
self._initial_parameters[msg_info.key] = msg_info.value
elif (header.msg_type == self.MSG_TYPE_ADD_LOGGED_MSG or
header.msg_type == self.MSG_TYPE_LOGGING):
self._file_handle.seek(-(3+header.msg_size), 1)
break # end of section
elif header.msg_type == self.MSG_TYPE_FLAG_BITS:
# make sure this is the first message in the log
if self._file_handle.tell() != 16 + 3 + header.msg_size:
print('Error: FLAGS_BITS message must be first message. Offset:',
self._file_handle.tell())
msg_flag_bits = self._MessageFlagBits(data, header)
self._compat_flags = msg_flag_bits.compat_flags
self._incompat_flags = msg_flag_bits.incompat_flags
self._appended_offsets = msg_flag_bits.appended_offsets
if self._debug:
print('compat flags: ', self._compat_flags)
print('incompat flags:', self._incompat_flags)
print('appended offsets:', self._appended_offsets)
# check if there are bits set that we don't know
unknown_incompat_flag_msg = "Unknown incompatible flag set: cannot parse the log"
if self._incompat_flags[0] & ~1:
raise Exception(unknown_incompat_flag_msg)
for i in range(1, 8):
if self._incompat_flags[i]:
raise Exception(unknown_incompat_flag_msg)
else:
if self._debug:
print('read_file_definitions: unknown message type: %i (%s)' %
(header.msg_type, chr(header.msg_type)))
file_position = self._file_handle.tell()
print('file position: %i (0x%x) msg size: %i' % (
file_position, file_position, header.msg_size))
if self._check_file_corruption(header):
# seek back to advance only by a single byte instead of
# skipping the message
self._file_handle.seek(-2-header.msg_size, 1)
def _read_file_data(self, message_name_filter_list, read_until=None):
"""
read the file data section
:param read_until: an optional file offset: if set, parse only up to
this offset (smaller than)
"""
if read_until is None:
read_until = 1 << 50 # make it larger than any possible log file
try:
# pre-init reusable objects
header = self._MessageHeader()
msg_data = self._MessageData()
while True:
data = self._file_handle.read(3)
header.initialize(data)
data = self._file_handle.read(header.msg_size)
if len(data) < header.msg_size:
break # less data than expected. File is most likely cut
if self._file_handle.tell() > read_until:
if self._debug:
print('read until offset=%i done, current pos=%i' %
(read_until, self._file_handle.tell()))
break
if header.msg_type == self.MSG_TYPE_INFO:
msg_info = self._MessageInfo(data, header)
self._msg_info_dict[msg_info.key] = msg_info.value
elif header.msg_type == self.MSG_TYPE_INFO_MULTIPLE:
msg_info = self._MessageInfo(data, header, is_info_multiple=True)
self._add_message_info_multiple(msg_info)
elif header.msg_type == self.MSG_TYPE_PARAMETER:
msg_info = self._MessageInfo(data, header)
self._changed_parameters.append((self._last_timestamp,
msg_info.key, msg_info.value))
elif header.msg_type == self.MSG_TYPE_ADD_LOGGED_MSG:
msg_add_logged = self._MessageAddLogged(data, header,
self._message_formats)
if (message_name_filter_list is None or
msg_add_logged.message_name in message_name_filter_list):
self._subscriptions[msg_add_logged.msg_id] = msg_add_logged
else:
self._filtered_message_ids.add(msg_add_logged.msg_id)
elif header.msg_type == self.MSG_TYPE_LOGGING:
msg_logging = self.MessageLogging(data, header)
self._logged_messages.append(msg_logging)
elif header.msg_type == self.MSG_TYPE_DATA:
msg_data.initialize(data, header, self._subscriptions, self)
if msg_data.timestamp != 0 and msg_data.timestamp > self._last_timestamp:
self._last_timestamp = msg_data.timestamp
elif header.msg_type == self.MSG_TYPE_DROPOUT:
msg_dropout = self.MessageDropout(data, header,
self._last_timestamp)
self._dropouts.append(msg_dropout)
else:
if self._debug:
print('_read_file_data: unknown message type: %i (%s)' %
(header.msg_type, chr(header.msg_type)))
file_position = self._file_handle.tell()
print('file position: %i (0x%x) msg size: %i' % (
file_position, file_position, header.msg_size))
if self._check_file_corruption(header):
# seek back to advance only by a single byte instead of
# skipping the message
self._file_handle.seek(-2-header.msg_size, 1)
except struct.error:
pass #we read past the end of the file
# convert into final representation
while self._subscriptions:
_, value = self._subscriptions.popitem()
if len(value.buffer) > 0: # only add if we have data
data_item = ULog.Data(value)
self._data_list.append(data_item)
def _check_file_corruption(self, header):
""" check for file corruption based on an unknown message type in the header """
# We need to handle 2 cases:
# - corrupt file (we do our best to read the rest of the file)
# - new ULog message type got added (we just want to skip the message)
if header.msg_type == 0 or header.msg_size == 0 or header.msg_size > 10000:
if not self._file_corrupt and self._debug:
print('File corruption detected')
self._file_corrupt = True
return self._file_corrupt
def get_version_info(self, key_name='ver_sw_release'):
"""
get the (major, minor, patch, type) version information as tuple.
Returns None if not found
definition of type is:
>= 0: development
>= 64: alpha version
>= 128: beta version
>= 192: RC version
== 255: release version
"""
if key_name in self._msg_info_dict:
val = self._msg_info_dict[key_name]
return ((val >> 24) & 0xff, (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff)
return None
|
PX4/pyulog | pyulog/ulog2csv.py | main | python | def main():
parser = argparse.ArgumentParser(description='Convert ULog to CSV')
parser.add_argument('filename', metavar='file.ulg', help='ULog input file')
parser.add_argument(
'-m', '--messages', dest='messages',
help=("Only consider given messages. Must be a comma-separated list of"
" names, like 'sensor_combined,vehicle_gps_position'"))
parser.add_argument('-d', '--delimiter', dest='delimiter', action='store',
help="Use delimiter in CSV (default is ',')", default=',')
parser.add_argument('-o', '--output', dest='output', action='store',
help='Output directory (default is same as input file)',
metavar='DIR')
args = parser.parse_args()
if args.output and not os.path.isdir(args.output):
print('Creating output directory {:}'.format(args.output))
os.mkdir(args.output)
convert_ulog2csv(args.filename, args.messages, args.output, args.delimiter) | Command line interface | train | https://github.com/PX4/pyulog/blob/3bc4f9338d30e2e0a0dfbed58f54d200967e5056/pyulog/ulog2csv.py#L16-L40 | [
"def convert_ulog2csv(ulog_file_name, messages, output, delimiter):\n \"\"\"\n Coverts and ULog file to a CSV file.\n\n :param ulog_file_name: The ULog filename to open and read\n :param messages: A list of message names\n :param output: Output file path\n :param delimiter: CSV delimiter\n\n :return: None\n \"\"\"\n\n msg_filter = messages.split(',') if messages else None\n\n ulog = ULog(ulog_file_name, msg_filter)\n data = ulog.data_list\n\n output_file_prefix = ulog_file_name\n # strip '.ulg'\n if output_file_prefix.lower().endswith('.ulg'):\n output_file_prefix = output_file_prefix[:-4]\n\n # write to different output path?\n if output:\n base_name = os.path.basename(output_file_prefix)\n output_file_prefix = os.path.join(output, base_name)\n\n for d in data:\n fmt = '{0}_{1}_{2}.csv'\n output_file_name = fmt.format(output_file_prefix, d.name, d.multi_id)\n fmt = 'Writing {0} ({1} data points)'\n # print(fmt.format(output_file_name, len(d.data['timestamp'])))\n with open(output_file_name, 'w') as csvfile:\n\n # use same field order as in the log, except for the timestamp\n data_keys = [f.field_name for f in d.field_data]\n data_keys.remove('timestamp')\n data_keys.insert(0, 'timestamp') # we want timestamp at first position\n\n # we don't use np.savetxt, because we have multiple arrays with\n # potentially different data types. However the following is quite\n # slow...\n\n # write the header\n csvfile.write(delimiter.join(data_keys) + '\\n')\n\n # write the data\n last_elem = len(data_keys)-1\n for i in range(len(d.data['timestamp'])):\n for k in range(len(data_keys)):\n csvfile.write(str(d.data[data_keys[k]][i]))\n if k != last_elem:\n csvfile.write(delimiter)\n csvfile.write('\\n')\n"
] | #! /usr/bin/env python
"""
Convert a ULog file into CSV file(s)
"""
from __future__ import print_function
import argparse
import os
from .core import ULog
#pylint: disable=too-many-locals, invalid-name, consider-using-enumerate
def convert_ulog2csv(ulog_file_name, messages, output, delimiter):
"""
Coverts and ULog file to a CSV file.
:param ulog_file_name: The ULog filename to open and read
:param messages: A list of message names
:param output: Output file path
:param delimiter: CSV delimiter
:return: None
"""
msg_filter = messages.split(',') if messages else None
ulog = ULog(ulog_file_name, msg_filter)
data = ulog.data_list
output_file_prefix = ulog_file_name
# strip '.ulg'
if output_file_prefix.lower().endswith('.ulg'):
output_file_prefix = output_file_prefix[:-4]
# write to different output path?
if output:
base_name = os.path.basename(output_file_prefix)
output_file_prefix = os.path.join(output, base_name)
for d in data:
fmt = '{0}_{1}_{2}.csv'
output_file_name = fmt.format(output_file_prefix, d.name, d.multi_id)
fmt = 'Writing {0} ({1} data points)'
# print(fmt.format(output_file_name, len(d.data['timestamp'])))
with open(output_file_name, 'w') as csvfile:
# use same field order as in the log, except for the timestamp
data_keys = [f.field_name for f in d.field_data]
data_keys.remove('timestamp')
data_keys.insert(0, 'timestamp') # we want timestamp at first position
# we don't use np.savetxt, because we have multiple arrays with
# potentially different data types. However the following is quite
# slow...
# write the header
csvfile.write(delimiter.join(data_keys) + '\n')
# write the data
last_elem = len(data_keys)-1
for i in range(len(d.data['timestamp'])):
for k in range(len(data_keys)):
csvfile.write(str(d.data[data_keys[k]][i]))
if k != last_elem:
csvfile.write(delimiter)
csvfile.write('\n')
|
PX4/pyulog | pyulog/ulog2csv.py | convert_ulog2csv | python | def convert_ulog2csv(ulog_file_name, messages, output, delimiter):
msg_filter = messages.split(',') if messages else None
ulog = ULog(ulog_file_name, msg_filter)
data = ulog.data_list
output_file_prefix = ulog_file_name
# strip '.ulg'
if output_file_prefix.lower().endswith('.ulg'):
output_file_prefix = output_file_prefix[:-4]
# write to different output path?
if output:
base_name = os.path.basename(output_file_prefix)
output_file_prefix = os.path.join(output, base_name)
for d in data:
fmt = '{0}_{1}_{2}.csv'
output_file_name = fmt.format(output_file_prefix, d.name, d.multi_id)
fmt = 'Writing {0} ({1} data points)'
# print(fmt.format(output_file_name, len(d.data['timestamp'])))
with open(output_file_name, 'w') as csvfile:
# use same field order as in the log, except for the timestamp
data_keys = [f.field_name for f in d.field_data]
data_keys.remove('timestamp')
data_keys.insert(0, 'timestamp') # we want timestamp at first position
# we don't use np.savetxt, because we have multiple arrays with
# potentially different data types. However the following is quite
# slow...
# write the header
csvfile.write(delimiter.join(data_keys) + '\n')
# write the data
last_elem = len(data_keys)-1
for i in range(len(d.data['timestamp'])):
for k in range(len(data_keys)):
csvfile.write(str(d.data[data_keys[k]][i]))
if k != last_elem:
csvfile.write(delimiter)
csvfile.write('\n') | Coverts and ULog file to a CSV file.
:param ulog_file_name: The ULog filename to open and read
:param messages: A list of message names
:param output: Output file path
:param delimiter: CSV delimiter
:return: None | train | https://github.com/PX4/pyulog/blob/3bc4f9338d30e2e0a0dfbed58f54d200967e5056/pyulog/ulog2csv.py#L43-L96 | null | #! /usr/bin/env python
"""
Convert a ULog file into CSV file(s)
"""
from __future__ import print_function
import argparse
import os
from .core import ULog
#pylint: disable=too-many-locals, invalid-name, consider-using-enumerate
def main():
"""Command line interface"""
parser = argparse.ArgumentParser(description='Convert ULog to CSV')
parser.add_argument('filename', metavar='file.ulg', help='ULog input file')
parser.add_argument(
'-m', '--messages', dest='messages',
help=("Only consider given messages. Must be a comma-separated list of"
" names, like 'sensor_combined,vehicle_gps_position'"))
parser.add_argument('-d', '--delimiter', dest='delimiter', action='store',
help="Use delimiter in CSV (default is ',')", default=',')
parser.add_argument('-o', '--output', dest='output', action='store',
help='Output directory (default is same as input file)',
metavar='DIR')
args = parser.parse_args()
if args.output and not os.path.isdir(args.output):
print('Creating output directory {:}'.format(args.output))
os.mkdir(args.output)
convert_ulog2csv(args.filename, args.messages, args.output, args.delimiter)
|
PX4/pyulog | pyulog/info.py | show_info | python | def show_info(ulog, verbose):
m1, s1 = divmod(int(ulog.start_timestamp/1e6), 60)
h1, m1 = divmod(m1, 60)
m2, s2 = divmod(int((ulog.last_timestamp - ulog.start_timestamp)/1e6), 60)
h2, m2 = divmod(m2, 60)
print("Logging start time: {:d}:{:02d}:{:02d}, duration: {:d}:{:02d}:{:02d}".format(
h1, m1, s1, h2, m2, s2))
dropout_durations = [dropout.duration for dropout in ulog.dropouts]
if len(dropout_durations) == 0:
print("No Dropouts")
else:
print("Dropouts: count: {:}, total duration: {:.1f} s, max: {:} ms, mean: {:} ms"
.format(len(dropout_durations), sum(dropout_durations)/1000.,
max(dropout_durations),
int(sum(dropout_durations)/len(dropout_durations))))
version = ulog.get_version_info_str()
if not version is None:
print('SW Version: {}'.format(version))
print("Info Messages:")
for k in sorted(ulog.msg_info_dict):
if not k.startswith('perf_') or verbose:
print(" {0}: {1}".format(k, ulog.msg_info_dict[k]))
if len(ulog.msg_info_multiple_dict) > 0:
if verbose:
print("Info Multiple Messages:")
for k in sorted(ulog.msg_info_multiple_dict):
print(" {0}: {1}".format(k, ulog.msg_info_multiple_dict[k]))
else:
print("Info Multiple Messages: {}".format(
", ".join(["[{}: {}]".format(k, len(ulog.msg_info_multiple_dict[k])) for k in
sorted(ulog.msg_info_multiple_dict)])))
print("")
print("{:<41} {:7}, {:10}".format("Name (multi id, message size in bytes)",
"number of data points", "total bytes"))
data_list_sorted = sorted(ulog.data_list, key=lambda d: d.name + str(d.multi_id))
for d in data_list_sorted:
message_size = sum([ULog.get_field_size(f.type_str) for f in d.field_data])
num_data_points = len(d.data['timestamp'])
name_id = "{:} ({:}, {:})".format(d.name, d.multi_id, message_size)
print(" {:<40} {:7d} {:10d}".format(name_id, num_data_points,
message_size * num_data_points)) | Show general information from an ULog | train | https://github.com/PX4/pyulog/blob/3bc4f9338d30e2e0a0dfbed58f54d200967e5056/pyulog/info.py#L15-L65 | [
"def get_version_info_str(self, key_name='ver_sw_release'):\n \"\"\"\n get version information in the form 'v1.2.3 (RC)', or None if version\n tag either not found or it's a development version\n \"\"\"\n version = self.get_version_info(key_name)\n if not version is None and version[3] >= 64:\n type_str = ''\n if version[3] < 128: type_str = ' (alpha)'\n elif version[3] < 192: type_str = ' (beta)'\n elif version[3] < 255: type_str = ' (RC)'\n return 'v{}.{}.{}{}'.format(version[0], version[1], version[2], type_str)\n return None\n"
] | #! /usr/bin/env python
"""
Display information from an ULog file
"""
from __future__ import print_function
import argparse
from .core import ULog
#pylint: disable=too-many-locals, unused-wildcard-import, wildcard-import
#pylint: disable=invalid-name
def main():
"""Commande line interface"""
parser = argparse.ArgumentParser(description='Display information from an ULog file')
parser.add_argument('filename', metavar='file.ulg', help='ULog input file')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
help='Verbose output', default=False)
parser.add_argument('-m', '--message', dest='message',
help='Show a specific Info Multiple Message')
parser.add_argument('-n', '--newline', dest='newline', action='store_true',
help='Add newline separators (only with --message)', default=False)
args = parser.parse_args()
ulog_file_name = args.filename
ulog = ULog(ulog_file_name)
message = args.message
if message:
separator = ""
if args.newline: separator = "\n"
if len(ulog.msg_info_multiple_dict) > 0 and message in ulog.msg_info_multiple_dict:
message_info_multiple = ulog.msg_info_multiple_dict[message]
for i, m in enumerate(message_info_multiple):
print("# {} {}:".format(message, i))
print(separator.join(m))
else:
print("message {} not found".format(message))
else:
show_info(ulog, args.verbose)
|
PX4/pyulog | pyulog/info.py | main | python | def main():
parser = argparse.ArgumentParser(description='Display information from an ULog file')
parser.add_argument('filename', metavar='file.ulg', help='ULog input file')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
help='Verbose output', default=False)
parser.add_argument('-m', '--message', dest='message',
help='Show a specific Info Multiple Message')
parser.add_argument('-n', '--newline', dest='newline', action='store_true',
help='Add newline separators (only with --message)', default=False)
args = parser.parse_args()
ulog_file_name = args.filename
ulog = ULog(ulog_file_name)
message = args.message
if message:
separator = ""
if args.newline: separator = "\n"
if len(ulog.msg_info_multiple_dict) > 0 and message in ulog.msg_info_multiple_dict:
message_info_multiple = ulog.msg_info_multiple_dict[message]
for i, m in enumerate(message_info_multiple):
print("# {} {}:".format(message, i))
print(separator.join(m))
else:
print("message {} not found".format(message))
else:
show_info(ulog, args.verbose) | Commande line interface | train | https://github.com/PX4/pyulog/blob/3bc4f9338d30e2e0a0dfbed58f54d200967e5056/pyulog/info.py#L68-L95 | [
"def show_info(ulog, verbose):\n \"\"\"Show general information from an ULog\"\"\"\n m1, s1 = divmod(int(ulog.start_timestamp/1e6), 60)\n h1, m1 = divmod(m1, 60)\n m2, s2 = divmod(int((ulog.last_timestamp - ulog.start_timestamp)/1e6), 60)\n h2, m2 = divmod(m2, 60)\n print(\"Logging start time: {:d}:{:02d}:{:02d}, duration: {:d}:{:02d}:{:02d}\".format(\n h1, m1, s1, h2, m2, s2))\n\n dropout_durations = [dropout.duration for dropout in ulog.dropouts]\n if len(dropout_durations) == 0:\n print(\"No Dropouts\")\n else:\n print(\"Dropouts: count: {:}, total duration: {:.1f} s, max: {:} ms, mean: {:} ms\"\n .format(len(dropout_durations), sum(dropout_durations)/1000.,\n max(dropout_durations),\n int(sum(dropout_durations)/len(dropout_durations))))\n\n version = ulog.get_version_info_str()\n if not version is None:\n print('SW Version: {}'.format(version))\n\n print(\"Info Messages:\")\n for k in sorted(ulog.msg_info_dict):\n if not k.startswith('perf_') or verbose:\n print(\" {0}: {1}\".format(k, ulog.msg_info_dict[k]))\n\n\n if len(ulog.msg_info_multiple_dict) > 0:\n if verbose:\n print(\"Info Multiple Messages:\")\n for k in sorted(ulog.msg_info_multiple_dict):\n print(\" {0}: {1}\".format(k, ulog.msg_info_multiple_dict[k]))\n else:\n print(\"Info Multiple Messages: {}\".format(\n \", \".join([\"[{}: {}]\".format(k, len(ulog.msg_info_multiple_dict[k])) for k in\n sorted(ulog.msg_info_multiple_dict)])))\n\n\n\n print(\"\")\n print(\"{:<41} {:7}, {:10}\".format(\"Name (multi id, message size in bytes)\",\n \"number of data points\", \"total bytes\"))\n\n data_list_sorted = sorted(ulog.data_list, key=lambda d: d.name + str(d.multi_id))\n for d in data_list_sorted:\n message_size = sum([ULog.get_field_size(f.type_str) for f in d.field_data])\n num_data_points = len(d.data['timestamp'])\n name_id = \"{:} ({:}, {:})\".format(d.name, d.multi_id, message_size)\n print(\" {:<40} {:7d} {:10d}\".format(name_id, num_data_points,\n message_size * num_data_points))\n"
] | #! /usr/bin/env python
"""
Display information from an ULog file
"""
from __future__ import print_function
import argparse
from .core import ULog
#pylint: disable=too-many-locals, unused-wildcard-import, wildcard-import
#pylint: disable=invalid-name
def show_info(ulog, verbose):
"""Show general information from an ULog"""
m1, s1 = divmod(int(ulog.start_timestamp/1e6), 60)
h1, m1 = divmod(m1, 60)
m2, s2 = divmod(int((ulog.last_timestamp - ulog.start_timestamp)/1e6), 60)
h2, m2 = divmod(m2, 60)
print("Logging start time: {:d}:{:02d}:{:02d}, duration: {:d}:{:02d}:{:02d}".format(
h1, m1, s1, h2, m2, s2))
dropout_durations = [dropout.duration for dropout in ulog.dropouts]
if len(dropout_durations) == 0:
print("No Dropouts")
else:
print("Dropouts: count: {:}, total duration: {:.1f} s, max: {:} ms, mean: {:} ms"
.format(len(dropout_durations), sum(dropout_durations)/1000.,
max(dropout_durations),
int(sum(dropout_durations)/len(dropout_durations))))
version = ulog.get_version_info_str()
if not version is None:
print('SW Version: {}'.format(version))
print("Info Messages:")
for k in sorted(ulog.msg_info_dict):
if not k.startswith('perf_') or verbose:
print(" {0}: {1}".format(k, ulog.msg_info_dict[k]))
if len(ulog.msg_info_multiple_dict) > 0:
if verbose:
print("Info Multiple Messages:")
for k in sorted(ulog.msg_info_multiple_dict):
print(" {0}: {1}".format(k, ulog.msg_info_multiple_dict[k]))
else:
print("Info Multiple Messages: {}".format(
", ".join(["[{}: {}]".format(k, len(ulog.msg_info_multiple_dict[k])) for k in
sorted(ulog.msg_info_multiple_dict)])))
print("")
print("{:<41} {:7}, {:10}".format("Name (multi id, message size in bytes)",
"number of data points", "total bytes"))
data_list_sorted = sorted(ulog.data_list, key=lambda d: d.name + str(d.multi_id))
for d in data_list_sorted:
message_size = sum([ULog.get_field_size(f.type_str) for f in d.field_data])
num_data_points = len(d.data['timestamp'])
name_id = "{:} ({:}, {:})".format(d.name, d.multi_id, message_size)
print(" {:<40} {:7d} {:10d}".format(name_id, num_data_points,
message_size * num_data_points))
|
PX4/pyulog | pyulog/messages.py | main | python | def main():
parser = argparse.ArgumentParser(description='Display logged messages from an ULog file')
parser.add_argument('filename', metavar='file.ulg', help='ULog input file')
args = parser.parse_args()
ulog_file_name = args.filename
msg_filter = [] # we don't need the data messages
ulog = ULog(ulog_file_name, msg_filter)
for m in ulog.logged_messages:
m1, s1 = divmod(int(m.timestamp/1e6), 60)
h1, m1 = divmod(m1, 60)
print("{:d}:{:02d}:{:02d} {:}: {:}".format(
h1, m1, s1, m.log_level_str(), m.message)) | Commande line interface | train | https://github.com/PX4/pyulog/blob/3bc4f9338d30e2e0a0dfbed58f54d200967e5056/pyulog/messages.py#L13-L31 | null | #! /usr/bin/env python
"""
Display logged messages from an ULog file
"""
from __future__ import print_function
import argparse
from .core import ULog
#pylint: disable=invalid-name
|
PX4/pyulog | pyulog/params.py | main | python | def main():
parser = argparse.ArgumentParser(description='Extract parameters from an ULog file')
parser.add_argument('filename', metavar='file.ulg', help='ULog input file')
parser.add_argument('-d', '--delimiter', dest='delimiter', action='store',
help='Use delimiter in CSV (default is \',\')', default=',')
parser.add_argument('-i', '--initial', dest='initial', action='store_true',
help='Only extract initial parameters', default=False)
parser.add_argument('-o', '--octave', dest='octave', action='store_true',
help='Use Octave format', default=False)
parser.add_argument('-t', '--timestamps', dest='timestamps', action='store_true',
help='Extract changed parameters with timestamps', default=False)
parser.add_argument('output_filename', metavar='params.txt',
type=argparse.FileType('w'), nargs='?',
help='Output filename (default=stdout)', default=sys.stdout)
args = parser.parse_args()
ulog_file_name = args.filename
message_filter = []
if not args.initial: message_filter = None
ulog = ULog(ulog_file_name, message_filter)
param_keys = sorted(ulog.initial_parameters.keys())
delimiter = args.delimiter
output_file = args.output_filename
if not args.octave:
for param_key in param_keys:
output_file.write(param_key)
if args.initial:
output_file.write(delimiter)
output_file.write(str(ulog.initial_parameters[param_key]))
output_file.write('\n')
elif args.timestamps:
output_file.write(delimiter)
output_file.write(str(ulog.initial_parameters[param_key]))
for t, name, value in ulog.changed_parameters:
if name == param_key:
output_file.write(delimiter)
output_file.write(str(value))
output_file.write('\n')
output_file.write("timestamp")
output_file.write(delimiter)
output_file.write('0')
for t, name, value in ulog.changed_parameters:
if name == param_key:
output_file.write(delimiter)
output_file.write(str(t))
output_file.write('\n')
else:
for t, name, value in ulog.changed_parameters:
if name == param_key:
output_file.write(delimiter)
output_file.write(str(value))
output_file.write('\n')
else:
for param_key in param_keys:
output_file.write('# name ')
output_file.write(param_key)
values = [ulog.initial_parameters[param_key]]
if not args.initial:
for t, name, value in ulog.changed_parameters:
if name == param_key:
values += [value]
if len(values) > 1:
output_file.write('\n# type: matrix\n')
output_file.write('# rows: 1\n')
output_file.write('# columns: ')
output_file.write(str(len(values)) + '\n')
for value in values:
output_file.write(str(value) + ' ')
else:
output_file.write('\n# type: scalar\n')
output_file.write(str(values[0]))
output_file.write('\n') | Commande line interface | train | https://github.com/PX4/pyulog/blob/3bc4f9338d30e2e0a0dfbed58f54d200967e5056/pyulog/params.py#L14-L105 | null | #! /usr/bin/env python
"""
Extract parameters from an ULog file
"""
from __future__ import print_function
import argparse
import sys
from .core import ULog
#pylint: disable=unused-variable, too-many-branches
|
PX4/pyulog | pyulog/px4.py | PX4ULog.get_estimator | python | def get_estimator(self):
mav_type = self._ulog.initial_parameters.get('MAV_TYPE', None)
if mav_type == 1: # fixed wing always uses EKF2
return 'EKF2'
mc_est_group = self._ulog.initial_parameters.get('SYS_MC_EST_GROUP', None)
return {0: 'INAV',
1: 'LPE',
2: 'EKF2',
3: 'IEKF'}.get(mc_est_group, 'unknown ({})'.format(mc_est_group)) | return the configured estimator as string from initial parameters | train | https://github.com/PX4/pyulog/blob/3bc4f9338d30e2e0a0dfbed58f54d200967e5056/pyulog/px4.py#L54-L65 | null | class PX4ULog(object):
"""
This class contains PX4-specific ULog things (field names, etc.)
"""
def __init__(self, ulog_object):
"""
@param ulog_object: ULog instance
"""
self._ulog = ulog_object
def get_mav_type(self):
""" return the MAV type as string from initial parameters """
mav_type = self._ulog.initial_parameters.get('MAV_TYPE', None)
return {0: 'Generic',
1: 'Fixed Wing',
2: 'Quadrotor',
3: 'Coaxial helicopter',
4: 'Normal helicopter with tail rotor',
5: 'Ground installation',
6: 'Ground Control Station',
7: 'Airship, controlled',
8: 'Free balloon, uncontrolled',
9: 'Rocket',
10: 'Ground Rover',
11: 'Surface Vessel, Boat, Ship',
12: 'Submarine',
13: 'Hexarotor',
14: 'Octorotor',
15: 'Tricopter',
16: 'Flapping wing',
17: 'Kite',
18: 'Onboard Companion Controller',
19: 'Two-rotor VTOL (Tailsitter)',
20: 'Quad-rotor VTOL (Tailsitter)',
21: 'Tiltrotor VTOL',
22: 'VTOL Standard', #VTOL reserved 2
23: 'VTOL reserved 3',
24: 'VTOL reserved 4',
25: 'VTOL reserved 5',
26: 'Onboard Gimbal',
27: 'Onboard ADSB Peripheral'}.get(mav_type, 'unknown type')
def add_roll_pitch_yaw(self):
""" convenience method to add the fields 'roll', 'pitch', 'yaw' to the
loaded data using the quaternion fields (does not update field_data).
Messages are: 'vehicle_attitude.q' and 'vehicle_attitude_setpoint.q_d',
'vehicle_attitude_groundtruth.q' and 'vehicle_vision_attitude.q' """
self._add_roll_pitch_yaw_to_message('vehicle_attitude')
self._add_roll_pitch_yaw_to_message('vehicle_vision_attitude')
self._add_roll_pitch_yaw_to_message('vehicle_attitude_groundtruth')
self._add_roll_pitch_yaw_to_message('vehicle_attitude_setpoint', '_d')
def _add_roll_pitch_yaw_to_message(self, message_name, field_name_suffix=''):
message_data_all = [elem for elem in self._ulog.data_list if elem.name == message_name]
for message_data in message_data_all:
q = [message_data.data['q'+field_name_suffix+'['+str(i)+']'] for i in range(4)]
roll = np.arctan2(2.0 * (q[0] * q[1] + q[2] * q[3]),
1.0 - 2.0 * (q[1] * q[1] + q[2] * q[2]))
pitch = np.arcsin(2.0 * (q[0] * q[2] - q[3] * q[1]))
yaw = np.arctan2(2.0 * (q[0] * q[3] + q[1] * q[2]),
1.0 - 2.0 * (q[2] * q[2] + q[3] * q[3]))
message_data.data['roll'+field_name_suffix] = roll
message_data.data['pitch'+field_name_suffix] = pitch
message_data.data['yaw'+field_name_suffix] = yaw
def get_configured_rc_input_names(self, channel):
"""
find all RC mappings to a given channel and return their names
:param channel: input channel (0=first)
:return: list of strings or None
"""
ret_val = []
for key in self._ulog.initial_parameters:
param_val = self._ulog.initial_parameters[key]
if key.startswith('RC_MAP_') and param_val == channel + 1:
ret_val.append(key[7:].capitalize())
if len(ret_val) > 0:
return ret_val
return None
|
PX4/pyulog | pyulog/px4.py | PX4ULog.add_roll_pitch_yaw | python | def add_roll_pitch_yaw(self):
self._add_roll_pitch_yaw_to_message('vehicle_attitude')
self._add_roll_pitch_yaw_to_message('vehicle_vision_attitude')
self._add_roll_pitch_yaw_to_message('vehicle_attitude_groundtruth')
self._add_roll_pitch_yaw_to_message('vehicle_attitude_setpoint', '_d') | convenience method to add the fields 'roll', 'pitch', 'yaw' to the
loaded data using the quaternion fields (does not update field_data).
Messages are: 'vehicle_attitude.q' and 'vehicle_attitude_setpoint.q_d',
'vehicle_attitude_groundtruth.q' and 'vehicle_vision_attitude.q' | train | https://github.com/PX4/pyulog/blob/3bc4f9338d30e2e0a0dfbed58f54d200967e5056/pyulog/px4.py#L68-L78 | [
"def _add_roll_pitch_yaw_to_message(self, message_name, field_name_suffix=''):\n\n message_data_all = [elem for elem in self._ulog.data_list if elem.name == message_name]\n for message_data in message_data_all:\n q = [message_data.data['q'+field_name_suffix+'['+str(i)+']'] for i in range(4)]\n roll = np.arctan2(2.0 * (q[0] * q[1] + q[2] * q[3]),\n 1.0 - 2.0 * (q[1] * q[1] + q[2] * q[2]))\n pitch = np.arcsin(2.0 * (q[0] * q[2] - q[3] * q[1]))\n yaw = np.arctan2(2.0 * (q[0] * q[3] + q[1] * q[2]),\n 1.0 - 2.0 * (q[2] * q[2] + q[3] * q[3]))\n message_data.data['roll'+field_name_suffix] = roll\n message_data.data['pitch'+field_name_suffix] = pitch\n message_data.data['yaw'+field_name_suffix] = yaw\n"
] | class PX4ULog(object):
"""
This class contains PX4-specific ULog things (field names, etc.)
"""
def __init__(self, ulog_object):
"""
@param ulog_object: ULog instance
"""
self._ulog = ulog_object
def get_mav_type(self):
""" return the MAV type as string from initial parameters """
mav_type = self._ulog.initial_parameters.get('MAV_TYPE', None)
return {0: 'Generic',
1: 'Fixed Wing',
2: 'Quadrotor',
3: 'Coaxial helicopter',
4: 'Normal helicopter with tail rotor',
5: 'Ground installation',
6: 'Ground Control Station',
7: 'Airship, controlled',
8: 'Free balloon, uncontrolled',
9: 'Rocket',
10: 'Ground Rover',
11: 'Surface Vessel, Boat, Ship',
12: 'Submarine',
13: 'Hexarotor',
14: 'Octorotor',
15: 'Tricopter',
16: 'Flapping wing',
17: 'Kite',
18: 'Onboard Companion Controller',
19: 'Two-rotor VTOL (Tailsitter)',
20: 'Quad-rotor VTOL (Tailsitter)',
21: 'Tiltrotor VTOL',
22: 'VTOL Standard', #VTOL reserved 2
23: 'VTOL reserved 3',
24: 'VTOL reserved 4',
25: 'VTOL reserved 5',
26: 'Onboard Gimbal',
27: 'Onboard ADSB Peripheral'}.get(mav_type, 'unknown type')
def get_estimator(self):
"""return the configured estimator as string from initial parameters"""
mav_type = self._ulog.initial_parameters.get('MAV_TYPE', None)
if mav_type == 1: # fixed wing always uses EKF2
return 'EKF2'
mc_est_group = self._ulog.initial_parameters.get('SYS_MC_EST_GROUP', None)
return {0: 'INAV',
1: 'LPE',
2: 'EKF2',
3: 'IEKF'}.get(mc_est_group, 'unknown ({})'.format(mc_est_group))
def _add_roll_pitch_yaw_to_message(self, message_name, field_name_suffix=''):
message_data_all = [elem for elem in self._ulog.data_list if elem.name == message_name]
for message_data in message_data_all:
q = [message_data.data['q'+field_name_suffix+'['+str(i)+']'] for i in range(4)]
roll = np.arctan2(2.0 * (q[0] * q[1] + q[2] * q[3]),
1.0 - 2.0 * (q[1] * q[1] + q[2] * q[2]))
pitch = np.arcsin(2.0 * (q[0] * q[2] - q[3] * q[1]))
yaw = np.arctan2(2.0 * (q[0] * q[3] + q[1] * q[2]),
1.0 - 2.0 * (q[2] * q[2] + q[3] * q[3]))
message_data.data['roll'+field_name_suffix] = roll
message_data.data['pitch'+field_name_suffix] = pitch
message_data.data['yaw'+field_name_suffix] = yaw
def get_configured_rc_input_names(self, channel):
"""
find all RC mappings to a given channel and return their names
:param channel: input channel (0=first)
:return: list of strings or None
"""
ret_val = []
for key in self._ulog.initial_parameters:
param_val = self._ulog.initial_parameters[key]
if key.startswith('RC_MAP_') and param_val == channel + 1:
ret_val.append(key[7:].capitalize())
if len(ret_val) > 0:
return ret_val
return None
|
PX4/pyulog | pyulog/px4.py | PX4ULog.get_configured_rc_input_names | python | def get_configured_rc_input_names(self, channel):
ret_val = []
for key in self._ulog.initial_parameters:
param_val = self._ulog.initial_parameters[key]
if key.startswith('RC_MAP_') and param_val == channel + 1:
ret_val.append(key[7:].capitalize())
if len(ret_val) > 0:
return ret_val
return None | find all RC mappings to a given channel and return their names
:param channel: input channel (0=first)
:return: list of strings or None | train | https://github.com/PX4/pyulog/blob/3bc4f9338d30e2e0a0dfbed58f54d200967e5056/pyulog/px4.py#L96-L111 | null | class PX4ULog(object):
"""
This class contains PX4-specific ULog things (field names, etc.)
"""
def __init__(self, ulog_object):
"""
@param ulog_object: ULog instance
"""
self._ulog = ulog_object
def get_mav_type(self):
""" return the MAV type as string from initial parameters """
mav_type = self._ulog.initial_parameters.get('MAV_TYPE', None)
return {0: 'Generic',
1: 'Fixed Wing',
2: 'Quadrotor',
3: 'Coaxial helicopter',
4: 'Normal helicopter with tail rotor',
5: 'Ground installation',
6: 'Ground Control Station',
7: 'Airship, controlled',
8: 'Free balloon, uncontrolled',
9: 'Rocket',
10: 'Ground Rover',
11: 'Surface Vessel, Boat, Ship',
12: 'Submarine',
13: 'Hexarotor',
14: 'Octorotor',
15: 'Tricopter',
16: 'Flapping wing',
17: 'Kite',
18: 'Onboard Companion Controller',
19: 'Two-rotor VTOL (Tailsitter)',
20: 'Quad-rotor VTOL (Tailsitter)',
21: 'Tiltrotor VTOL',
22: 'VTOL Standard', #VTOL reserved 2
23: 'VTOL reserved 3',
24: 'VTOL reserved 4',
25: 'VTOL reserved 5',
26: 'Onboard Gimbal',
27: 'Onboard ADSB Peripheral'}.get(mav_type, 'unknown type')
def get_estimator(self):
"""return the configured estimator as string from initial parameters"""
mav_type = self._ulog.initial_parameters.get('MAV_TYPE', None)
if mav_type == 1: # fixed wing always uses EKF2
return 'EKF2'
mc_est_group = self._ulog.initial_parameters.get('SYS_MC_EST_GROUP', None)
return {0: 'INAV',
1: 'LPE',
2: 'EKF2',
3: 'IEKF'}.get(mc_est_group, 'unknown ({})'.format(mc_est_group))
def add_roll_pitch_yaw(self):
""" convenience method to add the fields 'roll', 'pitch', 'yaw' to the
loaded data using the quaternion fields (does not update field_data).
Messages are: 'vehicle_attitude.q' and 'vehicle_attitude_setpoint.q_d',
'vehicle_attitude_groundtruth.q' and 'vehicle_vision_attitude.q' """
self._add_roll_pitch_yaw_to_message('vehicle_attitude')
self._add_roll_pitch_yaw_to_message('vehicle_vision_attitude')
self._add_roll_pitch_yaw_to_message('vehicle_attitude_groundtruth')
self._add_roll_pitch_yaw_to_message('vehicle_attitude_setpoint', '_d')
def _add_roll_pitch_yaw_to_message(self, message_name, field_name_suffix=''):
message_data_all = [elem for elem in self._ulog.data_list if elem.name == message_name]
for message_data in message_data_all:
q = [message_data.data['q'+field_name_suffix+'['+str(i)+']'] for i in range(4)]
roll = np.arctan2(2.0 * (q[0] * q[1] + q[2] * q[3]),
1.0 - 2.0 * (q[1] * q[1] + q[2] * q[2]))
pitch = np.arcsin(2.0 * (q[0] * q[2] - q[3] * q[1]))
yaw = np.arctan2(2.0 * (q[0] * q[3] + q[1] * q[2]),
1.0 - 2.0 * (q[2] * q[2] + q[3] * q[3]))
message_data.data['roll'+field_name_suffix] = roll
message_data.data['pitch'+field_name_suffix] = pitch
message_data.data['yaw'+field_name_suffix] = yaw
|
Rambatino/CHAID | setup.py | get_version | python | def get_version():
version_regex = re.compile(
'__version__\\s*=\\s*(?P<q>[\'"])(?P<version>\\d+(\\.\\d+)*(-(alpha|beta|rc)(\\.\\d+)?)?)(?P=q)'
)
here = path.abspath(path.dirname(__file__))
init_location = path.join(here, "CHAID/__init__.py")
with open(init_location) as init_file:
for line in init_file:
match = version_regex.search(line)
if not match:
raise Exception(
"Couldn't read version information from '{0}'".format(init_location)
)
return match.group('version') | Read version from __init__.py | train | https://github.com/Rambatino/CHAID/blob/dc19e41ebdf2773168733efdf0d7579950c8d2e7/setup.py#L12-L31 | null | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/Rambatino/CHAID
"""
import re
from os import path
from setuptools import setup, find_packages
setup(
name='CHAID',
version=get_version(),
description='A CHAID tree building algorithm',
long_description="This package provides a python implementation of the Chi-Squared Automatic Inference Detection (CHAID) decision tree",
url='https://github.com/Rambatino/CHAID',
author='Mark Ramotowski, Richard Fitzgerald',
author_email='mark.tint.ramotowski@gmail.com',
license='Apache License 2.0',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='CHAID pandas numpy scipy statistics statistical analysis',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
install_requires=['cython', 'numpy', 'pandas', 'treelib', 'pytest', 'scipy', 'savReaderWriter', 'graphviz', 'plotly', 'colorlover'],
extras_require={
'test': ['codecov', 'tox', 'tox-pyenv', 'detox', 'pytest', 'pytest-cov'],
}
)
|
Rambatino/CHAID | CHAID/stats.py | chisquare | python | def chisquare(n_ij, weighted):
if weighted:
m_ij = n_ij / n_ij
nan_mask = np.isnan(m_ij)
m_ij[nan_mask] = 0.000001 # otherwise it breaks the chi-squared test
w_ij = m_ij
n_ij_col_sum = n_ij.sum(axis=1)
n_ij_row_sum = n_ij.sum(axis=0)
alpha, beta, eps = (1, 1, 1)
while eps > 10e-6:
alpha = alpha * np.vstack(n_ij_col_sum / m_ij.sum(axis=1))
beta = n_ij_row_sum / (alpha * w_ij).sum(axis=0)
eps = np.max(np.absolute(w_ij * alpha * beta - m_ij))
m_ij = w_ij * alpha * beta
else:
m_ij = (np.vstack(n_ij.sum(axis=1)) * n_ij.sum(axis=0)) / n_ij.sum().astype(float)
dof = (n_ij.shape[0] - 1) * (n_ij.shape[1] - 1)
chi, p_val = stats.chisquare(n_ij, f_exp=m_ij, ddof=n_ij.size - 1 - dof, axis=None)
return (chi, p_val, dof) | Calculates the chisquare for a matrix of ind_v x dep_v
for the unweighted and SPSS weighted case | train | https://github.com/Rambatino/CHAID/blob/dc19e41ebdf2773168733efdf0d7579950c8d2e7/CHAID/stats.py#L9-L36 | null | import collections as cl
from .column import NominalColumn, OrdinalColumn, ContinuousColumn
from .split import Split
import numpy as np
from scipy import stats
from .invalid_split_reason import InvalidSplitReason
from numpy import nan as NaN
class Stats(object):
    """
    Stats class that determines the correct statistical method to apply.

    Fix applied in ``best_con_split``: the original computed
    ``sufficient_split`` twice (the first combined assignment was dead
    code) and unconditionally overwrote an ALPHA_MERGE invalid reason
    with MIN_CHILD_NODE_SIZE; the reason now reflects the check that
    actually failed.
    """
    def __init__(self, alpha_merge, min_child_node_size, split_threshold, dep_population):
        # complement so later comparisons can use `score >= split_threshold * best`
        self.split_threshold = 1 - split_threshold
        self.alpha_merge = alpha_merge
        self.min_child_node_size = min_child_node_size
        self.dep_population = dep_population

    def best_split(self, ind, dep):
        """ determine which splitting function to apply """
        if isinstance(dep, ContinuousColumn):
            return self.best_con_split(ind, dep)
        else:
            return self.best_cat_heuristic_split(ind, dep)

    def best_cat_heuristic_split(self, ind, dep):
        """ determine best categorical variable split using heuristic methods

        Greedily merges the most similar pair of predictor categories
        (highest chi-squared p-value) until the remaining grouping is
        significant, then keeps the best-scoring predictor overall.
        """
        # Best split found so far; starts invalid (column_id is None).
        split = Split(None, None, None, None, 0)
        min_child_node_size = self.min_child_node_size

        all_dep = np.unique(dep.arr)
        if len(all_dep) == 1:
            # only one observed outcome -> nothing to discriminate
            split.invalid_reason = InvalidSplitReason.PURE_NODE
            return split
        elif len(dep.arr) < min_child_node_size and dep.weights is None:
            # if not weights and too small, skip
            split.invalid_reason = InvalidSplitReason.MIN_CHILD_NODE_SIZE
            return split
        elif dep.weights is not None and len(dep.weights) < min_child_node_size:
            # if weighted count is too small, skip
            # NOTE(review): reports PURE_NODE rather than MIN_CHILD_NODE_SIZE -- confirm intended
            split.invalid_reason = InvalidSplitReason.PURE_NODE
            return split

        for i, ind_var in enumerate(ind):
            split.invalid_reason = None  # must reset because using invalid reason to break
            ind_var = ind_var.deep_copy()
            unique = np.unique(ind_var.arr)

            # freq[category] -> {dep value: (weighted) count}
            freq = {}
            if dep.weights is None:
                for col in unique:
                    counts = np.unique(np.compress(ind_var.arr == col, dep.arr), return_counts=True)
                    freq[col] = cl.defaultdict(int)
                    freq[col].update(np.transpose(counts))
            else:
                for col in unique:
                    counts = np.unique(np.compress(ind_var.arr == col, dep.arr), return_counts=True)
                    freq[col] = cl.defaultdict(int)
                    for dep_v in all_dep:
                        freq[col][dep_v] = dep.weights[(ind_var.arr == col) * (dep.arr == dep_v)].sum()

            if dep.weights is not None:
                row_count = dep.weights.sum()
            else:
                row_count = len(dep.arr)

            if len(list(ind_var.possible_groupings())) == 0:
                split.invalid_reason = InvalidSplitReason.PURE_NODE

            while next(ind_var.possible_groupings(), None) is not None:
                # most similar pair found this pass (highest p-value on merge)
                choice, highest_p_join, split_chi = None, None, None

                for comb in ind_var.possible_groupings():
                    col1_freq = freq[comb[0]]
                    col2_freq = freq[comb[1]]

                    keys = set(col1_freq.keys()).union(col2_freq.keys())
                    n_ij = np.array([
                        [col1_freq.get(k, 0) for k in keys],
                        [col2_freq.get(k, 0) for k in keys]
                    ])

                    # check to see if min_child_node_size permits this direction
                    # 31 can't merge with 10 if it only leaves 27 for the other node(s)
                    # but if these are the only two, can't skip, because the level can be defined
                    # as these two nodes
                    other_splits = row_count - n_ij.sum()
                    if other_splits < min_child_node_size and other_splits != 0:
                        p_split, dof, chi = 1, NaN, NaN
                        continue

                    if n_ij.shape[1] == 1:
                        p_split, dof, chi = 1, NaN, NaN
                        # could be the only valid combination, as we skip
                        # ones that result in other nodes that give min child node sizes
                        # this solves [[20], [10, 11]] even though 10 & 11 are exact,
                        # this must be the choice of this iteration
                        choice = comb
                        break
                    else:
                        chi, p_split, dof = chisquare(n_ij, dep.weights is not None)

                    if choice is None or p_split > highest_p_join or (p_split == highest_p_join and chi > split_chi):
                        choice, highest_p_join, split_chi = comb, p_split, chi

                # highest_p_join is None when the pair loop broke on its first
                # candidate, which also counts as "sufficient" here
                sufficient_split = not highest_p_join or highest_p_join < self.alpha_merge
                if not sufficient_split:
                    split.invalid_reason = InvalidSplitReason.ALPHA_MERGE
                elif (n_ij.sum(axis=1) < min_child_node_size).any():
                    # NOTE(review): n_ij leaks from the last pair-loop iteration -- confirm intended
                    split.invalid_reason = InvalidSplitReason.MIN_CHILD_NODE_SIZE
                else:
                    # score the whole predictor (all current groups x dep values)
                    n_ij = np.array([
                        [f[dep_val] for dep_val in all_dep] for f in freq.values()
                    ])

                    chi, p_split, dof = chisquare(n_ij, dep.weights is not None)

                    temp_split = Split(i, ind_var.groups(), chi, p_split, dof, split_name=ind_var.name)
                    better_split = not split.valid() or p_split < split.p or (p_split == split.p and chi > split.score)

                    if better_split:
                        split, temp_split = temp_split, split

                    chi_threshold = self.split_threshold * split.score

                    # near-best splits are retained as surrogates of the winner
                    if temp_split.valid() and temp_split.score >= chi_threshold:
                        for sur in temp_split.surrogates:
                            if sur.column_id != i and sur.score >= chi_threshold:
                                split.surrogates.append(sur)

                        temp_split.surrogates = []
                        split.surrogates.append(temp_split)

                    break

                # all combinations created don't suffice. i.e. what's left is below min_child_node_size
                if choice is None:
                    break
                else:
                    # merge the most similar pair and re-evaluate
                    ind_var.group(choice[0], choice[1])
                    for val, count in freq[choice[1]].items():
                        freq[choice[0]][val] += count
                    del freq[choice[1]]

        if split.valid():
            split.sub_split_values(ind[split.column_id].metadata)
        return split

    def best_con_split(self, ind, dep):
        """ determine best continuous variable split

        Categories of each predictor are merged greedily (most similar
        pair first, judged by a variance test) until the remaining
        grouping is significant; the best-scoring predictor wins.
        """
        # Best split found so far; starts invalid (column_id is None).
        split = Split(None, None, None, None, 0)
        # Bartlett's test assumes normal data, Levene's is the robust fallback.
        is_normal = stats.normaltest(self.dep_population)[1] > 0.05
        sig_test = stats.bartlett if is_normal else stats.levene
        response_set = dep.arr
        if dep.weights is not None:
            # weights are applied by scaling the responses directly
            response_set = dep.arr * dep.weights

        for i, ind_var in enumerate(ind):
            ind_var = ind_var.deep_copy()
            unique = np.unique(ind_var.arr)
            keyed_set = {}

            # bucket the (possibly weighted) responses by predictor category
            for col in unique:
                matched_elements = np.compress(ind_var.arr == col, response_set)
                keyed_set[col] = matched_elements

            while next(ind_var.possible_groupings(), None) is not None:
                # find the most similar pair (highest p-value when compared)
                choice, highest_p_join, split_score = None, None, None
                for comb in ind_var.possible_groupings():
                    col1_keyed_set = keyed_set[comb[0]]
                    col2_keyed_set = keyed_set[comb[1]]
                    dof = len(np.concatenate((col1_keyed_set, col2_keyed_set))) - 2
                    score, p_split = sig_test(col1_keyed_set, col2_keyed_set)

                    if choice is None or p_split > highest_p_join or (p_split == highest_p_join and score > split_score):
                        choice, highest_p_join, split_score = comb, p_split, score

                # a split stands only if the groups differ significantly AND
                # every resulting node is large enough; record the reason for
                # whichever check failed (bug fix: previously the size reason
                # always clobbered the alpha reason)
                min_size_ok = all(
                    len(node_v) >= self.min_child_node_size for node_v in keyed_set.values()
                )
                invalid_reason = None
                sufficient_split = highest_p_join < self.alpha_merge
                if not sufficient_split:
                    invalid_reason = InvalidSplitReason.ALPHA_MERGE
                elif not min_size_ok:
                    invalid_reason = InvalidSplitReason.MIN_CHILD_NODE_SIZE
                sufficient_split = sufficient_split and min_size_ok

                if sufficient_split and len(keyed_set.values()) > 1:
                    # score the whole predictor across all remaining groups
                    dof = len(np.concatenate(list(keyed_set.values()))) - 2
                    score, p_split = sig_test(*keyed_set.values())

                    temp_split = Split(i, ind_var.groups(), score, p_split, dof, split_name=ind_var.name)

                    better_split = not split.valid() or p_split < split.p or (p_split == split.p and score > split.score)

                    if better_split:
                        split, temp_split = temp_split, split

                    score_threshold = self.split_threshold * split.score

                    # near-best splits are retained as surrogates of the winner
                    if temp_split.valid() and temp_split.score >= score_threshold:
                        for sur in temp_split.surrogates:
                            if sur.column_id != i and sur.score >= score_threshold:
                                split.surrogates.append(sur)

                        temp_split.surrogates = []
                        split.surrogates.append(temp_split)

                    break
                else:
                    split.invalid_reason = invalid_reason

                    # merge the most similar pair and re-evaluate
                    ind_var.group(choice[0], choice[1])

                    keyed_set[choice[0]] = np.concatenate((keyed_set[choice[1]], keyed_set[choice[0]]))
                    del keyed_set[choice[1]]

        if split.valid():
            split.sub_split_values(ind[split.column_id].metadata)
        return split
return split
|
Rambatino/CHAID | CHAID/stats.py | Stats.best_split | python | def best_split(self, ind, dep):
if isinstance(dep, ContinuousColumn):
return self.best_con_split(ind, dep)
else:
return self.best_cat_heuristic_split(ind, dep) | determine which splitting function to apply | train | https://github.com/Rambatino/CHAID/blob/dc19e41ebdf2773168733efdf0d7579950c8d2e7/CHAID/stats.py#L49-L54 | [
"def best_cat_heuristic_split(self, ind, dep):\n \"\"\" determine best categorical variable split using heuristic methods \"\"\"\n split = Split(None, None, None, None, 0)\n min_child_node_size = self.min_child_node_size\n\n all_dep = np.unique(dep.arr)\n if len(all_dep) == 1:\n split.invalid_reason = InvalidSplitReason.PURE_NODE\n return split\n elif len(dep.arr) < min_child_node_size and dep.weights is None:\n # if not weights and too small, skip\n split.invalid_reason = InvalidSplitReason.MIN_CHILD_NODE_SIZE\n return split\n elif dep.weights is not None and len(dep.weights) < min_child_node_size:\n # if weighted count is too small, skip\n split.invalid_reason = InvalidSplitReason.PURE_NODE\n return split\n\n for i, ind_var in enumerate(ind):\n split.invalid_reason = None # must reset because using invalid reason to break\n ind_var = ind_var.deep_copy()\n unique = np.unique(ind_var.arr)\n\n freq = {}\n if dep.weights is None:\n for col in unique:\n counts = np.unique(np.compress(ind_var.arr == col, dep.arr), return_counts=True)\n freq[col] = cl.defaultdict(int)\n freq[col].update(np.transpose(counts))\n else:\n for col in unique:\n counts = np.unique(np.compress(ind_var.arr == col, dep.arr), return_counts=True)\n freq[col] = cl.defaultdict(int)\n for dep_v in all_dep:\n freq[col][dep_v] = dep.weights[(ind_var.arr == col) * (dep.arr == dep_v)].sum()\n\n\n if dep.weights is not None:\n row_count = dep.weights.sum()\n else:\n row_count = len(dep.arr)\n\n if len(list(ind_var.possible_groupings())) == 0:\n split.invalid_reason = InvalidSplitReason.PURE_NODE\n while next(ind_var.possible_groupings(), None) is not None:\n choice, highest_p_join, split_chi = None, None, None\n\n for comb in ind_var.possible_groupings():\n col1_freq = freq[comb[0]]\n col2_freq = freq[comb[1]]\n\n keys = set(col1_freq.keys()).union(col2_freq.keys())\n n_ij = np.array([\n [col1_freq.get(k, 0) for k in keys],\n [col2_freq.get(k, 0) for k in keys]\n ])\n\n # check to see if 
min_child_node_size permits this direction\n # 31 can't merge with 10 if it only leaves 27 for the other node(s)\n # but if these are the only two, can't skip, because the level can be defined\n # as these two nodes\n other_splits = row_count - n_ij.sum()\n if other_splits < min_child_node_size and other_splits != 0:\n p_split, dof, chi = 1, NaN, NaN\n continue\n\n if n_ij.shape[1] == 1:\n p_split, dof, chi = 1, NaN, NaN\n # could be the only valid combination, as we skip\n # ones that result in other nodes that give min child node sizes\n # this solves [[20], [10, 11]] even though 10 & 11 are exact,\n # this must be the choice of this iteration\n choice = comb\n break\n else:\n chi, p_split, dof = chisquare(n_ij, dep.weights is not None)\n\n if choice is None or p_split > highest_p_join or (p_split == highest_p_join and chi > split_chi):\n choice, highest_p_join, split_chi = comb, p_split, chi\n\n sufficient_split = not highest_p_join or highest_p_join < self.alpha_merge\n if not sufficient_split:\n split.invalid_reason = InvalidSplitReason.ALPHA_MERGE\n elif (n_ij.sum(axis=1) < min_child_node_size).any():\n split.invalid_reason = InvalidSplitReason.MIN_CHILD_NODE_SIZE\n else:\n n_ij = np.array([\n [f[dep_val] for dep_val in all_dep] for f in freq.values()\n ])\n\n dof = (n_ij.shape[0] - 1) * (n_ij.shape[1] - 1)\n chi, p_split, dof = chisquare(n_ij, dep.weights is not None)\n\n temp_split = Split(i, ind_var.groups(), chi, p_split, dof, split_name=ind_var.name)\n better_split = not split.valid() or p_split < split.p or (p_split == split.p and chi > split.score)\n\n if better_split:\n split, temp_split = temp_split, split\n\n chi_threshold = self.split_threshold * split.score\n\n if temp_split.valid() and temp_split.score >= chi_threshold:\n for sur in temp_split.surrogates:\n if sur.column_id != i and sur.score >= chi_threshold:\n split.surrogates.append(sur)\n\n temp_split.surrogates = []\n split.surrogates.append(temp_split)\n\n break\n\n # all combinations 
created don't suffice. i.e. what's left is below min_child_node_size\n if choice is None:\n break\n else:\n ind_var.group(choice[0], choice[1])\n for val, count in freq[choice[1]].items():\n freq[choice[0]][val] += count\n del freq[choice[1]]\n if split.valid():\n split.sub_split_values(ind[split.column_id].metadata)\n return split\n",
"def best_con_split(self, ind, dep):\n \"\"\" determine best continuous variable split \"\"\"\n split = Split(None, None, None, None, 0)\n is_normal = stats.normaltest(self.dep_population)[1] > 0.05\n sig_test = stats.bartlett if is_normal else stats.levene\n response_set = dep.arr\n if dep.weights is not None:\n response_set = dep.arr * dep.weights\n\n for i, ind_var in enumerate(ind):\n ind_var = ind_var.deep_copy()\n unique = np.unique(ind_var.arr)\n keyed_set = {}\n\n for col in unique:\n matched_elements = np.compress(ind_var.arr == col, response_set)\n keyed_set[col] = matched_elements\n\n while next(ind_var.possible_groupings(), None) is not None:\n choice, highest_p_join, split_score = None, None, None\n for comb in ind_var.possible_groupings():\n col1_keyed_set = keyed_set[comb[0]]\n col2_keyed_set = keyed_set[comb[1]]\n dof = len(np.concatenate((col1_keyed_set, col2_keyed_set))) - 2\n score, p_split = sig_test(col1_keyed_set, col2_keyed_set)\n\n if choice is None or p_split > highest_p_join or (p_split == highest_p_join and score > split_score):\n choice, highest_p_join, split_score = comb, p_split, score\n\n sufficient_split = highest_p_join < self.alpha_merge and all(\n len(node_v) >= self.min_child_node_size for node_v in keyed_set.values()\n )\n\n invalid_reason = None\n sufficient_split = highest_p_join < self.alpha_merge\n if not sufficient_split: invalid_reason = InvalidSplitReason.ALPHA_MERGE\n\n sufficient_split = sufficient_split and all(\n len(node_v) >= self.min_child_node_size for node_v in keyed_set.values()\n )\n\n if not sufficient_split: invalid_reason = InvalidSplitReason.MIN_CHILD_NODE_SIZE\n\n if sufficient_split and len(keyed_set.values()) > 1:\n dof = len(np.concatenate(list(keyed_set.values()))) - 2\n score, p_split = sig_test(*keyed_set.values())\n\n temp_split = Split(i, ind_var.groups(), score, p_split, dof, split_name=ind_var.name)\n\n better_split = not split.valid() or p_split < split.p or (p_split == split.p and score > 
split.score)\n\n if better_split:\n split, temp_split = temp_split, split\n\n score_threshold = self.split_threshold * split.score\n\n if temp_split.valid() and temp_split.score >= score_threshold:\n for sur in temp_split.surrogates:\n if sur.column_id != i and sur.score >= score_threshold:\n split.surrogates.append(sur)\n\n temp_split.surrogates = []\n split.surrogates.append(temp_split)\n\n break\n else:\n split.invalid_reason = invalid_reason\n\n ind_var.group(choice[0], choice[1])\n\n keyed_set[choice[0]] = np.concatenate((keyed_set[choice[1]], keyed_set[choice[0]]))\n del keyed_set[choice[1]]\n\n if split.valid():\n split.sub_split_values(ind[split.column_id].metadata)\n return split\n"
] | class Stats(object):
"""
Stats class that determines the correct statistical method to apply
"""
def __init__(self, alpha_merge, min_child_node_size, split_threshold, dep_population):
self.split_threshold = 1 - split_threshold
self.alpha_merge = alpha_merge
self.min_child_node_size = min_child_node_size
self.dep_population = dep_population
def best_cat_heuristic_split(self, ind, dep):
    """ determine best categorical variable split using heuristic methods

    Greedily merges the most similar pair of predictor categories
    (highest chi-squared p-value) until the remaining grouping is
    significant, then keeps the best-scoring predictor overall.
    """
    # Best split found so far; starts invalid (column_id is None).
    split = Split(None, None, None, None, 0)
    min_child_node_size = self.min_child_node_size

    all_dep = np.unique(dep.arr)
    if len(all_dep) == 1:
        # only one observed outcome -> nothing to discriminate
        split.invalid_reason = InvalidSplitReason.PURE_NODE
        return split
    elif len(dep.arr) < min_child_node_size and dep.weights is None:
        # if not weights and too small, skip
        split.invalid_reason = InvalidSplitReason.MIN_CHILD_NODE_SIZE
        return split
    elif dep.weights is not None and len(dep.weights) < min_child_node_size:
        # if weighted count is too small, skip
        # NOTE(review): reports PURE_NODE rather than MIN_CHILD_NODE_SIZE -- confirm intended
        split.invalid_reason = InvalidSplitReason.PURE_NODE
        return split

    for i, ind_var in enumerate(ind):
        split.invalid_reason = None  # must reset because using invalid reason to break
        ind_var = ind_var.deep_copy()
        unique = np.unique(ind_var.arr)

        # freq[category] -> {dep value: (weighted) count}
        freq = {}
        if dep.weights is None:
            for col in unique:
                counts = np.unique(np.compress(ind_var.arr == col, dep.arr), return_counts=True)
                freq[col] = cl.defaultdict(int)
                freq[col].update(np.transpose(counts))
        else:
            for col in unique:
                counts = np.unique(np.compress(ind_var.arr == col, dep.arr), return_counts=True)
                freq[col] = cl.defaultdict(int)
                for dep_v in all_dep:
                    freq[col][dep_v] = dep.weights[(ind_var.arr == col) * (dep.arr == dep_v)].sum()

        if dep.weights is not None:
            row_count = dep.weights.sum()
        else:
            row_count = len(dep.arr)

        if len(list(ind_var.possible_groupings())) == 0:
            split.invalid_reason = InvalidSplitReason.PURE_NODE

        while next(ind_var.possible_groupings(), None) is not None:
            # most similar pair found this pass (highest p-value on merge)
            choice, highest_p_join, split_chi = None, None, None

            for comb in ind_var.possible_groupings():
                col1_freq = freq[comb[0]]
                col2_freq = freq[comb[1]]

                keys = set(col1_freq.keys()).union(col2_freq.keys())
                n_ij = np.array([
                    [col1_freq.get(k, 0) for k in keys],
                    [col2_freq.get(k, 0) for k in keys]
                ])

                # check to see if min_child_node_size permits this direction
                # 31 can't merge with 10 if it only leaves 27 for the other node(s)
                # but if these are the only two, can't skip, because the level can be defined
                # as these two nodes
                other_splits = row_count - n_ij.sum()
                if other_splits < min_child_node_size and other_splits != 0:
                    p_split, dof, chi = 1, NaN, NaN
                    continue

                if n_ij.shape[1] == 1:
                    p_split, dof, chi = 1, NaN, NaN
                    # could be the only valid combination, as we skip
                    # ones that result in other nodes that give min child node sizes
                    # this solves [[20], [10, 11]] even though 10 & 11 are exact,
                    # this must be the choice of this iteration
                    choice = comb
                    break
                else:
                    chi, p_split, dof = chisquare(n_ij, dep.weights is not None)

                if choice is None or p_split > highest_p_join or (p_split == highest_p_join and chi > split_chi):
                    choice, highest_p_join, split_chi = comb, p_split, chi

            # highest_p_join is None when the pair loop broke on its first
            # candidate, which also counts as "sufficient" here
            sufficient_split = not highest_p_join or highest_p_join < self.alpha_merge
            if not sufficient_split:
                split.invalid_reason = InvalidSplitReason.ALPHA_MERGE
            elif (n_ij.sum(axis=1) < min_child_node_size).any():
                # NOTE(review): n_ij leaks from the last pair-loop iteration -- confirm intended
                split.invalid_reason = InvalidSplitReason.MIN_CHILD_NODE_SIZE
            else:
                # score the whole predictor (all current groups x dep values)
                n_ij = np.array([
                    [f[dep_val] for dep_val in all_dep] for f in freq.values()
                ])

                # this dof is immediately overwritten by chisquare's return below
                dof = (n_ij.shape[0] - 1) * (n_ij.shape[1] - 1)
                chi, p_split, dof = chisquare(n_ij, dep.weights is not None)

                temp_split = Split(i, ind_var.groups(), chi, p_split, dof, split_name=ind_var.name)
                better_split = not split.valid() or p_split < split.p or (p_split == split.p and chi > split.score)

                if better_split:
                    split, temp_split = temp_split, split

                chi_threshold = self.split_threshold * split.score

                # near-best splits are retained as surrogates of the winner
                if temp_split.valid() and temp_split.score >= chi_threshold:
                    for sur in temp_split.surrogates:
                        if sur.column_id != i and sur.score >= chi_threshold:
                            split.surrogates.append(sur)

                    temp_split.surrogates = []
                    split.surrogates.append(temp_split)

                break

            # all combinations created don't suffice. i.e. what's left is below min_child_node_size
            if choice is None:
                break
            else:
                # merge the most similar pair and re-evaluate
                ind_var.group(choice[0], choice[1])
                for val, count in freq[choice[1]].items():
                    freq[choice[0]][val] += count
                del freq[choice[1]]

    if split.valid():
        split.sub_split_values(ind[split.column_id].metadata)
    return split
def best_con_split(self, ind, dep):
    """ determine best continuous variable split

    Categories of each predictor are merged greedily (most similar pair
    first, judged by a variance test) until the remaining grouping is
    significant; the best-scoring predictor wins.

    Fix: the original computed ``sufficient_split`` twice (the first
    combined assignment was dead code) and unconditionally overwrote an
    ALPHA_MERGE invalid reason with MIN_CHILD_NODE_SIZE; the reason now
    reflects the check that actually failed.
    """
    # Best split found so far; starts invalid (column_id is None).
    split = Split(None, None, None, None, 0)
    # Bartlett's test assumes normal data, Levene's is the robust fallback.
    is_normal = stats.normaltest(self.dep_population)[1] > 0.05
    sig_test = stats.bartlett if is_normal else stats.levene
    response_set = dep.arr
    if dep.weights is not None:
        # weights are applied by scaling the responses directly
        response_set = dep.arr * dep.weights

    for i, ind_var in enumerate(ind):
        ind_var = ind_var.deep_copy()
        unique = np.unique(ind_var.arr)
        keyed_set = {}

        # bucket the (possibly weighted) responses by predictor category
        for col in unique:
            matched_elements = np.compress(ind_var.arr == col, response_set)
            keyed_set[col] = matched_elements

        while next(ind_var.possible_groupings(), None) is not None:
            # find the most similar pair (highest p-value when compared)
            choice, highest_p_join, split_score = None, None, None
            for comb in ind_var.possible_groupings():
                col1_keyed_set = keyed_set[comb[0]]
                col2_keyed_set = keyed_set[comb[1]]
                dof = len(np.concatenate((col1_keyed_set, col2_keyed_set))) - 2
                score, p_split = sig_test(col1_keyed_set, col2_keyed_set)

                if choice is None or p_split > highest_p_join or (p_split == highest_p_join and score > split_score):
                    choice, highest_p_join, split_score = comb, p_split, score

            # a split stands only if the groups differ significantly AND
            # every resulting node is large enough; record the reason for
            # whichever check failed
            min_size_ok = all(
                len(node_v) >= self.min_child_node_size for node_v in keyed_set.values()
            )
            invalid_reason = None
            sufficient_split = highest_p_join < self.alpha_merge
            if not sufficient_split:
                invalid_reason = InvalidSplitReason.ALPHA_MERGE
            elif not min_size_ok:
                invalid_reason = InvalidSplitReason.MIN_CHILD_NODE_SIZE
            sufficient_split = sufficient_split and min_size_ok

            if sufficient_split and len(keyed_set.values()) > 1:
                # score the whole predictor across all remaining groups
                dof = len(np.concatenate(list(keyed_set.values()))) - 2
                score, p_split = sig_test(*keyed_set.values())

                temp_split = Split(i, ind_var.groups(), score, p_split, dof, split_name=ind_var.name)

                better_split = not split.valid() or p_split < split.p or (p_split == split.p and score > split.score)

                if better_split:
                    split, temp_split = temp_split, split

                score_threshold = self.split_threshold * split.score

                # near-best splits are retained as surrogates of the winner
                if temp_split.valid() and temp_split.score >= score_threshold:
                    for sur in temp_split.surrogates:
                        if sur.column_id != i and sur.score >= score_threshold:
                            split.surrogates.append(sur)

                    temp_split.surrogates = []
                    split.surrogates.append(temp_split)

                break
            else:
                split.invalid_reason = invalid_reason

                # merge the most similar pair and re-evaluate
                ind_var.group(choice[0], choice[1])

                keyed_set[choice[0]] = np.concatenate((keyed_set[choice[1]], keyed_set[choice[0]]))
                del keyed_set[choice[1]]

    if split.valid():
        split.sub_split_values(ind[split.column_id].metadata)
    return split
|
def best_cat_heuristic_split(self, ind, dep):
    """ determine best categorical variable split using heuristic methods

    Greedily merges the most similar pair of predictor categories
    (highest chi-squared p-value) until the remaining grouping is
    significant, then keeps the best-scoring predictor overall.
    """
    # Best split found so far; starts invalid (column_id is None).
    split = Split(None, None, None, None, 0)
    min_child_node_size = self.min_child_node_size

    all_dep = np.unique(dep.arr)
    if len(all_dep) == 1:
        # only one observed outcome -> nothing to discriminate
        split.invalid_reason = InvalidSplitReason.PURE_NODE
        return split
    elif len(dep.arr) < min_child_node_size and dep.weights is None:
        # if not weights and too small, skip
        split.invalid_reason = InvalidSplitReason.MIN_CHILD_NODE_SIZE
        return split
    elif dep.weights is not None and len(dep.weights) < min_child_node_size:
        # if weighted count is too small, skip
        # NOTE(review): reports PURE_NODE rather than MIN_CHILD_NODE_SIZE -- confirm intended
        split.invalid_reason = InvalidSplitReason.PURE_NODE
        return split

    for i, ind_var in enumerate(ind):
        split.invalid_reason = None  # must reset because using invalid reason to break
        ind_var = ind_var.deep_copy()
        unique = np.unique(ind_var.arr)

        # freq[category] -> {dep value: (weighted) count}
        freq = {}
        if dep.weights is None:
            for col in unique:
                counts = np.unique(np.compress(ind_var.arr == col, dep.arr), return_counts=True)
                freq[col] = cl.defaultdict(int)
                freq[col].update(np.transpose(counts))
        else:
            for col in unique:
                counts = np.unique(np.compress(ind_var.arr == col, dep.arr), return_counts=True)
                freq[col] = cl.defaultdict(int)
                for dep_v in all_dep:
                    freq[col][dep_v] = dep.weights[(ind_var.arr == col) * (dep.arr == dep_v)].sum()

    if dep.weights is not None:
        row_count = dep.weights.sum()
    else:
        row_count = len(dep.arr)

    if len(list(ind_var.possible_groupings())) == 0:
        split.invalid_reason = InvalidSplitReason.PURE_NODE
"def chisquare(n_ij, weighted):\n \"\"\"\n Calculates the chisquare for a matrix of ind_v x dep_v\n for the unweighted and SPSS weighted case\n \"\"\"\n if weighted:\n m_ij = n_ij / n_ij\n\n nan_mask = np.isnan(m_ij)\n m_ij[nan_mask] = 0.000001 # otherwise it breaks the chi-squared test\n\n w_ij = m_ij\n n_ij_col_sum = n_ij.sum(axis=1)\n n_ij_row_sum = n_ij.sum(axis=0)\n alpha, beta, eps = (1, 1, 1)\n while eps > 10e-6:\n alpha = alpha * np.vstack(n_ij_col_sum / m_ij.sum(axis=1))\n beta = n_ij_row_sum / (alpha * w_ij).sum(axis=0)\n eps = np.max(np.absolute(w_ij * alpha * beta - m_ij))\n m_ij = w_ij * alpha * beta\n\n else:\n m_ij = (np.vstack(n_ij.sum(axis=1)) * n_ij.sum(axis=0)) / n_ij.sum().astype(float)\n\n dof = (n_ij.shape[0] - 1) * (n_ij.shape[1] - 1)\n chi, p_val = stats.chisquare(n_ij, f_exp=m_ij, ddof=n_ij.size - 1 - dof, axis=None)\n\n return (chi, p_val, dof)\n",
"def valid(self):\n return self.column_id is not None\n"
] | class Stats(object):
"""
Stats class that determines the correct statistical method to apply
"""
def __init__(self, alpha_merge, min_child_node_size, split_threshold, dep_population):
self.split_threshold = 1 - split_threshold
self.alpha_merge = alpha_merge
self.min_child_node_size = min_child_node_size
self.dep_population = dep_population
def best_split(self, ind, dep):
    """ determine which splitting function to apply """
    # continuous dependents need the variance-based route; everything
    # else goes through the categorical chi-squared route
    chooser = self.best_con_split if isinstance(dep, ContinuousColumn) else self.best_cat_heuristic_split
    return chooser(ind, dep)
def best_con_split(self, ind, dep):
    """ determine best continuous variable split

    Categories of each predictor are merged greedily (most similar pair
    first, judged by a variance test) until the remaining grouping is
    significant; the best-scoring predictor wins.

    Fix: the original computed ``sufficient_split`` twice (the first
    combined assignment was dead code) and unconditionally overwrote an
    ALPHA_MERGE invalid reason with MIN_CHILD_NODE_SIZE; the reason now
    reflects the check that actually failed.
    """
    # Best split found so far; starts invalid (column_id is None).
    split = Split(None, None, None, None, 0)
    # Bartlett's test assumes normal data, Levene's is the robust fallback.
    is_normal = stats.normaltest(self.dep_population)[1] > 0.05
    sig_test = stats.bartlett if is_normal else stats.levene
    response_set = dep.arr
    if dep.weights is not None:
        # weights are applied by scaling the responses directly
        response_set = dep.arr * dep.weights

    for i, ind_var in enumerate(ind):
        ind_var = ind_var.deep_copy()
        unique = np.unique(ind_var.arr)
        keyed_set = {}

        # bucket the (possibly weighted) responses by predictor category
        for col in unique:
            matched_elements = np.compress(ind_var.arr == col, response_set)
            keyed_set[col] = matched_elements

        while next(ind_var.possible_groupings(), None) is not None:
            # find the most similar pair (highest p-value when compared)
            choice, highest_p_join, split_score = None, None, None
            for comb in ind_var.possible_groupings():
                col1_keyed_set = keyed_set[comb[0]]
                col2_keyed_set = keyed_set[comb[1]]
                dof = len(np.concatenate((col1_keyed_set, col2_keyed_set))) - 2
                score, p_split = sig_test(col1_keyed_set, col2_keyed_set)

                if choice is None or p_split > highest_p_join or (p_split == highest_p_join and score > split_score):
                    choice, highest_p_join, split_score = comb, p_split, score

            # a split stands only if the groups differ significantly AND
            # every resulting node is large enough; record the reason for
            # whichever check failed
            min_size_ok = all(
                len(node_v) >= self.min_child_node_size for node_v in keyed_set.values()
            )
            invalid_reason = None
            sufficient_split = highest_p_join < self.alpha_merge
            if not sufficient_split:
                invalid_reason = InvalidSplitReason.ALPHA_MERGE
            elif not min_size_ok:
                invalid_reason = InvalidSplitReason.MIN_CHILD_NODE_SIZE
            sufficient_split = sufficient_split and min_size_ok

            if sufficient_split and len(keyed_set.values()) > 1:
                # score the whole predictor across all remaining groups
                dof = len(np.concatenate(list(keyed_set.values()))) - 2
                score, p_split = sig_test(*keyed_set.values())

                temp_split = Split(i, ind_var.groups(), score, p_split, dof, split_name=ind_var.name)

                better_split = not split.valid() or p_split < split.p or (p_split == split.p and score > split.score)

                if better_split:
                    split, temp_split = temp_split, split

                score_threshold = self.split_threshold * split.score

                # near-best splits are retained as surrogates of the winner
                if temp_split.valid() and temp_split.score >= score_threshold:
                    for sur in temp_split.surrogates:
                        if sur.column_id != i and sur.score >= score_threshold:
                            split.surrogates.append(sur)

                    temp_split.surrogates = []
                    split.surrogates.append(temp_split)

                break
            else:
                split.invalid_reason = invalid_reason

                # merge the most similar pair and re-evaluate
                ind_var.group(choice[0], choice[1])

                keyed_set[choice[0]] = np.concatenate((keyed_set[choice[1]], keyed_set[choice[0]]))
                del keyed_set[choice[1]]

    if split.valid():
        split.sub_split_values(ind[split.column_id].metadata)
    return split
|
def best_con_split(self, ind, dep):
    """ determine best continuous variable split

    Categories of each predictor are merged greedily (most similar pair
    first, judged by a variance test) until the remaining grouping is
    significant; the best-scoring predictor wins.

    Fix: the original computed ``sufficient_split`` twice (the first
    combined assignment was dead code) and unconditionally overwrote an
    ALPHA_MERGE invalid reason with MIN_CHILD_NODE_SIZE; the reason now
    reflects the check that actually failed.
    """
    # Best split found so far; starts invalid (column_id is None).
    split = Split(None, None, None, None, 0)
    # Bartlett's test assumes normal data, Levene's is the robust fallback.
    is_normal = stats.normaltest(self.dep_population)[1] > 0.05
    sig_test = stats.bartlett if is_normal else stats.levene
    response_set = dep.arr
    if dep.weights is not None:
        # weights are applied by scaling the responses directly
        response_set = dep.arr * dep.weights

    for i, ind_var in enumerate(ind):
        ind_var = ind_var.deep_copy()
        unique = np.unique(ind_var.arr)
        keyed_set = {}

        # bucket the (possibly weighted) responses by predictor category
        for col in unique:
            matched_elements = np.compress(ind_var.arr == col, response_set)
            keyed_set[col] = matched_elements

        while next(ind_var.possible_groupings(), None) is not None:
            # find the most similar pair (highest p-value when compared)
            choice, highest_p_join, split_score = None, None, None
            for comb in ind_var.possible_groupings():
                col1_keyed_set = keyed_set[comb[0]]
                col2_keyed_set = keyed_set[comb[1]]
                dof = len(np.concatenate((col1_keyed_set, col2_keyed_set))) - 2
                score, p_split = sig_test(col1_keyed_set, col2_keyed_set)

                if choice is None or p_split > highest_p_join or (p_split == highest_p_join and score > split_score):
                    choice, highest_p_join, split_score = comb, p_split, score

            # a split stands only if the groups differ significantly AND
            # every resulting node is large enough; record the reason for
            # whichever check failed
            min_size_ok = all(
                len(node_v) >= self.min_child_node_size for node_v in keyed_set.values()
            )
            invalid_reason = None
            sufficient_split = highest_p_join < self.alpha_merge
            if not sufficient_split:
                invalid_reason = InvalidSplitReason.ALPHA_MERGE
            elif not min_size_ok:
                invalid_reason = InvalidSplitReason.MIN_CHILD_NODE_SIZE
            sufficient_split = sufficient_split and min_size_ok

            if sufficient_split and len(keyed_set.values()) > 1:
                # score the whole predictor across all remaining groups
                dof = len(np.concatenate(list(keyed_set.values()))) - 2
                score, p_split = sig_test(*keyed_set.values())

                temp_split = Split(i, ind_var.groups(), score, p_split, dof, split_name=ind_var.name)

                better_split = not split.valid() or p_split < split.p or (p_split == split.p and score > split.score)

                if better_split:
                    split, temp_split = temp_split, split

                score_threshold = self.split_threshold * split.score

                # near-best splits are retained as surrogates of the winner
                if temp_split.valid() and temp_split.score >= score_threshold:
                    for sur in temp_split.surrogates:
                        if sur.column_id != i and sur.score >= score_threshold:
                            split.surrogates.append(sur)

                    temp_split.surrogates = []
                    split.surrogates.append(temp_split)

                break
            else:
                split.invalid_reason = invalid_reason

                # merge the most similar pair and re-evaluate
                ind_var.group(choice[0], choice[1])

                keyed_set[choice[0]] = np.concatenate((keyed_set[choice[1]], keyed_set[choice[0]]))
                del keyed_set[choice[1]]

    if split.valid():
        split.sub_split_values(ind[split.column_id].metadata)
    return split
"def valid(self):\n return self.column_id is not None\n"
] | class Stats(object):
"""
Stats class that determines the correct statistical method to apply
"""
def __init__(self, alpha_merge, min_child_node_size, split_threshold, dep_population):
self.split_threshold = 1 - split_threshold
self.alpha_merge = alpha_merge
self.min_child_node_size = min_child_node_size
self.dep_population = dep_population
def best_split(self, ind, dep):
""" determine which splitting function to apply """
if isinstance(dep, ContinuousColumn):
return self.best_con_split(ind, dep)
else:
return self.best_cat_heuristic_split(ind, dep)
def best_cat_heuristic_split(self, ind, dep):
""" determine best categorical variable split using heuristic methods """
split = Split(None, None, None, None, 0)
min_child_node_size = self.min_child_node_size
all_dep = np.unique(dep.arr)
if len(all_dep) == 1:
split.invalid_reason = InvalidSplitReason.PURE_NODE
return split
elif len(dep.arr) < min_child_node_size and dep.weights is None:
# if not weights and too small, skip
split.invalid_reason = InvalidSplitReason.MIN_CHILD_NODE_SIZE
return split
elif dep.weights is not None and len(dep.weights) < min_child_node_size:
# if weighted count is too small, skip
split.invalid_reason = InvalidSplitReason.PURE_NODE
return split
for i, ind_var in enumerate(ind):
split.invalid_reason = None # must reset because using invalid reason to break
ind_var = ind_var.deep_copy()
unique = np.unique(ind_var.arr)
freq = {}
if dep.weights is None:
for col in unique:
counts = np.unique(np.compress(ind_var.arr == col, dep.arr), return_counts=True)
freq[col] = cl.defaultdict(int)
freq[col].update(np.transpose(counts))
else:
for col in unique:
counts = np.unique(np.compress(ind_var.arr == col, dep.arr), return_counts=True)
freq[col] = cl.defaultdict(int)
for dep_v in all_dep:
freq[col][dep_v] = dep.weights[(ind_var.arr == col) * (dep.arr == dep_v)].sum()
if dep.weights is not None:
row_count = dep.weights.sum()
else:
row_count = len(dep.arr)
if len(list(ind_var.possible_groupings())) == 0:
split.invalid_reason = InvalidSplitReason.PURE_NODE
while next(ind_var.possible_groupings(), None) is not None:
choice, highest_p_join, split_chi = None, None, None
for comb in ind_var.possible_groupings():
col1_freq = freq[comb[0]]
col2_freq = freq[comb[1]]
keys = set(col1_freq.keys()).union(col2_freq.keys())
n_ij = np.array([
[col1_freq.get(k, 0) for k in keys],
[col2_freq.get(k, 0) for k in keys]
])
# check to see if min_child_node_size permits this direction
# 31 can't merge with 10 if it only leaves 27 for the other node(s)
# but if these are the only two, can't skip, because the level can be defined
# as these two nodes
other_splits = row_count - n_ij.sum()
if other_splits < min_child_node_size and other_splits != 0:
p_split, dof, chi = 1, NaN, NaN
continue
if n_ij.shape[1] == 1:
p_split, dof, chi = 1, NaN, NaN
# could be the only valid combination, as we skip
# ones that result in other nodes that give min child node sizes
# this solves [[20], [10, 11]] even though 10 & 11 are exact,
# this must be the choice of this iteration
choice = comb
break
else:
chi, p_split, dof = chisquare(n_ij, dep.weights is not None)
if choice is None or p_split > highest_p_join or (p_split == highest_p_join and chi > split_chi):
choice, highest_p_join, split_chi = comb, p_split, chi
sufficient_split = not highest_p_join or highest_p_join < self.alpha_merge
if not sufficient_split:
split.invalid_reason = InvalidSplitReason.ALPHA_MERGE
elif (n_ij.sum(axis=1) < min_child_node_size).any():
split.invalid_reason = InvalidSplitReason.MIN_CHILD_NODE_SIZE
else:
n_ij = np.array([
[f[dep_val] for dep_val in all_dep] for f in freq.values()
])
dof = (n_ij.shape[0] - 1) * (n_ij.shape[1] - 1)
chi, p_split, dof = chisquare(n_ij, dep.weights is not None)
temp_split = Split(i, ind_var.groups(), chi, p_split, dof, split_name=ind_var.name)
better_split = not split.valid() or p_split < split.p or (p_split == split.p and chi > split.score)
if better_split:
split, temp_split = temp_split, split
chi_threshold = self.split_threshold * split.score
if temp_split.valid() and temp_split.score >= chi_threshold:
for sur in temp_split.surrogates:
if sur.column_id != i and sur.score >= chi_threshold:
split.surrogates.append(sur)
temp_split.surrogates = []
split.surrogates.append(temp_split)
break
# all combinations created don't suffice. i.e. what's left is below min_child_node_size
if choice is None:
break
else:
ind_var.group(choice[0], choice[1])
for val, count in freq[choice[1]].items():
freq[choice[0]][val] += count
del freq[choice[1]]
if split.valid():
split.sub_split_values(ind[split.column_id].metadata)
return split
|
Rambatino/CHAID | CHAID/tree.py | Tree.from_numpy | python | def from_numpy(ndarr, arr, alpha_merge=0.05, max_depth=2, min_parent_node_size=30,
min_child_node_size=30, split_titles=None, split_threshold=0, weights=None,
variable_types=None, dep_variable_type='categorical'):
vectorised_array = []
variable_types = variable_types or ['nominal'] * ndarr.shape[1]
for ind, col_type in enumerate(variable_types):
title = None
if split_titles is not None: title = split_titles[ind]
if col_type == 'ordinal':
col = OrdinalColumn(ndarr[:, ind], name=title)
elif col_type == 'nominal':
col = NominalColumn(ndarr[:, ind], name=title)
else:
raise NotImplementedError('Unknown independent variable type ' + col_type)
vectorised_array.append(col)
if dep_variable_type == 'categorical':
observed = NominalColumn(arr, weights=weights)
elif dep_variable_type == 'continuous':
observed = ContinuousColumn(arr, weights=weights)
else:
raise NotImplementedError('Unknown dependent variable type ' + dep_variable_type)
config = { 'alpha_merge': alpha_merge, 'max_depth': max_depth, 'min_parent_node_size': min_parent_node_size,
'min_child_node_size': min_child_node_size, 'split_threshold': split_threshold }
return Tree(vectorised_array, observed, config) | Create a CHAID object from numpy
Parameters
----------
ndarr : numpy.ndarray
non-aggregated 2-dimensional array containing
independent variables on the veritcal axis and (usually)
respondent level data on the horizontal axis
arr : numpy.ndarray
1-dimensional array of the dependent variable associated with
ndarr
alpha_merge : float
the threshold value in which to create a split (default 0.05)
max_depth : float
the threshold value for the maximum number of levels after the root
node in the tree (default 2)
min_parent_node_size : float
the threshold value of the number of respondents that the node must
contain (default 30)
split_titles : array-like
array of names for the independent variables in the data
variable_types : array-like or dict
array of variable types, or dict of column names to variable types.
Supported variable types are the strings 'nominal' or 'ordinal' in
lower case | train | https://github.com/Rambatino/CHAID/blob/dc19e41ebdf2773168733efdf0d7579950c8d2e7/CHAID/tree.py#L45-L96 | null | class Tree(object):
def __init__(self, independent_columns, dependent_column, config={}):
"""
Init method to derive the tree from the columns constructing it
Parameters
----------
independent_columns : array<Column>
an array of CHAID columns
dependent_column : Column
a single CHAID column to use as the dependent variable
config: Dict
{
alpha_merge=0.05,
max_depth=2,
min_parent_node_size=30,
min_child_node_size=30,
split_threshold=0
}
"""
self.max_depth = config.get('max_depth', 2)
self.min_parent_node_size = config.get('min_parent_node_size', 30)
self.vectorised_array = independent_columns
self.data_size = dependent_column.arr.shape[0]
self.node_count = 0
self._tree_store = None
self.observed = dependent_column
self._stats = Stats(
config.get('alpha_merge', 0.05),
config.get('min_child_node_size', 30),
config.get('split_threshold', 0),
dependent_column.arr
)
@staticmethod
def build_tree(self):
""" Build chaid tree """
self._tree_store = []
self.node(np.arange(0, self.data_size, dtype=np.int), self.vectorised_array, self.observed)
@property
def tree_store(self):
if not self._tree_store:
self.build_tree()
return self._tree_store
@staticmethod
def from_pandas_df(df, i_variables, d_variable, alpha_merge=0.05, max_depth=2,
min_parent_node_size=30, min_child_node_size=30, split_threshold=0,
weight=None, dep_variable_type='categorical'):
"""
Helper method to pre-process a pandas data frame in order to run CHAID
analysis
Parameters
----------
df : pandas.DataFrame
the dataframe with the dependent and independent variables in which
to slice from
i_variables : dict
dict of instance variable names with their variable types. Supported
variable types are the strings 'nominal' or 'ordinal' in lower case
d_variable : string
the name of the dependent variable in the dataframe
alpha_merge : float
the threshold value in which to create a split (default 0.05)
max_depth : float
the threshold value for the maximum number of levels after the root
node in the tree (default 2)
split_threshold : float
the variation in chi-score such that surrogate splits are created
(default 0)
min_parent_node_size : float
the threshold value of the number of respondents that the node must
contain (default 30)
min_child_node_size : float
the threshold value of the number of respondents that each child node must
contain (default 30)
weight : array-like
the respondent weights. If passed, weighted chi-square calculation is run
dep_variable_type : str
the type of dependent variable. Supported variable types are 'categorical' or
'continuous'
"""
ind_df = df[list(i_variables.keys())]
ind_values = ind_df.values
dep_values = df[d_variable].values
weights = df[weight] if weight is not None else None
return Tree.from_numpy(ind_values, dep_values, alpha_merge, max_depth, min_parent_node_size,
min_child_node_size, list(ind_df.columns.values), split_threshold, weights,
list(i_variables.values()), dep_variable_type)
def node(self, rows, ind, dep, depth=0, parent=None, parent_decisions=None):
""" internal method to create a node in the tree """
depth += 1
if self.max_depth < depth:
terminal_node = Node(choices=parent_decisions, node_id=self.node_count,
parent=parent, indices=rows, dep_v=dep)
self._tree_store.append(terminal_node)
self.node_count += 1
terminal_node.split.invalid_reason = InvalidSplitReason.MAX_DEPTH
return self._tree_store
split = self._stats.best_split(ind, dep)
node = Node(choices=parent_decisions, node_id=self.node_count, indices=rows, dep_v=dep,
parent=parent, split=split)
self._tree_store.append(node)
parent = self.node_count
self.node_count += 1
if not split.valid():
return self._tree_store
for index, choices in enumerate(split.splits):
correct_rows = np.in1d(ind[split.column_id].arr, choices)
dep_slice = dep[correct_rows]
ind_slice = [vect[correct_rows] for vect in ind]
row_slice = rows[correct_rows]
if self.min_parent_node_size < len(dep_slice.arr):
self.node(row_slice, ind_slice, dep_slice, depth=depth, parent=parent,
parent_decisions=split.split_map[index])
else:
terminal_node = Node(choices=split.split_map[index], node_id=self.node_count,
parent=parent, indices=row_slice, dep_v=dep_slice)
terminal_node.split.invalid_reason = InvalidSplitReason.MIN_PARENT_NODE_SIZE
self._tree_store.append(terminal_node)
self.node_count += 1
return self._tree_store
def generate_best_split(self, ind, dep):
""" internal method to generate the best split """
return self._stats.best_split(ind, dep)
def to_tree(self):
""" returns a TreeLib tree """
tree = TreeLibTree()
for node in self:
tree.create_node(node, node.node_id, parent=node.parent)
return tree
def __iter__(self):
""" Function to allow nodes to be iterated over """
return iter(self.tree_store)
def __repr__(self):
return str(self.tree_store)
def get_node(self, node_id):
"""
Returns the node with the given id
Parameters
----------
node_id : integer
Find the node with this ID
"""
return self.tree_store[node_id]
def print_tree(self):
""" prints the tree out """
self.to_tree().show(line_type='ascii')
def node_predictions(self):
""" Determines which rows fall into which node """
pred = np.zeros(self.data_size)
for node in self:
if node.is_terminal:
pred[node.indices] = node.node_id
return pred
def classification_rules(self, node=None, stack=None):
if node is None:
return [
rule for t_node in self for rule in self.classification_rules(t_node) if t_node.is_terminal
]
stack = stack or []
stack.append(node)
if node.parent is None:
return [
{
'node': stack[0].node_id,
'rules': [
{
# 'type': self.vectorised_array[x.tag.split.column_id].type,
'variable': self.get_node(ancestor.parent).split_variable,
'data': ancestor.choices
} for ancestor in stack[:-1]
]
}
]
else:
return self.classification_rules(self.get_node(node.parent), stack)
def model_predictions(self):
"""
Determines the highest frequency of
categorical dependent variable in the
terminal node where that row fell
"""
if isinstance(self.observed, ContinuousColumn):
return ValueError("Cannot make model predictions on a continuous scale")
pred = np.zeros(self.data_size).astype('object')
for node in self:
if node.is_terminal:
pred[node.indices] = max(node.members, key=node.members.get)
return pred
def risk(self):
"""
Calculates the fraction of risk associated
with the model predictions
"""
return 1 - self.accuracy()
def accuracy(self):
"""
Calculates the accuracy of the tree by comparing
the model predictions to the dataset
(TP + TN) / (TP + TN + FP + FN) == (T / (T + F))
"""
sub_observed = np.array([self.observed.metadata[i] for i in self.observed.arr])
return float((self.model_predictions() == sub_observed).sum()) / self.data_size
def render(self, path=None, view=False):
Graph(self).render(path, view)
|
Rambatino/CHAID | CHAID/tree.py | Tree.build_tree | python | def build_tree(self):
self._tree_store = []
self.node(np.arange(0, self.data_size, dtype=np.int), self.vectorised_array, self.observed) | Build chaid tree | train | https://github.com/Rambatino/CHAID/blob/dc19e41ebdf2773168733efdf0d7579950c8d2e7/CHAID/tree.py#L98-L101 | [
"def node(self, rows, ind, dep, depth=0, parent=None, parent_decisions=None):\n \"\"\" internal method to create a node in the tree \"\"\"\n depth += 1\n\n if self.max_depth < depth:\n terminal_node = Node(choices=parent_decisions, node_id=self.node_count,\n parent=parent, indices=rows, dep_v=dep)\n self._tree_store.append(terminal_node)\n self.node_count += 1\n terminal_node.split.invalid_reason = InvalidSplitReason.MAX_DEPTH\n return self._tree_store\n\n split = self._stats.best_split(ind, dep)\n\n node = Node(choices=parent_decisions, node_id=self.node_count, indices=rows, dep_v=dep,\n parent=parent, split=split)\n\n self._tree_store.append(node)\n parent = self.node_count\n self.node_count += 1\n\n if not split.valid():\n return self._tree_store\n\n for index, choices in enumerate(split.splits):\n correct_rows = np.in1d(ind[split.column_id].arr, choices)\n dep_slice = dep[correct_rows]\n ind_slice = [vect[correct_rows] for vect in ind]\n row_slice = rows[correct_rows]\n if self.min_parent_node_size < len(dep_slice.arr):\n self.node(row_slice, ind_slice, dep_slice, depth=depth, parent=parent,\n parent_decisions=split.split_map[index])\n else:\n terminal_node = Node(choices=split.split_map[index], node_id=self.node_count,\n parent=parent, indices=row_slice, dep_v=dep_slice)\n terminal_node.split.invalid_reason = InvalidSplitReason.MIN_PARENT_NODE_SIZE\n self._tree_store.append(terminal_node)\n self.node_count += 1\n return self._tree_store\n"
] | class Tree(object):
def __init__(self, independent_columns, dependent_column, config={}):
"""
Init method to derive the tree from the columns constructing it
Parameters
----------
independent_columns : array<Column>
an array of CHAID columns
dependent_column : Column
a single CHAID column to use as the dependent variable
config: Dict
{
alpha_merge=0.05,
max_depth=2,
min_parent_node_size=30,
min_child_node_size=30,
split_threshold=0
}
"""
self.max_depth = config.get('max_depth', 2)
self.min_parent_node_size = config.get('min_parent_node_size', 30)
self.vectorised_array = independent_columns
self.data_size = dependent_column.arr.shape[0]
self.node_count = 0
self._tree_store = None
self.observed = dependent_column
self._stats = Stats(
config.get('alpha_merge', 0.05),
config.get('min_child_node_size', 30),
config.get('split_threshold', 0),
dependent_column.arr
)
@staticmethod
def from_numpy(ndarr, arr, alpha_merge=0.05, max_depth=2, min_parent_node_size=30,
min_child_node_size=30, split_titles=None, split_threshold=0, weights=None,
variable_types=None, dep_variable_type='categorical'):
"""
Create a CHAID object from numpy
Parameters
----------
ndarr : numpy.ndarray
non-aggregated 2-dimensional array containing
independent variables on the veritcal axis and (usually)
respondent level data on the horizontal axis
arr : numpy.ndarray
1-dimensional array of the dependent variable associated with
ndarr
alpha_merge : float
the threshold value in which to create a split (default 0.05)
max_depth : float
the threshold value for the maximum number of levels after the root
node in the tree (default 2)
min_parent_node_size : float
the threshold value of the number of respondents that the node must
contain (default 30)
split_titles : array-like
array of names for the independent variables in the data
variable_types : array-like or dict
array of variable types, or dict of column names to variable types.
Supported variable types are the strings 'nominal' or 'ordinal' in
lower case
"""
vectorised_array = []
variable_types = variable_types or ['nominal'] * ndarr.shape[1]
for ind, col_type in enumerate(variable_types):
title = None
if split_titles is not None: title = split_titles[ind]
if col_type == 'ordinal':
col = OrdinalColumn(ndarr[:, ind], name=title)
elif col_type == 'nominal':
col = NominalColumn(ndarr[:, ind], name=title)
else:
raise NotImplementedError('Unknown independent variable type ' + col_type)
vectorised_array.append(col)
if dep_variable_type == 'categorical':
observed = NominalColumn(arr, weights=weights)
elif dep_variable_type == 'continuous':
observed = ContinuousColumn(arr, weights=weights)
else:
raise NotImplementedError('Unknown dependent variable type ' + dep_variable_type)
config = { 'alpha_merge': alpha_merge, 'max_depth': max_depth, 'min_parent_node_size': min_parent_node_size,
'min_child_node_size': min_child_node_size, 'split_threshold': split_threshold }
return Tree(vectorised_array, observed, config)
@property
def tree_store(self):
if not self._tree_store:
self.build_tree()
return self._tree_store
@staticmethod
def from_pandas_df(df, i_variables, d_variable, alpha_merge=0.05, max_depth=2,
min_parent_node_size=30, min_child_node_size=30, split_threshold=0,
weight=None, dep_variable_type='categorical'):
"""
Helper method to pre-process a pandas data frame in order to run CHAID
analysis
Parameters
----------
df : pandas.DataFrame
the dataframe with the dependent and independent variables in which
to slice from
i_variables : dict
dict of instance variable names with their variable types. Supported
variable types are the strings 'nominal' or 'ordinal' in lower case
d_variable : string
the name of the dependent variable in the dataframe
alpha_merge : float
the threshold value in which to create a split (default 0.05)
max_depth : float
the threshold value for the maximum number of levels after the root
node in the tree (default 2)
split_threshold : float
the variation in chi-score such that surrogate splits are created
(default 0)
min_parent_node_size : float
the threshold value of the number of respondents that the node must
contain (default 30)
min_child_node_size : float
the threshold value of the number of respondents that each child node must
contain (default 30)
weight : array-like
the respondent weights. If passed, weighted chi-square calculation is run
dep_variable_type : str
the type of dependent variable. Supported variable types are 'categorical' or
'continuous'
"""
ind_df = df[list(i_variables.keys())]
ind_values = ind_df.values
dep_values = df[d_variable].values
weights = df[weight] if weight is not None else None
return Tree.from_numpy(ind_values, dep_values, alpha_merge, max_depth, min_parent_node_size,
min_child_node_size, list(ind_df.columns.values), split_threshold, weights,
list(i_variables.values()), dep_variable_type)
def node(self, rows, ind, dep, depth=0, parent=None, parent_decisions=None):
""" internal method to create a node in the tree """
depth += 1
if self.max_depth < depth:
terminal_node = Node(choices=parent_decisions, node_id=self.node_count,
parent=parent, indices=rows, dep_v=dep)
self._tree_store.append(terminal_node)
self.node_count += 1
terminal_node.split.invalid_reason = InvalidSplitReason.MAX_DEPTH
return self._tree_store
split = self._stats.best_split(ind, dep)
node = Node(choices=parent_decisions, node_id=self.node_count, indices=rows, dep_v=dep,
parent=parent, split=split)
self._tree_store.append(node)
parent = self.node_count
self.node_count += 1
if not split.valid():
return self._tree_store
for index, choices in enumerate(split.splits):
correct_rows = np.in1d(ind[split.column_id].arr, choices)
dep_slice = dep[correct_rows]
ind_slice = [vect[correct_rows] for vect in ind]
row_slice = rows[correct_rows]
if self.min_parent_node_size < len(dep_slice.arr):
self.node(row_slice, ind_slice, dep_slice, depth=depth, parent=parent,
parent_decisions=split.split_map[index])
else:
terminal_node = Node(choices=split.split_map[index], node_id=self.node_count,
parent=parent, indices=row_slice, dep_v=dep_slice)
terminal_node.split.invalid_reason = InvalidSplitReason.MIN_PARENT_NODE_SIZE
self._tree_store.append(terminal_node)
self.node_count += 1
return self._tree_store
def generate_best_split(self, ind, dep):
""" internal method to generate the best split """
return self._stats.best_split(ind, dep)
def to_tree(self):
""" returns a TreeLib tree """
tree = TreeLibTree()
for node in self:
tree.create_node(node, node.node_id, parent=node.parent)
return tree
def __iter__(self):
""" Function to allow nodes to be iterated over """
return iter(self.tree_store)
def __repr__(self):
return str(self.tree_store)
def get_node(self, node_id):
"""
Returns the node with the given id
Parameters
----------
node_id : integer
Find the node with this ID
"""
return self.tree_store[node_id]
def print_tree(self):
""" prints the tree out """
self.to_tree().show(line_type='ascii')
def node_predictions(self):
""" Determines which rows fall into which node """
pred = np.zeros(self.data_size)
for node in self:
if node.is_terminal:
pred[node.indices] = node.node_id
return pred
def classification_rules(self, node=None, stack=None):
if node is None:
return [
rule for t_node in self for rule in self.classification_rules(t_node) if t_node.is_terminal
]
stack = stack or []
stack.append(node)
if node.parent is None:
return [
{
'node': stack[0].node_id,
'rules': [
{
# 'type': self.vectorised_array[x.tag.split.column_id].type,
'variable': self.get_node(ancestor.parent).split_variable,
'data': ancestor.choices
} for ancestor in stack[:-1]
]
}
]
else:
return self.classification_rules(self.get_node(node.parent), stack)
def model_predictions(self):
"""
Determines the highest frequency of
categorical dependent variable in the
terminal node where that row fell
"""
if isinstance(self.observed, ContinuousColumn):
return ValueError("Cannot make model predictions on a continuous scale")
pred = np.zeros(self.data_size).astype('object')
for node in self:
if node.is_terminal:
pred[node.indices] = max(node.members, key=node.members.get)
return pred
def risk(self):
"""
Calculates the fraction of risk associated
with the model predictions
"""
return 1 - self.accuracy()
def accuracy(self):
"""
Calculates the accuracy of the tree by comparing
the model predictions to the dataset
(TP + TN) / (TP + TN + FP + FN) == (T / (T + F))
"""
sub_observed = np.array([self.observed.metadata[i] for i in self.observed.arr])
return float((self.model_predictions() == sub_observed).sum()) / self.data_size
def render(self, path=None, view=False):
Graph(self).render(path, view)
|
Rambatino/CHAID | CHAID/tree.py | Tree.from_pandas_df | python | def from_pandas_df(df, i_variables, d_variable, alpha_merge=0.05, max_depth=2,
min_parent_node_size=30, min_child_node_size=30, split_threshold=0,
weight=None, dep_variable_type='categorical'):
ind_df = df[list(i_variables.keys())]
ind_values = ind_df.values
dep_values = df[d_variable].values
weights = df[weight] if weight is not None else None
return Tree.from_numpy(ind_values, dep_values, alpha_merge, max_depth, min_parent_node_size,
min_child_node_size, list(ind_df.columns.values), split_threshold, weights,
list(i_variables.values()), dep_variable_type) | Helper method to pre-process a pandas data frame in order to run CHAID
analysis
Parameters
----------
df : pandas.DataFrame
the dataframe with the dependent and independent variables in which
to slice from
i_variables : dict
dict of instance variable names with their variable types. Supported
variable types are the strings 'nominal' or 'ordinal' in lower case
d_variable : string
the name of the dependent variable in the dataframe
alpha_merge : float
the threshold value in which to create a split (default 0.05)
max_depth : float
the threshold value for the maximum number of levels after the root
node in the tree (default 2)
split_threshold : float
the variation in chi-score such that surrogate splits are created
(default 0)
min_parent_node_size : float
the threshold value of the number of respondents that the node must
contain (default 30)
min_child_node_size : float
the threshold value of the number of respondents that each child node must
contain (default 30)
weight : array-like
the respondent weights. If passed, weighted chi-square calculation is run
dep_variable_type : str
the type of dependent variable. Supported variable types are 'categorical' or
'continuous' | train | https://github.com/Rambatino/CHAID/blob/dc19e41ebdf2773168733efdf0d7579950c8d2e7/CHAID/tree.py#L110-L153 | [
"def from_numpy(ndarr, arr, alpha_merge=0.05, max_depth=2, min_parent_node_size=30,\n min_child_node_size=30, split_titles=None, split_threshold=0, weights=None,\n variable_types=None, dep_variable_type='categorical'):\n \"\"\"\n Create a CHAID object from numpy\n\n Parameters\n ----------\n ndarr : numpy.ndarray\n non-aggregated 2-dimensional array containing\n independent variables on the veritcal axis and (usually)\n respondent level data on the horizontal axis\n arr : numpy.ndarray\n 1-dimensional array of the dependent variable associated with\n ndarr\n alpha_merge : float\n the threshold value in which to create a split (default 0.05)\n max_depth : float\n the threshold value for the maximum number of levels after the root\n node in the tree (default 2)\n min_parent_node_size : float\n the threshold value of the number of respondents that the node must\n contain (default 30)\n split_titles : array-like\n array of names for the independent variables in the data\n variable_types : array-like or dict\n array of variable types, or dict of column names to variable types.\n Supported variable types are the strings 'nominal' or 'ordinal' in\n lower case\n \"\"\"\n vectorised_array = []\n variable_types = variable_types or ['nominal'] * ndarr.shape[1]\n for ind, col_type in enumerate(variable_types):\n title = None\n if split_titles is not None: title = split_titles[ind]\n if col_type == 'ordinal':\n col = OrdinalColumn(ndarr[:, ind], name=title)\n elif col_type == 'nominal':\n col = NominalColumn(ndarr[:, ind], name=title)\n else:\n raise NotImplementedError('Unknown independent variable type ' + col_type)\n vectorised_array.append(col)\n\n if dep_variable_type == 'categorical':\n observed = NominalColumn(arr, weights=weights)\n elif dep_variable_type == 'continuous':\n observed = ContinuousColumn(arr, weights=weights)\n else:\n raise NotImplementedError('Unknown dependent variable type ' + dep_variable_type)\n config = { 'alpha_merge': alpha_merge, 'max_depth': 
max_depth, 'min_parent_node_size': min_parent_node_size,\n 'min_child_node_size': min_child_node_size, 'split_threshold': split_threshold }\n return Tree(vectorised_array, observed, config)\n"
] | class Tree(object):
def __init__(self, independent_columns, dependent_column, config={}):
"""
Init method to derive the tree from the columns constructing it
Parameters
----------
independent_columns : array<Column>
an array of CHAID columns
dependent_column : Column
a single CHAID column to use as the dependent variable
config: Dict
{
alpha_merge=0.05,
max_depth=2,
min_parent_node_size=30,
min_child_node_size=30,
split_threshold=0
}
"""
self.max_depth = config.get('max_depth', 2)
self.min_parent_node_size = config.get('min_parent_node_size', 30)
self.vectorised_array = independent_columns
self.data_size = dependent_column.arr.shape[0]
self.node_count = 0
self._tree_store = None
self.observed = dependent_column
self._stats = Stats(
config.get('alpha_merge', 0.05),
config.get('min_child_node_size', 30),
config.get('split_threshold', 0),
dependent_column.arr
)
@staticmethod
def from_numpy(ndarr, arr, alpha_merge=0.05, max_depth=2, min_parent_node_size=30,
min_child_node_size=30, split_titles=None, split_threshold=0, weights=None,
variable_types=None, dep_variable_type='categorical'):
"""
Create a CHAID object from numpy
Parameters
----------
ndarr : numpy.ndarray
non-aggregated 2-dimensional array containing
independent variables on the veritcal axis and (usually)
respondent level data on the horizontal axis
arr : numpy.ndarray
1-dimensional array of the dependent variable associated with
ndarr
alpha_merge : float
the threshold value in which to create a split (default 0.05)
max_depth : float
the threshold value for the maximum number of levels after the root
node in the tree (default 2)
min_parent_node_size : float
the threshold value of the number of respondents that the node must
contain (default 30)
split_titles : array-like
array of names for the independent variables in the data
variable_types : array-like or dict
array of variable types, or dict of column names to variable types.
Supported variable types are the strings 'nominal' or 'ordinal' in
lower case
"""
vectorised_array = []
variable_types = variable_types or ['nominal'] * ndarr.shape[1]
for ind, col_type in enumerate(variable_types):
title = None
if split_titles is not None: title = split_titles[ind]
if col_type == 'ordinal':
col = OrdinalColumn(ndarr[:, ind], name=title)
elif col_type == 'nominal':
col = NominalColumn(ndarr[:, ind], name=title)
else:
raise NotImplementedError('Unknown independent variable type ' + col_type)
vectorised_array.append(col)
if dep_variable_type == 'categorical':
observed = NominalColumn(arr, weights=weights)
elif dep_variable_type == 'continuous':
observed = ContinuousColumn(arr, weights=weights)
else:
raise NotImplementedError('Unknown dependent variable type ' + dep_variable_type)
config = { 'alpha_merge': alpha_merge, 'max_depth': max_depth, 'min_parent_node_size': min_parent_node_size,
'min_child_node_size': min_child_node_size, 'split_threshold': split_threshold }
return Tree(vectorised_array, observed, config)
def build_tree(self):
""" Build chaid tree """
self._tree_store = []
self.node(np.arange(0, self.data_size, dtype=np.int), self.vectorised_array, self.observed)
@property
def tree_store(self):
if not self._tree_store:
self.build_tree()
return self._tree_store
@staticmethod
def node(self, rows, ind, dep, depth=0, parent=None, parent_decisions=None):
""" internal method to create a node in the tree """
depth += 1
if self.max_depth < depth:
terminal_node = Node(choices=parent_decisions, node_id=self.node_count,
parent=parent, indices=rows, dep_v=dep)
self._tree_store.append(terminal_node)
self.node_count += 1
terminal_node.split.invalid_reason = InvalidSplitReason.MAX_DEPTH
return self._tree_store
split = self._stats.best_split(ind, dep)
node = Node(choices=parent_decisions, node_id=self.node_count, indices=rows, dep_v=dep,
parent=parent, split=split)
self._tree_store.append(node)
parent = self.node_count
self.node_count += 1
if not split.valid():
return self._tree_store
for index, choices in enumerate(split.splits):
correct_rows = np.in1d(ind[split.column_id].arr, choices)
dep_slice = dep[correct_rows]
ind_slice = [vect[correct_rows] for vect in ind]
row_slice = rows[correct_rows]
if self.min_parent_node_size < len(dep_slice.arr):
self.node(row_slice, ind_slice, dep_slice, depth=depth, parent=parent,
parent_decisions=split.split_map[index])
else:
terminal_node = Node(choices=split.split_map[index], node_id=self.node_count,
parent=parent, indices=row_slice, dep_v=dep_slice)
terminal_node.split.invalid_reason = InvalidSplitReason.MIN_PARENT_NODE_SIZE
self._tree_store.append(terminal_node)
self.node_count += 1
return self._tree_store
    def generate_best_split(self, ind, dep):
        """Return the best split for the given columns (internal helper)."""
        # Thin wrapper delegating to the Stats helper configured in __init__.
        return self._stats.best_split(ind, dep)
def to_tree(self):
""" returns a TreeLib tree """
tree = TreeLibTree()
for node in self:
tree.create_node(node, node.node_id, parent=node.parent)
return tree
    def __iter__(self):
        """Iterate over the tree's nodes (the tree is built lazily on first access)."""
        return iter(self.tree_store)
    def __repr__(self):
        """Debug representation: the string form of the underlying node list."""
        return str(self.tree_store)
    def get_node(self, node_id):
        """
        Returns the node with the given id
        Parameters
        ----------
        node_id : integer
            Find the node with this ID
        """
        # Node ids are assigned sequentially at creation, so they double as
        # positional indices into the tree store.
        return self.tree_store[node_id]
    def print_tree(self):
        """ prints the tree out """
        # Delegates to the TreeLib representation's ASCII renderer.
        self.to_tree().show(line_type='ascii')
def node_predictions(self):
""" Determines which rows fall into which node """
pred = np.zeros(self.data_size)
for node in self:
if node.is_terminal:
pred[node.indices] = node.node_id
return pred
    def classification_rules(self, node=None, stack=None):
        """
        Return the chain of split decisions leading to each terminal node.

        Called with no arguments it collects the rules for every terminal node;
        called with a node it walks parent links upward, accumulating ancestors
        in `stack` until the root is reached.
        """
        if node is None:
            # Entry point: gather rules for all terminal nodes.
            return [
                rule for t_node in self for rule in self.classification_rules(t_node) if t_node.is_terminal
            ]
        stack = stack or []
        stack.append(node)
        if node.parent is None:
            # Reached the root: stack[0] is the terminal node we started from;
            # each ancestor contributes its parent's split variable and the
            # categories chosen along this path.
            return [
                {
                    'node': stack[0].node_id,
                    'rules': [
                        {
                            # 'type': self.vectorised_array[x.tag.split.column_id].type,
                            'variable': self.get_node(ancestor.parent).split_variable,
                            'data': ancestor.choices
                        } for ancestor in stack[:-1]
                    ]
                }
            ]
        else:
            return self.classification_rules(self.get_node(node.parent), stack)
def model_predictions(self):
"""
Determines the highest frequency of
categorical dependent variable in the
terminal node where that row fell
"""
if isinstance(self.observed, ContinuousColumn):
return ValueError("Cannot make model predictions on a continuous scale")
pred = np.zeros(self.data_size).astype('object')
for node in self:
if node.is_terminal:
pred[node.indices] = max(node.members, key=node.members.get)
return pred
    def risk(self):
        """
        Calculates the fraction of risk associated
        with the model predictions
        """
        # Risk is simply the misclassification rate: 1 - accuracy.
        return 1 - self.accuracy()
    def accuracy(self):
        """
        Calculates the accuracy of the tree by comparing
        the model predictions to the dataset
        (TP + TN) / (TP + TN + FP + FN) == (T / (T + F))
        """
        # Map each encoded observed value back through the column metadata so it
        # compares against the labels returned by model_predictions().
        sub_observed = np.array([self.observed.metadata[i] for i in self.observed.arr])
        return float((self.model_predictions() == sub_observed).sum()) / self.data_size
    def render(self, path=None, view=False):
        """Render the tree via the project's Graph helper; `path` is the output
        location and `view` asks for the result to be opened when True."""
        Graph(self).render(path, view)
|
Rambatino/CHAID | CHAID/tree.py | Tree.node | python | def node(self, rows, ind, dep, depth=0, parent=None, parent_decisions=None):
depth += 1
if self.max_depth < depth:
terminal_node = Node(choices=parent_decisions, node_id=self.node_count,
parent=parent, indices=rows, dep_v=dep)
self._tree_store.append(terminal_node)
self.node_count += 1
terminal_node.split.invalid_reason = InvalidSplitReason.MAX_DEPTH
return self._tree_store
split = self._stats.best_split(ind, dep)
node = Node(choices=parent_decisions, node_id=self.node_count, indices=rows, dep_v=dep,
parent=parent, split=split)
self._tree_store.append(node)
parent = self.node_count
self.node_count += 1
if not split.valid():
return self._tree_store
for index, choices in enumerate(split.splits):
correct_rows = np.in1d(ind[split.column_id].arr, choices)
dep_slice = dep[correct_rows]
ind_slice = [vect[correct_rows] for vect in ind]
row_slice = rows[correct_rows]
if self.min_parent_node_size < len(dep_slice.arr):
self.node(row_slice, ind_slice, dep_slice, depth=depth, parent=parent,
parent_decisions=split.split_map[index])
else:
terminal_node = Node(choices=split.split_map[index], node_id=self.node_count,
parent=parent, indices=row_slice, dep_v=dep_slice)
terminal_node.split.invalid_reason = InvalidSplitReason.MIN_PARENT_NODE_SIZE
self._tree_store.append(terminal_node)
self.node_count += 1
return self._tree_store | internal method to create a node in the tree | train | https://github.com/Rambatino/CHAID/blob/dc19e41ebdf2773168733efdf0d7579950c8d2e7/CHAID/tree.py#L155-L193 | [
"def node(self, rows, ind, dep, depth=0, parent=None, parent_decisions=None):\n \"\"\" internal method to create a node in the tree \"\"\"\n depth += 1\n\n if self.max_depth < depth:\n terminal_node = Node(choices=parent_decisions, node_id=self.node_count,\n parent=parent, indices=rows, dep_v=dep)\n self._tree_store.append(terminal_node)\n self.node_count += 1\n terminal_node.split.invalid_reason = InvalidSplitReason.MAX_DEPTH\n return self._tree_store\n\n split = self._stats.best_split(ind, dep)\n\n node = Node(choices=parent_decisions, node_id=self.node_count, indices=rows, dep_v=dep,\n parent=parent, split=split)\n\n self._tree_store.append(node)\n parent = self.node_count\n self.node_count += 1\n\n if not split.valid():\n return self._tree_store\n\n for index, choices in enumerate(split.splits):\n correct_rows = np.in1d(ind[split.column_id].arr, choices)\n dep_slice = dep[correct_rows]\n ind_slice = [vect[correct_rows] for vect in ind]\n row_slice = rows[correct_rows]\n if self.min_parent_node_size < len(dep_slice.arr):\n self.node(row_slice, ind_slice, dep_slice, depth=depth, parent=parent,\n parent_decisions=split.split_map[index])\n else:\n terminal_node = Node(choices=split.split_map[index], node_id=self.node_count,\n parent=parent, indices=row_slice, dep_v=dep_slice)\n terminal_node.split.invalid_reason = InvalidSplitReason.MIN_PARENT_NODE_SIZE\n self._tree_store.append(terminal_node)\n self.node_count += 1\n return self._tree_store\n"
] | class Tree(object):
def __init__(self, independent_columns, dependent_column, config={}):
"""
Init method to derive the tree from the columns constructing it
Parameters
----------
independent_columns : array<Column>
an array of CHAID columns
dependent_column : Column
a single CHAID column to use as the dependent variable
config: Dict
{
alpha_merge=0.05,
max_depth=2,
min_parent_node_size=30,
min_child_node_size=30,
split_threshold=0
}
"""
self.max_depth = config.get('max_depth', 2)
self.min_parent_node_size = config.get('min_parent_node_size', 30)
self.vectorised_array = independent_columns
self.data_size = dependent_column.arr.shape[0]
self.node_count = 0
self._tree_store = None
self.observed = dependent_column
self._stats = Stats(
config.get('alpha_merge', 0.05),
config.get('min_child_node_size', 30),
config.get('split_threshold', 0),
dependent_column.arr
)
@staticmethod
def from_numpy(ndarr, arr, alpha_merge=0.05, max_depth=2, min_parent_node_size=30,
min_child_node_size=30, split_titles=None, split_threshold=0, weights=None,
variable_types=None, dep_variable_type='categorical'):
"""
Create a CHAID object from numpy
Parameters
----------
ndarr : numpy.ndarray
non-aggregated 2-dimensional array containing
independent variables on the veritcal axis and (usually)
respondent level data on the horizontal axis
arr : numpy.ndarray
1-dimensional array of the dependent variable associated with
ndarr
alpha_merge : float
the threshold value in which to create a split (default 0.05)
max_depth : float
the threshold value for the maximum number of levels after the root
node in the tree (default 2)
min_parent_node_size : float
the threshold value of the number of respondents that the node must
contain (default 30)
split_titles : array-like
array of names for the independent variables in the data
variable_types : array-like or dict
array of variable types, or dict of column names to variable types.
Supported variable types are the strings 'nominal' or 'ordinal' in
lower case
"""
vectorised_array = []
variable_types = variable_types or ['nominal'] * ndarr.shape[1]
for ind, col_type in enumerate(variable_types):
title = None
if split_titles is not None: title = split_titles[ind]
if col_type == 'ordinal':
col = OrdinalColumn(ndarr[:, ind], name=title)
elif col_type == 'nominal':
col = NominalColumn(ndarr[:, ind], name=title)
else:
raise NotImplementedError('Unknown independent variable type ' + col_type)
vectorised_array.append(col)
if dep_variable_type == 'categorical':
observed = NominalColumn(arr, weights=weights)
elif dep_variable_type == 'continuous':
observed = ContinuousColumn(arr, weights=weights)
else:
raise NotImplementedError('Unknown dependent variable type ' + dep_variable_type)
config = { 'alpha_merge': alpha_merge, 'max_depth': max_depth, 'min_parent_node_size': min_parent_node_size,
'min_child_node_size': min_child_node_size, 'split_threshold': split_threshold }
return Tree(vectorised_array, observed, config)
def build_tree(self):
""" Build chaid tree """
self._tree_store = []
self.node(np.arange(0, self.data_size, dtype=np.int), self.vectorised_array, self.observed)
@property
def tree_store(self):
if not self._tree_store:
self.build_tree()
return self._tree_store
@staticmethod
def from_pandas_df(df, i_variables, d_variable, alpha_merge=0.05, max_depth=2,
min_parent_node_size=30, min_child_node_size=30, split_threshold=0,
weight=None, dep_variable_type='categorical'):
"""
Helper method to pre-process a pandas data frame in order to run CHAID
analysis
Parameters
----------
df : pandas.DataFrame
the dataframe with the dependent and independent variables in which
to slice from
i_variables : dict
dict of instance variable names with their variable types. Supported
variable types are the strings 'nominal' or 'ordinal' in lower case
d_variable : string
the name of the dependent variable in the dataframe
alpha_merge : float
the threshold value in which to create a split (default 0.05)
max_depth : float
the threshold value for the maximum number of levels after the root
node in the tree (default 2)
split_threshold : float
the variation in chi-score such that surrogate splits are created
(default 0)
min_parent_node_size : float
the threshold value of the number of respondents that the node must
contain (default 30)
min_child_node_size : float
the threshold value of the number of respondents that each child node must
contain (default 30)
weight : array-like
the respondent weights. If passed, weighted chi-square calculation is run
dep_variable_type : str
the type of dependent variable. Supported variable types are 'categorical' or
'continuous'
"""
ind_df = df[list(i_variables.keys())]
ind_values = ind_df.values
dep_values = df[d_variable].values
weights = df[weight] if weight is not None else None
return Tree.from_numpy(ind_values, dep_values, alpha_merge, max_depth, min_parent_node_size,
min_child_node_size, list(ind_df.columns.values), split_threshold, weights,
list(i_variables.values()), dep_variable_type)
def generate_best_split(self, ind, dep):
""" internal method to generate the best split """
return self._stats.best_split(ind, dep)
def to_tree(self):
""" returns a TreeLib tree """
tree = TreeLibTree()
for node in self:
tree.create_node(node, node.node_id, parent=node.parent)
return tree
def __iter__(self):
""" Function to allow nodes to be iterated over """
return iter(self.tree_store)
def __repr__(self):
return str(self.tree_store)
def get_node(self, node_id):
"""
Returns the node with the given id
Parameters
----------
node_id : integer
Find the node with this ID
"""
return self.tree_store[node_id]
def print_tree(self):
""" prints the tree out """
self.to_tree().show(line_type='ascii')
def node_predictions(self):
""" Determines which rows fall into which node """
pred = np.zeros(self.data_size)
for node in self:
if node.is_terminal:
pred[node.indices] = node.node_id
return pred
def classification_rules(self, node=None, stack=None):
if node is None:
return [
rule for t_node in self for rule in self.classification_rules(t_node) if t_node.is_terminal
]
stack = stack or []
stack.append(node)
if node.parent is None:
return [
{
'node': stack[0].node_id,
'rules': [
{
# 'type': self.vectorised_array[x.tag.split.column_id].type,
'variable': self.get_node(ancestor.parent).split_variable,
'data': ancestor.choices
} for ancestor in stack[:-1]
]
}
]
else:
return self.classification_rules(self.get_node(node.parent), stack)
def model_predictions(self):
"""
Determines the highest frequency of
categorical dependent variable in the
terminal node where that row fell
"""
if isinstance(self.observed, ContinuousColumn):
return ValueError("Cannot make model predictions on a continuous scale")
pred = np.zeros(self.data_size).astype('object')
for node in self:
if node.is_terminal:
pred[node.indices] = max(node.members, key=node.members.get)
return pred
def risk(self):
"""
Calculates the fraction of risk associated
with the model predictions
"""
return 1 - self.accuracy()
def accuracy(self):
"""
Calculates the accuracy of the tree by comparing
the model predictions to the dataset
(TP + TN) / (TP + TN + FP + FN) == (T / (T + F))
"""
sub_observed = np.array([self.observed.metadata[i] for i in self.observed.arr])
return float((self.model_predictions() == sub_observed).sum()) / self.data_size
def render(self, path=None, view=False):
Graph(self).render(path, view)
|
Rambatino/CHAID | CHAID/tree.py | Tree.to_tree | python | def to_tree(self):
tree = TreeLibTree()
for node in self:
tree.create_node(node, node.node_id, parent=node.parent)
return tree | returns a TreeLib tree | train | https://github.com/Rambatino/CHAID/blob/dc19e41ebdf2773168733efdf0d7579950c8d2e7/CHAID/tree.py#L199-L204 | null | class Tree(object):
def __init__(self, independent_columns, dependent_column, config={}):
"""
Init method to derive the tree from the columns constructing it
Parameters
----------
independent_columns : array<Column>
an array of CHAID columns
dependent_column : Column
a single CHAID column to use as the dependent variable
config: Dict
{
alpha_merge=0.05,
max_depth=2,
min_parent_node_size=30,
min_child_node_size=30,
split_threshold=0
}
"""
self.max_depth = config.get('max_depth', 2)
self.min_parent_node_size = config.get('min_parent_node_size', 30)
self.vectorised_array = independent_columns
self.data_size = dependent_column.arr.shape[0]
self.node_count = 0
self._tree_store = None
self.observed = dependent_column
self._stats = Stats(
config.get('alpha_merge', 0.05),
config.get('min_child_node_size', 30),
config.get('split_threshold', 0),
dependent_column.arr
)
@staticmethod
def from_numpy(ndarr, arr, alpha_merge=0.05, max_depth=2, min_parent_node_size=30,
min_child_node_size=30, split_titles=None, split_threshold=0, weights=None,
variable_types=None, dep_variable_type='categorical'):
"""
Create a CHAID object from numpy
Parameters
----------
ndarr : numpy.ndarray
non-aggregated 2-dimensional array containing
independent variables on the veritcal axis and (usually)
respondent level data on the horizontal axis
arr : numpy.ndarray
1-dimensional array of the dependent variable associated with
ndarr
alpha_merge : float
the threshold value in which to create a split (default 0.05)
max_depth : float
the threshold value for the maximum number of levels after the root
node in the tree (default 2)
min_parent_node_size : float
the threshold value of the number of respondents that the node must
contain (default 30)
split_titles : array-like
array of names for the independent variables in the data
variable_types : array-like or dict
array of variable types, or dict of column names to variable types.
Supported variable types are the strings 'nominal' or 'ordinal' in
lower case
"""
vectorised_array = []
variable_types = variable_types or ['nominal'] * ndarr.shape[1]
for ind, col_type in enumerate(variable_types):
title = None
if split_titles is not None: title = split_titles[ind]
if col_type == 'ordinal':
col = OrdinalColumn(ndarr[:, ind], name=title)
elif col_type == 'nominal':
col = NominalColumn(ndarr[:, ind], name=title)
else:
raise NotImplementedError('Unknown independent variable type ' + col_type)
vectorised_array.append(col)
if dep_variable_type == 'categorical':
observed = NominalColumn(arr, weights=weights)
elif dep_variable_type == 'continuous':
observed = ContinuousColumn(arr, weights=weights)
else:
raise NotImplementedError('Unknown dependent variable type ' + dep_variable_type)
config = { 'alpha_merge': alpha_merge, 'max_depth': max_depth, 'min_parent_node_size': min_parent_node_size,
'min_child_node_size': min_child_node_size, 'split_threshold': split_threshold }
return Tree(vectorised_array, observed, config)
def build_tree(self):
""" Build chaid tree """
self._tree_store = []
self.node(np.arange(0, self.data_size, dtype=np.int), self.vectorised_array, self.observed)
@property
def tree_store(self):
if not self._tree_store:
self.build_tree()
return self._tree_store
@staticmethod
def from_pandas_df(df, i_variables, d_variable, alpha_merge=0.05, max_depth=2,
min_parent_node_size=30, min_child_node_size=30, split_threshold=0,
weight=None, dep_variable_type='categorical'):
"""
Helper method to pre-process a pandas data frame in order to run CHAID
analysis
Parameters
----------
df : pandas.DataFrame
the dataframe with the dependent and independent variables in which
to slice from
i_variables : dict
dict of instance variable names with their variable types. Supported
variable types are the strings 'nominal' or 'ordinal' in lower case
d_variable : string
the name of the dependent variable in the dataframe
alpha_merge : float
the threshold value in which to create a split (default 0.05)
max_depth : float
the threshold value for the maximum number of levels after the root
node in the tree (default 2)
split_threshold : float
the variation in chi-score such that surrogate splits are created
(default 0)
min_parent_node_size : float
the threshold value of the number of respondents that the node must
contain (default 30)
min_child_node_size : float
the threshold value of the number of respondents that each child node must
contain (default 30)
weight : array-like
the respondent weights. If passed, weighted chi-square calculation is run
dep_variable_type : str
the type of dependent variable. Supported variable types are 'categorical' or
'continuous'
"""
ind_df = df[list(i_variables.keys())]
ind_values = ind_df.values
dep_values = df[d_variable].values
weights = df[weight] if weight is not None else None
return Tree.from_numpy(ind_values, dep_values, alpha_merge, max_depth, min_parent_node_size,
min_child_node_size, list(ind_df.columns.values), split_threshold, weights,
list(i_variables.values()), dep_variable_type)
    def node(self, rows, ind, dep, depth=0, parent=None, parent_decisions=None):
        """
        Recursively create a node (and its children) in the tree.

        `rows` holds the dataset indices reaching this node; `ind`/`dep` are the
        independent/dependent columns already sliced to those rows; `parent` is
        the parent's node id (None for the root) and `parent_decisions` the
        parent-split choices that lead here.
        """
        depth += 1
        # Stop growing once the configured maximum depth is exceeded.
        if self.max_depth < depth:
            terminal_node = Node(choices=parent_decisions, node_id=self.node_count,
                                 parent=parent, indices=rows, dep_v=dep)
            self._tree_store.append(terminal_node)
            self.node_count += 1
            terminal_node.split.invalid_reason = InvalidSplitReason.MAX_DEPTH
            return self._tree_store
        split = self._stats.best_split(ind, dep)
        node = Node(choices=parent_decisions, node_id=self.node_count, indices=rows, dep_v=dep,
                    parent=parent, split=split)
        self._tree_store.append(node)
        parent = self.node_count
        self.node_count += 1
        if not split.valid():
            return self._tree_store
        # Recurse into each branch of the chosen split.
        for index, choices in enumerate(split.splits):
            correct_rows = np.in1d(ind[split.column_id].arr, choices)
            dep_slice = dep[correct_rows]
            ind_slice = [vect[correct_rows] for vect in ind]
            row_slice = rows[correct_rows]
            if self.min_parent_node_size < len(dep_slice.arr):
                self.node(row_slice, ind_slice, dep_slice, depth=depth, parent=parent,
                          parent_decisions=split.split_map[index])
            else:
                # Too few respondents to split further: record a terminal node.
                terminal_node = Node(choices=split.split_map[index], node_id=self.node_count,
                                     parent=parent, indices=row_slice, dep_v=dep_slice)
                terminal_node.split.invalid_reason = InvalidSplitReason.MIN_PARENT_NODE_SIZE
                self._tree_store.append(terminal_node)
                self.node_count += 1
        return self._tree_store
def generate_best_split(self, ind, dep):
""" internal method to generate the best split """
return self._stats.best_split(ind, dep)
def __iter__(self):
""" Function to allow nodes to be iterated over """
return iter(self.tree_store)
def __repr__(self):
return str(self.tree_store)
def get_node(self, node_id):
"""
Returns the node with the given id
Parameters
----------
node_id : integer
Find the node with this ID
"""
return self.tree_store[node_id]
def print_tree(self):
""" prints the tree out """
self.to_tree().show(line_type='ascii')
def node_predictions(self):
""" Determines which rows fall into which node """
pred = np.zeros(self.data_size)
for node in self:
if node.is_terminal:
pred[node.indices] = node.node_id
return pred
def classification_rules(self, node=None, stack=None):
if node is None:
return [
rule for t_node in self for rule in self.classification_rules(t_node) if t_node.is_terminal
]
stack = stack or []
stack.append(node)
if node.parent is None:
return [
{
'node': stack[0].node_id,
'rules': [
{
# 'type': self.vectorised_array[x.tag.split.column_id].type,
'variable': self.get_node(ancestor.parent).split_variable,
'data': ancestor.choices
} for ancestor in stack[:-1]
]
}
]
else:
return self.classification_rules(self.get_node(node.parent), stack)
def model_predictions(self):
"""
Determines the highest frequency of
categorical dependent variable in the
terminal node where that row fell
"""
if isinstance(self.observed, ContinuousColumn):
return ValueError("Cannot make model predictions on a continuous scale")
pred = np.zeros(self.data_size).astype('object')
for node in self:
if node.is_terminal:
pred[node.indices] = max(node.members, key=node.members.get)
return pred
def risk(self):
"""
Calculates the fraction of risk associated
with the model predictions
"""
return 1 - self.accuracy()
def accuracy(self):
"""
Calculates the accuracy of the tree by comparing
the model predictions to the dataset
(TP + TN) / (TP + TN + FP + FN) == (T / (T + F))
"""
sub_observed = np.array([self.observed.metadata[i] for i in self.observed.arr])
return float((self.model_predictions() == sub_observed).sum()) / self.data_size
def render(self, path=None, view=False):
Graph(self).render(path, view)
|
Rambatino/CHAID | CHAID/tree.py | Tree.node_predictions | python | def node_predictions(self):
pred = np.zeros(self.data_size)
for node in self:
if node.is_terminal:
pred[node.indices] = node.node_id
return pred | Determines which rows fall into which node | train | https://github.com/Rambatino/CHAID/blob/dc19e41ebdf2773168733efdf0d7579950c8d2e7/CHAID/tree.py#L227-L233 | null | class Tree(object):
def __init__(self, independent_columns, dependent_column, config={}):
"""
Init method to derive the tree from the columns constructing it
Parameters
----------
independent_columns : array<Column>
an array of CHAID columns
dependent_column : Column
a single CHAID column to use as the dependent variable
config: Dict
{
alpha_merge=0.05,
max_depth=2,
min_parent_node_size=30,
min_child_node_size=30,
split_threshold=0
}
"""
self.max_depth = config.get('max_depth', 2)
self.min_parent_node_size = config.get('min_parent_node_size', 30)
self.vectorised_array = independent_columns
self.data_size = dependent_column.arr.shape[0]
self.node_count = 0
self._tree_store = None
self.observed = dependent_column
self._stats = Stats(
config.get('alpha_merge', 0.05),
config.get('min_child_node_size', 30),
config.get('split_threshold', 0),
dependent_column.arr
)
@staticmethod
def from_numpy(ndarr, arr, alpha_merge=0.05, max_depth=2, min_parent_node_size=30,
min_child_node_size=30, split_titles=None, split_threshold=0, weights=None,
variable_types=None, dep_variable_type='categorical'):
"""
Create a CHAID object from numpy
Parameters
----------
ndarr : numpy.ndarray
non-aggregated 2-dimensional array containing
independent variables on the veritcal axis and (usually)
respondent level data on the horizontal axis
arr : numpy.ndarray
1-dimensional array of the dependent variable associated with
ndarr
alpha_merge : float
the threshold value in which to create a split (default 0.05)
max_depth : float
the threshold value for the maximum number of levels after the root
node in the tree (default 2)
min_parent_node_size : float
the threshold value of the number of respondents that the node must
contain (default 30)
split_titles : array-like
array of names for the independent variables in the data
variable_types : array-like or dict
array of variable types, or dict of column names to variable types.
Supported variable types are the strings 'nominal' or 'ordinal' in
lower case
"""
vectorised_array = []
variable_types = variable_types or ['nominal'] * ndarr.shape[1]
for ind, col_type in enumerate(variable_types):
title = None
if split_titles is not None: title = split_titles[ind]
if col_type == 'ordinal':
col = OrdinalColumn(ndarr[:, ind], name=title)
elif col_type == 'nominal':
col = NominalColumn(ndarr[:, ind], name=title)
else:
raise NotImplementedError('Unknown independent variable type ' + col_type)
vectorised_array.append(col)
if dep_variable_type == 'categorical':
observed = NominalColumn(arr, weights=weights)
elif dep_variable_type == 'continuous':
observed = ContinuousColumn(arr, weights=weights)
else:
raise NotImplementedError('Unknown dependent variable type ' + dep_variable_type)
config = { 'alpha_merge': alpha_merge, 'max_depth': max_depth, 'min_parent_node_size': min_parent_node_size,
'min_child_node_size': min_child_node_size, 'split_threshold': split_threshold }
return Tree(vectorised_array, observed, config)
def build_tree(self):
""" Build chaid tree """
self._tree_store = []
self.node(np.arange(0, self.data_size, dtype=np.int), self.vectorised_array, self.observed)
@property
def tree_store(self):
if not self._tree_store:
self.build_tree()
return self._tree_store
@staticmethod
def from_pandas_df(df, i_variables, d_variable, alpha_merge=0.05, max_depth=2,
min_parent_node_size=30, min_child_node_size=30, split_threshold=0,
weight=None, dep_variable_type='categorical'):
"""
Helper method to pre-process a pandas data frame in order to run CHAID
analysis
Parameters
----------
df : pandas.DataFrame
the dataframe with the dependent and independent variables in which
to slice from
i_variables : dict
dict of instance variable names with their variable types. Supported
variable types are the strings 'nominal' or 'ordinal' in lower case
d_variable : string
the name of the dependent variable in the dataframe
alpha_merge : float
the threshold value in which to create a split (default 0.05)
max_depth : float
the threshold value for the maximum number of levels after the root
node in the tree (default 2)
split_threshold : float
the variation in chi-score such that surrogate splits are created
(default 0)
min_parent_node_size : float
the threshold value of the number of respondents that the node must
contain (default 30)
min_child_node_size : float
the threshold value of the number of respondents that each child node must
contain (default 30)
weight : array-like
the respondent weights. If passed, weighted chi-square calculation is run
dep_variable_type : str
the type of dependent variable. Supported variable types are 'categorical' or
'continuous'
"""
ind_df = df[list(i_variables.keys())]
ind_values = ind_df.values
dep_values = df[d_variable].values
weights = df[weight] if weight is not None else None
return Tree.from_numpy(ind_values, dep_values, alpha_merge, max_depth, min_parent_node_size,
min_child_node_size, list(ind_df.columns.values), split_threshold, weights,
list(i_variables.values()), dep_variable_type)
def node(self, rows, ind, dep, depth=0, parent=None, parent_decisions=None):
""" internal method to create a node in the tree """
depth += 1
if self.max_depth < depth:
terminal_node = Node(choices=parent_decisions, node_id=self.node_count,
parent=parent, indices=rows, dep_v=dep)
self._tree_store.append(terminal_node)
self.node_count += 1
terminal_node.split.invalid_reason = InvalidSplitReason.MAX_DEPTH
return self._tree_store
split = self._stats.best_split(ind, dep)
node = Node(choices=parent_decisions, node_id=self.node_count, indices=rows, dep_v=dep,
parent=parent, split=split)
self._tree_store.append(node)
parent = self.node_count
self.node_count += 1
if not split.valid():
return self._tree_store
for index, choices in enumerate(split.splits):
correct_rows = np.in1d(ind[split.column_id].arr, choices)
dep_slice = dep[correct_rows]
ind_slice = [vect[correct_rows] for vect in ind]
row_slice = rows[correct_rows]
if self.min_parent_node_size < len(dep_slice.arr):
self.node(row_slice, ind_slice, dep_slice, depth=depth, parent=parent,
parent_decisions=split.split_map[index])
else:
terminal_node = Node(choices=split.split_map[index], node_id=self.node_count,
parent=parent, indices=row_slice, dep_v=dep_slice)
terminal_node.split.invalid_reason = InvalidSplitReason.MIN_PARENT_NODE_SIZE
self._tree_store.append(terminal_node)
self.node_count += 1
return self._tree_store
def generate_best_split(self, ind, dep):
""" internal method to generate the best split """
return self._stats.best_split(ind, dep)
def to_tree(self):
""" returns a TreeLib tree """
tree = TreeLibTree()
for node in self:
tree.create_node(node, node.node_id, parent=node.parent)
return tree
def __iter__(self):
""" Function to allow nodes to be iterated over """
return iter(self.tree_store)
def __repr__(self):
return str(self.tree_store)
def get_node(self, node_id):
"""
Returns the node with the given id
Parameters
----------
node_id : integer
Find the node with this ID
"""
return self.tree_store[node_id]
def print_tree(self):
""" prints the tree out """
self.to_tree().show(line_type='ascii')
def classification_rules(self, node=None, stack=None):
if node is None:
return [
rule for t_node in self for rule in self.classification_rules(t_node) if t_node.is_terminal
]
stack = stack or []
stack.append(node)
if node.parent is None:
return [
{
'node': stack[0].node_id,
'rules': [
{
# 'type': self.vectorised_array[x.tag.split.column_id].type,
'variable': self.get_node(ancestor.parent).split_variable,
'data': ancestor.choices
} for ancestor in stack[:-1]
]
}
]
else:
return self.classification_rules(self.get_node(node.parent), stack)
def model_predictions(self):
"""
Determines the highest frequency of
categorical dependent variable in the
terminal node where that row fell
"""
if isinstance(self.observed, ContinuousColumn):
return ValueError("Cannot make model predictions on a continuous scale")
pred = np.zeros(self.data_size).astype('object')
for node in self:
if node.is_terminal:
pred[node.indices] = max(node.members, key=node.members.get)
return pred
def risk(self):
"""
Calculates the fraction of risk associated
with the model predictions
"""
return 1 - self.accuracy()
def accuracy(self):
"""
Calculates the accuracy of the tree by comparing
the model predictions to the dataset
(TP + TN) / (TP + TN + FP + FN) == (T / (T + F))
"""
sub_observed = np.array([self.observed.metadata[i] for i in self.observed.arr])
return float((self.model_predictions() == sub_observed).sum()) / self.data_size
def render(self, path=None, view=False):
Graph(self).render(path, view)
|
Rambatino/CHAID | CHAID/tree.py | Tree.model_predictions | python | def model_predictions(self):
if isinstance(self.observed, ContinuousColumn):
return ValueError("Cannot make model predictions on a continuous scale")
pred = np.zeros(self.data_size).astype('object')
for node in self:
if node.is_terminal:
pred[node.indices] = max(node.members, key=node.members.get)
return pred | Determines the highest frequency of
categorical dependent variable in the
terminal node where that row fell | train | https://github.com/Rambatino/CHAID/blob/dc19e41ebdf2773168733efdf0d7579950c8d2e7/CHAID/tree.py#L260-L272 | null | class Tree(object):
def __init__(self, independent_columns, dependent_column, config={}):
"""
Init method to derive the tree from the columns constructing it
Parameters
----------
independent_columns : array<Column>
an array of CHAID columns
dependent_column : Column
a single CHAID column to use as the dependent variable
config: Dict
{
alpha_merge=0.05,
max_depth=2,
min_parent_node_size=30,
min_child_node_size=30,
split_threshold=0
}
"""
self.max_depth = config.get('max_depth', 2)
self.min_parent_node_size = config.get('min_parent_node_size', 30)
self.vectorised_array = independent_columns
self.data_size = dependent_column.arr.shape[0]
self.node_count = 0
self._tree_store = None
self.observed = dependent_column
self._stats = Stats(
config.get('alpha_merge', 0.05),
config.get('min_child_node_size', 30),
config.get('split_threshold', 0),
dependent_column.arr
)
@staticmethod
def from_numpy(ndarr, arr, alpha_merge=0.05, max_depth=2, min_parent_node_size=30,
min_child_node_size=30, split_titles=None, split_threshold=0, weights=None,
variable_types=None, dep_variable_type='categorical'):
"""
Create a CHAID object from numpy
Parameters
----------
ndarr : numpy.ndarray
non-aggregated 2-dimensional array containing
independent variables on the veritcal axis and (usually)
respondent level data on the horizontal axis
arr : numpy.ndarray
1-dimensional array of the dependent variable associated with
ndarr
alpha_merge : float
the threshold value in which to create a split (default 0.05)
max_depth : float
the threshold value for the maximum number of levels after the root
node in the tree (default 2)
min_parent_node_size : float
the threshold value of the number of respondents that the node must
contain (default 30)
split_titles : array-like
array of names for the independent variables in the data
variable_types : array-like or dict
array of variable types, or dict of column names to variable types.
Supported variable types are the strings 'nominal' or 'ordinal' in
lower case
"""
vectorised_array = []
variable_types = variable_types or ['nominal'] * ndarr.shape[1]
for ind, col_type in enumerate(variable_types):
title = None
if split_titles is not None: title = split_titles[ind]
if col_type == 'ordinal':
col = OrdinalColumn(ndarr[:, ind], name=title)
elif col_type == 'nominal':
col = NominalColumn(ndarr[:, ind], name=title)
else:
raise NotImplementedError('Unknown independent variable type ' + col_type)
vectorised_array.append(col)
if dep_variable_type == 'categorical':
observed = NominalColumn(arr, weights=weights)
elif dep_variable_type == 'continuous':
observed = ContinuousColumn(arr, weights=weights)
else:
raise NotImplementedError('Unknown dependent variable type ' + dep_variable_type)
config = { 'alpha_merge': alpha_merge, 'max_depth': max_depth, 'min_parent_node_size': min_parent_node_size,
'min_child_node_size': min_child_node_size, 'split_threshold': split_threshold }
return Tree(vectorised_array, observed, config)
def build_tree(self):
""" Build chaid tree """
self._tree_store = []
self.node(np.arange(0, self.data_size, dtype=np.int), self.vectorised_array, self.observed)
@property
def tree_store(self):
if not self._tree_store:
self.build_tree()
return self._tree_store
@staticmethod
def from_pandas_df(df, i_variables, d_variable, alpha_merge=0.05, max_depth=2,
min_parent_node_size=30, min_child_node_size=30, split_threshold=0,
weight=None, dep_variable_type='categorical'):
"""
Helper method to pre-process a pandas data frame in order to run CHAID
analysis
Parameters
----------
df : pandas.DataFrame
the dataframe with the dependent and independent variables in which
to slice from
i_variables : dict
dict of instance variable names with their variable types. Supported
variable types are the strings 'nominal' or 'ordinal' in lower case
d_variable : string
the name of the dependent variable in the dataframe
alpha_merge : float
the threshold value in which to create a split (default 0.05)
max_depth : float
the threshold value for the maximum number of levels after the root
node in the tree (default 2)
split_threshold : float
the variation in chi-score such that surrogate splits are created
(default 0)
min_parent_node_size : float
the threshold value of the number of respondents that the node must
contain (default 30)
min_child_node_size : float
the threshold value of the number of respondents that each child node must
contain (default 30)
weight : array-like
the respondent weights. If passed, weighted chi-square calculation is run
dep_variable_type : str
the type of dependent variable. Supported variable types are 'categorical' or
'continuous'
"""
ind_df = df[list(i_variables.keys())]
ind_values = ind_df.values
dep_values = df[d_variable].values
weights = df[weight] if weight is not None else None
return Tree.from_numpy(ind_values, dep_values, alpha_merge, max_depth, min_parent_node_size,
min_child_node_size, list(ind_df.columns.values), split_threshold, weights,
list(i_variables.values()), dep_variable_type)
def node(self, rows, ind, dep, depth=0, parent=None, parent_decisions=None):
""" internal method to create a node in the tree """
depth += 1
if self.max_depth < depth:
terminal_node = Node(choices=parent_decisions, node_id=self.node_count,
parent=parent, indices=rows, dep_v=dep)
self._tree_store.append(terminal_node)
self.node_count += 1
terminal_node.split.invalid_reason = InvalidSplitReason.MAX_DEPTH
return self._tree_store
split = self._stats.best_split(ind, dep)
node = Node(choices=parent_decisions, node_id=self.node_count, indices=rows, dep_v=dep,
parent=parent, split=split)
self._tree_store.append(node)
parent = self.node_count
self.node_count += 1
if not split.valid():
return self._tree_store
for index, choices in enumerate(split.splits):
correct_rows = np.in1d(ind[split.column_id].arr, choices)
dep_slice = dep[correct_rows]
ind_slice = [vect[correct_rows] for vect in ind]
row_slice = rows[correct_rows]
if self.min_parent_node_size < len(dep_slice.arr):
self.node(row_slice, ind_slice, dep_slice, depth=depth, parent=parent,
parent_decisions=split.split_map[index])
else:
terminal_node = Node(choices=split.split_map[index], node_id=self.node_count,
parent=parent, indices=row_slice, dep_v=dep_slice)
terminal_node.split.invalid_reason = InvalidSplitReason.MIN_PARENT_NODE_SIZE
self._tree_store.append(terminal_node)
self.node_count += 1
return self._tree_store
def generate_best_split(self, ind, dep):
""" internal method to generate the best split """
return self._stats.best_split(ind, dep)
def to_tree(self):
""" returns a TreeLib tree """
tree = TreeLibTree()
for node in self:
tree.create_node(node, node.node_id, parent=node.parent)
return tree
def __iter__(self):
""" Function to allow nodes to be iterated over """
return iter(self.tree_store)
def __repr__(self):
return str(self.tree_store)
def get_node(self, node_id):
"""
Returns the node with the given id
Parameters
----------
node_id : integer
Find the node with this ID
"""
return self.tree_store[node_id]
def print_tree(self):
""" prints the tree out """
self.to_tree().show(line_type='ascii')
def node_predictions(self):
""" Determines which rows fall into which node """
pred = np.zeros(self.data_size)
for node in self:
if node.is_terminal:
pred[node.indices] = node.node_id
return pred
def classification_rules(self, node=None, stack=None):
if node is None:
return [
rule for t_node in self for rule in self.classification_rules(t_node) if t_node.is_terminal
]
stack = stack or []
stack.append(node)
if node.parent is None:
return [
{
'node': stack[0].node_id,
'rules': [
{
# 'type': self.vectorised_array[x.tag.split.column_id].type,
'variable': self.get_node(ancestor.parent).split_variable,
'data': ancestor.choices
} for ancestor in stack[:-1]
]
}
]
else:
return self.classification_rules(self.get_node(node.parent), stack)
def risk(self):
"""
Calculates the fraction of risk associated
with the model predictions
"""
return 1 - self.accuracy()
def accuracy(self):
"""
Calculates the accuracy of the tree by comparing
the model predictions to the dataset
(TP + TN) / (TP + TN + FP + FN) == (T / (T + F))
"""
sub_observed = np.array([self.observed.metadata[i] for i in self.observed.arr])
return float((self.model_predictions() == sub_observed).sum()) / self.data_size
def render(self, path=None, view=False):
Graph(self).render(path, view)
|
Rambatino/CHAID | CHAID/tree.py | Tree.accuracy | python | def accuracy(self):
sub_observed = np.array([self.observed.metadata[i] for i in self.observed.arr])
return float((self.model_predictions() == sub_observed).sum()) / self.data_size | Calculates the accuracy of the tree by comparing
the model predictions to the dataset
(TP + TN) / (TP + TN + FP + FN) == (T / (T + F)) | train | https://github.com/Rambatino/CHAID/blob/dc19e41ebdf2773168733efdf0d7579950c8d2e7/CHAID/tree.py#L281-L288 | [
"def model_predictions(self):\n \"\"\"\n Determines the highest frequency of\n categorical dependent variable in the\n terminal node where that row fell\n \"\"\"\n if isinstance(self.observed, ContinuousColumn):\n return ValueError(\"Cannot make model predictions on a continuous scale\")\n pred = np.zeros(self.data_size).astype('object')\n for node in self:\n if node.is_terminal:\n pred[node.indices] = max(node.members, key=node.members.get)\n return pred\n"
] | class Tree(object):
def __init__(self, independent_columns, dependent_column, config={}):
"""
Init method to derive the tree from the columns constructing it
Parameters
----------
independent_columns : array<Column>
an array of CHAID columns
dependent_column : Column
a single CHAID column to use as the dependent variable
config: Dict
{
alpha_merge=0.05,
max_depth=2,
min_parent_node_size=30,
min_child_node_size=30,
split_threshold=0
}
"""
self.max_depth = config.get('max_depth', 2)
self.min_parent_node_size = config.get('min_parent_node_size', 30)
self.vectorised_array = independent_columns
self.data_size = dependent_column.arr.shape[0]
self.node_count = 0
self._tree_store = None
self.observed = dependent_column
self._stats = Stats(
config.get('alpha_merge', 0.05),
config.get('min_child_node_size', 30),
config.get('split_threshold', 0),
dependent_column.arr
)
@staticmethod
def from_numpy(ndarr, arr, alpha_merge=0.05, max_depth=2, min_parent_node_size=30,
min_child_node_size=30, split_titles=None, split_threshold=0, weights=None,
variable_types=None, dep_variable_type='categorical'):
"""
Create a CHAID object from numpy
Parameters
----------
ndarr : numpy.ndarray
non-aggregated 2-dimensional array containing
independent variables on the veritcal axis and (usually)
respondent level data on the horizontal axis
arr : numpy.ndarray
1-dimensional array of the dependent variable associated with
ndarr
alpha_merge : float
the threshold value in which to create a split (default 0.05)
max_depth : float
the threshold value for the maximum number of levels after the root
node in the tree (default 2)
min_parent_node_size : float
the threshold value of the number of respondents that the node must
contain (default 30)
split_titles : array-like
array of names for the independent variables in the data
variable_types : array-like or dict
array of variable types, or dict of column names to variable types.
Supported variable types are the strings 'nominal' or 'ordinal' in
lower case
"""
vectorised_array = []
variable_types = variable_types or ['nominal'] * ndarr.shape[1]
for ind, col_type in enumerate(variable_types):
title = None
if split_titles is not None: title = split_titles[ind]
if col_type == 'ordinal':
col = OrdinalColumn(ndarr[:, ind], name=title)
elif col_type == 'nominal':
col = NominalColumn(ndarr[:, ind], name=title)
else:
raise NotImplementedError('Unknown independent variable type ' + col_type)
vectorised_array.append(col)
if dep_variable_type == 'categorical':
observed = NominalColumn(arr, weights=weights)
elif dep_variable_type == 'continuous':
observed = ContinuousColumn(arr, weights=weights)
else:
raise NotImplementedError('Unknown dependent variable type ' + dep_variable_type)
config = { 'alpha_merge': alpha_merge, 'max_depth': max_depth, 'min_parent_node_size': min_parent_node_size,
'min_child_node_size': min_child_node_size, 'split_threshold': split_threshold }
return Tree(vectorised_array, observed, config)
def build_tree(self):
""" Build chaid tree """
self._tree_store = []
self.node(np.arange(0, self.data_size, dtype=np.int), self.vectorised_array, self.observed)
@property
def tree_store(self):
if not self._tree_store:
self.build_tree()
return self._tree_store
@staticmethod
def from_pandas_df(df, i_variables, d_variable, alpha_merge=0.05, max_depth=2,
min_parent_node_size=30, min_child_node_size=30, split_threshold=0,
weight=None, dep_variable_type='categorical'):
"""
Helper method to pre-process a pandas data frame in order to run CHAID
analysis
Parameters
----------
df : pandas.DataFrame
the dataframe with the dependent and independent variables in which
to slice from
i_variables : dict
dict of instance variable names with their variable types. Supported
variable types are the strings 'nominal' or 'ordinal' in lower case
d_variable : string
the name of the dependent variable in the dataframe
alpha_merge : float
the threshold value in which to create a split (default 0.05)
max_depth : float
the threshold value for the maximum number of levels after the root
node in the tree (default 2)
split_threshold : float
the variation in chi-score such that surrogate splits are created
(default 0)
min_parent_node_size : float
the threshold value of the number of respondents that the node must
contain (default 30)
min_child_node_size : float
the threshold value of the number of respondents that each child node must
contain (default 30)
weight : array-like
the respondent weights. If passed, weighted chi-square calculation is run
dep_variable_type : str
the type of dependent variable. Supported variable types are 'categorical' or
'continuous'
"""
ind_df = df[list(i_variables.keys())]
ind_values = ind_df.values
dep_values = df[d_variable].values
weights = df[weight] if weight is not None else None
return Tree.from_numpy(ind_values, dep_values, alpha_merge, max_depth, min_parent_node_size,
min_child_node_size, list(ind_df.columns.values), split_threshold, weights,
list(i_variables.values()), dep_variable_type)
def node(self, rows, ind, dep, depth=0, parent=None, parent_decisions=None):
""" internal method to create a node in the tree """
depth += 1
if self.max_depth < depth:
terminal_node = Node(choices=parent_decisions, node_id=self.node_count,
parent=parent, indices=rows, dep_v=dep)
self._tree_store.append(terminal_node)
self.node_count += 1
terminal_node.split.invalid_reason = InvalidSplitReason.MAX_DEPTH
return self._tree_store
split = self._stats.best_split(ind, dep)
node = Node(choices=parent_decisions, node_id=self.node_count, indices=rows, dep_v=dep,
parent=parent, split=split)
self._tree_store.append(node)
parent = self.node_count
self.node_count += 1
if not split.valid():
return self._tree_store
for index, choices in enumerate(split.splits):
correct_rows = np.in1d(ind[split.column_id].arr, choices)
dep_slice = dep[correct_rows]
ind_slice = [vect[correct_rows] for vect in ind]
row_slice = rows[correct_rows]
if self.min_parent_node_size < len(dep_slice.arr):
self.node(row_slice, ind_slice, dep_slice, depth=depth, parent=parent,
parent_decisions=split.split_map[index])
else:
terminal_node = Node(choices=split.split_map[index], node_id=self.node_count,
parent=parent, indices=row_slice, dep_v=dep_slice)
terminal_node.split.invalid_reason = InvalidSplitReason.MIN_PARENT_NODE_SIZE
self._tree_store.append(terminal_node)
self.node_count += 1
return self._tree_store
def generate_best_split(self, ind, dep):
""" internal method to generate the best split """
return self._stats.best_split(ind, dep)
def to_tree(self):
""" returns a TreeLib tree """
tree = TreeLibTree()
for node in self:
tree.create_node(node, node.node_id, parent=node.parent)
return tree
def __iter__(self):
""" Function to allow nodes to be iterated over """
return iter(self.tree_store)
def __repr__(self):
return str(self.tree_store)
def get_node(self, node_id):
"""
Returns the node with the given id
Parameters
----------
node_id : integer
Find the node with this ID
"""
return self.tree_store[node_id]
def print_tree(self):
""" prints the tree out """
self.to_tree().show(line_type='ascii')
def node_predictions(self):
""" Determines which rows fall into which node """
pred = np.zeros(self.data_size)
for node in self:
if node.is_terminal:
pred[node.indices] = node.node_id
return pred
def classification_rules(self, node=None, stack=None):
if node is None:
return [
rule for t_node in self for rule in self.classification_rules(t_node) if t_node.is_terminal
]
stack = stack or []
stack.append(node)
if node.parent is None:
return [
{
'node': stack[0].node_id,
'rules': [
{
# 'type': self.vectorised_array[x.tag.split.column_id].type,
'variable': self.get_node(ancestor.parent).split_variable,
'data': ancestor.choices
} for ancestor in stack[:-1]
]
}
]
else:
return self.classification_rules(self.get_node(node.parent), stack)
def model_predictions(self):
"""
Determines the highest frequency of
categorical dependent variable in the
terminal node where that row fell
"""
if isinstance(self.observed, ContinuousColumn):
return ValueError("Cannot make model predictions on a continuous scale")
pred = np.zeros(self.data_size).astype('object')
for node in self:
if node.is_terminal:
pred[node.indices] = max(node.members, key=node.members.get)
return pred
def risk(self):
"""
Calculates the fraction of risk associated
with the model predictions
"""
return 1 - self.accuracy()
def render(self, path=None, view=False):
Graph(self).render(path, view)
|
Rambatino/CHAID | CHAID/column.py | Column.bell_set | python | def bell_set(self, collection, ordinal=False):
if len(collection) == 1:
yield [ collection ]
return
first = collection[0]
for smaller in self.bell_set(collection[1:]):
for n, subset in enumerate(smaller):
if not ordinal or (ordinal and is_sorted(smaller[:n] + [[ first ] + subset] + smaller[n+1:], self._nan)):
yield smaller[:n] + [[ first ] + subset] + smaller[n+1:]
if not ordinal or (ordinal and is_sorted([ [ first ] ] + smaller, self._nan)):
yield [ [ first ] ] + smaller | Calculates the Bell set | train | https://github.com/Rambatino/CHAID/blob/dc19e41ebdf2773168733efdf0d7579950c8d2e7/CHAID/column.py#L64-L79 | [
"def is_sorted(ndarr, nan_val=None):\n store = []\n for arr in ndarr:\n if arr == [] or len(arr) == 1: continue\n if nan_val is not None and nan_val in arr:\n arr.remove(nan_val)\n store.append(arr[-1] - arr[0] == len(arr) - 1)\n return all(store)\n",
"def bell_set(self, collection, ordinal=False):\n \"\"\"\n Calculates the Bell set\n \"\"\"\n if len(collection) == 1:\n yield [ collection ]\n return\n\n first = collection[0]\n for smaller in self.bell_set(collection[1:]):\n for n, subset in enumerate(smaller):\n if not ordinal or (ordinal and is_sorted(smaller[:n] + [[ first ] + subset] + smaller[n+1:], self._nan)):\n yield smaller[:n] + [[ first ] + subset] + smaller[n+1:]\n\n if not ordinal or (ordinal and is_sorted([ [ first ] ] + smaller, self._nan)):\n yield [ [ first ] ] + smaller\n"
] | class Column(object):
"""
A numpy array with metadata
Parameters
----------
arr : iterable object
The numpy array
metadata : dict
The substitutions of the vector
missing_id : string
An identifier for the missing value to be associated
substitute : bool
Whether the objects in the given array need to be substitued for
integers
"""
def __init__(self, arr=None, metadata=None, missing_id='<missing>',
substitute=True, weights=None, name=None):
self.metadata = dict(metadata or {})
self.arr = np.array(arr)
self._missing_id = missing_id
self.weights = weights
self.name = name
def __iter__(self):
return iter(self.arr)
def __getitem__(self, key):
raise NotImplementedError
def __setitem__(self, key, value):
raise NotImplementedError
def possible_groupings(self):
raise NotImplementedError
@property
def type(self):
"""
Returns a string representing the type
"""
raise NotImplementedError
def deep_copy(self):
"""
Returns a deep copy
"""
raise NotImplementedError
|
Rambatino/CHAID | CHAID/column.py | NominalColumn.deep_copy | python | def deep_copy(self):
return NominalColumn(self.arr, metadata=self.metadata, name=self.name,
missing_id=self._missing_id, substitute=False, weights=self.weights) | Returns a deep copy. | train | https://github.com/Rambatino/CHAID/blob/dc19e41ebdf2773168733efdf0d7579950c8d2e7/CHAID/column.py#L97-L102 | null | class NominalColumn(Column):
"""
A column containing numerical values that are unrelated to
one another (i.e. do not follow a progression)
"""
def __init__(self, arr=None, metadata=None, missing_id='<missing>',
substitute=True, weights=None, name=None):
super(self.__class__, self).__init__(arr, metadata=metadata, missing_id=missing_id, weights=weights, name=name)
if substitute and metadata is None:
self.substitute_values(arr)
self._groupings = MappingDict()
for x in np.unique(self.arr):
self._groupings[x] = [x]
def substitute_values(self, vect):
"""
Internal method to substitute integers into the vector, and construct
metadata to convert back to the original vector.
np.nan is always given -1, all other objects are given integers in
order of apperence.
Parameters
----------
vect : np.array
the vector in which to substitute values in
"""
try:
unique = np.unique(vect)
except:
unique = set(vect)
unique = [
x for x in unique if not isinstance(x, float) or not isnan(x)
]
arr = np.copy(vect)
for new_id, value in enumerate(unique):
np.place(arr, arr==value, new_id)
self.metadata[new_id] = value
arr = arr.astype(np.float)
np.place(arr, np.isnan(arr), -1)
self.arr = arr
if -1 in arr:
self.metadata[-1] = self._missing_id
def __getitem__(self, key):
new_weights = None if self.weights is None else self.weights[key]
return NominalColumn(self.arr[key], metadata=self.metadata, substitute=False, weights=new_weights, name=self.name)
def __setitem__(self, key, value):
self.arr[key] = value
return self
def groups(self):
return list(self._groupings.values())
def possible_groupings(self):
return combinations(self._groupings.keys(), 2)
def all_combinations(self):
bell_set = self.bell_set(sorted(list(self._groupings.keys())))
next(bell_set)
return bell_set
def group(self, x, y):
self._groupings[x] += self._groupings[y]
del self._groupings[y]
self.arr[self.arr == y] = x
@property
def type(self):
"""
Returns a string representing the type
"""
return 'nominal'
|
Rambatino/CHAID | CHAID/column.py | NominalColumn.substitute_values | python | def substitute_values(self, vect):
try:
unique = np.unique(vect)
except:
unique = set(vect)
unique = [
x for x in unique if not isinstance(x, float) or not isnan(x)
]
arr = np.copy(vect)
for new_id, value in enumerate(unique):
np.place(arr, arr==value, new_id)
self.metadata[new_id] = value
arr = arr.astype(np.float)
np.place(arr, np.isnan(arr), -1)
self.arr = arr
if -1 in arr:
self.metadata[-1] = self._missing_id | Internal method to substitute integers into the vector, and construct
metadata to convert back to the original vector.
np.nan is always given -1, all other objects are given integers in
order of apperence.
Parameters
----------
vect : np.array
the vector in which to substitute values in | train | https://github.com/Rambatino/CHAID/blob/dc19e41ebdf2773168733efdf0d7579950c8d2e7/CHAID/column.py#L104-L136 | null | class NominalColumn(Column):
"""
A column containing numerical values that are unrelated to
one another (i.e. do not follow a progression)
"""
def __init__(self, arr=None, metadata=None, missing_id='<missing>',
substitute=True, weights=None, name=None):
super(self.__class__, self).__init__(arr, metadata=metadata, missing_id=missing_id, weights=weights, name=name)
if substitute and metadata is None:
self.substitute_values(arr)
self._groupings = MappingDict()
for x in np.unique(self.arr):
self._groupings[x] = [x]
def deep_copy(self):
"""
Returns a deep copy.
"""
return NominalColumn(self.arr, metadata=self.metadata, name=self.name,
missing_id=self._missing_id, substitute=False, weights=self.weights)
def __getitem__(self, key):
new_weights = None if self.weights is None else self.weights[key]
return NominalColumn(self.arr[key], metadata=self.metadata, substitute=False, weights=new_weights, name=self.name)
def __setitem__(self, key, value):
self.arr[key] = value
return self
def groups(self):
return list(self._groupings.values())
def possible_groupings(self):
return combinations(self._groupings.keys(), 2)
def all_combinations(self):
bell_set = self.bell_set(sorted(list(self._groupings.keys())))
next(bell_set)
return bell_set
def group(self, x, y):
self._groupings[x] += self._groupings[y]
del self._groupings[y]
self.arr[self.arr == y] = x
@property
def type(self):
"""
Returns a string representing the type
"""
return 'nominal'
|
Rambatino/CHAID | CHAID/column.py | OrdinalColumn.deep_copy | python | def deep_copy(self):
return OrdinalColumn(self.arr, metadata=self.metadata, name=self.name,
missing_id=self._missing_id, substitute=True,
groupings=self._groupings, weights=self.weights) | Returns a deep copy. | train | https://github.com/Rambatino/CHAID/blob/dc19e41ebdf2773168733efdf0d7579950c8d2e7/CHAID/column.py#L209-L215 | null | class OrdinalColumn(Column):
"""
A column containing integer values that have an order
"""
def __init__(self, arr=None, metadata=None, missing_id='<missing>',
groupings=None, substitute=True, weights=None, name=None):
super(self.__class__, self).__init__(arr, metadata, missing_id=missing_id, weights=weights, name=name)
self._nan = np.array([np.nan]).astype(int)[0]
if substitute and metadata is None:
self.arr, self.orig_type = self.substitute_values(self.arr)
elif substitute and metadata and not np.issubdtype(self.arr.dtype, np.integer):
# custom metadata has been passed in from external source, and must be converted to int
self.arr = self.arr.astype(int)
self.metadata = { int(k):v for k, v in metadata.items() }
self.metadata[self._nan] = missing_id
self._groupings = {}
if groupings is None:
for x in np.unique(self.arr):
self._groupings[x] = [x, x + 1, False]
else:
for x in np.unique(self.arr):
self._groupings[x] = list(groupings[x])
self._possible_groups = None
def substitute_values(self, vect):
if not np.issubdtype(vect.dtype, np.integer):
uniq = set(vect)
uniq_floats = np.array(list(uniq), dtype=float)
uniq_ints = uniq_floats.astype(int)
nan = self._missing_id
self.metadata = {
new: nan if isnan(as_float) else old
for old, as_float, new in zip(uniq, uniq_floats, uniq_ints)
}
self.arr = self.arr.astype(float)
return self.arr.astype(int), self.arr.dtype.type
def __getitem__(self, key):
new_weights = None if self.weights is None else self.weights[key]
return OrdinalColumn(self.arr[key], metadata=self.metadata, name=self.name,
missing_id=self._missing_id, substitute=True,
groupings=self._groupings, weights=new_weights)
def __setitem__(self, key, value):
self.arr[key] = value
return self
def groups(self):
vals = self._groupings.values()
return [
[x for x in range(minmax[0], minmax[1])] + ([self._nan] if minmax[2] else [])
for minmax in vals
]
def possible_groupings(self):
if self._possible_groups is None:
ranges = sorted(self._groupings.items())
candidates = zip(ranges[0:], ranges[1:])
self._possible_groups = [
(k1, k2) for (k1, minmax1), (k2, minmax2) in candidates
if minmax1[1] == minmax2[0]
]
if self._nan in self.arr:
self._possible_groups += [
(key, self._nan) for key in self._groupings.keys() if key != self._nan
]
return self._possible_groups.__iter__()
def all_combinations(self):
bell_set = self.bell_set(sorted(list(self._groupings.keys())), True)
next(bell_set)
return bell_set
def group(self, x, y):
self._possible_groups = None
if y != self._nan:
x = int(x)
y = int(y)
x_max = self._groupings[x][1]
y_min = self._groupings[y][0]
if y_min >= x_max:
self._groupings[x][1] = self._groupings[y][1]
else:
self._groupings[x][0] = y_min
self._groupings[x][2] = self._groupings[x][2] or self._groupings[y][2]
else:
self._groupings[x][2] = True
del self._groupings[y]
self.arr[self.arr == y] = x
@property
def type(self):
"""
Returns a string representing the type
"""
return 'ordinal'
|
Rambatino/CHAID | CHAID/column.py | ContinuousColumn.deep_copy | python | def deep_copy(self):
return ContinuousColumn(self.arr, metadata=self.metadata, missing_id=self._missing_id, weights=self.weights) | Returns a deep copy. | train | https://github.com/Rambatino/CHAID/blob/dc19e41ebdf2773168733efdf0d7579950c8d2e7/CHAID/column.py#L290-L294 | null | class ContinuousColumn(Column):
"""
A column containing numerical values on a continuous scale
"""
def __init__(self, arr=None, metadata=None, missing_id='<missing>',
weights=None):
if not np.issubdtype(arr.dtype, np.number):
raise ValueError('Must only pass numerical values to create continuous column')
super(self.__class__, self).__init__(np.nan_to_num(arr), metadata, missing_id=missing_id, weights=weights)
def __getitem__(self, key):
new_weights = None if self.weights is None else self.weights[key]
return ContinuousColumn(self.arr[key], metadata=self.metadata, weights=new_weights)
def __setitem__(self, key, value):
self.arr[key] = value
return self
@property
def type(self):
"""
Returns a string representing the type
"""
return 'continuous'
|
Rambatino/CHAID | CHAID/split.py | Split.sub_split_values | python | def sub_split_values(self, sub):
for i, arr in enumerate(self.splits):
self.split_map[i] = [sub.get(x, x) for x in arr]
for split in self.surrogates:
split.sub_split_values(sub) | Substitutes the splits with other values into the split_map | train | https://github.com/Rambatino/CHAID/blob/dc19e41ebdf2773168733efdf0d7579950c8d2e7/CHAID/split.py#L34-L39 | null | class Split(object):
"""
A potential split for a node in to produce children
Parameters
----------
column : float
The key of where the split is occuring relative to the input data
splits : array-like
The grouped variables
split_map : array-like
The name of the grouped variables
score : float
The score value of that split
p : float
The p value of that split
dof : int
The degrees of freedom as a result of this split
invalid_reason : InvalidSplitReason()
The reason why the node failed to split
"""
def __init__(self, column, splits, score, p, dof, invalid_reason=None, split_name=None):
splits = splits or []
self.surrogates = []
self.column_id = column
self.split_name = split_name
self.splits = list(splits)
self.split_map = [None] * len(self.splits)
self.score = score
self.p = p
self._dof = dof
self._invalid_reason = invalid_reason
def name_columns(self, sub):
""" Substitutes the split column index with a human readable string """
if self.column_id is not None and len(sub) > self.column_id:
self.split_name = sub[self.column_id]
for split in self.surrogates:
split.name_columns(sub)
def __repr__(self):
if not self.valid():
return '<Invalid Chaid Split> - {}'.format(self.invalid_reason)
format_str = u'({0.column}, p={0.p}, score={0.score}, groups={0.groupings})'\
', dof={0.dof})'
return format_str.format(self)
@property
def column(self):
if not self.valid():
return None
return self.split_name or str(self.column_id)
@property
def groupings(self):
if not self.valid():
return "[]"
if all(x is None for x in self.split_map):
return str(self.splits)
return str(self.split_map)
@property
def dof(self):
return self._dof
@property
def invalid_reason(self):
return self._invalid_reason
@invalid_reason.setter
def invalid_reason(self, value):
self._invalid_reason = value
def valid(self):
return self.column_id is not None
|
Rambatino/CHAID | CHAID/split.py | Split.name_columns | python | def name_columns(self, sub):
if self.column_id is not None and len(sub) > self.column_id:
self.split_name = sub[self.column_id]
for split in self.surrogates:
split.name_columns(sub) | Substitutes the split column index with a human readable string | train | https://github.com/Rambatino/CHAID/blob/dc19e41ebdf2773168733efdf0d7579950c8d2e7/CHAID/split.py#L41-L46 | null | class Split(object):
"""
A potential split for a node in to produce children
Parameters
----------
column : float
The key of where the split is occuring relative to the input data
splits : array-like
The grouped variables
split_map : array-like
The name of the grouped variables
score : float
The score value of that split
p : float
The p value of that split
dof : int
The degrees of freedom as a result of this split
invalid_reason : InvalidSplitReason()
The reason why the node failed to split
"""
def __init__(self, column, splits, score, p, dof, invalid_reason=None, split_name=None):
splits = splits or []
self.surrogates = []
self.column_id = column
self.split_name = split_name
self.splits = list(splits)
self.split_map = [None] * len(self.splits)
self.score = score
self.p = p
self._dof = dof
self._invalid_reason = invalid_reason
def sub_split_values(self, sub):
""" Substitutes the splits with other values into the split_map """
for i, arr in enumerate(self.splits):
self.split_map[i] = [sub.get(x, x) for x in arr]
for split in self.surrogates:
split.sub_split_values(sub)
def __repr__(self):
if not self.valid():
return '<Invalid Chaid Split> - {}'.format(self.invalid_reason)
format_str = u'({0.column}, p={0.p}, score={0.score}, groups={0.groupings})'\
', dof={0.dof})'
return format_str.format(self)
@property
def column(self):
if not self.valid():
return None
return self.split_name or str(self.column_id)
@property
def groupings(self):
if not self.valid():
return "[]"
if all(x is None for x in self.split_map):
return str(self.splits)
return str(self.split_map)
@property
def dof(self):
return self._dof
@property
def invalid_reason(self):
return self._invalid_reason
@invalid_reason.setter
def invalid_reason(self, value):
self._invalid_reason = value
def valid(self):
return self.column_id is not None
|
Rambatino/CHAID | CHAID/__main__.py | main | python | def main():
parser = argparse.ArgumentParser(description='Run the chaid algorithm on a'
' csv/sav file.')
parser.add_argument('file')
parser.add_argument('dependent_variable', nargs=1)
parser.add_argument('--dependent-variable-type', type=str)
var = parser.add_argument_group('Independent Variable Specification')
var.add_argument('nominal_variables', nargs='*', help='The names of '
'independent variables to use that have no intrinsic '
'order to them')
var.add_argument('--ordinal-variables', type=str, nargs='*',
help='The names of independent variables to use that '
'have an intrinsic order but a finite amount of states')
parser.add_argument('--weights', type=str, help='Name of weight column')
parser.add_argument('--max-depth', type=int, help='Max depth of generated '
'tree')
parser.add_argument('--min-parent-node-size', type=int, help='Minimum number of '
'samples required to split the parent node')
parser.add_argument('--min-child-node-size', type=int, help='Minimum number of '
'samples required to split the child node')
parser.add_argument('--alpha-merge', type=float, help='Alpha Merge')
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--classify', action='store_true', help='Add column to'
' input with the node id of the node that that '
'respondent has been placed into')
group.add_argument('--predict', action='store_true', help='Add column to '
'input with the value of the dependent variable that '
'the majority of respondents in that node selected')
group.add_argument('--rules', action='store_true')
group.add_argument('--export', action='store_true', help='Whether to export the chart to pdf/dot')
group.add_argument('--export-path', type=str, help='Path to store chart output')
nspace = parser.parse_args()
if nspace.file[-4:] == '.csv':
data = pd.read_csv(nspace.file)
elif nspace.file[-4:] == '.sav':
import savReaderWriter as spss
raw_data = spss.SavReader(nspace.file, returnHeader=True)
raw_data_list = list(raw_data)
data = pd.DataFrame(raw_data_list)
data = data.rename(columns=data.loc[0]).iloc[1:]
else:
print('Unknown file type')
exit(1)
config = {}
if nspace.max_depth:
config['max_depth'] = nspace.max_depth
if nspace.alpha_merge:
config['alpha_merge'] = nspace.alpha_merge
if nspace.min_parent_node_size:
config['min_parent_node_size'] = nspace.min_parent_node_size
if nspace.min_child_node_size:
config['min_child_node_size'] = nspace.min_child_node_size
if nspace.weights:
config['weight'] = nspace.weights
if nspace.dependent_variable_type:
config['dep_variable_type'] = nspace.dependent_variable_type
ordinal = nspace.ordinal_variables or []
nominal = nspace.nominal_variables or []
independent_variables = nominal + ordinal
types = dict(zip(nominal + ordinal, ['nominal'] * len(nominal) + ['ordinal'] * len(ordinal)))
if len(independent_variables) == 0:
print('Need to provide at least one independent variable')
exit(1)
tree = Tree.from_pandas_df(data, types, nspace.dependent_variable[0],
**config)
if nspace.export or nspace.export_path:
tree.render(nspace.export_path, True)
if nspace.classify:
predictions = pd.Series(tree.node_predictions())
predictions.name = 'node_id'
data = pd.concat([data, predictions], axis=1)
print(data.to_csv())
elif nspace.predict:
predictions = pd.Series(tree.model_predictions())
predictions.name = 'predicted'
data = pd.concat([data, predictions], axis=1)
print(data.to_csv())
elif nspace.rules:
print('\n'.join(str(x) for x in tree.classification_rules()))
else:
tree.print_tree()
print('Accuracy: ', tree.accuracy()) | Entry point when module is run from command line | train | https://github.com/Rambatino/CHAID/blob/dc19e41ebdf2773168733efdf0d7579950c8d2e7/CHAID/__main__.py#L11-L104 | [
"def from_pandas_df(df, i_variables, d_variable, alpha_merge=0.05, max_depth=2,\n min_parent_node_size=30, min_child_node_size=30, split_threshold=0,\n weight=None, dep_variable_type='categorical'):\n \"\"\"\n Helper method to pre-process a pandas data frame in order to run CHAID\n analysis\n\n Parameters\n ----------\n df : pandas.DataFrame\n the dataframe with the dependent and independent variables in which\n to slice from\n i_variables : dict\n dict of instance variable names with their variable types. Supported\n variable types are the strings 'nominal' or 'ordinal' in lower case\n d_variable : string\n the name of the dependent variable in the dataframe\n alpha_merge : float\n the threshold value in which to create a split (default 0.05)\n max_depth : float\n the threshold value for the maximum number of levels after the root\n node in the tree (default 2)\n split_threshold : float\n the variation in chi-score such that surrogate splits are created\n (default 0)\n min_parent_node_size : float\n the threshold value of the number of respondents that the node must\n contain (default 30)\n min_child_node_size : float\n the threshold value of the number of respondents that each child node must\n contain (default 30)\n weight : array-like\n the respondent weights. If passed, weighted chi-square calculation is run\n dep_variable_type : str\n the type of dependent variable. Supported variable types are 'categorical' or\n 'continuous'\n \"\"\"\n ind_df = df[list(i_variables.keys())]\n ind_values = ind_df.values\n dep_values = df[d_variable].values\n weights = df[weight] if weight is not None else None\n return Tree.from_numpy(ind_values, dep_values, alpha_merge, max_depth, min_parent_node_size,\n min_child_node_size, list(ind_df.columns.values), split_threshold, weights,\n list(i_variables.values()), dep_variable_type)\n"
] | """
This package provides a python implementation of the Chi-Squared Automatic
Inference Detection (CHAID) decision tree.
"""
import argparse
from .tree import Tree
import pandas as pd
import numpy as np
if __name__ == "__main__":
main()
|
pyqt/python-qt5 | PyQt5/uic/properties.py | Properties.set_base_dir | python | def set_base_dir(self, base_dir):
self._base_dir = base_dir
self.icon_cache.set_base_dir(base_dir) | Set the base directory to be used for all relative filenames. | train | https://github.com/pyqt/python-qt5/blob/c9ed180c56f6fd3521ffe5fb70904bc5d3f50e5f/PyQt5/uic/properties.py#L92-L96 | null | class Properties(object):
def __init__(self, factory, qtcore_module, qtgui_module, qtwidgets_module):
self.factory = factory
global QtCore, QtGui, QtWidgets
QtCore = qtcore_module
QtGui = qtgui_module
QtWidgets = qtwidgets_module
self._base_dir = ''
self.reset()
def reset(self):
self.buddies = []
self.delayed_props = []
self.icon_cache = IconCache(self.factory, QtGui)
def _pyEnumMember(self, cpp_name):
try:
prefix, membername = cpp_name.split("::")
except ValueError:
prefix = 'Qt'
membername = cpp_name
if prefix == 'Qt':
return getattr(QtCore.Qt, membername)
scope = self.factory.findQObjectType(prefix)
if scope is None:
raise AttributeError("unknown enum %s" % cpp_name)
return getattr(scope, membername)
def _set(self, prop):
expr = [self._pyEnumMember(v) for v in prop.text.split('|')]
value = expr[0]
for v in expr[1:]:
value |= v
return value
def _enum(self, prop):
return self._pyEnumMember(prop.text)
def _number(self, prop):
return int(prop.text)
_uInt = _longLong = _uLongLong = _number
def _double(self, prop):
return float(prop.text)
def _bool(self, prop):
return prop.text == 'true'
def _stringlist(self, prop):
return [self._string(p, notr='true') for p in prop]
def _string(self, prop, notr=None):
text = prop.text
if text is None:
return ""
if prop.get('notr', notr) == 'true':
return text
disambig = prop.get('comment')
return QtWidgets.QApplication.translate(self.uiname, text, disambig)
_char = _string
def _cstring(self, prop):
return str(prop.text)
def _color(self, prop):
args = int_list(prop)
# Handle the optional alpha component.
alpha = int(prop.get("alpha", "255"))
if alpha != 255:
args.append(alpha)
return QtGui.QColor(*args)
def _point(self, prop):
return QtCore.QPoint(*int_list(prop))
def _pointf(self, prop):
return QtCore.QPointF(*float_list(prop))
def _rect(self, prop):
return QtCore.QRect(*int_list(prop))
def _rectf(self, prop):
return QtCore.QRectF(*float_list(prop))
def _size(self, prop):
return QtCore.QSize(*int_list(prop))
def _sizef(self, prop):
return QtCore.QSizeF(*float_list(prop))
def _pixmap(self, prop):
if prop.text:
fname = prop.text.replace("\\", "\\\\")
if self._base_dir != '' and fname[0] != ':' and not os.path.isabs(fname):
fname = os.path.join(self._base_dir, fname)
return QtGui.QPixmap(fname)
# Don't bother to set the property if the pixmap is empty.
return None
def _iconset(self, prop):
return self.icon_cache.get_icon(prop)
def _url(self, prop):
return QtCore.QUrl(prop[0].text)
def _locale(self, prop):
lang = getattr(QtCore.QLocale, prop.attrib['language'])
country = getattr(QtCore.QLocale, prop.attrib['country'])
return QtCore.QLocale(lang, country)
def _date(self, prop):
return QtCore.QDate(*int_list(prop))
def _datetime(self, prop):
args = int_list(prop)
return QtCore.QDateTime(QtCore.QDate(*args[-3:]), QtCore.QTime(*args[:-3]))
def _time(self, prop):
return QtCore.QTime(*int_list(prop))
def _gradient(self, prop):
name = 'gradient'
# Create the specific gradient.
gtype = prop.get('type', '')
if gtype == 'LinearGradient':
startx = float(prop.get('startx'))
starty = float(prop.get('starty'))
endx = float(prop.get('endx'))
endy = float(prop.get('endy'))
gradient = self.factory.createQObject('QLinearGradient', name,
(startx, starty, endx, endy), is_attribute=False)
elif gtype == 'ConicalGradient':
centralx = float(prop.get('centralx'))
centraly = float(prop.get('centraly'))
angle = float(prop.get('angle'))
gradient = self.factory.createQObject('QConicalGradient', name,
(centralx, centraly, angle), is_attribute=False)
elif gtype == 'RadialGradient':
centralx = float(prop.get('centralx'))
centraly = float(prop.get('centraly'))
radius = float(prop.get('radius'))
focalx = float(prop.get('focalx'))
focaly = float(prop.get('focaly'))
gradient = self.factory.createQObject('QRadialGradient', name,
(centralx, centraly, radius, focalx, focaly),
is_attribute=False)
else:
raise UnsupportedPropertyError(prop.tag)
# Set the common values.
spread = prop.get('spread')
if spread:
gradient.setSpread(getattr(QtGui.QGradient, spread))
cmode = prop.get('coordinatemode')
if cmode:
gradient.setCoordinateMode(getattr(QtGui.QGradient, cmode))
# Get the gradient stops.
for gstop in prop:
if gstop.tag != 'gradientstop':
raise UnsupportedPropertyError(gstop.tag)
position = float(gstop.get('position'))
color = self._color(gstop[0])
gradient.setColorAt(position, color)
return name
def _palette(self, prop):
palette = self.factory.createQObject("QPalette", "palette", (),
is_attribute=False)
for palette_elem in prop:
sub_palette = getattr(QtGui.QPalette, palette_elem.tag.title())
for role, color in enumerate(palette_elem):
if color.tag == 'color':
# Handle simple colour descriptions where the role is
# implied by the colour's position.
palette.setColor(sub_palette,
QtGui.QPalette.ColorRole(role), self._color(color))
elif color.tag == 'colorrole':
role = getattr(QtGui.QPalette, color.get('role'))
brush = self._brush(color[0])
palette.setBrush(sub_palette, role, brush)
else:
raise UnsupportedPropertyError(color.tag)
return palette
def _brush(self, prop):
brushstyle = prop.get('brushstyle')
if brushstyle in ('LinearGradientPattern', 'ConicalGradientPattern', 'RadialGradientPattern'):
gradient = self._gradient(prop[0])
brush = self.factory.createQObject("QBrush", "brush", (gradient, ),
is_attribute=False)
else:
color = self._color(prop[0])
brush = self.factory.createQObject("QBrush", "brush", (color, ),
is_attribute=False)
brushstyle = getattr(QtCore.Qt, brushstyle)
brush.setStyle(brushstyle)
return brush
#@needsWidget
def _sizepolicy(self, prop, widget):
values = [int(child.text) for child in prop]
if len(values) == 2:
# Qt v4.3.0 and later.
horstretch, verstretch = values
hsizetype = getattr(QtWidgets.QSizePolicy, prop.get('hsizetype'))
vsizetype = getattr(QtWidgets.QSizePolicy, prop.get('vsizetype'))
else:
hsizetype, vsizetype, horstretch, verstretch = values
hsizetype = QtWidgets.QSizePolicy.Policy(hsizetype)
vsizetype = QtWidgets.QSizePolicy.Policy(vsizetype)
sizePolicy = self.factory.createQObject('QSizePolicy', 'sizePolicy',
(hsizetype, vsizetype), is_attribute=False)
sizePolicy.setHorizontalStretch(horstretch)
sizePolicy.setVerticalStretch(verstretch)
sizePolicy.setHeightForWidth(widget.sizePolicy().hasHeightForWidth())
return sizePolicy
_sizepolicy = needsWidget(_sizepolicy)
# font needs special handling/conversion of all child elements.
_font_attributes = (("Family", lambda s: s),
("PointSize", int),
("Bold", bool_),
("Italic", bool_),
("Underline", bool_),
("Weight", int),
("StrikeOut", bool_),
("Kerning", bool_),
("StyleStrategy", qfont_enum))
def _font(self, prop):
newfont = self.factory.createQObject("QFont", "font", (),
is_attribute = False)
for attr, converter in self._font_attributes:
v = prop.findtext("./%s" % (attr.lower(),))
if v is None:
continue
getattr(newfont, "set%s" % (attr,))(converter(v))
return newfont
def _cursor(self, prop):
return QtGui.QCursor(QtCore.Qt.CursorShape(int(prop.text)))
def _cursorShape(self, prop):
return QtGui.QCursor(getattr(QtCore.Qt, prop.text))
def convert(self, prop, widget=None):
try:
func = getattr(self, "_" + prop[0].tag)
except AttributeError:
raise UnsupportedPropertyError(prop[0].tag)
else:
args = {}
if getattr(func, "needsWidget", False):
assert widget is not None
args["widget"] = widget
return func(prop[0], **args)
def _getChild(self, elem_tag, elem, name, default=None):
for prop in elem.findall(elem_tag):
if prop.attrib["name"] == name:
return self.convert(prop)
else:
return default
def getProperty(self, elem, name, default=None):
return self._getChild("property", elem, name, default)
def getAttribute(self, elem, name, default=None):
return self._getChild("attribute", elem, name, default)
def setProperties(self, widget, elem):
# Lines are sunken unless the frame shadow is explicitly set.
set_sunken = (elem.attrib.get('class') == 'Line')
for prop in elem.findall('property'):
prop_name = prop.attrib['name']
DEBUG("setting property %s" % (prop_name,))
if prop_name == 'frameShadow':
set_sunken = False
try:
stdset = bool(int(prop.attrib['stdset']))
except KeyError:
stdset = True
if not stdset:
self._setViaSetProperty(widget, prop)
elif hasattr(self, prop_name):
getattr(self, prop_name)(widget, prop)
else:
prop_value = self.convert(prop, widget)
if prop_value is not None:
getattr(widget, 'set%s%s' % (ascii_upper(prop_name[0]), prop_name[1:]))(prop_value)
if set_sunken:
widget.setFrameShadow(QtWidgets.QFrame.Sunken)
# SPECIAL PROPERTIES
# If a property has a well-known value type but needs special,
# context-dependent handling, the default behaviour can be overridden here.
# Delayed properties will be set after the whole widget tree has been
# populated.
def _delayed_property(self, widget, prop):
prop_value = self.convert(prop)
if prop_value is not None:
prop_name = prop.attrib["name"]
self.delayed_props.append((widget, False,
'set%s%s' % (ascii_upper(prop_name[0]), prop_name[1:]),
prop_value))
# These properties will be set with a widget.setProperty call rather than
# calling the set<property> function.
def _setViaSetProperty(self, widget, prop):
prop_value = self.convert(prop)
if prop_value is not None:
prop_name = prop.attrib['name']
# This appears to be a Designer/uic hack where stdset=0 means that
# the viewport should be used.
if prop[0].tag == 'cursorShape':
widget.viewport().setProperty(prop_name, prop_value)
else:
widget.setProperty(prop_name, prop_value)
# Ignore the property.
def _ignore(self, widget, prop):
pass
# Define properties that use the canned handlers.
currentIndex = _delayed_property
currentRow = _delayed_property
showDropIndicator = _setViaSetProperty
intValue = _setViaSetProperty
value = _setViaSetProperty
objectName = _ignore
margin = _ignore
leftMargin = _ignore
topMargin = _ignore
rightMargin = _ignore
bottomMargin = _ignore
spacing = _ignore
horizontalSpacing = _ignore
verticalSpacing = _ignore
# tabSpacing is actually the spacing property of the widget's layout.
def tabSpacing(self, widget, prop):
prop_value = self.convert(prop)
if prop_value is not None:
self.delayed_props.append((widget, True, 'setSpacing', prop_value))
# buddy setting has to be done after the whole widget tree has been
# populated. We can't use delay here because we cannot get the actual
# buddy yet.
def buddy(self, widget, prop):
buddy_name = prop[0].text
if buddy_name:
self.buddies.append((widget, buddy_name))
# geometry is handled specially if set on the toplevel widget.
def geometry(self, widget, prop):
if widget.objectName() == self.uiname:
geom = int_list(prop[0])
widget.resize(geom[2], geom[3])
else:
widget.setGeometry(self._rect(prop[0]))
def orientation(self, widget, prop):
# If the class is a QFrame, it's a line.
if widget.metaObject().className() == 'QFrame':
widget.setFrameShape(
{'Qt::Horizontal': QtWidgets.QFrame.HLine,
'Qt::Vertical' : QtWidgets.QFrame.VLine}[prop[0].text])
else:
widget.setOrientation(self._enum(prop[0]))
# The isWrapping attribute of QListView is named inconsistently, it should
# be wrapping.
def isWrapping(self, widget, prop):
widget.setWrapping(self.convert(prop))
# This is a pseudo-property injected to deal with margins.
def pyuicMargins(self, widget, prop):
widget.setContentsMargins(*int_list(prop))
# This is a pseudo-property injected to deal with spacing.
def pyuicSpacing(self, widget, prop):
horiz, vert = int_list(prop)
if horiz == vert:
widget.setSpacing(horiz)
else:
if horiz >= 0:
widget.setHorizontalSpacing(horiz)
if vert >= 0:
widget.setVerticalSpacing(vert)
|
pyqt/python-qt5 | PyQt5/uic/uiparser.py | _parse_alignment | python | def _parse_alignment(alignment):
align_flags = None
for qt_align in alignment.split('|'):
_, qt_align = qt_align.split('::')
align = getattr(QtCore.Qt, qt_align)
if align_flags is None:
align_flags = align
else:
align_flags |= align
return align_flags | Convert a C++ alignment to the corresponding flags. | train | https://github.com/pyqt/python-qt5/blob/c9ed180c56f6fd3521ffe5fb70904bc5d3f50e5f/PyQt5/uic/uiparser.py#L58-L71 | null | #############################################################################
##
## Copyright (C) 2016 Riverbank Computing Limited.
## Copyright (C) 2006 Thorsten Marek.
## All right reserved.
##
## This file is part of PyQt.
##
## You may use this file under the terms of the GPL v2 or the revised BSD
## license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of the Riverbank Computing Limited nor the names
## of its contributors may be used to endorse or promote products
## derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
#############################################################################
import sys
import logging
import os.path
import re
from xml.etree.ElementTree import parse, SubElement
from .objcreator import QObjectCreator
from .properties import Properties
logger = logging.getLogger(__name__)
DEBUG = logger.debug
QtCore = None
QtWidgets = None
def _layout_position(elem):
""" Return either (), (0, alignment), (row, column, rowspan, colspan) or
(row, column, rowspan, colspan, alignment) depending on the type of layout
and its configuration. The result will be suitable to use as arguments to
the layout.
"""
row = elem.attrib.get('row')
column = elem.attrib.get('column')
alignment = elem.attrib.get('alignment')
# See if it is a box layout.
if row is None or column is None:
if alignment is None:
return ()
return (0, _parse_alignment(alignment))
# It must be a grid or a form layout.
row = int(row)
column = int(column)
rowspan = int(elem.attrib.get('rowspan', 1))
colspan = int(elem.attrib.get('colspan', 1))
if alignment is None:
return (row, column, rowspan, colspan)
return (row, column, rowspan, colspan, _parse_alignment(alignment))
class WidgetStack(list):
topwidget = None
def push(self, item):
DEBUG("push %s %s" % (item.metaObject().className(),
item.objectName()))
self.append(item)
if isinstance(item, QtWidgets.QWidget):
self.topwidget = item
def popLayout(self):
layout = list.pop(self)
DEBUG("pop layout %s %s" % (layout.metaObject().className(),
layout.objectName()))
return layout
def popWidget(self):
widget = list.pop(self)
DEBUG("pop widget %s %s" % (widget.metaObject().className(),
widget.objectName()))
for item in reversed(self):
if isinstance(item, QtWidgets.QWidget):
self.topwidget = item
break
else:
self.topwidget = None
DEBUG("new topwidget %s" % (self.topwidget,))
return widget
def peek(self):
return self[-1]
def topIsLayout(self):
return isinstance(self[-1], QtWidgets.QLayout)
def topIsLayoutWidget(self):
# A plain QWidget is a layout widget unless it's parent is a
# QMainWindow. Note that the corresponding uic test is a little more
# complicated as it involves features not supported by pyuic.
if type(self[-1]) is not QtWidgets.QWidget:
return False
if len(self) < 2:
return False
return type(self[-2]) is not QtWidgets.QMainWindow
class ButtonGroup(object):
""" Encapsulate the configuration of a button group and its implementation.
"""
def __init__(self):
""" Initialise the button group. """
self.exclusive = True
self.object = None
class UIParser(object):
def __init__(self, qtcore_module, qtgui_module, qtwidgets_module, creatorPolicy):
self.factory = QObjectCreator(creatorPolicy)
self.wprops = Properties(self.factory, qtcore_module, qtgui_module,
qtwidgets_module)
global QtCore, QtWidgets
QtCore = qtcore_module
QtWidgets = qtwidgets_module
self.reset()
def uniqueName(self, name):
"""UIParser.uniqueName(string) -> string
Create a unique name from a string.
>>> p = UIParser(QtCore, QtGui, QtWidgets)
>>> p.uniqueName("foo")
'foo'
>>> p.uniqueName("foo")
'foo1'
"""
try:
suffix = self.name_suffixes[name]
except KeyError:
self.name_suffixes[name] = 0
return name
suffix += 1
self.name_suffixes[name] = suffix
return "%s%i" % (name, suffix)
def reset(self):
try: self.wprops.reset()
except AttributeError: pass
self.toplevelWidget = None
self.stack = WidgetStack()
self.name_suffixes = {}
self.defaults = {'spacing': -1, 'margin': -1}
self.actions = []
self.currentActionGroup = None
self.resources = []
self.button_groups = {}
def setupObject(self, clsname, parent, branch, is_attribute=True):
name = self.uniqueName(branch.attrib.get('name') or clsname[1:].lower())
if parent is None:
args = ()
else:
args = (parent, )
obj = self.factory.createQObject(clsname, name, args, is_attribute)
self.wprops.setProperties(obj, branch)
obj.setObjectName(name)
if is_attribute:
setattr(self.toplevelWidget, name, obj)
return obj
def getProperty(self, elem, name):
for prop in elem.findall('property'):
if prop.attrib['name'] == name:
return prop
return None
def createWidget(self, elem):
self.column_counter = 0
self.row_counter = 0
self.item_nr = 0
self.itemstack = []
self.sorting_enabled = None
widget_class = elem.attrib['class'].replace('::', '.')
if widget_class == 'Line':
widget_class = 'QFrame'
# Ignore the parent if it is a container.
parent = self.stack.topwidget
if isinstance(parent, (QtWidgets.QDockWidget, QtWidgets.QMdiArea,
QtWidgets.QScrollArea, QtWidgets.QStackedWidget,
QtWidgets.QToolBox, QtWidgets.QTabWidget,
QtWidgets.QWizard)):
parent = None
self.stack.push(self.setupObject(widget_class, parent, elem))
if isinstance(self.stack.topwidget, QtWidgets.QTableWidget):
if self.getProperty(elem, 'columnCount') is None:
self.stack.topwidget.setColumnCount(len(elem.findall("column")))
if self.getProperty(elem, 'rowCount') is None:
self.stack.topwidget.setRowCount(len(elem.findall("row")))
self.traverseWidgetTree(elem)
widget = self.stack.popWidget()
if isinstance(widget, QtWidgets.QTreeView):
self.handleHeaderView(elem, "header", widget.header())
elif isinstance(widget, QtWidgets.QTableView):
self.handleHeaderView(elem, "horizontalHeader",
widget.horizontalHeader())
self.handleHeaderView(elem, "verticalHeader",
widget.verticalHeader())
elif isinstance(widget, QtWidgets.QAbstractButton):
bg_i18n = self.wprops.getAttribute(elem, "buttonGroup")
if bg_i18n is not None:
# This should be handled properly in case the problem arises
# elsewhere as well.
try:
# We are compiling the .ui file.
bg_name = bg_i18n.string
except AttributeError:
# We are loading the .ui file.
bg_name = bg_i18n
# Designer allows the creation of .ui files without explicit
# button groups, even though uic then issues warnings. We
# handle it in two stages by first making sure it has a name
# and then making sure one exists with that name.
if not bg_name:
bg_name = 'buttonGroup'
try:
bg = self.button_groups[bg_name]
except KeyError:
bg = self.button_groups[bg_name] = ButtonGroup()
if bg.object is None:
bg.object = self.factory.createQObject("QButtonGroup",
bg_name, (self.toplevelWidget, ))
setattr(self.toplevelWidget, bg_name, bg.object)
bg.object.setObjectName(bg_name)
if not bg.exclusive:
bg.object.setExclusive(False)
bg.object.addButton(widget)
if self.sorting_enabled is not None:
widget.setSortingEnabled(self.sorting_enabled)
self.sorting_enabled = None
if self.stack.topIsLayout():
lay = self.stack.peek()
lp = elem.attrib['layout-position']
if isinstance(lay, QtWidgets.QFormLayout):
lay.setWidget(lp[0], self._form_layout_role(lp), widget)
else:
lay.addWidget(widget, *lp)
topwidget = self.stack.topwidget
if isinstance(topwidget, QtWidgets.QToolBox):
icon = self.wprops.getAttribute(elem, "icon")
if icon is not None:
topwidget.addItem(widget, icon, self.wprops.getAttribute(elem, "label"))
else:
topwidget.addItem(widget, self.wprops.getAttribute(elem, "label"))
tooltip = self.wprops.getAttribute(elem, "toolTip")
if tooltip is not None:
topwidget.setItemToolTip(topwidget.indexOf(widget), tooltip)
elif isinstance(topwidget, QtWidgets.QTabWidget):
icon = self.wprops.getAttribute(elem, "icon")
if icon is not None:
topwidget.addTab(widget, icon, self.wprops.getAttribute(elem, "title"))
else:
topwidget.addTab(widget, self.wprops.getAttribute(elem, "title"))
tooltip = self.wprops.getAttribute(elem, "toolTip")
if tooltip is not None:
topwidget.setTabToolTip(topwidget.indexOf(widget), tooltip)
elif isinstance(topwidget, QtWidgets.QWizard):
topwidget.addPage(widget)
elif isinstance(topwidget, QtWidgets.QStackedWidget):
topwidget.addWidget(widget)
elif isinstance(topwidget, (QtWidgets.QDockWidget, QtWidgets.QScrollArea)):
topwidget.setWidget(widget)
elif isinstance(topwidget, QtWidgets.QMainWindow):
if type(widget) == QtWidgets.QWidget:
topwidget.setCentralWidget(widget)
elif isinstance(widget, QtWidgets.QToolBar):
tbArea = self.wprops.getAttribute(elem, "toolBarArea")
if tbArea is None:
topwidget.addToolBar(widget)
else:
topwidget.addToolBar(tbArea, widget)
tbBreak = self.wprops.getAttribute(elem, "toolBarBreak")
if tbBreak:
topwidget.insertToolBarBreak(widget)
elif isinstance(widget, QtWidgets.QMenuBar):
topwidget.setMenuBar(widget)
elif isinstance(widget, QtWidgets.QStatusBar):
topwidget.setStatusBar(widget)
elif isinstance(widget, QtWidgets.QDockWidget):
dwArea = self.wprops.getAttribute(elem, "dockWidgetArea")
topwidget.addDockWidget(QtCore.Qt.DockWidgetArea(dwArea),
widget)
def handleHeaderView(self, elem, name, header):
value = self.wprops.getAttribute(elem, name + "Visible")
if value is not None:
header.setVisible(value)
value = self.wprops.getAttribute(elem, name + "CascadingSectionResizes")
if value is not None:
header.setCascadingSectionResizes(value)
value = self.wprops.getAttribute(elem, name + "DefaultSectionSize")
if value is not None:
header.setDefaultSectionSize(value)
value = self.wprops.getAttribute(elem, name + "HighlightSections")
if value is not None:
header.setHighlightSections(value)
value = self.wprops.getAttribute(elem, name + "MinimumSectionSize")
if value is not None:
header.setMinimumSectionSize(value)
value = self.wprops.getAttribute(elem, name + "ShowSortIndicator")
if value is not None:
header.setSortIndicatorShown(value)
value = self.wprops.getAttribute(elem, name + "StretchLastSection")
if value is not None:
header.setStretchLastSection(value)
def createSpacer(self, elem):
width = elem.findtext("property/size/width")
height = elem.findtext("property/size/height")
if width is None or height is None:
size_args = ()
else:
size_args = (int(width), int(height))
sizeType = self.wprops.getProperty(elem, "sizeType",
QtWidgets.QSizePolicy.Expanding)
policy = (QtWidgets.QSizePolicy.Minimum, sizeType)
if self.wprops.getProperty(elem, "orientation") == QtCore.Qt.Horizontal:
policy = policy[1], policy[0]
spacer = self.factory.createQObject("QSpacerItem",
self.uniqueName("spacerItem"), size_args + policy,
is_attribute=False)
if self.stack.topIsLayout():
lay = self.stack.peek()
lp = elem.attrib['layout-position']
if isinstance(lay, QtWidgets.QFormLayout):
lay.setItem(lp[0], self._form_layout_role(lp), spacer)
else:
lay.addItem(spacer, *lp)
    def createLayout(self, elem):
        """Create the layout described by a <layout> element, populate it by
        traversing the element's children, and attach it to the widget or
        layout currently on top of the stack.
        """
        # We use an internal property to handle margins which will use separate
        # left, top, right and bottom margins if they are found to be
        # different.  The following will select, in order of preference,
        # separate margins, the same margin in all directions, and the default
        # margin.
        margin = self.wprops.getProperty(elem, 'margin',
                self.defaults['margin'])
        left = self.wprops.getProperty(elem, 'leftMargin', margin)
        top = self.wprops.getProperty(elem, 'topMargin', margin)
        right = self.wprops.getProperty(elem, 'rightMargin', margin)
        bottom = self.wprops.getProperty(elem, 'bottomMargin', margin)
        # A layout widget should, by default, have no margins.
        if self.stack.topIsLayoutWidget():
            if left < 0: left = 0
            if top < 0: top = 0
            if right < 0: right = 0
            if bottom < 0: bottom = 0
        if left >= 0 or top >= 0 or right >= 0 or bottom >= 0:
            # We inject the new internal property.
            cme = SubElement(elem, 'property', name='pyuicMargins')
            SubElement(cme, 'number').text = str(left)
            SubElement(cme, 'number').text = str(top)
            SubElement(cme, 'number').text = str(right)
            SubElement(cme, 'number').text = str(bottom)
        # We use an internal property to handle spacing which will use separate
        # horizontal and vertical spacing if they are found to be different.
        # The following will select, in order of preference, separate
        # horizontal and vertical spacing, the same spacing in both directions,
        # and the default spacing.
        spacing = self.wprops.getProperty(elem, 'spacing',
                self.defaults['spacing'])
        horiz = self.wprops.getProperty(elem, 'horizontalSpacing', spacing)
        vert = self.wprops.getProperty(elem, 'verticalSpacing', spacing)
        if horiz >= 0 or vert >= 0:
            # We inject the new internal property.
            cme = SubElement(elem, 'property', name='pyuicSpacing')
            SubElement(cme, 'number').text = str(horiz)
            SubElement(cme, 'number').text = str(vert)
        classname = elem.attrib["class"]
        if self.stack.topIsLayout():
            parent = None
        else:
            parent = self.stack.topwidget
        # Make sure setupObject() always sees a name.
        if "name" not in elem.attrib:
            elem.attrib["name"] = classname[1:].lower()
        self.stack.push(self.setupObject(classname, parent, elem))
        self.traverseWidgetTree(elem)
        layout = self.stack.popLayout()
        self.configureLayout(elem, layout)
        if self.stack.topIsLayout():
            top_layout = self.stack.peek()
            lp = elem.attrib['layout-position']
            if isinstance(top_layout, QtWidgets.QFormLayout):
                top_layout.setLayout(lp[0], self._form_layout_role(lp), layout)
            else:
                top_layout.addLayout(layout, *lp)
def configureLayout(self, elem, layout):
if isinstance(layout, QtWidgets.QGridLayout):
self.setArray(elem, 'columnminimumwidth',
layout.setColumnMinimumWidth)
self.setArray(elem, 'rowminimumheight',
layout.setRowMinimumHeight)
self.setArray(elem, 'columnstretch', layout.setColumnStretch)
self.setArray(elem, 'rowstretch', layout.setRowStretch)
elif isinstance(layout, QtWidgets.QBoxLayout):
self.setArray(elem, 'stretch', layout.setStretch)
def setArray(self, elem, name, setter):
array = elem.attrib.get(name)
if array:
for idx, value in enumerate(array.split(',')):
value = int(value)
if value > 0:
setter(idx, value)
def disableSorting(self, w):
if self.item_nr == 0:
self.sorting_enabled = self.factory.invoke("__sortingEnabled",
w.isSortingEnabled)
w.setSortingEnabled(False)
    def handleItem(self, elem):
        """Handle an <item> element.

        Inside a layout it merely wraps a widget/layout/spacer and
        contributes its grid position.  Inside a combo box, list widget,
        tree widget or table widget it describes one entry of that widget.
        """
        if self.stack.topIsLayout():
            elem[0].attrib['layout-position'] = _layout_position(elem)
            self.traverseWidgetTree(elem)
        else:
            w = self.stack.topwidget
            if isinstance(w, QtWidgets.QComboBox):
                text = self.wprops.getProperty(elem, "text")
                icon = self.wprops.getProperty(elem, "icon")
                if icon:
                    w.addItem(icon, '')
                else:
                    w.addItem('')
                w.setItemText(self.item_nr, text)
            elif isinstance(w, QtWidgets.QListWidget):
                self.disableSorting(w)
                item = self.createWidgetItem('QListWidgetItem', elem, w.item,
                        self.item_nr)
                w.addItem(item)
            elif isinstance(w, QtWidgets.QTreeWidget):
                # itemstack tracks the chain of parent items for nested
                # <item> elements; an empty stack means a top-level item.
                if self.itemstack:
                    parent, _ = self.itemstack[-1]
                    _, nr_in_root = self.itemstack[0]
                else:
                    parent = w
                    nr_in_root = self.item_nr
                item = self.factory.createQObject("QTreeWidgetItem",
                        "item_%d" % len(self.itemstack), (parent, ), False)
                if self.item_nr == 0 and not self.itemstack:
                    self.sorting_enabled = self.factory.invoke("__sortingEnabled", w.isSortingEnabled)
                    w.setSortingEnabled(False)
                self.itemstack.append((item, self.item_nr))
                self.item_nr = 0
                # We have to access the item via the tree when setting the
                # text.
                titm = w.topLevelItem(nr_in_root)
                for child, nr_in_parent in self.itemstack[1:]:
                    titm = titm.child(nr_in_parent)
                # 'column' advances with each 'text' property; the other
                # properties apply to the most recently started column.
                column = -1
                for prop in elem.findall('property'):
                    c_prop = self.wprops.convert(prop)
                    c_prop_name = prop.attrib['name']
                    if c_prop_name == 'text':
                        column += 1
                        if c_prop:
                            titm.setText(column, c_prop)
                    elif c_prop_name == 'statusTip':
                        item.setStatusTip(column, c_prop)
                    elif c_prop_name == 'toolTip':
                        item.setToolTip(column, c_prop)
                    elif c_prop_name == 'whatsThis':
                        item.setWhatsThis(column, c_prop)
                    elif c_prop_name == 'font':
                        item.setFont(column, c_prop)
                    elif c_prop_name == 'icon':
                        item.setIcon(column, c_prop)
                    elif c_prop_name == 'background':
                        item.setBackground(column, c_prop)
                    elif c_prop_name == 'foreground':
                        item.setForeground(column, c_prop)
                    elif c_prop_name == 'flags':
                        item.setFlags(c_prop)
                    elif c_prop_name == 'checkState':
                        item.setCheckState(column, c_prop)
                self.traverseWidgetTree(elem)
                _, self.item_nr = self.itemstack.pop()
            elif isinstance(w, QtWidgets.QTableWidget):
                row = int(elem.attrib['row'])
                col = int(elem.attrib['column'])
                self.disableSorting(w)
                item = self.createWidgetItem('QTableWidgetItem', elem, w.item,
                        row, col)
                w.setItem(row, col, item)
            # One more entry handled at this level.
            self.item_nr += 1
def addAction(self, elem):
self.actions.append((self.stack.topwidget, elem.attrib["name"]))
@staticmethod
def any_i18n(*args):
""" Return True if any argument appears to be an i18n string. """
for a in args:
if a is not None and not isinstance(a, str):
return True
return False
    def createWidgetItem(self, item_type, elem, getter, *getter_args):
        """ Create a specific type of widget item.

        If any textual property looks like an i18n string the item is also
        registered with the factory via *getter*/*getter_args* so it can be
        retrieved again later.
        """
        item = self.factory.createQObject(item_type, "item", (), False)
        props = self.wprops
        # Note that not all types of widget items support the full set of
        # properties.
        text = props.getProperty(elem, 'text')
        status_tip = props.getProperty(elem, 'statusTip')
        tool_tip = props.getProperty(elem, 'toolTip')
        whats_this = props.getProperty(elem, 'whatsThis')
        if self.any_i18n(text, status_tip, tool_tip, whats_this):
            self.factory.invoke("item", getter, getter_args)
        if text:
            item.setText(text)
        if status_tip:
            item.setStatusTip(status_tip)
        if tool_tip:
            item.setToolTip(tool_tip)
        if whats_this:
            item.setWhatsThis(whats_this)
        text_alignment = props.getProperty(elem, 'textAlignment')
        if text_alignment:
            item.setTextAlignment(text_alignment)
        font = props.getProperty(elem, 'font')
        if font:
            item.setFont(font)
        icon = props.getProperty(elem, 'icon')
        if icon:
            item.setIcon(icon)
        background = props.getProperty(elem, 'background')
        if background:
            item.setBackground(background)
        foreground = props.getProperty(elem, 'foreground')
        if foreground:
            item.setForeground(foreground)
        flags = props.getProperty(elem, 'flags')
        if flags:
            item.setFlags(flags)
        check_state = props.getProperty(elem, 'checkState')
        if check_state:
            item.setCheckState(check_state)
        return item
    def addHeader(self, elem):
        """Handle a <column> or <row> element: configure the corresponding
        header item of the enclosing tree or table widget.
        """
        w = self.stack.topwidget
        if isinstance(w, QtWidgets.QTreeWidget):
            props = self.wprops
            col = self.column_counter
            text = props.getProperty(elem, 'text')
            if text:
                w.headerItem().setText(col, text)
            status_tip = props.getProperty(elem, 'statusTip')
            if status_tip:
                w.headerItem().setStatusTip(col, status_tip)
            tool_tip = props.getProperty(elem, 'toolTip')
            if tool_tip:
                w.headerItem().setToolTip(col, tool_tip)
            whats_this = props.getProperty(elem, 'whatsThis')
            if whats_this:
                w.headerItem().setWhatsThis(col, whats_this)
            text_alignment = props.getProperty(elem, 'textAlignment')
            if text_alignment:
                w.headerItem().setTextAlignment(col, text_alignment)
            font = props.getProperty(elem, 'font')
            if font:
                w.headerItem().setFont(col, font)
            icon = props.getProperty(elem, 'icon')
            if icon:
                w.headerItem().setIcon(col, icon)
            background = props.getProperty(elem, 'background')
            if background:
                w.headerItem().setBackground(col, background)
            foreground = props.getProperty(elem, 'foreground')
            if foreground:
                w.headerItem().setForeground(col, foreground)
            self.column_counter += 1
        elif isinstance(w, QtWidgets.QTableWidget):
            # Empty <column>/<row> elements carry no header configuration.
            if len(elem) != 0:
                if elem.tag == 'column':
                    item = self.createWidgetItem('QTableWidgetItem', elem,
                            w.horizontalHeaderItem, self.column_counter)
                    w.setHorizontalHeaderItem(self.column_counter, item)
                    self.column_counter += 1
                elif elem.tag == 'row':
                    item = self.createWidgetItem('QTableWidgetItem', elem,
                            w.verticalHeaderItem, self.row_counter)
                    w.setVerticalHeaderItem(self.row_counter, item)
                    self.row_counter += 1
def setZOrder(self, elem):
# Designer can generate empty zorder elements.
if elem.text is None:
return
# Designer allows the z-order of spacer items to be specified even
# though they can't be raised, so ignore any missing raise_() method.
try:
getattr(self.toplevelWidget, elem.text).raise_()
except AttributeError:
# Note that uic issues a warning message.
pass
def createAction(self, elem):
self.setupObject("QAction", self.currentActionGroup or self.toplevelWidget,
elem)
def createActionGroup(self, elem):
action_group = self.setupObject("QActionGroup", self.toplevelWidget, elem)
self.currentActionGroup = action_group
self.traverseWidgetTree(elem)
self.currentActionGroup = None
widgetTreeItemHandlers = {
"widget" : createWidget,
"addaction" : addAction,
"layout" : createLayout,
"spacer" : createSpacer,
"item" : handleItem,
"action" : createAction,
"actiongroup": createActionGroup,
"column" : addHeader,
"row" : addHeader,
"zorder" : setZOrder,
}
def traverseWidgetTree(self, elem):
for child in iter(elem):
try:
handler = self.widgetTreeItemHandlers[child.tag]
except KeyError:
continue
handler(self, child)
    def createUserInterface(self, elem):
        """Create the top-level widget from the root <widget> element and
        build the complete object tree below it, then resolve the deferred
        actions, buddies and delayed properties.
        """
        # Get the names of the class and widget.
        cname = elem.attrib["class"]
        wname = elem.attrib["name"]
        # If there was no widget name then derive it from the class name.
        if not wname:
            wname = cname
            if wname.startswith("Q"):
                wname = wname[1:]
            wname = wname[0].lower() + wname[1:]
        self.toplevelWidget = self.createToplevelWidget(cname, wname)
        self.toplevelWidget.setObjectName(wname)
        DEBUG("toplevel widget is %s",
                self.toplevelWidget.metaObject().className())
        self.wprops.setProperties(self.toplevelWidget, elem)
        self.stack.push(self.toplevelWidget)
        self.traverseWidgetTree(elem)
        self.stack.popWidget()
        self.addActions()
        self.setBuddies()
        self.setDelayedProps()
    def addActions(self):
        """Attach every recorded action to its menu/toolbar, now that all
        action objects exist.
        """
        for widget, action_name in self.actions:
            if action_name == "separator":
                widget.addSeparator()
            else:
                DEBUG("add action %s to %s", action_name, widget.objectName())
                action_obj = getattr(self.toplevelWidget, action_name)
                # A menu is added through its menuAction(); an action group
                # is never added directly (its actions were added above).
                if isinstance(action_obj, QtWidgets.QMenu):
                    widget.addAction(action_obj.menuAction())
                elif not isinstance(action_obj, QtWidgets.QActionGroup):
                    widget.addAction(action_obj)
def setDelayedProps(self):
for widget, layout, setter, args in self.wprops.delayed_props:
if layout:
widget = widget.layout()
setter = getattr(widget, setter)
setter(args)
    def setBuddies(self):
        """Resolve the buddy names recorded while setting properties into
        widgets; a name that does not exist is logged and skipped.
        """
        for widget, buddy in self.wprops.buddies:
            DEBUG("%s is buddy of %s", buddy, widget.objectName())
            try:
                widget.setBuddy(getattr(self.toplevelWidget, buddy))
            except AttributeError:
                DEBUG("ERROR in ui spec: %s (buddy of %s) does not exist",
                        buddy, widget.objectName())
def classname(self, elem):
DEBUG("uiname is %s", elem.text)
name = elem.text
if name is None:
name = ""
self.uiname = name
self.wprops.uiname = name
self.setContext(name)
    def setContext(self, context):
        """
        Reimplemented by a sub-class if it needs to know the translation
        context.  The base implementation ignores it.
        """
        pass
def readDefaults(self, elem):
self.defaults['margin'] = int(elem.attrib['margin'])
self.defaults['spacing'] = int(elem.attrib['spacing'])
def setTaborder(self, elem):
lastwidget = None
for widget_elem in elem:
widget = getattr(self.toplevelWidget, widget_elem.text)
if lastwidget is not None:
self.toplevelWidget.setTabOrder(lastwidget, widget)
lastwidget = widget
    def readResources(self, elem):
        """
        Read a "resources" tag and add the module to import to the parser's
        list of them.
        """
        # ElementTree replaced getiterator() with iter(); support both.
        try:
            iterator = getattr(elem, 'iter')
        except AttributeError:
            iterator = getattr(elem, 'getiterator')
        for include in iterator("include"):
            loc = include.attrib.get("location")
            # Apply the convention for naming the Python files generated by
            # pyrcc5.
            if loc and loc.endswith('.qrc'):
                mname = os.path.basename(loc[:-4] + self._resource_suffix)
                if mname not in self.resources:
                    self.resources.append(mname)
    def createConnections(self, elem):
        """Make the signal/slot connections described by <connections> and
        finish with QMetaObject.connectSlotsByName() on the form.
        """
        def name2object(obj):
            # The form itself is referred to by its ui name.
            if obj == self.uiname:
                return self.toplevelWidget
            else:
                return getattr(self.toplevelWidget, obj)
        for conn in iter(elem):
            signal = conn.findtext('signal')
            # Split e.g. "clicked(bool)" into name and argument list.
            signal_name, signal_args = signal.split('(')
            signal_args = signal_args[:-1].replace(' ', '')
            sender = name2object(conn.findtext('sender'))
            bound_signal = getattr(sender, signal_name)
            slot = self.factory.getSlot(name2object(conn.findtext('receiver')),
                    conn.findtext('slot').split('(')[0])
            if signal_args == '':
                bound_signal.connect(slot)
            else:
                # Select the matching overload of an overloaded signal.
                signal_args = signal_args.split(',')
                if len(signal_args) == 1:
                    bound_signal[signal_args[0]].connect(slot)
                else:
                    bound_signal[tuple(signal_args)].connect(slot)
        QtCore.QMetaObject.connectSlotsByName(self.toplevelWidget)
    def customWidgets(self, elem):
        """Register each <customwidget> (class name, base class and the
        header path converted to a Python module path) with the factory.
        """
        def header2module(header):
            """header2module(header) -> string
            Convert paths to C++ header files to according Python modules
            >>> header2module("foo/bar/baz.h")
            'foo.bar.baz'
            """
            if header.endswith(".h"):
                header = header[:-2]
            mpath = []
            for part in header.split('/'):
                # Ignore any empty parts or those that refer to the current
                # directory.
                if part not in ('', '.'):
                    if part == '..':
                        # We should allow this for Python3.
                        raise SyntaxError("custom widget header file name may not contain '..'.")
                    mpath.append(part)
            return '.'.join(mpath)
        for custom_widget in iter(elem):
            classname = custom_widget.findtext("class")
            self.factory.addCustomWidget(classname,
                    custom_widget.findtext("extends") or "QWidget",
                    header2module(custom_widget.findtext("header")))
    def createToplevelWidget(self, classname, widgetname):
        """Create the root widget of the form; implemented by subclasses."""
        raise NotImplementedError
def buttonGroups(self, elem):
for button_group in iter(elem):
if button_group.tag == 'buttongroup':
bg_name = button_group.attrib['name']
bg = ButtonGroup()
self.button_groups[bg_name] = bg
prop = self.getProperty(button_group, 'exclusive')
if prop is not None:
if prop.findtext('bool') == 'false':
bg.exclusive = False
    # finalize will be called after the whole tree has been parsed and can be
    # overridden.
    def finalize(self):
        """Post-parse hook for subclasses; the default does nothing."""
        pass
    def parse(self, filename, resource_suffix, base_dir=''):
        """Parse a version 4.0 .ui file and return the created top-level
        widget.  resource_suffix is appended to resource module names;
        base_dir is handed to the property handler (presumably for resolving
        relative paths — confirm against Properties.set_base_dir).
        """
        self.wprops.set_base_dir(base_dir)
        self._resource_suffix = resource_suffix
        # The order in which the different branches are handled is important.
        # The widget tree handler relies on all custom widgets being known, and
        # in order to create the connections, all widgets have to be populated.
        branchHandlers = (
            ("layoutdefault", self.readDefaults),
            ("class", self.classname),
            ("buttongroups", self.buttonGroups),
            ("customwidgets", self.customWidgets),
            ("widget", self.createUserInterface),
            ("connections", self.createConnections),
            ("tabstops", self.setTaborder),
            ("resources", self.readResources),
        )
        document = parse(filename)
        version = document.getroot().attrib["version"]
        DEBUG("UI version is %s" % (version,))
        # Right now, only version 4.0 is supported.
        assert version in ("4.0",)
        for tagname, actor in branchHandlers:
            elem = document.find(tagname)
            if elem is not None:
                actor(elem)
        self.finalize()
        # Keep the result and clear all parsing state for the next run.
        w = self.toplevelWidget
        self.reset()
        return w
@staticmethod
def _form_layout_role(layout_position):
if layout_position[3] > 1:
role = QtWidgets.QFormLayout.SpanningRole
elif layout_position[1] == 1:
role = QtWidgets.QFormLayout.FieldRole
else:
role = QtWidgets.QFormLayout.LabelRole
return role
|
pyqt/python-qt5 | PyQt5/uic/uiparser.py | _layout_position | python | def _layout_position(elem):
row = elem.attrib.get('row')
column = elem.attrib.get('column')
alignment = elem.attrib.get('alignment')
# See if it is a box layout.
if row is None or column is None:
if alignment is None:
return ()
return (0, _parse_alignment(alignment))
# It must be a grid or a form layout.
row = int(row)
column = int(column)
rowspan = int(elem.attrib.get('rowspan', 1))
colspan = int(elem.attrib.get('colspan', 1))
if alignment is None:
return (row, column, rowspan, colspan)
return (row, column, rowspan, colspan, _parse_alignment(alignment)) | Return either (), (0, alignment), (row, column, rowspan, colspan) or
(row, column, rowspan, colspan, alignment) depending on the type of layout
and its configuration. The result will be suitable to use as arguments to
the layout. | train | https://github.com/pyqt/python-qt5/blob/c9ed180c56f6fd3521ffe5fb70904bc5d3f50e5f/PyQt5/uic/uiparser.py#L74-L102 | [
"def _parse_alignment(alignment):\n \"\"\" Convert a C++ alignment to the corresponding flags. \"\"\"\n\n align_flags = None\n for qt_align in alignment.split('|'):\n _, qt_align = qt_align.split('::')\n align = getattr(QtCore.Qt, qt_align)\n\n if align_flags is None:\n align_flags = align\n else:\n align_flags |= align\n\n return align_flags\n"
] | #############################################################################
##
## Copyright (C) 2016 Riverbank Computing Limited.
## Copyright (C) 2006 Thorsten Marek.
## All right reserved.
##
## This file is part of PyQt.
##
## You may use this file under the terms of the GPL v2 or the revised BSD
## license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of the Riverbank Computing Limited nor the names
## of its contributors may be used to endorse or promote products
## derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
#############################################################################
import sys
import logging
import os.path
import re
from xml.etree.ElementTree import parse, SubElement
from .objcreator import QObjectCreator
from .properties import Properties
logger = logging.getLogger(__name__)
DEBUG = logger.debug  # shorthand for trace logging used throughout this module
# The Qt binding modules are injected by UIParser.__init__() so that this
# module itself stays independent of a particular set of bindings.
QtCore = None
QtWidgets = None
def _parse_alignment(alignment):
    """ Convert a C++ alignment string such as "Qt::AlignLeft|Qt::AlignTop"
    to the corresponding OR-ed Qt alignment flags. """
    flags = None
    for part in alignment.split('|'):
        # Strip the "Qt::" scope and look the flag up on the Qt namespace.
        _scope, name = part.split('::')
        flag = getattr(QtCore.Qt, name)
        flags = flag if flags is None else flags | flag
    return flags
class WidgetStack(list):
    """ A stack of the widgets and layouts created so far during parsing.

    topwidget tracks the most recently pushed *widget*; layouts may sit
    above it on the stack.
    """
    topwidget = None
    def push(self, item):
        DEBUG("push %s %s" % (item.metaObject().className(),
                item.objectName()))
        self.append(item)
        # Remember the newest widget separately from any layouts above it.
        if isinstance(item, QtWidgets.QWidget):
            self.topwidget = item
    def popLayout(self):
        layout = list.pop(self)
        DEBUG("pop layout %s %s" % (layout.metaObject().className(),
                layout.objectName()))
        return layout
    def popWidget(self):
        widget = list.pop(self)
        DEBUG("pop widget %s %s" % (widget.metaObject().className(),
                widget.objectName()))
        # Re-establish topwidget as the next widget further down the stack.
        for item in reversed(self):
            if isinstance(item, QtWidgets.QWidget):
                self.topwidget = item
                break
        else:
            self.topwidget = None
        DEBUG("new topwidget %s" % (self.topwidget,))
        return widget
    def peek(self):
        return self[-1]
    def topIsLayout(self):
        return isinstance(self[-1], QtWidgets.QLayout)
    def topIsLayoutWidget(self):
        # A plain QWidget is a layout widget unless it's parent is a
        # QMainWindow. Note that the corresponding uic test is a little more
        # complicated as it involves features not supported by pyuic.
        if type(self[-1]) is not QtWidgets.QWidget:
            return False
        if len(self) < 2:
            return False
        return type(self[-2]) is not QtWidgets.QMainWindow
class ButtonGroup(object):
    """ Encapsulate the configuration of a button group and its
    implementation. """
    def __init__(self):
        """ Initialise the button group. """
        # Groups are exclusive unless the .ui file explicitly turns that
        # off; the underlying QButtonGroup is created lazily when a button
        # first refers to the group.
        self.exclusive = True
        self.object = None
class UIParser(object):
    def __init__(self, qtcore_module, qtgui_module, qtwidgets_module, creatorPolicy):
        """Create a parser bound to the given Qt binding modules; the policy
        decides how widget instances are created by the factory.
        """
        self.factory = QObjectCreator(creatorPolicy)
        self.wprops = Properties(self.factory, qtcore_module, qtgui_module,
                qtwidgets_module)
        # Publish the bindings for the module-level helpers as well.
        global QtCore, QtWidgets
        QtCore = qtcore_module
        QtWidgets = qtwidgets_module
        self.reset()
def uniqueName(self, name):
"""UIParser.uniqueName(string) -> string
Create a unique name from a string.
>>> p = UIParser(QtCore, QtGui, QtWidgets)
>>> p.uniqueName("foo")
'foo'
>>> p.uniqueName("foo")
'foo1'
"""
try:
suffix = self.name_suffixes[name]
except KeyError:
self.name_suffixes[name] = 0
return name
suffix += 1
self.name_suffixes[name] = suffix
return "%s%i" % (name, suffix)
    def reset(self):
        """Clear all parsing state so the parser can be used again."""
        # Tolerate a missing wprops or a Properties without reset().
        try: self.wprops.reset()
        except AttributeError: pass
        self.toplevelWidget = None
        self.stack = WidgetStack()
        self.name_suffixes = {}
        self.defaults = {'spacing': -1, 'margin': -1}
        self.actions = []
        self.currentActionGroup = None
        self.resources = []
        self.button_groups = {}
    def setupObject(self, clsname, parent, branch, is_attribute=True):
        """Create an instance of clsname via the factory, apply the element's
        properties, and (when is_attribute) attach it to the top-level widget
        under its unique name.  Returns the created object.
        """
        name = self.uniqueName(branch.attrib.get('name') or clsname[1:].lower())
        if parent is None:
            args = ()
        else:
            args = (parent, )
        obj = self.factory.createQObject(clsname, name, args, is_attribute)
        self.wprops.setProperties(obj, branch)
        obj.setObjectName(name)
        if is_attribute:
            setattr(self.toplevelWidget, name, obj)
        return obj
def getProperty(self, elem, name):
for prop in elem.findall('property'):
if prop.attrib['name'] == name:
return prop
return None
    def createWidget(self, elem):
        """Create the widget described by a <widget> element, recursively
        build its children, then attach it to its parent layout, container
        widget or main-window slot.
        """
        # Per-widget item bookkeeping used by handleItem()/addHeader().
        self.column_counter = 0
        self.row_counter = 0
        self.item_nr = 0
        self.itemstack = []
        self.sorting_enabled = None
        widget_class = elem.attrib['class'].replace('::', '.')
        if widget_class == 'Line':
            widget_class = 'QFrame'
        # Ignore the parent if it is a container.
        parent = self.stack.topwidget
        if isinstance(parent, (QtWidgets.QDockWidget, QtWidgets.QMdiArea,
                QtWidgets.QScrollArea, QtWidgets.QStackedWidget,
                QtWidgets.QToolBox, QtWidgets.QTabWidget,
                QtWidgets.QWizard)):
            parent = None
        self.stack.push(self.setupObject(widget_class, parent, elem))
        if isinstance(self.stack.topwidget, QtWidgets.QTableWidget):
            # Derive the row/column counts from the file if not given.
            if self.getProperty(elem, 'columnCount') is None:
                self.stack.topwidget.setColumnCount(len(elem.findall("column")))
            if self.getProperty(elem, 'rowCount') is None:
                self.stack.topwidget.setRowCount(len(elem.findall("row")))
        self.traverseWidgetTree(elem)
        widget = self.stack.popWidget()
        if isinstance(widget, QtWidgets.QTreeView):
            self.handleHeaderView(elem, "header", widget.header())
        elif isinstance(widget, QtWidgets.QTableView):
            self.handleHeaderView(elem, "horizontalHeader",
                    widget.horizontalHeader())
            self.handleHeaderView(elem, "verticalHeader",
                    widget.verticalHeader())
        elif isinstance(widget, QtWidgets.QAbstractButton):
            bg_i18n = self.wprops.getAttribute(elem, "buttonGroup")
            if bg_i18n is not None:
                # This should be handled properly in case the problem arises
                # elsewhere as well.
                try:
                    # We are compiling the .ui file.
                    bg_name = bg_i18n.string
                except AttributeError:
                    # We are loading the .ui file.
                    bg_name = bg_i18n
                # Designer allows the creation of .ui files without explicit
                # button groups, even though uic then issues warnings. We
                # handle it in two stages by first making sure it has a name
                # and then making sure one exists with that name.
                if not bg_name:
                    bg_name = 'buttonGroup'
                try:
                    bg = self.button_groups[bg_name]
                except KeyError:
                    bg = self.button_groups[bg_name] = ButtonGroup()
                if bg.object is None:
                    bg.object = self.factory.createQObject("QButtonGroup",
                            bg_name, (self.toplevelWidget, ))
                    setattr(self.toplevelWidget, bg_name, bg.object)
                    bg.object.setObjectName(bg_name)
                    if not bg.exclusive:
                        bg.object.setExclusive(False)
                bg.object.addButton(widget)
        # Restore the sorting state disableSorting() switched off.
        if self.sorting_enabled is not None:
            widget.setSortingEnabled(self.sorting_enabled)
            self.sorting_enabled = None
        if self.stack.topIsLayout():
            lay = self.stack.peek()
            lp = elem.attrib['layout-position']
            if isinstance(lay, QtWidgets.QFormLayout):
                lay.setWidget(lp[0], self._form_layout_role(lp), widget)
            else:
                lay.addWidget(widget, *lp)
        # Finally, add the widget to whatever container is on top.
        topwidget = self.stack.topwidget
        if isinstance(topwidget, QtWidgets.QToolBox):
            icon = self.wprops.getAttribute(elem, "icon")
            if icon is not None:
                topwidget.addItem(widget, icon, self.wprops.getAttribute(elem, "label"))
            else:
                topwidget.addItem(widget, self.wprops.getAttribute(elem, "label"))
            tooltip = self.wprops.getAttribute(elem, "toolTip")
            if tooltip is not None:
                topwidget.setItemToolTip(topwidget.indexOf(widget), tooltip)
        elif isinstance(topwidget, QtWidgets.QTabWidget):
            icon = self.wprops.getAttribute(elem, "icon")
            if icon is not None:
                topwidget.addTab(widget, icon, self.wprops.getAttribute(elem, "title"))
            else:
                topwidget.addTab(widget, self.wprops.getAttribute(elem, "title"))
            tooltip = self.wprops.getAttribute(elem, "toolTip")
            if tooltip is not None:
                topwidget.setTabToolTip(topwidget.indexOf(widget), tooltip)
        elif isinstance(topwidget, QtWidgets.QWizard):
            topwidget.addPage(widget)
        elif isinstance(topwidget, QtWidgets.QStackedWidget):
            topwidget.addWidget(widget)
        elif isinstance(topwidget, (QtWidgets.QDockWidget, QtWidgets.QScrollArea)):
            topwidget.setWidget(widget)
        elif isinstance(topwidget, QtWidgets.QMainWindow):
            if type(widget) == QtWidgets.QWidget:
                topwidget.setCentralWidget(widget)
            elif isinstance(widget, QtWidgets.QToolBar):
                tbArea = self.wprops.getAttribute(elem, "toolBarArea")
                if tbArea is None:
                    topwidget.addToolBar(widget)
                else:
                    topwidget.addToolBar(tbArea, widget)
                tbBreak = self.wprops.getAttribute(elem, "toolBarBreak")
                if tbBreak:
                    topwidget.insertToolBarBreak(widget)
            elif isinstance(widget, QtWidgets.QMenuBar):
                topwidget.setMenuBar(widget)
            elif isinstance(widget, QtWidgets.QStatusBar):
                topwidget.setStatusBar(widget)
            elif isinstance(widget, QtWidgets.QDockWidget):
                dwArea = self.wprops.getAttribute(elem, "dockWidgetArea")
                topwidget.addDockWidget(QtCore.Qt.DockWidgetArea(dwArea),
                        widget)
def handleHeaderView(self, elem, name, header):
value = self.wprops.getAttribute(elem, name + "Visible")
if value is not None:
header.setVisible(value)
value = self.wprops.getAttribute(elem, name + "CascadingSectionResizes")
if value is not None:
header.setCascadingSectionResizes(value)
value = self.wprops.getAttribute(elem, name + "DefaultSectionSize")
if value is not None:
header.setDefaultSectionSize(value)
value = self.wprops.getAttribute(elem, name + "HighlightSections")
if value is not None:
header.setHighlightSections(value)
value = self.wprops.getAttribute(elem, name + "MinimumSectionSize")
if value is not None:
header.setMinimumSectionSize(value)
value = self.wprops.getAttribute(elem, name + "ShowSortIndicator")
if value is not None:
header.setSortIndicatorShown(value)
value = self.wprops.getAttribute(elem, name + "StretchLastSection")
if value is not None:
header.setStretchLastSection(value)
    def createSpacer(self, elem):
        """Create a QSpacerItem from a <spacer> element and, if the top of
        the stack is a layout, insert it at its layout position.
        """
        width = elem.findtext("property/size/width")
        height = elem.findtext("property/size/height")
        if width is None or height is None:
            size_args = ()
        else:
            size_args = (int(width), int(height))
        sizeType = self.wprops.getProperty(elem, "sizeType",
                QtWidgets.QSizePolicy.Expanding)
        policy = (QtWidgets.QSizePolicy.Minimum, sizeType)
        # A horizontal spacer gets the greedy policy horizontally and the
        # minimal one vertically, so swap the (h, v) pair.
        if self.wprops.getProperty(elem, "orientation") == QtCore.Qt.Horizontal:
            policy = policy[1], policy[0]
        spacer = self.factory.createQObject("QSpacerItem",
                self.uniqueName("spacerItem"), size_args + policy,
                is_attribute=False)
        if self.stack.topIsLayout():
            lay = self.stack.peek()
            lp = elem.attrib['layout-position']
            if isinstance(lay, QtWidgets.QFormLayout):
                lay.setItem(lp[0], self._form_layout_role(lp), spacer)
            else:
                lay.addItem(spacer, *lp)
    def createLayout(self, elem):
        """Create the layout described by a <layout> element, populate it by
        traversing the element's children, and attach it to the widget or
        layout currently on top of the stack.
        """
        # We use an internal property to handle margins which will use separate
        # left, top, right and bottom margins if they are found to be
        # different.  The following will select, in order of preference,
        # separate margins, the same margin in all directions, and the default
        # margin.
        margin = self.wprops.getProperty(elem, 'margin',
                self.defaults['margin'])
        left = self.wprops.getProperty(elem, 'leftMargin', margin)
        top = self.wprops.getProperty(elem, 'topMargin', margin)
        right = self.wprops.getProperty(elem, 'rightMargin', margin)
        bottom = self.wprops.getProperty(elem, 'bottomMargin', margin)
        # A layout widget should, by default, have no margins.
        if self.stack.topIsLayoutWidget():
            if left < 0: left = 0
            if top < 0: top = 0
            if right < 0: right = 0
            if bottom < 0: bottom = 0
        if left >= 0 or top >= 0 or right >= 0 or bottom >= 0:
            # We inject the new internal property.
            cme = SubElement(elem, 'property', name='pyuicMargins')
            SubElement(cme, 'number').text = str(left)
            SubElement(cme, 'number').text = str(top)
            SubElement(cme, 'number').text = str(right)
            SubElement(cme, 'number').text = str(bottom)
        # We use an internal property to handle spacing which will use separate
        # horizontal and vertical spacing if they are found to be different.
        # The following will select, in order of preference, separate
        # horizontal and vertical spacing, the same spacing in both directions,
        # and the default spacing.
        spacing = self.wprops.getProperty(elem, 'spacing',
                self.defaults['spacing'])
        horiz = self.wprops.getProperty(elem, 'horizontalSpacing', spacing)
        vert = self.wprops.getProperty(elem, 'verticalSpacing', spacing)
        if horiz >= 0 or vert >= 0:
            # We inject the new internal property.
            cme = SubElement(elem, 'property', name='pyuicSpacing')
            SubElement(cme, 'number').text = str(horiz)
            SubElement(cme, 'number').text = str(vert)
        classname = elem.attrib["class"]
        if self.stack.topIsLayout():
            parent = None
        else:
            parent = self.stack.topwidget
        # Make sure setupObject() always sees a name.
        if "name" not in elem.attrib:
            elem.attrib["name"] = classname[1:].lower()
        self.stack.push(self.setupObject(classname, parent, elem))
        self.traverseWidgetTree(elem)
        layout = self.stack.popLayout()
        self.configureLayout(elem, layout)
        if self.stack.topIsLayout():
            top_layout = self.stack.peek()
            lp = elem.attrib['layout-position']
            if isinstance(top_layout, QtWidgets.QFormLayout):
                top_layout.setLayout(lp[0], self._form_layout_role(lp), layout)
            else:
                top_layout.addLayout(layout, *lp)
def configureLayout(self, elem, layout):
if isinstance(layout, QtWidgets.QGridLayout):
self.setArray(elem, 'columnminimumwidth',
layout.setColumnMinimumWidth)
self.setArray(elem, 'rowminimumheight',
layout.setRowMinimumHeight)
self.setArray(elem, 'columnstretch', layout.setColumnStretch)
self.setArray(elem, 'rowstretch', layout.setRowStretch)
elif isinstance(layout, QtWidgets.QBoxLayout):
self.setArray(elem, 'stretch', layout.setStretch)
def setArray(self, elem, name, setter):
array = elem.attrib.get(name)
if array:
for idx, value in enumerate(array.split(',')):
value = int(value)
if value > 0:
setter(idx, value)
def disableSorting(self, w):
if self.item_nr == 0:
self.sorting_enabled = self.factory.invoke("__sortingEnabled",
w.isSortingEnabled)
w.setSortingEnabled(False)
def handleItem(self, elem):
    """Handle an <item> element.

    Inside a layout the element merely wraps a widget/layout/spacer and
    records its grid position.  Inside an item widget (combo box, list,
    tree or table) it describes one entry of that widget.
    """
    if self.stack.topIsLayout():
        # Stash the grid position on the wrapped child, then recurse.
        elem[0].attrib['layout-position'] = _layout_position(elem)
        self.traverseWidgetTree(elem)
    else:
        w = self.stack.topwidget
        if isinstance(w, QtWidgets.QComboBox):
            text = self.wprops.getProperty(elem, "text")
            icon = self.wprops.getProperty(elem, "icon")
            if icon:
                w.addItem(icon, '')
            else:
                w.addItem('')
            # The text is set separately (not passed to addItem) so that
            # translatable strings are handled via setItemText.
            w.setItemText(self.item_nr, text)
        elif isinstance(w, QtWidgets.QListWidget):
            self.disableSorting(w)
            item = self.createWidgetItem('QListWidgetItem', elem, w.item,
                    self.item_nr)
            w.addItem(item)
        elif isinstance(w, QtWidgets.QTreeWidget):
            # Tree items nest: itemstack tracks the chain of parent items
            # together with each one's child index in its own parent.
            if self.itemstack:
                parent, _ = self.itemstack[-1]
                _, nr_in_root = self.itemstack[0]
            else:
                parent = w
                nr_in_root = self.item_nr
            item = self.factory.createQObject("QTreeWidgetItem",
                    "item_%d" % len(self.itemstack), (parent, ), False)
            if self.item_nr == 0 and not self.itemstack:
                # First top-level item: suspend sorting while filling.
                self.sorting_enabled = self.factory.invoke("__sortingEnabled", w.isSortingEnabled)
                w.setSortingEnabled(False)
            self.itemstack.append((item, self.item_nr))
            self.item_nr = 0
            # We have to access the item via the tree when setting the
            # text.
            titm = w.topLevelItem(nr_in_root)
            for child, nr_in_parent in self.itemstack[1:]:
                titm = titm.child(nr_in_parent)
            # Each 'text' property advances to the next column; the other
            # properties apply to the current column.
            column = -1
            for prop in elem.findall('property'):
                c_prop = self.wprops.convert(prop)
                c_prop_name = prop.attrib['name']
                if c_prop_name == 'text':
                    column += 1
                    if c_prop:
                        titm.setText(column, c_prop)
                elif c_prop_name == 'statusTip':
                    item.setStatusTip(column, c_prop)
                elif c_prop_name == 'toolTip':
                    item.setToolTip(column, c_prop)
                elif c_prop_name == 'whatsThis':
                    item.setWhatsThis(column, c_prop)
                elif c_prop_name == 'font':
                    item.setFont(column, c_prop)
                elif c_prop_name == 'icon':
                    item.setIcon(column, c_prop)
                elif c_prop_name == 'background':
                    item.setBackground(column, c_prop)
                elif c_prop_name == 'foreground':
                    item.setForeground(column, c_prop)
                elif c_prop_name == 'flags':
                    item.setFlags(c_prop)
                elif c_prop_name == 'checkState':
                    item.setCheckState(column, c_prop)
            # Recurse into nested <item> children, then restore the
            # parent's item counter.
            self.traverseWidgetTree(elem)
            _, self.item_nr = self.itemstack.pop()
        elif isinstance(w, QtWidgets.QTableWidget):
            row = int(elem.attrib['row'])
            col = int(elem.attrib['column'])
            self.disableSorting(w)
            item = self.createWidgetItem('QTableWidgetItem', elem, w.item,
                    row, col)
            w.setItem(row, col, item)
        self.item_nr += 1
def addAction(self, elem):
    """Queue the named action for later attachment to the widget that is
    currently on top of the stack (done in addActions())."""
    owner = self.stack.topwidget
    action_name = elem.attrib["name"]
    self.actions.append((owner, action_name))
@staticmethod
def any_i18n(*args):
""" Return True if any argument appears to be an i18n string. """
for a in args:
if a is not None and not isinstance(a, str):
return True
return False
def createWidgetItem(self, item_type, elem, getter, *getter_args):
    """ Create a specific type of widget item. """
    item = self.factory.createQObject(item_type, "item", (), False)
    props = self.wprops

    # The four plain-text properties are fetched up front because, if any
    # of them needs translation, the generated code must re-fetch the item
    # at runtime through *getter*.
    text = props.getProperty(elem, 'text')
    status_tip = props.getProperty(elem, 'statusTip')
    tool_tip = props.getProperty(elem, 'toolTip')
    whats_this = props.getProperty(elem, 'whatsThis')

    if self.any_i18n(text, status_tip, tool_tip, whats_this):
        self.factory.invoke("item", getter, getter_args)

    # Note that not all types of widget items support the full set of
    # properties; absent values are simply skipped.
    for value, apply_fn in (
            (text, item.setText),
            (status_tip, item.setStatusTip),
            (tool_tip, item.setToolTip),
            (whats_this, item.setWhatsThis)):
        if value:
            apply_fn(value)

    for prop_name, apply_fn in (
            ('textAlignment', item.setTextAlignment),
            ('font', item.setFont),
            ('icon', item.setIcon),
            ('background', item.setBackground),
            ('foreground', item.setForeground),
            ('flags', item.setFlags),
            ('checkState', item.setCheckState)):
        value = props.getProperty(elem, prop_name)
        if value:
            apply_fn(value)

    return item
def addHeader(self, elem):
    """Handle a <column> (tree/table) or <row> (table) header element."""
    w = self.stack.topwidget
    if isinstance(w, QtWidgets.QTreeWidget):
        # Tree headers are the columns of the single header item; each
        # supported property is applied to the current column, if set.
        props = self.wprops
        col = self.column_counter
        text = props.getProperty(elem, 'text')
        if text:
            w.headerItem().setText(col, text)
        status_tip = props.getProperty(elem, 'statusTip')
        if status_tip:
            w.headerItem().setStatusTip(col, status_tip)
        tool_tip = props.getProperty(elem, 'toolTip')
        if tool_tip:
            w.headerItem().setToolTip(col, tool_tip)
        whats_this = props.getProperty(elem, 'whatsThis')
        if whats_this:
            w.headerItem().setWhatsThis(col, whats_this)
        text_alignment = props.getProperty(elem, 'textAlignment')
        if text_alignment:
            w.headerItem().setTextAlignment(col, text_alignment)
        font = props.getProperty(elem, 'font')
        if font:
            w.headerItem().setFont(col, font)
        icon = props.getProperty(elem, 'icon')
        if icon:
            w.headerItem().setIcon(col, icon)
        background = props.getProperty(elem, 'background')
        if background:
            w.headerItem().setBackground(col, background)
        foreground = props.getProperty(elem, 'foreground')
        if foreground:
            w.headerItem().setForeground(col, foreground)
        self.column_counter += 1
    elif isinstance(w, QtWidgets.QTableWidget):
        # Only non-empty elements create a header item; the tag decides
        # whether it is a horizontal (column) or vertical (row) header.
        if len(elem) != 0:
            if elem.tag == 'column':
                item = self.createWidgetItem('QTableWidgetItem', elem,
                        w.horizontalHeaderItem, self.column_counter)
                w.setHorizontalHeaderItem(self.column_counter, item)
                self.column_counter += 1
            elif elem.tag == 'row':
                item = self.createWidgetItem('QTableWidgetItem', elem,
                        w.verticalHeaderItem, self.row_counter)
                w.setVerticalHeaderItem(self.row_counter, item)
                self.row_counter += 1
def setZOrder(self, elem):
    """Raise the named widget to reflect its designer z-order."""
    widget_name = elem.text
    # Designer can generate empty zorder elements.
    if widget_name is None:
        return
    # Designer allows the z-order of spacer items to be specified even
    # though they can't be raised, so a missing raise_() method is
    # deliberately ignored (uic emits a warning here instead).
    try:
        getattr(self.toplevelWidget, widget_name).raise_()
    except AttributeError:
        pass
def createAction(self, elem):
    """Create a QAction owned by the active action group, or by the
    top-level widget when no group is open."""
    owner = self.currentActionGroup or self.toplevelWidget
    self.setupObject("QAction", owner, elem)
def createActionGroup(self, elem):
    """Create a QActionGroup and parse its child actions inside it."""
    group = self.setupObject("QActionGroup", self.toplevelWidget, elem)
    # Child <action> elements created during the traversal attach
    # themselves to the currently open group.
    self.currentActionGroup = group
    self.traverseWidgetTree(elem)
    self.currentActionGroup = None
# Dispatch table used by traverseWidgetTree(): maps a child element's
# tag to its handler.  The values are plain (unbound) functions, so the
# caller passes self explicitly.
widgetTreeItemHandlers = {
    "widget"    : createWidget,
    "addaction" : addAction,
    "layout"    : createLayout,
    "spacer"    : createSpacer,
    "item"      : handleItem,
    "action"    : createAction,
    "actiongroup": createActionGroup,
    "column"    : addHeader,
    "row"       : addHeader,
    "zorder"    : setZOrder,
}
def traverseWidgetTree(self, elem):
    """Dispatch every known child element of *elem* to its handler from
    widgetTreeItemHandlers; unknown tags are silently skipped."""
    handlers = self.widgetTreeItemHandlers
    for child in iter(elem):
        handler = handlers.get(child.tag)
        if handler is not None:
            # Handlers are stored unbound, so pass self explicitly.
            handler(self, child)
def createUserInterface(self, elem):
    """Build the form's top-level widget and everything beneath it."""
    # Get the names of the class and widget.
    cname = elem.attrib["class"]
    wname = elem.attrib["name"]

    # If there was no widget name then derive it from the class name.
    if not wname:
        wname = cname
        if wname.startswith("Q"):
            wname = wname[1:]
        wname = wname[0].lower() + wname[1:]

    self.toplevelWidget = self.createToplevelWidget(cname, wname)
    self.toplevelWidget.setObjectName(wname)
    DEBUG("toplevel widget is %s",
            self.toplevelWidget.metaObject().className())
    self.wprops.setProperties(self.toplevelWidget, elem)
    self.stack.push(self.toplevelWidget)
    self.traverseWidgetTree(elem)
    self.stack.popWidget()
    # Deferred wiring that needs the complete widget tree.
    self.addActions()
    self.setBuddies()
    self.setDelayedProps()
def addActions(self):
    """Attach all (widget, action-name) pairs queued during the tree
    walk, now that every action object exists."""
    for widget, action_name in self.actions:
        if action_name == "separator":
            widget.addSeparator()
            continue
        DEBUG("add action %s to %s", action_name, widget.objectName())
        action_obj = getattr(self.toplevelWidget, action_name)
        # A QMenu contributes its menuAction(); a QActionGroup is skipped
        # because its member actions are added individually.
        if isinstance(action_obj, QtWidgets.QMenu):
            widget.addAction(action_obj.menuAction())
        elif not isinstance(action_obj, QtWidgets.QActionGroup):
            widget.addAction(action_obj)
def setDelayedProps(self):
    """Apply properties whose target (a widget, or its layout) only
    became available after the whole tree was built."""
    for widget, is_layout, setter_name, args in self.wprops.delayed_props:
        target = widget.layout() if is_layout else widget
        getattr(target, setter_name)(args)
def setBuddies(self):
    """Resolve the label/buddy pairs collected by the property handler.

    This runs after the full tree is built because a buddy may be
    created later in the .ui file than the label that refers to it.
    """
    for widget, buddy in self.wprops.buddies:
        DEBUG("%s is buddy of %s", buddy, widget.objectName())
        try:
            widget.setBuddy(getattr(self.toplevelWidget, buddy))
        except AttributeError:
            # The .ui file names a widget that was never created.
            DEBUG("ERROR in ui spec: %s (buddy of %s) does not exist",
                    buddy, widget.objectName())
def classname(self, elem):
    """Record the form's <class> name as the ui name and translation
    context."""
    DEBUG("uiname is %s", elem.text)
    name = elem.text
    # Designer can emit an empty <class/> element.
    if name is None:
        name = ""
    self.uiname = name
    self.wprops.uiname = name
    self.setContext(name)
def setContext(self, context):
    """Hook for sub-classes that need to know the translation context;
    the base implementation intentionally does nothing."""
    pass
def readDefaults(self, elem):
    """Record the form's default layout margin and spacing from the
    <layoutdefault> element."""
    for key in ('margin', 'spacing'):
        self.defaults[key] = int(elem.attrib[key])
def setTaborder(self, elem):
    """Chain the widgets listed in <tabstops> into a tab order,
    pairwise in document order."""
    previous = None
    for widget_elem in elem:
        current = getattr(self.toplevelWidget, widget_elem.text)
        if previous is not None:
            self.toplevelWidget.setTabOrder(previous, current)
        previous = current
def readResources(self, elem):
    """
    Read a "resources" tag and add the module to import to the parser's
    list of them.
    """
    # ElementTree renamed getiterator() to iter(); support both.
    try:
        iterator = elem.iter
    except AttributeError:
        iterator = elem.getiterator

    for include in iterator("include"):
        location = include.attrib.get("location")

        # Apply the convention for naming the Python files generated by
        # pyrcc5 (basename + resource suffix).
        if location and location.endswith('.qrc'):
            module_name = os.path.basename(
                    location[:-4] + self._resource_suffix)
            if module_name not in self.resources:
                self.resources.append(module_name)
def createConnections(self, elem):
    """Make the signal/slot connections listed in <connections>."""
    def name2object(obj):
        # The form's own name refers to the top-level widget itself.
        if obj == self.uiname:
            return self.toplevelWidget
        else:
            return getattr(self.toplevelWidget, obj)

    for conn in iter(elem):
        signal = conn.findtext('signal')
        # Split "clicked(bool)" into the name and a normalised,
        # whitespace-free C++ argument list.
        signal_name, signal_args = signal.split('(')
        signal_args = signal_args[:-1].replace(' ', '')
        sender = name2object(conn.findtext('sender'))
        bound_signal = getattr(sender, signal_name)
        slot = self.factory.getSlot(name2object(conn.findtext('receiver')),
                conn.findtext('slot').split('(')[0])
        if signal_args == '':
            bound_signal.connect(slot)
        else:
            # Select the signal overload matching the argument signature.
            signal_args = signal_args.split(',')
            if len(signal_args) == 1:
                bound_signal[signal_args[0]].connect(slot)
            else:
                bound_signal[tuple(signal_args)].connect(slot)

    # Also perform the automatic on_<name>_<signal> connections.
    QtCore.QMetaObject.connectSlotsByName(self.toplevelWidget)
def customWidgets(self, elem):
    """Register every <customwidget> with the object factory."""
    def header2module(header):
        """header2module(header) -> string

        Convert paths to C++ header files to according Python modules

        >>> header2module("foo/bar/baz.h")
        'foo.bar.baz'
        """
        if header.endswith(".h"):
            header = header[:-2]

        module_parts = []
        for part in header.split('/'):
            # Ignore any empty parts or those that refer to the current
            # directory.
            if part in ('', '.'):
                continue
            if part == '..':
                # We should allow this for Python3.
                raise SyntaxError("custom widget header file name may not contain '..'.")
            module_parts.append(part)

        return '.'.join(module_parts)

    for custom_widget in iter(elem):
        self.factory.addCustomWidget(
                custom_widget.findtext("class"),
                custom_widget.findtext("extends") or "QWidget",
                header2module(custom_widget.findtext("header")))
def createToplevelWidget(self, classname, widgetname):
    """Create the form's top-level widget; concrete parsers override."""
    raise NotImplementedError
def buttonGroups(self, elem):
    """Pre-register the form's named button groups so that buttons can
    join them as they are created."""
    for child in iter(elem):
        if child.tag != 'buttongroup':
            continue
        group = ButtonGroup()
        self.button_groups[child.attrib['name']] = group
        # Only an explicit exclusive=false turns the group non-exclusive;
        # anything else keeps the Qt default.
        prop = self.getProperty(child, 'exclusive')
        if prop is not None and prop.findtext('bool') == 'false':
            group.exclusive = False
def finalize(self):
    """Called after the whole tree has been parsed; sub-classes may
    override, the default is a no-op."""
    pass
def parse(self, filename, resource_suffix, base_dir=''):
    """Parse the .ui file *filename* and return its top-level widget.

    *resource_suffix* is appended to resource module basenames and
    *base_dir* anchors relative resource paths.
    """
    self.wprops.set_base_dir(base_dir)
    self._resource_suffix = resource_suffix

    # The order in which the different branches are handled is important.
    # The widget tree handler relies on all custom widgets being known, and
    # in order to create the connections, all widgets have to be populated.
    branchHandlers = (
        ("layoutdefault", self.readDefaults),
        ("class", self.classname),
        ("buttongroups", self.buttonGroups),
        ("customwidgets", self.customWidgets),
        ("widget", self.createUserInterface),
        ("connections", self.createConnections),
        ("tabstops", self.setTaborder),
        ("resources", self.readResources),
    )
    # Module-level ElementTree parse(), not this method.
    document = parse(filename)
    version = document.getroot().attrib["version"]
    DEBUG("UI version is %s" % (version,))
    # Right now, only version 4.0 is supported.
    assert version in ("4.0",)
    for tagname, actor in branchHandlers:
        elem = document.find(tagname)
        if elem is not None:
            actor(elem)
    self.finalize()
    # Hand back the widget and clear all state for the next parse.
    w = self.toplevelWidget
    self.reset()
    return w
@staticmethod
def _form_layout_role(layout_position):
    """Map a (row, column, rowspan, colspan) grid position to the
    QFormLayout role it occupies."""
    if layout_position[3] > 1:
        # Spans both columns.
        return QtWidgets.QFormLayout.SpanningRole
    if layout_position[1] == 1:
        return QtWidgets.QFormLayout.FieldRole
    return QtWidgets.QFormLayout.LabelRole
|
pyqt/python-qt5 | PyQt5/uic/uiparser.py | UIParser.uniqueName | python | def uniqueName(self, name):
try:
suffix = self.name_suffixes[name]
except KeyError:
self.name_suffixes[name] = 0
return name
suffix += 1
self.name_suffixes[name] = suffix
return "%s%i" % (name, suffix) | UIParser.uniqueName(string) -> string
Create a unique name from a string.
>>> p = UIParser(QtCore, QtGui, QtWidgets)
>>> p.uniqueName("foo")
'foo'
>>> p.uniqueName("foo")
'foo1' | train | https://github.com/pyqt/python-qt5/blob/c9ed180c56f6fd3521ffe5fb70904bc5d3f50e5f/PyQt5/uic/uiparser.py#L176-L195 | null | class UIParser(object):
def __init__(self, qtcore_module, qtgui_module, qtwidgets_module, creatorPolicy):
self.factory = QObjectCreator(creatorPolicy)
self.wprops = Properties(self.factory, qtcore_module, qtgui_module,
qtwidgets_module)
global QtCore, QtWidgets
QtCore = qtcore_module
QtWidgets = qtwidgets_module
self.reset()
def reset(self):
    """Restore all per-parse state so the parser can be reused."""
    # The very first call happens from __init__, before self.wprops has
    # been given a reset() by its own first use.
    try:
        self.wprops.reset()
    except AttributeError:
        pass

    self.toplevelWidget = None
    self.stack = WidgetStack()
    self.name_suffixes = {}
    self.defaults = {'spacing': -1, 'margin': -1}
    self.actions = []
    self.currentActionGroup = None
    self.resources = []
    self.button_groups = {}
def setupObject(self, clsname, parent, branch, is_attribute=True):
    """Create a *clsname* instance for the XML element *branch*.

    The object gets a unique name, its properties applied and, when
    *is_attribute*, is bound as an attribute of the top-level widget.
    """
    # Fall back to the lower-cased class name (sans 'Q') when unnamed.
    name = self.uniqueName(branch.attrib.get('name') or clsname[1:].lower())
    if parent is None:
        args = ()
    else:
        args = (parent, )
    obj = self.factory.createQObject(clsname, name, args, is_attribute)
    self.wprops.setProperties(obj, branch)
    obj.setObjectName(name)
    if is_attribute:
        setattr(self.toplevelWidget, name, obj)
    return obj
def getProperty(self, elem, name):
    """Return the <property name=...> child of *elem*, or None."""
    matches = (p for p in elem.findall('property')
            if p.attrib['name'] == name)
    return next(matches, None)
def createWidget(self, elem):
    """Handle a <widget> element: create it, fill it from its children
    and attach it to its parent widget, layout or container."""
    # Per-widget item bookkeeping (combos/lists/trees/tables fill these).
    self.column_counter = 0
    self.row_counter = 0
    self.item_nr = 0
    self.itemstack = []
    self.sorting_enabled = None

    widget_class = elem.attrib['class'].replace('::', '.')
    if widget_class == 'Line':
        widget_class = 'QFrame'

    # Ignore the parent if it is a container.
    parent = self.stack.topwidget
    if isinstance(parent, (QtWidgets.QDockWidget, QtWidgets.QMdiArea,
            QtWidgets.QScrollArea, QtWidgets.QStackedWidget,
            QtWidgets.QToolBox, QtWidgets.QTabWidget,
            QtWidgets.QWizard)):
        parent = None

    self.stack.push(self.setupObject(widget_class, parent, elem))

    if isinstance(self.stack.topwidget, QtWidgets.QTableWidget):
        # Infer table dimensions from the header elements when not given
        # explicitly as properties.
        if self.getProperty(elem, 'columnCount') is None:
            self.stack.topwidget.setColumnCount(len(elem.findall("column")))
        if self.getProperty(elem, 'rowCount') is None:
            self.stack.topwidget.setRowCount(len(elem.findall("row")))

    self.traverseWidgetTree(elem)
    widget = self.stack.popWidget()

    if isinstance(widget, QtWidgets.QTreeView):
        self.handleHeaderView(elem, "header", widget.header())
    elif isinstance(widget, QtWidgets.QTableView):
        self.handleHeaderView(elem, "horizontalHeader",
                widget.horizontalHeader())
        self.handleHeaderView(elem, "verticalHeader",
                widget.verticalHeader())
    elif isinstance(widget, QtWidgets.QAbstractButton):
        bg_i18n = self.wprops.getAttribute(elem, "buttonGroup")
        if bg_i18n is not None:
            # This should be handled properly in case the problem arises
            # elsewhere as well.
            try:
                # We are compiling the .ui file.
                bg_name = bg_i18n.string
            except AttributeError:
                # We are loading the .ui file.
                bg_name = bg_i18n

            # Designer allows the creation of .ui files without explicit
            # button groups, even though uic then issues warnings. We
            # handle it in two stages by first making sure it has a name
            # and then making sure one exists with that name.
            if not bg_name:
                bg_name = 'buttonGroup'

            try:
                bg = self.button_groups[bg_name]
            except KeyError:
                bg = self.button_groups[bg_name] = ButtonGroup()

            # Create the QButtonGroup lazily on first use.
            if bg.object is None:
                bg.object = self.factory.createQObject("QButtonGroup",
                        bg_name, (self.toplevelWidget, ))
                setattr(self.toplevelWidget, bg_name, bg.object)
                bg.object.setObjectName(bg_name)
                if not bg.exclusive:
                    bg.object.setExclusive(False)

            bg.object.addButton(widget)

    # Restore the sorting that disableSorting() suspended while items
    # were inserted.
    if self.sorting_enabled is not None:
        widget.setSortingEnabled(self.sorting_enabled)
        self.sorting_enabled = None

    if self.stack.topIsLayout():
        lay = self.stack.peek()
        lp = elem.attrib['layout-position']
        if isinstance(lay, QtWidgets.QFormLayout):
            lay.setWidget(lp[0], self._form_layout_role(lp), widget)
        else:
            lay.addWidget(widget, *lp)

    # Containers need the new child registered with them explicitly.
    topwidget = self.stack.topwidget
    if isinstance(topwidget, QtWidgets.QToolBox):
        icon = self.wprops.getAttribute(elem, "icon")
        if icon is not None:
            topwidget.addItem(widget, icon, self.wprops.getAttribute(elem, "label"))
        else:
            topwidget.addItem(widget, self.wprops.getAttribute(elem, "label"))
        tooltip = self.wprops.getAttribute(elem, "toolTip")
        if tooltip is not None:
            topwidget.setItemToolTip(topwidget.indexOf(widget), tooltip)
    elif isinstance(topwidget, QtWidgets.QTabWidget):
        icon = self.wprops.getAttribute(elem, "icon")
        if icon is not None:
            topwidget.addTab(widget, icon, self.wprops.getAttribute(elem, "title"))
        else:
            topwidget.addTab(widget, self.wprops.getAttribute(elem, "title"))
        tooltip = self.wprops.getAttribute(elem, "toolTip")
        if tooltip is not None:
            topwidget.setTabToolTip(topwidget.indexOf(widget), tooltip)
    elif isinstance(topwidget, QtWidgets.QWizard):
        topwidget.addPage(widget)
    elif isinstance(topwidget, QtWidgets.QStackedWidget):
        topwidget.addWidget(widget)
    elif isinstance(topwidget, (QtWidgets.QDockWidget, QtWidgets.QScrollArea)):
        topwidget.setWidget(widget)
    elif isinstance(topwidget, QtWidgets.QMainWindow):
        # Exact-type check: only a plain QWidget becomes the central
        # widget; QToolBar/QMenuBar/etc. subclasses are placed below.
        if type(widget) == QtWidgets.QWidget:
            topwidget.setCentralWidget(widget)
        elif isinstance(widget, QtWidgets.QToolBar):
            tbArea = self.wprops.getAttribute(elem, "toolBarArea")
            if tbArea is None:
                topwidget.addToolBar(widget)
            else:
                topwidget.addToolBar(tbArea, widget)
            tbBreak = self.wprops.getAttribute(elem, "toolBarBreak")
            if tbBreak:
                topwidget.insertToolBarBreak(widget)
        elif isinstance(widget, QtWidgets.QMenuBar):
            topwidget.setMenuBar(widget)
        elif isinstance(widget, QtWidgets.QStatusBar):
            topwidget.setStatusBar(widget)
        elif isinstance(widget, QtWidgets.QDockWidget):
            dwArea = self.wprops.getAttribute(elem, "dockWidgetArea")
            topwidget.addDockWidget(QtCore.Qt.DockWidgetArea(dwArea),
                    widget)
def handleHeaderView(self, elem, name, header):
    """Apply the designer's pseudo-attributes for a QHeaderView.

    *name* is the attribute prefix ("header", "horizontalHeader" or
    "verticalHeader"); only attributes present in the XML are applied.
    """
    settings = (
        ("Visible", header.setVisible),
        ("CascadingSectionResizes", header.setCascadingSectionResizes),
        ("DefaultSectionSize", header.setDefaultSectionSize),
        ("HighlightSections", header.setHighlightSections),
        ("MinimumSectionSize", header.setMinimumSectionSize),
        ("ShowSortIndicator", header.setSortIndicatorShown),
        ("StretchLastSection", header.setStretchLastSection),
    )
    for suffix, apply_fn in settings:
        value = self.wprops.getAttribute(elem, name + suffix)
        if value is not None:
            apply_fn(value)
def createSpacer(self, elem):
    """Handle a <spacer> element and add it to the enclosing layout."""
    width = elem.findtext("property/size/width")
    height = elem.findtext("property/size/height")

    # Only pass an explicit size hint when both dimensions are given.
    if width is None or height is None:
        size_args = ()
    else:
        size_args = (int(width), int(height))

    sizeType = self.wprops.getProperty(elem, "sizeType",
            QtWidgets.QSizePolicy.Expanding)

    policy = (QtWidgets.QSizePolicy.Minimum, sizeType)

    # Default orientation is vertical; swap the policy pair for a
    # horizontal spacer.
    if self.wprops.getProperty(elem, "orientation") == QtCore.Qt.Horizontal:
        policy = policy[1], policy[0]

    spacer = self.factory.createQObject("QSpacerItem",
            self.uniqueName("spacerItem"), size_args + policy,
            is_attribute=False)

    if self.stack.topIsLayout():
        lay = self.stack.peek()
        lp = elem.attrib['layout-position']
        if isinstance(lay, QtWidgets.QFormLayout):
            lay.setItem(lp[0], self._form_layout_role(lp), spacer)
        else:
            lay.addItem(spacer, *lp)
def createLayout(self, elem):
    """Handle a <layout> element: normalise margins and spacing into
    internal pyuic properties, create the layout, then recurse."""
    # We use an internal property to handle margins which will use separate
    # left, top, right and bottom margins if they are found to be
    # different. The following will select, in order of preference,
    # separate margins, the same margin in all directions, and the default
    # margin.
    margin = self.wprops.getProperty(elem, 'margin',
            self.defaults['margin'])
    left = self.wprops.getProperty(elem, 'leftMargin', margin)
    top = self.wprops.getProperty(elem, 'topMargin', margin)
    right = self.wprops.getProperty(elem, 'rightMargin', margin)
    bottom = self.wprops.getProperty(elem, 'bottomMargin', margin)

    # A layout widget should, by default, have no margins.
    if self.stack.topIsLayoutWidget():
        if left < 0: left = 0
        if top < 0: top = 0
        if right < 0: right = 0
        if bottom < 0: bottom = 0

    if left >= 0 or top >= 0 or right >= 0 or bottom >= 0:
        # We inject the new internal property.
        cme = SubElement(elem, 'property', name='pyuicMargins')
        SubElement(cme, 'number').text = str(left)
        SubElement(cme, 'number').text = str(top)
        SubElement(cme, 'number').text = str(right)
        SubElement(cme, 'number').text = str(bottom)

    # We use an internal property to handle spacing which will use separate
    # horizontal and vertical spacing if they are found to be different.
    # The following will select, in order of preference, separate
    # horizontal and vertical spacing, the same spacing in both directions,
    # and the default spacing.
    spacing = self.wprops.getProperty(elem, 'spacing',
            self.defaults['spacing'])
    horiz = self.wprops.getProperty(elem, 'horizontalSpacing', spacing)
    vert = self.wprops.getProperty(elem, 'verticalSpacing', spacing)

    if horiz >= 0 or vert >= 0:
        # We inject the new internal property.
        cme = SubElement(elem, 'property', name='pyuicSpacing')
        SubElement(cme, 'number').text = str(horiz)
        SubElement(cme, 'number').text = str(vert)

    classname = elem.attrib["class"]
    if self.stack.topIsLayout():
        parent = None
    else:
        parent = self.stack.topwidget
    # Give an anonymous layout a name derived from its class.
    if "name" not in elem.attrib:
        elem.attrib["name"] = classname[1:].lower()
    self.stack.push(self.setupObject(classname, parent, elem))
    self.traverseWidgetTree(elem)

    layout = self.stack.popLayout()
    self.configureLayout(elem, layout)

    # Nest this layout inside the enclosing layout, if any.
    if self.stack.topIsLayout():
        top_layout = self.stack.peek()
        lp = elem.attrib['layout-position']
        if isinstance(top_layout, QtWidgets.QFormLayout):
            top_layout.setLayout(lp[0], self._form_layout_role(lp), layout)
        else:
            top_layout.addLayout(layout, *lp)
def configureLayout(self, elem, layout):
if isinstance(layout, QtWidgets.QGridLayout):
self.setArray(elem, 'columnminimumwidth',
layout.setColumnMinimumWidth)
self.setArray(elem, 'rowminimumheight',
layout.setRowMinimumHeight)
self.setArray(elem, 'columnstretch', layout.setColumnStretch)
self.setArray(elem, 'rowstretch', layout.setRowStretch)
elif isinstance(layout, QtWidgets.QBoxLayout):
self.setArray(elem, 'stretch', layout.setStretch)
def setArray(self, elem, name, setter):
array = elem.attrib.get(name)
if array:
for idx, value in enumerate(array.split(',')):
value = int(value)
if value > 0:
setter(idx, value)
def disableSorting(self, w):
if self.item_nr == 0:
self.sorting_enabled = self.factory.invoke("__sortingEnabled",
w.isSortingEnabled)
w.setSortingEnabled(False)
def handleItem(self, elem):
if self.stack.topIsLayout():
elem[0].attrib['layout-position'] = _layout_position(elem)
self.traverseWidgetTree(elem)
else:
w = self.stack.topwidget
if isinstance(w, QtWidgets.QComboBox):
text = self.wprops.getProperty(elem, "text")
icon = self.wprops.getProperty(elem, "icon")
if icon:
w.addItem(icon, '')
else:
w.addItem('')
w.setItemText(self.item_nr, text)
elif isinstance(w, QtWidgets.QListWidget):
self.disableSorting(w)
item = self.createWidgetItem('QListWidgetItem', elem, w.item,
self.item_nr)
w.addItem(item)
elif isinstance(w, QtWidgets.QTreeWidget):
if self.itemstack:
parent, _ = self.itemstack[-1]
_, nr_in_root = self.itemstack[0]
else:
parent = w
nr_in_root = self.item_nr
item = self.factory.createQObject("QTreeWidgetItem",
"item_%d" % len(self.itemstack), (parent, ), False)
if self.item_nr == 0 and not self.itemstack:
self.sorting_enabled = self.factory.invoke("__sortingEnabled", w.isSortingEnabled)
w.setSortingEnabled(False)
self.itemstack.append((item, self.item_nr))
self.item_nr = 0
# We have to access the item via the tree when setting the
# text.
titm = w.topLevelItem(nr_in_root)
for child, nr_in_parent in self.itemstack[1:]:
titm = titm.child(nr_in_parent)
column = -1
for prop in elem.findall('property'):
c_prop = self.wprops.convert(prop)
c_prop_name = prop.attrib['name']
if c_prop_name == 'text':
column += 1
if c_prop:
titm.setText(column, c_prop)
elif c_prop_name == 'statusTip':
item.setStatusTip(column, c_prop)
elif c_prop_name == 'toolTip':
item.setToolTip(column, c_prop)
elif c_prop_name == 'whatsThis':
item.setWhatsThis(column, c_prop)
elif c_prop_name == 'font':
item.setFont(column, c_prop)
elif c_prop_name == 'icon':
item.setIcon(column, c_prop)
elif c_prop_name == 'background':
item.setBackground(column, c_prop)
elif c_prop_name == 'foreground':
item.setForeground(column, c_prop)
elif c_prop_name == 'flags':
item.setFlags(c_prop)
elif c_prop_name == 'checkState':
item.setCheckState(column, c_prop)
self.traverseWidgetTree(elem)
_, self.item_nr = self.itemstack.pop()
elif isinstance(w, QtWidgets.QTableWidget):
row = int(elem.attrib['row'])
col = int(elem.attrib['column'])
self.disableSorting(w)
item = self.createWidgetItem('QTableWidgetItem', elem, w.item,
row, col)
w.setItem(row, col, item)
self.item_nr += 1
def addAction(self, elem):
self.actions.append((self.stack.topwidget, elem.attrib["name"]))
@staticmethod
def any_i18n(*args):
""" Return True if any argument appears to be an i18n string. """
for a in args:
if a is not None and not isinstance(a, str):
return True
return False
def createWidgetItem(self, item_type, elem, getter, *getter_args):
""" Create a specific type of widget item. """
item = self.factory.createQObject(item_type, "item", (), False)
props = self.wprops
# Note that not all types of widget items support the full set of
# properties.
text = props.getProperty(elem, 'text')
status_tip = props.getProperty(elem, 'statusTip')
tool_tip = props.getProperty(elem, 'toolTip')
whats_this = props.getProperty(elem, 'whatsThis')
if self.any_i18n(text, status_tip, tool_tip, whats_this):
self.factory.invoke("item", getter, getter_args)
if text:
item.setText(text)
if status_tip:
item.setStatusTip(status_tip)
if tool_tip:
item.setToolTip(tool_tip)
if whats_this:
item.setWhatsThis(whats_this)
text_alignment = props.getProperty(elem, 'textAlignment')
if text_alignment:
item.setTextAlignment(text_alignment)
font = props.getProperty(elem, 'font')
if font:
item.setFont(font)
icon = props.getProperty(elem, 'icon')
if icon:
item.setIcon(icon)
background = props.getProperty(elem, 'background')
if background:
item.setBackground(background)
foreground = props.getProperty(elem, 'foreground')
if foreground:
item.setForeground(foreground)
flags = props.getProperty(elem, 'flags')
if flags:
item.setFlags(flags)
check_state = props.getProperty(elem, 'checkState')
if check_state:
item.setCheckState(check_state)
return item
def addHeader(self, elem):
w = self.stack.topwidget
if isinstance(w, QtWidgets.QTreeWidget):
props = self.wprops
col = self.column_counter
text = props.getProperty(elem, 'text')
if text:
w.headerItem().setText(col, text)
status_tip = props.getProperty(elem, 'statusTip')
if status_tip:
w.headerItem().setStatusTip(col, status_tip)
tool_tip = props.getProperty(elem, 'toolTip')
if tool_tip:
w.headerItem().setToolTip(col, tool_tip)
whats_this = props.getProperty(elem, 'whatsThis')
if whats_this:
w.headerItem().setWhatsThis(col, whats_this)
text_alignment = props.getProperty(elem, 'textAlignment')
if text_alignment:
w.headerItem().setTextAlignment(col, text_alignment)
font = props.getProperty(elem, 'font')
if font:
w.headerItem().setFont(col, font)
icon = props.getProperty(elem, 'icon')
if icon:
w.headerItem().setIcon(col, icon)
background = props.getProperty(elem, 'background')
if background:
w.headerItem().setBackground(col, background)
foreground = props.getProperty(elem, 'foreground')
if foreground:
w.headerItem().setForeground(col, foreground)
self.column_counter += 1
elif isinstance(w, QtWidgets.QTableWidget):
if len(elem) != 0:
if elem.tag == 'column':
item = self.createWidgetItem('QTableWidgetItem', elem,
w.horizontalHeaderItem, self.column_counter)
w.setHorizontalHeaderItem(self.column_counter, item)
self.column_counter += 1
elif elem.tag == 'row':
item = self.createWidgetItem('QTableWidgetItem', elem,
w.verticalHeaderItem, self.row_counter)
w.setVerticalHeaderItem(self.row_counter, item)
self.row_counter += 1
def setZOrder(self, elem):
# Designer can generate empty zorder elements.
if elem.text is None:
return
# Designer allows the z-order of spacer items to be specified even
# though they can't be raised, so ignore any missing raise_() method.
try:
getattr(self.toplevelWidget, elem.text).raise_()
except AttributeError:
# Note that uic issues a warning message.
pass
def createAction(self, elem):
self.setupObject("QAction", self.currentActionGroup or self.toplevelWidget,
elem)
def createActionGroup(self, elem):
action_group = self.setupObject("QActionGroup", self.toplevelWidget, elem)
self.currentActionGroup = action_group
self.traverseWidgetTree(elem)
self.currentActionGroup = None
widgetTreeItemHandlers = {
"widget" : createWidget,
"addaction" : addAction,
"layout" : createLayout,
"spacer" : createSpacer,
"item" : handleItem,
"action" : createAction,
"actiongroup": createActionGroup,
"column" : addHeader,
"row" : addHeader,
"zorder" : setZOrder,
}
def traverseWidgetTree(self, elem):
for child in iter(elem):
try:
handler = self.widgetTreeItemHandlers[child.tag]
except KeyError:
continue
handler(self, child)
def createUserInterface(self, elem):
# Get the names of the class and widget.
cname = elem.attrib["class"]
wname = elem.attrib["name"]
# If there was no widget name then derive it from the class name.
if not wname:
wname = cname
if wname.startswith("Q"):
wname = wname[1:]
wname = wname[0].lower() + wname[1:]
self.toplevelWidget = self.createToplevelWidget(cname, wname)
self.toplevelWidget.setObjectName(wname)
DEBUG("toplevel widget is %s",
self.toplevelWidget.metaObject().className())
self.wprops.setProperties(self.toplevelWidget, elem)
self.stack.push(self.toplevelWidget)
self.traverseWidgetTree(elem)
self.stack.popWidget()
self.addActions()
self.setBuddies()
self.setDelayedProps()
def addActions(self):
for widget, action_name in self.actions:
if action_name == "separator":
widget.addSeparator()
else:
DEBUG("add action %s to %s", action_name, widget.objectName())
action_obj = getattr(self.toplevelWidget, action_name)
if isinstance(action_obj, QtWidgets.QMenu):
widget.addAction(action_obj.menuAction())
elif not isinstance(action_obj, QtWidgets.QActionGroup):
widget.addAction(action_obj)
def setDelayedProps(self):
for widget, layout, setter, args in self.wprops.delayed_props:
if layout:
widget = widget.layout()
setter = getattr(widget, setter)
setter(args)
def setBuddies(self):
for widget, buddy in self.wprops.buddies:
DEBUG("%s is buddy of %s", buddy, widget.objectName())
try:
widget.setBuddy(getattr(self.toplevelWidget, buddy))
except AttributeError:
DEBUG("ERROR in ui spec: %s (buddy of %s) does not exist",
buddy, widget.objectName())
def classname(self, elem):
DEBUG("uiname is %s", elem.text)
name = elem.text
if name is None:
name = ""
self.uiname = name
self.wprops.uiname = name
self.setContext(name)
def setContext(self, context):
"""
Reimplemented by a sub-class if it needs to know the translation
context.
"""
pass
def readDefaults(self, elem):
self.defaults['margin'] = int(elem.attrib['margin'])
self.defaults['spacing'] = int(elem.attrib['spacing'])
def setTaborder(self, elem):
lastwidget = None
for widget_elem in elem:
widget = getattr(self.toplevelWidget, widget_elem.text)
if lastwidget is not None:
self.toplevelWidget.setTabOrder(lastwidget, widget)
lastwidget = widget
def readResources(self, elem):
"""
Read a "resources" tag and add the module to import to the parser's
list of them.
"""
try:
iterator = getattr(elem, 'iter')
except AttributeError:
iterator = getattr(elem, 'getiterator')
for include in iterator("include"):
loc = include.attrib.get("location")
# Apply the convention for naming the Python files generated by
# pyrcc5.
if loc and loc.endswith('.qrc'):
mname = os.path.basename(loc[:-4] + self._resource_suffix)
if mname not in self.resources:
self.resources.append(mname)
def createConnections(self, elem):
def name2object(obj):
if obj == self.uiname:
return self.toplevelWidget
else:
return getattr(self.toplevelWidget, obj)
for conn in iter(elem):
signal = conn.findtext('signal')
signal_name, signal_args = signal.split('(')
signal_args = signal_args[:-1].replace(' ', '')
sender = name2object(conn.findtext('sender'))
bound_signal = getattr(sender, signal_name)
slot = self.factory.getSlot(name2object(conn.findtext('receiver')),
conn.findtext('slot').split('(')[0])
if signal_args == '':
bound_signal.connect(slot)
else:
signal_args = signal_args.split(',')
if len(signal_args) == 1:
bound_signal[signal_args[0]].connect(slot)
else:
bound_signal[tuple(signal_args)].connect(slot)
QtCore.QMetaObject.connectSlotsByName(self.toplevelWidget)
def customWidgets(self, elem):
def header2module(header):
"""header2module(header) -> string
Convert paths to C++ header files to according Python modules
>>> header2module("foo/bar/baz.h")
'foo.bar.baz'
"""
if header.endswith(".h"):
header = header[:-2]
mpath = []
for part in header.split('/'):
# Ignore any empty parts or those that refer to the current
# directory.
if part not in ('', '.'):
if part == '..':
# We should allow this for Python3.
raise SyntaxError("custom widget header file name may not contain '..'.")
mpath.append(part)
return '.'.join(mpath)
for custom_widget in iter(elem):
classname = custom_widget.findtext("class")
self.factory.addCustomWidget(classname,
custom_widget.findtext("extends") or "QWidget",
header2module(custom_widget.findtext("header")))
def createToplevelWidget(self, classname, widgetname):
raise NotImplementedError
def buttonGroups(self, elem):
for button_group in iter(elem):
if button_group.tag == 'buttongroup':
bg_name = button_group.attrib['name']
bg = ButtonGroup()
self.button_groups[bg_name] = bg
prop = self.getProperty(button_group, 'exclusive')
if prop is not None:
if prop.findtext('bool') == 'false':
bg.exclusive = False
# finalize will be called after the whole tree has been parsed and can be
# overridden.
def finalize(self):
pass
def parse(self, filename, resource_suffix, base_dir=''):
self.wprops.set_base_dir(base_dir)
self._resource_suffix = resource_suffix
# The order in which the different branches are handled is important.
# The widget tree handler relies on all custom widgets being known, and
# in order to create the connections, all widgets have to be populated.
branchHandlers = (
("layoutdefault", self.readDefaults),
("class", self.classname),
("buttongroups", self.buttonGroups),
("customwidgets", self.customWidgets),
("widget", self.createUserInterface),
("connections", self.createConnections),
("tabstops", self.setTaborder),
("resources", self.readResources),
)
document = parse(filename)
version = document.getroot().attrib["version"]
DEBUG("UI version is %s" % (version,))
# Right now, only version 4.0 is supported.
assert version in ("4.0",)
for tagname, actor in branchHandlers:
elem = document.find(tagname)
if elem is not None:
actor(elem)
self.finalize()
w = self.toplevelWidget
self.reset()
return w
@staticmethod
def _form_layout_role(layout_position):
if layout_position[3] > 1:
role = QtWidgets.QFormLayout.SpanningRole
elif layout_position[1] == 1:
role = QtWidgets.QFormLayout.FieldRole
else:
role = QtWidgets.QFormLayout.LabelRole
return role
|
pyqt/python-qt5 | PyQt5/uic/uiparser.py | UIParser.any_i18n | python | def any_i18n(*args):
for a in args:
if a is not None and not isinstance(a, str):
return True
return False | Return True if any argument appears to be an i18n string. | train | https://github.com/pyqt/python-qt5/blob/c9ed180c56f6fd3521ffe5fb70904bc5d3f50e5f/PyQt5/uic/uiparser.py#L626-L633 | null | class UIParser(object):
def __init__(self, qtcore_module, qtgui_module, qtwidgets_module, creatorPolicy):
self.factory = QObjectCreator(creatorPolicy)
self.wprops = Properties(self.factory, qtcore_module, qtgui_module,
qtwidgets_module)
global QtCore, QtWidgets
QtCore = qtcore_module
QtWidgets = qtwidgets_module
self.reset()
def uniqueName(self, name):
"""UIParser.uniqueName(string) -> string
Create a unique name from a string.
>>> p = UIParser(QtCore, QtGui, QtWidgets)
>>> p.uniqueName("foo")
'foo'
>>> p.uniqueName("foo")
'foo1'
"""
try:
suffix = self.name_suffixes[name]
except KeyError:
self.name_suffixes[name] = 0
return name
suffix += 1
self.name_suffixes[name] = suffix
return "%s%i" % (name, suffix)
def reset(self):
try: self.wprops.reset()
except AttributeError: pass
self.toplevelWidget = None
self.stack = WidgetStack()
self.name_suffixes = {}
self.defaults = {'spacing': -1, 'margin': -1}
self.actions = []
self.currentActionGroup = None
self.resources = []
self.button_groups = {}
def setupObject(self, clsname, parent, branch, is_attribute=True):
name = self.uniqueName(branch.attrib.get('name') or clsname[1:].lower())
if parent is None:
args = ()
else:
args = (parent, )
obj = self.factory.createQObject(clsname, name, args, is_attribute)
self.wprops.setProperties(obj, branch)
obj.setObjectName(name)
if is_attribute:
setattr(self.toplevelWidget, name, obj)
return obj
def getProperty(self, elem, name):
for prop in elem.findall('property'):
if prop.attrib['name'] == name:
return prop
return None
def createWidget(self, elem):
self.column_counter = 0
self.row_counter = 0
self.item_nr = 0
self.itemstack = []
self.sorting_enabled = None
widget_class = elem.attrib['class'].replace('::', '.')
if widget_class == 'Line':
widget_class = 'QFrame'
# Ignore the parent if it is a container.
parent = self.stack.topwidget
if isinstance(parent, (QtWidgets.QDockWidget, QtWidgets.QMdiArea,
QtWidgets.QScrollArea, QtWidgets.QStackedWidget,
QtWidgets.QToolBox, QtWidgets.QTabWidget,
QtWidgets.QWizard)):
parent = None
self.stack.push(self.setupObject(widget_class, parent, elem))
if isinstance(self.stack.topwidget, QtWidgets.QTableWidget):
if self.getProperty(elem, 'columnCount') is None:
self.stack.topwidget.setColumnCount(len(elem.findall("column")))
if self.getProperty(elem, 'rowCount') is None:
self.stack.topwidget.setRowCount(len(elem.findall("row")))
self.traverseWidgetTree(elem)
widget = self.stack.popWidget()
if isinstance(widget, QtWidgets.QTreeView):
self.handleHeaderView(elem, "header", widget.header())
elif isinstance(widget, QtWidgets.QTableView):
self.handleHeaderView(elem, "horizontalHeader",
widget.horizontalHeader())
self.handleHeaderView(elem, "verticalHeader",
widget.verticalHeader())
elif isinstance(widget, QtWidgets.QAbstractButton):
bg_i18n = self.wprops.getAttribute(elem, "buttonGroup")
if bg_i18n is not None:
# This should be handled properly in case the problem arises
# elsewhere as well.
try:
# We are compiling the .ui file.
bg_name = bg_i18n.string
except AttributeError:
# We are loading the .ui file.
bg_name = bg_i18n
# Designer allows the creation of .ui files without explicit
# button groups, even though uic then issues warnings. We
# handle it in two stages by first making sure it has a name
# and then making sure one exists with that name.
if not bg_name:
bg_name = 'buttonGroup'
try:
bg = self.button_groups[bg_name]
except KeyError:
bg = self.button_groups[bg_name] = ButtonGroup()
if bg.object is None:
bg.object = self.factory.createQObject("QButtonGroup",
bg_name, (self.toplevelWidget, ))
setattr(self.toplevelWidget, bg_name, bg.object)
bg.object.setObjectName(bg_name)
if not bg.exclusive:
bg.object.setExclusive(False)
bg.object.addButton(widget)
if self.sorting_enabled is not None:
widget.setSortingEnabled(self.sorting_enabled)
self.sorting_enabled = None
if self.stack.topIsLayout():
lay = self.stack.peek()
lp = elem.attrib['layout-position']
if isinstance(lay, QtWidgets.QFormLayout):
lay.setWidget(lp[0], self._form_layout_role(lp), widget)
else:
lay.addWidget(widget, *lp)
topwidget = self.stack.topwidget
if isinstance(topwidget, QtWidgets.QToolBox):
icon = self.wprops.getAttribute(elem, "icon")
if icon is not None:
topwidget.addItem(widget, icon, self.wprops.getAttribute(elem, "label"))
else:
topwidget.addItem(widget, self.wprops.getAttribute(elem, "label"))
tooltip = self.wprops.getAttribute(elem, "toolTip")
if tooltip is not None:
topwidget.setItemToolTip(topwidget.indexOf(widget), tooltip)
elif isinstance(topwidget, QtWidgets.QTabWidget):
icon = self.wprops.getAttribute(elem, "icon")
if icon is not None:
topwidget.addTab(widget, icon, self.wprops.getAttribute(elem, "title"))
else:
topwidget.addTab(widget, self.wprops.getAttribute(elem, "title"))
tooltip = self.wprops.getAttribute(elem, "toolTip")
if tooltip is not None:
topwidget.setTabToolTip(topwidget.indexOf(widget), tooltip)
elif isinstance(topwidget, QtWidgets.QWizard):
topwidget.addPage(widget)
elif isinstance(topwidget, QtWidgets.QStackedWidget):
topwidget.addWidget(widget)
elif isinstance(topwidget, (QtWidgets.QDockWidget, QtWidgets.QScrollArea)):
topwidget.setWidget(widget)
elif isinstance(topwidget, QtWidgets.QMainWindow):
if type(widget) == QtWidgets.QWidget:
topwidget.setCentralWidget(widget)
elif isinstance(widget, QtWidgets.QToolBar):
tbArea = self.wprops.getAttribute(elem, "toolBarArea")
if tbArea is None:
topwidget.addToolBar(widget)
else:
topwidget.addToolBar(tbArea, widget)
tbBreak = self.wprops.getAttribute(elem, "toolBarBreak")
if tbBreak:
topwidget.insertToolBarBreak(widget)
elif isinstance(widget, QtWidgets.QMenuBar):
topwidget.setMenuBar(widget)
elif isinstance(widget, QtWidgets.QStatusBar):
topwidget.setStatusBar(widget)
elif isinstance(widget, QtWidgets.QDockWidget):
dwArea = self.wprops.getAttribute(elem, "dockWidgetArea")
topwidget.addDockWidget(QtCore.Qt.DockWidgetArea(dwArea),
widget)
def handleHeaderView(self, elem, name, header):
value = self.wprops.getAttribute(elem, name + "Visible")
if value is not None:
header.setVisible(value)
value = self.wprops.getAttribute(elem, name + "CascadingSectionResizes")
if value is not None:
header.setCascadingSectionResizes(value)
value = self.wprops.getAttribute(elem, name + "DefaultSectionSize")
if value is not None:
header.setDefaultSectionSize(value)
value = self.wprops.getAttribute(elem, name + "HighlightSections")
if value is not None:
header.setHighlightSections(value)
value = self.wprops.getAttribute(elem, name + "MinimumSectionSize")
if value is not None:
header.setMinimumSectionSize(value)
value = self.wprops.getAttribute(elem, name + "ShowSortIndicator")
if value is not None:
header.setSortIndicatorShown(value)
value = self.wprops.getAttribute(elem, name + "StretchLastSection")
if value is not None:
header.setStretchLastSection(value)
def createSpacer(self, elem):
width = elem.findtext("property/size/width")
height = elem.findtext("property/size/height")
if width is None or height is None:
size_args = ()
else:
size_args = (int(width), int(height))
sizeType = self.wprops.getProperty(elem, "sizeType",
QtWidgets.QSizePolicy.Expanding)
policy = (QtWidgets.QSizePolicy.Minimum, sizeType)
if self.wprops.getProperty(elem, "orientation") == QtCore.Qt.Horizontal:
policy = policy[1], policy[0]
spacer = self.factory.createQObject("QSpacerItem",
self.uniqueName("spacerItem"), size_args + policy,
is_attribute=False)
if self.stack.topIsLayout():
lay = self.stack.peek()
lp = elem.attrib['layout-position']
if isinstance(lay, QtWidgets.QFormLayout):
lay.setItem(lp[0], self._form_layout_role(lp), spacer)
else:
lay.addItem(spacer, *lp)
def createLayout(self, elem):
# We use an internal property to handle margins which will use separate
# left, top, right and bottom margins if they are found to be
# different. The following will select, in order of preference,
# separate margins, the same margin in all directions, and the default
# margin.
margin = self.wprops.getProperty(elem, 'margin',
self.defaults['margin'])
left = self.wprops.getProperty(elem, 'leftMargin', margin)
top = self.wprops.getProperty(elem, 'topMargin', margin)
right = self.wprops.getProperty(elem, 'rightMargin', margin)
bottom = self.wprops.getProperty(elem, 'bottomMargin', margin)
# A layout widget should, by default, have no margins.
if self.stack.topIsLayoutWidget():
if left < 0: left = 0
if top < 0: top = 0
if right < 0: right = 0
if bottom < 0: bottom = 0
if left >= 0 or top >= 0 or right >= 0 or bottom >= 0:
# We inject the new internal property.
cme = SubElement(elem, 'property', name='pyuicMargins')
SubElement(cme, 'number').text = str(left)
SubElement(cme, 'number').text = str(top)
SubElement(cme, 'number').text = str(right)
SubElement(cme, 'number').text = str(bottom)
# We use an internal property to handle spacing which will use separate
# horizontal and vertical spacing if they are found to be different.
# The following will select, in order of preference, separate
# horizontal and vertical spacing, the same spacing in both directions,
# and the default spacing.
spacing = self.wprops.getProperty(elem, 'spacing',
self.defaults['spacing'])
horiz = self.wprops.getProperty(elem, 'horizontalSpacing', spacing)
vert = self.wprops.getProperty(elem, 'verticalSpacing', spacing)
if horiz >= 0 or vert >= 0:
# We inject the new internal property.
cme = SubElement(elem, 'property', name='pyuicSpacing')
SubElement(cme, 'number').text = str(horiz)
SubElement(cme, 'number').text = str(vert)
classname = elem.attrib["class"]
if self.stack.topIsLayout():
parent = None
else:
parent = self.stack.topwidget
if "name" not in elem.attrib:
elem.attrib["name"] = classname[1:].lower()
self.stack.push(self.setupObject(classname, parent, elem))
self.traverseWidgetTree(elem)
layout = self.stack.popLayout()
self.configureLayout(elem, layout)
if self.stack.topIsLayout():
top_layout = self.stack.peek()
lp = elem.attrib['layout-position']
if isinstance(top_layout, QtWidgets.QFormLayout):
top_layout.setLayout(lp[0], self._form_layout_role(lp), layout)
else:
top_layout.addLayout(layout, *lp)
def configureLayout(self, elem, layout):
if isinstance(layout, QtWidgets.QGridLayout):
self.setArray(elem, 'columnminimumwidth',
layout.setColumnMinimumWidth)
self.setArray(elem, 'rowminimumheight',
layout.setRowMinimumHeight)
self.setArray(elem, 'columnstretch', layout.setColumnStretch)
self.setArray(elem, 'rowstretch', layout.setRowStretch)
elif isinstance(layout, QtWidgets.QBoxLayout):
self.setArray(elem, 'stretch', layout.setStretch)
def setArray(self, elem, name, setter):
array = elem.attrib.get(name)
if array:
for idx, value in enumerate(array.split(',')):
value = int(value)
if value > 0:
setter(idx, value)
def disableSorting(self, w):
if self.item_nr == 0:
self.sorting_enabled = self.factory.invoke("__sortingEnabled",
w.isSortingEnabled)
w.setSortingEnabled(False)
def handleItem(self, elem):
if self.stack.topIsLayout():
elem[0].attrib['layout-position'] = _layout_position(elem)
self.traverseWidgetTree(elem)
else:
w = self.stack.topwidget
if isinstance(w, QtWidgets.QComboBox):
text = self.wprops.getProperty(elem, "text")
icon = self.wprops.getProperty(elem, "icon")
if icon:
w.addItem(icon, '')
else:
w.addItem('')
w.setItemText(self.item_nr, text)
elif isinstance(w, QtWidgets.QListWidget):
self.disableSorting(w)
item = self.createWidgetItem('QListWidgetItem', elem, w.item,
self.item_nr)
w.addItem(item)
elif isinstance(w, QtWidgets.QTreeWidget):
if self.itemstack:
parent, _ = self.itemstack[-1]
_, nr_in_root = self.itemstack[0]
else:
parent = w
nr_in_root = self.item_nr
item = self.factory.createQObject("QTreeWidgetItem",
"item_%d" % len(self.itemstack), (parent, ), False)
if self.item_nr == 0 and not self.itemstack:
self.sorting_enabled = self.factory.invoke("__sortingEnabled", w.isSortingEnabled)
w.setSortingEnabled(False)
self.itemstack.append((item, self.item_nr))
self.item_nr = 0
# We have to access the item via the tree when setting the
# text.
titm = w.topLevelItem(nr_in_root)
for child, nr_in_parent in self.itemstack[1:]:
titm = titm.child(nr_in_parent)
column = -1
for prop in elem.findall('property'):
c_prop = self.wprops.convert(prop)
c_prop_name = prop.attrib['name']
if c_prop_name == 'text':
column += 1
if c_prop:
titm.setText(column, c_prop)
elif c_prop_name == 'statusTip':
item.setStatusTip(column, c_prop)
elif c_prop_name == 'toolTip':
item.setToolTip(column, c_prop)
elif c_prop_name == 'whatsThis':
item.setWhatsThis(column, c_prop)
elif c_prop_name == 'font':
item.setFont(column, c_prop)
elif c_prop_name == 'icon':
item.setIcon(column, c_prop)
elif c_prop_name == 'background':
item.setBackground(column, c_prop)
elif c_prop_name == 'foreground':
item.setForeground(column, c_prop)
elif c_prop_name == 'flags':
item.setFlags(c_prop)
elif c_prop_name == 'checkState':
item.setCheckState(column, c_prop)
self.traverseWidgetTree(elem)
_, self.item_nr = self.itemstack.pop()
elif isinstance(w, QtWidgets.QTableWidget):
row = int(elem.attrib['row'])
col = int(elem.attrib['column'])
self.disableSorting(w)
item = self.createWidgetItem('QTableWidgetItem', elem, w.item,
row, col)
w.setItem(row, col, item)
self.item_nr += 1
def addAction(self, elem):
self.actions.append((self.stack.topwidget, elem.attrib["name"]))
@staticmethod
def createWidgetItem(self, item_type, elem, getter, *getter_args):
""" Create a specific type of widget item. """
item = self.factory.createQObject(item_type, "item", (), False)
props = self.wprops
# Note that not all types of widget items support the full set of
# properties.
text = props.getProperty(elem, 'text')
status_tip = props.getProperty(elem, 'statusTip')
tool_tip = props.getProperty(elem, 'toolTip')
whats_this = props.getProperty(elem, 'whatsThis')
if self.any_i18n(text, status_tip, tool_tip, whats_this):
self.factory.invoke("item", getter, getter_args)
if text:
item.setText(text)
if status_tip:
item.setStatusTip(status_tip)
if tool_tip:
item.setToolTip(tool_tip)
if whats_this:
item.setWhatsThis(whats_this)
text_alignment = props.getProperty(elem, 'textAlignment')
if text_alignment:
item.setTextAlignment(text_alignment)
font = props.getProperty(elem, 'font')
if font:
item.setFont(font)
icon = props.getProperty(elem, 'icon')
if icon:
item.setIcon(icon)
background = props.getProperty(elem, 'background')
if background:
item.setBackground(background)
foreground = props.getProperty(elem, 'foreground')
if foreground:
item.setForeground(foreground)
flags = props.getProperty(elem, 'flags')
if flags:
item.setFlags(flags)
check_state = props.getProperty(elem, 'checkState')
if check_state:
item.setCheckState(check_state)
return item
def addHeader(self, elem):
w = self.stack.topwidget
if isinstance(w, QtWidgets.QTreeWidget):
props = self.wprops
col = self.column_counter
text = props.getProperty(elem, 'text')
if text:
w.headerItem().setText(col, text)
status_tip = props.getProperty(elem, 'statusTip')
if status_tip:
w.headerItem().setStatusTip(col, status_tip)
tool_tip = props.getProperty(elem, 'toolTip')
if tool_tip:
w.headerItem().setToolTip(col, tool_tip)
whats_this = props.getProperty(elem, 'whatsThis')
if whats_this:
w.headerItem().setWhatsThis(col, whats_this)
text_alignment = props.getProperty(elem, 'textAlignment')
if text_alignment:
w.headerItem().setTextAlignment(col, text_alignment)
font = props.getProperty(elem, 'font')
if font:
w.headerItem().setFont(col, font)
icon = props.getProperty(elem, 'icon')
if icon:
w.headerItem().setIcon(col, icon)
background = props.getProperty(elem, 'background')
if background:
w.headerItem().setBackground(col, background)
foreground = props.getProperty(elem, 'foreground')
if foreground:
w.headerItem().setForeground(col, foreground)
self.column_counter += 1
elif isinstance(w, QtWidgets.QTableWidget):
if len(elem) != 0:
if elem.tag == 'column':
item = self.createWidgetItem('QTableWidgetItem', elem,
w.horizontalHeaderItem, self.column_counter)
w.setHorizontalHeaderItem(self.column_counter, item)
self.column_counter += 1
elif elem.tag == 'row':
item = self.createWidgetItem('QTableWidgetItem', elem,
w.verticalHeaderItem, self.row_counter)
w.setVerticalHeaderItem(self.row_counter, item)
self.row_counter += 1
def setZOrder(self, elem):
# Designer can generate empty zorder elements.
if elem.text is None:
return
# Designer allows the z-order of spacer items to be specified even
# though they can't be raised, so ignore any missing raise_() method.
try:
getattr(self.toplevelWidget, elem.text).raise_()
except AttributeError:
# Note that uic issues a warning message.
pass
def createAction(self, elem):
self.setupObject("QAction", self.currentActionGroup or self.toplevelWidget,
elem)
def createActionGroup(self, elem):
action_group = self.setupObject("QActionGroup", self.toplevelWidget, elem)
self.currentActionGroup = action_group
self.traverseWidgetTree(elem)
self.currentActionGroup = None
widgetTreeItemHandlers = {
"widget" : createWidget,
"addaction" : addAction,
"layout" : createLayout,
"spacer" : createSpacer,
"item" : handleItem,
"action" : createAction,
"actiongroup": createActionGroup,
"column" : addHeader,
"row" : addHeader,
"zorder" : setZOrder,
}
def traverseWidgetTree(self, elem):
for child in iter(elem):
try:
handler = self.widgetTreeItemHandlers[child.tag]
except KeyError:
continue
handler(self, child)
def createUserInterface(self, elem):
# Get the names of the class and widget.
cname = elem.attrib["class"]
wname = elem.attrib["name"]
# If there was no widget name then derive it from the class name.
if not wname:
wname = cname
if wname.startswith("Q"):
wname = wname[1:]
wname = wname[0].lower() + wname[1:]
self.toplevelWidget = self.createToplevelWidget(cname, wname)
self.toplevelWidget.setObjectName(wname)
DEBUG("toplevel widget is %s",
self.toplevelWidget.metaObject().className())
self.wprops.setProperties(self.toplevelWidget, elem)
self.stack.push(self.toplevelWidget)
self.traverseWidgetTree(elem)
self.stack.popWidget()
self.addActions()
self.setBuddies()
self.setDelayedProps()
def addActions(self):
for widget, action_name in self.actions:
if action_name == "separator":
widget.addSeparator()
else:
DEBUG("add action %s to %s", action_name, widget.objectName())
action_obj = getattr(self.toplevelWidget, action_name)
if isinstance(action_obj, QtWidgets.QMenu):
widget.addAction(action_obj.menuAction())
elif not isinstance(action_obj, QtWidgets.QActionGroup):
widget.addAction(action_obj)
def setDelayedProps(self):
for widget, layout, setter, args in self.wprops.delayed_props:
if layout:
widget = widget.layout()
setter = getattr(widget, setter)
setter(args)
def setBuddies(self):
for widget, buddy in self.wprops.buddies:
DEBUG("%s is buddy of %s", buddy, widget.objectName())
try:
widget.setBuddy(getattr(self.toplevelWidget, buddy))
except AttributeError:
DEBUG("ERROR in ui spec: %s (buddy of %s) does not exist",
buddy, widget.objectName())
def classname(self, elem):
DEBUG("uiname is %s", elem.text)
name = elem.text
if name is None:
name = ""
self.uiname = name
self.wprops.uiname = name
self.setContext(name)
def setContext(self, context):
"""
Reimplemented by a sub-class if it needs to know the translation
context.
"""
pass
def readDefaults(self, elem):
self.defaults['margin'] = int(elem.attrib['margin'])
self.defaults['spacing'] = int(elem.attrib['spacing'])
def setTaborder(self, elem):
lastwidget = None
for widget_elem in elem:
widget = getattr(self.toplevelWidget, widget_elem.text)
if lastwidget is not None:
self.toplevelWidget.setTabOrder(lastwidget, widget)
lastwidget = widget
def readResources(self, elem):
"""
Read a "resources" tag and add the module to import to the parser's
list of them.
"""
try:
iterator = getattr(elem, 'iter')
except AttributeError:
iterator = getattr(elem, 'getiterator')
for include in iterator("include"):
loc = include.attrib.get("location")
# Apply the convention for naming the Python files generated by
# pyrcc5.
if loc and loc.endswith('.qrc'):
mname = os.path.basename(loc[:-4] + self._resource_suffix)
if mname not in self.resources:
self.resources.append(mname)
def createConnections(self, elem):
def name2object(obj):
if obj == self.uiname:
return self.toplevelWidget
else:
return getattr(self.toplevelWidget, obj)
for conn in iter(elem):
signal = conn.findtext('signal')
signal_name, signal_args = signal.split('(')
signal_args = signal_args[:-1].replace(' ', '')
sender = name2object(conn.findtext('sender'))
bound_signal = getattr(sender, signal_name)
slot = self.factory.getSlot(name2object(conn.findtext('receiver')),
conn.findtext('slot').split('(')[0])
if signal_args == '':
bound_signal.connect(slot)
else:
signal_args = signal_args.split(',')
if len(signal_args) == 1:
bound_signal[signal_args[0]].connect(slot)
else:
bound_signal[tuple(signal_args)].connect(slot)
QtCore.QMetaObject.connectSlotsByName(self.toplevelWidget)
def customWidgets(self, elem):
def header2module(header):
"""header2module(header) -> string
Convert paths to C++ header files to according Python modules
>>> header2module("foo/bar/baz.h")
'foo.bar.baz'
"""
if header.endswith(".h"):
header = header[:-2]
mpath = []
for part in header.split('/'):
# Ignore any empty parts or those that refer to the current
# directory.
if part not in ('', '.'):
if part == '..':
# We should allow this for Python3.
raise SyntaxError("custom widget header file name may not contain '..'.")
mpath.append(part)
return '.'.join(mpath)
for custom_widget in iter(elem):
classname = custom_widget.findtext("class")
self.factory.addCustomWidget(classname,
custom_widget.findtext("extends") or "QWidget",
header2module(custom_widget.findtext("header")))
def createToplevelWidget(self, classname, widgetname):
raise NotImplementedError
def buttonGroups(self, elem):
for button_group in iter(elem):
if button_group.tag == 'buttongroup':
bg_name = button_group.attrib['name']
bg = ButtonGroup()
self.button_groups[bg_name] = bg
prop = self.getProperty(button_group, 'exclusive')
if prop is not None:
if prop.findtext('bool') == 'false':
bg.exclusive = False
# finalize will be called after the whole tree has been parsed and can be
# overridden.
def finalize(self):
pass
def parse(self, filename, resource_suffix, base_dir=''):
self.wprops.set_base_dir(base_dir)
self._resource_suffix = resource_suffix
# The order in which the different branches are handled is important.
# The widget tree handler relies on all custom widgets being known, and
# in order to create the connections, all widgets have to be populated.
branchHandlers = (
("layoutdefault", self.readDefaults),
("class", self.classname),
("buttongroups", self.buttonGroups),
("customwidgets", self.customWidgets),
("widget", self.createUserInterface),
("connections", self.createConnections),
("tabstops", self.setTaborder),
("resources", self.readResources),
)
document = parse(filename)
version = document.getroot().attrib["version"]
DEBUG("UI version is %s" % (version,))
# Right now, only version 4.0 is supported.
assert version in ("4.0",)
for tagname, actor in branchHandlers:
elem = document.find(tagname)
if elem is not None:
actor(elem)
self.finalize()
w = self.toplevelWidget
self.reset()
return w
@staticmethod
def _form_layout_role(layout_position):
if layout_position[3] > 1:
role = QtWidgets.QFormLayout.SpanningRole
elif layout_position[1] == 1:
role = QtWidgets.QFormLayout.FieldRole
else:
role = QtWidgets.QFormLayout.LabelRole
return role
|
pyqt/python-qt5 | PyQt5/uic/uiparser.py | UIParser.createWidgetItem | python | def createWidgetItem(self, item_type, elem, getter, *getter_args):
item = self.factory.createQObject(item_type, "item", (), False)
props = self.wprops
# Note that not all types of widget items support the full set of
# properties.
text = props.getProperty(elem, 'text')
status_tip = props.getProperty(elem, 'statusTip')
tool_tip = props.getProperty(elem, 'toolTip')
whats_this = props.getProperty(elem, 'whatsThis')
if self.any_i18n(text, status_tip, tool_tip, whats_this):
self.factory.invoke("item", getter, getter_args)
if text:
item.setText(text)
if status_tip:
item.setStatusTip(status_tip)
if tool_tip:
item.setToolTip(tool_tip)
if whats_this:
item.setWhatsThis(whats_this)
text_alignment = props.getProperty(elem, 'textAlignment')
if text_alignment:
item.setTextAlignment(text_alignment)
font = props.getProperty(elem, 'font')
if font:
item.setFont(font)
icon = props.getProperty(elem, 'icon')
if icon:
item.setIcon(icon)
background = props.getProperty(elem, 'background')
if background:
item.setBackground(background)
foreground = props.getProperty(elem, 'foreground')
if foreground:
item.setForeground(foreground)
flags = props.getProperty(elem, 'flags')
if flags:
item.setFlags(flags)
check_state = props.getProperty(elem, 'checkState')
if check_state:
item.setCheckState(check_state)
return item | Create a specific type of widget item. | train | https://github.com/pyqt/python-qt5/blob/c9ed180c56f6fd3521ffe5fb70904bc5d3f50e5f/PyQt5/uic/uiparser.py#L635-L692 | [
"def createQObject(self, classname, *args, **kwargs):\n # Handle regular and custom widgets.\n factory = self.findQObjectType(classname)\n\n if factory is None:\n # Handle scoped names, typically static factory methods.\n parts = classname.split('.')\n\n if len(parts) > 1:\n factory = self.findQObjectType(parts[0])\n\n if factory is not None:\n for part in parts[1:]:\n factory = getattr(factory, part, None)\n if factory is None:\n break\n\n if factory is None:\n raise NoSuchWidgetError(classname)\n\n return self._cpolicy.instantiate(factory, *args, **kwargs)\n",
"def getProperty(self, elem, name, default=None):\n return self._getChild(\"property\", elem, name, default)\n",
"def any_i18n(*args):\n \"\"\" Return True if any argument appears to be an i18n string. \"\"\"\n\n for a in args:\n if a is not None and not isinstance(a, str):\n return True\n\n return False\n"
] | class UIParser(object):
def __init__(self, qtcore_module, qtgui_module, qtwidgets_module, creatorPolicy):
self.factory = QObjectCreator(creatorPolicy)
self.wprops = Properties(self.factory, qtcore_module, qtgui_module,
qtwidgets_module)
global QtCore, QtWidgets
QtCore = qtcore_module
QtWidgets = qtwidgets_module
self.reset()
def uniqueName(self, name):
"""UIParser.uniqueName(string) -> string
Create a unique name from a string.
>>> p = UIParser(QtCore, QtGui, QtWidgets)
>>> p.uniqueName("foo")
'foo'
>>> p.uniqueName("foo")
'foo1'
"""
try:
suffix = self.name_suffixes[name]
except KeyError:
self.name_suffixes[name] = 0
return name
suffix += 1
self.name_suffixes[name] = suffix
return "%s%i" % (name, suffix)
def reset(self):
try: self.wprops.reset()
except AttributeError: pass
self.toplevelWidget = None
self.stack = WidgetStack()
self.name_suffixes = {}
self.defaults = {'spacing': -1, 'margin': -1}
self.actions = []
self.currentActionGroup = None
self.resources = []
self.button_groups = {}
def setupObject(self, clsname, parent, branch, is_attribute=True):
name = self.uniqueName(branch.attrib.get('name') or clsname[1:].lower())
if parent is None:
args = ()
else:
args = (parent, )
obj = self.factory.createQObject(clsname, name, args, is_attribute)
self.wprops.setProperties(obj, branch)
obj.setObjectName(name)
if is_attribute:
setattr(self.toplevelWidget, name, obj)
return obj
def getProperty(self, elem, name):
for prop in elem.findall('property'):
if prop.attrib['name'] == name:
return prop
return None
def createWidget(self, elem):
self.column_counter = 0
self.row_counter = 0
self.item_nr = 0
self.itemstack = []
self.sorting_enabled = None
widget_class = elem.attrib['class'].replace('::', '.')
if widget_class == 'Line':
widget_class = 'QFrame'
# Ignore the parent if it is a container.
parent = self.stack.topwidget
if isinstance(parent, (QtWidgets.QDockWidget, QtWidgets.QMdiArea,
QtWidgets.QScrollArea, QtWidgets.QStackedWidget,
QtWidgets.QToolBox, QtWidgets.QTabWidget,
QtWidgets.QWizard)):
parent = None
self.stack.push(self.setupObject(widget_class, parent, elem))
if isinstance(self.stack.topwidget, QtWidgets.QTableWidget):
if self.getProperty(elem, 'columnCount') is None:
self.stack.topwidget.setColumnCount(len(elem.findall("column")))
if self.getProperty(elem, 'rowCount') is None:
self.stack.topwidget.setRowCount(len(elem.findall("row")))
self.traverseWidgetTree(elem)
widget = self.stack.popWidget()
if isinstance(widget, QtWidgets.QTreeView):
self.handleHeaderView(elem, "header", widget.header())
elif isinstance(widget, QtWidgets.QTableView):
self.handleHeaderView(elem, "horizontalHeader",
widget.horizontalHeader())
self.handleHeaderView(elem, "verticalHeader",
widget.verticalHeader())
elif isinstance(widget, QtWidgets.QAbstractButton):
bg_i18n = self.wprops.getAttribute(elem, "buttonGroup")
if bg_i18n is not None:
# This should be handled properly in case the problem arises
# elsewhere as well.
try:
# We are compiling the .ui file.
bg_name = bg_i18n.string
except AttributeError:
# We are loading the .ui file.
bg_name = bg_i18n
# Designer allows the creation of .ui files without explicit
# button groups, even though uic then issues warnings. We
# handle it in two stages by first making sure it has a name
# and then making sure one exists with that name.
if not bg_name:
bg_name = 'buttonGroup'
try:
bg = self.button_groups[bg_name]
except KeyError:
bg = self.button_groups[bg_name] = ButtonGroup()
if bg.object is None:
bg.object = self.factory.createQObject("QButtonGroup",
bg_name, (self.toplevelWidget, ))
setattr(self.toplevelWidget, bg_name, bg.object)
bg.object.setObjectName(bg_name)
if not bg.exclusive:
bg.object.setExclusive(False)
bg.object.addButton(widget)
if self.sorting_enabled is not None:
widget.setSortingEnabled(self.sorting_enabled)
self.sorting_enabled = None
if self.stack.topIsLayout():
lay = self.stack.peek()
lp = elem.attrib['layout-position']
if isinstance(lay, QtWidgets.QFormLayout):
lay.setWidget(lp[0], self._form_layout_role(lp), widget)
else:
lay.addWidget(widget, *lp)
topwidget = self.stack.topwidget
if isinstance(topwidget, QtWidgets.QToolBox):
icon = self.wprops.getAttribute(elem, "icon")
if icon is not None:
topwidget.addItem(widget, icon, self.wprops.getAttribute(elem, "label"))
else:
topwidget.addItem(widget, self.wprops.getAttribute(elem, "label"))
tooltip = self.wprops.getAttribute(elem, "toolTip")
if tooltip is not None:
topwidget.setItemToolTip(topwidget.indexOf(widget), tooltip)
elif isinstance(topwidget, QtWidgets.QTabWidget):
icon = self.wprops.getAttribute(elem, "icon")
if icon is not None:
topwidget.addTab(widget, icon, self.wprops.getAttribute(elem, "title"))
else:
topwidget.addTab(widget, self.wprops.getAttribute(elem, "title"))
tooltip = self.wprops.getAttribute(elem, "toolTip")
if tooltip is not None:
topwidget.setTabToolTip(topwidget.indexOf(widget), tooltip)
elif isinstance(topwidget, QtWidgets.QWizard):
topwidget.addPage(widget)
elif isinstance(topwidget, QtWidgets.QStackedWidget):
topwidget.addWidget(widget)
elif isinstance(topwidget, (QtWidgets.QDockWidget, QtWidgets.QScrollArea)):
topwidget.setWidget(widget)
elif isinstance(topwidget, QtWidgets.QMainWindow):
if type(widget) == QtWidgets.QWidget:
topwidget.setCentralWidget(widget)
elif isinstance(widget, QtWidgets.QToolBar):
tbArea = self.wprops.getAttribute(elem, "toolBarArea")
if tbArea is None:
topwidget.addToolBar(widget)
else:
topwidget.addToolBar(tbArea, widget)
tbBreak = self.wprops.getAttribute(elem, "toolBarBreak")
if tbBreak:
topwidget.insertToolBarBreak(widget)
elif isinstance(widget, QtWidgets.QMenuBar):
topwidget.setMenuBar(widget)
elif isinstance(widget, QtWidgets.QStatusBar):
topwidget.setStatusBar(widget)
elif isinstance(widget, QtWidgets.QDockWidget):
dwArea = self.wprops.getAttribute(elem, "dockWidgetArea")
topwidget.addDockWidget(QtCore.Qt.DockWidgetArea(dwArea),
widget)
def handleHeaderView(self, elem, name, header):
value = self.wprops.getAttribute(elem, name + "Visible")
if value is not None:
header.setVisible(value)
value = self.wprops.getAttribute(elem, name + "CascadingSectionResizes")
if value is not None:
header.setCascadingSectionResizes(value)
value = self.wprops.getAttribute(elem, name + "DefaultSectionSize")
if value is not None:
header.setDefaultSectionSize(value)
value = self.wprops.getAttribute(elem, name + "HighlightSections")
if value is not None:
header.setHighlightSections(value)
value = self.wprops.getAttribute(elem, name + "MinimumSectionSize")
if value is not None:
header.setMinimumSectionSize(value)
value = self.wprops.getAttribute(elem, name + "ShowSortIndicator")
if value is not None:
header.setSortIndicatorShown(value)
value = self.wprops.getAttribute(elem, name + "StretchLastSection")
if value is not None:
header.setStretchLastSection(value)
def createSpacer(self, elem):
width = elem.findtext("property/size/width")
height = elem.findtext("property/size/height")
if width is None or height is None:
size_args = ()
else:
size_args = (int(width), int(height))
sizeType = self.wprops.getProperty(elem, "sizeType",
QtWidgets.QSizePolicy.Expanding)
policy = (QtWidgets.QSizePolicy.Minimum, sizeType)
if self.wprops.getProperty(elem, "orientation") == QtCore.Qt.Horizontal:
policy = policy[1], policy[0]
spacer = self.factory.createQObject("QSpacerItem",
self.uniqueName("spacerItem"), size_args + policy,
is_attribute=False)
if self.stack.topIsLayout():
lay = self.stack.peek()
lp = elem.attrib['layout-position']
if isinstance(lay, QtWidgets.QFormLayout):
lay.setItem(lp[0], self._form_layout_role(lp), spacer)
else:
lay.addItem(spacer, *lp)
def createLayout(self, elem):
# We use an internal property to handle margins which will use separate
# left, top, right and bottom margins if they are found to be
# different. The following will select, in order of preference,
# separate margins, the same margin in all directions, and the default
# margin.
margin = self.wprops.getProperty(elem, 'margin',
self.defaults['margin'])
left = self.wprops.getProperty(elem, 'leftMargin', margin)
top = self.wprops.getProperty(elem, 'topMargin', margin)
right = self.wprops.getProperty(elem, 'rightMargin', margin)
bottom = self.wprops.getProperty(elem, 'bottomMargin', margin)
# A layout widget should, by default, have no margins.
if self.stack.topIsLayoutWidget():
if left < 0: left = 0
if top < 0: top = 0
if right < 0: right = 0
if bottom < 0: bottom = 0
if left >= 0 or top >= 0 or right >= 0 or bottom >= 0:
# We inject the new internal property.
cme = SubElement(elem, 'property', name='pyuicMargins')
SubElement(cme, 'number').text = str(left)
SubElement(cme, 'number').text = str(top)
SubElement(cme, 'number').text = str(right)
SubElement(cme, 'number').text = str(bottom)
# We use an internal property to handle spacing which will use separate
# horizontal and vertical spacing if they are found to be different.
# The following will select, in order of preference, separate
# horizontal and vertical spacing, the same spacing in both directions,
# and the default spacing.
spacing = self.wprops.getProperty(elem, 'spacing',
self.defaults['spacing'])
horiz = self.wprops.getProperty(elem, 'horizontalSpacing', spacing)
vert = self.wprops.getProperty(elem, 'verticalSpacing', spacing)
if horiz >= 0 or vert >= 0:
# We inject the new internal property.
cme = SubElement(elem, 'property', name='pyuicSpacing')
SubElement(cme, 'number').text = str(horiz)
SubElement(cme, 'number').text = str(vert)
classname = elem.attrib["class"]
if self.stack.topIsLayout():
parent = None
else:
parent = self.stack.topwidget
if "name" not in elem.attrib:
elem.attrib["name"] = classname[1:].lower()
self.stack.push(self.setupObject(classname, parent, elem))
self.traverseWidgetTree(elem)
layout = self.stack.popLayout()
self.configureLayout(elem, layout)
if self.stack.topIsLayout():
top_layout = self.stack.peek()
lp = elem.attrib['layout-position']
if isinstance(top_layout, QtWidgets.QFormLayout):
top_layout.setLayout(lp[0], self._form_layout_role(lp), layout)
else:
top_layout.addLayout(layout, *lp)
def configureLayout(self, elem, layout):
if isinstance(layout, QtWidgets.QGridLayout):
self.setArray(elem, 'columnminimumwidth',
layout.setColumnMinimumWidth)
self.setArray(elem, 'rowminimumheight',
layout.setRowMinimumHeight)
self.setArray(elem, 'columnstretch', layout.setColumnStretch)
self.setArray(elem, 'rowstretch', layout.setRowStretch)
elif isinstance(layout, QtWidgets.QBoxLayout):
self.setArray(elem, 'stretch', layout.setStretch)
def setArray(self, elem, name, setter):
array = elem.attrib.get(name)
if array:
for idx, value in enumerate(array.split(',')):
value = int(value)
if value > 0:
setter(idx, value)
def disableSorting(self, w):
if self.item_nr == 0:
self.sorting_enabled = self.factory.invoke("__sortingEnabled",
w.isSortingEnabled)
w.setSortingEnabled(False)
def handleItem(self, elem):
if self.stack.topIsLayout():
elem[0].attrib['layout-position'] = _layout_position(elem)
self.traverseWidgetTree(elem)
else:
w = self.stack.topwidget
if isinstance(w, QtWidgets.QComboBox):
text = self.wprops.getProperty(elem, "text")
icon = self.wprops.getProperty(elem, "icon")
if icon:
w.addItem(icon, '')
else:
w.addItem('')
w.setItemText(self.item_nr, text)
elif isinstance(w, QtWidgets.QListWidget):
self.disableSorting(w)
item = self.createWidgetItem('QListWidgetItem', elem, w.item,
self.item_nr)
w.addItem(item)
elif isinstance(w, QtWidgets.QTreeWidget):
if self.itemstack:
parent, _ = self.itemstack[-1]
_, nr_in_root = self.itemstack[0]
else:
parent = w
nr_in_root = self.item_nr
item = self.factory.createQObject("QTreeWidgetItem",
"item_%d" % len(self.itemstack), (parent, ), False)
if self.item_nr == 0 and not self.itemstack:
self.sorting_enabled = self.factory.invoke("__sortingEnabled", w.isSortingEnabled)
w.setSortingEnabled(False)
self.itemstack.append((item, self.item_nr))
self.item_nr = 0
# We have to access the item via the tree when setting the
# text.
titm = w.topLevelItem(nr_in_root)
for child, nr_in_parent in self.itemstack[1:]:
titm = titm.child(nr_in_parent)
column = -1
for prop in elem.findall('property'):
c_prop = self.wprops.convert(prop)
c_prop_name = prop.attrib['name']
if c_prop_name == 'text':
column += 1
if c_prop:
titm.setText(column, c_prop)
elif c_prop_name == 'statusTip':
item.setStatusTip(column, c_prop)
elif c_prop_name == 'toolTip':
item.setToolTip(column, c_prop)
elif c_prop_name == 'whatsThis':
item.setWhatsThis(column, c_prop)
elif c_prop_name == 'font':
item.setFont(column, c_prop)
elif c_prop_name == 'icon':
item.setIcon(column, c_prop)
elif c_prop_name == 'background':
item.setBackground(column, c_prop)
elif c_prop_name == 'foreground':
item.setForeground(column, c_prop)
elif c_prop_name == 'flags':
item.setFlags(c_prop)
elif c_prop_name == 'checkState':
item.setCheckState(column, c_prop)
self.traverseWidgetTree(elem)
_, self.item_nr = self.itemstack.pop()
elif isinstance(w, QtWidgets.QTableWidget):
row = int(elem.attrib['row'])
col = int(elem.attrib['column'])
self.disableSorting(w)
item = self.createWidgetItem('QTableWidgetItem', elem, w.item,
row, col)
w.setItem(row, col, item)
self.item_nr += 1
def addAction(self, elem):
self.actions.append((self.stack.topwidget, elem.attrib["name"]))
@staticmethod
def any_i18n(*args):
""" Return True if any argument appears to be an i18n string. """
for a in args:
if a is not None and not isinstance(a, str):
return True
return False
def addHeader(self, elem):
w = self.stack.topwidget
if isinstance(w, QtWidgets.QTreeWidget):
props = self.wprops
col = self.column_counter
text = props.getProperty(elem, 'text')
if text:
w.headerItem().setText(col, text)
status_tip = props.getProperty(elem, 'statusTip')
if status_tip:
w.headerItem().setStatusTip(col, status_tip)
tool_tip = props.getProperty(elem, 'toolTip')
if tool_tip:
w.headerItem().setToolTip(col, tool_tip)
whats_this = props.getProperty(elem, 'whatsThis')
if whats_this:
w.headerItem().setWhatsThis(col, whats_this)
text_alignment = props.getProperty(elem, 'textAlignment')
if text_alignment:
w.headerItem().setTextAlignment(col, text_alignment)
font = props.getProperty(elem, 'font')
if font:
w.headerItem().setFont(col, font)
icon = props.getProperty(elem, 'icon')
if icon:
w.headerItem().setIcon(col, icon)
background = props.getProperty(elem, 'background')
if background:
w.headerItem().setBackground(col, background)
foreground = props.getProperty(elem, 'foreground')
if foreground:
w.headerItem().setForeground(col, foreground)
self.column_counter += 1
elif isinstance(w, QtWidgets.QTableWidget):
if len(elem) != 0:
if elem.tag == 'column':
item = self.createWidgetItem('QTableWidgetItem', elem,
w.horizontalHeaderItem, self.column_counter)
w.setHorizontalHeaderItem(self.column_counter, item)
self.column_counter += 1
elif elem.tag == 'row':
item = self.createWidgetItem('QTableWidgetItem', elem,
w.verticalHeaderItem, self.row_counter)
w.setVerticalHeaderItem(self.row_counter, item)
self.row_counter += 1
def setZOrder(self, elem):
# Designer can generate empty zorder elements.
if elem.text is None:
return
# Designer allows the z-order of spacer items to be specified even
# though they can't be raised, so ignore any missing raise_() method.
try:
getattr(self.toplevelWidget, elem.text).raise_()
except AttributeError:
# Note that uic issues a warning message.
pass
def createAction(self, elem):
self.setupObject("QAction", self.currentActionGroup or self.toplevelWidget,
elem)
def createActionGroup(self, elem):
action_group = self.setupObject("QActionGroup", self.toplevelWidget, elem)
self.currentActionGroup = action_group
self.traverseWidgetTree(elem)
self.currentActionGroup = None
widgetTreeItemHandlers = {
"widget" : createWidget,
"addaction" : addAction,
"layout" : createLayout,
"spacer" : createSpacer,
"item" : handleItem,
"action" : createAction,
"actiongroup": createActionGroup,
"column" : addHeader,
"row" : addHeader,
"zorder" : setZOrder,
}
def traverseWidgetTree(self, elem):
for child in iter(elem):
try:
handler = self.widgetTreeItemHandlers[child.tag]
except KeyError:
continue
handler(self, child)
def createUserInterface(self, elem):
# Get the names of the class and widget.
cname = elem.attrib["class"]
wname = elem.attrib["name"]
# If there was no widget name then derive it from the class name.
if not wname:
wname = cname
if wname.startswith("Q"):
wname = wname[1:]
wname = wname[0].lower() + wname[1:]
self.toplevelWidget = self.createToplevelWidget(cname, wname)
self.toplevelWidget.setObjectName(wname)
DEBUG("toplevel widget is %s",
self.toplevelWidget.metaObject().className())
self.wprops.setProperties(self.toplevelWidget, elem)
self.stack.push(self.toplevelWidget)
self.traverseWidgetTree(elem)
self.stack.popWidget()
self.addActions()
self.setBuddies()
self.setDelayedProps()
def addActions(self):
for widget, action_name in self.actions:
if action_name == "separator":
widget.addSeparator()
else:
DEBUG("add action %s to %s", action_name, widget.objectName())
action_obj = getattr(self.toplevelWidget, action_name)
if isinstance(action_obj, QtWidgets.QMenu):
widget.addAction(action_obj.menuAction())
elif not isinstance(action_obj, QtWidgets.QActionGroup):
widget.addAction(action_obj)
def setDelayedProps(self):
for widget, layout, setter, args in self.wprops.delayed_props:
if layout:
widget = widget.layout()
setter = getattr(widget, setter)
setter(args)
def setBuddies(self):
for widget, buddy in self.wprops.buddies:
DEBUG("%s is buddy of %s", buddy, widget.objectName())
try:
widget.setBuddy(getattr(self.toplevelWidget, buddy))
except AttributeError:
DEBUG("ERROR in ui spec: %s (buddy of %s) does not exist",
buddy, widget.objectName())
def classname(self, elem):
DEBUG("uiname is %s", elem.text)
name = elem.text
if name is None:
name = ""
self.uiname = name
self.wprops.uiname = name
self.setContext(name)
def setContext(self, context):
"""
Reimplemented by a sub-class if it needs to know the translation
context.
"""
pass
def readDefaults(self, elem):
self.defaults['margin'] = int(elem.attrib['margin'])
self.defaults['spacing'] = int(elem.attrib['spacing'])
def setTaborder(self, elem):
lastwidget = None
for widget_elem in elem:
widget = getattr(self.toplevelWidget, widget_elem.text)
if lastwidget is not None:
self.toplevelWidget.setTabOrder(lastwidget, widget)
lastwidget = widget
def readResources(self, elem):
"""
Read a "resources" tag and add the module to import to the parser's
list of them.
"""
try:
iterator = getattr(elem, 'iter')
except AttributeError:
iterator = getattr(elem, 'getiterator')
for include in iterator("include"):
loc = include.attrib.get("location")
# Apply the convention for naming the Python files generated by
# pyrcc5.
if loc and loc.endswith('.qrc'):
mname = os.path.basename(loc[:-4] + self._resource_suffix)
if mname not in self.resources:
self.resources.append(mname)
def createConnections(self, elem):
def name2object(obj):
if obj == self.uiname:
return self.toplevelWidget
else:
return getattr(self.toplevelWidget, obj)
for conn in iter(elem):
signal = conn.findtext('signal')
signal_name, signal_args = signal.split('(')
signal_args = signal_args[:-1].replace(' ', '')
sender = name2object(conn.findtext('sender'))
bound_signal = getattr(sender, signal_name)
slot = self.factory.getSlot(name2object(conn.findtext('receiver')),
conn.findtext('slot').split('(')[0])
if signal_args == '':
bound_signal.connect(slot)
else:
signal_args = signal_args.split(',')
if len(signal_args) == 1:
bound_signal[signal_args[0]].connect(slot)
else:
bound_signal[tuple(signal_args)].connect(slot)
QtCore.QMetaObject.connectSlotsByName(self.toplevelWidget)
def customWidgets(self, elem):
def header2module(header):
"""header2module(header) -> string
Convert paths to C++ header files to according Python modules
>>> header2module("foo/bar/baz.h")
'foo.bar.baz'
"""
if header.endswith(".h"):
header = header[:-2]
mpath = []
for part in header.split('/'):
# Ignore any empty parts or those that refer to the current
# directory.
if part not in ('', '.'):
if part == '..':
# We should allow this for Python3.
raise SyntaxError("custom widget header file name may not contain '..'.")
mpath.append(part)
return '.'.join(mpath)
for custom_widget in iter(elem):
classname = custom_widget.findtext("class")
self.factory.addCustomWidget(classname,
custom_widget.findtext("extends") or "QWidget",
header2module(custom_widget.findtext("header")))
def createToplevelWidget(self, classname, widgetname):
raise NotImplementedError
def buttonGroups(self, elem):
for button_group in iter(elem):
if button_group.tag == 'buttongroup':
bg_name = button_group.attrib['name']
bg = ButtonGroup()
self.button_groups[bg_name] = bg
prop = self.getProperty(button_group, 'exclusive')
if prop is not None:
if prop.findtext('bool') == 'false':
bg.exclusive = False
# finalize will be called after the whole tree has been parsed and can be
# overridden.
def finalize(self):
pass
def parse(self, filename, resource_suffix, base_dir=''):
self.wprops.set_base_dir(base_dir)
self._resource_suffix = resource_suffix
# The order in which the different branches are handled is important.
# The widget tree handler relies on all custom widgets being known, and
# in order to create the connections, all widgets have to be populated.
branchHandlers = (
("layoutdefault", self.readDefaults),
("class", self.classname),
("buttongroups", self.buttonGroups),
("customwidgets", self.customWidgets),
("widget", self.createUserInterface),
("connections", self.createConnections),
("tabstops", self.setTaborder),
("resources", self.readResources),
)
document = parse(filename)
version = document.getroot().attrib["version"]
DEBUG("UI version is %s" % (version,))
# Right now, only version 4.0 is supported.
assert version in ("4.0",)
for tagname, actor in branchHandlers:
elem = document.find(tagname)
if elem is not None:
actor(elem)
self.finalize()
w = self.toplevelWidget
self.reset()
return w
@staticmethod
def _form_layout_role(layout_position):
if layout_position[3] > 1:
role = QtWidgets.QFormLayout.SpanningRole
elif layout_position[1] == 1:
role = QtWidgets.QFormLayout.FieldRole
else:
role = QtWidgets.QFormLayout.LabelRole
return role
|
pyqt/python-qt5 | PyQt5/uic/uiparser.py | UIParser.readResources | python | def readResources(self, elem):
try:
iterator = getattr(elem, 'iter')
except AttributeError:
iterator = getattr(elem, 'getiterator')
for include in iterator("include"):
loc = include.attrib.get("location")
# Apply the convention for naming the Python files generated by
# pyrcc5.
if loc and loc.endswith('.qrc'):
mname = os.path.basename(loc[:-4] + self._resource_suffix)
if mname not in self.resources:
self.resources.append(mname) | Read a "resources" tag and add the module to import to the parser's
list of them. | train | https://github.com/pyqt/python-qt5/blob/c9ed180c56f6fd3521ffe5fb70904bc5d3f50e5f/PyQt5/uic/uiparser.py#L884-L902 | null | class UIParser(object):
def __init__(self, qtcore_module, qtgui_module, qtwidgets_module, creatorPolicy):
self.factory = QObjectCreator(creatorPolicy)
self.wprops = Properties(self.factory, qtcore_module, qtgui_module,
qtwidgets_module)
global QtCore, QtWidgets
QtCore = qtcore_module
QtWidgets = qtwidgets_module
self.reset()
def uniqueName(self, name):
"""UIParser.uniqueName(string) -> string
Create a unique name from a string.
>>> p = UIParser(QtCore, QtGui, QtWidgets)
>>> p.uniqueName("foo")
'foo'
>>> p.uniqueName("foo")
'foo1'
"""
try:
suffix = self.name_suffixes[name]
except KeyError:
self.name_suffixes[name] = 0
return name
suffix += 1
self.name_suffixes[name] = suffix
return "%s%i" % (name, suffix)
def reset(self):
try: self.wprops.reset()
except AttributeError: pass
self.toplevelWidget = None
self.stack = WidgetStack()
self.name_suffixes = {}
self.defaults = {'spacing': -1, 'margin': -1}
self.actions = []
self.currentActionGroup = None
self.resources = []
self.button_groups = {}
def setupObject(self, clsname, parent, branch, is_attribute=True):
name = self.uniqueName(branch.attrib.get('name') or clsname[1:].lower())
if parent is None:
args = ()
else:
args = (parent, )
obj = self.factory.createQObject(clsname, name, args, is_attribute)
self.wprops.setProperties(obj, branch)
obj.setObjectName(name)
if is_attribute:
setattr(self.toplevelWidget, name, obj)
return obj
def getProperty(self, elem, name):
for prop in elem.findall('property'):
if prop.attrib['name'] == name:
return prop
return None
def createWidget(self, elem):
self.column_counter = 0
self.row_counter = 0
self.item_nr = 0
self.itemstack = []
self.sorting_enabled = None
widget_class = elem.attrib['class'].replace('::', '.')
if widget_class == 'Line':
widget_class = 'QFrame'
# Ignore the parent if it is a container.
parent = self.stack.topwidget
if isinstance(parent, (QtWidgets.QDockWidget, QtWidgets.QMdiArea,
QtWidgets.QScrollArea, QtWidgets.QStackedWidget,
QtWidgets.QToolBox, QtWidgets.QTabWidget,
QtWidgets.QWizard)):
parent = None
self.stack.push(self.setupObject(widget_class, parent, elem))
if isinstance(self.stack.topwidget, QtWidgets.QTableWidget):
if self.getProperty(elem, 'columnCount') is None:
self.stack.topwidget.setColumnCount(len(elem.findall("column")))
if self.getProperty(elem, 'rowCount') is None:
self.stack.topwidget.setRowCount(len(elem.findall("row")))
self.traverseWidgetTree(elem)
widget = self.stack.popWidget()
if isinstance(widget, QtWidgets.QTreeView):
self.handleHeaderView(elem, "header", widget.header())
elif isinstance(widget, QtWidgets.QTableView):
self.handleHeaderView(elem, "horizontalHeader",
widget.horizontalHeader())
self.handleHeaderView(elem, "verticalHeader",
widget.verticalHeader())
elif isinstance(widget, QtWidgets.QAbstractButton):
bg_i18n = self.wprops.getAttribute(elem, "buttonGroup")
if bg_i18n is not None:
# This should be handled properly in case the problem arises
# elsewhere as well.
try:
# We are compiling the .ui file.
bg_name = bg_i18n.string
except AttributeError:
# We are loading the .ui file.
bg_name = bg_i18n
# Designer allows the creation of .ui files without explicit
# button groups, even though uic then issues warnings. We
# handle it in two stages by first making sure it has a name
# and then making sure one exists with that name.
if not bg_name:
bg_name = 'buttonGroup'
try:
bg = self.button_groups[bg_name]
except KeyError:
bg = self.button_groups[bg_name] = ButtonGroup()
if bg.object is None:
bg.object = self.factory.createQObject("QButtonGroup",
bg_name, (self.toplevelWidget, ))
setattr(self.toplevelWidget, bg_name, bg.object)
bg.object.setObjectName(bg_name)
if not bg.exclusive:
bg.object.setExclusive(False)
bg.object.addButton(widget)
if self.sorting_enabled is not None:
widget.setSortingEnabled(self.sorting_enabled)
self.sorting_enabled = None
if self.stack.topIsLayout():
lay = self.stack.peek()
lp = elem.attrib['layout-position']
if isinstance(lay, QtWidgets.QFormLayout):
lay.setWidget(lp[0], self._form_layout_role(lp), widget)
else:
lay.addWidget(widget, *lp)
topwidget = self.stack.topwidget
if isinstance(topwidget, QtWidgets.QToolBox):
icon = self.wprops.getAttribute(elem, "icon")
if icon is not None:
topwidget.addItem(widget, icon, self.wprops.getAttribute(elem, "label"))
else:
topwidget.addItem(widget, self.wprops.getAttribute(elem, "label"))
tooltip = self.wprops.getAttribute(elem, "toolTip")
if tooltip is not None:
topwidget.setItemToolTip(topwidget.indexOf(widget), tooltip)
elif isinstance(topwidget, QtWidgets.QTabWidget):
icon = self.wprops.getAttribute(elem, "icon")
if icon is not None:
topwidget.addTab(widget, icon, self.wprops.getAttribute(elem, "title"))
else:
topwidget.addTab(widget, self.wprops.getAttribute(elem, "title"))
tooltip = self.wprops.getAttribute(elem, "toolTip")
if tooltip is not None:
topwidget.setTabToolTip(topwidget.indexOf(widget), tooltip)
elif isinstance(topwidget, QtWidgets.QWizard):
topwidget.addPage(widget)
elif isinstance(topwidget, QtWidgets.QStackedWidget):
topwidget.addWidget(widget)
elif isinstance(topwidget, (QtWidgets.QDockWidget, QtWidgets.QScrollArea)):
topwidget.setWidget(widget)
elif isinstance(topwidget, QtWidgets.QMainWindow):
if type(widget) == QtWidgets.QWidget:
topwidget.setCentralWidget(widget)
elif isinstance(widget, QtWidgets.QToolBar):
tbArea = self.wprops.getAttribute(elem, "toolBarArea")
if tbArea is None:
topwidget.addToolBar(widget)
else:
topwidget.addToolBar(tbArea, widget)
tbBreak = self.wprops.getAttribute(elem, "toolBarBreak")
if tbBreak:
topwidget.insertToolBarBreak(widget)
elif isinstance(widget, QtWidgets.QMenuBar):
topwidget.setMenuBar(widget)
elif isinstance(widget, QtWidgets.QStatusBar):
topwidget.setStatusBar(widget)
elif isinstance(widget, QtWidgets.QDockWidget):
dwArea = self.wprops.getAttribute(elem, "dockWidgetArea")
topwidget.addDockWidget(QtCore.Qt.DockWidgetArea(dwArea),
widget)
def handleHeaderView(self, elem, name, header):
value = self.wprops.getAttribute(elem, name + "Visible")
if value is not None:
header.setVisible(value)
value = self.wprops.getAttribute(elem, name + "CascadingSectionResizes")
if value is not None:
header.setCascadingSectionResizes(value)
value = self.wprops.getAttribute(elem, name + "DefaultSectionSize")
if value is not None:
header.setDefaultSectionSize(value)
value = self.wprops.getAttribute(elem, name + "HighlightSections")
if value is not None:
header.setHighlightSections(value)
value = self.wprops.getAttribute(elem, name + "MinimumSectionSize")
if value is not None:
header.setMinimumSectionSize(value)
value = self.wprops.getAttribute(elem, name + "ShowSortIndicator")
if value is not None:
header.setSortIndicatorShown(value)
value = self.wprops.getAttribute(elem, name + "StretchLastSection")
if value is not None:
header.setStretchLastSection(value)
def createSpacer(self, elem):
width = elem.findtext("property/size/width")
height = elem.findtext("property/size/height")
if width is None or height is None:
size_args = ()
else:
size_args = (int(width), int(height))
sizeType = self.wprops.getProperty(elem, "sizeType",
QtWidgets.QSizePolicy.Expanding)
policy = (QtWidgets.QSizePolicy.Minimum, sizeType)
if self.wprops.getProperty(elem, "orientation") == QtCore.Qt.Horizontal:
policy = policy[1], policy[0]
spacer = self.factory.createQObject("QSpacerItem",
self.uniqueName("spacerItem"), size_args + policy,
is_attribute=False)
if self.stack.topIsLayout():
lay = self.stack.peek()
lp = elem.attrib['layout-position']
if isinstance(lay, QtWidgets.QFormLayout):
lay.setItem(lp[0], self._form_layout_role(lp), spacer)
else:
lay.addItem(spacer, *lp)
def createLayout(self, elem):
# We use an internal property to handle margins which will use separate
# left, top, right and bottom margins if they are found to be
# different. The following will select, in order of preference,
# separate margins, the same margin in all directions, and the default
# margin.
margin = self.wprops.getProperty(elem, 'margin',
self.defaults['margin'])
left = self.wprops.getProperty(elem, 'leftMargin', margin)
top = self.wprops.getProperty(elem, 'topMargin', margin)
right = self.wprops.getProperty(elem, 'rightMargin', margin)
bottom = self.wprops.getProperty(elem, 'bottomMargin', margin)
# A layout widget should, by default, have no margins.
if self.stack.topIsLayoutWidget():
if left < 0: left = 0
if top < 0: top = 0
if right < 0: right = 0
if bottom < 0: bottom = 0
if left >= 0 or top >= 0 or right >= 0 or bottom >= 0:
# We inject the new internal property.
cme = SubElement(elem, 'property', name='pyuicMargins')
SubElement(cme, 'number').text = str(left)
SubElement(cme, 'number').text = str(top)
SubElement(cme, 'number').text = str(right)
SubElement(cme, 'number').text = str(bottom)
# We use an internal property to handle spacing which will use separate
# horizontal and vertical spacing if they are found to be different.
# The following will select, in order of preference, separate
# horizontal and vertical spacing, the same spacing in both directions,
# and the default spacing.
spacing = self.wprops.getProperty(elem, 'spacing',
self.defaults['spacing'])
horiz = self.wprops.getProperty(elem, 'horizontalSpacing', spacing)
vert = self.wprops.getProperty(elem, 'verticalSpacing', spacing)
if horiz >= 0 or vert >= 0:
# We inject the new internal property.
cme = SubElement(elem, 'property', name='pyuicSpacing')
SubElement(cme, 'number').text = str(horiz)
SubElement(cme, 'number').text = str(vert)
classname = elem.attrib["class"]
if self.stack.topIsLayout():
parent = None
else:
parent = self.stack.topwidget
if "name" not in elem.attrib:
elem.attrib["name"] = classname[1:].lower()
self.stack.push(self.setupObject(classname, parent, elem))
self.traverseWidgetTree(elem)
layout = self.stack.popLayout()
self.configureLayout(elem, layout)
if self.stack.topIsLayout():
top_layout = self.stack.peek()
lp = elem.attrib['layout-position']
if isinstance(top_layout, QtWidgets.QFormLayout):
top_layout.setLayout(lp[0], self._form_layout_role(lp), layout)
else:
top_layout.addLayout(layout, *lp)
def configureLayout(self, elem, layout):
if isinstance(layout, QtWidgets.QGridLayout):
self.setArray(elem, 'columnminimumwidth',
layout.setColumnMinimumWidth)
self.setArray(elem, 'rowminimumheight',
layout.setRowMinimumHeight)
self.setArray(elem, 'columnstretch', layout.setColumnStretch)
self.setArray(elem, 'rowstretch', layout.setRowStretch)
elif isinstance(layout, QtWidgets.QBoxLayout):
self.setArray(elem, 'stretch', layout.setStretch)
def setArray(self, elem, name, setter):
array = elem.attrib.get(name)
if array:
for idx, value in enumerate(array.split(',')):
value = int(value)
if value > 0:
setter(idx, value)
def disableSorting(self, w):
    """Temporarily disable sorting on item widget *w* while items are
    inserted.

    Only done for the first item: the original state is captured via
    the generated "__sortingEnabled" temporary so it can be restored
    once all items have been added.
    """
    if self.item_nr == 0:
        self.sorting_enabled = self.factory.invoke("__sortingEnabled",
                w.isSortingEnabled)
        w.setSortingEnabled(False)
def handleItem(self, elem):
    """Handle an <item> element.

    Inside a layout the item just positions its child widget/layout;
    inside an item widget (combo box, list, tree, table) it creates the
    corresponding item object and fills in its properties.
    """
    if self.stack.topIsLayout():
        elem[0].attrib['layout-position'] = _layout_position(elem)
        self.traverseWidgetTree(elem)
    else:
        w = self.stack.topwidget

        if isinstance(w, QtWidgets.QComboBox):
            text = self.wprops.getProperty(elem, "text")
            icon = self.wprops.getProperty(elem, "icon")

            if icon:
                w.addItem(icon, '')
            else:
                w.addItem('')

            w.setItemText(self.item_nr, text)

        elif isinstance(w, QtWidgets.QListWidget):
            self.disableSorting(w)
            item = self.createWidgetItem('QListWidgetItem', elem, w.item,
                    self.item_nr)
            w.addItem(item)

        elif isinstance(w, QtWidgets.QTreeWidget):
            # Tree items nest; the item stack tracks the current parent
            # chain and the index of the subtree's top-level item.
            if self.itemstack:
                parent, _ = self.itemstack[-1]
                _, nr_in_root = self.itemstack[0]
            else:
                parent = w
                nr_in_root = self.item_nr

            item = self.factory.createQObject("QTreeWidgetItem",
                    "item_%d" % len(self.itemstack), (parent, ), False)

            if self.item_nr == 0 and not self.itemstack:
                self.sorting_enabled = self.factory.invoke(
                        "__sortingEnabled", w.isSortingEnabled)
                w.setSortingEnabled(False)

            self.itemstack.append((item, self.item_nr))
            self.item_nr = 0

            # We have to access the item via the tree when setting the
            # text.
            titm = w.topLevelItem(nr_in_root)
            for child, nr_in_parent in self.itemstack[1:]:
                titm = titm.child(nr_in_parent)

            # Each 'text' property advances to the next column; the
            # other properties apply to the current column.
            column = -1
            for prop in elem.findall('property'):
                c_prop = self.wprops.convert(prop)
                c_prop_name = prop.attrib['name']

                if c_prop_name == 'text':
                    column += 1
                    if c_prop:
                        titm.setText(column, c_prop)
                elif c_prop_name == 'statusTip':
                    item.setStatusTip(column, c_prop)
                elif c_prop_name == 'toolTip':
                    item.setToolTip(column, c_prop)
                elif c_prop_name == 'whatsThis':
                    item.setWhatsThis(column, c_prop)
                elif c_prop_name == 'font':
                    item.setFont(column, c_prop)
                elif c_prop_name == 'icon':
                    item.setIcon(column, c_prop)
                elif c_prop_name == 'background':
                    item.setBackground(column, c_prop)
                elif c_prop_name == 'foreground':
                    item.setForeground(column, c_prop)
                elif c_prop_name == 'flags':
                    item.setFlags(c_prop)
                elif c_prop_name == 'checkState':
                    item.setCheckState(column, c_prop)

            self.traverseWidgetTree(elem)
            _, self.item_nr = self.itemstack.pop()

        elif isinstance(w, QtWidgets.QTableWidget):
            row = int(elem.attrib['row'])
            col = int(elem.attrib['column'])

            self.disableSorting(w)
            item = self.createWidgetItem('QTableWidgetItem', elem, w.item,
                    row, col)
            w.setItem(row, col, item)

        self.item_nr += 1
def addAction(self, elem):
    """Record an <addaction> element; the (widget, action name) pair is
    resolved later by addActions() once all actions exist."""
    self.actions.append((self.stack.topwidget, elem.attrib["name"]))
@staticmethod
def any_i18n(*args):
""" Return True if any argument appears to be an i18n string. """
for a in args:
if a is not None and not isinstance(a, str):
return True
return False
def createWidgetItem(self, item_type, elem, getter, *getter_args):
    """ Create a specific type of widget item.

    *item_type* is the Qt class name (e.g. 'QListWidgetItem'), *elem*
    supplies the properties, and *getter*/*getter_args* describe how a
    generated module would re-fetch the item (needed for retranslation
    of i18n strings).
    """
    item = self.factory.createQObject(item_type, "item", (), False)
    props = self.wprops

    # Note that not all types of widget items support the full set of
    # properties.
    text = props.getProperty(elem, 'text')
    status_tip = props.getProperty(elem, 'statusTip')
    tool_tip = props.getProperty(elem, 'toolTip')
    whats_this = props.getProperty(elem, 'whatsThis')

    # Only emit the item re-fetch when a translatable string is present.
    if self.any_i18n(text, status_tip, tool_tip, whats_this):
        self.factory.invoke("item", getter, getter_args)

    if text:
        item.setText(text)

    if status_tip:
        item.setStatusTip(status_tip)

    if tool_tip:
        item.setToolTip(tool_tip)

    if whats_this:
        item.setWhatsThis(whats_this)

    text_alignment = props.getProperty(elem, 'textAlignment')
    if text_alignment:
        item.setTextAlignment(text_alignment)

    font = props.getProperty(elem, 'font')
    if font:
        item.setFont(font)

    icon = props.getProperty(elem, 'icon')
    if icon:
        item.setIcon(icon)

    background = props.getProperty(elem, 'background')
    if background:
        item.setBackground(background)

    foreground = props.getProperty(elem, 'foreground')
    if foreground:
        item.setForeground(foreground)

    flags = props.getProperty(elem, 'flags')
    if flags:
        item.setFlags(flags)

    check_state = props.getProperty(elem, 'checkState')
    if check_state:
        item.setCheckState(check_state)

    return item
def addHeader(self, elem):
    """Handle a <column>/<row> element defining a header item of the
    current tree or table widget."""
    w = self.stack.topwidget

    if isinstance(w, QtWidgets.QTreeWidget):
        # Tree headers only have columns; properties are applied to the
        # shared header item at the current column index.
        props = self.wprops
        col = self.column_counter

        text = props.getProperty(elem, 'text')
        if text:
            w.headerItem().setText(col, text)

        status_tip = props.getProperty(elem, 'statusTip')
        if status_tip:
            w.headerItem().setStatusTip(col, status_tip)

        tool_tip = props.getProperty(elem, 'toolTip')
        if tool_tip:
            w.headerItem().setToolTip(col, tool_tip)

        whats_this = props.getProperty(elem, 'whatsThis')
        if whats_this:
            w.headerItem().setWhatsThis(col, whats_this)

        text_alignment = props.getProperty(elem, 'textAlignment')
        if text_alignment:
            w.headerItem().setTextAlignment(col, text_alignment)

        font = props.getProperty(elem, 'font')
        if font:
            w.headerItem().setFont(col, font)

        icon = props.getProperty(elem, 'icon')
        if icon:
            w.headerItem().setIcon(col, icon)

        background = props.getProperty(elem, 'background')
        if background:
            w.headerItem().setBackground(col, background)

        foreground = props.getProperty(elem, 'foreground')
        if foreground:
            w.headerItem().setForeground(col, foreground)

        self.column_counter += 1

    elif isinstance(w, QtWidgets.QTableWidget):
        # An empty element carries no properties, so no item is needed.
        if len(elem) != 0:
            if elem.tag == 'column':
                item = self.createWidgetItem('QTableWidgetItem', elem,
                        w.horizontalHeaderItem, self.column_counter)
                w.setHorizontalHeaderItem(self.column_counter, item)
                self.column_counter += 1
            elif elem.tag == 'row':
                item = self.createWidgetItem('QTableWidgetItem', elem,
                        w.verticalHeaderItem, self.row_counter)
                w.setVerticalHeaderItem(self.row_counter, item)
                self.row_counter += 1
def setZOrder(self, elem):
    """Raise the widget named by a <zorder> element."""
    # Designer can generate empty zorder elements.
    if elem.text is None:
        return

    # Designer allows the z-order of spacer items to be specified even
    # though they can't be raised, so ignore any missing raise_() method.
    try:
        getattr(self.toplevelWidget, elem.text).raise_()
    except AttributeError:
        # Note that uic issues a warning message.
        pass
def createAction(self, elem):
    """Create a QAction, parented to the current action group if one is
    being built, otherwise to the top-level widget."""
    self.setupObject("QAction",
            self.currentActionGroup or self.toplevelWidget, elem)
def createActionGroup(self, elem):
    """Create a QActionGroup and make it the parent of the actions
    created while traversing its children."""
    action_group = self.setupObject("QActionGroup", self.toplevelWidget,
            elem)
    self.currentActionGroup = action_group
    self.traverseWidgetTree(elem)
    self.currentActionGroup = None
# Dispatch table mapping .ui element tags to their handler methods;
# used by traverseWidgetTree().  (createWidget, createLayout and
# createSpacer are defined earlier in the class.)
widgetTreeItemHandlers = {
    "widget"     : createWidget,
    "addaction"  : addAction,
    "layout"     : createLayout,
    "spacer"     : createSpacer,
    "item"       : handleItem,
    "action"     : createAction,
    "actiongroup": createActionGroup,
    "column"     : addHeader,
    "row"        : addHeader,
    "zorder"     : setZOrder,
}
def traverseWidgetTree(self, elem):
    """Dispatch each child of *elem* to its tag handler; children with
    unknown tags are silently ignored."""
    for child in iter(elem):
        try:
            handler = self.widgetTreeItemHandlers[child.tag]
        except KeyError:
            continue

        handler(self, child)
def createUserInterface(self, elem):
    """Build the complete widget hierarchy from the top-level <widget>
    element, then wire up actions, buddies and delayed properties."""
    # Get the names of the class and widget.
    cname = elem.attrib["class"]
    wname = elem.attrib["name"]

    # If there was no widget name then derive it from the class name.
    if not wname:
        wname = cname

        if wname.startswith("Q"):
            wname = wname[1:]

        wname = wname[0].lower() + wname[1:]

    self.toplevelWidget = self.createToplevelWidget(cname, wname)
    self.toplevelWidget.setObjectName(wname)
    DEBUG("toplevel widget is %s",
            self.toplevelWidget.metaObject().className())

    self.wprops.setProperties(self.toplevelWidget, elem)
    self.stack.push(self.toplevelWidget)
    self.traverseWidgetTree(elem)
    self.stack.popWidget()
    self.addActions()
    self.setBuddies()
    self.setDelayedProps()
def addActions(self):
    """Resolve the (widget, action name) pairs recorded by addAction().

    Menus are added via their menuAction(); action groups are skipped
    because their member actions are added individually.
    """
    for widget, action_name in self.actions:
        if action_name == "separator":
            widget.addSeparator()
        else:
            DEBUG("add action %s to %s", action_name, widget.objectName())
            action_obj = getattr(self.toplevelWidget, action_name)
            if isinstance(action_obj, QtWidgets.QMenu):
                widget.addAction(action_obj.menuAction())
            elif not isinstance(action_obj, QtWidgets.QActionGroup):
                widget.addAction(action_obj)
def setDelayedProps(self):
    """Apply properties whose setting was deferred until the whole
    widget tree exists (e.g. properties targeting a widget's layout)."""
    for widget, layout, setter, args in self.wprops.delayed_props:
        if layout:
            widget = widget.layout()

        setter = getattr(widget, setter)
        setter(args)
def setBuddies(self):
    """Resolve the recorded label/buddy name pairs; a dangling buddy
    name is only reported in debug output."""
    for widget, buddy in self.wprops.buddies:
        DEBUG("%s is buddy of %s", buddy, widget.objectName())
        try:
            widget.setBuddy(getattr(self.toplevelWidget, buddy))
        except AttributeError:
            DEBUG("ERROR in ui spec: %s (buddy of %s) does not exist",
                    buddy, widget.objectName())
def classname(self, elem):
    """Handle the <class> element: record the UI name and use it as the
    translation context."""
    DEBUG("uiname is %s", elem.text)
    name = elem.text

    # An empty <class/> element yields None.
    if name is None:
        name = ""

    self.uiname = name
    self.wprops.uiname = name
    self.setContext(name)
def setContext(self, context):
    """
    Reimplemented by a sub-class if it needs to know the translation
    context.
    """
    pass
def readDefaults(self, elem):
    """Handle <layoutdefault>: record the default layout margin and
    spacing as integers."""
    self.defaults['margin'] = int(elem.attrib['margin'])
    self.defaults['spacing'] = int(elem.attrib['spacing'])
def setTaborder(self, elem):
    """Handle <tabstops>: chain consecutive widgets with setTabOrder()."""
    lastwidget = None
    for widget_elem in elem:
        widget = getattr(self.toplevelWidget, widget_elem.text)

        if lastwidget is not None:
            self.toplevelWidget.setTabOrder(lastwidget, widget)

        lastwidget = widget
def createConnections(self, elem):
    """Handle <connections>: connect each declared signal to its slot,
    then let Qt auto-connect on_<name>_<signal> style slots."""
    def name2object(obj):
        # The UI name refers to the top-level widget itself; any other
        # name is an attribute of it.
        if obj == self.uiname:
            return self.toplevelWidget
        else:
            return getattr(self.toplevelWidget, obj)

    for conn in iter(elem):
        # Split "clicked(bool)" into the name and the argument list.
        signal = conn.findtext('signal')
        signal_name, signal_args = signal.split('(')
        signal_args = signal_args[:-1].replace(' ', '')

        sender = name2object(conn.findtext('sender'))
        bound_signal = getattr(sender, signal_name)
        slot = self.factory.getSlot(name2object(conn.findtext('receiver')),
                conn.findtext('slot').split('(')[0])

        if signal_args == '':
            bound_signal.connect(slot)
        else:
            # Select the correct overload of the signal.
            signal_args = signal_args.split(',')

            if len(signal_args) == 1:
                bound_signal[signal_args[0]].connect(slot)
            else:
                bound_signal[tuple(signal_args)].connect(slot)

    QtCore.QMetaObject.connectSlotsByName(self.toplevelWidget)
def customWidgets(self, elem):
    """Handle <customwidgets>: register every declared custom widget
    class with the factory, mapping its C++ header to a Python module."""
    def header2module(header):
        """header2module(header) -> string

        Convert paths to C++ header files to according Python modules
        >>> header2module("foo/bar/baz.h")
        'foo.bar.baz'
        """
        if header.endswith(".h"):
            header = header[:-2]

        mpath = []
        for part in header.split('/'):
            # Ignore any empty parts or those that refer to the current
            # directory.
            if part not in ('', '.'):
                if part == '..':
                    # We should allow this for Python3.
                    raise SyntaxError(
                            "custom widget header file name may not contain '..'.")

                mpath.append(part)

        return '.'.join(mpath)

    for custom_widget in iter(elem):
        classname = custom_widget.findtext("class")
        self.factory.addCustomWidget(classname,
                custom_widget.findtext("extends") or "QWidget",
                header2module(custom_widget.findtext("header")))
def createToplevelWidget(self, classname, widgetname):
    """Create the top-level widget instance; must be implemented by a
    sub-class (compiler vs. dynamic loader differ here)."""
    raise NotImplementedError
def buttonGroups(self, elem):
    """Handle <buttongroups>: record each declared group and whether it
    is exclusive (the default) or not."""
    for button_group in iter(elem):
        if button_group.tag == 'buttongroup':
            bg_name = button_group.attrib['name']
            bg = ButtonGroup()
            self.button_groups[bg_name] = bg

            # NOTE(review): getProperty is resolved on self here (not on
            # self.wprops as elsewhere) — confirm it exists on this class.
            prop = self.getProperty(button_group, 'exclusive')
            if prop is not None:
                if prop.findtext('bool') == 'false':
                    bg.exclusive = False
# finalize will be called after the whole tree has been parsed and can be
# overridden.
def finalize(self):
    """Hook invoked by parse() after the whole tree has been handled;
    no-op by default."""
    pass
def parse(self, filename, resource_suffix, base_dir=''):
    """Parse the .ui file *filename* and return the created top-level
    widget.

    *resource_suffix* is appended to resource module basenames and
    *base_dir* is the base for relative paths in properties.
    """
    self.wprops.set_base_dir(base_dir)

    self._resource_suffix = resource_suffix

    # The order in which the different branches are handled is important.
    # The widget tree handler relies on all custom widgets being known, and
    # in order to create the connections, all widgets have to be populated.
    branchHandlers = (
        ("layoutdefault", self.readDefaults),
        ("class", self.classname),
        ("buttongroups", self.buttonGroups),
        ("customwidgets", self.customWidgets),
        ("widget", self.createUserInterface),
        ("connections", self.createConnections),
        ("tabstops", self.setTaborder),
        ("resources", self.readResources),
    )

    # 'parse' here is the XML parser imported at module level, not a
    # recursive call.
    document = parse(filename)
    version = document.getroot().attrib["version"]
    DEBUG("UI version is %s" % (version,))

    # Right now, only version 4.0 is supported.
    assert version in ("4.0",)

    for tagname, actor in branchHandlers:
        elem = document.find(tagname)
        if elem is not None:
            actor(elem)

    self.finalize()
    w = self.toplevelWidget
    self.reset()
    return w
@staticmethod
def _form_layout_role(layout_position):
    """Map a (row, column, rowspan, colspan) layout position to the
    QFormLayout role for the cell: spanning if it covers more than one
    column, otherwise field (column 1) or label (column 0)."""
    if layout_position[3] > 1:
        role = QtWidgets.QFormLayout.SpanningRole
    elif layout_position[1] == 1:
        role = QtWidgets.QFormLayout.FieldRole
    else:
        role = QtWidgets.QFormLayout.LabelRole

    return role
|
pyqt/python-qt5 | PyQt5/uic/__init__.py | compileUiDir | python | def compileUiDir(dir, recurse=False, map=None, **compileUi_args):
import os
# Compile a single .ui file.
def compile_ui(ui_dir, ui_file):
# Ignore if it doesn't seem to be a .ui file.
if ui_file.endswith('.ui'):
py_dir = ui_dir
py_file = ui_file[:-3] + '.py'
# Allow the caller to change the name of the .py file or generate
# it in a different directory.
if map is not None:
py_dir, py_file = map(py_dir, py_file)
# Make sure the destination directory exists.
try:
os.makedirs(py_dir)
except:
pass
ui_path = os.path.join(ui_dir, ui_file)
py_path = os.path.join(py_dir, py_file)
ui_file = open(ui_path, 'r')
py_file = open(py_path, 'w')
try:
compileUi(ui_file, py_file, **compileUi_args)
finally:
ui_file.close()
py_file.close()
if recurse:
for root, _, files in os.walk(dir):
for ui in files:
compile_ui(root, ui)
else:
for ui in os.listdir(dir):
if os.path.isfile(os.path.join(dir, ui)):
compile_ui(dir, ui) | compileUiDir(dir, recurse=False, map=None, **compileUi_args)
Creates Python modules from Qt Designer .ui files in a directory or
directory tree.
dir is the name of the directory to scan for files whose name ends with
'.ui'. By default the generated Python module is created in the same
directory ending with '.py'.
recurse is set if any sub-directories should be scanned. The default is
False.
map is an optional callable that is passed the name of the directory
containing the '.ui' file and the name of the Python module that will be
created. The callable should return a tuple of the name of the directory
in which the Python module will be created and the (possibly modified)
name of the module. The default is None.
compileUi_args are any additional keyword arguments that are passed to
the compileUi() function that is called to create each Python module. | train | https://github.com/pyqt/python-qt5/blob/c9ed180c56f6fd3521ffe5fb70904bc5d3f50e5f/PyQt5/uic/__init__.py#L69-L128 | [
"def compile_ui(ui_dir, ui_file):\n # Ignore if it doesn't seem to be a .ui file.\n if ui_file.endswith('.ui'):\n py_dir = ui_dir\n py_file = ui_file[:-3] + '.py'\n\n # Allow the caller to change the name of the .py file or generate\n # it in a different directory.\n if map is not None:\n py_dir, py_file = map(py_dir, py_file)\n\n # Make sure the destination directory exists.\n try:\n os.makedirs(py_dir)\n except:\n pass\n\n ui_path = os.path.join(ui_dir, ui_file)\n py_path = os.path.join(py_dir, py_file)\n\n ui_file = open(ui_path, 'r')\n py_file = open(py_path, 'w')\n\n try:\n compileUi(ui_file, py_file, **compileUi_args)\n finally:\n ui_file.close()\n py_file.close()\n"
] | #############################################################################
##
## Copyright (C) 2015 Riverbank Computing Limited.
## Copyright (C) 2006 Thorsten Marek.
## All right reserved.
##
## This file is part of PyQt.
##
## You may use this file under the terms of the GPL v2 or the revised BSD
## license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of the Riverbank Computing Limited nor the names
## of its contributors may be used to endorse or promote products
## derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
#############################################################################
__all__ = ("compileUi", "compileUiDir", "loadUiType", "loadUi", "widgetPluginPath")
from .Compiler import indenter, compiler
_header = """# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '%s'
#
# Created by: PyQt5 UI code generator %s
#
# WARNING! All changes made in this file will be lost!
"""
_display_code = """
if __name__ == "__main__":
\timport sys
\tapp = QtWidgets.QApplication(sys.argv)
\t%(widgetname)s = QtWidgets.%(baseclass)s()
\tui = %(uiclass)s()
\tui.setupUi(%(widgetname)s)
\t%(widgetname)s.show()
\tsys.exit(app.exec_())
"""
def compileUi(uifile, pyfile, execute=False, indent=4, from_imports=False, resource_suffix='_rc', import_from='.'):
"""compileUi(uifile, pyfile, execute=False, indent=4, from_imports=False, resource_suffix='_rc', import_from='.')
Creates a Python module from a Qt Designer .ui file.
uifile is a file name or file-like object containing the .ui file.
pyfile is the file-like object to which the Python code will be written to.
execute is optionally set to generate extra Python code that allows the
code to be run as a standalone application. The default is False.
indent is the optional indentation width using spaces. If it is 0 then a
tab is used. The default is 4.
from_imports is optionally set to generate relative import statements. At
the moment this only applies to the import of resource modules.
resource_suffix is the suffix appended to the basename of any resource file
specified in the .ui file to create the name of the Python module generated
from the resource file by pyrcc4. The default is '_rc', i.e. if the .ui
file specified a resource file called foo.qrc then the corresponding Python
module is foo_rc.
import_from is optionally set to the package used for relative import
statements. The default is ``'.'``.
"""
from PyQt5.QtCore import PYQT_VERSION_STR
try:
uifname = uifile.name
except AttributeError:
uifname = uifile
indenter.indentwidth = indent
pyfile.write(_header % (uifname, PYQT_VERSION_STR))
winfo = compiler.UICompiler().compileUi(uifile, pyfile, from_imports, resource_suffix, import_from)
if execute:
indenter.write_code(_display_code % winfo)
def loadUiType(uifile, from_imports=False, resource_suffix='_rc', import_from='.'):
"""loadUiType(uifile, from_imports=False, resource_suffix='_rc', import_from='.') -> (form class, base class)
Load a Qt Designer .ui file and return the generated form class and the Qt
base class.
uifile is a file name or file-like object containing the .ui file.
from_imports is optionally set to generate relative import statements. At
the moment this only applies to the import of resource modules.
resource_suffix is the suffix appended to the basename of any resource file
specified in the .ui file to create the name of the Python module generated
from the resource file by pyrcc4. The default is '_rc', i.e. if the .ui
file specified a resource file called foo.qrc then the corresponding Python
module is foo_rc.
import_from is optionally set to the package used for relative import
statements. The default is ``'.'``.
"""
import sys
from PyQt5 import QtWidgets
if sys.hexversion >= 0x03000000:
from .port_v3.string_io import StringIO
else:
from .port_v2.string_io import StringIO
code_string = StringIO()
winfo = compiler.UICompiler().compileUi(uifile, code_string, from_imports, resource_suffix, import_from)
ui_globals = {}
exec(code_string.getvalue(), ui_globals)
return (ui_globals[winfo["uiclass"]], getattr(QtWidgets, winfo["baseclass"]))
def loadUi(uifile, baseinstance=None, package='', resource_suffix='_rc'):
"""loadUi(uifile, baseinstance=None, package='') -> widget
Load a Qt Designer .ui file and return an instance of the user interface.
uifile is a file name or file-like object containing the .ui file.
baseinstance is an optional instance of the Qt base class. If specified
then the user interface is created in it. Otherwise a new instance of the
base class is automatically created.
package is the optional package which is used as the base for any relative
imports of custom widgets.
resource_suffix is the suffix appended to the basename of any resource file
specified in the .ui file to create the name of the Python module generated
from the resource file by pyrcc4. The default is '_rc', i.e. if the .ui
file specified a resource file called foo.qrc then the corresponding Python
module is foo_rc.
"""
from .Loader.loader import DynamicUILoader
return DynamicUILoader(package).loadUi(uifile, baseinstance, resource_suffix)
# The list of directories that are searched for widget plugins.
from .objcreator import widgetPluginPath
|
pyqt/python-qt5 | PyQt5/uic/__init__.py | compileUi | python | def compileUi(uifile, pyfile, execute=False, indent=4, from_imports=False, resource_suffix='_rc', import_from='.'):
from PyQt5.QtCore import PYQT_VERSION_STR
try:
uifname = uifile.name
except AttributeError:
uifname = uifile
indenter.indentwidth = indent
pyfile.write(_header % (uifname, PYQT_VERSION_STR))
winfo = compiler.UICompiler().compileUi(uifile, pyfile, from_imports, resource_suffix, import_from)
if execute:
indenter.write_code(_display_code % winfo) | compileUi(uifile, pyfile, execute=False, indent=4, from_imports=False, resource_suffix='_rc', import_from='.')
Creates a Python module from a Qt Designer .ui file.
uifile is a file name or file-like object containing the .ui file.
pyfile is the file-like object to which the Python code will be written to.
execute is optionally set to generate extra Python code that allows the
code to be run as a standalone application. The default is False.
indent is the optional indentation width using spaces. If it is 0 then a
tab is used. The default is 4.
from_imports is optionally set to generate relative import statements. At
the moment this only applies to the import of resource modules.
resource_suffix is the suffix appended to the basename of any resource file
specified in the .ui file to create the name of the Python module generated
from the resource file by pyrcc4. The default is '_rc', i.e. if the .ui
file specified a resource file called foo.qrc then the corresponding Python
module is foo_rc.
import_from is optionally set to the package used for relative import
statements. The default is ``'.'``. | train | https://github.com/pyqt/python-qt5/blob/c9ed180c56f6fd3521ffe5fb70904bc5d3f50e5f/PyQt5/uic/__init__.py#L131-L167 | [
"def write_code(string):\n _indenter.write(string)\n",
"def compileUi(self, input_stream, output_stream, from_imports, resource_suffix, import_from):\n createCodeIndenter(output_stream)\n w = self.parse(input_stream, resource_suffix)\n\n indenter = getIndenter()\n indenter.write(\"\")\n\n self.factory._cpolicy._writeOutImports()\n\n for res in self._resources:\n if from_imports:\n write_code(\"from %s import %s\" % (import_from, res))\n else:\n write_code(\"import %s\" % res)\n\n return {\"widgetname\": str(w),\n \"uiclass\" : w.uiclass,\n \"baseclass\" : w.baseclass}\n"
] | #############################################################################
##
## Copyright (C) 2015 Riverbank Computing Limited.
## Copyright (C) 2006 Thorsten Marek.
## All right reserved.
##
## This file is part of PyQt.
##
## You may use this file under the terms of the GPL v2 or the revised BSD
## license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of the Riverbank Computing Limited nor the names
## of its contributors may be used to endorse or promote products
## derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
#############################################################################
__all__ = ("compileUi", "compileUiDir", "loadUiType", "loadUi", "widgetPluginPath")
from .Compiler import indenter, compiler
_header = """# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '%s'
#
# Created by: PyQt5 UI code generator %s
#
# WARNING! All changes made in this file will be lost!
"""
_display_code = """
if __name__ == "__main__":
\timport sys
\tapp = QtWidgets.QApplication(sys.argv)
\t%(widgetname)s = QtWidgets.%(baseclass)s()
\tui = %(uiclass)s()
\tui.setupUi(%(widgetname)s)
\t%(widgetname)s.show()
\tsys.exit(app.exec_())
"""
def compileUiDir(dir, recurse=False, map=None, **compileUi_args):
"""compileUiDir(dir, recurse=False, map=None, **compileUi_args)
Creates Python modules from Qt Designer .ui files in a directory or
directory tree.
dir is the name of the directory to scan for files whose name ends with
'.ui'. By default the generated Python module is created in the same
directory ending with '.py'.
recurse is set if any sub-directories should be scanned. The default is
False.
map is an optional callable that is passed the name of the directory
containing the '.ui' file and the name of the Python module that will be
created. The callable should return a tuple of the name of the directory
in which the Python module will be created and the (possibly modified)
name of the module. The default is None.
compileUi_args are any additional keyword arguments that are passed to
the compileUi() function that is called to create each Python module.
"""
import os
# Compile a single .ui file.
def compile_ui(ui_dir, ui_file):
# Ignore if it doesn't seem to be a .ui file.
if ui_file.endswith('.ui'):
py_dir = ui_dir
py_file = ui_file[:-3] + '.py'
# Allow the caller to change the name of the .py file or generate
# it in a different directory.
if map is not None:
py_dir, py_file = map(py_dir, py_file)
# Make sure the destination directory exists.
try:
os.makedirs(py_dir)
except:
pass
ui_path = os.path.join(ui_dir, ui_file)
py_path = os.path.join(py_dir, py_file)
ui_file = open(ui_path, 'r')
py_file = open(py_path, 'w')
try:
compileUi(ui_file, py_file, **compileUi_args)
finally:
ui_file.close()
py_file.close()
if recurse:
for root, _, files in os.walk(dir):
for ui in files:
compile_ui(root, ui)
else:
for ui in os.listdir(dir):
if os.path.isfile(os.path.join(dir, ui)):
compile_ui(dir, ui)
def compileUi(uifile, pyfile, execute=False, indent=4, from_imports=False, resource_suffix='_rc', import_from='.'):
"""compileUi(uifile, pyfile, execute=False, indent=4, from_imports=False, resource_suffix='_rc', import_from='.')
Creates a Python module from a Qt Designer .ui file.
uifile is a file name or file-like object containing the .ui file.
pyfile is the file-like object to which the Python code will be written to.
execute is optionally set to generate extra Python code that allows the
code to be run as a standalone application. The default is False.
indent is the optional indentation width using spaces. If it is 0 then a
tab is used. The default is 4.
from_imports is optionally set to generate relative import statements. At
the moment this only applies to the import of resource modules.
resource_suffix is the suffix appended to the basename of any resource file
specified in the .ui file to create the name of the Python module generated
from the resource file by pyrcc4. The default is '_rc', i.e. if the .ui
file specified a resource file called foo.qrc then the corresponding Python
module is foo_rc.
import_from is optionally set to the package used for relative import
statements. The default is ``'.'``.
"""
from PyQt5.QtCore import PYQT_VERSION_STR
try:
uifname = uifile.name
except AttributeError:
uifname = uifile
indenter.indentwidth = indent
pyfile.write(_header % (uifname, PYQT_VERSION_STR))
winfo = compiler.UICompiler().compileUi(uifile, pyfile, from_imports, resource_suffix, import_from)
if execute:
indenter.write_code(_display_code % winfo)
def loadUiType(uifile, from_imports=False, resource_suffix='_rc', import_from='.'):
"""loadUiType(uifile, from_imports=False, resource_suffix='_rc', import_from='.') -> (form class, base class)
Load a Qt Designer .ui file and return the generated form class and the Qt
base class.
uifile is a file name or file-like object containing the .ui file.
from_imports is optionally set to generate relative import statements. At
the moment this only applies to the import of resource modules.
resource_suffix is the suffix appended to the basename of any resource file
specified in the .ui file to create the name of the Python module generated
from the resource file by pyrcc4. The default is '_rc', i.e. if the .ui
file specified a resource file called foo.qrc then the corresponding Python
module is foo_rc.
import_from is optionally set to the package used for relative import
statements. The default is ``'.'``.
"""
import sys
from PyQt5 import QtWidgets
if sys.hexversion >= 0x03000000:
from .port_v3.string_io import StringIO
else:
from .port_v2.string_io import StringIO
code_string = StringIO()
winfo = compiler.UICompiler().compileUi(uifile, code_string, from_imports, resource_suffix, import_from)
ui_globals = {}
exec(code_string.getvalue(), ui_globals)
return (ui_globals[winfo["uiclass"]], getattr(QtWidgets, winfo["baseclass"]))
def loadUi(uifile, baseinstance=None, package='', resource_suffix='_rc'):
"""loadUi(uifile, baseinstance=None, package='') -> widget
Load a Qt Designer .ui file and return an instance of the user interface.
uifile is a file name or file-like object containing the .ui file.
baseinstance is an optional instance of the Qt base class. If specified
then the user interface is created in it. Otherwise a new instance of the
base class is automatically created.
package is the optional package which is used as the base for any relative
imports of custom widgets.
resource_suffix is the suffix appended to the basename of any resource file
specified in the .ui file to create the name of the Python module generated
from the resource file by pyrcc4. The default is '_rc', i.e. if the .ui
file specified a resource file called foo.qrc then the corresponding Python
module is foo_rc.
"""
from .Loader.loader import DynamicUILoader
return DynamicUILoader(package).loadUi(uifile, baseinstance, resource_suffix)
# The list of directories that are searched for widget plugins.
from .objcreator import widgetPluginPath
|
pyqt/python-qt5 | PyQt5/uic/__init__.py | loadUiType | python | def loadUiType(uifile, from_imports=False, resource_suffix='_rc', import_from='.'):
import sys
from PyQt5 import QtWidgets
if sys.hexversion >= 0x03000000:
from .port_v3.string_io import StringIO
else:
from .port_v2.string_io import StringIO
code_string = StringIO()
winfo = compiler.UICompiler().compileUi(uifile, code_string, from_imports, resource_suffix, import_from)
ui_globals = {}
exec(code_string.getvalue(), ui_globals)
return (ui_globals[winfo["uiclass"]], getattr(QtWidgets, winfo["baseclass"])) | loadUiType(uifile, from_imports=False, resource_suffix='_rc', import_from='.') -> (form class, base class)
Load a Qt Designer .ui file and return the generated form class and the Qt
base class.
uifile is a file name or file-like object containing the .ui file.
from_imports is optionally set to generate relative import statements. At
the moment this only applies to the import of resource modules.
resource_suffix is the suffix appended to the basename of any resource file
specified in the .ui file to create the name of the Python module generated
from the resource file by pyrcc4. The default is '_rc', i.e. if the .ui
file specified a resource file called foo.qrc then the corresponding Python
module is foo_rc.
import_from is optionally set to the package used for relative import
statements. The default is ``'.'``. | train | https://github.com/pyqt/python-qt5/blob/c9ed180c56f6fd3521ffe5fb70904bc5d3f50e5f/PyQt5/uic/__init__.py#L170-L203 | [
"def compileUi(self, input_stream, output_stream, from_imports, resource_suffix, import_from):\n createCodeIndenter(output_stream)\n w = self.parse(input_stream, resource_suffix)\n\n indenter = getIndenter()\n indenter.write(\"\")\n\n self.factory._cpolicy._writeOutImports()\n\n for res in self._resources:\n if from_imports:\n write_code(\"from %s import %s\" % (import_from, res))\n else:\n write_code(\"import %s\" % res)\n\n return {\"widgetname\": str(w),\n \"uiclass\" : w.uiclass,\n \"baseclass\" : w.baseclass}\n"
] | #############################################################################
##
## Copyright (C) 2015 Riverbank Computing Limited.
## Copyright (C) 2006 Thorsten Marek.
## All right reserved.
##
## This file is part of PyQt.
##
## You may use this file under the terms of the GPL v2 or the revised BSD
## license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of the Riverbank Computing Limited nor the names
## of its contributors may be used to endorse or promote products
## derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
#############################################################################
__all__ = ("compileUi", "compileUiDir", "loadUiType", "loadUi", "widgetPluginPath")
from .Compiler import indenter, compiler
_header = """# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '%s'
#
# Created by: PyQt5 UI code generator %s
#
# WARNING! All changes made in this file will be lost!
"""
_display_code = """
if __name__ == "__main__":
\timport sys
\tapp = QtWidgets.QApplication(sys.argv)
\t%(widgetname)s = QtWidgets.%(baseclass)s()
\tui = %(uiclass)s()
\tui.setupUi(%(widgetname)s)
\t%(widgetname)s.show()
\tsys.exit(app.exec_())
"""
def compileUiDir(dir, recurse=False, map=None, **compileUi_args):
"""compileUiDir(dir, recurse=False, map=None, **compileUi_args)
Creates Python modules from Qt Designer .ui files in a directory or
directory tree.
dir is the name of the directory to scan for files whose name ends with
'.ui'. By default the generated Python module is created in the same
directory ending with '.py'.
recurse is set if any sub-directories should be scanned. The default is
False.
map is an optional callable that is passed the name of the directory
containing the '.ui' file and the name of the Python module that will be
created. The callable should return a tuple of the name of the directory
in which the Python module will be created and the (possibly modified)
name of the module. The default is None.
compileUi_args are any additional keyword arguments that are passed to
the compileUi() function that is called to create each Python module.
"""
import os
# Compile a single .ui file.
def compile_ui(ui_dir, ui_file):
# Ignore if it doesn't seem to be a .ui file.
if ui_file.endswith('.ui'):
py_dir = ui_dir
py_file = ui_file[:-3] + '.py'
# Allow the caller to change the name of the .py file or generate
# it in a different directory.
if map is not None:
py_dir, py_file = map(py_dir, py_file)
# Make sure the destination directory exists.
try:
os.makedirs(py_dir)
except:
pass
ui_path = os.path.join(ui_dir, ui_file)
py_path = os.path.join(py_dir, py_file)
ui_file = open(ui_path, 'r')
py_file = open(py_path, 'w')
try:
compileUi(ui_file, py_file, **compileUi_args)
finally:
ui_file.close()
py_file.close()
if recurse:
for root, _, files in os.walk(dir):
for ui in files:
compile_ui(root, ui)
else:
for ui in os.listdir(dir):
if os.path.isfile(os.path.join(dir, ui)):
compile_ui(dir, ui)
def compileUi(uifile, pyfile, execute=False, indent=4, from_imports=False, resource_suffix='_rc', import_from='.'):
"""compileUi(uifile, pyfile, execute=False, indent=4, from_imports=False, resource_suffix='_rc', import_from='.')
Creates a Python module from a Qt Designer .ui file.
uifile is a file name or file-like object containing the .ui file.
pyfile is the file-like object to which the Python code will be written to.
execute is optionally set to generate extra Python code that allows the
code to be run as a standalone application. The default is False.
indent is the optional indentation width using spaces. If it is 0 then a
tab is used. The default is 4.
from_imports is optionally set to generate relative import statements. At
the moment this only applies to the import of resource modules.
resource_suffix is the suffix appended to the basename of any resource file
specified in the .ui file to create the name of the Python module generated
from the resource file by pyrcc4. The default is '_rc', i.e. if the .ui
file specified a resource file called foo.qrc then the corresponding Python
module is foo_rc.
import_from is optionally set to the package used for relative import
statements. The default is ``'.'``.
"""
from PyQt5.QtCore import PYQT_VERSION_STR
try:
uifname = uifile.name
except AttributeError:
uifname = uifile
indenter.indentwidth = indent
pyfile.write(_header % (uifname, PYQT_VERSION_STR))
winfo = compiler.UICompiler().compileUi(uifile, pyfile, from_imports, resource_suffix, import_from)
if execute:
indenter.write_code(_display_code % winfo)
def loadUi(uifile, baseinstance=None, package='', resource_suffix='_rc'):
"""loadUi(uifile, baseinstance=None, package='') -> widget
Load a Qt Designer .ui file and return an instance of the user interface.
uifile is a file name or file-like object containing the .ui file.
baseinstance is an optional instance of the Qt base class. If specified
then the user interface is created in it. Otherwise a new instance of the
base class is automatically created.
package is the optional package which is used as the base for any relative
imports of custom widgets.
resource_suffix is the suffix appended to the basename of any resource file
specified in the .ui file to create the name of the Python module generated
from the resource file by pyrcc4. The default is '_rc', i.e. if the .ui
file specified a resource file called foo.qrc then the corresponding Python
module is foo_rc.
"""
from .Loader.loader import DynamicUILoader
return DynamicUILoader(package).loadUi(uifile, baseinstance, resource_suffix)
# The list of directories that are searched for widget plugins.
from .objcreator import widgetPluginPath
|
pyqt/python-qt5 | PyQt5/uic/__init__.py | loadUi | python | def loadUi(uifile, baseinstance=None, package='', resource_suffix='_rc'):
from .Loader.loader import DynamicUILoader
return DynamicUILoader(package).loadUi(uifile, baseinstance, resource_suffix) | loadUi(uifile, baseinstance=None, package='') -> widget
Load a Qt Designer .ui file and return an instance of the user interface.
uifile is a file name or file-like object containing the .ui file.
baseinstance is an optional instance of the Qt base class. If specified
then the user interface is created in it. Otherwise a new instance of the
base class is automatically created.
package is the optional package which is used as the base for any relative
imports of custom widgets.
resource_suffix is the suffix appended to the basename of any resource file
specified in the .ui file to create the name of the Python module generated
from the resource file by pyrcc4. The default is '_rc', i.e. if the .ui
file specified a resource file called foo.qrc then the corresponding Python
module is foo_rc. | train | https://github.com/pyqt/python-qt5/blob/c9ed180c56f6fd3521ffe5fb70904bc5d3f50e5f/PyQt5/uic/__init__.py#L206-L226 | [
"def loadUi(self, filename, toplevelInst, resource_suffix):\n self.toplevelInst = toplevelInst\n\n if hasattr(filename, 'read'):\n basedir = ''\n else:\n # Allow the filename to be a QString.\n filename = str(filename)\n basedir = os.path.dirname(filename)\n\n return self.parse(filename, resource_suffix, basedir)\n"
] | #############################################################################
##
## Copyright (C) 2015 Riverbank Computing Limited.
## Copyright (C) 2006 Thorsten Marek.
## All right reserved.
##
## This file is part of PyQt.
##
## You may use this file under the terms of the GPL v2 or the revised BSD
## license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of the Riverbank Computing Limited nor the names
## of its contributors may be used to endorse or promote products
## derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
#############################################################################
__all__ = ("compileUi", "compileUiDir", "loadUiType", "loadUi", "widgetPluginPath")
from .Compiler import indenter, compiler
_header = """# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '%s'
#
# Created by: PyQt5 UI code generator %s
#
# WARNING! All changes made in this file will be lost!
"""
_display_code = """
if __name__ == "__main__":
\timport sys
\tapp = QtWidgets.QApplication(sys.argv)
\t%(widgetname)s = QtWidgets.%(baseclass)s()
\tui = %(uiclass)s()
\tui.setupUi(%(widgetname)s)
\t%(widgetname)s.show()
\tsys.exit(app.exec_())
"""
def compileUiDir(dir, recurse=False, map=None, **compileUi_args):
"""compileUiDir(dir, recurse=False, map=None, **compileUi_args)
Creates Python modules from Qt Designer .ui files in a directory or
directory tree.
dir is the name of the directory to scan for files whose name ends with
'.ui'. By default the generated Python module is created in the same
directory ending with '.py'.
recurse is set if any sub-directories should be scanned. The default is
False.
map is an optional callable that is passed the name of the directory
containing the '.ui' file and the name of the Python module that will be
created. The callable should return a tuple of the name of the directory
in which the Python module will be created and the (possibly modified)
name of the module. The default is None.
compileUi_args are any additional keyword arguments that are passed to
the compileUi() function that is called to create each Python module.
"""
import os
# Compile a single .ui file.
def compile_ui(ui_dir, ui_file):
# Ignore if it doesn't seem to be a .ui file.
if ui_file.endswith('.ui'):
py_dir = ui_dir
py_file = ui_file[:-3] + '.py'
# Allow the caller to change the name of the .py file or generate
# it in a different directory.
if map is not None:
py_dir, py_file = map(py_dir, py_file)
# Make sure the destination directory exists.
try:
os.makedirs(py_dir)
except:
pass
ui_path = os.path.join(ui_dir, ui_file)
py_path = os.path.join(py_dir, py_file)
ui_file = open(ui_path, 'r')
py_file = open(py_path, 'w')
try:
compileUi(ui_file, py_file, **compileUi_args)
finally:
ui_file.close()
py_file.close()
if recurse:
for root, _, files in os.walk(dir):
for ui in files:
compile_ui(root, ui)
else:
for ui in os.listdir(dir):
if os.path.isfile(os.path.join(dir, ui)):
compile_ui(dir, ui)
def compileUi(uifile, pyfile, execute=False, indent=4, from_imports=False, resource_suffix='_rc', import_from='.'):
"""compileUi(uifile, pyfile, execute=False, indent=4, from_imports=False, resource_suffix='_rc', import_from='.')
Creates a Python module from a Qt Designer .ui file.
uifile is a file name or file-like object containing the .ui file.
pyfile is the file-like object to which the Python code will be written to.
execute is optionally set to generate extra Python code that allows the
code to be run as a standalone application. The default is False.
indent is the optional indentation width using spaces. If it is 0 then a
tab is used. The default is 4.
from_imports is optionally set to generate relative import statements. At
the moment this only applies to the import of resource modules.
resource_suffix is the suffix appended to the basename of any resource file
specified in the .ui file to create the name of the Python module generated
from the resource file by pyrcc4. The default is '_rc', i.e. if the .ui
file specified a resource file called foo.qrc then the corresponding Python
module is foo_rc.
import_from is optionally set to the package used for relative import
statements. The default is ``'.'``.
"""
from PyQt5.QtCore import PYQT_VERSION_STR
try:
uifname = uifile.name
except AttributeError:
uifname = uifile
indenter.indentwidth = indent
pyfile.write(_header % (uifname, PYQT_VERSION_STR))
winfo = compiler.UICompiler().compileUi(uifile, pyfile, from_imports, resource_suffix, import_from)
if execute:
indenter.write_code(_display_code % winfo)
def loadUiType(uifile, from_imports=False, resource_suffix='_rc', import_from='.'):
"""loadUiType(uifile, from_imports=False, resource_suffix='_rc', import_from='.') -> (form class, base class)
Load a Qt Designer .ui file and return the generated form class and the Qt
base class.
uifile is a file name or file-like object containing the .ui file.
from_imports is optionally set to generate relative import statements. At
the moment this only applies to the import of resource modules.
resource_suffix is the suffix appended to the basename of any resource file
specified in the .ui file to create the name of the Python module generated
from the resource file by pyrcc4. The default is '_rc', i.e. if the .ui
file specified a resource file called foo.qrc then the corresponding Python
module is foo_rc.
import_from is optionally set to the package used for relative import
statements. The default is ``'.'``.
"""
import sys
from PyQt5 import QtWidgets
if sys.hexversion >= 0x03000000:
from .port_v3.string_io import StringIO
else:
from .port_v2.string_io import StringIO
code_string = StringIO()
winfo = compiler.UICompiler().compileUi(uifile, code_string, from_imports, resource_suffix, import_from)
ui_globals = {}
exec(code_string.getvalue(), ui_globals)
return (ui_globals[winfo["uiclass"]], getattr(QtWidgets, winfo["baseclass"]))
# The list of directories that are searched for widget plugins.
from .objcreator import widgetPluginPath
|
pyqt/python-qt5 | PyQt5/uic/icon_cache.py | IconCache.get_icon | python | def get_icon(self, iconset):
# Handle a themed icon.
theme = iconset.attrib.get('theme')
if theme is not None:
return self._object_factory.createQObject("QIcon.fromTheme",
'icon', (self._object_factory.asString(theme), ),
is_attribute=False)
# Handle an empty iconset property.
if iconset.text is None:
return None
iset = _IconSet(iconset, self._base_dir)
try:
idx = self._cache.index(iset)
except ValueError:
idx = -1
if idx >= 0:
# Return the icon from the cache.
iset = self._cache[idx]
else:
# Follow uic's naming convention.
name = 'icon'
idx = len(self._cache)
if idx > 0:
name += str(idx)
icon = self._object_factory.createQObject("QIcon", name, (),
is_attribute=False)
iset.set_icon(icon, self._qtgui_module)
self._cache.append(iset)
return iset.icon | Return an icon described by the given iconset tag. | train | https://github.com/pyqt/python-qt5/blob/c9ed180c56f6fd3521ffe5fb70904bc5d3f50e5f/PyQt5/uic/icon_cache.py#L44-L81 | [
"def set_icon(self, icon, qtgui_module):\n \"\"\"Save the icon and set its attributes.\"\"\"\n\n if self._use_fallback:\n icon.addFile(self._fallback)\n else:\n for role, pixmap in self._roles.items():\n if role.endswith(\"off\"):\n mode = role[:-3]\n state = qtgui_module.QIcon.Off\n elif role.endswith(\"on\"):\n mode = role[:-2]\n state = qtgui_module.QIcon.On\n else:\n continue\n\n mode = getattr(qtgui_module.QIcon, mode.title())\n\n if pixmap:\n icon.addPixmap(qtgui_module.QPixmap(pixmap), mode, state)\n else:\n icon.addPixmap(qtgui_module.QPixmap(), mode, state)\n\n self.icon = icon\n"
] | class IconCache(object):
"""Maintain a cache of icons. If an icon is used more than once by a GUI
then ensure that only one copy is created.
"""
def __init__(self, object_factory, qtgui_module):
"""Initialise the cache."""
self._object_factory = object_factory
self._qtgui_module = qtgui_module
self._base_dir = ''
self._cache = []
def set_base_dir(self, base_dir):
""" Set the base directory to be used for all relative filenames. """
self._base_dir = base_dir
|
pyqt/python-qt5 | PyQt5/uic/icon_cache.py | _IconSet._file_name | python | def _file_name(fname, base_dir):
fname = fname.replace("\\", "\\\\")
if base_dir != '' and fname[0] != ':' and not os.path.isabs(fname):
fname = os.path.join(base_dir, fname)
return fname | Convert a relative filename if we have a base directory. | train | https://github.com/pyqt/python-qt5/blob/c9ed180c56f6fd3521ffe5fb70904bc5d3f50e5f/PyQt5/uic/icon_cache.py#L109-L117 | null | class _IconSet(object):
"""An icon set, ie. the mode and state and the pixmap used for each."""
def __init__(self, iconset, base_dir):
"""Initialise the icon set from an XML tag."""
# Set the pre-Qt v4.4 fallback (ie. with no roles).
self._fallback = self._file_name(iconset.text, base_dir)
self._use_fallback = True
# Parse the icon set.
self._roles = {}
for i in iconset:
file_name = i.text
if file_name is not None:
file_name = self._file_name(file_name, base_dir)
self._roles[i.tag] = file_name
self._use_fallback = False
# There is no real icon yet.
self.icon = None
@staticmethod
def set_icon(self, icon, qtgui_module):
"""Save the icon and set its attributes."""
if self._use_fallback:
icon.addFile(self._fallback)
else:
for role, pixmap in self._roles.items():
if role.endswith("off"):
mode = role[:-3]
state = qtgui_module.QIcon.Off
elif role.endswith("on"):
mode = role[:-2]
state = qtgui_module.QIcon.On
else:
continue
mode = getattr(qtgui_module.QIcon, mode.title())
if pixmap:
icon.addPixmap(qtgui_module.QPixmap(pixmap), mode, state)
else:
icon.addPixmap(qtgui_module.QPixmap(), mode, state)
self.icon = icon
def __eq__(self, other):
"""Compare two icon sets for equality."""
if not isinstance(other, type(self)):
return NotImplemented
if self._use_fallback:
if other._use_fallback:
return self._fallback == other._fallback
return False
if other._use_fallback:
return False
return self._roles == other._roles
|
pyqt/python-qt5 | PyQt5/uic/icon_cache.py | _IconSet.set_icon | python | def set_icon(self, icon, qtgui_module):
if self._use_fallback:
icon.addFile(self._fallback)
else:
for role, pixmap in self._roles.items():
if role.endswith("off"):
mode = role[:-3]
state = qtgui_module.QIcon.Off
elif role.endswith("on"):
mode = role[:-2]
state = qtgui_module.QIcon.On
else:
continue
mode = getattr(qtgui_module.QIcon, mode.title())
if pixmap:
icon.addPixmap(qtgui_module.QPixmap(pixmap), mode, state)
else:
icon.addPixmap(qtgui_module.QPixmap(), mode, state)
self.icon = icon | Save the icon and set its attributes. | train | https://github.com/pyqt/python-qt5/blob/c9ed180c56f6fd3521ffe5fb70904bc5d3f50e5f/PyQt5/uic/icon_cache.py#L119-L142 | null | class _IconSet(object):
"""An icon set, ie. the mode and state and the pixmap used for each."""
def __init__(self, iconset, base_dir):
"""Initialise the icon set from an XML tag."""
# Set the pre-Qt v4.4 fallback (ie. with no roles).
self._fallback = self._file_name(iconset.text, base_dir)
self._use_fallback = True
# Parse the icon set.
self._roles = {}
for i in iconset:
file_name = i.text
if file_name is not None:
file_name = self._file_name(file_name, base_dir)
self._roles[i.tag] = file_name
self._use_fallback = False
# There is no real icon yet.
self.icon = None
@staticmethod
def _file_name(fname, base_dir):
""" Convert a relative filename if we have a base directory. """
fname = fname.replace("\\", "\\\\")
if base_dir != '' and fname[0] != ':' and not os.path.isabs(fname):
fname = os.path.join(base_dir, fname)
return fname
def __eq__(self, other):
"""Compare two icon sets for equality."""
if not isinstance(other, type(self)):
return NotImplemented
if self._use_fallback:
if other._use_fallback:
return self._fallback == other._fallback
return False
if other._use_fallback:
return False
return self._roles == other._roles
|
pyqt/python-qt5 | util.py | createqtconf | python | def createqtconf():
template = """[Paths]
Prefix = {path}
Binaries = {path}
"""
import PyQt5
exedir = os.path.dirname(sys.executable)
qtpath = os.path.join(exedir, "qt.conf")
pyqt5path = os.path.abspath(PyQt5.__file__)
binpath = os.path.dirname(pyqt5path).replace("\\", "/")
try:
with open(qtpath, "w") as f:
f.write(template.format(path=binpath))
except:
pass | Create a qt.conf file next to the current executable | train | https://github.com/pyqt/python-qt5/blob/c9ed180c56f6fd3521ffe5fb70904bc5d3f50e5f/util.py#L5-L24 | null | import os
import sys
|
pyqt/python-qt5 | setup.py | get_package_data | python | def get_package_data():
package_data = dict()
package_data['PyQt5'] = list()
for subdir in ("doc/", "examples/", "include/",
"mkspecs/", "plugins/", "qml/",
"qsci/", "sip/", "translations/", "uic/"):
abspath = os.path.abspath("PyQt5/" + subdir)
for root, dirs, files in os.walk(abspath):
for f in files:
fpath = os.path.join(root, f)
relpath = os.path.relpath(fpath, abspath)
relpath = relpath.replace("\\", "/")
package_data['PyQt5'].append(subdir + relpath)
package_data['PyQt5'].extend(["*.exe",
"*.dll",
"*.pyd",
"*.conf",
"*.api",
"*.qm",
"*.bat"])
return package_data | Include all files from all sub-directories | train | https://github.com/pyqt/python-qt5/blob/c9ed180c56f6fd3521ffe5fb70904bc5d3f50e5f/setup.py#L35-L58 | null | import os
import sys
import platform
from setuptools import setup, find_packages
import util
if os.name != "nt":
print("""
\nThe binaries distributed with this version are Windows only,
- If you are on linux, look at
github.com/pyqt/python-qt5/wiki/Compiling-PyQt5-on-Ubuntu-12.04.
- If you are on OS X, look at the OSX specific port:
github.com/pyqt/python-qt5-mavericks
""")
sys.exit()
if "64bit" not in platform.architecture():
print("""
\nThe binaries distributed wtih this version are for the
64-bit version of Python only.
""")
sys.exit()
def get_version():
repo_dir = os.path.dirname(__file__)
sys.path.insert(0, repo_dir)
import PyQt5
return PyQt5.__version__
def get_data_files():
return [('', ['qt.conf'])]
def get_readme():
with open('README.txt') as f:
readme = f.read()
return readme
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities'
]
setup(
name='python-qt5',
version=get_version(),
description='PyQt5',
long_description=get_readme(),
author='Marcus Ottosson',
author_email='marcus@abstractfactory.com',
url='https://github.com/pyqt/python-qt5',
license='GPLv3',
packages=find_packages(),
zip_safe=False,
classifiers=classifiers,
package_data=get_package_data(),
data_files=get_data_files()
)
|
pyqt/python-qt5 | PyQt5/uic/driver.py | Driver._preview | python | def _preview(self):
from PyQt5 import QtWidgets
app = QtWidgets.QApplication([self._ui_file])
widget = loadUi(self._ui_file)
widget.show()
return app.exec_() | Preview the .ui file. Return the exit status to be passed back to
the parent process. | train | https://github.com/pyqt/python-qt5/blob/c9ed180c56f6fd3521ffe5fb70904bc5d3f50e5f/PyQt5/uic/driver.py#L63-L74 | [
"def loadUi(uifile, baseinstance=None, package='', resource_suffix='_rc'):\n \"\"\"loadUi(uifile, baseinstance=None, package='') -> widget\n\n Load a Qt Designer .ui file and return an instance of the user interface.\n\n uifile is a file name or file-like object containing the .ui file.\n baseinstance is an optional instance of the Qt base class. If specified\n then the user interface is created in it. Otherwise a new instance of the\n base class is automatically created.\n package is the optional package which is used as the base for any relative\n imports of custom widgets.\n resource_suffix is the suffix appended to the basename of any resource file\n specified in the .ui file to create the name of the Python module generated\n from the resource file by pyrcc4. The default is '_rc', i.e. if the .ui\n file specified a resource file called foo.qrc then the corresponding Python\n module is foo_rc.\n \"\"\"\n\n from .Loader.loader import DynamicUILoader\n\n return DynamicUILoader(package).loadUi(uifile, baseinstance, resource_suffix)\n"
] | class Driver(object):
""" This encapsulates access to the pyuic functionality so that it can be
called by code that is Python v2/v3 specific.
"""
LOGGER_NAME = 'PyQt5.uic'
def __init__(self, opts, ui_file):
""" Initialise the object. opts is the parsed options. ui_file is the
name of the .ui file.
"""
if opts.debug:
logger = logging.getLogger(self.LOGGER_NAME)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(name)s: %(message)s"))
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
self._opts = opts
self._ui_file = ui_file
def invoke(self):
""" Invoke the action as specified by the parsed options. Returns 0 if
there was no error.
"""
if self._opts.preview:
return self._preview()
self._generate()
return 0
def _generate(self):
""" Generate the Python code. """
needs_close = False
if sys.hexversion >= 0x03000000:
if self._opts.output == '-':
from io import TextIOWrapper
pyfile = TextIOWrapper(sys.stdout.buffer, encoding='utf8')
else:
pyfile = open(self._opts.output, 'wt', encoding='utf8')
needs_close = True
else:
if self._opts.output == '-':
pyfile = sys.stdout
else:
pyfile = open(self._opts.output, 'wt')
needs_close = True
import_from = self._opts.import_from
if import_from:
from_imports = True
elif self._opts.from_imports:
from_imports = True
import_from = '.'
else:
from_imports = False
compileUi(self._ui_file, pyfile, self._opts.execute, self._opts.indent,
from_imports, self._opts.resource_suffix, import_from)
if needs_close:
pyfile.close()
def on_IOError(self, e):
""" Handle an IOError exception. """
sys.stderr.write("Error: %s: \"%s\"\n" % (e.strerror, e.filename))
def on_SyntaxError(self, e):
""" Handle a SyntaxError exception. """
sys.stderr.write("Error in input file: %s\n" % e)
def on_NoSuchWidgetError(self, e):
""" Handle a NoSuchWidgetError exception. """
sys.stderr.write(str(e) + "\n")
def on_Exception(self, e):
""" Handle a generic exception. """
if logging.getLogger(self.LOGGER_NAME).level == logging.DEBUG:
import traceback
traceback.print_exception(*sys.exc_info())
else:
from PyQt5 import QtCore
sys.stderr.write("""An unexpected error occurred.
Check that you are using the latest version of PyQt5 and send an error report to
support@riverbankcomputing.com, including the following information:
* your version of PyQt (%s)
* the UI file that caused this error
* the debug output of pyuic5 (use the -d flag when calling pyuic5)
""" % QtCore.PYQT_VERSION_STR)
|
pyqt/python-qt5 | PyQt5/uic/driver.py | Driver._generate | python | def _generate(self):
needs_close = False
if sys.hexversion >= 0x03000000:
if self._opts.output == '-':
from io import TextIOWrapper
pyfile = TextIOWrapper(sys.stdout.buffer, encoding='utf8')
else:
pyfile = open(self._opts.output, 'wt', encoding='utf8')
needs_close = True
else:
if self._opts.output == '-':
pyfile = sys.stdout
else:
pyfile = open(self._opts.output, 'wt')
needs_close = True
import_from = self._opts.import_from
if import_from:
from_imports = True
elif self._opts.from_imports:
from_imports = True
import_from = '.'
else:
from_imports = False
compileUi(self._ui_file, pyfile, self._opts.execute, self._opts.indent,
from_imports, self._opts.resource_suffix, import_from)
if needs_close:
pyfile.close() | Generate the Python code. | train | https://github.com/pyqt/python-qt5/blob/c9ed180c56f6fd3521ffe5fb70904bc5d3f50e5f/PyQt5/uic/driver.py#L76-L110 | [
"def compileUi(uifile, pyfile, execute=False, indent=4, from_imports=False, resource_suffix='_rc', import_from='.'):\n \"\"\"compileUi(uifile, pyfile, execute=False, indent=4, from_imports=False, resource_suffix='_rc', import_from='.')\n\n Creates a Python module from a Qt Designer .ui file.\n\n uifile is a file name or file-like object containing the .ui file.\n pyfile is the file-like object to which the Python code will be written to.\n execute is optionally set to generate extra Python code that allows the\n code to be run as a standalone application. The default is False.\n indent is the optional indentation width using spaces. If it is 0 then a\n tab is used. The default is 4.\n from_imports is optionally set to generate relative import statements. At\n the moment this only applies to the import of resource modules.\n resource_suffix is the suffix appended to the basename of any resource file\n specified in the .ui file to create the name of the Python module generated\n from the resource file by pyrcc4. The default is '_rc', i.e. if the .ui\n file specified a resource file called foo.qrc then the corresponding Python\n module is foo_rc.\n import_from is optionally set to the package used for relative import\n statements. The default is ``'.'``.\n \"\"\"\n\n from PyQt5.QtCore import PYQT_VERSION_STR\n\n try:\n uifname = uifile.name\n except AttributeError:\n uifname = uifile\n\n indenter.indentwidth = indent\n\n pyfile.write(_header % (uifname, PYQT_VERSION_STR))\n\n winfo = compiler.UICompiler().compileUi(uifile, pyfile, from_imports, resource_suffix, import_from)\n\n if execute:\n indenter.write_code(_display_code % winfo)\n"
] | class Driver(object):
""" This encapsulates access to the pyuic functionality so that it can be
called by code that is Python v2/v3 specific.
"""
LOGGER_NAME = 'PyQt5.uic'
def __init__(self, opts, ui_file):
""" Initialise the object. opts is the parsed options. ui_file is the
name of the .ui file.
"""
if opts.debug:
logger = logging.getLogger(self.LOGGER_NAME)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(name)s: %(message)s"))
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
self._opts = opts
self._ui_file = ui_file
def invoke(self):
""" Invoke the action as specified by the parsed options. Returns 0 if
there was no error.
"""
if self._opts.preview:
return self._preview()
self._generate()
return 0
def _preview(self):
""" Preview the .ui file. Return the exit status to be passed back to
the parent process.
"""
from PyQt5 import QtWidgets
app = QtWidgets.QApplication([self._ui_file])
widget = loadUi(self._ui_file)
widget.show()
return app.exec_()
def on_IOError(self, e):
""" Handle an IOError exception. """
sys.stderr.write("Error: %s: \"%s\"\n" % (e.strerror, e.filename))
def on_SyntaxError(self, e):
""" Handle a SyntaxError exception. """
sys.stderr.write("Error in input file: %s\n" % e)
def on_NoSuchWidgetError(self, e):
""" Handle a NoSuchWidgetError exception. """
sys.stderr.write(str(e) + "\n")
def on_Exception(self, e):
""" Handle a generic exception. """
if logging.getLogger(self.LOGGER_NAME).level == logging.DEBUG:
import traceback
traceback.print_exception(*sys.exc_info())
else:
from PyQt5 import QtCore
sys.stderr.write("""An unexpected error occurred.
Check that you are using the latest version of PyQt5 and send an error report to
support@riverbankcomputing.com, including the following information:
* your version of PyQt (%s)
* the UI file that caused this error
* the debug output of pyuic5 (use the -d flag when calling pyuic5)
""" % QtCore.PYQT_VERSION_STR)
|
pyqt/python-qt5 | PyQt5/uic/driver.py | Driver.on_IOError | python | def on_IOError(self, e):
sys.stderr.write("Error: %s: \"%s\"\n" % (e.strerror, e.filename)) | Handle an IOError exception. | train | https://github.com/pyqt/python-qt5/blob/c9ed180c56f6fd3521ffe5fb70904bc5d3f50e5f/PyQt5/uic/driver.py#L112-L115 | null | class Driver(object):
""" This encapsulates access to the pyuic functionality so that it can be
called by code that is Python v2/v3 specific.
"""
LOGGER_NAME = 'PyQt5.uic'
def __init__(self, opts, ui_file):
""" Initialise the object. opts is the parsed options. ui_file is the
name of the .ui file.
"""
if opts.debug:
logger = logging.getLogger(self.LOGGER_NAME)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(name)s: %(message)s"))
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
self._opts = opts
self._ui_file = ui_file
def invoke(self):
""" Invoke the action as specified by the parsed options. Returns 0 if
there was no error.
"""
if self._opts.preview:
return self._preview()
self._generate()
return 0
def _preview(self):
""" Preview the .ui file. Return the exit status to be passed back to
the parent process.
"""
from PyQt5 import QtWidgets
app = QtWidgets.QApplication([self._ui_file])
widget = loadUi(self._ui_file)
widget.show()
return app.exec_()
def _generate(self):
""" Generate the Python code. """
needs_close = False
if sys.hexversion >= 0x03000000:
if self._opts.output == '-':
from io import TextIOWrapper
pyfile = TextIOWrapper(sys.stdout.buffer, encoding='utf8')
else:
pyfile = open(self._opts.output, 'wt', encoding='utf8')
needs_close = True
else:
if self._opts.output == '-':
pyfile = sys.stdout
else:
pyfile = open(self._opts.output, 'wt')
needs_close = True
import_from = self._opts.import_from
if import_from:
from_imports = True
elif self._opts.from_imports:
from_imports = True
import_from = '.'
else:
from_imports = False
compileUi(self._ui_file, pyfile, self._opts.execute, self._opts.indent,
from_imports, self._opts.resource_suffix, import_from)
if needs_close:
pyfile.close()
def on_SyntaxError(self, e):
""" Handle a SyntaxError exception. """
sys.stderr.write("Error in input file: %s\n" % e)
def on_NoSuchWidgetError(self, e):
""" Handle a NoSuchWidgetError exception. """
sys.stderr.write(str(e) + "\n")
def on_Exception(self, e):
""" Handle a generic exception. """
if logging.getLogger(self.LOGGER_NAME).level == logging.DEBUG:
import traceback
traceback.print_exception(*sys.exc_info())
else:
from PyQt5 import QtCore
sys.stderr.write("""An unexpected error occurred.
Check that you are using the latest version of PyQt5 and send an error report to
support@riverbankcomputing.com, including the following information:
* your version of PyQt (%s)
* the UI file that caused this error
* the debug output of pyuic5 (use the -d flag when calling pyuic5)
""" % QtCore.PYQT_VERSION_STR)
|
pyqt/python-qt5 | PyQt5/uic/driver.py | Driver.on_Exception | python | def on_Exception(self, e):
if logging.getLogger(self.LOGGER_NAME).level == logging.DEBUG:
import traceback
traceback.print_exception(*sys.exc_info())
else:
from PyQt5 import QtCore
sys.stderr.write("""An unexpected error occurred.
Check that you are using the latest version of PyQt5 and send an error report to
support@riverbankcomputing.com, including the following information:
* your version of PyQt (%s)
* the UI file that caused this error
* the debug output of pyuic5 (use the -d flag when calling pyuic5)
""" % QtCore.PYQT_VERSION_STR) | Handle a generic exception. | train | https://github.com/pyqt/python-qt5/blob/c9ed180c56f6fd3521ffe5fb70904bc5d3f50e5f/PyQt5/uic/driver.py#L127-L144 | null | class Driver(object):
""" This encapsulates access to the pyuic functionality so that it can be
called by code that is Python v2/v3 specific.
"""
LOGGER_NAME = 'PyQt5.uic'
def __init__(self, opts, ui_file):
""" Initialise the object. opts is the parsed options. ui_file is the
name of the .ui file.
"""
if opts.debug:
logger = logging.getLogger(self.LOGGER_NAME)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(name)s: %(message)s"))
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
self._opts = opts
self._ui_file = ui_file
def invoke(self):
""" Invoke the action as specified by the parsed options. Returns 0 if
there was no error.
"""
if self._opts.preview:
return self._preview()
self._generate()
return 0
def _preview(self):
""" Preview the .ui file. Return the exit status to be passed back to
the parent process.
"""
from PyQt5 import QtWidgets
app = QtWidgets.QApplication([self._ui_file])
widget = loadUi(self._ui_file)
widget.show()
return app.exec_()
def _generate(self):
""" Generate the Python code. """
needs_close = False
if sys.hexversion >= 0x03000000:
if self._opts.output == '-':
from io import TextIOWrapper
pyfile = TextIOWrapper(sys.stdout.buffer, encoding='utf8')
else:
pyfile = open(self._opts.output, 'wt', encoding='utf8')
needs_close = True
else:
if self._opts.output == '-':
pyfile = sys.stdout
else:
pyfile = open(self._opts.output, 'wt')
needs_close = True
import_from = self._opts.import_from
if import_from:
from_imports = True
elif self._opts.from_imports:
from_imports = True
import_from = '.'
else:
from_imports = False
compileUi(self._ui_file, pyfile, self._opts.execute, self._opts.indent,
from_imports, self._opts.resource_suffix, import_from)
if needs_close:
pyfile.close()
def on_IOError(self, e):
""" Handle an IOError exception. """
sys.stderr.write("Error: %s: \"%s\"\n" % (e.strerror, e.filename))
def on_SyntaxError(self, e):
""" Handle a SyntaxError exception. """
sys.stderr.write("Error in input file: %s\n" % e)
def on_NoSuchWidgetError(self, e):
""" Handle a NoSuchWidgetError exception. """
sys.stderr.write(str(e) + "\n")
|
pyqt/python-qt5 | PyQt5/uic/objcreator.py | QObjectCreator.load_plugin | python | def load_plugin(filename, plugin_globals, plugin_locals):
plugin = open(filename, 'rU')
try:
exec(plugin.read(), plugin_globals, plugin_locals)
except ImportError:
return False
except Exception as e:
raise WidgetPluginError("%s: %s" % (e.__class__, str(e)))
finally:
plugin.close()
return True | Load the plugin from the given file. Return True if the plugin was
loaded, or False if it wanted to be ignored. Raise an exception if
there was an error. | train | https://github.com/pyqt/python-qt5/blob/c9ed180c56f6fd3521ffe5fb70904bc5d3f50e5f/PyQt5/uic/objcreator.py#L146-L163 | null | class QObjectCreator(object):
def __init__(self, creatorPolicy):
self._cpolicy = creatorPolicy
self._cwFilters = []
self._modules = self._cpolicy.createQtGuiWidgetsWrappers()
# Get the optional plugins.
for plugindir in widgetPluginPath:
try:
plugins = os.listdir(plugindir)
except:
plugins = []
for filename in plugins:
if not filename.endswith('.py'):
continue
filename = os.path.join(plugindir, filename)
plugin_globals = {
"MODULE": MODULE,
"CW_FILTER": CW_FILTER,
"MATCH": MATCH,
"NO_MATCH": NO_MATCH}
plugin_locals = {}
if self.load_plugin(filename, plugin_globals, plugin_locals):
pluginType = plugin_locals["pluginType"]
if pluginType == MODULE:
modinfo = plugin_locals["moduleInformation"]()
self._modules.append(self._cpolicy.createModuleWrapper(*modinfo))
elif pluginType == CW_FILTER:
self._cwFilters.append(plugin_locals["getFilter"]())
else:
raise WidgetPluginError("Unknown plugin type of %s" % filename)
self._customWidgets = self._cpolicy.createCustomWidgetLoader()
self._modules.append(self._customWidgets)
def createQObject(self, classname, *args, **kwargs):
# Handle regular and custom widgets.
factory = self.findQObjectType(classname)
if factory is None:
# Handle scoped names, typically static factory methods.
parts = classname.split('.')
if len(parts) > 1:
factory = self.findQObjectType(parts[0])
if factory is not None:
for part in parts[1:]:
factory = getattr(factory, part, None)
if factory is None:
break
if factory is None:
raise NoSuchWidgetError(classname)
return self._cpolicy.instantiate(factory, *args, **kwargs)
def invoke(self, rname, method, args=()):
return self._cpolicy.invoke(rname, method, args)
def findQObjectType(self, classname):
for module in self._modules:
w = module.search(classname)
if w is not None:
return w
return None
def getSlot(self, obj, slotname):
return self._cpolicy.getSlot(obj, slotname)
def asString(self, s):
return self._cpolicy.asString(s)
def addCustomWidget(self, widgetClass, baseClass, module):
for cwFilter in self._cwFilters:
match, result = cwFilter(widgetClass, baseClass, module)
if match:
widgetClass, baseClass, module = result
break
self._customWidgets.addCustomWidget(widgetClass, baseClass, module)
@staticmethod
|
miguelgrinberg/Flask-SocketIO | flask_socketio/__init__.py | emit | python | def emit(event, *args, **kwargs):
if 'namespace' in kwargs:
namespace = kwargs['namespace']
else:
namespace = flask.request.namespace
callback = kwargs.get('callback')
broadcast = kwargs.get('broadcast')
room = kwargs.get('room')
if room is None and not broadcast:
room = flask.request.sid
include_self = kwargs.get('include_self', True)
ignore_queue = kwargs.get('ignore_queue', False)
socketio = flask.current_app.extensions['socketio']
return socketio.emit(event, *args, namespace=namespace, room=room,
include_self=include_self, callback=callback,
ignore_queue=ignore_queue) | Emit a SocketIO event.
This function emits a SocketIO event to one or more connected clients. A
JSON blob can be attached to the event as payload. This is a function that
can only be called from a SocketIO event handler, as in obtains some
information from the current client context. Example::
@socketio.on('my event')
def handle_my_custom_event(json):
emit('my response', {'data': 42})
:param event: The name of the user event to emit.
:param args: A dictionary with the JSON data to send as payload.
:param namespace: The namespace under which the message is to be sent.
Defaults to the namespace used by the originating event.
A ``'/'`` can be used to explicitly specify the global
namespace.
:param callback: Callback function to invoke with the client's
acknowledgement.
:param broadcast: ``True`` to send the message to all clients, or ``False``
to only reply to the sender of the originating event.
:param room: Send the message to all the users in the given room. If this
argument is set, then broadcast is implied to be ``True``.
:param include_self: ``True`` to include the sender when broadcasting or
addressing a room, or ``False`` to send to everyone
but the sender.
:param ignore_queue: Only used when a message queue is configured. If
set to ``True``, the event is emitted to the
clients directly, without going through the queue.
This is more efficient, but only works when a
single server process is used, or when there is a
single addresee. It is recommended to always leave
this parameter with its default value of ``False``. | train | https://github.com/miguelgrinberg/Flask-SocketIO/blob/4bef800d5e7ba7d98a6f4cd94191ff0b4496c334/flask_socketio/__init__.py#L676-L726 | null | import os
import sys
# make sure gevent-socketio is not installed, as it conflicts with
# python-socketio
gevent_socketio_found = True
try:
from socketio import socketio_manage
except ImportError:
gevent_socketio_found = False
if gevent_socketio_found:
print('The gevent-socketio package is incompatible with this version of '
'the Flask-SocketIO extension. Please uninstall it, and then '
'install the latest version of python-socketio in its place.')
sys.exit(1)
import socketio
import flask
from flask import _request_ctx_stack, json as flask_json
from flask.sessions import SessionMixin
from werkzeug.debug import DebuggedApplication
from werkzeug.serving import run_with_reloader
from .namespace import Namespace
from .test_client import SocketIOTestClient
__version__ = '3.3.2'
class _SocketIOMiddleware(socketio.WSGIApp):
"""This WSGI middleware simply exposes the Flask application in the WSGI
environment before executing the request.
"""
def __init__(self, socketio_app, flask_app, socketio_path='socket.io'):
self.flask_app = flask_app
super(_SocketIOMiddleware, self).__init__(socketio_app,
flask_app.wsgi_app,
socketio_path=socketio_path)
def __call__(self, environ, start_response):
environ = environ.copy()
environ['flask.app'] = self.flask_app
return super(_SocketIOMiddleware, self).__call__(environ,
start_response)
class _ManagedSession(dict, SessionMixin):
"""This class is used for user sessions that are managed by
Flask-SocketIO. It is simple dict, expanded with the Flask session
attributes."""
pass
class SocketIO(object):
"""Create a Flask-SocketIO server.
:param app: The flask application instance. If the application instance
isn't known at the time this class is instantiated, then call
``socketio.init_app(app)`` once the application instance is
available.
:param manage_session: If set to ``True``, this extension manages the user
session for Socket.IO events. If set to ``False``,
Flask's own session management is used. When using
Flask's cookie based sessions it is recommended that
you leave this set to the default of ``True``. When
using server-side sessions, a ``False`` setting
enables sharing the user session between HTTP routes
and Socket.IO events.
:param message_queue: A connection URL for a message queue service the
server can use for multi-process communication. A
message queue is not required when using a single
server process.
:param channel: The channel name, when using a message queue. If a channel
isn't specified, a default channel will be used. If
multiple clusters of SocketIO processes need to use the
same message queue without interfering with each other, then
each cluster should use a different channel.
:param path: The path where the Socket.IO server is exposed. Defaults to
``'socket.io'``. Leave this as is unless you know what you are
doing.
:param resource: Alias to ``path``.
:param kwargs: Socket.IO and Engine.IO server options.
The Socket.IO server options are detailed below:
:param client_manager: The client manager instance that will manage the
client list. When this is omitted, the client list
is stored in an in-memory structure, so the use of
multiple connected servers is not possible. In most
cases, this argument does not need to be set
explicitly.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``.
:param binary: ``True`` to support binary payloads, ``False`` to treat all
payloads as text. On Python 2, if this is set to ``True``,
``unicode`` values are treated as text, and ``str`` and
``bytes`` values are treated as binary. This option has no
effect on Python 3, where text and binary payloads are
always automatically discovered.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions. To use the same json encoder and decoder as a Flask
application, use ``flask.json``.
The Engine.IO server configuration supports the following settings:
:param async_mode: The asynchronous model to use. See the Deployment
section in the documentation for a description of the
available options. Valid async modes are
``threading``, ``eventlet``, ``gevent`` and
``gevent_uwsgi``. If this argument is not given,
``eventlet`` is tried first, then ``gevent_uwsgi``,
then ``gevent``, and finally ``threading``. The
first async mode that has all its dependencies installed
is then one that is chosen.
:param ping_timeout: The time in seconds that the client waits for the
server to respond before disconnecting. The default is
60 seconds.
:param ping_interval: The interval in seconds at which the client pings
the server. The default is 25 seconds.
:param max_http_buffer_size: The maximum size of a message when using the
polling transport. The default is 100,000,000
bytes.
:param allow_upgrades: Whether to allow transport upgrades or not. The
default is ``True``.
:param http_compression: Whether to compress packages when using the
polling transport. The default is ``True``.
:param compression_threshold: Only compress messages when their byte size
is greater than this value. The default is
1024 bytes.
:param cookie: Name of the HTTP cookie that contains the client session
id. If set to ``None``, a cookie is not sent to the client.
The default is ``'io'``.
:param cors_allowed_origins: List of origins that are allowed to connect
to this server. All origins are allowed by
default.
:param cors_credentials: Whether credentials (cookies, authentication) are
allowed in requests to this server. The default is
``True``.
:param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
a logger object to use. To disable logging set to
``False``. The default is ``False``.
"""
def __init__(self, app=None, **kwargs):
self.server = None
self.server_options = {}
self.wsgi_server = None
self.handlers = []
self.namespace_handlers = []
self.exception_handlers = {}
self.default_exception_handler = None
self.manage_session = True
# We can call init_app when:
# - we were given the Flask app instance (standard initialization)
# - we were not given the app, but we were given a message_queue
# (standard initialization for auxiliary process)
# In all other cases we collect the arguments and assume the client
# will call init_app from an app factory function.
if app is not None or 'message_queue' in kwargs:
self.init_app(app, **kwargs)
else:
self.server_options.update(kwargs)
def init_app(self, app, **kwargs):
if app is not None:
if not hasattr(app, 'extensions'):
app.extensions = {} # pragma: no cover
app.extensions['socketio'] = self
self.server_options.update(kwargs)
self.manage_session = self.server_options.pop('manage_session',
self.manage_session)
if 'client_manager' not in self.server_options:
url = self.server_options.pop('message_queue', None)
channel = self.server_options.pop('channel', 'flask-socketio')
write_only = app is None
if url:
if url.startswith(('redis://', "rediss://")):
queue_class = socketio.RedisManager
elif url.startswith('zmq'):
queue_class = socketio.ZmqManager
else:
queue_class = socketio.KombuManager
queue = queue_class(url, channel=channel,
write_only=write_only)
self.server_options['client_manager'] = queue
if 'json' in self.server_options and \
self.server_options['json'] == flask_json:
# flask's json module is tricky to use because its output
# changes when it is invoked inside or outside the app context
# so here to prevent any ambiguities we replace it with wrappers
# that ensure that the app context is always present
class FlaskSafeJSON(object):
@staticmethod
def dumps(*args, **kwargs):
with app.app_context():
return flask_json.dumps(*args, **kwargs)
@staticmethod
def loads(*args, **kwargs):
with app.app_context():
return flask_json.loads(*args, **kwargs)
self.server_options['json'] = FlaskSafeJSON
resource = self.server_options.pop('path', None) or \
self.server_options.pop('resource', None) or 'socket.io'
if resource.startswith('/'):
resource = resource[1:]
if os.environ.get('FLASK_RUN_FROM_CLI'):
if self.server_options.get('async_mode') is None:
if app is not None:
app.logger.warning(
'Flask-SocketIO is Running under Werkzeug, WebSocket '
'is not available.')
self.server_options['async_mode'] = 'threading'
self.server = socketio.Server(**self.server_options)
self.async_mode = self.server.async_mode
for handler in self.handlers:
self.server.on(handler[0], handler[1], namespace=handler[2])
for namespace_handler in self.namespace_handlers:
self.server.register_namespace(namespace_handler)
if app is not None:
# here we attach the SocketIO middlware to the SocketIO object so it
# can be referenced later if debug middleware needs to be inserted
self.sockio_mw = _SocketIOMiddleware(self.server, app,
socketio_path=resource)
app.wsgi_app = self.sockio_mw
def on(self, message, namespace=None):
"""Decorator to register a SocketIO event handler.
This decorator must be applied to SocketIO event handlers. Example::
@socketio.on('my event', namespace='/chat')
def handle_my_custom_event(json):
print('received json: ' + str(json))
:param message: The name of the event. This is normally a user defined
string, but a few event names are already defined. Use
``'message'`` to define a handler that takes a string
payload, ``'json'`` to define a handler that takes a
JSON blob payload, ``'connect'`` or ``'disconnect'``
to create handlers for connection and disconnection
events.
:param namespace: The namespace on which the handler is to be
registered. Defaults to the global namespace.
"""
namespace = namespace or '/'
def decorator(handler):
def _handler(sid, *args):
return self._handle_event(handler, message, namespace, sid,
*args)
if self.server:
self.server.on(message, _handler, namespace=namespace)
else:
self.handlers.append((message, _handler, namespace))
return handler
return decorator
def on_error(self, namespace=None):
"""Decorator to define a custom error handler for SocketIO events.
This decorator can be applied to a function that acts as an error
handler for a namespace. This handler will be invoked when a SocketIO
event handler raises an exception. The handler function must accept one
argument, which is the exception raised. Example::
@socketio.on_error(namespace='/chat')
def chat_error_handler(e):
print('An error has occurred: ' + str(e))
:param namespace: The namespace for which to register the error
handler. Defaults to the global namespace.
"""
namespace = namespace or '/'
def decorator(exception_handler):
if not callable(exception_handler):
raise ValueError('exception_handler must be callable')
self.exception_handlers[namespace] = exception_handler
return exception_handler
return decorator
def on_error_default(self, exception_handler):
"""Decorator to define a default error handler for SocketIO events.
This decorator can be applied to a function that acts as a default
error handler for any namespaces that do not have a specific handler.
Example::
@socketio.on_error_default
def error_handler(e):
print('An error has occurred: ' + str(e))
"""
if not callable(exception_handler):
raise ValueError('exception_handler must be callable')
self.default_exception_handler = exception_handler
return exception_handler
def on_event(self, message, handler, namespace=None):
"""Register a SocketIO event handler.
``on_event`` is the non-decorator version of ``'on'``.
Example::
def on_foo_event(json):
print('received json: ' + str(json))
socketio.on_event('my event', on_foo_event, namespace='/chat')
:param message: The name of the event. This is normally a user defined
string, but a few event names are already defined. Use
``'message'`` to define a handler that takes a string
payload, ``'json'`` to define a handler that takes a
JSON blob payload, ``'connect'`` or ``'disconnect'``
to create handlers for connection and disconnection
events.
:param handler: The function that handles the event.
:param namespace: The namespace on which the handler is to be
registered. Defaults to the global namespace.
"""
self.on(message, namespace=namespace)(handler)
def on_namespace(self, namespace_handler):
if not isinstance(namespace_handler, Namespace):
raise ValueError('Not a namespace instance.')
namespace_handler._set_socketio(self)
if self.server:
self.server.register_namespace(namespace_handler)
else:
self.namespace_handlers.append(namespace_handler)
def emit(self, event, *args, **kwargs):
"""Emit a server generated SocketIO event.
This function emits a SocketIO event to one or more connected clients.
A JSON blob can be attached to the event as payload. This function can
be used outside of a SocketIO event context, so it is appropriate to
use when the server is the originator of an event, outside of any
client context, such as in a regular HTTP request handler or a
background task. Example::
@app.route('/ping')
def ping():
socketio.emit('ping event', {'data': 42}, namespace='/chat')
:param event: The name of the user event to emit.
:param args: A dictionary with the JSON data to send as payload.
:param namespace: The namespace under which the message is to be sent.
Defaults to the global namespace.
:param room: Send the message to all the users in the given room. If
this parameter is not included, the event is sent to
all connected users.
:param skip_sid: The session id of a client to ignore when broadcasting
or addressing a room. This is typically set to the
originator of the message, so that everyone except
that client receive the message.
:param callback: If given, this function will be called to acknowledge
that the client has received the message. The
arguments that will be passed to the function are
those provided by the client. Callback functions can
only be used when addressing an individual client.
"""
namespace = kwargs.pop('namespace', '/')
room = kwargs.pop('room', None)
include_self = kwargs.pop('include_self', True)
skip_sid = kwargs.pop('skip_sid', None)
if not include_self and not skip_sid:
skip_sid = flask.request.sid
callback = kwargs.pop('callback', None)
self.server.emit(event, *args, namespace=namespace, room=room,
skip_sid=skip_sid, callback=callback, **kwargs)
def send(self, data, json=False, namespace=None, room=None,
callback=None, include_self=True, skip_sid=None, **kwargs):
"""Send a server-generated SocketIO message.
This function sends a simple SocketIO message to one or more connected
clients. The message can be a string or a JSON blob. This is a simpler
version of ``emit()``, which should be preferred. This function can be
used outside of a SocketIO event context, so it is appropriate to use
when the server is the originator of an event.
:param data: The message to send, either a string or a JSON blob.
:param json: ``True`` if ``message`` is a JSON blob, ``False``
otherwise.
:param namespace: The namespace under which the message is to be sent.
Defaults to the global namespace.
:param room: Send the message only to the users in the given room. If
this parameter is not included, the message is sent to
all connected users.
:param skip_sid: The session id of a client to ignore when broadcasting
or addressing a room. This is typically set to the
originator of the message, so that everyone except
that client receive the message.
:param callback: If given, this function will be called to acknowledge
that the client has received the message. The
arguments that will be passed to the function are
those provided by the client. Callback functions can
only be used when addressing an individual client.
"""
skip_sid = flask.request.sid if not include_self else skip_sid
if json:
self.emit('json', data, namespace=namespace, room=room,
skip_sid=skip_sid, callback=callback, **kwargs)
else:
self.emit('message', data, namespace=namespace, room=room,
skip_sid=skip_sid, callback=callback, **kwargs)
def close_room(self, room, namespace=None):
"""Close a room.
This function removes any users that are in the given room and then
deletes the room from the server. This function can be used outside
of a SocketIO event context.
:param room: The name of the room to close.
:param namespace: The namespace under which the room exists. Defaults
to the global namespace.
"""
self.server.close_room(room, namespace)
def run(self, app, host=None, port=None, **kwargs):
"""Run the SocketIO web server.
:param app: The Flask application instance.
:param host: The hostname or IP address for the server to listen on.
Defaults to 127.0.0.1.
:param port: The port number for the server to listen on. Defaults to
5000.
:param debug: ``True`` to start the server in debug mode, ``False`` to
start in normal mode.
:param use_reloader: ``True`` to enable the Flask reloader, ``False``
to disable it.
:param extra_files: A list of additional files that the Flask
reloader should watch. Defaults to ``None``
:param log_output: If ``True``, the server logs all incomming
connections. If ``False`` logging is disabled.
Defaults to ``True`` in debug mode, ``False``
in normal mode. Unused when the threading async
mode is used.
:param kwargs: Additional web server options. The web server options
are specific to the server used in each of the supported
async modes. Note that options provided here will
not be seen when using an external web server such
as gunicorn, since this method is not called in that
case.
"""
if host is None:
host = '127.0.0.1'
if port is None:
server_name = app.config['SERVER_NAME']
if server_name and ':' in server_name:
port = int(server_name.rsplit(':', 1)[1])
else:
port = 5000
debug = kwargs.pop('debug', app.debug)
log_output = kwargs.pop('log_output', debug)
use_reloader = kwargs.pop('use_reloader', debug)
extra_files = kwargs.pop('extra_files', None)
app.debug = debug
if app.debug and self.server.eio.async_mode != 'threading':
# put the debug middleware between the SocketIO middleware
# and the Flask application instance
#
# mw1 mw2 mw3 Flask app
# o ---- o ---- o ---- o
# /
# o Flask-SocketIO
# \ middleware
# o
# Flask-SocketIO WebSocket handler
#
# BECOMES
#
# dbg-mw mw1 mw2 mw3 Flask app
# o ---- o ---- o ---- o ---- o
# /
# o Flask-SocketIO
# \ middleware
# o
# Flask-SocketIO WebSocket handler
#
self.sockio_mw.wsgi_app = DebuggedApplication(self.sockio_mw.wsgi_app,
evalex=True)
if self.server.eio.async_mode == 'threading':
from werkzeug._internal import _log
_log('warning', 'WebSocket transport not available. Install '
'eventlet or gevent and gevent-websocket for '
'improved performance.')
app.run(host=host, port=port, threaded=True,
use_reloader=use_reloader, **kwargs)
elif self.server.eio.async_mode == 'eventlet':
def run_server():
import eventlet
import eventlet.wsgi
import eventlet.green
addresses = eventlet.green.socket.getaddrinfo(host, port)
if not addresses:
raise RuntimeError('Could not resolve host to a valid address')
eventlet_socket = eventlet.listen(addresses[0][4], addresses[0][0])
# If provided an SSL argument, use an SSL socket
ssl_args = ['keyfile', 'certfile', 'server_side', 'cert_reqs',
'ssl_version', 'ca_certs',
'do_handshake_on_connect', 'suppress_ragged_eofs',
'ciphers']
ssl_params = {k: kwargs[k] for k in kwargs if k in ssl_args}
if len(ssl_params) > 0:
for k in ssl_params:
kwargs.pop(k)
ssl_params['server_side'] = True # Listening requires true
eventlet_socket = eventlet.wrap_ssl(eventlet_socket,
**ssl_params)
eventlet.wsgi.server(eventlet_socket, app,
log_output=log_output, **kwargs)
if use_reloader:
run_with_reloader(run_server, extra_files=extra_files)
else:
run_server()
elif self.server.eio.async_mode == 'gevent':
from gevent import pywsgi
try:
from geventwebsocket.handler import WebSocketHandler
websocket = True
except ImportError:
websocket = False
log = 'default'
if not log_output:
log = None
if websocket:
self.wsgi_server = pywsgi.WSGIServer(
(host, port), app, handler_class=WebSocketHandler,
log=log, **kwargs)
else:
self.wsgi_server = pywsgi.WSGIServer((host, port), app,
log=log, **kwargs)
if use_reloader:
# monkey patching is required by the reloader
from gevent import monkey
monkey.patch_all()
def run_server():
self.wsgi_server.serve_forever()
run_with_reloader(run_server, extra_files=extra_files)
else:
self.wsgi_server.serve_forever()
def stop(self):
    """Stop a running SocketIO web server.

    This method must be called from a HTTP or SocketIO handler function.
    """
    mode = self.server.eio.async_mode
    if mode == 'threading':
        # Werkzeug publishes its shutdown hook through the WSGI environ.
        shutdown = flask.request.environ.get('werkzeug.server.shutdown')
        if not shutdown:
            raise RuntimeError('Cannot stop unknown web server')
        shutdown()
    elif mode == 'eventlet':
        # the eventlet server loop is terminated by letting SystemExit
        # propagate out of the handler
        raise SystemExit
    elif mode == 'gevent':
        self.wsgi_server.stop()
def start_background_task(self, target, *args, **kwargs):
    """Start a background task using the appropriate async model.

    This is a utility function that applications can use to start a
    background task using the method that is compatible with the
    selected async mode.

    :param target: the target function to execute.
    :param args: arguments to pass to the function.
    :param kwargs: keyword arguments to pass to the function.

    This function returns an object compatible with the `Thread` class in
    the Python standard library. The `start()` method on this object is
    already called by this function.
    """
    # delegate to the python-socketio server, which spawns a green thread
    # or a real thread depending on the configured async mode
    spawn = self.server.start_background_task
    return spawn(target, *args, **kwargs)
def sleep(self, seconds=0):
    """Sleep for the requested amount of time using the appropriate async
    model.

    This is a utility function that applications can use to put a task to
    sleep without having to worry about using the correct call for the
    selected async mode.

    :param seconds: the duration of the pause, in seconds.
    """
    # the underlying server picks the sleep primitive matching the
    # configured async mode (eventlet, gevent or plain threading)
    return self.server.sleep(seconds)
def test_client(self, app, namespace=None, query_string=None,
                headers=None, flask_test_client=None):
    """Return a Socket.IO test client bound to this server.

    The Socket.IO test client is useful for testing a Flask-SocketIO
    server. It works in a similar way to the Flask Test Client, but
    adapted to the Socket.IO server.

    :param app: The Flask application instance.
    :param namespace: The namespace for the client. If not provided, the
                      client connects to the server on the global
                      namespace.
    :param query_string: A string with custom query string arguments.
    :param headers: A dictionary with custom HTTP headers.
    :param flask_test_client: The instance of the Flask test client
                              currently in use. Passing the Flask test
                              client is optional, but is necessary if you
                              want the Flask user session and any other
                              cookies set in HTTP routes accessible from
                              Socket.IO events.
    """
    # each call returns an independent client; no state is shared between
    # clients created from the same server instance
    return SocketIOTestClient(app, self, namespace=namespace,
                              query_string=query_string, headers=headers,
                              flask_test_client=flask_test_client)
def _handle_event(self, handler, message, namespace, sid, *args):
    """Invoke a user event handler inside a Flask request context.

    The WSGI environ captured at connection time for client ``sid`` is
    replayed so that ``flask.request`` and ``flask.session`` are usable
    inside Socket.IO handlers.

    :param handler: the user-defined event handler to invoke.
    :param message: the event name (``'connect'``, ``'message'``, ...).
    :param namespace: the namespace the event arrived on.
    :param sid: the Socket.IO session id of the client.
    :param args: payload arguments forwarded to the handler.
    """
    if sid not in self.server.environ:
        # we don't have record of this client, ignore this event
        return '', 400
    app = self.server.environ[sid]['flask.app']
    with app.request_context(self.server.environ[sid]):
        if self.manage_session:
            # manage a separate session for this client's Socket.IO events
            # created as a copy of the regular user session
            if 'saved_session' not in self.server.environ[sid]:
                self.server.environ[sid]['saved_session'] = \
                    _ManagedSession(flask.session)
            session_obj = self.server.environ[sid]['saved_session']
        else:
            # let Flask handle the user session
            # for cookie based sessions, this effectively freezes the
            # session to its state at connection time
            # for server-side sessions, this allows HTTP and Socket.IO to
            # share the session, with both having read/write access to it
            session_obj = flask.session._get_current_object()
        # expose the chosen session and the event metadata on the request
        _request_ctx_stack.top.session = session_obj
        flask.request.sid = sid
        flask.request.namespace = namespace
        flask.request.event = {'message': message, 'args': args}
        try:
            if message == 'connect':
                # connect handlers take no payload arguments
                ret = handler()
            else:
                ret = handler(*args)
        except:  # intentionally bare so every error reaches the handlers
            err_handler = self.exception_handlers.get(
                namespace, self.default_exception_handler)
            if err_handler is None:
                raise
            # NOTE(review): this unpack shadows the ``type`` builtin and
            # only ``value`` is used afterwards
            type, value, traceback = sys.exc_info()
            return err_handler(value)
        if not self.manage_session:
            # when Flask is managing the user session, it needs to save it
            if not hasattr(session_obj, 'modified') or session_obj.modified:
                resp = app.response_class()
                app.session_interface.save_session(app, session_obj, resp)
        return ret
def send(message, **kwargs):
    """Send a SocketIO message.

    This function sends a simple SocketIO message to one or more connected
    clients. The message can be a string or a JSON blob. This is a simpler
    version of ``emit()``, which should be preferred. This is a function that
    can only be called from a SocketIO event handler.

    :param message: The message to send, either a string or a JSON blob.
    :param json: ``True`` if ``message`` is a JSON blob, ``False``
                 otherwise.
    :param namespace: The namespace under which the message is to be sent.
                      Defaults to the namespace used by the originating event.
                      An empty string can be used to use the global namespace.
    :param callback: Callback function to invoke with the client's
                     acknowledgement.
    :param broadcast: ``True`` to send the message to all connected clients, or
                      ``False`` to only reply to the sender of the originating
                      event.
    :param room: Send the message to all the users in the given room.
    :param include_self: ``True`` to include the sender when broadcasting or
                         addressing a room, or ``False`` to send to everyone
                         but the sender.
    :param ignore_queue: Only used when a message queue is configured. If
                         set to ``True``, the event is emitted to the
                         clients directly, without going through the queue.
                         This is more efficient, but only works when a
                         single server process is used, or when there is a
                         single addressee. It is recommended to always leave
                         this parameter with its default value of ``False``.
    """
    json = kwargs.get('json', False)
    # default to the namespace the originating event arrived on
    if 'namespace' in kwargs:
        namespace = kwargs['namespace']
    else:
        namespace = flask.request.namespace
    callback = kwargs.get('callback')
    broadcast = kwargs.get('broadcast')
    room = kwargs.get('room')
    if room is None and not broadcast:
        # no room and no broadcast: reply privately to the sender
        room = flask.request.sid
    include_self = kwargs.get('include_self', True)
    ignore_queue = kwargs.get('ignore_queue', False)
    socketio = flask.current_app.extensions['socketio']
    return socketio.send(message, json=json, namespace=namespace, room=room,
                         include_self=include_self, callback=callback,
                         ignore_queue=ignore_queue)
def join_room(room, sid=None, namespace=None):
    """Join a room.

    This function puts the user in a room, under the current namespace. The
    user and the namespace are obtained from the event context. This is a
    function that can only be called from a SocketIO event handler. Example::

        @socketio.on('join')
        def on_join(data):
            username = session['username']
            room = data['room']
            join_room(room)
            send(username + ' has entered the room.', room=room)

    :param room: The name of the room to join.
    :param sid: The session id of the client. If not provided, the client is
                obtained from the request context.
    :param namespace: The namespace for the room. If not provided, the
                      namespace is obtained from the request context.
    """
    ext = flask.current_app.extensions['socketio']
    target_sid = sid or flask.request.sid
    target_namespace = namespace or flask.request.namespace
    ext.server.enter_room(target_sid, room, namespace=target_namespace)
def leave_room(room, sid=None, namespace=None):
    """Leave a room.

    This function removes the user from a room, under the current namespace.
    The user and the namespace are obtained from the event context. Example::

        @socketio.on('leave')
        def on_leave(data):
            username = session['username']
            room = data['room']
            leave_room(room)
            send(username + ' has left the room.', room=room)

    :param room: The name of the room to leave.
    :param sid: The session id of the client. If not provided, the client is
                obtained from the request context.
    :param namespace: The namespace for the room. If not provided, the
                      namespace is obtained from the request context.
    """
    ext = flask.current_app.extensions['socketio']
    target_sid = sid or flask.request.sid
    target_namespace = namespace or flask.request.namespace
    ext.server.leave_room(target_sid, room, namespace=target_namespace)
def close_room(room, namespace=None):
    """Close a room.

    This function removes any users that are in the given room and then
    deletes the room from the server.

    :param room: The name of the room to close.
    :param namespace: The namespace for the room. If not provided, the
                      namespace is obtained from the request context.
    """
    ext = flask.current_app.extensions['socketio']
    target_namespace = namespace or flask.request.namespace
    ext.server.close_room(room, namespace=target_namespace)
def rooms(sid=None, namespace=None):
    """Return a list of the rooms the client is in.

    This function returns all the rooms the client has entered, including its
    own room, assigned by the Socket.IO server.

    :param sid: The session id of the client. If not provided, the client is
                obtained from the request context.
    :param namespace: The namespace for the room. If not provided, the
                      namespace is obtained from the request context.
    """
    ext = flask.current_app.extensions['socketio']
    return ext.server.rooms(sid or flask.request.sid,
                            namespace=namespace or flask.request.namespace)
def disconnect(sid=None, namespace=None, silent=False):
    """Disconnect the client.

    This function terminates the connection with the client. As a result of
    this call the client will receive a disconnect event. Example::

        @socketio.on('message')
        def receive_message(msg):
            if is_banned(session['username']):
                disconnect()
            else:
                # ...

    :param sid: The session id of the client. If not provided, the client is
                obtained from the request context.
    :param namespace: The namespace for the room. If not provided, the
                      namespace is obtained from the request context.
    :param silent: this option is deprecated and ignored.
    """
    ext = flask.current_app.extensions['socketio']
    target_sid = sid or flask.request.sid
    target_namespace = namespace or flask.request.namespace
    # ``silent`` is deprecated and intentionally unused
    return ext.server.disconnect(target_sid, namespace=target_namespace)
|
miguelgrinberg/Flask-SocketIO | flask_socketio/__init__.py | send | python | def send(message, **kwargs):
json = kwargs.get('json', False)
if 'namespace' in kwargs:
namespace = kwargs['namespace']
else:
namespace = flask.request.namespace
callback = kwargs.get('callback')
broadcast = kwargs.get('broadcast')
room = kwargs.get('room')
if room is None and not broadcast:
room = flask.request.sid
include_self = kwargs.get('include_self', True)
ignore_queue = kwargs.get('ignore_queue', False)
socketio = flask.current_app.extensions['socketio']
return socketio.send(message, json=json, namespace=namespace, room=room,
include_self=include_self, callback=callback,
ignore_queue=ignore_queue) | Send a SocketIO message.
This function sends a simple SocketIO message to one or more connected
clients. The message can be a string or a JSON blob. This is a simpler
version of ``emit()``, which should be preferred. This is a function that
can only be called from a SocketIO event handler.
:param message: The message to send, either a string or a JSON blob.
:param json: ``True`` if ``message`` is a JSON blob, ``False``
otherwise.
:param namespace: The namespace under which the message is to be sent.
Defaults to the namespace used by the originating event.
An empty string can be used to use the global namespace.
:param callback: Callback function to invoke with the client's
acknowledgement.
:param broadcast: ``True`` to send the message to all connected clients, or
``False`` to only reply to the sender of the originating
event.
:param room: Send the message to all the users in the given room.
:param include_self: ``True`` to include the sender when broadcasting or
addressing a room, or ``False`` to send to everyone
but the sender.
:param ignore_queue: Only used when a message queue is configured. If
set to ``True``, the event is emitted to the
clients directly, without going through the queue.
This is more efficient, but only works when a
single server process is used, or when there is a
single addressee. It is recommended to always leave
this parameter with its default value of ``False``. | train | https://github.com/miguelgrinberg/Flask-SocketIO/blob/4bef800d5e7ba7d98a6f4cd94191ff0b4496c334/flask_socketio/__init__.py#L729-L776 | null | import os
import sys

# Guard against gevent-socketio being installed: it provides a top-level
# ``socketio`` package that shadows python-socketio, which this extension
# requires.  The probe import only checks availability; the imported name
# itself is never used.
gevent_socketio_found = True
try:
    from socketio import socketio_manage
except ImportError:
    gevent_socketio_found = False
if gevent_socketio_found:
    print('The gevent-socketio package is incompatible with this version of '
          'the Flask-SocketIO extension. Please uninstall it, and then '
          'install the latest version of python-socketio in its place.')
    sys.exit(1)

import socketio
import flask
from flask import _request_ctx_stack, json as flask_json
from flask.sessions import SessionMixin
from werkzeug.debug import DebuggedApplication
from werkzeug.serving import run_with_reloader

from .namespace import Namespace
from .test_client import SocketIOTestClient

__version__ = '3.3.2'
class _SocketIOMiddleware(socketio.WSGIApp):
    """This WSGI middleware simply exposes the Flask application in the WSGI
    environment before executing the request.
    """
    def __init__(self, socketio_app, flask_app, socketio_path='socket.io'):
        # keep a reference so __call__ can inject the app into each request
        self.flask_app = flask_app
        super(_SocketIOMiddleware, self).__init__(socketio_app,
                                                  flask_app.wsgi_app,
                                                  socketio_path=socketio_path)

    def __call__(self, environ, start_response):
        # copy so the mutation does not leak into the caller's environ dict
        environ = environ.copy()
        environ['flask.app'] = self.flask_app
        return super(_SocketIOMiddleware, self).__call__(environ,
                                                         start_response)
class _ManagedSession(dict, SessionMixin):
    """User session storage managed by Flask-SocketIO.

    A plain ``dict`` augmented with Flask's ``SessionMixin`` attributes so
    it can stand in for a regular Flask session object.
    """
class SocketIO(object):
"""Create a Flask-SocketIO server.
:param app: The flask application instance. If the application instance
isn't known at the time this class is instantiated, then call
``socketio.init_app(app)`` once the application instance is
available.
:param manage_session: If set to ``True``, this extension manages the user
session for Socket.IO events. If set to ``False``,
Flask's own session management is used. When using
Flask's cookie based sessions it is recommended that
you leave this set to the default of ``True``. When
using server-side sessions, a ``False`` setting
enables sharing the user session between HTTP routes
and Socket.IO events.
:param message_queue: A connection URL for a message queue service the
server can use for multi-process communication. A
message queue is not required when using a single
server process.
:param channel: The channel name, when using a message queue. If a channel
isn't specified, a default channel will be used. If
multiple clusters of SocketIO processes need to use the
same message queue without interfering with each other, then
each cluster should use a different channel.
:param path: The path where the Socket.IO server is exposed. Defaults to
``'socket.io'``. Leave this as is unless you know what you are
doing.
:param resource: Alias to ``path``.
:param kwargs: Socket.IO and Engine.IO server options.
The Socket.IO server options are detailed below:
:param client_manager: The client manager instance that will manage the
client list. When this is omitted, the client list
is stored in an in-memory structure, so the use of
multiple connected servers is not possible. In most
cases, this argument does not need to be set
explicitly.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``.
:param binary: ``True`` to support binary payloads, ``False`` to treat all
payloads as text. On Python 2, if this is set to ``True``,
``unicode`` values are treated as text, and ``str`` and
``bytes`` values are treated as binary. This option has no
effect on Python 3, where text and binary payloads are
always automatically discovered.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions. To use the same json encoder and decoder as a Flask
application, use ``flask.json``.
The Engine.IO server configuration supports the following settings:
:param async_mode: The asynchronous model to use. See the Deployment
section in the documentation for a description of the
available options. Valid async modes are
``threading``, ``eventlet``, ``gevent`` and
``gevent_uwsgi``. If this argument is not given,
``eventlet`` is tried first, then ``gevent_uwsgi``,
then ``gevent``, and finally ``threading``. The
first async mode that has all its dependencies installed
is then the one that is chosen.
:param ping_timeout: The time in seconds that the client waits for the
server to respond before disconnecting. The default is
60 seconds.
:param ping_interval: The interval in seconds at which the client pings
the server. The default is 25 seconds.
:param max_http_buffer_size: The maximum size of a message when using the
polling transport. The default is 100,000,000
bytes.
:param allow_upgrades: Whether to allow transport upgrades or not. The
default is ``True``.
:param http_compression: Whether to compress packages when using the
polling transport. The default is ``True``.
:param compression_threshold: Only compress messages when their byte size
is greater than this value. The default is
1024 bytes.
:param cookie: Name of the HTTP cookie that contains the client session
id. If set to ``None``, a cookie is not sent to the client.
The default is ``'io'``.
:param cors_allowed_origins: List of origins that are allowed to connect
to this server. All origins are allowed by
default.
:param cors_credentials: Whether credentials (cookies, authentication) are
allowed in requests to this server. The default is
``True``.
:param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
a logger object to use. To disable logging set to
``False``. The default is ``False``.
"""
def __init__(self, app=None, **kwargs):
    # start with an empty configuration; the actual Socket.IO server is
    # created by init_app()
    self.server = None
    self.server_options = {}
    self.wsgi_server = None
    self.handlers = []
    self.namespace_handlers = []
    self.exception_handlers = {}
    self.default_exception_handler = None
    self.manage_session = True
    # We can call init_app when:
    # - we were given the Flask app instance (standard initialization)
    # - we were not given the app, but we were given a message_queue
    #   (standard initialization for auxiliary process)
    # In all other cases we collect the arguments and assume the client
    # will call init_app from an app factory function.
    if app is None and 'message_queue' not in kwargs:
        # deferred initialization: remember the options for later
        self.server_options.update(kwargs)
    else:
        self.init_app(app, **kwargs)
def init_app(self, app, **kwargs):
    """Attach the extension to a Flask application.

    Creates the python-socketio ``Server``, registers any handlers and
    namespaces that were queued before the server existed, and installs
    the Socket.IO WSGI middleware on ``app``.  ``app`` may be ``None``
    for write-only auxiliary processes that only post to a message queue.

    :param app: the Flask application instance, or ``None``.
    :param kwargs: Socket.IO / Engine.IO server options (see ``SocketIO``).
    """
    if app is not None:
        if not hasattr(app, 'extensions'):
            app.extensions = {}  # pragma: no cover
        app.extensions['socketio'] = self
    self.server_options.update(kwargs)
    self.manage_session = self.server_options.pop('manage_session',
                                                  self.manage_session)

    if 'client_manager' not in self.server_options:
        # pick a queue-backed client manager when a message queue URL is
        # configured; processes without an app are write-only publishers
        url = self.server_options.pop('message_queue', None)
        channel = self.server_options.pop('channel', 'flask-socketio')
        write_only = app is None
        if url:
            if url.startswith(('redis://', "rediss://")):
                queue_class = socketio.RedisManager
            elif url.startswith('zmq'):
                queue_class = socketio.ZmqManager
            else:
                queue_class = socketio.KombuManager
            queue = queue_class(url, channel=channel,
                                write_only=write_only)
            self.server_options['client_manager'] = queue

    if 'json' in self.server_options and \
            self.server_options['json'] == flask_json:
        # flask's json module is tricky to use because its output
        # changes when it is invoked inside or outside the app context
        # so here to prevent any ambiguities we replace it with wrappers
        # that ensure that the app context is always present
        class FlaskSafeJSON(object):
            @staticmethod
            def dumps(*args, **kwargs):
                with app.app_context():
                    return flask_json.dumps(*args, **kwargs)

            @staticmethod
            def loads(*args, **kwargs):
                with app.app_context():
                    return flask_json.loads(*args, **kwargs)

        self.server_options['json'] = FlaskSafeJSON

    # normalize the mount path; both 'path' and 'resource' are accepted
    resource = self.server_options.pop('path', None) or \
        self.server_options.pop('resource', None) or 'socket.io'
    if resource.startswith('/'):
        resource = resource[1:]
    if os.environ.get('FLASK_RUN_FROM_CLI'):
        # under "flask run" only the threading mode works
        if self.server_options.get('async_mode') is None:
            if app is not None:
                app.logger.warning(
                    'Flask-SocketIO is Running under Werkzeug, WebSocket '
                    'is not available.')
            self.server_options['async_mode'] = 'threading'
    self.server = socketio.Server(**self.server_options)
    self.async_mode = self.server.async_mode
    # flush handlers and namespaces queued before the server existed
    for handler in self.handlers:
        self.server.on(handler[0], handler[1], namespace=handler[2])
    for namespace_handler in self.namespace_handlers:
        self.server.register_namespace(namespace_handler)

    if app is not None:
        # here we attach the SocketIO middleware to the SocketIO object so it
        # can be referenced later if debug middleware needs to be inserted
        self.sockio_mw = _SocketIOMiddleware(self.server, app,
                                             socketio_path=resource)
        app.wsgi_app = self.sockio_mw
def on(self, message, namespace=None):
    """Decorator to register a SocketIO event handler.

    This decorator must be applied to SocketIO event handlers. Example::

        @socketio.on('my event', namespace='/chat')
        def handle_my_custom_event(json):
            print('received json: ' + str(json))

    :param message: The name of the event. This is normally a user defined
                    string, but a few event names are already defined. Use
                    ``'message'`` to define a handler that takes a string
                    payload, ``'json'`` to define a handler that takes a
                    JSON blob payload, ``'connect'`` or ``'disconnect'``
                    to create handlers for connection and disconnection
                    events.
    :param namespace: The namespace on which the handler is to be
                      registered. Defaults to the global namespace.
    """
    event_namespace = namespace or '/'

    def register(handler):
        def dispatch(sid, *args):
            # route the raw Socket.IO event through the Flask-aware
            # dispatcher so the handler runs inside a request context
            return self._handle_event(handler, message, event_namespace,
                                      sid, *args)
        if self.server:
            # server already exists: register with python-socketio now
            self.server.on(message, dispatch, namespace=event_namespace)
        else:
            # no server yet: queue until init_app() creates one
            self.handlers.append((message, dispatch, event_namespace))
        return handler
    return register
def on_error(self, namespace=None):
    """Decorator to define a custom error handler for SocketIO events.

    This decorator can be applied to a function that acts as an error
    handler for a namespace. This handler will be invoked when a SocketIO
    event handler raises an exception. The handler function must accept one
    argument, which is the exception raised. Example::

        @socketio.on_error(namespace='/chat')
        def chat_error_handler(e):
            print('An error has occurred: ' + str(e))

    :param namespace: The namespace for which to register the error
                      handler. Defaults to the global namespace.
    """
    target_namespace = namespace or '/'

    def register(exception_handler):
        if not callable(exception_handler):
            raise ValueError('exception_handler must be callable')
        self.exception_handlers[target_namespace] = exception_handler
        return exception_handler
    return register
def on_error_default(self, exception_handler):
    """Decorator to define a default error handler for SocketIO events.

    This decorator can be applied to a function that acts as a default
    error handler for any namespaces that do not have a specific handler.
    Example::

        @socketio.on_error_default
        def error_handler(e):
            print('An error has occurred: ' + str(e))
    """
    if callable(exception_handler):
        self.default_exception_handler = exception_handler
        return exception_handler
    raise ValueError('exception_handler must be callable')
def on_event(self, message, handler, namespace=None):
    """Register a SocketIO event handler.

    ``on_event`` is the non-decorator version of ``'on'``.

    Example::

        def on_foo_event(json):
            print('received json: ' + str(json))

        socketio.on_event('my event', on_foo_event, namespace='/chat')

    :param message: The name of the event. This is normally a user defined
                    string, but a few event names are already defined. Use
                    ``'message'`` to define a handler that takes a string
                    payload, ``'json'`` to define a handler that takes a
                    JSON blob payload, ``'connect'`` or ``'disconnect'``
                    to create handlers for connection and disconnection
                    events.
    :param handler: The function that handles the event.
    :param namespace: The namespace on which the handler is to be
                      registered. Defaults to the global namespace.
    """
    # reuse the ``on`` decorator machinery by applying it directly
    register = self.on(message, namespace=namespace)
    register(handler)
def on_namespace(self, namespace_handler):
    """Register a class-based event namespace.

    :param namespace_handler: a ``Namespace`` instance whose methods
                              handle the events of its namespace.
    :raises ValueError: if the argument is not a ``Namespace`` instance.
    """
    if not isinstance(namespace_handler, Namespace):
        raise ValueError('Not a namespace instance.')
    # give the namespace a back-reference to this extension
    namespace_handler._set_socketio(self)
    if self.server:
        # server already created: register immediately
        self.server.register_namespace(namespace_handler)
    else:
        # defer registration until init_app() creates the server
        self.namespace_handlers.append(namespace_handler)
def emit(self, event, *args, **kwargs):
    """Emit a server generated SocketIO event.

    This function emits a SocketIO event to one or more connected clients.
    A JSON blob can be attached to the event as payload. This function can
    be used outside of a SocketIO event context, so it is appropriate to
    use when the server is the originator of an event, outside of any
    client context, such as in a regular HTTP request handler or a
    background task. Example::

        @app.route('/ping')
        def ping():
            socketio.emit('ping event', {'data': 42}, namespace='/chat')

    :param event: The name of the user event to emit.
    :param args: A dictionary with the JSON data to send as payload.
    :param namespace: The namespace under which the message is to be sent.
                      Defaults to the global namespace.
    :param room: Send the message to all the users in the given room. If
                 this parameter is not included, the event is sent to
                 all connected users.
    :param skip_sid: The session id of a client to ignore when broadcasting
                     or addressing a room. This is typically set to the
                     originator of the message, so that everyone except
                     that client receive the message.
    :param callback: If given, this function will be called to acknowledge
                     that the client has received the message. The
                     arguments that will be passed to the function are
                     those provided by the client. Callback functions can
                     only be used when addressing an individual client.
    """
    target_namespace = kwargs.pop('namespace', '/')
    target_room = kwargs.pop('room', None)
    include_self = kwargs.pop('include_self', True)
    skip_sid = kwargs.pop('skip_sid', None)
    ack = kwargs.pop('callback', None)
    if not include_self and not skip_sid:
        # exclude the sender of the originating event from the broadcast
        skip_sid = flask.request.sid
    self.server.emit(event, *args, namespace=target_namespace,
                     room=target_room, skip_sid=skip_sid, callback=ack,
                     **kwargs)
def send(self, data, json=False, namespace=None, room=None,
         callback=None, include_self=True, skip_sid=None, **kwargs):
    """Send a server-generated SocketIO message.

    This function sends a simple SocketIO message to one or more connected
    clients. The message can be a string or a JSON blob. This is a simpler
    version of ``emit()``, which should be preferred. This function can be
    used outside of a SocketIO event context, so it is appropriate to use
    when the server is the originator of an event.

    :param data: The message to send, either a string or a JSON blob.
    :param json: ``True`` if ``message`` is a JSON blob, ``False``
                 otherwise.
    :param namespace: The namespace under which the message is to be sent.
                      Defaults to the global namespace.
    :param room: Send the message only to the users in the given room. If
                 this parameter is not included, the message is sent to
                 all connected users.
    :param skip_sid: The session id of a client to ignore when broadcasting
                     or addressing a room. This is typically set to the
                     originator of the message, so that everyone except
                     that client receive the message.
    :param callback: If given, this function will be called to acknowledge
                     that the client has received the message. The
                     arguments that will be passed to the function are
                     those provided by the client. Callback functions can
                     only be used when addressing an individual client.
    """
    if not include_self:
        # exclude the sender of the originating event
        skip_sid = flask.request.sid
    # string payloads go out as 'message' events, JSON blobs as 'json'
    event = 'json' if json else 'message'
    self.emit(event, data, namespace=namespace, room=room,
              skip_sid=skip_sid, callback=callback, **kwargs)
def close_room(self, room, namespace=None):
    """Close a room.

    This function removes any users that are in the given room and then
    deletes the room from the server. This function can be used outside
    of a SocketIO event context.

    :param room: The name of the room to close.
    :param namespace: The namespace under which the room exists. Defaults
                      to the global namespace.
    """
    # delegate to the python-socketio server
    self.server.close_room(room, namespace)
def run(self, app, host=None, port=None, **kwargs):
    """Run the SocketIO web server.

    :param app: The Flask application instance.
    :param host: The hostname or IP address for the server to listen on.
                 Defaults to 127.0.0.1.
    :param port: The port number for the server to listen on. Defaults to
                 5000.
    :param debug: ``True`` to start the server in debug mode, ``False`` to
                  start in normal mode.
    :param use_reloader: ``True`` to enable the Flask reloader, ``False``
                         to disable it.
    :param extra_files: A list of additional files that the Flask
                        reloader should watch. Defaults to ``None``
    :param log_output: If ``True``, the server logs all incoming
                       connections. If ``False`` logging is disabled.
                       Defaults to ``True`` in debug mode, ``False``
                       in normal mode. Unused when the threading async
                       mode is used.
    :param kwargs: Additional web server options. The web server options
                   are specific to the server used in each of the supported
                   async modes. Note that options provided here will
                   not be seen when using an external web server such
                   as gunicorn, since this method is not called in that
                   case.
    """
    if host is None:
        host = '127.0.0.1'
    if port is None:
        # fall back to the port embedded in SERVER_NAME, if any
        server_name = app.config['SERVER_NAME']
        if server_name and ':' in server_name:
            port = int(server_name.rsplit(':', 1)[1])
        else:
            port = 5000

    debug = kwargs.pop('debug', app.debug)
    log_output = kwargs.pop('log_output', debug)
    use_reloader = kwargs.pop('use_reloader', debug)
    extra_files = kwargs.pop('extra_files', None)

    app.debug = debug
    if app.debug and self.server.eio.async_mode != 'threading':
        # put the debug middleware between the SocketIO middleware
        # and the Flask application instance
        #
        #    mw1    mw2    mw3   Flask app
        #     o ---- o ---- o ---- o
        #    /
        #   o Flask-SocketIO
        #    \  middleware
        #     o
        # Flask-SocketIO WebSocket handler
        #
        # BECOMES
        #
        #  dbg-mw    mw1    mw2    mw3   Flask app
        #     o ---- o ---- o ---- o ---- o
        #    /
        #   o Flask-SocketIO
        #    \  middleware
        #     o
        # Flask-SocketIO WebSocket handler
        #
        self.sockio_mw.wsgi_app = DebuggedApplication(self.sockio_mw.wsgi_app,
                                                      evalex=True)

    if self.server.eio.async_mode == 'threading':
        # plain Werkzeug server: long-polling only, no WebSocket
        from werkzeug._internal import _log
        _log('warning', 'WebSocket transport not available. Install '
                        'eventlet or gevent and gevent-websocket for '
                        'improved performance.')
        app.run(host=host, port=port, threaded=True,
                use_reloader=use_reloader, **kwargs)
    elif self.server.eio.async_mode == 'eventlet':
        def run_server():
            import eventlet
            import eventlet.wsgi
            import eventlet.green
            addresses = eventlet.green.socket.getaddrinfo(host, port)
            if not addresses:
                raise RuntimeError('Could not resolve host to a valid address')
            eventlet_socket = eventlet.listen(addresses[0][4], addresses[0][0])

            # If provided an SSL argument, use an SSL socket
            ssl_args = ['keyfile', 'certfile', 'server_side', 'cert_reqs',
                        'ssl_version', 'ca_certs',
                        'do_handshake_on_connect', 'suppress_ragged_eofs',
                        'ciphers']
            ssl_params = {k: kwargs[k] for k in kwargs if k in ssl_args}
            if len(ssl_params) > 0:
                for k in ssl_params:
                    kwargs.pop(k)
                ssl_params['server_side'] = True  # Listening requires true
                eventlet_socket = eventlet.wrap_ssl(eventlet_socket,
                                                    **ssl_params)

            eventlet.wsgi.server(eventlet_socket, app,
                                 log_output=log_output, **kwargs)

        if use_reloader:
            run_with_reloader(run_server, extra_files=extra_files)
        else:
            run_server()
    elif self.server.eio.async_mode == 'gevent':
        from gevent import pywsgi
        try:
            # WebSocket support is optional; requires gevent-websocket
            from geventwebsocket.handler import WebSocketHandler
            websocket = True
        except ImportError:
            websocket = False

        log = 'default'
        if not log_output:
            log = None
        if websocket:
            self.wsgi_server = pywsgi.WSGIServer(
                (host, port), app, handler_class=WebSocketHandler,
                log=log, **kwargs)
        else:
            self.wsgi_server = pywsgi.WSGIServer((host, port), app,
                                                 log=log, **kwargs)

        if use_reloader:
            # monkey patching is required by the reloader
            from gevent import monkey
            monkey.patch_all()

            def run_server():
                self.wsgi_server.serve_forever()

            run_with_reloader(run_server, extra_files=extra_files)
        else:
            self.wsgi_server.serve_forever()
def stop(self):
    """Stop a running SocketIO web server.

    Must be invoked from within an HTTP or Socket.IO handler, because in
    threading mode the shutdown hook lives in the current request
    environment.
    """
    mode = self.server.eio.async_mode
    if mode == 'threading':
        shutdown = flask.request.environ.get('werkzeug.server.shutdown')
        if not shutdown:
            raise RuntimeError('Cannot stop unknown web server')
        shutdown()
    elif mode == 'eventlet':
        # the eventlet server is stopped by letting SystemExit propagate
        raise SystemExit
    elif mode == 'gevent':
        self.wsgi_server.stop()
def start_background_task(self, target, *args, **kwargs):
    """Launch *target* as a background task via the configured async mode.

    :param target: the function to run in the background.
    :param args: positional arguments forwarded to ``target``.
    :param kwargs: keyword arguments forwarded to ``target``.

    Returns an object with the same interface as ``threading.Thread``;
    its ``start()`` method has already been invoked.
    """
    task = self.server.start_background_task(target, *args, **kwargs)
    return task
def sleep(self, seconds=0):
    """Pause the calling task for ``seconds`` using the sleep primitive
    of the selected async mode, so callers don't need to know which one
    is in use.
    """
    return self.server.sleep(seconds)
def test_client(self, app, namespace=None, query_string=None,
                headers=None, flask_test_client=None):
    """Return a Socket.IO test client bound to this server.

    Works much like the Flask test client, but for Socket.IO traffic.

    :param app: the Flask application instance.
    :param namespace: namespace the client connects to; defaults to the
                      global namespace.
    :param query_string: custom query string arguments, as a string.
    :param headers: dictionary of custom HTTP headers.
    :param flask_test_client: the Flask test client currently in use, if
                              any. Required when tests need the Flask
                              user session or cookies set by HTTP routes
                              to be visible to Socket.IO events.
    """
    client = SocketIOTestClient(
        app, self, namespace=namespace, query_string=query_string,
        headers=headers, flask_test_client=flask_test_client)
    return client
def _handle_event(self, handler, message, namespace, sid, *args):
    """Invoke a user event handler inside a Flask request context.

    Sets up the per-client session, attaches Socket.IO metadata to
    ``flask.request``, routes handler exceptions to the registered error
    handlers, and saves the Flask-managed session back when needed.

    :param handler: the user's event handler function.
    :param message: the event name ('connect', 'message', custom, ...).
    :param namespace: the namespace the event arrived on.
    :param sid: the Socket.IO session id of the client.
    :param args: payload arguments to pass to the handler.
    """
    if sid not in self.server.environ:
        # we don't have record of this client, ignore this event
        return '', 400
    app = self.server.environ[sid]['flask.app']
    with app.request_context(self.server.environ[sid]):
        if self.manage_session:
            # manage a separate session for this client's Socket.IO events
            # created as a copy of the regular user session
            if 'saved_session' not in self.server.environ[sid]:
                self.server.environ[sid]['saved_session'] = \
                    _ManagedSession(flask.session)
            session_obj = self.server.environ[sid]['saved_session']
        else:
            # let Flask handle the user session
            # for cookie based sessions, this effectively freezes the
            # session to its state at connection time
            # for server-side sessions, this allows HTTP and Socket.IO to
            # share the session, with both having read/write access to it
            session_obj = flask.session._get_current_object()
        _request_ctx_stack.top.session = session_obj
        flask.request.sid = sid
        flask.request.namespace = namespace
        flask.request.event = {'message': message, 'args': args}
        try:
            if message == 'connect':
                ret = handler()
            else:
                ret = handler(*args)
        except:  # intentionally broad: route any exception to the handler
            err_handler = self.exception_handlers.get(
                namespace, self.default_exception_handler)
            if err_handler is None:
                raise
            # avoid shadowing the builtins `type` and `traceback`
            exc_value = sys.exc_info()[1]
            return err_handler(exc_value)
        if not self.manage_session:
            # when Flask is managing the user session, it needs to save it
            if not hasattr(session_obj, 'modified') or session_obj.modified:
                resp = app.response_class()
                app.session_interface.save_session(app, session_obj, resp)
        return ret
def emit(event, *args, **kwargs):
    """Emit a SocketIO event to one or more connected clients.

    A JSON blob can be attached as payload. Only callable from within a
    SocketIO event handler, since the client context supplies the
    defaults. Example::

        @socketio.on('my event')
        def handle_my_custom_event(json):
            emit('my response', {'data': 42})

    :param event: name of the user event to emit.
    :param args: a dictionary with the JSON data to send as payload.
    :param namespace: namespace to emit under; defaults to the namespace
                      of the originating event. Use ``'/'`` for the
                      global namespace.
    :param callback: function invoked with the client's acknowledgement.
    :param broadcast: ``True`` to address all clients instead of only the
                      sender of the originating event.
    :param room: deliver to every user in this room; implies broadcast.
    :param include_self: when broadcasting or addressing a room, whether
                         the sender also receives the message.
    :param ignore_queue: when a message queue is configured, bypass it
                         and emit directly. Only safe with a single
                         server process or a single addressee; normally
                         leave this as ``False``.
    """
    # fall back to the namespace of the event being handled
    namespace = kwargs['namespace'] if 'namespace' in kwargs \
        else flask.request.namespace
    broadcast = kwargs.get('broadcast')
    room = kwargs.get('room')
    if room is None and not broadcast:
        # not broadcasting and no room given: reply to the sender only
        room = flask.request.sid
    socketio = flask.current_app.extensions['socketio']
    return socketio.emit(event, *args, namespace=namespace, room=room,
                         include_self=kwargs.get('include_self', True),
                         callback=kwargs.get('callback'),
                         ignore_queue=kwargs.get('ignore_queue', False))
def join_room(room, sid=None, namespace=None):
    """Put the client in a room under the current namespace.

    Only callable from a SocketIO event handler, which supplies the
    defaults for ``sid`` and ``namespace``. Example::

        @socketio.on('join')
        def on_join(data):
            username = session['username']
            room = data['room']
            join_room(room)
            send(username + ' has entered the room.', room=room)

    :param room: name of the room to join.
    :param sid: session id of the client; taken from the request context
                when not given.
    :param namespace: namespace of the room; taken from the request
                      context when not given.
    """
    sid = sid or flask.request.sid
    namespace = namespace or flask.request.namespace
    server = flask.current_app.extensions['socketio'].server
    server.enter_room(sid, room, namespace=namespace)
def leave_room(room, sid=None, namespace=None):
    """Remove the client from a room under the current namespace.

    Example::

        @socketio.on('leave')
        def on_leave(data):
            username = session['username']
            room = data['room']
            leave_room(room)
            send(username + ' has left the room.', room=room)

    :param room: name of the room to leave.
    :param sid: session id of the client; taken from the request context
                when not given.
    :param namespace: namespace of the room; taken from the request
                      context when not given.
    """
    sid = sid or flask.request.sid
    namespace = namespace or flask.request.namespace
    server = flask.current_app.extensions['socketio'].server
    server.leave_room(sid, room, namespace=namespace)
def close_room(room, namespace=None):
    """Remove every user from the given room, then delete the room from
    the server.

    :param room: name of the room to close.
    :param namespace: namespace of the room; taken from the request
                      context when not given.
    """
    namespace = namespace or flask.request.namespace
    socketio = flask.current_app.extensions['socketio']
    socketio.server.close_room(room, namespace=namespace)
def rooms(sid=None, namespace=None):
    """Return a list of the rooms the client is in, including the private
    room the Socket.IO server assigned to it.

    :param sid: session id of the client; taken from the request context
                when not given.
    :param namespace: namespace of the rooms; taken from the request
                      context when not given.
    """
    sid = sid or flask.request.sid
    namespace = namespace or flask.request.namespace
    server = flask.current_app.extensions['socketio'].server
    return server.rooms(sid, namespace=namespace)
def disconnect(sid=None, namespace=None, silent=False):
    """Terminate the connection with the client.

    The client receives a disconnect event as a result. Example::

        @socketio.on('message')
        def receive_message(msg):
            if is_banned(session['username']):
                disconnect()
            else:
                # ...

    :param sid: session id of the client; taken from the request context
                when not given.
    :param namespace: namespace of the connection; taken from the request
                      context when not given.
    :param silent: deprecated and ignored.
    """
    sid = sid or flask.request.sid
    namespace = namespace or flask.request.namespace
    server = flask.current_app.extensions['socketio'].server
    return server.disconnect(sid, namespace=namespace)
|
miguelgrinberg/Flask-SocketIO | flask_socketio/__init__.py | join_room | python | def join_room(room, sid=None, namespace=None):
socketio = flask.current_app.extensions['socketio']
sid = sid or flask.request.sid
namespace = namespace or flask.request.namespace
socketio.server.enter_room(sid, room, namespace=namespace) | Join a room.
This function puts the user in a room, under the current namespace. The
user and the namespace are obtained from the event context. This is a
function that can only be called from a SocketIO event handler. Example::
@socketio.on('join')
def on_join(data):
username = session['username']
room = data['room']
join_room(room)
send(username + ' has entered the room.', room=room)
:param room: The name of the room to join.
:param sid: The session id of the client. If not provided, the client is
obtained from the request context.
:param namespace: The namespace for the room. If not provided, the
namespace is obtained from the request context. | train | https://github.com/miguelgrinberg/Flask-SocketIO/blob/4bef800d5e7ba7d98a6f4cd94191ff0b4496c334/flask_socketio/__init__.py#L779-L802 | null | import os
import sys
# make sure gevent-socketio is not installed, as it conflicts with
# python-socketio
# (only the legacy gevent-socketio package exposes ``socketio_manage``,
# so a successful import means the incompatible package is installed)
gevent_socketio_found = True
try:
    from socketio import socketio_manage
except ImportError:
    gevent_socketio_found = False
if gevent_socketio_found:
    # abort immediately: the rest of this module would otherwise import
    # the wrong ``socketio`` implementation
    print('The gevent-socketio package is incompatible with this version of '
          'the Flask-SocketIO extension. Please uninstall it, and then '
          'install the latest version of python-socketio in its place.')
    sys.exit(1)
import socketio
import flask
from flask import _request_ctx_stack, json as flask_json
from flask.sessions import SessionMixin
from werkzeug.debug import DebuggedApplication
from werkzeug.serving import run_with_reloader
from .namespace import Namespace
from .test_client import SocketIOTestClient
__version__ = '3.3.2'
class _SocketIOMiddleware(socketio.WSGIApp):
    """WSGI middleware that stores the Flask application instance in the
    WSGI environment before the request is handled, so it can be looked
    up later when dispatching Socket.IO events.
    """
    def __init__(self, socketio_app, flask_app, socketio_path='socket.io'):
        self.flask_app = flask_app
        super(_SocketIOMiddleware, self).__init__(
            socketio_app, flask_app.wsgi_app, socketio_path=socketio_path)

    def __call__(self, environ, start_response):
        # work on a copy so the caller's environ is left untouched
        environ = dict(environ)
        environ['flask.app'] = self.flask_app
        return super(_SocketIOMiddleware, self).__call__(
            environ, start_response)
class _ManagedSession(dict, SessionMixin):
    """User session storage for sessions managed by Flask-SocketIO.

    A plain ``dict`` extended with Flask's ``SessionMixin`` attributes,
    created as a copy of the regular Flask user session so Socket.IO
    handlers operate on their own per-client copy.
    """
    pass
class SocketIO(object):
"""Create a Flask-SocketIO server.
:param app: The flask application instance. If the application instance
isn't known at the time this class is instantiated, then call
``socketio.init_app(app)`` once the application instance is
available.
:param manage_session: If set to ``True``, this extension manages the user
session for Socket.IO events. If set to ``False``,
Flask's own session management is used. When using
Flask's cookie based sessions it is recommended that
you leave this set to the default of ``True``. When
using server-side sessions, a ``False`` setting
enables sharing the user session between HTTP routes
and Socket.IO events.
:param message_queue: A connection URL for a message queue service the
server can use for multi-process communication. A
message queue is not required when using a single
server process.
:param channel: The channel name, when using a message queue. If a channel
isn't specified, a default channel will be used. If
multiple clusters of SocketIO processes need to use the
same message queue without interfering with each other, then
each cluster should use a different channel.
:param path: The path where the Socket.IO server is exposed. Defaults to
``'socket.io'``. Leave this as is unless you know what you are
doing.
:param resource: Alias to ``path``.
:param kwargs: Socket.IO and Engine.IO server options.
The Socket.IO server options are detailed below:
:param client_manager: The client manager instance that will manage the
client list. When this is omitted, the client list
is stored in an in-memory structure, so the use of
multiple connected servers is not possible. In most
cases, this argument does not need to be set
explicitly.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``.
:param binary: ``True`` to support binary payloads, ``False`` to treat all
payloads as text. On Python 2, if this is set to ``True``,
``unicode`` values are treated as text, and ``str`` and
``bytes`` values are treated as binary. This option has no
effect on Python 3, where text and binary payloads are
always automatically discovered.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions. To use the same json encoder and decoder as a Flask
application, use ``flask.json``.
The Engine.IO server configuration supports the following settings:
:param async_mode: The asynchronous model to use. See the Deployment
section in the documentation for a description of the
available options. Valid async modes are
``threading``, ``eventlet``, ``gevent`` and
``gevent_uwsgi``. If this argument is not given,
``eventlet`` is tried first, then ``gevent_uwsgi``,
then ``gevent``, and finally ``threading``. The
first async mode that has all its dependencies installed
is then the one that is chosen.
:param ping_timeout: The time in seconds that the client waits for the
server to respond before disconnecting. The default is
60 seconds.
:param ping_interval: The interval in seconds at which the client pings
the server. The default is 25 seconds.
:param max_http_buffer_size: The maximum size of a message when using the
polling transport. The default is 100,000,000
bytes.
:param allow_upgrades: Whether to allow transport upgrades or not. The
default is ``True``.
:param http_compression: Whether to compress packages when using the
polling transport. The default is ``True``.
:param compression_threshold: Only compress messages when their byte size
is greater than this value. The default is
1024 bytes.
:param cookie: Name of the HTTP cookie that contains the client session
id. If set to ``None``, a cookie is not sent to the client.
The default is ``'io'``.
:param cors_allowed_origins: List of origins that are allowed to connect
to this server. All origins are allowed by
default.
:param cors_credentials: Whether credentials (cookies, authentication) are
allowed in requests to this server. The default is
``True``.
:param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
a logger object to use. To disable logging set to
``False``. The default is ``False``.
"""
def __init__(self, app=None, **kwargs):
    """Initialize the extension, optionally binding it to *app*.

    Initialization is deferred (app factory pattern) unless either the
    Flask app is given or a ``message_queue`` is configured, in which
    case ``init_app`` runs immediately.
    """
    self.server = None
    self.server_options = {}
    self.wsgi_server = None
    self.handlers = []
    self.namespace_handlers = []
    self.exception_handlers = {}
    self.default_exception_handler = None
    self.manage_session = True
    # We can call init_app right away when:
    # - we were given the Flask app instance (standard initialization)
    # - we were not given the app, but a message_queue was configured
    #   (standard initialization for an auxiliary process)
    # Otherwise just record the options and wait for the app factory
    # function to call init_app explicitly.
    if app is None and 'message_queue' not in kwargs:
        self.server_options.update(kwargs)
    else:
        self.init_app(app, **kwargs)
def init_app(self, app, **kwargs):
    """Bind the extension to a Flask application and create the
    Socket.IO server.

    May be called with ``app=None`` for auxiliary processes that only
    post messages through the configured message queue.

    :param app: the Flask application instance, or ``None``.
    :param kwargs: Socket.IO / Engine.IO server options (see the class
                   docstring for the full list).
    """
    if app is not None:
        if not hasattr(app, 'extensions'):
            app.extensions = {}  # pragma: no cover
        app.extensions['socketio'] = self
    self.server_options.update(kwargs)
    self.manage_session = self.server_options.pop('manage_session',
                                                  self.manage_session)
    if 'client_manager' not in self.server_options:
        url = self.server_options.pop('message_queue', None)
        channel = self.server_options.pop('channel', 'flask-socketio')
        # with no app, this process can only write to the queue
        write_only = app is None
        if url:
            # choose the queue manager implementation from the URL scheme
            if url.startswith(('redis://', "rediss://")):
                queue_class = socketio.RedisManager
            elif url.startswith('zmq'):
                queue_class = socketio.ZmqManager
            else:
                queue_class = socketio.KombuManager
            queue = queue_class(url, channel=channel,
                                write_only=write_only)
            self.server_options['client_manager'] = queue
    if 'json' in self.server_options and \
            self.server_options['json'] == flask_json:
        # flask's json module is tricky to use because its output
        # changes when it is invoked inside or outside the app context
        # so here to prevent any ambiguities we replace it with wrappers
        # that ensure that the app context is always present
        class FlaskSafeJSON(object):
            @staticmethod
            def dumps(*args, **kwargs):
                with app.app_context():
                    return flask_json.dumps(*args, **kwargs)

            @staticmethod
            def loads(*args, **kwargs):
                with app.app_context():
                    return flask_json.loads(*args, **kwargs)

        self.server_options['json'] = FlaskSafeJSON
    # the Socket.IO endpoint path; 'path' and 'resource' are aliases
    resource = self.server_options.pop('path', None) or \
        self.server_options.pop('resource', None) or 'socket.io'
    if resource.startswith('/'):
        resource = resource[1:]
    if os.environ.get('FLASK_RUN_FROM_CLI'):
        # running under "flask run": Werkzeug cannot serve WebSocket
        if self.server_options.get('async_mode') is None:
            if app is not None:
                app.logger.warning(
                    'Flask-SocketIO is Running under Werkzeug, WebSocket '
                    'is not available.')
            self.server_options['async_mode'] = 'threading'
    self.server = socketio.Server(**self.server_options)
    self.async_mode = self.server.async_mode
    # replay registrations made before the server existed
    for handler in self.handlers:
        self.server.on(handler[0], handler[1], namespace=handler[2])
    for namespace_handler in self.namespace_handlers:
        self.server.register_namespace(namespace_handler)
    if app is not None:
        # here we attach the SocketIO middleware to the SocketIO object
        # so it can be referenced later if debug middleware needs to be
        # inserted
        self.sockio_mw = _SocketIOMiddleware(self.server, app,
                                             socketio_path=resource)
        app.wsgi_app = self.sockio_mw
def on(self, message, namespace=None):
    """Decorator that registers a SocketIO event handler.

    Example::

        @socketio.on('my event', namespace='/chat')
        def handle_my_custom_event(json):
            print('received json: ' + str(json))

    :param message: event name. Besides user-defined names, 'message'
                    (string payload), 'json' (JSON payload), 'connect'
                    and 'disconnect' are recognized.
    :param namespace: namespace to register under; defaults to the
                      global namespace.
    """
    namespace = namespace or '/'

    def decorator(handler):
        def _handler(sid, *args):
            # dispatch through _handle_event so the Flask request
            # context and session are set up around the user handler
            return self._handle_event(handler, message, namespace, sid,
                                      *args)

        if self.server:
            self.server.on(message, _handler, namespace=namespace)
        else:
            # server not created yet; registration replayed by init_app
            self.handlers.append((message, _handler, namespace))
        return handler
    return decorator
def on_error(self, namespace=None):
    """Decorator that installs a custom error handler for a namespace.

    The decorated function is invoked with the exception instance when a
    SocketIO handler in that namespace raises. Example::

        @socketio.on_error(namespace='/chat')
        def chat_error_handler(e):
            print('An error has occurred: ' + str(e))

    :param namespace: namespace the handler applies to; defaults to the
                      global namespace.
    """
    namespace = namespace or '/'

    def decorator(exception_handler):
        if callable(exception_handler):
            self.exception_handlers[namespace] = exception_handler
            return exception_handler
        raise ValueError('exception_handler must be callable')
    return decorator
def on_error_default(self, exception_handler):
    """Decorator that installs the fallback error handler, used for any
    namespace that has no specific handler. Example::

        @socketio.on_error_default
        def error_handler(e):
            print('An error has occurred: ' + str(e))
    """
    if callable(exception_handler):
        self.default_exception_handler = exception_handler
        return exception_handler
    raise ValueError('exception_handler must be callable')
def on_event(self, message, handler, namespace=None):
    """Register a SocketIO event handler; the plain-function version of
    the ``on`` decorator. Example::

        def on_foo_event(json):
            print('received json: ' + str(json))

        socketio.on_event('my event', on_foo_event, namespace='/chat')

    :param message: event name ('message', 'json', 'connect',
                    'disconnect' or any user-defined name).
    :param handler: the function that handles the event.
    :param namespace: namespace to register under; defaults to the
                      global namespace.
    """
    register = self.on(message, namespace=namespace)
    register(handler)
def on_namespace(self, namespace_handler):
    """Register a class-based namespace handler.

    :param namespace_handler: a ``Namespace`` instance. Queued until the
                              server exists when called before
                              ``init_app``.
    :raises ValueError: if the argument is not a ``Namespace`` instance.
    """
    if not isinstance(namespace_handler, Namespace):
        raise ValueError('Not a namespace instance.')
    namespace_handler._set_socketio(self)
    if self.server:
        self.server.register_namespace(namespace_handler)
    else:
        # no server yet; init_app will register it later
        self.namespace_handlers.append(namespace_handler)
def emit(self, event, *args, **kwargs):
    """Emit a server-generated SocketIO event.

    A JSON blob can be attached as payload. Unlike the module-level
    ``emit``, this can be used outside of a client event context, such
    as in regular HTTP handlers or background tasks. Example::

        @app.route('/ping')
        def ping():
            socketio.emit('ping event', {'data': 42}, namespace='/chat')

    :param event: name of the user event to emit.
    :param args: a dictionary with the JSON data to send as payload.
    :param namespace: namespace to emit under; defaults to the global
                      namespace.
    :param room: deliver only to users in this room; when omitted the
                 event goes to all connected users.
    :param skip_sid: session id to leave out when broadcasting or
                     addressing a room, typically the originator of the
                     message.
    :param callback: invoked with the client's arguments to acknowledge
                     receipt; only valid when addressing an individual
                     client.
    """
    namespace = kwargs.pop('namespace', '/')
    room = kwargs.pop('room', None)
    callback = kwargs.pop('callback', None)
    skip_sid = kwargs.pop('skip_sid', None)
    if not kwargs.pop('include_self', True) and not skip_sid:
        # excluding the sender: skip the client of the current request
        skip_sid = flask.request.sid
    self.server.emit(event, *args, namespace=namespace, room=room,
                     skip_sid=skip_sid, callback=callback, **kwargs)
def send(self, data, json=False, namespace=None, room=None,
         callback=None, include_self=True, skip_sid=None, **kwargs):
    """Send a server-generated SocketIO message (a simpler ``emit``).

    Emits a 'json' event when ``json`` is true, otherwise a 'message'
    event. Usable outside of a client event context.

    :param data: the message to send, either a string or a JSON blob.
    :param json: ``True`` when ``data`` is a JSON blob.
    :param namespace: namespace to send under; defaults to the global
                      namespace.
    :param room: deliver only to users in this room; when omitted the
                 message goes to all connected users.
    :param skip_sid: session id to leave out when broadcasting or
                     addressing a room.
    :param callback: invoked with the client's arguments to acknowledge
                     receipt; only valid when addressing an individual
                     client.
    :param include_self: when false, the sender of the current request is
                         always excluded.
    """
    if not include_self:
        # always exclude the client that triggered the current request
        skip_sid = flask.request.sid
    event = 'json' if json else 'message'
    self.emit(event, data, namespace=namespace, room=room,
              skip_sid=skip_sid, callback=callback, **kwargs)
def close_room(self, room, namespace=None):
    """Remove all users from *room* and delete it from the server.

    Usable outside of a SocketIO event context.

    :param room: name of the room to close.
    :param namespace: namespace the room lives in; defaults to the
                      global namespace.
    """
    self.server.close_room(room, namespace)
def run(self, app, host=None, port=None, **kwargs):
    """Run the SocketIO web server.

    :param app: The Flask application instance.
    :param host: The hostname or IP address for the server to listen on.
                 Defaults to 127.0.0.1.
    :param port: The port number for the server to listen on. Defaults to
                 5000.
    :param debug: ``True`` to start the server in debug mode, ``False`` to
                  start in normal mode.
    :param use_reloader: ``True`` to enable the Flask reloader, ``False``
                         to disable it.
    :param extra_files: A list of additional files that the Flask
                        reloader should watch. Defaults to ``None``
    :param log_output: If ``True``, the server logs all incoming
                       connections. If ``False`` logging is disabled.
                       Defaults to ``True`` in debug mode, ``False``
                       in normal mode. Unused when the threading async
                       mode is used.
    :param kwargs: Additional web server options. The web server options
                   are specific to the server used in each of the supported
                   async modes. Note that options provided here will
                   not be seen when using an external web server such
                   as gunicorn, since this method is not called in that
                   case.
    """
    if host is None:
        host = '127.0.0.1'
    if port is None:
        # derive the port from SERVER_NAME when it carries one
        server_name = app.config['SERVER_NAME']
        if server_name and ':' in server_name:
            port = int(server_name.rsplit(':', 1)[1])
        else:
            port = 5000
    debug = kwargs.pop('debug', app.debug)
    log_output = kwargs.pop('log_output', debug)
    use_reloader = kwargs.pop('use_reloader', debug)
    extra_files = kwargs.pop('extra_files', None)
    app.debug = debug
    if app.debug and self.server.eio.async_mode != 'threading':
        # put the debug middleware between the SocketIO middleware
        # and the Flask application instance
        #
        #    mw1    mw2    mw3   Flask app
        #     o ---- o ---- o ---- o
        #    /
        #   o Flask-SocketIO
        #    \  middleware
        #     o
        # Flask-SocketIO WebSocket handler
        #
        # BECOMES
        #
        #  dbg-mw    mw1    mw2    mw3   Flask app
        #     o ---- o ---- o ---- o ---- o
        #    /
        #   o Flask-SocketIO
        #    \  middleware
        #     o
        # Flask-SocketIO WebSocket handler
        #
        self.sockio_mw.wsgi_app = DebuggedApplication(self.sockio_mw.wsgi_app,
                                                      evalex=True)
    if self.server.eio.async_mode == 'threading':
        # plain Werkzeug server: no WebSocket, long-polling only
        from werkzeug._internal import _log
        _log('warning', 'WebSocket transport not available. Install '
                        'eventlet or gevent and gevent-websocket for '
                        'improved performance.')
        app.run(host=host, port=port, threaded=True,
                use_reloader=use_reloader, **kwargs)
    elif self.server.eio.async_mode == 'eventlet':
        def run_server():
            import eventlet
            import eventlet.wsgi
            import eventlet.green
            addresses = eventlet.green.socket.getaddrinfo(host, port)
            if not addresses:
                raise RuntimeError('Could not resolve host to a valid address')
            eventlet_socket = eventlet.listen(addresses[0][4], addresses[0][0])

            # If provided an SSL argument, use an SSL socket
            ssl_args = ['keyfile', 'certfile', 'server_side', 'cert_reqs',
                        'ssl_version', 'ca_certs',
                        'do_handshake_on_connect', 'suppress_ragged_eofs',
                        'ciphers']
            ssl_params = {k: kwargs[k] for k in kwargs if k in ssl_args}
            if len(ssl_params) > 0:
                # SSL-related kwargs go to wrap_ssl, not to the WSGI server
                for k in ssl_params:
                    kwargs.pop(k)
                ssl_params['server_side'] = True  # Listening requires true
                eventlet_socket = eventlet.wrap_ssl(eventlet_socket,
                                                    **ssl_params)
            eventlet.wsgi.server(eventlet_socket, app,
                                 log_output=log_output, **kwargs)

        if use_reloader:
            run_with_reloader(run_server, extra_files=extra_files)
        else:
            run_server()
    elif self.server.eio.async_mode == 'gevent':
        from gevent import pywsgi
        try:
            # WebSocket support is optional, provided by gevent-websocket
            from geventwebsocket.handler import WebSocketHandler
            websocket = True
        except ImportError:
            websocket = False

        log = 'default'
        if not log_output:
            log = None
        if websocket:
            self.wsgi_server = pywsgi.WSGIServer(
                (host, port), app, handler_class=WebSocketHandler,
                log=log, **kwargs)
        else:
            self.wsgi_server = pywsgi.WSGIServer((host, port), app,
                                                 log=log, **kwargs)

        if use_reloader:
            # monkey patching is required by the reloader
            from gevent import monkey
            monkey.patch_all()

            def run_server():
                self.wsgi_server.serve_forever()

            run_with_reloader(run_server, extra_files=extra_files)
        else:
            self.wsgi_server.serve_forever()
def stop(self):
    """Stop a running SocketIO web server.

    Only valid inside an HTTP or Socket.IO handler function, since in
    threading mode the werkzeug shutdown hook is read from the active
    request environment.
    """
    async_mode = self.server.eio.async_mode
    if async_mode == 'threading':
        stopper = flask.request.environ.get('werkzeug.server.shutdown')
        if not stopper:
            raise RuntimeError('Cannot stop unknown web server')
        stopper()
    elif async_mode == 'eventlet':
        raise SystemExit
    elif async_mode == 'gevent':
        self.wsgi_server.stop()
def start_background_task(self, target, *args, **kwargs):
    """Spawn a background task using the configured async model.

    :param target: the function to execute in the background.
    :param args: positional arguments forwarded to ``target``.
    :param kwargs: keyword arguments forwarded to ``target``.

    Returns a ``Thread``-compatible object whose ``start()`` method has
    already been called by this function.
    """
    return self.server.start_background_task(target, *args, **kwargs)
def sleep(self, seconds=0):
    """Pause the calling task for *seconds* using the configured async
    model.

    Use this instead of ``time.sleep`` so the call cooperates with
    whichever async mode (threading/eventlet/gevent) is in use.
    """
    return self.server.sleep(seconds)
def test_client(self, app, namespace=None, query_string=None,
                headers=None, flask_test_client=None):
    """Return a Socket.IO test client bound to this server.

    The client works much like the Flask test client, but speaks the
    Socket.IO protocol.

    :param app: the Flask application instance.
    :param namespace: namespace to connect to; the global namespace is
                      used when omitted.
    :param query_string: a string with custom query string arguments.
    :param headers: a dictionary with custom HTTP headers.
    :param flask_test_client: the Flask test client currently in use, if
                              any. Optional, but required when the Flask
                              user session or cookies set by HTTP routes
                              must be visible to Socket.IO events.
    """
    return SocketIOTestClient(
        app, self, namespace=namespace, query_string=query_string,
        headers=headers, flask_test_client=flask_test_client)
def _handle_event(self, handler, message, namespace, sid, *args):
    # Dispatch a Socket.IO event to a user handler inside a Flask request
    # context rebuilt from the WSGI environ saved at connection time.
    if sid not in self.server.environ:
        # we don't have record of this client, ignore this event
        return '', 400
    app = self.server.environ[sid]['flask.app']
    with app.request_context(self.server.environ[sid]):
        if self.manage_session:
            # manage a separate session for this client's Socket.IO events
            # created as a copy of the regular user session
            if 'saved_session' not in self.server.environ[sid]:
                self.server.environ[sid]['saved_session'] = \
                    _ManagedSession(flask.session)
            session_obj = self.server.environ[sid]['saved_session']
        else:
            # let Flask handle the user session
            # for cookie based sessions, this effectively freezes the
            # session to its state at connection time
            # for server-side sessions, this allows HTTP and Socket.IO to
            # share the session, with both having read/write access to it
            session_obj = flask.session._get_current_object()
        _request_ctx_stack.top.session = session_obj
        # expose Socket.IO details on the request object for handlers
        flask.request.sid = sid
        flask.request.namespace = namespace
        flask.request.event = {'message': message, 'args': args}
        try:
            if message == 'connect':
                # connect handlers take no arguments
                ret = handler()
            else:
                ret = handler(*args)
        except:  # NOTE(review): bare except also traps SystemExit etc.
            err_handler = self.exception_handlers.get(
                namespace, self.default_exception_handler)
            if err_handler is None:
                raise
            type, value, traceback = sys.exc_info()
            return err_handler(value)
        if not self.manage_session:
            # when Flask is managing the user session, it needs to save it
            if not hasattr(session_obj, 'modified') or session_obj.modified:
                resp = app.response_class()
                app.session_interface.save_session(app, session_obj, resp)
        return ret
def emit(event, *args, **kwargs):
    """Emit a SocketIO event from within an event handler.

    Sends a named event, optionally with a JSON payload, to one or more
    connected clients. Only callable from a SocketIO event handler, since
    it reads the client context of the originating event. Example::

        @socketio.on('my event')
        def handle_my_custom_event(json):
            emit('my response', {'data': 42})

    :param event: the name of the user event to emit.
    :param args: a dictionary with the JSON data to send as payload.
    :param namespace: namespace under which to send the message; defaults
                      to the namespace of the originating event. ``'/'``
                      selects the global namespace explicitly.
    :param callback: function invoked with the client's acknowledgement.
    :param broadcast: ``True`` to address all clients, ``False`` to reply
                      only to the sender of the originating event.
    :param room: deliver to all users in the given room; setting this
                 implies ``broadcast=True``.
    :param include_self: whether the sender also receives the message when
                         broadcasting or addressing a room.
    :param ignore_queue: when a message queue is configured, ``True``
                         bypasses the queue and emits directly. More
                         efficient, but only correct with a single server
                         process or a single addressee; leave ``False``
                         normally.
    """
    try:
        namespace = kwargs['namespace']
    except KeyError:
        namespace = flask.request.namespace
    callback = kwargs.get('callback')
    broadcast = kwargs.get('broadcast')
    room = kwargs.get('room')
    if room is None and not broadcast:
        # no explicit target: reply privately to the sender
        room = flask.request.sid
    include_self = kwargs.get('include_self', True)
    ignore_queue = kwargs.get('ignore_queue', False)
    sio = flask.current_app.extensions['socketio']
    return sio.emit(event, *args, namespace=namespace, room=room,
                    include_self=include_self, callback=callback,
                    ignore_queue=ignore_queue)
def send(message, **kwargs):
    """Send a simple SocketIO message from within an event handler.

    The message can be a string or a JSON blob; ``emit()`` is the richer
    alternative and should be preferred. Only callable from a SocketIO
    event handler.

    :param message: the message to send, either a string or a JSON blob.
    :param json: ``True`` if ``message`` is a JSON blob, ``False``
                 otherwise.
    :param namespace: namespace under which to send the message; defaults
                      to the namespace of the originating event.
    :param callback: function invoked with the client's acknowledgement.
    :param broadcast: ``True`` to address all connected clients, ``False``
                      to reply only to the sender of the originating event.
    :param room: deliver to all users in the given room.
    :param include_self: whether the sender also receives the message when
                         broadcasting or addressing a room.
    :param ignore_queue: bypass a configured message queue; see ``emit``
                         for the caveats. Leave ``False`` normally.
    """
    json = kwargs.get('json', False)
    try:
        namespace = kwargs['namespace']
    except KeyError:
        namespace = flask.request.namespace
    callback = kwargs.get('callback')
    broadcast = kwargs.get('broadcast')
    room = kwargs.get('room')
    if room is None and not broadcast:
        # no explicit target: reply privately to the sender
        room = flask.request.sid
    include_self = kwargs.get('include_self', True)
    ignore_queue = kwargs.get('ignore_queue', False)
    sio = flask.current_app.extensions['socketio']
    return sio.send(message, json=json, namespace=namespace, room=room,
                    include_self=include_self, callback=callback,
                    ignore_queue=ignore_queue)
def leave_room(room, sid=None, namespace=None):
    """Remove a user from a room under the current namespace.

    The user and namespace default to the values in the event context.
    Example::

        @socketio.on('leave')
        def on_leave(data):
            username = session['username']
            room = data['room']
            leave_room(room)
            send(username + ' has left the room.', room=room)

    :param room: the name of the room to leave.
    :param sid: session id of the client; taken from the request context
                when omitted.
    :param namespace: namespace of the room; taken from the request
                      context when omitted.
    """
    sio = flask.current_app.extensions['socketio']
    if not sid:
        sid = flask.request.sid
    if not namespace:
        namespace = flask.request.namespace
    sio.server.leave_room(sid, room, namespace=namespace)
def close_room(room, namespace=None):
    """Close a room: remove any users in it, then delete it from the
    server.

    :param room: the name of the room to close.
    :param namespace: namespace of the room; taken from the request
                      context when omitted.
    """
    sio = flask.current_app.extensions['socketio']
    if not namespace:
        namespace = flask.request.namespace
    sio.server.close_room(room, namespace=namespace)
def rooms(sid=None, namespace=None):
    """Return the list of rooms the client has entered, including the
    personal room assigned by the Socket.IO server.

    :param sid: session id of the client; taken from the request context
                when omitted.
    :param namespace: namespace of the rooms; taken from the request
                      context when omitted.
    """
    sio = flask.current_app.extensions['socketio']
    return sio.server.rooms(sid or flask.request.sid,
                            namespace=namespace or flask.request.namespace)
def disconnect(sid=None, namespace=None, silent=False):
    """Terminate the connection with the client, which then receives a
    disconnect event. Example::

        @socketio.on('message')
        def receive_message(msg):
            if is_banned(session['username']):
                disconnect()
            else:
                # ...
                pass

    :param sid: session id of the client; taken from the request context
                when omitted.
    :param namespace: namespace of the connection; taken from the request
                      context when omitted.
    :param silent: deprecated; accepted for backward compatibility and
                   ignored.
    """
    sio = flask.current_app.extensions['socketio']
    return sio.server.disconnect(sid or flask.request.sid,
                                 namespace=namespace or
                                 flask.request.namespace)
|
miguelgrinberg/Flask-SocketIO | flask_socketio/__init__.py | leave_room | python | def leave_room(room, sid=None, namespace=None):
socketio = flask.current_app.extensions['socketio']
sid = sid or flask.request.sid
namespace = namespace or flask.request.namespace
socketio.server.leave_room(sid, room, namespace=namespace) | Leave a room.
This function removes the user from a room, under the current namespace.
The user and the namespace are obtained from the event context. Example::
@socketio.on('leave')
def on_leave(data):
username = session['username']
room = data['room']
leave_room(room)
send(username + ' has left the room.', room=room)
:param room: The name of the room to leave.
:param sid: The session id of the client. If not provided, the client is
obtained from the request context.
:param namespace: The namespace for the room. If not provided, the
namespace is obtained from the request context. | train | https://github.com/miguelgrinberg/Flask-SocketIO/blob/4bef800d5e7ba7d98a6f4cd94191ff0b4496c334/flask_socketio/__init__.py#L805-L827 | null | import os
import sys
# make sure gevent-socketio is not installed, as it conflicts with
# python-socketio
gevent_socketio_found = True
try:
    from socketio import socketio_manage
except ImportError:
    # python-socketio does not export socketio_manage; the ImportError
    # therefore means the incompatible gevent-socketio package is absent
    gevent_socketio_found = False
if gevent_socketio_found:
    # abort at import time with an actionable message
    print('The gevent-socketio package is incompatible with this version of '
          'the Flask-SocketIO extension. Please uninstall it, and then '
          'install the latest version of python-socketio in its place.')
    sys.exit(1)
import socketio
import flask
from flask import _request_ctx_stack, json as flask_json
from flask.sessions import SessionMixin
from werkzeug.debug import DebuggedApplication
from werkzeug.serving import run_with_reloader
from .namespace import Namespace
from .test_client import SocketIOTestClient
__version__ = '3.3.2'
class _SocketIOMiddleware(socketio.WSGIApp):
    """WSGI middleware that exposes the Flask application instance in the
    WSGI environment (under ``'flask.app'``) before each request is
    processed."""

    def __init__(self, socketio_app, flask_app, socketio_path='socket.io'):
        self.flask_app = flask_app
        super(_SocketIOMiddleware, self).__init__(
            socketio_app, flask_app.wsgi_app, socketio_path=socketio_path)

    def __call__(self, environ, start_response):
        # work on a copy so the caller's environ dict is left untouched
        environ = dict(environ)
        environ['flask.app'] = self.flask_app
        return super(_SocketIOMiddleware, self).__call__(environ,
                                                         start_response)
class _ManagedSession(dict, SessionMixin):
    """User session managed by Flask-SocketIO: a plain ``dict`` extended
    with the Flask session attributes provided by ``SessionMixin``."""
    pass
class SocketIO(object):
"""Create a Flask-SocketIO server.
:param app: The flask application instance. If the application instance
isn't known at the time this class is instantiated, then call
``socketio.init_app(app)`` once the application instance is
available.
:param manage_session: If set to ``True``, this extension manages the user
session for Socket.IO events. If set to ``False``,
Flask's own session management is used. When using
Flask's cookie based sessions it is recommended that
you leave this set to the default of ``True``. When
using server-side sessions, a ``False`` setting
enables sharing the user session between HTTP routes
and Socket.IO events.
:param message_queue: A connection URL for a message queue service the
server can use for multi-process communication. A
message queue is not required when using a single
server process.
:param channel: The channel name, when using a message queue. If a channel
isn't specified, a default channel will be used. If
multiple clusters of SocketIO processes need to use the
same message queue without interfering with each other, then
each cluster should use a different channel.
:param path: The path where the Socket.IO server is exposed. Defaults to
``'socket.io'``. Leave this as is unless you know what you are
doing.
:param resource: Alias to ``path``.
:param kwargs: Socket.IO and Engine.IO server options.
The Socket.IO server options are detailed below:
:param client_manager: The client manager instance that will manage the
client list. When this is omitted, the client list
is stored in an in-memory structure, so the use of
multiple connected servers is not possible. In most
cases, this argument does not need to be set
explicitly.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``.
:param binary: ``True`` to support binary payloads, ``False`` to treat all
payloads as text. On Python 2, if this is set to ``True``,
``unicode`` values are treated as text, and ``str`` and
``bytes`` values are treated as binary. This option has no
effect on Python 3, where text and binary payloads are
always automatically discovered.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions. To use the same json encoder and decoder as a Flask
application, use ``flask.json``.
The Engine.IO server configuration supports the following settings:
:param async_mode: The asynchronous model to use. See the Deployment
section in the documentation for a description of the
available options. Valid async modes are
``threading``, ``eventlet``, ``gevent`` and
``gevent_uwsgi``. If this argument is not given,
``eventlet`` is tried first, then ``gevent_uwsgi``,
then ``gevent``, and finally ``threading``. The
first async mode that has all its dependencies installed
is the one that is chosen.
:param ping_timeout: The time in seconds that the client waits for the
server to respond before disconnecting. The default is
60 seconds.
:param ping_interval: The interval in seconds at which the client pings
the server. The default is 25 seconds.
:param max_http_buffer_size: The maximum size of a message when using the
polling transport. The default is 100,000,000
bytes.
:param allow_upgrades: Whether to allow transport upgrades or not. The
default is ``True``.
:param http_compression: Whether to compress packages when using the
polling transport. The default is ``True``.
:param compression_threshold: Only compress messages when their byte size
is greater than this value. The default is
1024 bytes.
:param cookie: Name of the HTTP cookie that contains the client session
id. If set to ``None``, a cookie is not sent to the client.
The default is ``'io'``.
:param cors_allowed_origins: List of origins that are allowed to connect
to this server. All origins are allowed by
default.
:param cors_credentials: Whether credentials (cookies, authentication) are
allowed in requests to this server. The default is
``True``.
:param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
a logger object to use. To disable logging set to
``False``. The default is ``False``.
"""
def __init__(self, app=None, **kwargs):
    """Create the extension; see the class docstring for the options."""
    self.server = None
    self.server_options = {}
    self.wsgi_server = None
    self.handlers = []
    self.namespace_handlers = []
    self.exception_handlers = {}
    self.default_exception_handler = None
    self.manage_session = True
    # init_app() can run immediately when the Flask app is known, or when
    # only a message_queue was given (auxiliary emit-only process). In all
    # other cases the options are stashed and the client is expected to
    # call init_app() later, from an app factory function.
    if app is None and 'message_queue' not in kwargs:
        self.server_options.update(kwargs)
    else:
        self.init_app(app, **kwargs)
def init_app(self, app, **kwargs):
    """Initialize the extension for *app*; may be ``None`` for a
    write-only auxiliary process. Creates the ``socketio.Server`` and
    installs the WSGI middleware on the application."""
    if app is not None:
        if not hasattr(app, 'extensions'):
            app.extensions = {}  # pragma: no cover
        app.extensions['socketio'] = self
    self.server_options.update(kwargs)
    self.manage_session = self.server_options.pop('manage_session',
                                                  self.manage_session)
    if 'client_manager' not in self.server_options:
        url = self.server_options.pop('message_queue', None)
        channel = self.server_options.pop('channel', 'flask-socketio')
        # without an app this process can only publish to the queue
        write_only = app is None
        if url:
            # pick the queue backend from the URL scheme
            if url.startswith(('redis://', "rediss://")):
                queue_class = socketio.RedisManager
            elif url.startswith('zmq'):
                queue_class = socketio.ZmqManager
            else:
                queue_class = socketio.KombuManager
            queue = queue_class(url, channel=channel,
                                write_only=write_only)
            self.server_options['client_manager'] = queue
    if 'json' in self.server_options and \
            self.server_options['json'] == flask_json:
        # flask's json module is tricky to use because its output
        # changes when it is invoked inside or outside the app context
        # so here to prevent any ambiguities we replace it with wrappers
        # that ensure that the app context is always present
        class FlaskSafeJSON(object):
            @staticmethod
            def dumps(*args, **kwargs):
                with app.app_context():
                    return flask_json.dumps(*args, **kwargs)

            @staticmethod
            def loads(*args, **kwargs):
                with app.app_context():
                    return flask_json.loads(*args, **kwargs)
        self.server_options['json'] = FlaskSafeJSON
    # 'path' and 'resource' are aliases; strip a leading slash
    resource = self.server_options.pop('path', None) or \
        self.server_options.pop('resource', None) or 'socket.io'
    if resource.startswith('/'):
        resource = resource[1:]
    if os.environ.get('FLASK_RUN_FROM_CLI'):
        # under "flask run" only the threading mode works
        if self.server_options.get('async_mode') is None:
            if app is not None:
                app.logger.warning(
                    'Flask-SocketIO is Running under Werkzeug, WebSocket '
                    'is not available.')
            self.server_options['async_mode'] = 'threading'
    self.server = socketio.Server(**self.server_options)
    self.async_mode = self.server.async_mode
    # register handlers and namespaces collected before the server existed
    for handler in self.handlers:
        self.server.on(handler[0], handler[1], namespace=handler[2])
    for namespace_handler in self.namespace_handlers:
        self.server.register_namespace(namespace_handler)
    if app is not None:
        # here we attach the SocketIO middleware to the SocketIO object so
        # it can be referenced later if debug middleware needs to be
        # inserted
        self.sockio_mw = _SocketIOMiddleware(self.server, app,
                                             socketio_path=resource)
        app.wsgi_app = self.sockio_mw
def on(self, message, namespace=None):
    """Decorator to register a SocketIO event handler.

    Example::

        @socketio.on('my event', namespace='/chat')
        def handle_my_custom_event(json):
            print('received json: ' + str(json))

    :param message: the name of the event. Normally a user-defined string;
                    ``'message'``, ``'json'``, ``'connect'`` and
                    ``'disconnect'`` are predefined.
    :param namespace: the namespace on which the handler is to be
                      registered. Defaults to the global namespace.
    """
    namespace = namespace or '/'

    def decorator(handler):
        def _handler(sid, *args):
            # bridge python-socketio's (sid, *args) call signature to the
            # Flask-style handler via _handle_event
            return self._handle_event(handler, message, namespace, sid,
                                      *args)
        if self.server:
            self.server.on(message, _handler, namespace=namespace)
        else:
            # server not created yet; init_app() registers these later
            self.handlers.append((message, _handler, namespace))
        return handler
    return decorator
def on_error(self, namespace=None):
    """Decorator that installs a custom error handler for SocketIO events
    raised in a namespace (the global namespace by default). The decorated
    function receives the raised exception as its only argument.

    Example::

        @socketio.on_error(namespace='/chat')
        def chat_error_handler(e):
            print('An error has occurred: ' + str(e))

    :param namespace: the namespace the error handler applies to.
    :raises ValueError: if the decorated object is not callable.
    """
    ns = namespace or '/'

    def register(exception_handler):
        if not callable(exception_handler):
            raise ValueError('exception_handler must be callable')
        self.exception_handlers[ns] = exception_handler
        return exception_handler
    return register
def on_error_default(self, exception_handler):
    """Decorator that installs a fallback error handler, used for any
    namespace without a handler registered via ``on_error``.

    Example::

        @socketio.on_error_default
        def error_handler(e):
            print('An error has occurred: ' + str(e))

    :raises ValueError: if the decorated object is not callable.
    """
    if not callable(exception_handler):
        raise ValueError('exception_handler must be callable')
    self.default_exception_handler = exception_handler
    return exception_handler
def on_event(self, message, handler, namespace=None):
    """Register a SocketIO event handler; the non-decorator form of
    ``on``.

    Example::

        def on_foo_event(json):
            print('received json: ' + str(json))

        socketio.on_event('my event', on_foo_event, namespace='/chat')

    :param message: the name of the event (see ``on`` for the predefined
                    names).
    :param handler: the function that handles the event.
    :param namespace: the namespace on which the handler is to be
                      registered. Defaults to the global namespace.
    """
    decorator = self.on(message, namespace=namespace)
    decorator(handler)
def on_namespace(self, namespace_handler):
    """Register a class-based namespace handler.

    :param namespace_handler: a ``Namespace`` instance.
    :raises ValueError: if the argument is not a ``Namespace`` instance.
    """
    if not isinstance(namespace_handler, Namespace):
        raise ValueError('Not a namespace instance.')
    namespace_handler._set_socketio(self)
    if self.server:
        self.server.register_namespace(namespace_handler)
    else:
        # server not created yet; init_app() registers these later
        self.namespace_handlers.append(namespace_handler)
def emit(self, event, *args, **kwargs):
    """Emit a server-generated SocketIO event to one or more clients.

    Unlike the module-level ``emit()``, this method works outside of a
    SocketIO event context, e.g. in an HTTP route or background task.
    Example::

        @app.route('/ping')
        def ping():
            socketio.emit('ping event', {'data': 42}, namespace='/chat')

    :param event: the name of the user event to emit.
    :param args: a dictionary with the JSON data to send as payload.
    :param namespace: namespace under which to send the message; defaults
                      to the global namespace.
    :param room: deliver to all users in the given room; all connected
                 users when omitted.
    :param skip_sid: session id to skip when broadcasting or addressing a
                     room, typically the originator of the message.
    :param callback: function called with the client-provided arguments to
                     acknowledge receipt; only usable when addressing an
                     individual client.
    """
    namespace = kwargs.pop('namespace', '/')
    room = kwargs.pop('room', None)
    include_self = kwargs.pop('include_self', True)
    skip_sid = kwargs.pop('skip_sid', None)
    callback = kwargs.pop('callback', None)
    if not include_self and not skip_sid:
        # sender opted out of receiving its own broadcast
        skip_sid = flask.request.sid
    self.server.emit(event, *args, namespace=namespace, room=room,
                     skip_sid=skip_sid, callback=callback, **kwargs)
def send(self, data, json=False, namespace=None, room=None,
         callback=None, include_self=True, skip_sid=None, **kwargs):
    """Send a server-generated SocketIO message to one or more clients.

    A simpler variant of ``emit()`` that delivers a plain string or JSON
    blob; ``emit()`` should be preferred. Works outside of a SocketIO
    event context.

    :param data: the message to send, either a string or a JSON blob.
    :param json: ``True`` if ``data`` is a JSON blob, ``False`` otherwise.
    :param namespace: namespace under which to send the message; defaults
                      to the global namespace.
    :param room: deliver only to users in the given room; all connected
                 users when omitted.
    :param callback: function called with the client-provided arguments to
                     acknowledge receipt; only usable when addressing an
                     individual client.
    :param include_self: whether the sender also receives the message.
    :param skip_sid: session id to skip when broadcasting or addressing a
                     room, typically the originator of the message.
    """
    if not include_self:
        # sender opted out of receiving its own broadcast
        skip_sid = flask.request.sid
    event = 'json' if json else 'message'
    self.emit(event, data, namespace=namespace, room=room,
              skip_sid=skip_sid, callback=callback, **kwargs)
def close_room(self, room, namespace=None):
    """Remove all users from *room* and delete it from the server.

    Usable outside of a SocketIO event context.

    :param room: the name of the room to close.
    :param namespace: the namespace the room belongs to; the global
                      namespace when omitted.
    """
    self.server.close_room(room, namespace)
def run(self, app, host=None, port=None, **kwargs):
    """Run the SocketIO web server.

    :param app: The Flask application instance.
    :param host: The hostname or IP address for the server to listen on.
                 Defaults to 127.0.0.1.
    :param port: The port number for the server to listen on. Defaults to
                 5000.
    :param debug: ``True`` to start the server in debug mode, ``False`` to
                  start in normal mode.
    :param use_reloader: ``True`` to enable the Flask reloader, ``False``
                         to disable it.
    :param extra_files: A list of additional files that the Flask
                        reloader should watch. Defaults to ``None``.
    :param log_output: If ``True``, the server logs all incoming
                       connections. If ``False`` logging is disabled.
                       Defaults to ``True`` in debug mode, ``False`` in
                       normal mode. Unused when the threading async mode
                       is used.
    :param kwargs: Additional web server options, specific to the server
                   used in each of the supported async modes. Note that
                   options provided here are not seen when using an
                   external web server such as gunicorn, since this
                   method is not called in that case.
    """
    if host is None:
        host = '127.0.0.1'
    if port is None:
        # derive the port from SERVER_NAME when it carries one
        server_name = app.config['SERVER_NAME']
        if server_name and ':' in server_name:
            port = int(server_name.rsplit(':', 1)[1])
        else:
            port = 5000
    debug = kwargs.pop('debug', app.debug)
    log_output = kwargs.pop('log_output', debug)
    use_reloader = kwargs.pop('use_reloader', debug)
    extra_files = kwargs.pop('extra_files', None)
    app.debug = debug
    if app.debug and self.server.eio.async_mode != 'threading':
        # put the debug middleware between the SocketIO middleware
        # and the Flask application instance
        #
        #    mw1    mw2    mw3   Flask app
        #     o ---- o ---- o ---- o
        #    /
        #   o Flask-SocketIO
        #    \  middleware
        #     o
        # Flask-SocketIO WebSocket handler
        #
        # BECOMES
        #
        #  dbg-mw  mw1    mw2    mw3   Flask app
        #     o ---- o ---- o ---- o ---- o
        #    /
        #   o Flask-SocketIO
        #    \  middleware
        #     o
        # Flask-SocketIO WebSocket handler
        #
        self.sockio_mw.wsgi_app = DebuggedApplication(
            self.sockio_mw.wsgi_app, evalex=True)
    if self.server.eio.async_mode == 'threading':
        from werkzeug._internal import _log
        _log('warning', 'WebSocket transport not available. Install '
                        'eventlet or gevent and gevent-websocket for '
                        'improved performance.')
        app.run(host=host, port=port, threaded=True,
                use_reloader=use_reloader, **kwargs)
    elif self.server.eio.async_mode == 'eventlet':
        def run_server():
            import eventlet
            import eventlet.wsgi
            import eventlet.green
            addresses = eventlet.green.socket.getaddrinfo(host, port)
            if not addresses:
                raise RuntimeError(
                    'Could not resolve host to a valid address')
            eventlet_socket = eventlet.listen(addresses[0][4],
                                              addresses[0][0])

            # If provided an SSL argument, use an SSL socket
            ssl_args = ['keyfile', 'certfile', 'server_side', 'cert_reqs',
                        'ssl_version', 'ca_certs',
                        'do_handshake_on_connect', 'suppress_ragged_eofs',
                        'ciphers']
            ssl_params = {k: kwargs[k] for k in kwargs if k in ssl_args}
            if len(ssl_params) > 0:
                # SSL options must not be forwarded to the wsgi server
                for k in ssl_params:
                    kwargs.pop(k)
                ssl_params['server_side'] = True  # Listening requires true
                eventlet_socket = eventlet.wrap_ssl(eventlet_socket,
                                                    **ssl_params)

            eventlet.wsgi.server(eventlet_socket, app,
                                 log_output=log_output, **kwargs)

        if use_reloader:
            run_with_reloader(run_server, extra_files=extra_files)
        else:
            run_server()
    elif self.server.eio.async_mode == 'gevent':
        from gevent import pywsgi
        try:
            from geventwebsocket.handler import WebSocketHandler
            websocket = True
        except ImportError:
            # gevent-websocket not installed; fall back to polling only
            websocket = False

        log = 'default'
        if not log_output:
            log = None
        if websocket:
            self.wsgi_server = pywsgi.WSGIServer(
                (host, port), app, handler_class=WebSocketHandler,
                log=log, **kwargs)
        else:
            self.wsgi_server = pywsgi.WSGIServer((host, port), app,
                                                 log=log, **kwargs)

        if use_reloader:
            # monkey patching is required by the reloader
            from gevent import monkey
            monkey.patch_all()

            def run_server():
                self.wsgi_server.serve_forever()

            run_with_reloader(run_server, extra_files=extra_files)
        else:
            self.wsgi_server.serve_forever()
def stop(self):
    """Shut down the running SocketIO web server.

    Must be invoked from within an HTTP or SocketIO handler, because the
    threading mode relies on the Werkzeug shutdown hook stored in the
    current request environment.
    """
    mode = self.server.eio.async_mode
    if mode == 'threading':
        shutdown = flask.request.environ.get('werkzeug.server.shutdown')
        if not shutdown:
            raise RuntimeError('Cannot stop unknown web server')
        shutdown()
    elif mode == 'eventlet':
        # the eventlet wsgi server is stopped by raising SystemExit
        raise SystemExit
    elif mode == 'gevent':
        self.wsgi_server.stop()
def start_background_task(self, target, *args, **kwargs):
    """Launch *target* as a background task using the configured async mode.

    :param target: the function to run in the background.
    :param args: positional arguments forwarded to ``target``.
    :param kwargs: keyword arguments forwarded to ``target``.

    Returns an object compatible with ``threading.Thread``; its ``start()``
    method has already been called by the time this function returns.
    """
    return self.server.start_background_task(target, *args, **kwargs)
def sleep(self, seconds=0):
    """Suspend the calling task for *seconds* using the configured async
    mode.

    Utility wrapper so applications do not need to pick the sleep call
    that matches the selected async framework.
    """
    return self.server.sleep(seconds)
def test_client(self, app, namespace=None, query_string=None,
                headers=None, flask_test_client=None):
    """Return a Socket.IO test client bound to this server.

    Works much like the Flask test client, but speaks the Socket.IO
    protocol against this server instance.

    :param app: the Flask application instance.
    :param namespace: namespace the client connects to; defaults to the
                      global namespace.
    :param query_string: optional string with custom query arguments.
    :param headers: optional dictionary of custom HTTP headers.
    :param flask_test_client: the Flask test client currently in use, if
                              any.  Required when the Flask user session
                              or cookies set by HTTP routes must be
                              visible from Socket.IO events.
    """
    return SocketIOTestClient(
        app, self, namespace=namespace, query_string=query_string,
        headers=headers, flask_test_client=flask_test_client)
def _handle_event(self, handler, message, namespace, sid, *args):
    """Invoke a user event handler inside a Flask request context.

    Sets up the per-client session object and the request attributes
    (``sid``, ``namespace``, ``event``), then dispatches to ``handler``
    and routes any exception to the registered error handlers.
    """
    if sid not in self.server.environ:
        # we don't have record of this client, ignore this event
        return '', 400
    app = self.server.environ[sid]['flask.app']
    with app.request_context(self.server.environ[sid]):
        if self.manage_session:
            # manage a separate session for this client's Socket.IO events
            # created as a copy of the regular user session
            if 'saved_session' not in self.server.environ[sid]:
                self.server.environ[sid]['saved_session'] = \
                    _ManagedSession(flask.session)
            session_obj = self.server.environ[sid]['saved_session']
        else:
            # let Flask handle the user session
            # for cookie based sessions, this effectively freezes the
            # session to its state at connection time
            # for server-side sessions, this allows HTTP and Socket.IO to
            # share the session, with both having read/write access to it
            session_obj = flask.session._get_current_object()
        _request_ctx_stack.top.session = session_obj
        flask.request.sid = sid
        flask.request.namespace = namespace
        flask.request.event = {'message': message, 'args': args}
        try:
            if message == 'connect':
                # connect handlers are invoked without arguments
                ret = handler()
            else:
                ret = handler(*args)
        except:
            # NOTE(review): bare except also intercepts BaseException
            # (e.g. SystemExit/KeyboardInterrupt) -- confirm intended
            err_handler = self.exception_handlers.get(
                namespace, self.default_exception_handler)
            if err_handler is None:
                raise
            type, value, traceback = sys.exc_info()
            return err_handler(value)
        if not self.manage_session:
            # when Flask is managing the user session, it needs to save it
            if not hasattr(session_obj, 'modified') or session_obj.modified:
                resp = app.response_class()
                app.session_interface.save_session(app, session_obj, resp)
        return ret
def emit(event, *args, **kwargs):
    """Emit a SocketIO event to one or more connected clients.

    A JSON blob can be attached to the event as payload.  Can only be
    called from inside a SocketIO event handler, because it pulls context
    (namespace, session id) from the current request.  Example::

        @socketio.on('my event')
        def handle_my_custom_event(json):
            emit('my response', {'data': 42})

    :param event: name of the user event to emit.
    :param args: a dictionary with the JSON payload to send.
    :param namespace: namespace to emit under; defaults to the namespace of
                      the originating event.  Use ``'/'`` for the global
                      namespace.
    :param callback: function invoked with the client's acknowledgement.
    :param broadcast: ``True`` to address all clients instead of only the
                      sender of the originating event.
    :param room: deliver to every user in the given room; implies
                 ``broadcast=True``.
    :param include_self: whether the sender also receives the message when
                         broadcasting or addressing a room.
    :param ignore_queue: when a message queue is configured, ``True`` skips
                         the queue and emits directly.  More efficient, but
                         only valid with a single server process or a
                         single addressee.  Best left at ``False``.
    """
    namespace = kwargs['namespace'] if 'namespace' in kwargs else \
        flask.request.namespace
    room = kwargs.get('room')
    if room is None and not kwargs.get('broadcast'):
        # default to addressing only the originating client
        room = flask.request.sid
    socketio = flask.current_app.extensions['socketio']
    return socketio.emit(event, *args, namespace=namespace, room=room,
                         include_self=kwargs.get('include_self', True),
                         callback=kwargs.get('callback'),
                         ignore_queue=kwargs.get('ignore_queue', False))
def send(message, **kwargs):
    """Send a simple SocketIO message to one or more connected clients.

    The message can be a string or a JSON blob.  This is a simpler variant
    of ``emit()``, which should generally be preferred.  Can only be
    called from inside a SocketIO event handler.

    :param message: the message to send, either a string or a JSON blob.
    :param json: ``True`` if ``message`` is a JSON blob, ``False``
                 otherwise.
    :param namespace: namespace to send under; defaults to the namespace
                      of the originating event.  An empty string selects
                      the global namespace.
    :param callback: function invoked with the client's acknowledgement.
    :param broadcast: ``True`` to address all connected clients instead of
                      only the sender of the originating event.
    :param room: deliver to every user in the given room.
    :param include_self: whether the sender also receives the message when
                         broadcasting or addressing a room.
    :param ignore_queue: when a message queue is configured, ``True`` skips
                         the queue and emits directly.  More efficient, but
                         only valid with a single server process or a
                         single addressee.  Best left at ``False``.
    """
    namespace = kwargs['namespace'] if 'namespace' in kwargs else \
        flask.request.namespace
    room = kwargs.get('room')
    if room is None and not kwargs.get('broadcast'):
        # default to addressing only the originating client
        room = flask.request.sid
    socketio = flask.current_app.extensions['socketio']
    return socketio.send(message, json=kwargs.get('json', False),
                         namespace=namespace, room=room,
                         include_self=kwargs.get('include_self', True),
                         callback=kwargs.get('callback'),
                         ignore_queue=kwargs.get('ignore_queue', False))
def join_room(room, sid=None, namespace=None):
    """Add a client to a room under the current namespace.

    Defaults for *sid* and *namespace* come from the event context, so
    this is normally called from inside a SocketIO event handler.
    Example::

        @socketio.on('join')
        def on_join(data):
            username = session['username']
            room = data['room']
            join_room(room)
            send(username + ' has entered the room.', room=room)

    :param room: name of the room to join.
    :param sid: session id of the client; defaults to the client of the
                current request.
    :param namespace: namespace of the room; defaults to the namespace of
                      the originating event.
    """
    socketio = flask.current_app.extensions['socketio']
    socketio.server.enter_room(sid or flask.request.sid, room,
                               namespace=namespace or
                               flask.request.namespace)
def close_room(room, namespace=None):
    """Remove all users from *room* and delete it from the server.

    :param room: name of the room to close.
    :param namespace: namespace of the room; defaults to the namespace of
                      the originating event.
    """
    socketio = flask.current_app.extensions['socketio']
    socketio.server.close_room(room,
                               namespace=namespace or
                               flask.request.namespace)
def rooms(sid=None, namespace=None):
    """Return the list of rooms the client has entered.

    The result includes the client's own room, which the Socket.IO server
    assigns automatically.

    :param sid: session id of the client; defaults to the client of the
                current request.
    :param namespace: namespace of the rooms; defaults to the namespace of
                      the originating event.
    """
    socketio = flask.current_app.extensions['socketio']
    return socketio.server.rooms(sid or flask.request.sid,
                                 namespace=namespace or
                                 flask.request.namespace)
def disconnect(sid=None, namespace=None, silent=False):
    """Terminate the connection with a client.

    The client receives a disconnect event as a result of this call.
    Example::

        @socketio.on('message')
        def receive_message(msg):
            if is_banned(session['username']):
                disconnect()
            else:
                # ...

    :param sid: session id of the client; defaults to the client of the
                current request.
    :param namespace: namespace of the connection; defaults to the
                      namespace of the originating event.
    :param silent: deprecated and ignored.
    """
    socketio = flask.current_app.extensions['socketio']
    return socketio.server.disconnect(sid or flask.request.sid,
                                      namespace=namespace or
                                      flask.request.namespace)
|
miguelgrinberg/Flask-SocketIO | flask_socketio/__init__.py | close_room | python | def close_room(room, namespace=None):
socketio = flask.current_app.extensions['socketio']
namespace = namespace or flask.request.namespace
socketio.server.close_room(room, namespace=namespace) | Close a room.
This function removes any users that are in the given room and then deletes
the room from the server.
:param room: The name of the room to close.
:param namespace: The namespace for the room. If not provided, the
namespace is obtained from the request context. | train | https://github.com/miguelgrinberg/Flask-SocketIO/blob/4bef800d5e7ba7d98a6f4cd94191ff0b4496c334/flask_socketio/__init__.py#L830-L842 | null | import os
import sys
# make sure gevent-socketio is not installed, as it conflicts with
# python-socketio
# (both packages install themselves under the 'socketio' name, so the
# presence of socketio_manage indicates the incompatible one is in use)
gevent_socketio_found = True
try:
    from socketio import socketio_manage
except ImportError:
    gevent_socketio_found = False
if gevent_socketio_found:
    print('The gevent-socketio package is incompatible with this version of '
          'the Flask-SocketIO extension. Please uninstall it, and then '
          'install the latest version of python-socketio in its place.')
    sys.exit(1)
import socketio
import flask
from flask import _request_ctx_stack, json as flask_json
from flask.sessions import SessionMixin
from werkzeug.debug import DebuggedApplication
from werkzeug.serving import run_with_reloader
from .namespace import Namespace
from .test_client import SocketIOTestClient
__version__ = '3.3.2'
class _SocketIOMiddleware(socketio.WSGIApp):
    """WSGI middleware that exposes the Flask application instance in the
    WSGI environment before the request is handled."""

    def __init__(self, socketio_app, flask_app, socketio_path='socket.io'):
        self.flask_app = flask_app
        super(_SocketIOMiddleware, self).__init__(
            socketio_app, flask_app.wsgi_app, socketio_path=socketio_path)

    def __call__(self, environ, start_response):
        # operate on a copy so the caller's environ stays untouched
        wsgi_environ = environ.copy()
        wsgi_environ['flask.app'] = self.flask_app
        return super(_SocketIOMiddleware, self).__call__(wsgi_environ,
                                                         start_response)
class _ManagedSession(dict, SessionMixin):
    """User session storage managed by Flask-SocketIO.

    A plain ``dict`` extended with Flask's ``SessionMixin`` attributes so
    it can stand in for a regular Flask session object."""
    pass
class SocketIO(object):
"""Create a Flask-SocketIO server.
:param app: The flask application instance. If the application instance
isn't known at the time this class is instantiated, then call
``socketio.init_app(app)`` once the application instance is
available.
:param manage_session: If set to ``True``, this extension manages the user
session for Socket.IO events. If set to ``False``,
Flask's own session management is used. When using
Flask's cookie based sessions it is recommended that
you leave this set to the default of ``True``. When
using server-side sessions, a ``False`` setting
enables sharing the user session between HTTP routes
and Socket.IO events.
:param message_queue: A connection URL for a message queue service the
server can use for multi-process communication. A
message queue is not required when using a single
server process.
:param channel: The channel name, when using a message queue. If a channel
isn't specified, a default channel will be used. If
multiple clusters of SocketIO processes need to use the
same message queue without interfering with each other, then
each cluster should use a different channel.
:param path: The path where the Socket.IO server is exposed. Defaults to
``'socket.io'``. Leave this as is unless you know what you are
doing.
:param resource: Alias to ``path``.
:param kwargs: Socket.IO and Engine.IO server options.
The Socket.IO server options are detailed below:
:param client_manager: The client manager instance that will manage the
client list. When this is omitted, the client list
is stored in an in-memory structure, so the use of
multiple connected servers is not possible. In most
cases, this argument does not need to be set
explicitly.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``.
:param binary: ``True`` to support binary payloads, ``False`` to treat all
payloads as text. On Python 2, if this is set to ``True``,
``unicode`` values are treated as text, and ``str`` and
``bytes`` values are treated as binary. This option has no
effect on Python 3, where text and binary payloads are
always automatically discovered.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions. To use the same json encoder and decoder as a Flask
application, use ``flask.json``.
The Engine.IO server configuration supports the following settings:
:param async_mode: The asynchronous model to use. See the Deployment
section in the documentation for a description of the
available options. Valid async modes are
``threading``, ``eventlet``, ``gevent`` and
``gevent_uwsgi``. If this argument is not given,
``eventlet`` is tried first, then ``gevent_uwsgi``,
then ``gevent``, and finally ``threading``. The
first async mode that has all its dependencies installed
is then one that is chosen.
:param ping_timeout: The time in seconds that the client waits for the
server to respond before disconnecting. The default is
60 seconds.
:param ping_interval: The interval in seconds at which the client pings
the server. The default is 25 seconds.
:param max_http_buffer_size: The maximum size of a message when using the
polling transport. The default is 100,000,000
bytes.
:param allow_upgrades: Whether to allow transport upgrades or not. The
default is ``True``.
:param http_compression: Whether to compress packages when using the
polling transport. The default is ``True``.
:param compression_threshold: Only compress messages when their byte size
is greater than this value. The default is
1024 bytes.
:param cookie: Name of the HTTP cookie that contains the client session
id. If set to ``None``, a cookie is not sent to the client.
The default is ``'io'``.
:param cors_allowed_origins: List of origins that are allowed to connect
to this server. All origins are allowed by
default.
:param cors_credentials: Whether credentials (cookies, authentication) are
allowed in requests to this server. The default is
``True``.
:param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
a logger object to use. To disable logging set to
``False``. The default is ``False``.
"""
def __init__(self, app=None, **kwargs):
    self.server = None
    self.wsgi_server = None
    self.server_options = {}
    self.handlers = []
    self.namespace_handlers = []
    self.exception_handlers = {}
    self.default_exception_handler = None
    self.manage_session = True
    # Initialization can complete right away when either the Flask app is
    # already known (standard setup) or a message_queue was given (an
    # auxiliary process that only writes to the queue).  In every other
    # case the options are stashed until init_app() is called from an
    # application factory.
    if app is None and 'message_queue' not in kwargs:
        self.server_options.update(kwargs)
    else:
        self.init_app(app, **kwargs)
def init_app(self, app, **kwargs):
    """Bind this extension to a Flask application and create the server.

    Called directly by ``__init__`` or later from an application factory.
    *app* may be ``None`` for write-only auxiliary processes that only
    post to a message queue.
    """
    if app is not None:
        if not hasattr(app, 'extensions'):
            app.extensions = {}  # pragma: no cover
        app.extensions['socketio'] = self
    self.server_options.update(kwargs)
    self.manage_session = self.server_options.pop('manage_session',
                                                  self.manage_session)
    if 'client_manager' not in self.server_options:
        url = self.server_options.pop('message_queue', None)
        channel = self.server_options.pop('channel', 'flask-socketio')
        write_only = app is None
        if url:
            # select the queue backend from the URL scheme
            if url.startswith(('redis://', "rediss://")):
                queue_class = socketio.RedisManager
            elif url.startswith('zmq'):
                queue_class = socketio.ZmqManager
            else:
                queue_class = socketio.KombuManager
            queue = queue_class(url, channel=channel,
                                write_only=write_only)
            self.server_options['client_manager'] = queue
    if 'json' in self.server_options and \
            self.server_options['json'] == flask_json:
        # flask's json module is tricky to use because its output
        # changes when it is invoked inside or outside the app context
        # so here to prevent any ambiguities we replace it with wrappers
        # that ensure that the app context is always present
        class FlaskSafeJSON(object):
            @staticmethod
            def dumps(*args, **kwargs):
                with app.app_context():
                    return flask_json.dumps(*args, **kwargs)

            @staticmethod
            def loads(*args, **kwargs):
                with app.app_context():
                    return flask_json.loads(*args, **kwargs)
        self.server_options['json'] = FlaskSafeJSON

    resource = self.server_options.pop('path', None) or \
        self.server_options.pop('resource', None) or 'socket.io'
    if resource.startswith('/'):
        resource = resource[1:]
    if os.environ.get('FLASK_RUN_FROM_CLI'):
        # running under "flask run" (Werkzeug): force the threading mode,
        # which has no WebSocket support
        if self.server_options.get('async_mode') is None:
            if app is not None:
                app.logger.warning(
                    'Flask-SocketIO is Running under Werkzeug, WebSocket '
                    'is not available.')
            self.server_options['async_mode'] = 'threading'
    self.server = socketio.Server(**self.server_options)
    self.async_mode = self.server.async_mode
    # flush handlers and namespaces that were registered before the
    # server object existed
    for handler in self.handlers:
        self.server.on(handler[0], handler[1], namespace=handler[2])
    for namespace_handler in self.namespace_handlers:
        self.server.register_namespace(namespace_handler)
    if app is not None:
        # here we attach the SocketIO middleware to the SocketIO object so it
        # can be referenced later if debug middleware needs to be inserted
        self.sockio_mw = _SocketIOMiddleware(self.server, app,
                                             socketio_path=resource)
        app.wsgi_app = self.sockio_mw
def on(self, message, namespace=None):
    """Decorator that registers a SocketIO event handler.

    Example::

        @socketio.on('my event', namespace='/chat')
        def handle_my_custom_event(json):
            print('received json: ' + str(json))

    :param message: the event name.  Besides user defined strings, the
                    reserved names ``'message'`` (string payload),
                    ``'json'`` (JSON payload), ``'connect'`` and
                    ``'disconnect'`` are recognized.
    :param namespace: namespace to register the handler on; defaults to
                      the global namespace.
    """
    namespace = namespace or '/'

    def register(handler):
        def dispatch(sid, *args):
            return self._handle_event(handler, message, namespace, sid,
                                      *args)
        if self.server:
            self.server.on(message, dispatch, namespace=namespace)
        else:
            # server not created yet; queue until init_app() runs
            self.handlers.append((message, dispatch, namespace))
        return handler
    return register
def on_error(self, namespace=None):
    """Decorator defining a custom error handler for a namespace.

    The decorated function is invoked with the raised exception whenever
    a SocketIO event handler in the namespace raises.  Example::

        @socketio.on_error(namespace='/chat')
        def chat_error_handler(e):
            print('An error has occurred: ' + str(e))

    :param namespace: namespace the error handler applies to; defaults to
                      the global namespace.
    """
    key = namespace or '/'

    def register(exception_handler):
        if not callable(exception_handler):
            raise ValueError('exception_handler must be callable')
        self.exception_handlers[key] = exception_handler
        return exception_handler
    return register
def on_error_default(self, exception_handler):
    """Decorator defining the fallback error handler for SocketIO events.

    The handler is used for any namespace that lacks a specific handler.
    Example::

        @socketio.on_error_default
        def error_handler(e):
            print('An error has occurred: ' + str(e))
    """
    if not callable(exception_handler):
        raise ValueError('exception_handler must be callable')
    self.default_exception_handler = exception_handler
    return exception_handler
def on_event(self, message, handler, namespace=None):
    """Register a SocketIO event handler (non-decorator form of ``on``).

    Example::

        def on_foo_event(json):
            print('received json: ' + str(json))

        socketio.on_event('my event', on_foo_event, namespace='/chat')

    :param message: the event name (see :meth:`on` for reserved names).
    :param handler: the function that handles the event.
    :param namespace: namespace to register on; defaults to the global
                      namespace.
    """
    self.on(message, namespace=namespace)(handler)
def on_namespace(self, namespace_handler):
    """Register a class-based namespace handler."""
    if not isinstance(namespace_handler, Namespace):
        raise ValueError('Not a namespace instance.')
    namespace_handler._set_socketio(self)
    if not self.server:
        # server not created yet; queue until init_app() runs
        self.namespace_handlers.append(namespace_handler)
    else:
        self.server.register_namespace(namespace_handler)
def emit(self, event, *args, **kwargs):
    """Emit a server generated SocketIO event.

    Unlike the context-aware ``emit()`` function, this method can be used
    outside of a SocketIO event handler, e.g. from a regular HTTP route
    or a background task.  Example::

        @app.route('/ping')
        def ping():
            socketio.emit('ping event', {'data': 42}, namespace='/chat')

    :param event: name of the user event to emit.
    :param args: a dictionary with the JSON payload to send.
    :param namespace: namespace to emit under; defaults to the global
                      namespace.
    :param room: deliver only to users in this room; when omitted the
                 event goes to all connected users.
    :param skip_sid: session id to leave out when broadcasting or
                     addressing a room, typically the originator of the
                     message.
    :param callback: function invoked with the client's acknowledgement;
                     only usable when addressing an individual client.
    """
    namespace = kwargs.pop('namespace', '/')
    room = kwargs.pop('room', None)
    skip_sid = kwargs.pop('skip_sid', None)
    callback = kwargs.pop('callback', None)
    if not kwargs.pop('include_self', True) and not skip_sid:
        # leave out the originating client
        skip_sid = flask.request.sid
    self.server.emit(event, *args, namespace=namespace, room=room,
                     skip_sid=skip_sid, callback=callback, **kwargs)
def send(self, data, json=False, namespace=None, room=None,
         callback=None, include_self=True, skip_sid=None, **kwargs):
    """Send a server-generated SocketIO message.

    Sends a simple message (string or JSON blob) to one or more connected
    clients.  This is a simpler variant of ``emit()``, which should
    generally be preferred.  Usable outside of a SocketIO event context.

    :param data: the message to send, either a string or a JSON blob.
    :param json: ``True`` if ``data`` is a JSON blob, ``False`` otherwise.
    :param namespace: namespace to send under; defaults to the global
                      namespace.
    :param room: deliver only to users in this room; when omitted the
                 message goes to all connected users.
    :param skip_sid: session id to leave out when broadcasting or
                     addressing a room, typically the originator of the
                     message.
    :param callback: function invoked with the client's acknowledgement;
                     only usable when addressing an individual client.
    """
    if not include_self:
        # leave out the originating client
        skip_sid = flask.request.sid
    event = 'json' if json else 'message'
    self.emit(event, data, namespace=namespace, room=room,
              skip_sid=skip_sid, callback=callback, **kwargs)
def close_room(self, room, namespace=None):
    """Remove all users from *room* and delete it from the server.

    Usable outside of a SocketIO event context.

    :param room: name of the room to close.
    :param namespace: namespace the room belongs to; defaults to the
                      global namespace.
    """
    self.server.close_room(room, namespace)
def run(self, app, host=None, port=None, **kwargs):
    """Run the SocketIO web server.

    :param app: The Flask application instance.
    :param host: The hostname or IP address for the server to listen on.
                 Defaults to 127.0.0.1.
    :param port: The port number for the server to listen on. Defaults to
                 5000.
    :param debug: ``True`` to start the server in debug mode, ``False`` to
                  start in normal mode.
    :param use_reloader: ``True`` to enable the Flask reloader, ``False``
                         to disable it.
    :param extra_files: A list of additional files that the Flask
                        reloader should watch. Defaults to ``None``
    :param log_output: If ``True``, the server logs all incoming
                       connections. If ``False`` logging is disabled.
                       Defaults to ``True`` in debug mode, ``False``
                       in normal mode. Unused when the threading async
                       mode is used.
    :param kwargs: Additional web server options. The web server options
                   are specific to the server used in each of the supported
                   async modes. Note that options provided here will
                   not be seen when using an external web server such
                   as gunicorn, since this method is not called in that
                   case.
    """
    if host is None:
        host = '127.0.0.1'
    if port is None:
        # derive the port from SERVER_NAME if it carries one
        server_name = app.config['SERVER_NAME']
        if server_name and ':' in server_name:
            port = int(server_name.rsplit(':', 1)[1])
        else:
            port = 5000
    debug = kwargs.pop('debug', app.debug)
    log_output = kwargs.pop('log_output', debug)
    use_reloader = kwargs.pop('use_reloader', debug)
    extra_files = kwargs.pop('extra_files', None)
    app.debug = debug
    if app.debug and self.server.eio.async_mode != 'threading':
        # put the debug middleware between the SocketIO middleware
        # and the Flask application instance
        #
        #    mw1    mw2    mw3   Flask app
        #     o ---- o ---- o ---- o
        #    /
        #   o Flask-SocketIO
        #    \  middleware
        #     o
        # Flask-SocketIO WebSocket handler
        #
        # BECOMES
        #
        #  dbg-mw  mw1    mw2    mw3   Flask app
        #     o ---- o ---- o ---- o ---- o
        #    /
        #   o Flask-SocketIO
        #    \  middleware
        #     o
        # Flask-SocketIO WebSocket handler
        #
        self.sockio_mw.wsgi_app = DebuggedApplication(
            self.sockio_mw.wsgi_app, evalex=True)

    if self.server.eio.async_mode == 'threading':
        # plain Werkzeug server: long-polling only, no WebSocket
        from werkzeug._internal import _log
        _log('warning', 'WebSocket transport not available. Install '
                        'eventlet or gevent and gevent-websocket for '
                        'improved performance.')
        app.run(host=host, port=port, threaded=True,
                use_reloader=use_reloader, **kwargs)
    elif self.server.eio.async_mode == 'eventlet':
        def run_server():
            import eventlet
            import eventlet.wsgi
            import eventlet.green
            addresses = eventlet.green.socket.getaddrinfo(host, port)
            if not addresses:
                raise RuntimeError(
                    'Could not resolve host to a valid address')
            eventlet_socket = eventlet.listen(addresses[0][4],
                                              addresses[0][0])

            # If provided an SSL argument, use an SSL socket
            ssl_args = ['keyfile', 'certfile', 'server_side', 'cert_reqs',
                        'ssl_version', 'ca_certs',
                        'do_handshake_on_connect',
                        'suppress_ragged_eofs', 'ciphers']
            ssl_params = {k: kwargs[k] for k in kwargs if k in ssl_args}
            if len(ssl_params) > 0:
                for k in ssl_params:
                    kwargs.pop(k)
                ssl_params['server_side'] = True  # Listening requires true
                eventlet_socket = eventlet.wrap_ssl(eventlet_socket,
                                                    **ssl_params)

            eventlet.wsgi.server(eventlet_socket, app,
                                 log_output=log_output, **kwargs)

        if use_reloader:
            run_with_reloader(run_server, extra_files=extra_files)
        else:
            run_server()
    elif self.server.eio.async_mode == 'gevent':
        from gevent import pywsgi
        try:
            from geventwebsocket.handler import WebSocketHandler
            websocket = True
        except ImportError:
            # gevent-websocket not installed; fall back to long-polling
            websocket = False

        log = 'default'
        if not log_output:
            log = None
        if websocket:
            self.wsgi_server = pywsgi.WSGIServer(
                (host, port), app, handler_class=WebSocketHandler,
                log=log, **kwargs)
        else:
            self.wsgi_server = pywsgi.WSGIServer((host, port), app,
                                                 log=log, **kwargs)

        if use_reloader:
            # monkey patching is required by the reloader
            from gevent import monkey
            monkey.patch_all()

            def run_server():
                self.wsgi_server.serve_forever()

            run_with_reloader(run_server, extra_files=extra_files)
        else:
            self.wsgi_server.serve_forever()
def stop(self):
    """Stop a running SocketIO web server.

    This method must be called from a HTTP or SocketIO handler function,
    because the shutdown mechanism depends on the active async mode.
    """
    async_mode = self.server.eio.async_mode
    if async_mode == 'threading':
        shutdown_func = flask.request.environ.get(
            'werkzeug.server.shutdown')
        if not shutdown_func:
            raise RuntimeError('Cannot stop unknown web server')
        shutdown_func()
    elif async_mode == 'eventlet':
        # the eventlet server exits when SystemExit propagates
        raise SystemExit
    elif async_mode == 'gevent':
        self.wsgi_server.stop()
def start_background_task(self, target, *args, **kwargs):
    """Launch *target* as a background task via the configured async model.

    This utility starts the task with whatever mechanism matches the
    selected async mode, so applications do not need to care which one
    is active.

    :param target: the callable to execute in the background.
    :param args: positional arguments forwarded to *target*.
    :param kwargs: keyword arguments forwarded to *target*.

    Returns an object compatible with the ``Thread`` class of the Python
    standard library; its ``start()`` method has already been called.
    """
    server = self.server
    return server.start_background_task(target, *args, **kwargs)
def sleep(self, seconds=0):
    """Suspend the calling task for *seconds* using the configured async
    model.

    Lets application code put a task to sleep without having to know
    which async mode is active, since each mode has its own sleep
    primitive.
    """
    server = self.server
    return server.sleep(seconds)
def test_client(self, app, namespace=None, query_string=None,
                headers=None, flask_test_client=None):
    """Return a Socket.IO test client bound to this server.

    Works much like the Flask Test Client, but adapted to the Socket.IO
    server.

    :param app: The Flask application instance.
    :param namespace: Namespace the client connects to; the global
                      namespace is used when not given.
    :param query_string: A string with custom query string arguments.
    :param headers: A dictionary with custom HTTP headers.
    :param flask_test_client: The Flask test client currently in use.
                              Optional, but required when the Flask user
                              session and cookies set in HTTP routes must
                              be accessible from Socket.IO events.
    """
    client = SocketIOTestClient(app, self, namespace=namespace,
                                query_string=query_string,
                                headers=headers,
                                flask_test_client=flask_test_client)
    return client
def _handle_event(self, handler, message, namespace, sid, *args):
    """Dispatch a received Socket.IO event to an application handler.

    Sets up a Flask request context built from the client's connection
    environ, wires up the user session according to ``manage_session``,
    and invokes *handler*. Exceptions raised by the handler are routed
    to the namespace's error handler (or the default one) when one is
    configured.

    :param handler: the application's event handler function.
    :param message: the event name (``'connect'``, ``'disconnect'``,
                    ``'message'``, ``'json'`` or a custom event name).
    :param namespace: the namespace the event was received on.
    :param sid: the Socket.IO session id of the client.
    :param args: payload arguments passed through to the handler.
    """
    if sid not in self.server.environ:
        # we don't have record of this client, ignore this event
        return '', 400
    app = self.server.environ[sid]['flask.app']
    with app.request_context(self.server.environ[sid]):
        if self.manage_session:
            # manage a separate session for this client's Socket.IO
            # events, created as a copy of the regular user session
            if 'saved_session' not in self.server.environ[sid]:
                self.server.environ[sid]['saved_session'] = \
                    _ManagedSession(flask.session)
            session_obj = self.server.environ[sid]['saved_session']
        else:
            # let Flask handle the user session
            # for cookie based sessions, this effectively freezes the
            # session to its state at connection time
            # for server-side sessions, this allows HTTP and Socket.IO to
            # share the session, with both having read/write access to it
            session_obj = flask.session._get_current_object()
        _request_ctx_stack.top.session = session_obj
        flask.request.sid = sid
        flask.request.namespace = namespace
        flask.request.event = {'message': message, 'args': args}
        try:
            if message == 'connect':
                ret = handler()
            else:
                ret = handler(*args)
        except:  # noqa: E722 -- intentionally broad: route to error handler
            err_handler = self.exception_handlers.get(
                namespace, self.default_exception_handler)
            if err_handler is None:
                raise
            # fixed: previously shadowed the builtins `type` and `traceback`
            exc_type, exc_value, exc_tb = sys.exc_info()
            return err_handler(exc_value)
        if not self.manage_session:
            # when Flask is managing the user session, it needs to save it
            if not hasattr(session_obj, 'modified') or session_obj.modified:
                resp = app.response_class()
                app.session_interface.save_session(app, session_obj, resp)
        return ret
def emit(event, *args, **kwargs):
    """Emit a SocketIO event.

    Emits a SocketIO event, optionally with a JSON payload, to one or
    more connected clients. Can only be called from a SocketIO event
    handler, as it obtains some information from the current client
    context. Example::

        @socketio.on('my event')
        def handle_my_custom_event(json):
            emit('my response', {'data': 42})

    :param event: The name of the user event to emit.
    :param args: A dictionary with the JSON data to send as payload.
    :param namespace: Namespace to emit under; defaults to the namespace
                      used by the originating event. ``'/'`` explicitly
                      selects the global namespace.
    :param callback: Callback invoked with the client's acknowledgement.
    :param broadcast: ``True`` to send to all clients, ``False`` to only
                      reply to the sender of the originating event.
    :param room: Send to all users in the given room; implies broadcast.
    :param include_self: Whether the sender also receives the message
                         when broadcasting or addressing a room.
    :param ignore_queue: When a message queue is configured, ``True``
                         bypasses the queue and emits directly. Only
                         appropriate with a single server process or a
                         single addressee; leave at the default
                         ``False`` unless you know what you are doing.
    """
    try:
        namespace = kwargs['namespace']
    except KeyError:
        # default to the namespace the current event arrived on
        namespace = flask.request.namespace
    room = kwargs.get('room')
    if room is None and not kwargs.get('broadcast'):
        # no room and no broadcast: address the originating client only
        room = flask.request.sid
    socketio = flask.current_app.extensions['socketio']
    return socketio.emit(event, *args, namespace=namespace, room=room,
                         include_self=kwargs.get('include_self', True),
                         callback=kwargs.get('callback'),
                         ignore_queue=kwargs.get('ignore_queue', False))
def send(message, **kwargs):
    """Send a SocketIO message.

    Sends a simple SocketIO message (a string or a JSON blob) to one or
    more connected clients. This is a simpler version of ``emit()``,
    which should be preferred. Can only be called from a SocketIO event
    handler.

    :param message: The message to send, either a string or a JSON blob.
    :param json: ``True`` if ``message`` is a JSON blob, ``False``
                 otherwise.
    :param namespace: Namespace to send under; defaults to the namespace
                      used by the originating event. An empty string
                      selects the global namespace.
    :param callback: Callback invoked with the client's acknowledgement.
    :param broadcast: ``True`` to send to all connected clients,
                      ``False`` to only reply to the sender.
    :param room: Send the message to all the users in the given room.
    :param include_self: Whether the sender also receives the message
                         when broadcasting or addressing a room.
    :param ignore_queue: When a message queue is configured, ``True``
                         bypasses the queue and delivers directly. Only
                         appropriate with a single server process or a
                         single addressee; leave at the default
                         ``False`` unless you know what you are doing.
    """
    try:
        namespace = kwargs['namespace']
    except KeyError:
        # default to the namespace the current event arrived on
        namespace = flask.request.namespace
    room = kwargs.get('room')
    if room is None and not kwargs.get('broadcast'):
        # no room and no broadcast: address the originating client only
        room = flask.request.sid
    socketio = flask.current_app.extensions['socketio']
    return socketio.send(message, json=kwargs.get('json', False),
                         namespace=namespace, room=room,
                         include_self=kwargs.get('include_self', True),
                         callback=kwargs.get('callback'),
                         ignore_queue=kwargs.get('ignore_queue', False))
def join_room(room, sid=None, namespace=None):
    """Join a room.

    Puts the user in a room under the current namespace. When *sid* or
    *namespace* are omitted they are taken from the event context, so
    this can only be called from a SocketIO event handler. Example::

        @socketio.on('join')
        def on_join(data):
            username = session['username']
            room = data['room']
            join_room(room)
            send(username + ' has entered the room.', room=room)

    :param room: The name of the room to join.
    :param sid: Session id of the client; obtained from the request
                context when not provided.
    :param namespace: Namespace of the room; obtained from the request
                      context when not provided.
    """
    socketio = flask.current_app.extensions['socketio']
    target_sid = sid if sid else flask.request.sid
    target_namespace = namespace if namespace else flask.request.namespace
    socketio.server.enter_room(target_sid, room, namespace=target_namespace)
def leave_room(room, sid=None, namespace=None):
    """Leave a room.

    Removes the user from a room under the current namespace. When *sid*
    or *namespace* are omitted they are taken from the event context.
    Example::

        @socketio.on('leave')
        def on_leave(data):
            username = session['username']
            room = data['room']
            leave_room(room)
            send(username + ' has left the room.', room=room)

    :param room: The name of the room to leave.
    :param sid: Session id of the client; obtained from the request
                context when not provided.
    :param namespace: Namespace of the room; obtained from the request
                      context when not provided.
    """
    socketio = flask.current_app.extensions['socketio']
    target_sid = sid if sid else flask.request.sid
    target_namespace = namespace if namespace else flask.request.namespace
    socketio.server.leave_room(target_sid, room, namespace=target_namespace)
def rooms(sid=None, namespace=None):
    """Return a list of the rooms the client is in.

    Returns all the rooms the client has entered, including its own
    room, assigned by the Socket.IO server.

    :param sid: Session id of the client; obtained from the request
                context when not provided.
    :param namespace: Namespace of the rooms; obtained from the request
                      context when not provided.
    """
    socketio = flask.current_app.extensions['socketio']
    client_sid = sid if sid else flask.request.sid
    client_namespace = namespace if namespace else flask.request.namespace
    return socketio.server.rooms(client_sid, namespace=client_namespace)
def disconnect(sid=None, namespace=None, silent=False):
    """Disconnect the client.

    Terminates the connection with the client; as a result the client
    receives a disconnect event. Example::

        @socketio.on('message')
        def receive_message(msg):
            if is_banned(session['username']):
                disconnect()
            else:
                # ...

    :param sid: Session id of the client; obtained from the request
                context when not provided.
    :param namespace: Namespace of the connection; obtained from the
                      request context when not provided.
    :param silent: this option is deprecated and ignored.
    """
    socketio = flask.current_app.extensions['socketio']
    client_sid = sid if sid else flask.request.sid
    client_namespace = namespace if namespace else flask.request.namespace
    return socketio.server.disconnect(client_sid, namespace=client_namespace)
|
miguelgrinberg/Flask-SocketIO | flask_socketio/__init__.py | rooms | python | def rooms(sid=None, namespace=None):
socketio = flask.current_app.extensions['socketio']
sid = sid or flask.request.sid
namespace = namespace or flask.request.namespace
return socketio.server.rooms(sid, namespace=namespace) | Return a list of the rooms the client is in.
This function returns all the rooms the client has entered, including its
own room, assigned by the Socket.IO server.
:param sid: The session id of the client. If not provided, the client is
obtained from the request context.
:param namespace: The namespace for the room. If not provided, the
namespace is obtained from the request context. | train | https://github.com/miguelgrinberg/Flask-SocketIO/blob/4bef800d5e7ba7d98a6f4cd94191ff0b4496c334/flask_socketio/__init__.py#L845-L859 | null | import os
import sys
# make sure gevent-socketio is not installed, as it conflicts with
# python-socketio
# Probe for the legacy gevent-socketio package: importing socketio_manage
# only succeeds when gevent-socketio occupies the 'socketio' module name
# instead of python-socketio.
gevent_socketio_found = True
try:
    from socketio import socketio_manage
except ImportError:
    # python-socketio has no socketio_manage, so the right package is here
    gevent_socketio_found = False
if gevent_socketio_found:
    # abort immediately: both packages claim the 'socketio' module name,
    # so this extension cannot work until gevent-socketio is removed
    print('The gevent-socketio package is incompatible with this version of '
          'the Flask-SocketIO extension. Please uninstall it, and then '
          'install the latest version of python-socketio in its place.')
    sys.exit(1)
import socketio
import flask
from flask import _request_ctx_stack, json as flask_json
from flask.sessions import SessionMixin
from werkzeug.debug import DebuggedApplication
from werkzeug.serving import run_with_reloader
from .namespace import Namespace
from .test_client import SocketIOTestClient
__version__ = '3.3.2'  # Flask-SocketIO release version
class _SocketIOMiddleware(socketio.WSGIApp):
    """WSGI middleware that exposes the Flask application in the WSGI
    environment (under the ``'flask.app'`` key) before the request is
    executed, so Socket.IO handlers can later locate it.
    """
    def __init__(self, socketio_app, flask_app, socketio_path='socket.io'):
        self.flask_app = flask_app
        super(_SocketIOMiddleware, self).__init__(
            socketio_app, flask_app.wsgi_app, socketio_path=socketio_path)

    def __call__(self, environ, start_response):
        # work on a copy so the caller's environ dict is left untouched
        wsgi_environ = environ.copy()
        wsgi_environ['flask.app'] = self.flask_app
        return super(_SocketIOMiddleware, self).__call__(
            wsgi_environ, start_response)
class _ManagedSession(dict, SessionMixin):
    """This class is used for user sessions that are managed by
    Flask-SocketIO. It is simple dict, expanded with the Flask session
    attributes."""
    # No extra behavior is needed: dict provides the storage and
    # SessionMixin supplies the permanent/new/modified attributes that
    # Flask expects of a session object.
    pass
class SocketIO(object):
"""Create a Flask-SocketIO server.
:param app: The flask application instance. If the application instance
isn't known at the time this class is instantiated, then call
``socketio.init_app(app)`` once the application instance is
available.
:param manage_session: If set to ``True``, this extension manages the user
session for Socket.IO events. If set to ``False``,
Flask's own session management is used. When using
Flask's cookie based sessions it is recommended that
you leave this set to the default of ``True``. When
using server-side sessions, a ``False`` setting
enables sharing the user session between HTTP routes
and Socket.IO events.
:param message_queue: A connection URL for a message queue service the
server can use for multi-process communication. A
message queue is not required when using a single
server process.
:param channel: The channel name, when using a message queue. If a channel
isn't specified, a default channel will be used. If
multiple clusters of SocketIO processes need to use the
same message queue without interfering with each other, then
each cluster should use a different channel.
:param path: The path where the Socket.IO server is exposed. Defaults to
``'socket.io'``. Leave this as is unless you know what you are
doing.
:param resource: Alias to ``path``.
:param kwargs: Socket.IO and Engine.IO server options.
The Socket.IO server options are detailed below:
:param client_manager: The client manager instance that will manage the
client list. When this is omitted, the client list
is stored in an in-memory structure, so the use of
multiple connected servers is not possible. In most
cases, this argument does not need to be set
explicitly.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``.
:param binary: ``True`` to support binary payloads, ``False`` to treat all
payloads as text. On Python 2, if this is set to ``True``,
``unicode`` values are treated as text, and ``str`` and
``bytes`` values are treated as binary. This option has no
effect on Python 3, where text and binary payloads are
always automatically discovered.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions. To use the same json encoder and decoder as a Flask
application, use ``flask.json``.
The Engine.IO server configuration supports the following settings:
:param async_mode: The asynchronous model to use. See the Deployment
section in the documentation for a description of the
available options. Valid async modes are
``threading``, ``eventlet``, ``gevent`` and
``gevent_uwsgi``. If this argument is not given,
``eventlet`` is tried first, then ``gevent_uwsgi``,
then ``gevent``, and finally ``threading``. The
first async mode that has all its dependencies installed
is then one that is chosen.
:param ping_timeout: The time in seconds that the client waits for the
server to respond before disconnecting. The default is
60 seconds.
:param ping_interval: The interval in seconds at which the client pings
the server. The default is 25 seconds.
:param max_http_buffer_size: The maximum size of a message when using the
polling transport. The default is 100,000,000
bytes.
:param allow_upgrades: Whether to allow transport upgrades or not. The
default is ``True``.
:param http_compression: Whether to compress packages when using the
polling transport. The default is ``True``.
:param compression_threshold: Only compress messages when their byte size
is greater than this value. The default is
1024 bytes.
:param cookie: Name of the HTTP cookie that contains the client session
id. If set to ``None``, a cookie is not sent to the client.
The default is ``'io'``.
:param cors_allowed_origins: List of origins that are allowed to connect
to this server. All origins are allowed by
default.
:param cors_credentials: Whether credentials (cookies, authentication) are
allowed in requests to this server. The default is
``True``.
:param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
a logger object to use. To disable logging set to
``False``. The default is ``False``.
"""
def __init__(self, app=None, **kwargs):
    """Initialize the extension; see the class docstring for all the
    supported keyword options."""
    # Runtime state populated later by init_app()/run().
    self.server = None
    self.wsgi_server = None
    self.server_options = {}
    # Handlers registered before the server exists are queued here and
    # attached in init_app().
    self.handlers = []
    self.namespace_handlers = []
    # Per-namespace error handlers plus an optional catch-all.
    self.exception_handlers = {}
    self.default_exception_handler = None
    self.manage_session = True
    # We can call init_app right away when:
    # - we were given the Flask app instance (standard initialization)
    # - we were not given the app, but we were given a message_queue
    #   (standard initialization for an auxiliary process)
    # In all other cases collect the arguments and assume the client
    # will call init_app from an app factory function.
    if app is not None or 'message_queue' in kwargs:
        self.init_app(app, **kwargs)
    else:
        self.server_options.update(kwargs)
def init_app(self, app, **kwargs):
    """Initialize the extension against *app* (may be ``None`` for an
    auxiliary write-only process) and create the Socket.IO server.

    Keyword arguments are merged into the server options collected at
    construction time; see the class docstring for the supported keys.
    """
    if app is not None:
        # register this instance in the standard Flask extensions registry
        if not hasattr(app, 'extensions'):
            app.extensions = {}  # pragma: no cover
        app.extensions['socketio'] = self
    self.server_options.update(kwargs)
    # 'manage_session' is an extension option, not a server option: pop it
    self.manage_session = self.server_options.pop('manage_session',
                                                  self.manage_session)

    if 'client_manager' not in self.server_options:
        # build a client manager from the message queue URL, choosing the
        # backend class based on the URL scheme
        url = self.server_options.pop('message_queue', None)
        channel = self.server_options.pop('channel', 'flask-socketio')
        # without an app this process only publishes to the queue
        write_only = app is None
        if url:
            if url.startswith(('redis://', "rediss://")):
                queue_class = socketio.RedisManager
            elif url.startswith('zmq'):
                queue_class = socketio.ZmqManager
            else:
                queue_class = socketio.KombuManager
            queue = queue_class(url, channel=channel,
                                write_only=write_only)
            self.server_options['client_manager'] = queue

    if 'json' in self.server_options and \
            self.server_options['json'] == flask_json:
        # flask's json module is tricky to use because its output
        # changes when it is invoked inside or outside the app context
        # so here to prevent any ambiguities we replace it with wrappers
        # that ensure that the app context is always present
        class FlaskSafeJSON(object):
            @staticmethod
            def dumps(*args, **kwargs):
                with app.app_context():
                    return flask_json.dumps(*args, **kwargs)

            @staticmethod
            def loads(*args, **kwargs):
                with app.app_context():
                    return flask_json.loads(*args, **kwargs)

        self.server_options['json'] = FlaskSafeJSON

    # accept 'path' or its alias 'resource'; normalize away a leading '/'
    resource = self.server_options.pop('path', None) or \
        self.server_options.pop('resource', None) or 'socket.io'
    if resource.startswith('/'):
        resource = resource[1:]
    if os.environ.get('FLASK_RUN_FROM_CLI'):
        # under the `flask run` CLI only the threading mode works; warn
        # that WebSocket is unavailable and force that mode
        if self.server_options.get('async_mode') is None:
            if app is not None:
                app.logger.warning(
                    'Flask-SocketIO is Running under Werkzeug, WebSocket '
                    'is not available.')
            self.server_options['async_mode'] = 'threading'
    self.server = socketio.Server(**self.server_options)
    self.async_mode = self.server.async_mode
    # attach all handlers queued before the server existed
    for handler in self.handlers:
        self.server.on(handler[0], handler[1], namespace=handler[2])
    for namespace_handler in self.namespace_handlers:
        self.server.register_namespace(namespace_handler)

    if app is not None:
        # here we attach the SocketIO middlware to the SocketIO object so it
        # can be referenced later if debug middleware needs to be inserted
        self.sockio_mw = _SocketIOMiddleware(self.server, app,
                                             socketio_path=resource)
        app.wsgi_app = self.sockio_mw
def on(self, message, namespace=None):
    """Decorator that registers a SocketIO event handler.

    Example::

        @socketio.on('my event', namespace='/chat')
        def handle_my_custom_event(json):
            print('received json: ' + str(json))

    :param message: Event name. Normally user defined, but a few names
                    have special meaning: ``'message'`` for string
                    payloads, ``'json'`` for JSON payloads, and
                    ``'connect'`` / ``'disconnect'`` for connection
                    lifecycle events.
    :param namespace: Namespace to register the handler on. Defaults to
                      the global namespace.
    """
    namespace = '/' if not namespace else namespace

    def decorator(handler):
        # adapter that routes the raw Socket.IO callback through this
        # extension's dispatch logic (request context, sessions, errors)
        def dispatch(sid, *args):
            return self._handle_event(handler, message, namespace, sid,
                                      *args)

        if self.server:
            self.server.on(message, dispatch, namespace=namespace)
        else:
            # server not created yet: queue for registration in init_app
            self.handlers.append((message, dispatch, namespace))
        return handler

    return decorator
def on_error(self, namespace=None):
    """Decorator that defines a custom error handler for SocketIO events.

    The decorated function is invoked with the raised exception whenever
    a SocketIO event handler in the namespace raises. Example::

        @socketio.on_error(namespace='/chat')
        def chat_error_handler(e):
            print('An error has occurred: ' + str(e))

    :param namespace: Namespace the error handler applies to. Defaults
                      to the global namespace.
    :raises ValueError: if the decorated object is not callable.
    """
    namespace = '/' if not namespace else namespace

    def decorator(handler):
        if not callable(handler):
            raise ValueError('exception_handler must be callable')
        self.exception_handlers[namespace] = handler
        return handler

    return decorator
def on_error_default(self, exception_handler):
    """Decorator that defines the fallback error handler for SocketIO
    events.

    The handler is used for any namespace that does not have a specific
    handler registered. Example::

        @socketio.on_error_default
        def error_handler(e):
            print('An error has occurred: ' + str(e))

    :raises ValueError: if the decorated object is not callable.
    """
    if callable(exception_handler):
        self.default_exception_handler = exception_handler
        return exception_handler
    raise ValueError('exception_handler must be callable')
def on_event(self, message, handler, namespace=None):
    """Register a SocketIO event handler (non-decorator form of ``on``).

    Example::

        def on_foo_event(json):
            print('received json: ' + str(json))

        socketio.on_event('my event', on_foo_event, namespace='/chat')

    :param message: Event name; ``'message'``, ``'json'``, ``'connect'``
                    and ``'disconnect'`` carry special meanings as
                    described in ``on()``.
    :param handler: The function that handles the event.
    :param namespace: Namespace to register on; defaults to the global
                      namespace.
    """
    register = self.on(message, namespace=namespace)
    register(handler)
def on_namespace(self, namespace_handler):
    """Register a class-based namespace handler.

    :param namespace_handler: an instance of a ``Namespace`` subclass.
    :raises ValueError: if the argument is not a ``Namespace`` instance.
    """
    if not isinstance(namespace_handler, Namespace):
        raise ValueError('Not a namespace instance.')
    # give the namespace a back-reference to this extension
    namespace_handler._set_socketio(self)
    if self.server:
        self.server.register_namespace(namespace_handler)
    else:
        # server not created yet: queue for registration in init_app
        self.namespace_handlers.append(namespace_handler)
def emit(self, event, *args, **kwargs):
    """Emit a server generated SocketIO event.

    Emits a SocketIO event, optionally with a JSON payload, to one or
    more connected clients. Safe to call outside of a SocketIO event
    context, so it is appropriate when the server originates the event,
    e.g. from an HTTP handler or a background task. Example::

        @app.route('/ping')
        def ping():
            socketio.emit('ping event', {'data': 42}, namespace='/chat')

    :param event: The name of the user event to emit.
    :param args: A dictionary with the JSON data to send as payload.
    :param namespace: Namespace to emit under; defaults to the global
                      namespace.
    :param room: Send to all users in the given room; omit to address
                 all connected users.
    :param skip_sid: Session id of a client to skip when broadcasting or
                     addressing a room, typically the originator of the
                     message.
    :param callback: If given, called to acknowledge that the client
                     received the message, with the arguments supplied
                     by the client. Only usable when addressing an
                     individual client.
    """
    target_namespace = kwargs.pop('namespace', '/')
    target_room = kwargs.pop('room', None)
    acknowledgement = kwargs.pop('callback', None)
    skip_sid = kwargs.pop('skip_sid', None)
    include_self = kwargs.pop('include_self', True)
    if not include_self and not skip_sid:
        # exclude the originating client when include_self is disabled
        skip_sid = flask.request.sid
    self.server.emit(event, *args, namespace=target_namespace,
                     room=target_room, skip_sid=skip_sid,
                     callback=acknowledgement, **kwargs)
def send(self, data, json=False, namespace=None, room=None,
         callback=None, include_self=True, skip_sid=None, **kwargs):
    """Send a server-generated SocketIO message.

    Sends a simple SocketIO message (a string or a JSON blob) to one or
    more connected clients. This is a simpler version of ``emit()``,
    which should be preferred. Safe to call outside of a SocketIO event
    context, so it is appropriate when the server originates the event.

    :param data: The message to send, either a string or a JSON blob.
    :param json: ``True`` if ``data`` is a JSON blob, ``False``
                 otherwise.
    :param namespace: Namespace to send under; defaults to the global
                      namespace.
    :param room: Send only to users in the given room; omit to address
                 all connected users.
    :param skip_sid: Session id of a client to skip when broadcasting or
                     addressing a room, typically the originator of the
                     message.
    :param callback: If given, called to acknowledge that the client
                     received the message, with the arguments supplied
                     by the client. Only usable when addressing an
                     individual client.
    :param include_self: When ``False``, the sender (taken from the
                         request context) is excluded.
    """
    if not include_self:
        # exclude the sender instead of echoing the message back to it
        skip_sid = flask.request.sid
    event = 'json' if json else 'message'
    self.emit(event, data, namespace=namespace, room=room,
              skip_sid=skip_sid, callback=callback, **kwargs)
def close_room(self, room, namespace=None):
    """Close a room.

    Removes any users currently in the given room and then deletes the
    room from the server. Safe to call outside of a SocketIO event
    context.

    :param room: The name of the room to close.
    :param namespace: Namespace the room exists under; defaults to the
                      global namespace.
    """
    server = self.server
    server.close_room(room, namespace)
def run(self, app, host=None, port=None, **kwargs):
    """Run the SocketIO web server.

    :param app: The Flask application instance.
    :param host: The hostname or IP address for the server to listen on.
                 Defaults to 127.0.0.1.
    :param port: The port number for the server to listen on. Defaults to
                 5000.
    :param debug: ``True`` to start the server in debug mode, ``False`` to
                  start in normal mode.
    :param use_reloader: ``True`` to enable the Flask reloader, ``False``
                         to disable it.
    :param extra_files: A list of additional files that the Flask
                        reloader should watch. Defaults to ``None``
    :param log_output: If ``True``, the server logs all incomming
                       connections. If ``False`` logging is disabled.
                       Defaults to ``True`` in debug mode, ``False``
                       in normal mode. Unused when the threading async
                       mode is used.
    :param kwargs: Additional web server options. The web server options
                   are specific to the server used in each of the supported
                   async modes. Note that options provided here will
                   not be seen when using an external web server such
                   as gunicorn, since this method is not called in that
                   case.
    """
    if host is None:
        host = '127.0.0.1'
    if port is None:
        # derive the port from SERVER_NAME when it carries one
        server_name = app.config['SERVER_NAME']
        if server_name and ':' in server_name:
            port = int(server_name.rsplit(':', 1)[1])
        else:
            port = 5000

    # extension-level options are popped so only web server options
    # remain in kwargs
    debug = kwargs.pop('debug', app.debug)
    log_output = kwargs.pop('log_output', debug)
    use_reloader = kwargs.pop('use_reloader', debug)
    extra_files = kwargs.pop('extra_files', None)

    app.debug = debug
    if app.debug and self.server.eio.async_mode != 'threading':
        # insert the Werkzeug debug middleware between the Flask-SocketIO
        # middleware and the Flask application instance, so that errors in
        # both HTTP and Socket.IO handling get the interactive debugger
        self.sockio_mw.wsgi_app = DebuggedApplication(self.sockio_mw.wsgi_app,
                                                      evalex=True)

    if self.server.eio.async_mode == 'threading':
        # plain Werkzeug server: long-polling only, no WebSocket
        from werkzeug._internal import _log
        _log('warning', 'WebSocket transport not available. Install '
                        'eventlet or gevent and gevent-websocket for '
                        'improved performance.')
        app.run(host=host, port=port, threaded=True,
                use_reloader=use_reloader, **kwargs)
    elif self.server.eio.async_mode == 'eventlet':
        def run_server():
            import eventlet
            import eventlet.wsgi
            import eventlet.green
            # resolve the host with eventlet's green socket module so the
            # lookup cooperates with the event loop
            addresses = eventlet.green.socket.getaddrinfo(host, port)
            if not addresses:
                raise RuntimeError('Could not resolve host to a valid address')
            eventlet_socket = eventlet.listen(addresses[0][4], addresses[0][0])

            # If provided an SSL argument, use an SSL socket
            ssl_args = ['keyfile', 'certfile', 'server_side', 'cert_reqs',
                        'ssl_version', 'ca_certs',
                        'do_handshake_on_connect', 'suppress_ragged_eofs',
                        'ciphers']
            ssl_params = {k: kwargs[k] for k in kwargs if k in ssl_args}
            if len(ssl_params) > 0:
                # remove SSL options from kwargs so they are not passed on
                # to the WSGI server as well
                for k in ssl_params:
                    kwargs.pop(k)
                ssl_params['server_side'] = True  # Listening requires true
                eventlet_socket = eventlet.wrap_ssl(eventlet_socket,
                                                    **ssl_params)

            eventlet.wsgi.server(eventlet_socket, app,
                                 log_output=log_output, **kwargs)

        if use_reloader:
            run_with_reloader(run_server, extra_files=extra_files)
        else:
            run_server()
    elif self.server.eio.async_mode == 'gevent':
        from gevent import pywsgi
        try:
            # gevent-websocket is optional; without it only long-polling
            # transports are available
            from geventwebsocket.handler import WebSocketHandler
            websocket = True
        except ImportError:
            websocket = False

        log = 'default'
        if not log_output:
            log = None
        if websocket:
            self.wsgi_server = pywsgi.WSGIServer(
                (host, port), app, handler_class=WebSocketHandler,
                log=log, **kwargs)
        else:
            self.wsgi_server = pywsgi.WSGIServer((host, port), app,
                                                 log=log, **kwargs)

        if use_reloader:
            # monkey patching is required by the reloader
            from gevent import monkey
            monkey.patch_all()

            def run_server():
                self.wsgi_server.serve_forever()

            run_with_reloader(run_server, extra_files=extra_files)
        else:
            self.wsgi_server.serve_forever()
def stop(self):
    """Stop a running SocketIO web server.

    This method must be called from a HTTP or SocketIO handler function.
    """
    async_mode = self.server.eio.async_mode
    if async_mode == 'gevent':
        self.wsgi_server.stop()
    elif async_mode == 'eventlet':
        # eventlet's server loop is unwound by raising SystemExit
        raise SystemExit
    elif async_mode == 'threading':
        # Werkzeug publishes its shutdown hook through the WSGI environ
        shutdown_func = flask.request.environ.get('werkzeug.server.shutdown')
        if not shutdown_func:
            raise RuntimeError('Cannot stop unknown web server')
        shutdown_func()
    def start_background_task(self, target, *args, **kwargs):
        """Start a background task using the appropriate async model.
        This is a utility function that applications can use to start a
        background task using the method that is compatible with the
        selected async mode.
        :param target: the target function to execute.
        :param args: arguments to pass to the function.
        :param kwargs: keyword arguments to pass to the function.
        This function returns an object compatible with the `Thread` class in
        the Python standard library. The `start()` method on this object is
        already called by this function.
        """
        # Delegate to the underlying python-socketio server, which knows
        # whether a thread, greenlet, etc. fits the configured async mode.
        return self.server.start_background_task(target, *args, **kwargs)
    def sleep(self, seconds=0):
        """Sleep for the requested amount of time using the appropriate async
        model.
        This is a utility function that applications can use to put a task to
        sleep without having to worry about using the correct call for the
        selected async mode.
        :param seconds: the number of seconds to sleep.
        """
        # Delegate so the sleep cooperates with the active async framework.
        return self.server.sleep(seconds)
    def test_client(self, app, namespace=None, query_string=None,
                    headers=None, flask_test_client=None):
        """The Socket.IO test client is useful for testing a Flask-SocketIO
        server. It works in a similar way to the Flask Test Client, but
        adapted to the Socket.IO server.
        :param app: The Flask application instance.
        :param namespace: The namespace for the client. If not provided, the
                          client connects to the server on the global
                          namespace.
        :param query_string: A string with custom query string arguments.
        :param headers: A dictionary with custom HTTP headers.
        :param flask_test_client: The instance of the Flask test client
                                  currently in use. Passing the Flask test
                                  client is optional, but is necessary if you
                                  want the Flask user session and any other
                                  cookies set in HTTP routes accessible from
                                  Socket.IO events.
        """
        # The test client talks to this server instance in-process; no real
        # network connection is made.
        return SocketIOTestClient(app, self, namespace=namespace,
                                  query_string=query_string, headers=headers,
                                  flask_test_client=flask_test_client)
    def _handle_event(self, handler, message, namespace, sid, *args):
        """Invoke a user event handler inside a Flask request context.
        Builds a request context from the environ captured at connection
        time, installs the appropriate user session object, attaches the
        Socket.IO metadata (``sid``, ``namespace``, ``event``) to
        ``flask.request``, then calls ``handler``.
        :param handler: the user's event handler function.
        :param message: the event name (``'connect'`` handlers are called
                        without arguments).
        :param namespace: the namespace the event arrived on.
        :param sid: the Socket.IO session id of the client.
        :param args: the event payload, forwarded to the handler.
        """
        if sid not in self.server.environ:
            # we don't have record of this client, ignore this event
            return '', 400
        app = self.server.environ[sid]['flask.app']
        with app.request_context(self.server.environ[sid]):
            if self.manage_session:
                # manage a separate session for this client's Socket.IO events
                # created as a copy of the regular user session
                if 'saved_session' not in self.server.environ[sid]:
                    self.server.environ[sid]['saved_session'] = \
                        _ManagedSession(flask.session)
                session_obj = self.server.environ[sid]['saved_session']
            else:
                # let Flask handle the user session
                # for cookie based sessions, this effectively freezes the
                # session to its state at connection time
                # for server-side sessions, this allows HTTP and Socket.IO to
                # share the session, with both having read/write access to it
                session_obj = flask.session._get_current_object()
            _request_ctx_stack.top.session = session_obj
            # expose Socket.IO details on the request object for handlers
            flask.request.sid = sid
            flask.request.namespace = namespace
            flask.request.event = {'message': message, 'args': args}
            try:
                if message == 'connect':
                    ret = handler()
                else:
                    ret = handler(*args)
            # NOTE(review): bare except also routes BaseException (e.g. the
            # SystemExit raised by stop() under eventlet) through the error
            # handlers — presumably intentional; confirm before narrowing.
            except:
                err_handler = self.exception_handlers.get(
                    namespace, self.default_exception_handler)
                if err_handler is None:
                    raise
                # shadows the builtins `type`/`traceback` locally; only
                # `value` is used
                type, value, traceback = sys.exc_info()
                return err_handler(value)
            if not self.manage_session:
                # when Flask is managing the user session, it needs to save it
                if not hasattr(session_obj, 'modified') or session_obj.modified:
                    resp = app.response_class()
                    app.session_interface.save_session(app, session_obj, resp)
            return ret
def emit(event, *args, **kwargs):
    """Emit a SocketIO event to one or more connected clients.
    A JSON blob can be attached to the event as payload. Because context is
    taken from the originating client, this function can only be called from
    inside a SocketIO event handler. Example::
        @socketio.on('my event')
        def handle_my_custom_event(json):
            emit('my response', {'data': 42})
    :param event: The name of the user event to emit.
    :param args: A dictionary with the JSON data to send as payload.
    :param namespace: The namespace under which the message is to be sent.
                      Defaults to the namespace used by the originating
                      event. A ``'/'`` can be used to explicitly specify the
                      global namespace.
    :param callback: Callback function to invoke with the client's
                     acknowledgement.
    :param broadcast: ``True`` to send the message to all clients, or
                      ``False`` to only reply to the sender of the
                      originating event.
    :param room: Send the message to all the users in the given room. If this
                 argument is set, then broadcast is implied to be ``True``.
    :param include_self: ``True`` to include the sender when broadcasting or
                         addressing a room, or ``False`` to send to everyone
                         but the sender.
    :param ignore_queue: Only used when a message queue is configured. If set
                         to ``True``, the event is emitted to the clients
                         directly, without going through the queue. This is
                         more efficient, but only works with a single server
                         process or a single addressee. It is recommended to
                         leave this at its default of ``False``.
    """
    try:
        # an explicitly passed namespace wins, even if it is None
        namespace = kwargs['namespace']
    except KeyError:
        namespace = flask.request.namespace
    broadcast = kwargs.get('broadcast')
    room = kwargs.get('room')
    if room is None and not broadcast:
        # no room and no broadcast: address only the originating client
        room = flask.request.sid
    socketio = flask.current_app.extensions['socketio']
    return socketio.emit(event, *args, namespace=namespace, room=room,
                         include_self=kwargs.get('include_self', True),
                         callback=kwargs.get('callback'),
                         ignore_queue=kwargs.get('ignore_queue', False))
def send(message, **kwargs):
    """Send a SocketIO message to one or more connected clients.
    The message can be a string or a JSON blob. This is a simpler version of
    ``emit()``, which should be preferred. This function can only be called
    from inside a SocketIO event handler.
    :param message: The message to send, either a string or a JSON blob.
    :param json: ``True`` if ``message`` is a JSON blob, ``False``
                 otherwise.
    :param namespace: The namespace under which the message is to be sent.
                      Defaults to the namespace used by the originating
                      event. An empty string can be used to use the global
                      namespace.
    :param callback: Callback function to invoke with the client's
                     acknowledgement.
    :param broadcast: ``True`` to send the message to all connected clients,
                      or ``False`` to only reply to the sender of the
                      originating event.
    :param room: Send the message to all the users in the given room.
    :param include_self: ``True`` to include the sender when broadcasting or
                         addressing a room, or ``False`` to send to everyone
                         but the sender.
    :param ignore_queue: Only used when a message queue is configured. If set
                         to ``True``, the event is emitted to the clients
                         directly, without going through the queue. This is
                         more efficient, but only works with a single server
                         process or a single addressee. It is recommended to
                         leave this at its default of ``False``.
    """
    try:
        # an explicitly passed namespace wins, even if it is None
        namespace = kwargs['namespace']
    except KeyError:
        namespace = flask.request.namespace
    broadcast = kwargs.get('broadcast')
    room = kwargs.get('room')
    if room is None and not broadcast:
        # no room and no broadcast: address only the originating client
        room = flask.request.sid
    socketio = flask.current_app.extensions['socketio']
    return socketio.send(message, json=kwargs.get('json', False),
                         namespace=namespace, room=room,
                         include_self=kwargs.get('include_self', True),
                         callback=kwargs.get('callback'),
                         ignore_queue=kwargs.get('ignore_queue', False))
def join_room(room, sid=None, namespace=None):
    """Join a room.
    This function puts the user in a room, under the current namespace. The
    user and the namespace default to the ones in the event context, so this
    is normally called from a SocketIO event handler. Example::
        @socketio.on('join')
        def on_join(data):
            username = session['username']
            room = data['room']
            join_room(room)
            send(username + ' has entered the room.', room=room)
    :param room: The name of the room to join.
    :param sid: The session id of the client. If not provided, the client is
                obtained from the request context.
    :param namespace: The namespace for the room. If not provided, the
                      namespace is obtained from the request context.
    """
    socketio = flask.current_app.extensions['socketio']
    if not sid:
        sid = flask.request.sid
    if not namespace:
        namespace = flask.request.namespace
    socketio.server.enter_room(sid, room, namespace=namespace)
def leave_room(room, sid=None, namespace=None):
    """Leave a room.
    This function removes the user from a room, under the current namespace.
    The user and the namespace default to the ones in the event context.
    Example::
        @socketio.on('leave')
        def on_leave(data):
            username = session['username']
            room = data['room']
            leave_room(room)
            send(username + ' has left the room.', room=room)
    :param room: The name of the room to leave.
    :param sid: The session id of the client. If not provided, the client is
                obtained from the request context.
    :param namespace: The namespace for the room. If not provided, the
                      namespace is obtained from the request context.
    """
    socketio = flask.current_app.extensions['socketio']
    if not sid:
        sid = flask.request.sid
    if not namespace:
        namespace = flask.request.namespace
    socketio.server.leave_room(sid, room, namespace=namespace)
def close_room(room, namespace=None):
    """Close a room.
    This function removes any users that are in the given room and then
    deletes the room from the server.
    :param room: The name of the room to close.
    :param namespace: The namespace for the room. If not provided, the
                      namespace is obtained from the request context.
    """
    socketio = flask.current_app.extensions['socketio']
    if not namespace:
        namespace = flask.request.namespace
    socketio.server.close_room(room, namespace=namespace)
def disconnect(sid=None, namespace=None, silent=False):
    """Disconnect the client.
    This function terminates the connection with the client. As a result of
    this call the client will receive a disconnect event. Example::
        @socketio.on('message')
        def receive_message(msg):
            if is_banned(session['username']):
                disconnect()
            else:
                # ...
    :param sid: The session id of the client. If not provided, the client is
                obtained from the request context.
    :param namespace: The namespace for the room. If not provided, the
                      namespace is obtained from the request context.
    :param silent: this option is deprecated.
    """
    # ``silent`` is accepted for backward compatibility but is ignored.
    socketio = flask.current_app.extensions['socketio']
    if not sid:
        sid = flask.request.sid
    if not namespace:
        namespace = flask.request.namespace
    return socketio.server.disconnect(sid, namespace=namespace)
|
miguelgrinberg/Flask-SocketIO | flask_socketio/__init__.py | disconnect | python | def disconnect(sid=None, namespace=None, silent=False):
socketio = flask.current_app.extensions['socketio']
sid = sid or flask.request.sid
namespace = namespace or flask.request.namespace
return socketio.server.disconnect(sid, namespace=namespace) | Disconnect the client.
This function terminates the connection with the client. As a result of
this call the client will receive a disconnect event. Example::
@socketio.on('message')
def receive_message(msg):
if is_banned(session['username']):
disconnect()
else:
# ...
:param sid: The session id of the client. If not provided, the client is
obtained from the request context.
:param namespace: The namespace for the room. If not provided, the
namespace is obtained from the request context.
:param silent: this option is deprecated. | train | https://github.com/miguelgrinberg/Flask-SocketIO/blob/4bef800d5e7ba7d98a6f4cd94191ff0b4496c334/flask_socketio/__init__.py#L862-L884 | null | import os
import sys
# make sure gevent-socketio is not installed, as it conflicts with
# python-socketio
gevent_socketio_found = True
try:
    from socketio import socketio_manage
except ImportError:
    # ImportError is the desired outcome: it means the `socketio` package
    # is python-socketio, not the incompatible gevent-socketio.
    gevent_socketio_found = False
if gevent_socketio_found:
    # refuse to start: both packages install a top-level `socketio` module,
    # so gevent-socketio would shadow the required python-socketio
    print('The gevent-socketio package is incompatible with this version of '
          'the Flask-SocketIO extension. Please uninstall it, and then '
          'install the latest version of python-socketio in its place.')
    sys.exit(1)
import socketio
import flask
from flask import _request_ctx_stack, json as flask_json
from flask.sessions import SessionMixin
from werkzeug.debug import DebuggedApplication
from werkzeug.serving import run_with_reloader
from .namespace import Namespace
from .test_client import SocketIOTestClient
__version__ = '3.3.2'
class _SocketIOMiddleware(socketio.WSGIApp):
    """This WSGI middleware simply exposes the Flask application in the WSGI
    environment before executing the request.
    """
    def __init__(self, socketio_app, flask_app, socketio_path='socket.io'):
        # keep a reference so __call__ can inject the app per-request
        self.flask_app = flask_app
        super(_SocketIOMiddleware, self).__init__(socketio_app,
                                                  flask_app.wsgi_app,
                                                  socketio_path=socketio_path)
    def __call__(self, environ, start_response):
        # shallow copy so the added key stays local to this request
        environ = environ.copy()
        environ['flask.app'] = self.flask_app
        return super(_SocketIOMiddleware, self).__call__(environ,
                                                         start_response)
class _ManagedSession(dict, SessionMixin):
    """This class is used for user sessions that are managed by
    Flask-SocketIO. It is a simple dict, expanded with the Flask session
    attributes."""
    pass
class SocketIO(object):
"""Create a Flask-SocketIO server.
:param app: The flask application instance. If the application instance
isn't known at the time this class is instantiated, then call
``socketio.init_app(app)`` once the application instance is
available.
:param manage_session: If set to ``True``, this extension manages the user
session for Socket.IO events. If set to ``False``,
Flask's own session management is used. When using
Flask's cookie based sessions it is recommended that
you leave this set to the default of ``True``. When
using server-side sessions, a ``False`` setting
enables sharing the user session between HTTP routes
and Socket.IO events.
:param message_queue: A connection URL for a message queue service the
server can use for multi-process communication. A
message queue is not required when using a single
server process.
:param channel: The channel name, when using a message queue. If a channel
isn't specified, a default channel will be used. If
multiple clusters of SocketIO processes need to use the
same message queue without interfering with each other, then
each cluster should use a different channel.
:param path: The path where the Socket.IO server is exposed. Defaults to
``'socket.io'``. Leave this as is unless you know what you are
doing.
:param resource: Alias to ``path``.
:param kwargs: Socket.IO and Engine.IO server options.
The Socket.IO server options are detailed below:
:param client_manager: The client manager instance that will manage the
client list. When this is omitted, the client list
is stored in an in-memory structure, so the use of
multiple connected servers is not possible. In most
cases, this argument does not need to be set
explicitly.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``.
:param binary: ``True`` to support binary payloads, ``False`` to treat all
payloads as text. On Python 2, if this is set to ``True``,
``unicode`` values are treated as text, and ``str`` and
``bytes`` values are treated as binary. This option has no
effect on Python 3, where text and binary payloads are
always automatically discovered.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions. To use the same json encoder and decoder as a Flask
application, use ``flask.json``.
The Engine.IO server configuration supports the following settings:
:param async_mode: The asynchronous model to use. See the Deployment
section in the documentation for a description of the
available options. Valid async modes are
``threading``, ``eventlet``, ``gevent`` and
``gevent_uwsgi``. If this argument is not given,
``eventlet`` is tried first, then ``gevent_uwsgi``,
then ``gevent``, and finally ``threading``. The
first async mode that has all its dependencies installed
is then one that is chosen.
:param ping_timeout: The time in seconds that the client waits for the
server to respond before disconnecting. The default is
60 seconds.
:param ping_interval: The interval in seconds at which the client pings
the server. The default is 25 seconds.
:param max_http_buffer_size: The maximum size of a message when using the
polling transport. The default is 100,000,000
bytes.
:param allow_upgrades: Whether to allow transport upgrades or not. The
default is ``True``.
:param http_compression: Whether to compress packages when using the
polling transport. The default is ``True``.
:param compression_threshold: Only compress messages when their byte size
is greater than this value. The default is
1024 bytes.
:param cookie: Name of the HTTP cookie that contains the client session
id. If set to ``None``, a cookie is not sent to the client.
The default is ``'io'``.
:param cors_allowed_origins: List of origins that are allowed to connect
to this server. All origins are allowed by
default.
:param cors_credentials: Whether credentials (cookies, authentication) are
allowed in requests to this server. The default is
``True``.
:param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
a logger object to use. To disable logging set to
``False``. The default is ``False``.
"""
    def __init__(self, app=None, **kwargs):
        self.server = None               # python-socketio Server, set by init_app
        self.server_options = {}         # options collected for deferred init
        self.wsgi_server = None          # set when run() starts a gevent server
        self.handlers = []               # (message, handler, namespace) tuples
        self.namespace_handlers = []     # Namespace instances awaiting registration
        self.exception_handlers = {}     # per-namespace error handlers
        self.default_exception_handler = None
        self.manage_session = True
        # We can call init_app when:
        # - we were given the Flask app instance (standard initialization)
        # - we were not given the app, but we were given a message_queue
        # (standard initialization for auxiliary process)
        # In all other cases we collect the arguments and assume the client
        # will call init_app from an app factory function.
        if app is not None or 'message_queue' in kwargs:
            self.init_app(app, **kwargs)
        else:
            self.server_options.update(kwargs)
    def init_app(self, app, **kwargs):
        """Initialize the extension against a Flask application.
        Merges ``kwargs`` into any options collected in the constructor,
        builds the message-queue client manager if requested, creates the
        python-socketio server, registers any handlers that were added
        before the server existed, and installs the Socket.IO WSGI
        middleware on ``app``.
        :param app: the Flask application, or ``None`` for a write-only
                    auxiliary process that only posts to the message queue.
        :param kwargs: the same options accepted by the constructor.
        """
        if app is not None:
            if not hasattr(app, 'extensions'):
                app.extensions = {}  # pragma: no cover
            app.extensions['socketio'] = self
        self.server_options.update(kwargs)
        self.manage_session = self.server_options.pop('manage_session',
                                                      self.manage_session)
        if 'client_manager' not in self.server_options:
            url = self.server_options.pop('message_queue', None)
            channel = self.server_options.pop('channel', 'flask-socketio')
            # with no app there is nothing to receive, so the queue client
            # is opened in write-only mode
            write_only = app is None
            if url:
                # pick the queue backend from the URL scheme
                if url.startswith(('redis://', "rediss://")):
                    queue_class = socketio.RedisManager
                elif url.startswith('zmq'):
                    queue_class = socketio.ZmqManager
                else:
                    queue_class = socketio.KombuManager
                queue = queue_class(url, channel=channel,
                                    write_only=write_only)
                self.server_options['client_manager'] = queue
        if 'json' in self.server_options and \
                self.server_options['json'] == flask_json:
            # flask's json module is tricky to use because its output
            # changes when it is invoked inside or outside the app context
            # so here to prevent any ambiguities we replace it with wrappers
            # that ensure that the app context is always present
            class FlaskSafeJSON(object):
                @staticmethod
                def dumps(*args, **kwargs):
                    with app.app_context():
                        return flask_json.dumps(*args, **kwargs)
                @staticmethod
                def loads(*args, **kwargs):
                    with app.app_context():
                        return flask_json.loads(*args, **kwargs)
            self.server_options['json'] = FlaskSafeJSON
        # 'path' and 'resource' are aliases; normalize away a leading slash
        resource = self.server_options.pop('path', None) or \
            self.server_options.pop('resource', None) or 'socket.io'
        if resource.startswith('/'):
            resource = resource[1:]
        if os.environ.get('FLASK_RUN_FROM_CLI'):
            # under `flask run` (Werkzeug) only the threading mode works
            if self.server_options.get('async_mode') is None:
                if app is not None:
                    app.logger.warning(
                        'Flask-SocketIO is Running under Werkzeug, WebSocket '
                        'is not available.')
                self.server_options['async_mode'] = 'threading'
        self.server = socketio.Server(**self.server_options)
        self.async_mode = self.server.async_mode
        # flush registrations collected before the server existed
        for handler in self.handlers:
            self.server.on(handler[0], handler[1], namespace=handler[2])
        for namespace_handler in self.namespace_handlers:
            self.server.register_namespace(namespace_handler)
        if app is not None:
            # here we attach the SocketIO middlware to the SocketIO object so
            # it can be referenced later if debug middleware needs to be
            # inserted
            self.sockio_mw = _SocketIOMiddleware(self.server, app,
                                                 socketio_path=resource)
            app.wsgi_app = self.sockio_mw
    def on(self, message, namespace=None):
        """Decorator to register a SocketIO event handler.
        This decorator must be applied to SocketIO event handlers. Example::
            @socketio.on('my event', namespace='/chat')
            def handle_my_custom_event(json):
                print('received json: ' + str(json))
        :param message: The name of the event. This is normally a user
                        defined string, but a few event names are already
                        defined. Use ``'message'`` to define a handler that
                        takes a string payload, ``'json'`` to define a
                        handler that takes a JSON blob payload,
                        ``'connect'`` or ``'disconnect'`` to create handlers
                        for connection and disconnection events.
        :param namespace: The namespace on which the handler is to be
                          registered. Defaults to the global namespace.
        """
        namespace = namespace or '/'
        def decorator(handler):
            # wrap the user handler so it is invoked through _handle_event,
            # which sets up the Flask request context and session
            def _handler(sid, *args):
                return self._handle_event(handler, message, namespace, sid,
                                          *args)
            if self.server:
                self.server.on(message, _handler, namespace=namespace)
            else:
                # server not created yet; init_app will flush this list
                self.handlers.append((message, _handler, namespace))
            return handler
        return decorator
def on_error(self, namespace=None):
"""Decorator to define a custom error handler for SocketIO events.
This decorator can be applied to a function that acts as an error
handler for a namespace. This handler will be invoked when a SocketIO
event handler raises an exception. The handler function must accept one
argument, which is the exception raised. Example::
@socketio.on_error(namespace='/chat')
def chat_error_handler(e):
print('An error has occurred: ' + str(e))
:param namespace: The namespace for which to register the error
handler. Defaults to the global namespace.
"""
namespace = namespace or '/'
def decorator(exception_handler):
if not callable(exception_handler):
raise ValueError('exception_handler must be callable')
self.exception_handlers[namespace] = exception_handler
return exception_handler
return decorator
def on_error_default(self, exception_handler):
"""Decorator to define a default error handler for SocketIO events.
This decorator can be applied to a function that acts as a default
error handler for any namespaces that do not have a specific handler.
Example::
@socketio.on_error_default
def error_handler(e):
print('An error has occurred: ' + str(e))
"""
if not callable(exception_handler):
raise ValueError('exception_handler must be callable')
self.default_exception_handler = exception_handler
return exception_handler
    def on_event(self, message, handler, namespace=None):
        """Register a SocketIO event handler.
        ``on_event`` is the non-decorator version of ``'on'``.
        Example::
            def on_foo_event(json):
                print('received json: ' + str(json))
            socketio.on_event('my event', on_foo_event, namespace='/chat')
        :param message: The name of the event. This is normally a user
                        defined string, but a few event names are already
                        defined. Use ``'message'`` to define a handler that
                        takes a string payload, ``'json'`` to define a
                        handler that takes a JSON blob payload,
                        ``'connect'`` or ``'disconnect'`` to create handlers
                        for connection and disconnection events.
        :param handler: The function that handles the event.
        :param namespace: The namespace on which the handler is to be
                          registered. Defaults to the global namespace.
        """
        # apply the on() decorator programmatically
        self.on(message, namespace=namespace)(handler)
def on_namespace(self, namespace_handler):
if not isinstance(namespace_handler, Namespace):
raise ValueError('Not a namespace instance.')
namespace_handler._set_socketio(self)
if self.server:
self.server.register_namespace(namespace_handler)
else:
self.namespace_handlers.append(namespace_handler)
def emit(self, event, *args, **kwargs):
"""Emit a server generated SocketIO event.
This function emits a SocketIO event to one or more connected clients.
A JSON blob can be attached to the event as payload. This function can
be used outside of a SocketIO event context, so it is appropriate to
use when the server is the originator of an event, outside of any
client context, such as in a regular HTTP request handler or a
background task. Example::
@app.route('/ping')
def ping():
socketio.emit('ping event', {'data': 42}, namespace='/chat')
:param event: The name of the user event to emit.
:param args: A dictionary with the JSON data to send as payload.
:param namespace: The namespace under which the message is to be sent.
Defaults to the global namespace.
:param room: Send the message to all the users in the given room. If
this parameter is not included, the event is sent to
all connected users.
:param skip_sid: The session id of a client to ignore when broadcasting
or addressing a room. This is typically set to the
originator of the message, so that everyone except
that client receive the message.
:param callback: If given, this function will be called to acknowledge
that the client has received the message. The
arguments that will be passed to the function are
those provided by the client. Callback functions can
only be used when addressing an individual client.
"""
namespace = kwargs.pop('namespace', '/')
room = kwargs.pop('room', None)
include_self = kwargs.pop('include_self', True)
skip_sid = kwargs.pop('skip_sid', None)
if not include_self and not skip_sid:
skip_sid = flask.request.sid
callback = kwargs.pop('callback', None)
self.server.emit(event, *args, namespace=namespace, room=room,
skip_sid=skip_sid, callback=callback, **kwargs)
def send(self, data, json=False, namespace=None, room=None,
callback=None, include_self=True, skip_sid=None, **kwargs):
"""Send a server-generated SocketIO message.
This function sends a simple SocketIO message to one or more connected
clients. The message can be a string or a JSON blob. This is a simpler
version of ``emit()``, which should be preferred. This function can be
used outside of a SocketIO event context, so it is appropriate to use
when the server is the originator of an event.
:param data: The message to send, either a string or a JSON blob.
:param json: ``True`` if ``message`` is a JSON blob, ``False``
otherwise.
:param namespace: The namespace under which the message is to be sent.
Defaults to the global namespace.
:param room: Send the message only to the users in the given room. If
this parameter is not included, the message is sent to
all connected users.
:param skip_sid: The session id of a client to ignore when broadcasting
or addressing a room. This is typically set to the
originator of the message, so that everyone except
that client receive the message.
:param callback: If given, this function will be called to acknowledge
that the client has received the message. The
arguments that will be passed to the function are
those provided by the client. Callback functions can
only be used when addressing an individual client.
"""
skip_sid = flask.request.sid if not include_self else skip_sid
if json:
self.emit('json', data, namespace=namespace, room=room,
skip_sid=skip_sid, callback=callback, **kwargs)
else:
self.emit('message', data, namespace=namespace, room=room,
skip_sid=skip_sid, callback=callback, **kwargs)
    def close_room(self, room, namespace=None):
        """Close a room.
        This function removes any users that are in the given room and then
        deletes the room from the server. This function can be used outside
        of a SocketIO event context.
        :param room: The name of the room to close.
        :param namespace: The namespace under which the room exists. Defaults
                          to the global namespace.
        """
        # namespace is forwarded as-is; presumably a value of None lets the
        # underlying server apply its own default namespace — verify against
        # the python-socketio API
        self.server.close_room(room, namespace)
    def run(self, app, host=None, port=None, **kwargs):
        """Run the SocketIO web server.
        :param app: The Flask application instance.
        :param host: The hostname or IP address for the server to listen on.
                     Defaults to 127.0.0.1.
        :param port: The port number for the server to listen on. Defaults to
                     5000.
        :param debug: ``True`` to start the server in debug mode, ``False``
                      to start in normal mode.
        :param use_reloader: ``True`` to enable the Flask reloader, ``False``
                             to disable it.
        :param extra_files: A list of additional files that the Flask
                            reloader should watch. Defaults to ``None``
        :param log_output: If ``True``, the server logs all incoming
                           connections. If ``False`` logging is disabled.
                           Defaults to ``True`` in debug mode, ``False``
                           in normal mode. Unused when the threading async
                           mode is used.
        :param kwargs: Additional web server options. The web server options
                       are specific to the server used in each of the
                       supported async modes. Note that options provided here
                       will not be seen when using an external web server
                       such as gunicorn, since this method is not called in
                       that case.
        """
        if host is None:
            host = '127.0.0.1'
        if port is None:
            # fall back to the port embedded in SERVER_NAME, if any
            server_name = app.config['SERVER_NAME']
            if server_name and ':' in server_name:
                port = int(server_name.rsplit(':', 1)[1])
            else:
                port = 5000
        # debug-related options default from the app's debug flag
        debug = kwargs.pop('debug', app.debug)
        log_output = kwargs.pop('log_output', debug)
        use_reloader = kwargs.pop('use_reloader', debug)
        extra_files = kwargs.pop('extra_files', None)
        app.debug = debug
        if app.debug and self.server.eio.async_mode != 'threading':
            # Put the debug middleware between the SocketIO middleware and
            # the Flask application instance, so that the Socket.IO traffic
            # (including the WebSocket handler) bypasses the debugger while
            # regular HTTP requests still go through it.
            self.sockio_mw.wsgi_app = DebuggedApplication(self.sockio_mw.wsgi_app,
                                                          evalex=True)
        if self.server.eio.async_mode == 'threading':
            from werkzeug._internal import _log
            _log('warning', 'WebSocket transport not available. Install '
                            'eventlet or gevent and gevent-websocket for '
                            'improved performance.')
            app.run(host=host, port=port, threaded=True,
                    use_reloader=use_reloader, **kwargs)
        elif self.server.eio.async_mode == 'eventlet':
            def run_server():
                import eventlet
                import eventlet.wsgi
                import eventlet.green
                addresses = eventlet.green.socket.getaddrinfo(host, port)
                if not addresses:
                    raise RuntimeError('Could not resolve host to a valid address')
                eventlet_socket = eventlet.listen(addresses[0][4], addresses[0][0])
                # If provided an SSL argument, use an SSL socket
                ssl_args = ['keyfile', 'certfile', 'server_side', 'cert_reqs',
                            'ssl_version', 'ca_certs',
                            'do_handshake_on_connect', 'suppress_ragged_eofs',
                            'ciphers']
                ssl_params = {k: kwargs[k] for k in kwargs if k in ssl_args}
                if len(ssl_params) > 0:
                    # SSL options go to wrap_ssl, not to the WSGI server
                    for k in ssl_params:
                        kwargs.pop(k)
                    ssl_params['server_side'] = True  # Listening requires true
                    eventlet_socket = eventlet.wrap_ssl(eventlet_socket,
                                                        **ssl_params)
                eventlet.wsgi.server(eventlet_socket, app,
                                     log_output=log_output, **kwargs)
            if use_reloader:
                run_with_reloader(run_server, extra_files=extra_files)
            else:
                run_server()
        elif self.server.eio.async_mode == 'gevent':
            from gevent import pywsgi
            try:
                from geventwebsocket.handler import WebSocketHandler
                websocket = True
            except ImportError:
                # gevent-websocket not installed: fall back to polling only
                websocket = False
            log = 'default'
            if not log_output:
                log = None
            if websocket:
                self.wsgi_server = pywsgi.WSGIServer(
                    (host, port), app, handler_class=WebSocketHandler,
                    log=log, **kwargs)
            else:
                self.wsgi_server = pywsgi.WSGIServer((host, port), app,
                                                     log=log, **kwargs)
            if use_reloader:
                # monkey patching is required by the reloader
                from gevent import monkey
                monkey.patch_all()
                def run_server():
                    self.wsgi_server.serve_forever()
                run_with_reloader(run_server, extra_files=extra_files)
            else:
                self.wsgi_server.serve_forever()
def stop(self):
"""Stop a running SocketIO web server.
This method must be called from a HTTP or SocketIO handler function.
"""
if self.server.eio.async_mode == 'threading':
func = flask.request.environ.get('werkzeug.server.shutdown')
if func:
func()
else:
raise RuntimeError('Cannot stop unknown web server')
elif self.server.eio.async_mode == 'eventlet':
raise SystemExit
elif self.server.eio.async_mode == 'gevent':
self.wsgi_server.stop()
def start_background_task(self, target, *args, **kwargs):
"""Start a background task using the appropriate async model.
This is a utility function that applications can use to start a
background task using the method that is compatible with the
selected async mode.
:param target: the target function to execute.
:param args: arguments to pass to the function.
:param kwargs: keyword arguments to pass to the function.
This function returns an object compatible with the `Thread` class in
the Python standard library. The `start()` method on this object is
already called by this function.
"""
return self.server.start_background_task(target, *args, **kwargs)
def sleep(self, seconds=0):
"""Sleep for the requested amount of time using the appropriate async
model.
This is a utility function that applications can use to put a task to
sleep without having to worry about using the correct call for the
selected async mode.
"""
return self.server.sleep(seconds)
def test_client(self, app, namespace=None, query_string=None,
headers=None, flask_test_client=None):
"""The Socket.IO test client is useful for testing a Flask-SocketIO
server. It works in a similar way to the Flask Test Client, but
adapted to the Socket.IO server.
:param app: The Flask application instance.
:param namespace: The namespace for the client. If not provided, the
client connects to the server on the global
namespace.
:param query_string: A string with custom query string arguments.
:param headers: A dictionary with custom HTTP headers.
:param flask_test_client: The instance of the Flask test client
currently in use. Passing the Flask test
client is optional, but is necessary if you
want the Flask user session and any other
cookies set in HTTP routes accessible from
Socket.IO events.
"""
return SocketIOTestClient(app, self, namespace=namespace,
query_string=query_string, headers=headers,
flask_test_client=flask_test_client)
def _handle_event(self, handler, message, namespace, sid, *args):
if sid not in self.server.environ:
# we don't have record of this client, ignore this event
return '', 400
app = self.server.environ[sid]['flask.app']
with app.request_context(self.server.environ[sid]):
if self.manage_session:
# manage a separate session for this client's Socket.IO events
# created as a copy of the regular user session
if 'saved_session' not in self.server.environ[sid]:
self.server.environ[sid]['saved_session'] = \
_ManagedSession(flask.session)
session_obj = self.server.environ[sid]['saved_session']
else:
# let Flask handle the user session
# for cookie based sessions, this effectively freezes the
# session to its state at connection time
# for server-side sessions, this allows HTTP and Socket.IO to
# share the session, with both having read/write access to it
session_obj = flask.session._get_current_object()
_request_ctx_stack.top.session = session_obj
flask.request.sid = sid
flask.request.namespace = namespace
flask.request.event = {'message': message, 'args': args}
try:
if message == 'connect':
ret = handler()
else:
ret = handler(*args)
except:
err_handler = self.exception_handlers.get(
namespace, self.default_exception_handler)
if err_handler is None:
raise
type, value, traceback = sys.exc_info()
return err_handler(value)
if not self.manage_session:
# when Flask is managing the user session, it needs to save it
if not hasattr(session_obj, 'modified') or session_obj.modified:
resp = app.response_class()
app.session_interface.save_session(app, session_obj, resp)
return ret
def emit(event, *args, **kwargs):
    """Emit a SocketIO event.

    This function emits a SocketIO event to one or more connected clients. A
    JSON blob can be attached to the event as payload. It can only be called
    from a SocketIO event handler, as it obtains some information from the
    current client context. Example::

        @socketio.on('my event')
        def handle_my_custom_event(json):
            emit('my response', {'data': 42})

    :param event: The name of the user event to emit.
    :param args: A dictionary with the JSON data to send as payload.
    :param namespace: The namespace under which the message is to be sent.
                      Defaults to the namespace used by the originating event.
                      A ``'/'`` can be used to explicitly specify the global
                      namespace.
    :param callback: Callback function to invoke with the client's
                     acknowledgement.
    :param broadcast: ``True`` to send the message to all clients, or
                      ``False`` to only reply to the sender of the
                      originating event.
    :param room: Send the message to all the users in the given room. If this
                 argument is set, then broadcast is implied to be ``True``.
    :param include_self: ``True`` to include the sender when broadcasting or
                         addressing a room, or ``False`` to send to everyone
                         but the sender.
    :param ignore_queue: Only used when a message queue is configured. If set
                         to ``True``, the event is emitted to the clients
                         directly, without going through the queue. This is
                         more efficient, but only works when a single server
                         process is used, or when there is a single
                         addressee. It is recommended to always leave this
                         parameter with its default value of ``False``.
    """
    # default the namespace lazily to the one the current event came from,
    # so flask.request is only touched when no namespace was given
    namespace = kwargs['namespace'] if 'namespace' in kwargs else \
        flask.request.namespace
    room = kwargs.get('room')
    if room is None and not kwargs.get('broadcast'):
        # without an explicit room or broadcast, reply only to the sender
        room = flask.request.sid
    socketio = flask.current_app.extensions['socketio']
    return socketio.emit(event, *args, namespace=namespace, room=room,
                         include_self=kwargs.get('include_self', True),
                         callback=kwargs.get('callback'),
                         ignore_queue=kwargs.get('ignore_queue', False))
def send(message, **kwargs):
    """Send a SocketIO message.

    This function sends a simple SocketIO message to one or more connected
    clients. The message can be a string or a JSON blob. This is a simpler
    version of ``emit()``, which should be preferred. It can only be called
    from a SocketIO event handler.

    :param message: The message to send, either a string or a JSON blob.
    :param json: ``True`` if ``message`` is a JSON blob, ``False``
                 otherwise.
    :param namespace: The namespace under which the message is to be sent.
                      Defaults to the namespace used by the originating event.
                      An empty string can be used to use the global namespace.
    :param callback: Callback function to invoke with the client's
                     acknowledgement.
    :param broadcast: ``True`` to send the message to all connected clients,
                      or ``False`` to only reply to the sender of the
                      originating event.
    :param room: Send the message to all the users in the given room.
    :param include_self: ``True`` to include the sender when broadcasting or
                         addressing a room, or ``False`` to send to everyone
                         but the sender.
    :param ignore_queue: Only used when a message queue is configured. If set
                         to ``True``, the event is emitted to the clients
                         directly, without going through the queue. This is
                         more efficient, but only works when a single server
                         process is used, or when there is a single
                         addressee. It is recommended to always leave this
                         parameter with its default value of ``False``.
    """
    json = kwargs.get('json', False)
    # default the namespace lazily to the one the current event came from
    namespace = kwargs['namespace'] if 'namespace' in kwargs else \
        flask.request.namespace
    room = kwargs.get('room')
    if room is None and not kwargs.get('broadcast'):
        # without an explicit room or broadcast, reply only to the sender
        room = flask.request.sid
    socketio = flask.current_app.extensions['socketio']
    return socketio.send(message, json=json, namespace=namespace, room=room,
                         include_self=kwargs.get('include_self', True),
                         callback=kwargs.get('callback'),
                         ignore_queue=kwargs.get('ignore_queue', False))
def join_room(room, sid=None, namespace=None):
    """Join a room.

    This function puts the user in a room, under the current namespace. The
    user and the namespace are obtained from the event context. This is a
    function that can only be called from a SocketIO event handler. Example::

        @socketio.on('join')
        def on_join(data):
            username = session['username']
            room = data['room']
            join_room(room)
            send(username + ' has entered the room.', room=room)

    :param room: The name of the room to join.
    :param sid: The session id of the client. If not provided, the client is
                obtained from the request context.
    :param namespace: The namespace for the room. If not provided, the
                      namespace is obtained from the request context.
    """
    # fall back on the request context for any argument that was omitted
    target_sid = sid or flask.request.sid
    target_namespace = namespace or flask.request.namespace
    server = flask.current_app.extensions['socketio'].server
    server.enter_room(target_sid, room, namespace=target_namespace)
def leave_room(room, sid=None, namespace=None):
    """Leave a room.

    This function removes the user from a room, under the current namespace.
    The user and the namespace are obtained from the event context. Example::

        @socketio.on('leave')
        def on_leave(data):
            username = session['username']
            room = data['room']
            leave_room(room)
            send(username + ' has left the room.', room=room)

    :param room: The name of the room to leave.
    :param sid: The session id of the client. If not provided, the client is
                obtained from the request context.
    :param namespace: The namespace for the room. If not provided, the
                      namespace is obtained from the request context.
    """
    # fall back on the request context for any argument that was omitted
    target_sid = sid or flask.request.sid
    target_namespace = namespace or flask.request.namespace
    server = flask.current_app.extensions['socketio'].server
    server.leave_room(target_sid, room, namespace=target_namespace)
def close_room(room, namespace=None):
    """Close a room.

    This function removes any users that are in the given room and then
    deletes the room from the server.

    :param room: The name of the room to close.
    :param namespace: The namespace for the room. If not provided, the
                      namespace is obtained from the request context.
    """
    # fall back on the request context when no namespace was given
    target_namespace = namespace or flask.request.namespace
    server = flask.current_app.extensions['socketio'].server
    server.close_room(room, namespace=target_namespace)
def rooms(sid=None, namespace=None):
    """Return a list of the rooms the client is in.

    This function returns all the rooms the client has entered, including its
    own room, assigned by the Socket.IO server.

    :param sid: The session id of the client. If not provided, the client is
                obtained from the request context.
    :param namespace: The namespace for the room. If not provided, the
                      namespace is obtained from the request context.
    """
    # fall back on the request context for any argument that was omitted
    target_sid = sid or flask.request.sid
    target_namespace = namespace or flask.request.namespace
    server = flask.current_app.extensions['socketio'].server
    return server.rooms(target_sid, namespace=target_namespace)
|
miguelgrinberg/Flask-SocketIO | flask_socketio/__init__.py | SocketIO.on | python | def on(self, message, namespace=None):
namespace = namespace or '/'
def decorator(handler):
def _handler(sid, *args):
return self._handle_event(handler, message, namespace, sid,
*args)
if self.server:
self.server.on(message, _handler, namespace=namespace)
else:
self.handlers.append((message, _handler, namespace))
return handler
return decorator | Decorator to register a SocketIO event handler.
This decorator must be applied to SocketIO event handlers. Example::
@socketio.on('my event', namespace='/chat')
def handle_my_custom_event(json):
print('received json: ' + str(json))
:param message: The name of the event. This is normally a user defined
string, but a few event names are already defined. Use
``'message'`` to define a handler that takes a string
payload, ``'json'`` to define a handler that takes a
JSON blob payload, ``'connect'`` or ``'disconnect'``
to create handlers for connection and disconnection
events.
:param namespace: The namespace on which the handler is to be
registered. Defaults to the global namespace. | train | https://github.com/miguelgrinberg/Flask-SocketIO/blob/4bef800d5e7ba7d98a6f4cd94191ff0b4496c334/flask_socketio/__init__.py#L235-L266 | null | class SocketIO(object):
"""Create a Flask-SocketIO server.
:param app: The flask application instance. If the application instance
isn't known at the time this class is instantiated, then call
``socketio.init_app(app)`` once the application instance is
available.
:param manage_session: If set to ``True``, this extension manages the user
session for Socket.IO events. If set to ``False``,
Flask's own session management is used. When using
Flask's cookie based sessions it is recommended that
you leave this set to the default of ``True``. When
using server-side sessions, a ``False`` setting
enables sharing the user session between HTTP routes
and Socket.IO events.
:param message_queue: A connection URL for a message queue service the
server can use for multi-process communication. A
message queue is not required when using a single
server process.
:param channel: The channel name, when using a message queue. If a channel
isn't specified, a default channel will be used. If
multiple clusters of SocketIO processes need to use the
same message queue without interfering with each other, then
each cluster should use a different channel.
:param path: The path where the Socket.IO server is exposed. Defaults to
``'socket.io'``. Leave this as is unless you know what you are
doing.
:param resource: Alias to ``path``.
:param kwargs: Socket.IO and Engine.IO server options.
The Socket.IO server options are detailed below:
:param client_manager: The client manager instance that will manage the
client list. When this is omitted, the client list
is stored in an in-memory structure, so the use of
multiple connected servers is not possible. In most
cases, this argument does not need to be set
explicitly.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``.
:param binary: ``True`` to support binary payloads, ``False`` to treat all
payloads as text. On Python 2, if this is set to ``True``,
``unicode`` values are treated as text, and ``str`` and
``bytes`` values are treated as binary. This option has no
effect on Python 3, where text and binary payloads are
always automatically discovered.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions. To use the same json encoder and decoder as a Flask
application, use ``flask.json``.
The Engine.IO server configuration supports the following settings:
:param async_mode: The asynchronous model to use. See the Deployment
section in the documentation for a description of the
available options. Valid async modes are
``threading``, ``eventlet``, ``gevent`` and
``gevent_uwsgi``. If this argument is not given,
``eventlet`` is tried first, then ``gevent_uwsgi``,
then ``gevent``, and finally ``threading``. The
first async mode that has all its dependencies installed
is then one that is chosen.
:param ping_timeout: The time in seconds that the client waits for the
server to respond before disconnecting. The default is
60 seconds.
:param ping_interval: The interval in seconds at which the client pings
the server. The default is 25 seconds.
:param max_http_buffer_size: The maximum size of a message when using the
polling transport. The default is 100,000,000
bytes.
:param allow_upgrades: Whether to allow transport upgrades or not. The
default is ``True``.
:param http_compression: Whether to compress packages when using the
polling transport. The default is ``True``.
:param compression_threshold: Only compress messages when their byte size
is greater than this value. The default is
1024 bytes.
:param cookie: Name of the HTTP cookie that contains the client session
id. If set to ``None``, a cookie is not sent to the client.
The default is ``'io'``.
:param cors_allowed_origins: List of origins that are allowed to connect
to this server. All origins are allowed by
default.
:param cors_credentials: Whether credentials (cookies, authentication) are
allowed in requests to this server. The default is
``True``.
:param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
a logger object to use. To disable logging set to
``False``. The default is ``False``.
"""
def __init__(self, app=None, **kwargs):
self.server = None
self.server_options = {}
self.wsgi_server = None
self.handlers = []
self.namespace_handlers = []
self.exception_handlers = {}
self.default_exception_handler = None
self.manage_session = True
# We can call init_app when:
# - we were given the Flask app instance (standard initialization)
# - we were not given the app, but we were given a message_queue
# (standard initialization for auxiliary process)
# In all other cases we collect the arguments and assume the client
# will call init_app from an app factory function.
if app is not None or 'message_queue' in kwargs:
self.init_app(app, **kwargs)
else:
self.server_options.update(kwargs)
def init_app(self, app, **kwargs):
if app is not None:
if not hasattr(app, 'extensions'):
app.extensions = {} # pragma: no cover
app.extensions['socketio'] = self
self.server_options.update(kwargs)
self.manage_session = self.server_options.pop('manage_session',
self.manage_session)
if 'client_manager' not in self.server_options:
url = self.server_options.pop('message_queue', None)
channel = self.server_options.pop('channel', 'flask-socketio')
write_only = app is None
if url:
if url.startswith(('redis://', "rediss://")):
queue_class = socketio.RedisManager
elif url.startswith('zmq'):
queue_class = socketio.ZmqManager
else:
queue_class = socketio.KombuManager
queue = queue_class(url, channel=channel,
write_only=write_only)
self.server_options['client_manager'] = queue
if 'json' in self.server_options and \
self.server_options['json'] == flask_json:
# flask's json module is tricky to use because its output
# changes when it is invoked inside or outside the app context
# so here to prevent any ambiguities we replace it with wrappers
# that ensure that the app context is always present
class FlaskSafeJSON(object):
@staticmethod
def dumps(*args, **kwargs):
with app.app_context():
return flask_json.dumps(*args, **kwargs)
@staticmethod
def loads(*args, **kwargs):
with app.app_context():
return flask_json.loads(*args, **kwargs)
self.server_options['json'] = FlaskSafeJSON
resource = self.server_options.pop('path', None) or \
self.server_options.pop('resource', None) or 'socket.io'
if resource.startswith('/'):
resource = resource[1:]
if os.environ.get('FLASK_RUN_FROM_CLI'):
if self.server_options.get('async_mode') is None:
if app is not None:
app.logger.warning(
'Flask-SocketIO is Running under Werkzeug, WebSocket '
'is not available.')
self.server_options['async_mode'] = 'threading'
self.server = socketio.Server(**self.server_options)
self.async_mode = self.server.async_mode
for handler in self.handlers:
self.server.on(handler[0], handler[1], namespace=handler[2])
for namespace_handler in self.namespace_handlers:
self.server.register_namespace(namespace_handler)
if app is not None:
            # here we attach the SocketIO middleware to the SocketIO object so it
# can be referenced later if debug middleware needs to be inserted
self.sockio_mw = _SocketIOMiddleware(self.server, app,
socketio_path=resource)
app.wsgi_app = self.sockio_mw
def on_error(self, namespace=None):
"""Decorator to define a custom error handler for SocketIO events.
This decorator can be applied to a function that acts as an error
handler for a namespace. This handler will be invoked when a SocketIO
event handler raises an exception. The handler function must accept one
argument, which is the exception raised. Example::
@socketio.on_error(namespace='/chat')
def chat_error_handler(e):
print('An error has occurred: ' + str(e))
:param namespace: The namespace for which to register the error
handler. Defaults to the global namespace.
"""
namespace = namespace or '/'
def decorator(exception_handler):
if not callable(exception_handler):
raise ValueError('exception_handler must be callable')
self.exception_handlers[namespace] = exception_handler
return exception_handler
return decorator
def on_error_default(self, exception_handler):
"""Decorator to define a default error handler for SocketIO events.
This decorator can be applied to a function that acts as a default
error handler for any namespaces that do not have a specific handler.
Example::
@socketio.on_error_default
def error_handler(e):
print('An error has occurred: ' + str(e))
"""
if not callable(exception_handler):
raise ValueError('exception_handler must be callable')
self.default_exception_handler = exception_handler
return exception_handler
def on_event(self, message, handler, namespace=None):
"""Register a SocketIO event handler.
``on_event`` is the non-decorator version of ``'on'``.
Example::
def on_foo_event(json):
print('received json: ' + str(json))
socketio.on_event('my event', on_foo_event, namespace='/chat')
:param message: The name of the event. This is normally a user defined
string, but a few event names are already defined. Use
``'message'`` to define a handler that takes a string
payload, ``'json'`` to define a handler that takes a
JSON blob payload, ``'connect'`` or ``'disconnect'``
to create handlers for connection and disconnection
events.
:param handler: The function that handles the event.
:param namespace: The namespace on which the handler is to be
registered. Defaults to the global namespace.
"""
self.on(message, namespace=namespace)(handler)
def on_namespace(self, namespace_handler):
if not isinstance(namespace_handler, Namespace):
raise ValueError('Not a namespace instance.')
namespace_handler._set_socketio(self)
if self.server:
self.server.register_namespace(namespace_handler)
else:
self.namespace_handlers.append(namespace_handler)
def emit(self, event, *args, **kwargs):
"""Emit a server generated SocketIO event.
This function emits a SocketIO event to one or more connected clients.
A JSON blob can be attached to the event as payload. This function can
be used outside of a SocketIO event context, so it is appropriate to
use when the server is the originator of an event, outside of any
client context, such as in a regular HTTP request handler or a
background task. Example::
@app.route('/ping')
def ping():
socketio.emit('ping event', {'data': 42}, namespace='/chat')
:param event: The name of the user event to emit.
:param args: A dictionary with the JSON data to send as payload.
:param namespace: The namespace under which the message is to be sent.
Defaults to the global namespace.
:param room: Send the message to all the users in the given room. If
this parameter is not included, the event is sent to
all connected users.
:param skip_sid: The session id of a client to ignore when broadcasting
or addressing a room. This is typically set to the
originator of the message, so that everyone except
that client receive the message.
:param callback: If given, this function will be called to acknowledge
that the client has received the message. The
arguments that will be passed to the function are
those provided by the client. Callback functions can
only be used when addressing an individual client.
"""
namespace = kwargs.pop('namespace', '/')
room = kwargs.pop('room', None)
include_self = kwargs.pop('include_self', True)
skip_sid = kwargs.pop('skip_sid', None)
if not include_self and not skip_sid:
skip_sid = flask.request.sid
callback = kwargs.pop('callback', None)
self.server.emit(event, *args, namespace=namespace, room=room,
skip_sid=skip_sid, callback=callback, **kwargs)
def send(self, data, json=False, namespace=None, room=None,
callback=None, include_self=True, skip_sid=None, **kwargs):
"""Send a server-generated SocketIO message.
This function sends a simple SocketIO message to one or more connected
clients. The message can be a string or a JSON blob. This is a simpler
version of ``emit()``, which should be preferred. This function can be
used outside of a SocketIO event context, so it is appropriate to use
when the server is the originator of an event.
:param data: The message to send, either a string or a JSON blob.
:param json: ``True`` if ``message`` is a JSON blob, ``False``
otherwise.
:param namespace: The namespace under which the message is to be sent.
Defaults to the global namespace.
:param room: Send the message only to the users in the given room. If
this parameter is not included, the message is sent to
all connected users.
:param skip_sid: The session id of a client to ignore when broadcasting
or addressing a room. This is typically set to the
originator of the message, so that everyone except
that client receive the message.
:param callback: If given, this function will be called to acknowledge
that the client has received the message. The
arguments that will be passed to the function are
those provided by the client. Callback functions can
only be used when addressing an individual client.
"""
skip_sid = flask.request.sid if not include_self else skip_sid
if json:
self.emit('json', data, namespace=namespace, room=room,
skip_sid=skip_sid, callback=callback, **kwargs)
else:
self.emit('message', data, namespace=namespace, room=room,
skip_sid=skip_sid, callback=callback, **kwargs)
def close_room(self, room, namespace=None):
"""Close a room.
This function removes any users that are in the given room and then
deletes the room from the server. This function can be used outside
of a SocketIO event context.
:param room: The name of the room to close.
:param namespace: The namespace under which the room exists. Defaults
to the global namespace.
"""
self.server.close_room(room, namespace)
def run(self, app, host=None, port=None, **kwargs):
"""Run the SocketIO web server.
:param app: The Flask application instance.
:param host: The hostname or IP address for the server to listen on.
Defaults to 127.0.0.1.
:param port: The port number for the server to listen on. Defaults to
5000.
:param debug: ``True`` to start the server in debug mode, ``False`` to
start in normal mode.
:param use_reloader: ``True`` to enable the Flask reloader, ``False``
to disable it.
:param extra_files: A list of additional files that the Flask
reloader should watch. Defaults to ``None``
        :param log_output: If ``True``, the server logs all incoming
connections. If ``False`` logging is disabled.
Defaults to ``True`` in debug mode, ``False``
in normal mode. Unused when the threading async
mode is used.
:param kwargs: Additional web server options. The web server options
are specific to the server used in each of the supported
async modes. Note that options provided here will
not be seen when using an external web server such
as gunicorn, since this method is not called in that
case.
"""
if host is None:
host = '127.0.0.1'
if port is None:
server_name = app.config['SERVER_NAME']
if server_name and ':' in server_name:
port = int(server_name.rsplit(':', 1)[1])
else:
port = 5000
debug = kwargs.pop('debug', app.debug)
log_output = kwargs.pop('log_output', debug)
use_reloader = kwargs.pop('use_reloader', debug)
extra_files = kwargs.pop('extra_files', None)
app.debug = debug
if app.debug and self.server.eio.async_mode != 'threading':
# put the debug middleware between the SocketIO middleware
# and the Flask application instance
#
# mw1 mw2 mw3 Flask app
# o ---- o ---- o ---- o
# /
# o Flask-SocketIO
# \ middleware
# o
# Flask-SocketIO WebSocket handler
#
# BECOMES
#
# dbg-mw mw1 mw2 mw3 Flask app
# o ---- o ---- o ---- o ---- o
# /
# o Flask-SocketIO
# \ middleware
# o
# Flask-SocketIO WebSocket handler
#
self.sockio_mw.wsgi_app = DebuggedApplication(self.sockio_mw.wsgi_app,
evalex=True)
if self.server.eio.async_mode == 'threading':
from werkzeug._internal import _log
_log('warning', 'WebSocket transport not available. Install '
'eventlet or gevent and gevent-websocket for '
'improved performance.')
app.run(host=host, port=port, threaded=True,
use_reloader=use_reloader, **kwargs)
elif self.server.eio.async_mode == 'eventlet':
def run_server():
import eventlet
import eventlet.wsgi
import eventlet.green
addresses = eventlet.green.socket.getaddrinfo(host, port)
if not addresses:
raise RuntimeError('Could not resolve host to a valid address')
eventlet_socket = eventlet.listen(addresses[0][4], addresses[0][0])
# If provided an SSL argument, use an SSL socket
ssl_args = ['keyfile', 'certfile', 'server_side', 'cert_reqs',
'ssl_version', 'ca_certs',
'do_handshake_on_connect', 'suppress_ragged_eofs',
'ciphers']
ssl_params = {k: kwargs[k] for k in kwargs if k in ssl_args}
if len(ssl_params) > 0:
for k in ssl_params:
kwargs.pop(k)
ssl_params['server_side'] = True # Listening requires true
eventlet_socket = eventlet.wrap_ssl(eventlet_socket,
**ssl_params)
eventlet.wsgi.server(eventlet_socket, app,
log_output=log_output, **kwargs)
if use_reloader:
run_with_reloader(run_server, extra_files=extra_files)
else:
run_server()
elif self.server.eio.async_mode == 'gevent':
from gevent import pywsgi
try:
from geventwebsocket.handler import WebSocketHandler
websocket = True
except ImportError:
websocket = False
log = 'default'
if not log_output:
log = None
if websocket:
self.wsgi_server = pywsgi.WSGIServer(
(host, port), app, handler_class=WebSocketHandler,
log=log, **kwargs)
else:
self.wsgi_server = pywsgi.WSGIServer((host, port), app,
log=log, **kwargs)
if use_reloader:
# monkey patching is required by the reloader
from gevent import monkey
monkey.patch_all()
def run_server():
self.wsgi_server.serve_forever()
run_with_reloader(run_server, extra_files=extra_files)
else:
self.wsgi_server.serve_forever()
def stop(self):
"""Stop a running SocketIO web server.
This method must be called from a HTTP or SocketIO handler function.
"""
if self.server.eio.async_mode == 'threading':
func = flask.request.environ.get('werkzeug.server.shutdown')
if func:
func()
else:
raise RuntimeError('Cannot stop unknown web server')
elif self.server.eio.async_mode == 'eventlet':
raise SystemExit
elif self.server.eio.async_mode == 'gevent':
self.wsgi_server.stop()
def start_background_task(self, target, *args, **kwargs):
"""Start a background task using the appropriate async model.
This is a utility function that applications can use to start a
background task using the method that is compatible with the
selected async mode.
:param target: the target function to execute.
:param args: arguments to pass to the function.
:param kwargs: keyword arguments to pass to the function.
This function returns an object compatible with the `Thread` class in
the Python standard library. The `start()` method on this object is
already called by this function.
"""
return self.server.start_background_task(target, *args, **kwargs)
def sleep(self, seconds=0):
"""Sleep for the requested amount of time using the appropriate async
model.
This is a utility function that applications can use to put a task to
sleep without having to worry about using the correct call for the
selected async mode.
"""
return self.server.sleep(seconds)
def test_client(self, app, namespace=None, query_string=None,
headers=None, flask_test_client=None):
"""The Socket.IO test client is useful for testing a Flask-SocketIO
server. It works in a similar way to the Flask Test Client, but
adapted to the Socket.IO server.
:param app: The Flask application instance.
:param namespace: The namespace for the client. If not provided, the
client connects to the server on the global
namespace.
:param query_string: A string with custom query string arguments.
:param headers: A dictionary with custom HTTP headers.
:param flask_test_client: The instance of the Flask test client
currently in use. Passing the Flask test
client is optional, but is necessary if you
want the Flask user session and any other
cookies set in HTTP routes accessible from
Socket.IO events.
"""
return SocketIOTestClient(app, self, namespace=namespace,
query_string=query_string, headers=headers,
flask_test_client=flask_test_client)
def _handle_event(self, handler, message, namespace, sid, *args):
if sid not in self.server.environ:
# we don't have record of this client, ignore this event
return '', 400
app = self.server.environ[sid]['flask.app']
with app.request_context(self.server.environ[sid]):
if self.manage_session:
# manage a separate session for this client's Socket.IO events
# created as a copy of the regular user session
if 'saved_session' not in self.server.environ[sid]:
self.server.environ[sid]['saved_session'] = \
_ManagedSession(flask.session)
session_obj = self.server.environ[sid]['saved_session']
else:
# let Flask handle the user session
# for cookie based sessions, this effectively freezes the
# session to its state at connection time
# for server-side sessions, this allows HTTP and Socket.IO to
# share the session, with both having read/write access to it
session_obj = flask.session._get_current_object()
_request_ctx_stack.top.session = session_obj
flask.request.sid = sid
flask.request.namespace = namespace
flask.request.event = {'message': message, 'args': args}
try:
if message == 'connect':
ret = handler()
else:
ret = handler(*args)
except:
err_handler = self.exception_handlers.get(
namespace, self.default_exception_handler)
if err_handler is None:
raise
type, value, traceback = sys.exc_info()
return err_handler(value)
if not self.manage_session:
# when Flask is managing the user session, it needs to save it
if not hasattr(session_obj, 'modified') or session_obj.modified:
resp = app.response_class()
app.session_interface.save_session(app, session_obj, resp)
return ret
|
miguelgrinberg/Flask-SocketIO | flask_socketio/__init__.py | SocketIO.on_error | python | def on_error(self, namespace=None):
namespace = namespace or '/'
def decorator(exception_handler):
if not callable(exception_handler):
raise ValueError('exception_handler must be callable')
self.exception_handlers[namespace] = exception_handler
return exception_handler
return decorator | Decorator to define a custom error handler for SocketIO events.
This decorator can be applied to a function that acts as an error
handler for a namespace. This handler will be invoked when a SocketIO
event handler raises an exception. The handler function must accept one
argument, which is the exception raised. Example::
@socketio.on_error(namespace='/chat')
def chat_error_handler(e):
print('An error has occurred: ' + str(e))
:param namespace: The namespace for which to register the error
handler. Defaults to the global namespace. | train | https://github.com/miguelgrinberg/Flask-SocketIO/blob/4bef800d5e7ba7d98a6f4cd94191ff0b4496c334/flask_socketio/__init__.py#L268-L290 | null | class SocketIO(object):
"""Create a Flask-SocketIO server.
:param app: The flask application instance. If the application instance
isn't known at the time this class is instantiated, then call
``socketio.init_app(app)`` once the application instance is
available.
:param manage_session: If set to ``True``, this extension manages the user
session for Socket.IO events. If set to ``False``,
Flask's own session management is used. When using
Flask's cookie based sessions it is recommended that
you leave this set to the default of ``True``. When
using server-side sessions, a ``False`` setting
enables sharing the user session between HTTP routes
and Socket.IO events.
:param message_queue: A connection URL for a message queue service the
server can use for multi-process communication. A
message queue is not required when using a single
server process.
:param channel: The channel name, when using a message queue. If a channel
isn't specified, a default channel will be used. If
multiple clusters of SocketIO processes need to use the
same message queue without interfering with each other, then
each cluster should use a different channel.
:param path: The path where the Socket.IO server is exposed. Defaults to
``'socket.io'``. Leave this as is unless you know what you are
doing.
:param resource: Alias to ``path``.
:param kwargs: Socket.IO and Engine.IO server options.
The Socket.IO server options are detailed below:
:param client_manager: The client manager instance that will manage the
client list. When this is omitted, the client list
is stored in an in-memory structure, so the use of
multiple connected servers is not possible. In most
cases, this argument does not need to be set
explicitly.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``.
:param binary: ``True`` to support binary payloads, ``False`` to treat all
payloads as text. On Python 2, if this is set to ``True``,
``unicode`` values are treated as text, and ``str`` and
``bytes`` values are treated as binary. This option has no
effect on Python 3, where text and binary payloads are
always automatically discovered.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions. To use the same json encoder and decoder as a Flask
application, use ``flask.json``.
The Engine.IO server configuration supports the following settings:
:param async_mode: The asynchronous model to use. See the Deployment
section in the documentation for a description of the
available options. Valid async modes are
``threading``, ``eventlet``, ``gevent`` and
``gevent_uwsgi``. If this argument is not given,
``eventlet`` is tried first, then ``gevent_uwsgi``,
then ``gevent``, and finally ``threading``. The
first async mode that has all its dependencies installed
is then one that is chosen.
:param ping_timeout: The time in seconds that the client waits for the
server to respond before disconnecting. The default is
60 seconds.
:param ping_interval: The interval in seconds at which the client pings
the server. The default is 25 seconds.
:param max_http_buffer_size: The maximum size of a message when using the
polling transport. The default is 100,000,000
bytes.
:param allow_upgrades: Whether to allow transport upgrades or not. The
default is ``True``.
:param http_compression: Whether to compress packages when using the
polling transport. The default is ``True``.
:param compression_threshold: Only compress messages when their byte size
is greater than this value. The default is
1024 bytes.
:param cookie: Name of the HTTP cookie that contains the client session
id. If set to ``None``, a cookie is not sent to the client.
The default is ``'io'``.
:param cors_allowed_origins: List of origins that are allowed to connect
to this server. All origins are allowed by
default.
:param cors_credentials: Whether credentials (cookies, authentication) are
allowed in requests to this server. The default is
``True``.
:param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
a logger object to use. To disable logging set to
``False``. The default is ``False``.
"""
def __init__(self, app=None, **kwargs):
self.server = None
self.server_options = {}
self.wsgi_server = None
self.handlers = []
self.namespace_handlers = []
self.exception_handlers = {}
self.default_exception_handler = None
self.manage_session = True
# We can call init_app when:
# - we were given the Flask app instance (standard initialization)
# - we were not given the app, but we were given a message_queue
# (standard initialization for auxiliary process)
# In all other cases we collect the arguments and assume the client
# will call init_app from an app factory function.
if app is not None or 'message_queue' in kwargs:
self.init_app(app, **kwargs)
else:
self.server_options.update(kwargs)
def init_app(self, app, **kwargs):
if app is not None:
if not hasattr(app, 'extensions'):
app.extensions = {} # pragma: no cover
app.extensions['socketio'] = self
self.server_options.update(kwargs)
self.manage_session = self.server_options.pop('manage_session',
self.manage_session)
if 'client_manager' not in self.server_options:
url = self.server_options.pop('message_queue', None)
channel = self.server_options.pop('channel', 'flask-socketio')
write_only = app is None
if url:
if url.startswith(('redis://', "rediss://")):
queue_class = socketio.RedisManager
elif url.startswith('zmq'):
queue_class = socketio.ZmqManager
else:
queue_class = socketio.KombuManager
queue = queue_class(url, channel=channel,
write_only=write_only)
self.server_options['client_manager'] = queue
if 'json' in self.server_options and \
self.server_options['json'] == flask_json:
# flask's json module is tricky to use because its output
# changes when it is invoked inside or outside the app context
# so here to prevent any ambiguities we replace it with wrappers
# that ensure that the app context is always present
class FlaskSafeJSON(object):
@staticmethod
def dumps(*args, **kwargs):
with app.app_context():
return flask_json.dumps(*args, **kwargs)
@staticmethod
def loads(*args, **kwargs):
with app.app_context():
return flask_json.loads(*args, **kwargs)
self.server_options['json'] = FlaskSafeJSON
resource = self.server_options.pop('path', None) or \
self.server_options.pop('resource', None) or 'socket.io'
if resource.startswith('/'):
resource = resource[1:]
if os.environ.get('FLASK_RUN_FROM_CLI'):
if self.server_options.get('async_mode') is None:
if app is not None:
app.logger.warning(
'Flask-SocketIO is Running under Werkzeug, WebSocket '
'is not available.')
self.server_options['async_mode'] = 'threading'
self.server = socketio.Server(**self.server_options)
self.async_mode = self.server.async_mode
for handler in self.handlers:
self.server.on(handler[0], handler[1], namespace=handler[2])
for namespace_handler in self.namespace_handlers:
self.server.register_namespace(namespace_handler)
if app is not None:
# here we attach the SocketIO middlware to the SocketIO object so it
# can be referenced later if debug middleware needs to be inserted
self.sockio_mw = _SocketIOMiddleware(self.server, app,
socketio_path=resource)
app.wsgi_app = self.sockio_mw
def on(self, message, namespace=None):
"""Decorator to register a SocketIO event handler.
This decorator must be applied to SocketIO event handlers. Example::
@socketio.on('my event', namespace='/chat')
def handle_my_custom_event(json):
print('received json: ' + str(json))
:param message: The name of the event. This is normally a user defined
string, but a few event names are already defined. Use
``'message'`` to define a handler that takes a string
payload, ``'json'`` to define a handler that takes a
JSON blob payload, ``'connect'`` or ``'disconnect'``
to create handlers for connection and disconnection
events.
:param namespace: The namespace on which the handler is to be
registered. Defaults to the global namespace.
"""
namespace = namespace or '/'
def decorator(handler):
def _handler(sid, *args):
return self._handle_event(handler, message, namespace, sid,
*args)
if self.server:
self.server.on(message, _handler, namespace=namespace)
else:
self.handlers.append((message, _handler, namespace))
return handler
return decorator
def on_error_default(self, exception_handler):
"""Decorator to define a default error handler for SocketIO events.
This decorator can be applied to a function that acts as a default
error handler for any namespaces that do not have a specific handler.
Example::
@socketio.on_error_default
def error_handler(e):
print('An error has occurred: ' + str(e))
"""
if not callable(exception_handler):
raise ValueError('exception_handler must be callable')
self.default_exception_handler = exception_handler
return exception_handler
def on_event(self, message, handler, namespace=None):
"""Register a SocketIO event handler.
``on_event`` is the non-decorator version of ``'on'``.
Example::
def on_foo_event(json):
print('received json: ' + str(json))
socketio.on_event('my event', on_foo_event, namespace='/chat')
:param message: The name of the event. This is normally a user defined
string, but a few event names are already defined. Use
``'message'`` to define a handler that takes a string
payload, ``'json'`` to define a handler that takes a
JSON blob payload, ``'connect'`` or ``'disconnect'``
to create handlers for connection and disconnection
events.
:param handler: The function that handles the event.
:param namespace: The namespace on which the handler is to be
registered. Defaults to the global namespace.
"""
self.on(message, namespace=namespace)(handler)
def on_namespace(self, namespace_handler):
if not isinstance(namespace_handler, Namespace):
raise ValueError('Not a namespace instance.')
namespace_handler._set_socketio(self)
if self.server:
self.server.register_namespace(namespace_handler)
else:
self.namespace_handlers.append(namespace_handler)
def emit(self, event, *args, **kwargs):
"""Emit a server generated SocketIO event.
This function emits a SocketIO event to one or more connected clients.
A JSON blob can be attached to the event as payload. This function can
be used outside of a SocketIO event context, so it is appropriate to
use when the server is the originator of an event, outside of any
client context, such as in a regular HTTP request handler or a
background task. Example::
@app.route('/ping')
def ping():
socketio.emit('ping event', {'data': 42}, namespace='/chat')
:param event: The name of the user event to emit.
:param args: A dictionary with the JSON data to send as payload.
:param namespace: The namespace under which the message is to be sent.
Defaults to the global namespace.
:param room: Send the message to all the users in the given room. If
this parameter is not included, the event is sent to
all connected users.
:param skip_sid: The session id of a client to ignore when broadcasting
or addressing a room. This is typically set to the
originator of the message, so that everyone except
that client receive the message.
:param callback: If given, this function will be called to acknowledge
that the client has received the message. The
arguments that will be passed to the function are
those provided by the client. Callback functions can
only be used when addressing an individual client.
"""
namespace = kwargs.pop('namespace', '/')
room = kwargs.pop('room', None)
include_self = kwargs.pop('include_self', True)
skip_sid = kwargs.pop('skip_sid', None)
if not include_self and not skip_sid:
skip_sid = flask.request.sid
callback = kwargs.pop('callback', None)
self.server.emit(event, *args, namespace=namespace, room=room,
skip_sid=skip_sid, callback=callback, **kwargs)
def send(self, data, json=False, namespace=None, room=None,
callback=None, include_self=True, skip_sid=None, **kwargs):
"""Send a server-generated SocketIO message.
This function sends a simple SocketIO message to one or more connected
clients. The message can be a string or a JSON blob. This is a simpler
version of ``emit()``, which should be preferred. This function can be
used outside of a SocketIO event context, so it is appropriate to use
when the server is the originator of an event.
:param data: The message to send, either a string or a JSON blob.
:param json: ``True`` if ``message`` is a JSON blob, ``False``
otherwise.
:param namespace: The namespace under which the message is to be sent.
Defaults to the global namespace.
:param room: Send the message only to the users in the given room. If
this parameter is not included, the message is sent to
all connected users.
:param skip_sid: The session id of a client to ignore when broadcasting
or addressing a room. This is typically set to the
originator of the message, so that everyone except
that client receive the message.
:param callback: If given, this function will be called to acknowledge
that the client has received the message. The
arguments that will be passed to the function are
those provided by the client. Callback functions can
only be used when addressing an individual client.
"""
skip_sid = flask.request.sid if not include_self else skip_sid
if json:
self.emit('json', data, namespace=namespace, room=room,
skip_sid=skip_sid, callback=callback, **kwargs)
else:
self.emit('message', data, namespace=namespace, room=room,
skip_sid=skip_sid, callback=callback, **kwargs)
def close_room(self, room, namespace=None):
"""Close a room.
This function removes any users that are in the given room and then
deletes the room from the server. This function can be used outside
of a SocketIO event context.
:param room: The name of the room to close.
:param namespace: The namespace under which the room exists. Defaults
to the global namespace.
"""
self.server.close_room(room, namespace)
def run(self, app, host=None, port=None, **kwargs):
"""Run the SocketIO web server.
:param app: The Flask application instance.
:param host: The hostname or IP address for the server to listen on.
Defaults to 127.0.0.1.
:param port: The port number for the server to listen on. Defaults to
5000.
:param debug: ``True`` to start the server in debug mode, ``False`` to
start in normal mode.
:param use_reloader: ``True`` to enable the Flask reloader, ``False``
to disable it.
:param extra_files: A list of additional files that the Flask
reloader should watch. Defaults to ``None``
:param log_output: If ``True``, the server logs all incomming
connections. If ``False`` logging is disabled.
Defaults to ``True`` in debug mode, ``False``
in normal mode. Unused when the threading async
mode is used.
:param kwargs: Additional web server options. The web server options
are specific to the server used in each of the supported
async modes. Note that options provided here will
not be seen when using an external web server such
as gunicorn, since this method is not called in that
case.
"""
if host is None:
host = '127.0.0.1'
if port is None:
server_name = app.config['SERVER_NAME']
if server_name and ':' in server_name:
port = int(server_name.rsplit(':', 1)[1])
else:
port = 5000
debug = kwargs.pop('debug', app.debug)
log_output = kwargs.pop('log_output', debug)
use_reloader = kwargs.pop('use_reloader', debug)
extra_files = kwargs.pop('extra_files', None)
app.debug = debug
if app.debug and self.server.eio.async_mode != 'threading':
# put the debug middleware between the SocketIO middleware
# and the Flask application instance
#
# mw1 mw2 mw3 Flask app
# o ---- o ---- o ---- o
# /
# o Flask-SocketIO
# \ middleware
# o
# Flask-SocketIO WebSocket handler
#
# BECOMES
#
# dbg-mw mw1 mw2 mw3 Flask app
# o ---- o ---- o ---- o ---- o
# /
# o Flask-SocketIO
# \ middleware
# o
# Flask-SocketIO WebSocket handler
#
self.sockio_mw.wsgi_app = DebuggedApplication(self.sockio_mw.wsgi_app,
evalex=True)
if self.server.eio.async_mode == 'threading':
from werkzeug._internal import _log
_log('warning', 'WebSocket transport not available. Install '
'eventlet or gevent and gevent-websocket for '
'improved performance.')
app.run(host=host, port=port, threaded=True,
use_reloader=use_reloader, **kwargs)
elif self.server.eio.async_mode == 'eventlet':
def run_server():
import eventlet
import eventlet.wsgi
import eventlet.green
addresses = eventlet.green.socket.getaddrinfo(host, port)
if not addresses:
raise RuntimeError('Could not resolve host to a valid address')
eventlet_socket = eventlet.listen(addresses[0][4], addresses[0][0])
# If provided an SSL argument, use an SSL socket
ssl_args = ['keyfile', 'certfile', 'server_side', 'cert_reqs',
'ssl_version', 'ca_certs',
'do_handshake_on_connect', 'suppress_ragged_eofs',
'ciphers']
ssl_params = {k: kwargs[k] for k in kwargs if k in ssl_args}
if len(ssl_params) > 0:
for k in ssl_params:
kwargs.pop(k)
ssl_params['server_side'] = True # Listening requires true
eventlet_socket = eventlet.wrap_ssl(eventlet_socket,
**ssl_params)
eventlet.wsgi.server(eventlet_socket, app,
log_output=log_output, **kwargs)
if use_reloader:
run_with_reloader(run_server, extra_files=extra_files)
else:
run_server()
elif self.server.eio.async_mode == 'gevent':
from gevent import pywsgi
try:
from geventwebsocket.handler import WebSocketHandler
websocket = True
except ImportError:
websocket = False
log = 'default'
if not log_output:
log = None
if websocket:
self.wsgi_server = pywsgi.WSGIServer(
(host, port), app, handler_class=WebSocketHandler,
log=log, **kwargs)
else:
self.wsgi_server = pywsgi.WSGIServer((host, port), app,
log=log, **kwargs)
if use_reloader:
# monkey patching is required by the reloader
from gevent import monkey
monkey.patch_all()
def run_server():
self.wsgi_server.serve_forever()
run_with_reloader(run_server, extra_files=extra_files)
else:
self.wsgi_server.serve_forever()
def stop(self):
"""Stop a running SocketIO web server.
This method must be called from a HTTP or SocketIO handler function.
"""
if self.server.eio.async_mode == 'threading':
func = flask.request.environ.get('werkzeug.server.shutdown')
if func:
func()
else:
raise RuntimeError('Cannot stop unknown web server')
elif self.server.eio.async_mode == 'eventlet':
raise SystemExit
elif self.server.eio.async_mode == 'gevent':
self.wsgi_server.stop()
def start_background_task(self, target, *args, **kwargs):
"""Start a background task using the appropriate async model.
This is a utility function that applications can use to start a
background task using the method that is compatible with the
selected async mode.
:param target: the target function to execute.
:param args: arguments to pass to the function.
:param kwargs: keyword arguments to pass to the function.
This function returns an object compatible with the `Thread` class in
the Python standard library. The `start()` method on this object is
already called by this function.
"""
return self.server.start_background_task(target, *args, **kwargs)
def sleep(self, seconds=0):
"""Sleep for the requested amount of time using the appropriate async
model.
This is a utility function that applications can use to put a task to
sleep without having to worry about using the correct call for the
selected async mode.
"""
return self.server.sleep(seconds)
def test_client(self, app, namespace=None, query_string=None,
headers=None, flask_test_client=None):
"""The Socket.IO test client is useful for testing a Flask-SocketIO
server. It works in a similar way to the Flask Test Client, but
adapted to the Socket.IO server.
:param app: The Flask application instance.
:param namespace: The namespace for the client. If not provided, the
client connects to the server on the global
namespace.
:param query_string: A string with custom query string arguments.
:param headers: A dictionary with custom HTTP headers.
:param flask_test_client: The instance of the Flask test client
currently in use. Passing the Flask test
client is optional, but is necessary if you
want the Flask user session and any other
cookies set in HTTP routes accessible from
Socket.IO events.
"""
return SocketIOTestClient(app, self, namespace=namespace,
query_string=query_string, headers=headers,
flask_test_client=flask_test_client)
def _handle_event(self, handler, message, namespace, sid, *args):
if sid not in self.server.environ:
# we don't have record of this client, ignore this event
return '', 400
app = self.server.environ[sid]['flask.app']
with app.request_context(self.server.environ[sid]):
if self.manage_session:
# manage a separate session for this client's Socket.IO events
# created as a copy of the regular user session
if 'saved_session' not in self.server.environ[sid]:
self.server.environ[sid]['saved_session'] = \
_ManagedSession(flask.session)
session_obj = self.server.environ[sid]['saved_session']
else:
# let Flask handle the user session
# for cookie based sessions, this effectively freezes the
# session to its state at connection time
# for server-side sessions, this allows HTTP and Socket.IO to
# share the session, with both having read/write access to it
session_obj = flask.session._get_current_object()
_request_ctx_stack.top.session = session_obj
flask.request.sid = sid
flask.request.namespace = namespace
flask.request.event = {'message': message, 'args': args}
try:
if message == 'connect':
ret = handler()
else:
ret = handler(*args)
except:
err_handler = self.exception_handlers.get(
namespace, self.default_exception_handler)
if err_handler is None:
raise
type, value, traceback = sys.exc_info()
return err_handler(value)
if not self.manage_session:
# when Flask is managing the user session, it needs to save it
if not hasattr(session_obj, 'modified') or session_obj.modified:
resp = app.response_class()
app.session_interface.save_session(app, session_obj, resp)
return ret
|
miguelgrinberg/Flask-SocketIO | flask_socketio/__init__.py | SocketIO.on_error_default | python | def on_error_default(self, exception_handler):
if not callable(exception_handler):
raise ValueError('exception_handler must be callable')
self.default_exception_handler = exception_handler
return exception_handler | Decorator to define a default error handler for SocketIO events.
This decorator can be applied to a function that acts as a default
error handler for any namespaces that do not have a specific handler.
Example::
@socketio.on_error_default
def error_handler(e):
print('An error has occurred: ' + str(e)) | train | https://github.com/miguelgrinberg/Flask-SocketIO/blob/4bef800d5e7ba7d98a6f4cd94191ff0b4496c334/flask_socketio/__init__.py#L292-L306 | null | class SocketIO(object):
"""Create a Flask-SocketIO server.
:param app: The flask application instance. If the application instance
isn't known at the time this class is instantiated, then call
``socketio.init_app(app)`` once the application instance is
available.
:param manage_session: If set to ``True``, this extension manages the user
session for Socket.IO events. If set to ``False``,
Flask's own session management is used. When using
Flask's cookie based sessions it is recommended that
you leave this set to the default of ``True``. When
using server-side sessions, a ``False`` setting
enables sharing the user session between HTTP routes
and Socket.IO events.
:param message_queue: A connection URL for a message queue service the
server can use for multi-process communication. A
message queue is not required when using a single
server process.
:param channel: The channel name, when using a message queue. If a channel
isn't specified, a default channel will be used. If
multiple clusters of SocketIO processes need to use the
same message queue without interfering with each other, then
each cluster should use a different channel.
:param path: The path where the Socket.IO server is exposed. Defaults to
``'socket.io'``. Leave this as is unless you know what you are
doing.
:param resource: Alias to ``path``.
:param kwargs: Socket.IO and Engine.IO server options.
The Socket.IO server options are detailed below:
:param client_manager: The client manager instance that will manage the
client list. When this is omitted, the client list
is stored in an in-memory structure, so the use of
multiple connected servers is not possible. In most
cases, this argument does not need to be set
explicitly.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``.
:param binary: ``True`` to support binary payloads, ``False`` to treat all
payloads as text. On Python 2, if this is set to ``True``,
``unicode`` values are treated as text, and ``str`` and
``bytes`` values are treated as binary. This option has no
effect on Python 3, where text and binary payloads are
always automatically discovered.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions. To use the same json encoder and decoder as a Flask
application, use ``flask.json``.
The Engine.IO server configuration supports the following settings:
:param async_mode: The asynchronous model to use. See the Deployment
section in the documentation for a description of the
available options. Valid async modes are
``threading``, ``eventlet``, ``gevent`` and
``gevent_uwsgi``. If this argument is not given,
``eventlet`` is tried first, then ``gevent_uwsgi``,
then ``gevent``, and finally ``threading``. The
first async mode that has all its dependencies installed
is then one that is chosen.
:param ping_timeout: The time in seconds that the client waits for the
server to respond before disconnecting. The default is
60 seconds.
:param ping_interval: The interval in seconds at which the client pings
the server. The default is 25 seconds.
:param max_http_buffer_size: The maximum size of a message when using the
polling transport. The default is 100,000,000
bytes.
:param allow_upgrades: Whether to allow transport upgrades or not. The
default is ``True``.
:param http_compression: Whether to compress packages when using the
polling transport. The default is ``True``.
:param compression_threshold: Only compress messages when their byte size
is greater than this value. The default is
1024 bytes.
:param cookie: Name of the HTTP cookie that contains the client session
id. If set to ``None``, a cookie is not sent to the client.
The default is ``'io'``.
:param cors_allowed_origins: List of origins that are allowed to connect
to this server. All origins are allowed by
default.
:param cors_credentials: Whether credentials (cookies, authentication) are
allowed in requests to this server. The default is
``True``.
:param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
a logger object to use. To disable logging set to
``False``. The default is ``False``.
"""
def __init__(self, app=None, **kwargs):
self.server = None
self.server_options = {}
self.wsgi_server = None
self.handlers = []
self.namespace_handlers = []
self.exception_handlers = {}
self.default_exception_handler = None
self.manage_session = True
# We can call init_app when:
# - we were given the Flask app instance (standard initialization)
# - we were not given the app, but we were given a message_queue
# (standard initialization for auxiliary process)
# In all other cases we collect the arguments and assume the client
# will call init_app from an app factory function.
if app is not None or 'message_queue' in kwargs:
self.init_app(app, **kwargs)
else:
self.server_options.update(kwargs)
def init_app(self, app, **kwargs):
if app is not None:
if not hasattr(app, 'extensions'):
app.extensions = {} # pragma: no cover
app.extensions['socketio'] = self
self.server_options.update(kwargs)
self.manage_session = self.server_options.pop('manage_session',
self.manage_session)
if 'client_manager' not in self.server_options:
url = self.server_options.pop('message_queue', None)
channel = self.server_options.pop('channel', 'flask-socketio')
write_only = app is None
if url:
if url.startswith(('redis://', "rediss://")):
queue_class = socketio.RedisManager
elif url.startswith('zmq'):
queue_class = socketio.ZmqManager
else:
queue_class = socketio.KombuManager
queue = queue_class(url, channel=channel,
write_only=write_only)
self.server_options['client_manager'] = queue
if 'json' in self.server_options and \
self.server_options['json'] == flask_json:
# flask's json module is tricky to use because its output
# changes when it is invoked inside or outside the app context
# so here to prevent any ambiguities we replace it with wrappers
# that ensure that the app context is always present
class FlaskSafeJSON(object):
@staticmethod
def dumps(*args, **kwargs):
with app.app_context():
return flask_json.dumps(*args, **kwargs)
@staticmethod
def loads(*args, **kwargs):
with app.app_context():
return flask_json.loads(*args, **kwargs)
self.server_options['json'] = FlaskSafeJSON
resource = self.server_options.pop('path', None) or \
self.server_options.pop('resource', None) or 'socket.io'
if resource.startswith('/'):
resource = resource[1:]
if os.environ.get('FLASK_RUN_FROM_CLI'):
if self.server_options.get('async_mode') is None:
if app is not None:
app.logger.warning(
'Flask-SocketIO is Running under Werkzeug, WebSocket '
'is not available.')
self.server_options['async_mode'] = 'threading'
self.server = socketio.Server(**self.server_options)
self.async_mode = self.server.async_mode
for handler in self.handlers:
self.server.on(handler[0], handler[1], namespace=handler[2])
for namespace_handler in self.namespace_handlers:
self.server.register_namespace(namespace_handler)
if app is not None:
# here we attach the SocketIO middlware to the SocketIO object so it
# can be referenced later if debug middleware needs to be inserted
self.sockio_mw = _SocketIOMiddleware(self.server, app,
socketio_path=resource)
app.wsgi_app = self.sockio_mw
    def on(self, message, namespace=None):
        """Decorator to register a SocketIO event handler.

        This decorator must be applied to SocketIO event handlers. Example::

            @socketio.on('my event', namespace='/chat')
            def handle_my_custom_event(json):
                print('received json: ' + str(json))

        :param message: The name of the event. This is normally a user defined
                        string, but a few event names are already defined. Use
                        ``'message'`` to define a handler that takes a string
                        payload, ``'json'`` to define a handler that takes a
                        JSON blob payload, ``'connect'`` or ``'disconnect'``
                        to create handlers for connection and disconnection
                        events.
        :param namespace: The namespace on which the handler is to be
                          registered. Defaults to the global namespace.
        """
        namespace = namespace or '/'

        def decorator(handler):
            # Wrap the user handler so that, when the event arrives, it runs
            # through _handle_event, which sets up a Flask request context.
            def _handler(sid, *args):
                return self._handle_event(handler, message, namespace, sid,
                                          *args)

            if self.server:
                self.server.on(message, _handler, namespace=namespace)
            else:
                # Server not created yet (init_app not called); queue the
                # registration so init_app can replay it later.
                self.handlers.append((message, _handler, namespace))
            # Return the original handler so the decorator is transparent.
            return handler
        return decorator
    def on_error(self, namespace=None):
        """Decorator to define a custom error handler for SocketIO events.

        This decorator can be applied to a function that acts as an error
        handler for a namespace. This handler will be invoked when a SocketIO
        event handler raises an exception. The handler function must accept one
        argument, which is the exception raised. Example::

            @socketio.on_error(namespace='/chat')
            def chat_error_handler(e):
                print('An error has occurred: ' + str(e))

        :param namespace: The namespace for which to register the error
                          handler. Defaults to the global namespace.
        """
        namespace = namespace or '/'

        def decorator(exception_handler):
            if not callable(exception_handler):
                raise ValueError('exception_handler must be callable')
            # One error handler per namespace; a later registration for the
            # same namespace replaces the earlier one.
            self.exception_handlers[namespace] = exception_handler
            return exception_handler
        return decorator
    def on_event(self, message, handler, namespace=None):
        """Register a SocketIO event handler.

        ``on_event`` is the non-decorator version of ``'on'``.

        Example::

            def on_foo_event(json):
                print('received json: ' + str(json))

            socketio.on_event('my event', on_foo_event, namespace='/chat')

        :param message: The name of the event. This is normally a user defined
                        string, but a few event names are already defined. Use
                        ``'message'`` to define a handler that takes a string
                        payload, ``'json'`` to define a handler that takes a
                        JSON blob payload, ``'connect'`` or ``'disconnect'``
                        to create handlers for connection and disconnection
                        events.
        :param handler: The function that handles the event.
        :param namespace: The namespace on which the handler is to be
                          registered. Defaults to the global namespace.
        """
        # Reuse the decorator form so both registration paths share one
        # implementation.
        self.on(message, namespace=namespace)(handler)
    def on_namespace(self, namespace_handler):
        """Register a class-based namespace handler.

        :param namespace_handler: A ``Namespace`` instance that implements
                                  the event handlers for one namespace.
        :raises ValueError: If the argument is not a ``Namespace`` instance.
        """
        if not isinstance(namespace_handler, Namespace):
            raise ValueError('Not a namespace instance.')
        # Give the namespace object a back-reference to this extension.
        namespace_handler._set_socketio(self)
        if self.server:
            self.server.register_namespace(namespace_handler)
        else:
            # Server not created yet; defer registration until init_app.
            self.namespace_handlers.append(namespace_handler)
    def emit(self, event, *args, **kwargs):
        """Emit a server generated SocketIO event.

        This function emits a SocketIO event to one or more connected clients.
        A JSON blob can be attached to the event as payload. This function can
        be used outside of a SocketIO event context, so it is appropriate to
        use when the server is the originator of an event, outside of any
        client context, such as in a regular HTTP request handler or a
        background task. Example::

            @app.route('/ping')
            def ping():
                socketio.emit('ping event', {'data': 42}, namespace='/chat')

        :param event: The name of the user event to emit.
        :param args: A dictionary with the JSON data to send as payload.
        :param namespace: The namespace under which the message is to be sent.
                          Defaults to the global namespace.
        :param room: Send the message to all the users in the given room. If
                     this parameter is not included, the event is sent to
                     all connected users.
        :param skip_sid: The session id of a client to ignore when broadcasting
                         or addressing a room. This is typically set to the
                         originator of the message, so that everyone except
                         that client receive the message.
        :param callback: If given, this function will be called to acknowledge
                         that the client has received the message. The
                         arguments that will be passed to the function are
                         those provided by the client. Callback functions can
                         only be used when addressing an individual client.
        """
        namespace = kwargs.pop('namespace', '/')
        room = kwargs.pop('room', None)
        include_self = kwargs.pop('include_self', True)
        skip_sid = kwargs.pop('skip_sid', None)
        if not include_self and not skip_sid:
            # Excluding the sender is implemented by skipping its session id.
            # This branch only works inside an event context, where
            # flask.request.sid exists.
            skip_sid = flask.request.sid
        callback = kwargs.pop('callback', None)
        self.server.emit(event, *args, namespace=namespace, room=room,
                         skip_sid=skip_sid, callback=callback, **kwargs)
    def send(self, data, json=False, namespace=None, room=None,
             callback=None, include_self=True, skip_sid=None, **kwargs):
        """Send a server-generated SocketIO message.

        This function sends a simple SocketIO message to one or more connected
        clients. The message can be a string or a JSON blob. This is a simpler
        version of ``emit()``, which should be preferred. This function can be
        used outside of a SocketIO event context, so it is appropriate to use
        when the server is the originator of an event.

        :param data: The message to send, either a string or a JSON blob.
        :param json: ``True`` if ``message`` is a JSON blob, ``False``
                     otherwise.
        :param namespace: The namespace under which the message is to be sent.
                          Defaults to the global namespace.
        :param room: Send the message only to the users in the given room. If
                     this parameter is not included, the message is sent to
                     all connected users.
        :param skip_sid: The session id of a client to ignore when broadcasting
                         or addressing a room. This is typically set to the
                         originator of the message, so that everyone except
                         that client receive the message.
        :param callback: If given, this function will be called to acknowledge
                         that the client has received the message. The
                         arguments that will be passed to the function are
                         those provided by the client. Callback functions can
                         only be used when addressing an individual client.
        """
        # include_self=False only works inside an event context, where
        # flask.request.sid exists.
        skip_sid = flask.request.sid if not include_self else skip_sid
        if json:
            self.emit('json', data, namespace=namespace, room=room,
                      skip_sid=skip_sid, callback=callback, **kwargs)
        else:
            self.emit('message', data, namespace=namespace, room=room,
                      skip_sid=skip_sid, callback=callback, **kwargs)
    def close_room(self, room, namespace=None):
        """Close a room.

        This function removes any users that are in the given room and then
        deletes the room from the server. This function can be used outside
        of a SocketIO event context.

        :param room: The name of the room to close.
        :param namespace: The namespace under which the room exists. Defaults
                          to the global namespace.
        """
        # Delegate directly to the underlying python-socketio server.
        self.server.close_room(room, namespace)
    def run(self, app, host=None, port=None, **kwargs):
        """Run the SocketIO web server.

        :param app: The Flask application instance.
        :param host: The hostname or IP address for the server to listen on.
                     Defaults to 127.0.0.1.
        :param port: The port number for the server to listen on. Defaults to
                     5000.
        :param debug: ``True`` to start the server in debug mode, ``False`` to
                      start in normal mode.
        :param use_reloader: ``True`` to enable the Flask reloader, ``False``
                             to disable it.
        :param extra_files: A list of additional files that the Flask
                            reloader should watch. Defaults to ``None``
        :param log_output: If ``True``, the server logs all incoming
                           connections. If ``False`` logging is disabled.
                           Defaults to ``True`` in debug mode, ``False``
                           in normal mode. Unused when the threading async
                           mode is used.
        :param kwargs: Additional web server options. The web server options
                       are specific to the server used in each of the supported
                       async modes. Note that options provided here will
                       not be seen when using an external web server such
                       as gunicorn, since this method is not called in that
                       case.
        """
        if host is None:
            host = '127.0.0.1'
        if port is None:
            # Derive the port from SERVER_NAME ("host:port") when set.
            server_name = app.config['SERVER_NAME']
            if server_name and ':' in server_name:
                port = int(server_name.rsplit(':', 1)[1])
            else:
                port = 5000
        # log_output and use_reloader default to the debug setting.
        debug = kwargs.pop('debug', app.debug)
        log_output = kwargs.pop('log_output', debug)
        use_reloader = kwargs.pop('use_reloader', debug)
        extra_files = kwargs.pop('extra_files', None)
        app.debug = debug
        if app.debug and self.server.eio.async_mode != 'threading':
            # put the debug middleware between the SocketIO middleware
            # and the Flask application instance
            #
            #    mw1    mw2    mw3   Flask app
            #     o ---- o ---- o ---- o
            #    /
            #   o Flask-SocketIO
            #    \  middleware
            #     o
            # Flask-SocketIO WebSocket handler
            #
            # BECOMES
            #
            #  dbg-mw  mw1    mw2    mw3   Flask app
            #     o ---- o ---- o ---- o ---- o
            #    /
            #   o Flask-SocketIO
            #    \  middleware
            #     o
            # Flask-SocketIO WebSocket handler
            #
            self.sockio_mw.wsgi_app = DebuggedApplication(self.sockio_mw.wsgi_app,
                                                          evalex=True)
        if self.server.eio.async_mode == 'threading':
            # Fall back to Werkzeug's threaded dev server (no WebSocket).
            from werkzeug._internal import _log
            _log('warning', 'WebSocket transport not available. Install '
                            'eventlet or gevent and gevent-websocket for '
                            'improved performance.')
            app.run(host=host, port=port, threaded=True,
                    use_reloader=use_reloader, **kwargs)
        elif self.server.eio.async_mode == 'eventlet':
            def run_server():
                import eventlet
                import eventlet.wsgi
                import eventlet.green
                addresses = eventlet.green.socket.getaddrinfo(host, port)
                if not addresses:
                    raise RuntimeError('Could not resolve host to a valid address')
                # getaddrinfo entries are (family, type, proto, canonname,
                # sockaddr); pass sockaddr and family to eventlet.listen.
                eventlet_socket = eventlet.listen(addresses[0][4], addresses[0][0])
                # If provided an SSL argument, use an SSL socket
                ssl_args = ['keyfile', 'certfile', 'server_side', 'cert_reqs',
                            'ssl_version', 'ca_certs',
                            'do_handshake_on_connect', 'suppress_ragged_eofs',
                            'ciphers']
                ssl_params = {k: kwargs[k] for k in kwargs if k in ssl_args}
                if len(ssl_params) > 0:
                    # Remove SSL-only options so they are not forwarded to
                    # the WSGI server below.
                    for k in ssl_params:
                        kwargs.pop(k)
                    ssl_params['server_side'] = True  # Listening requires true
                    eventlet_socket = eventlet.wrap_ssl(eventlet_socket,
                                                        **ssl_params)
                eventlet.wsgi.server(eventlet_socket, app,
                                     log_output=log_output, **kwargs)

            if use_reloader:
                run_with_reloader(run_server, extra_files=extra_files)
            else:
                run_server()
        elif self.server.eio.async_mode == 'gevent':
            from gevent import pywsgi
            try:
                # WebSocket support is optional under gevent.
                from geventwebsocket.handler import WebSocketHandler
                websocket = True
            except ImportError:
                websocket = False

            log = 'default'
            if not log_output:
                log = None
            if websocket:
                self.wsgi_server = pywsgi.WSGIServer(
                    (host, port), app, handler_class=WebSocketHandler,
                    log=log, **kwargs)
            else:
                self.wsgi_server = pywsgi.WSGIServer((host, port), app,
                                                     log=log, **kwargs)

            if use_reloader:
                # monkey patching is required by the reloader
                from gevent import monkey
                monkey.patch_all()

                def run_server():
                    self.wsgi_server.serve_forever()

                run_with_reloader(run_server, extra_files=extra_files)
            else:
                self.wsgi_server.serve_forever()
    def stop(self):
        """Stop a running SocketIO web server.

        This method must be called from a HTTP or SocketIO handler function.
        """
        if self.server.eio.async_mode == 'threading':
            # Werkzeug exposes a shutdown hook through the WSGI environ.
            func = flask.request.environ.get('werkzeug.server.shutdown')
            if func:
                func()
            else:
                raise RuntimeError('Cannot stop unknown web server')
        elif self.server.eio.async_mode == 'eventlet':
            # SystemExit unwinds out of the eventlet server loop started
            # by run().
            raise SystemExit
        elif self.server.eio.async_mode == 'gevent':
            self.wsgi_server.stop()
    def start_background_task(self, target, *args, **kwargs):
        """Start a background task using the appropriate async model.

        This is a utility function that applications can use to start a
        background task using the method that is compatible with the
        selected async mode.

        :param target: the target function to execute.
        :param args: arguments to pass to the function.
        :param kwargs: keyword arguments to pass to the function.

        This function returns an object compatible with the `Thread` class in
        the Python standard library. The `start()` method on this object is
        already called by this function.
        """
        # The underlying server knows the async mode and picks the right
        # green thread / thread implementation.
        return self.server.start_background_task(target, *args, **kwargs)
    def sleep(self, seconds=0):
        """Sleep for the requested amount of time using the appropriate async
        model.

        This is a utility function that applications can use to put a task to
        sleep without having to worry about using the correct call for the
        selected async mode.

        :param seconds: the amount of time to sleep, in seconds.
        """
        return self.server.sleep(seconds)
    def test_client(self, app, namespace=None, query_string=None,
                    headers=None, flask_test_client=None):
        """The Socket.IO test client is useful for testing a Flask-SocketIO
        server. It works in a similar way to the Flask Test Client, but
        adapted to the Socket.IO server.

        :param app: The Flask application instance.
        :param namespace: The namespace for the client. If not provided, the
                          client connects to the server on the global
                          namespace.
        :param query_string: A string with custom query string arguments.
        :param headers: A dictionary with custom HTTP headers.
        :param flask_test_client: The instance of the Flask test client
                                  currently in use. Passing the Flask test
                                  client is optional, but is necessary if you
                                  want the Flask user session and any other
                                  cookies set in HTTP routes accessible from
                                  Socket.IO events.
        """
        return SocketIOTestClient(app, self, namespace=namespace,
                                  query_string=query_string, headers=headers,
                                  flask_test_client=flask_test_client)
def _handle_event(self, handler, message, namespace, sid, *args):
if sid not in self.server.environ:
# we don't have record of this client, ignore this event
return '', 400
app = self.server.environ[sid]['flask.app']
with app.request_context(self.server.environ[sid]):
if self.manage_session:
# manage a separate session for this client's Socket.IO events
# created as a copy of the regular user session
if 'saved_session' not in self.server.environ[sid]:
self.server.environ[sid]['saved_session'] = \
_ManagedSession(flask.session)
session_obj = self.server.environ[sid]['saved_session']
else:
# let Flask handle the user session
# for cookie based sessions, this effectively freezes the
# session to its state at connection time
# for server-side sessions, this allows HTTP and Socket.IO to
# share the session, with both having read/write access to it
session_obj = flask.session._get_current_object()
_request_ctx_stack.top.session = session_obj
flask.request.sid = sid
flask.request.namespace = namespace
flask.request.event = {'message': message, 'args': args}
try:
if message == 'connect':
ret = handler()
else:
ret = handler(*args)
except:
err_handler = self.exception_handlers.get(
namespace, self.default_exception_handler)
if err_handler is None:
raise
type, value, traceback = sys.exc_info()
return err_handler(value)
if not self.manage_session:
# when Flask is managing the user session, it needs to save it
if not hasattr(session_obj, 'modified') or session_obj.modified:
resp = app.response_class()
app.session_interface.save_session(app, session_obj, resp)
return ret
|
miguelgrinberg/Flask-SocketIO | flask_socketio/__init__.py | SocketIO.on_event | python | def on_event(self, message, handler, namespace=None):
self.on(message, namespace=namespace)(handler) | Register a SocketIO event handler.
``on_event`` is the non-decorator version of ``'on'``.
Example::
def on_foo_event(json):
print('received json: ' + str(json))
socketio.on_event('my event', on_foo_event, namespace='/chat')
:param message: The name of the event. This is normally a user defined
string, but a few event names are already defined. Use
``'message'`` to define a handler that takes a string
payload, ``'json'`` to define a handler that takes a
JSON blob payload, ``'connect'`` or ``'disconnect'``
to create handlers for connection and disconnection
events.
:param handler: The function that handles the event.
:param namespace: The namespace on which the handler is to be
registered. Defaults to the global namespace. | train | https://github.com/miguelgrinberg/Flask-SocketIO/blob/4bef800d5e7ba7d98a6f4cd94191ff0b4496c334/flask_socketio/__init__.py#L308-L331 | [
"def on(self, message, namespace=None):\n \"\"\"Decorator to register a SocketIO event handler.\n\n This decorator must be applied to SocketIO event handlers. Example::\n\n @socketio.on('my event', namespace='/chat')\n def handle_my_custom_event(json):\n print('received json: ' + str(json))\n\n :param message: The name of the event. This is normally a user defined\n string, but a few event names are already defined. Use\n ``'message'`` to define a handler that takes a string\n payload, ``'json'`` to define a handler that takes a\n JSON blob payload, ``'connect'`` or ``'disconnect'``\n to create handlers for connection and disconnection\n events.\n :param namespace: The namespace on which the handler is to be\n registered. Defaults to the global namespace.\n \"\"\"\n namespace = namespace or '/'\n\n def decorator(handler):\n def _handler(sid, *args):\n return self._handle_event(handler, message, namespace, sid,\n *args)\n\n if self.server:\n self.server.on(message, _handler, namespace=namespace)\n else:\n self.handlers.append((message, _handler, namespace))\n return handler\n return decorator\n",
"def decorator(handler):\n def _handler(sid, *args):\n return self._handle_event(handler, message, namespace, sid,\n *args)\n\n if self.server:\n self.server.on(message, _handler, namespace=namespace)\n else:\n self.handlers.append((message, _handler, namespace))\n return handler\n"
] | class SocketIO(object):
"""Create a Flask-SocketIO server.
:param app: The flask application instance. If the application instance
isn't known at the time this class is instantiated, then call
``socketio.init_app(app)`` once the application instance is
available.
:param manage_session: If set to ``True``, this extension manages the user
session for Socket.IO events. If set to ``False``,
Flask's own session management is used. When using
Flask's cookie based sessions it is recommended that
you leave this set to the default of ``True``. When
using server-side sessions, a ``False`` setting
enables sharing the user session between HTTP routes
and Socket.IO events.
:param message_queue: A connection URL for a message queue service the
server can use for multi-process communication. A
message queue is not required when using a single
server process.
:param channel: The channel name, when using a message queue. If a channel
isn't specified, a default channel will be used. If
multiple clusters of SocketIO processes need to use the
same message queue without interfering with each other, then
each cluster should use a different channel.
:param path: The path where the Socket.IO server is exposed. Defaults to
``'socket.io'``. Leave this as is unless you know what you are
doing.
:param resource: Alias to ``path``.
:param kwargs: Socket.IO and Engine.IO server options.
The Socket.IO server options are detailed below:
:param client_manager: The client manager instance that will manage the
client list. When this is omitted, the client list
is stored in an in-memory structure, so the use of
multiple connected servers is not possible. In most
cases, this argument does not need to be set
explicitly.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``.
:param binary: ``True`` to support binary payloads, ``False`` to treat all
payloads as text. On Python 2, if this is set to ``True``,
``unicode`` values are treated as text, and ``str`` and
``bytes`` values are treated as binary. This option has no
effect on Python 3, where text and binary payloads are
always automatically discovered.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions. To use the same json encoder and decoder as a Flask
application, use ``flask.json``.
The Engine.IO server configuration supports the following settings:
:param async_mode: The asynchronous model to use. See the Deployment
section in the documentation for a description of the
available options. Valid async modes are
``threading``, ``eventlet``, ``gevent`` and
``gevent_uwsgi``. If this argument is not given,
``eventlet`` is tried first, then ``gevent_uwsgi``,
then ``gevent``, and finally ``threading``. The
first async mode that has all its dependencies installed
is then one that is chosen.
:param ping_timeout: The time in seconds that the client waits for the
server to respond before disconnecting. The default is
60 seconds.
:param ping_interval: The interval in seconds at which the client pings
the server. The default is 25 seconds.
:param max_http_buffer_size: The maximum size of a message when using the
polling transport. The default is 100,000,000
bytes.
:param allow_upgrades: Whether to allow transport upgrades or not. The
default is ``True``.
:param http_compression: Whether to compress packages when using the
polling transport. The default is ``True``.
:param compression_threshold: Only compress messages when their byte size
is greater than this value. The default is
1024 bytes.
:param cookie: Name of the HTTP cookie that contains the client session
id. If set to ``None``, a cookie is not sent to the client.
The default is ``'io'``.
:param cors_allowed_origins: List of origins that are allowed to connect
to this server. All origins are allowed by
default.
:param cors_credentials: Whether credentials (cookies, authentication) are
allowed in requests to this server. The default is
``True``.
:param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
a logger object to use. To disable logging set to
``False``. The default is ``False``.
"""
def __init__(self, app=None, **kwargs):
self.server = None
self.server_options = {}
self.wsgi_server = None
self.handlers = []
self.namespace_handlers = []
self.exception_handlers = {}
self.default_exception_handler = None
self.manage_session = True
# We can call init_app when:
# - we were given the Flask app instance (standard initialization)
# - we were not given the app, but we were given a message_queue
# (standard initialization for auxiliary process)
# In all other cases we collect the arguments and assume the client
# will call init_app from an app factory function.
if app is not None or 'message_queue' in kwargs:
self.init_app(app, **kwargs)
else:
self.server_options.update(kwargs)
    def init_app(self, app, **kwargs):
        """Create the Socket.IO server and attach it to the Flask *app*.

        May be called with ``app=None`` for a write-only auxiliary process
        that only posts messages through a message queue.

        :param app: The Flask application instance, or ``None``.
        :param kwargs: Additional Socket.IO / Engine.IO server options,
                       merged into any options given to the constructor.
        """
        if app is not None:
            if not hasattr(app, 'extensions'):
                app.extensions = {}  # pragma: no cover
            app.extensions['socketio'] = self
        self.server_options.update(kwargs)
        self.manage_session = self.server_options.pop('manage_session',
                                                      self.manage_session)
        if 'client_manager' not in self.server_options:
            # Build a client manager from the message queue URL, choosing
            # the backend by URL scheme.
            url = self.server_options.pop('message_queue', None)
            channel = self.server_options.pop('channel', 'flask-socketio')
            write_only = app is None
            if url:
                if url.startswith(('redis://', "rediss://")):
                    queue_class = socketio.RedisManager
                elif url.startswith('zmq'):
                    queue_class = socketio.ZmqManager
                else:
                    queue_class = socketio.KombuManager
                queue = queue_class(url, channel=channel,
                                    write_only=write_only)
                self.server_options['client_manager'] = queue
        if 'json' in self.server_options and \
                self.server_options['json'] == flask_json:
            # flask's json module is tricky to use because its output
            # changes when it is invoked inside or outside the app context
            # so here to prevent any ambiguities we replace it with wrappers
            # that ensure that the app context is always present
            class FlaskSafeJSON(object):
                @staticmethod
                def dumps(*args, **kwargs):
                    with app.app_context():
                        return flask_json.dumps(*args, **kwargs)

                @staticmethod
                def loads(*args, **kwargs):
                    with app.app_context():
                        return flask_json.loads(*args, **kwargs)

            self.server_options['json'] = FlaskSafeJSON
        # 'path' and its alias 'resource' select the endpoint the Socket.IO
        # server is mounted on; strip a leading slash if present.
        resource = self.server_options.pop('path', None) or \
            self.server_options.pop('resource', None) or 'socket.io'
        if resource.startswith('/'):
            resource = resource[1:]
        if os.environ.get('FLASK_RUN_FROM_CLI'):
            # Under "flask run" only the threading mode works, since the
            # Werkzeug dev server cannot serve WebSocket.
            if self.server_options.get('async_mode') is None:
                if app is not None:
                    app.logger.warning(
                        'Flask-SocketIO is Running under Werkzeug, WebSocket '
                        'is not available.')
                self.server_options['async_mode'] = 'threading'
        self.server = socketio.Server(**self.server_options)
        self.async_mode = self.server.async_mode
        # Replay registrations that were queued before the server existed.
        for handler in self.handlers:
            self.server.on(handler[0], handler[1], namespace=handler[2])
        for namespace_handler in self.namespace_handlers:
            self.server.register_namespace(namespace_handler)
        if app is not None:
            # here we attach the SocketIO middleware to the SocketIO object so
            # it can be referenced later if debug middleware needs to be
            # inserted
            self.sockio_mw = _SocketIOMiddleware(self.server, app,
                                                 socketio_path=resource)
            app.wsgi_app = self.sockio_mw
def on(self, message, namespace=None):
"""Decorator to register a SocketIO event handler.
This decorator must be applied to SocketIO event handlers. Example::
@socketio.on('my event', namespace='/chat')
def handle_my_custom_event(json):
print('received json: ' + str(json))
:param message: The name of the event. This is normally a user defined
string, but a few event names are already defined. Use
``'message'`` to define a handler that takes a string
payload, ``'json'`` to define a handler that takes a
JSON blob payload, ``'connect'`` or ``'disconnect'``
to create handlers for connection and disconnection
events.
:param namespace: The namespace on which the handler is to be
registered. Defaults to the global namespace.
"""
namespace = namespace or '/'
def decorator(handler):
def _handler(sid, *args):
return self._handle_event(handler, message, namespace, sid,
*args)
if self.server:
self.server.on(message, _handler, namespace=namespace)
else:
self.handlers.append((message, _handler, namespace))
return handler
return decorator
def on_error(self, namespace=None):
"""Decorator to define a custom error handler for SocketIO events.
This decorator can be applied to a function that acts as an error
handler for a namespace. This handler will be invoked when a SocketIO
event handler raises an exception. The handler function must accept one
argument, which is the exception raised. Example::
@socketio.on_error(namespace='/chat')
def chat_error_handler(e):
print('An error has occurred: ' + str(e))
:param namespace: The namespace for which to register the error
handler. Defaults to the global namespace.
"""
namespace = namespace or '/'
def decorator(exception_handler):
if not callable(exception_handler):
raise ValueError('exception_handler must be callable')
self.exception_handlers[namespace] = exception_handler
return exception_handler
return decorator
def on_error_default(self, exception_handler):
"""Decorator to define a default error handler for SocketIO events.
This decorator can be applied to a function that acts as a default
error handler for any namespaces that do not have a specific handler.
Example::
@socketio.on_error_default
def error_handler(e):
print('An error has occurred: ' + str(e))
"""
if not callable(exception_handler):
raise ValueError('exception_handler must be callable')
self.default_exception_handler = exception_handler
return exception_handler
    def on_namespace(self, namespace_handler):
        """Register a class-based namespace handler.

        :param namespace_handler: A ``Namespace`` instance that implements
                                  the event handlers for one namespace.
        :raises ValueError: If the argument is not a ``Namespace`` instance.
        """
        if not isinstance(namespace_handler, Namespace):
            raise ValueError('Not a namespace instance.')
        # Give the namespace object a back-reference to this extension.
        namespace_handler._set_socketio(self)
        if self.server:
            self.server.register_namespace(namespace_handler)
        else:
            # Server not created yet; defer registration until init_app.
            self.namespace_handlers.append(namespace_handler)
def emit(self, event, *args, **kwargs):
"""Emit a server generated SocketIO event.
This function emits a SocketIO event to one or more connected clients.
A JSON blob can be attached to the event as payload. This function can
be used outside of a SocketIO event context, so it is appropriate to
use when the server is the originator of an event, outside of any
client context, such as in a regular HTTP request handler or a
background task. Example::
@app.route('/ping')
def ping():
socketio.emit('ping event', {'data': 42}, namespace='/chat')
:param event: The name of the user event to emit.
:param args: A dictionary with the JSON data to send as payload.
:param namespace: The namespace under which the message is to be sent.
Defaults to the global namespace.
:param room: Send the message to all the users in the given room. If
this parameter is not included, the event is sent to
all connected users.
:param skip_sid: The session id of a client to ignore when broadcasting
or addressing a room. This is typically set to the
originator of the message, so that everyone except
that client receive the message.
:param callback: If given, this function will be called to acknowledge
that the client has received the message. The
arguments that will be passed to the function are
those provided by the client. Callback functions can
only be used when addressing an individual client.
"""
namespace = kwargs.pop('namespace', '/')
room = kwargs.pop('room', None)
include_self = kwargs.pop('include_self', True)
skip_sid = kwargs.pop('skip_sid', None)
if not include_self and not skip_sid:
skip_sid = flask.request.sid
callback = kwargs.pop('callback', None)
self.server.emit(event, *args, namespace=namespace, room=room,
skip_sid=skip_sid, callback=callback, **kwargs)
def send(self, data, json=False, namespace=None, room=None,
callback=None, include_self=True, skip_sid=None, **kwargs):
"""Send a server-generated SocketIO message.
This function sends a simple SocketIO message to one or more connected
clients. The message can be a string or a JSON blob. This is a simpler
version of ``emit()``, which should be preferred. This function can be
used outside of a SocketIO event context, so it is appropriate to use
when the server is the originator of an event.
:param data: The message to send, either a string or a JSON blob.
:param json: ``True`` if ``message`` is a JSON blob, ``False``
otherwise.
:param namespace: The namespace under which the message is to be sent.
Defaults to the global namespace.
:param room: Send the message only to the users in the given room. If
this parameter is not included, the message is sent to
all connected users.
:param skip_sid: The session id of a client to ignore when broadcasting
or addressing a room. This is typically set to the
originator of the message, so that everyone except
that client receive the message.
:param callback: If given, this function will be called to acknowledge
that the client has received the message. The
arguments that will be passed to the function are
those provided by the client. Callback functions can
only be used when addressing an individual client.
"""
skip_sid = flask.request.sid if not include_self else skip_sid
if json:
self.emit('json', data, namespace=namespace, room=room,
skip_sid=skip_sid, callback=callback, **kwargs)
else:
self.emit('message', data, namespace=namespace, room=room,
skip_sid=skip_sid, callback=callback, **kwargs)
def close_room(self, room, namespace=None):
"""Close a room.
This function removes any users that are in the given room and then
deletes the room from the server. This function can be used outside
of a SocketIO event context.
:param room: The name of the room to close.
:param namespace: The namespace under which the room exists. Defaults
to the global namespace.
"""
self.server.close_room(room, namespace)
def run(self, app, host=None, port=None, **kwargs):
"""Run the SocketIO web server.
:param app: The Flask application instance.
:param host: The hostname or IP address for the server to listen on.
Defaults to 127.0.0.1.
:param port: The port number for the server to listen on. Defaults to
5000.
:param debug: ``True`` to start the server in debug mode, ``False`` to
start in normal mode.
:param use_reloader: ``True`` to enable the Flask reloader, ``False``
to disable it.
:param extra_files: A list of additional files that the Flask
reloader should watch. Defaults to ``None``
:param log_output: If ``True``, the server logs all incomming
connections. If ``False`` logging is disabled.
Defaults to ``True`` in debug mode, ``False``
in normal mode. Unused when the threading async
mode is used.
:param kwargs: Additional web server options. The web server options
are specific to the server used in each of the supported
async modes. Note that options provided here will
not be seen when using an external web server such
as gunicorn, since this method is not called in that
case.
"""
if host is None:
host = '127.0.0.1'
if port is None:
server_name = app.config['SERVER_NAME']
if server_name and ':' in server_name:
port = int(server_name.rsplit(':', 1)[1])
else:
port = 5000
debug = kwargs.pop('debug', app.debug)
log_output = kwargs.pop('log_output', debug)
use_reloader = kwargs.pop('use_reloader', debug)
extra_files = kwargs.pop('extra_files', None)
app.debug = debug
if app.debug and self.server.eio.async_mode != 'threading':
# put the debug middleware between the SocketIO middleware
# and the Flask application instance
#
# mw1 mw2 mw3 Flask app
# o ---- o ---- o ---- o
# /
# o Flask-SocketIO
# \ middleware
# o
# Flask-SocketIO WebSocket handler
#
# BECOMES
#
# dbg-mw mw1 mw2 mw3 Flask app
# o ---- o ---- o ---- o ---- o
# /
# o Flask-SocketIO
# \ middleware
# o
# Flask-SocketIO WebSocket handler
#
self.sockio_mw.wsgi_app = DebuggedApplication(self.sockio_mw.wsgi_app,
evalex=True)
if self.server.eio.async_mode == 'threading':
from werkzeug._internal import _log
_log('warning', 'WebSocket transport not available. Install '
'eventlet or gevent and gevent-websocket for '
'improved performance.')
app.run(host=host, port=port, threaded=True,
use_reloader=use_reloader, **kwargs)
elif self.server.eio.async_mode == 'eventlet':
def run_server():
import eventlet
import eventlet.wsgi
import eventlet.green
addresses = eventlet.green.socket.getaddrinfo(host, port)
if not addresses:
raise RuntimeError('Could not resolve host to a valid address')
eventlet_socket = eventlet.listen(addresses[0][4], addresses[0][0])
# If provided an SSL argument, use an SSL socket
ssl_args = ['keyfile', 'certfile', 'server_side', 'cert_reqs',
'ssl_version', 'ca_certs',
'do_handshake_on_connect', 'suppress_ragged_eofs',
'ciphers']
ssl_params = {k: kwargs[k] for k in kwargs if k in ssl_args}
if len(ssl_params) > 0:
for k in ssl_params:
kwargs.pop(k)
ssl_params['server_side'] = True # Listening requires true
eventlet_socket = eventlet.wrap_ssl(eventlet_socket,
**ssl_params)
eventlet.wsgi.server(eventlet_socket, app,
log_output=log_output, **kwargs)
if use_reloader:
run_with_reloader(run_server, extra_files=extra_files)
else:
run_server()
elif self.server.eio.async_mode == 'gevent':
from gevent import pywsgi
try:
from geventwebsocket.handler import WebSocketHandler
websocket = True
except ImportError:
websocket = False
log = 'default'
if not log_output:
log = None
if websocket:
self.wsgi_server = pywsgi.WSGIServer(
(host, port), app, handler_class=WebSocketHandler,
log=log, **kwargs)
else:
self.wsgi_server = pywsgi.WSGIServer((host, port), app,
log=log, **kwargs)
if use_reloader:
# monkey patching is required by the reloader
from gevent import monkey
monkey.patch_all()
def run_server():
self.wsgi_server.serve_forever()
run_with_reloader(run_server, extra_files=extra_files)
else:
self.wsgi_server.serve_forever()
def stop(self):
"""Stop a running SocketIO web server.
This method must be called from a HTTP or SocketIO handler function.
"""
if self.server.eio.async_mode == 'threading':
func = flask.request.environ.get('werkzeug.server.shutdown')
if func:
func()
else:
raise RuntimeError('Cannot stop unknown web server')
elif self.server.eio.async_mode == 'eventlet':
raise SystemExit
elif self.server.eio.async_mode == 'gevent':
self.wsgi_server.stop()
def start_background_task(self, target, *args, **kwargs):
"""Start a background task using the appropriate async model.
This is a utility function that applications can use to start a
background task using the method that is compatible with the
selected async mode.
:param target: the target function to execute.
:param args: arguments to pass to the function.
:param kwargs: keyword arguments to pass to the function.
This function returns an object compatible with the `Thread` class in
the Python standard library. The `start()` method on this object is
already called by this function.
"""
return self.server.start_background_task(target, *args, **kwargs)
def sleep(self, seconds=0):
"""Sleep for the requested amount of time using the appropriate async
model.
This is a utility function that applications can use to put a task to
sleep without having to worry about using the correct call for the
selected async mode.
"""
return self.server.sleep(seconds)
def test_client(self, app, namespace=None, query_string=None,
headers=None, flask_test_client=None):
"""The Socket.IO test client is useful for testing a Flask-SocketIO
server. It works in a similar way to the Flask Test Client, but
adapted to the Socket.IO server.
:param app: The Flask application instance.
:param namespace: The namespace for the client. If not provided, the
client connects to the server on the global
namespace.
:param query_string: A string with custom query string arguments.
:param headers: A dictionary with custom HTTP headers.
:param flask_test_client: The instance of the Flask test client
currently in use. Passing the Flask test
client is optional, but is necessary if you
want the Flask user session and any other
cookies set in HTTP routes accessible from
Socket.IO events.
"""
return SocketIOTestClient(app, self, namespace=namespace,
query_string=query_string, headers=headers,
flask_test_client=flask_test_client)
def _handle_event(self, handler, message, namespace, sid, *args):
if sid not in self.server.environ:
            # we don't have a record of this client, ignore this event
return '', 400
app = self.server.environ[sid]['flask.app']
with app.request_context(self.server.environ[sid]):
if self.manage_session:
# manage a separate session for this client's Socket.IO events
# created as a copy of the regular user session
if 'saved_session' not in self.server.environ[sid]:
self.server.environ[sid]['saved_session'] = \
_ManagedSession(flask.session)
session_obj = self.server.environ[sid]['saved_session']
else:
# let Flask handle the user session
# for cookie based sessions, this effectively freezes the
# session to its state at connection time
# for server-side sessions, this allows HTTP and Socket.IO to
# share the session, with both having read/write access to it
session_obj = flask.session._get_current_object()
_request_ctx_stack.top.session = session_obj
flask.request.sid = sid
flask.request.namespace = namespace
flask.request.event = {'message': message, 'args': args}
try:
if message == 'connect':
ret = handler()
else:
ret = handler(*args)
except:
err_handler = self.exception_handlers.get(
namespace, self.default_exception_handler)
if err_handler is None:
raise
type, value, traceback = sys.exc_info()
return err_handler(value)
if not self.manage_session:
# when Flask is managing the user session, it needs to save it
if not hasattr(session_obj, 'modified') or session_obj.modified:
resp = app.response_class()
app.session_interface.save_session(app, session_obj, resp)
return ret
|
miguelgrinberg/Flask-SocketIO | flask_socketio/__init__.py | SocketIO.emit | python | def emit(self, event, *args, **kwargs):
namespace = kwargs.pop('namespace', '/')
room = kwargs.pop('room', None)
include_self = kwargs.pop('include_self', True)
skip_sid = kwargs.pop('skip_sid', None)
if not include_self and not skip_sid:
skip_sid = flask.request.sid
callback = kwargs.pop('callback', None)
self.server.emit(event, *args, namespace=namespace, room=room,
skip_sid=skip_sid, callback=callback, **kwargs) | Emit a server generated SocketIO event.
This function emits a SocketIO event to one or more connected clients.
A JSON blob can be attached to the event as payload. This function can
be used outside of a SocketIO event context, so it is appropriate to
use when the server is the originator of an event, outside of any
client context, such as in a regular HTTP request handler or a
background task. Example::
@app.route('/ping')
def ping():
socketio.emit('ping event', {'data': 42}, namespace='/chat')
:param event: The name of the user event to emit.
:param args: A dictionary with the JSON data to send as payload.
:param namespace: The namespace under which the message is to be sent.
Defaults to the global namespace.
:param room: Send the message to all the users in the given room. If
this parameter is not included, the event is sent to
all connected users.
:param skip_sid: The session id of a client to ignore when broadcasting
or addressing a room. This is typically set to the
originator of the message, so that everyone except
that client receive the message.
:param callback: If given, this function will be called to acknowledge
that the client has received the message. The
arguments that will be passed to the function are
those provided by the client. Callback functions can
only be used when addressing an individual client. | train | https://github.com/miguelgrinberg/Flask-SocketIO/blob/4bef800d5e7ba7d98a6f4cd94191ff0b4496c334/flask_socketio/__init__.py#L342-L381 | null | class SocketIO(object):
"""Create a Flask-SocketIO server.
:param app: The flask application instance. If the application instance
isn't known at the time this class is instantiated, then call
``socketio.init_app(app)`` once the application instance is
available.
:param manage_session: If set to ``True``, this extension manages the user
session for Socket.IO events. If set to ``False``,
Flask's own session management is used. When using
Flask's cookie based sessions it is recommended that
you leave this set to the default of ``True``. When
using server-side sessions, a ``False`` setting
enables sharing the user session between HTTP routes
and Socket.IO events.
:param message_queue: A connection URL for a message queue service the
server can use for multi-process communication. A
message queue is not required when using a single
server process.
:param channel: The channel name, when using a message queue. If a channel
isn't specified, a default channel will be used. If
multiple clusters of SocketIO processes need to use the
same message queue without interfering with each other, then
each cluster should use a different channel.
:param path: The path where the Socket.IO server is exposed. Defaults to
``'socket.io'``. Leave this as is unless you know what you are
doing.
:param resource: Alias to ``path``.
:param kwargs: Socket.IO and Engine.IO server options.
The Socket.IO server options are detailed below:
:param client_manager: The client manager instance that will manage the
client list. When this is omitted, the client list
is stored in an in-memory structure, so the use of
multiple connected servers is not possible. In most
cases, this argument does not need to be set
explicitly.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``.
:param binary: ``True`` to support binary payloads, ``False`` to treat all
payloads as text. On Python 2, if this is set to ``True``,
``unicode`` values are treated as text, and ``str`` and
``bytes`` values are treated as binary. This option has no
effect on Python 3, where text and binary payloads are
always automatically discovered.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions. To use the same json encoder and decoder as a Flask
application, use ``flask.json``.
The Engine.IO server configuration supports the following settings:
:param async_mode: The asynchronous model to use. See the Deployment
section in the documentation for a description of the
available options. Valid async modes are
``threading``, ``eventlet``, ``gevent`` and
``gevent_uwsgi``. If this argument is not given,
``eventlet`` is tried first, then ``gevent_uwsgi``,
then ``gevent``, and finally ``threading``. The
first async mode that has all its dependencies installed
                       is the one that is chosen.
:param ping_timeout: The time in seconds that the client waits for the
server to respond before disconnecting. The default is
60 seconds.
:param ping_interval: The interval in seconds at which the client pings
the server. The default is 25 seconds.
:param max_http_buffer_size: The maximum size of a message when using the
polling transport. The default is 100,000,000
bytes.
:param allow_upgrades: Whether to allow transport upgrades or not. The
default is ``True``.
:param http_compression: Whether to compress packages when using the
polling transport. The default is ``True``.
:param compression_threshold: Only compress messages when their byte size
is greater than this value. The default is
1024 bytes.
:param cookie: Name of the HTTP cookie that contains the client session
id. If set to ``None``, a cookie is not sent to the client.
The default is ``'io'``.
:param cors_allowed_origins: List of origins that are allowed to connect
to this server. All origins are allowed by
default.
:param cors_credentials: Whether credentials (cookies, authentication) are
allowed in requests to this server. The default is
``True``.
:param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
a logger object to use. To disable logging set to
``False``. The default is ``False``.
"""
def __init__(self, app=None, **kwargs):
self.server = None
self.server_options = {}
self.wsgi_server = None
self.handlers = []
self.namespace_handlers = []
self.exception_handlers = {}
self.default_exception_handler = None
self.manage_session = True
# We can call init_app when:
# - we were given the Flask app instance (standard initialization)
# - we were not given the app, but we were given a message_queue
# (standard initialization for auxiliary process)
# In all other cases we collect the arguments and assume the client
# will call init_app from an app factory function.
if app is not None or 'message_queue' in kwargs:
self.init_app(app, **kwargs)
else:
self.server_options.update(kwargs)
def init_app(self, app, **kwargs):
if app is not None:
if not hasattr(app, 'extensions'):
app.extensions = {} # pragma: no cover
app.extensions['socketio'] = self
self.server_options.update(kwargs)
self.manage_session = self.server_options.pop('manage_session',
self.manage_session)
if 'client_manager' not in self.server_options:
url = self.server_options.pop('message_queue', None)
channel = self.server_options.pop('channel', 'flask-socketio')
write_only = app is None
if url:
if url.startswith(('redis://', "rediss://")):
queue_class = socketio.RedisManager
elif url.startswith('zmq'):
queue_class = socketio.ZmqManager
else:
queue_class = socketio.KombuManager
queue = queue_class(url, channel=channel,
write_only=write_only)
self.server_options['client_manager'] = queue
if 'json' in self.server_options and \
self.server_options['json'] == flask_json:
# flask's json module is tricky to use because its output
# changes when it is invoked inside or outside the app context
# so here to prevent any ambiguities we replace it with wrappers
# that ensure that the app context is always present
class FlaskSafeJSON(object):
@staticmethod
def dumps(*args, **kwargs):
with app.app_context():
return flask_json.dumps(*args, **kwargs)
@staticmethod
def loads(*args, **kwargs):
with app.app_context():
return flask_json.loads(*args, **kwargs)
self.server_options['json'] = FlaskSafeJSON
resource = self.server_options.pop('path', None) or \
self.server_options.pop('resource', None) or 'socket.io'
if resource.startswith('/'):
resource = resource[1:]
if os.environ.get('FLASK_RUN_FROM_CLI'):
if self.server_options.get('async_mode') is None:
if app is not None:
app.logger.warning(
'Flask-SocketIO is Running under Werkzeug, WebSocket '
'is not available.')
self.server_options['async_mode'] = 'threading'
self.server = socketio.Server(**self.server_options)
self.async_mode = self.server.async_mode
for handler in self.handlers:
self.server.on(handler[0], handler[1], namespace=handler[2])
for namespace_handler in self.namespace_handlers:
self.server.register_namespace(namespace_handler)
if app is not None:
            # here we attach the SocketIO middleware to the SocketIO object so it
# can be referenced later if debug middleware needs to be inserted
self.sockio_mw = _SocketIOMiddleware(self.server, app,
socketio_path=resource)
app.wsgi_app = self.sockio_mw
def on(self, message, namespace=None):
"""Decorator to register a SocketIO event handler.
This decorator must be applied to SocketIO event handlers. Example::
@socketio.on('my event', namespace='/chat')
def handle_my_custom_event(json):
print('received json: ' + str(json))
:param message: The name of the event. This is normally a user defined
string, but a few event names are already defined. Use
``'message'`` to define a handler that takes a string
payload, ``'json'`` to define a handler that takes a
JSON blob payload, ``'connect'`` or ``'disconnect'``
to create handlers for connection and disconnection
events.
:param namespace: The namespace on which the handler is to be
registered. Defaults to the global namespace.
"""
namespace = namespace or '/'
def decorator(handler):
def _handler(sid, *args):
return self._handle_event(handler, message, namespace, sid,
*args)
if self.server:
self.server.on(message, _handler, namespace=namespace)
else:
self.handlers.append((message, _handler, namespace))
return handler
return decorator
def on_error(self, namespace=None):
"""Decorator to define a custom error handler for SocketIO events.
This decorator can be applied to a function that acts as an error
handler for a namespace. This handler will be invoked when a SocketIO
event handler raises an exception. The handler function must accept one
argument, which is the exception raised. Example::
@socketio.on_error(namespace='/chat')
def chat_error_handler(e):
print('An error has occurred: ' + str(e))
:param namespace: The namespace for which to register the error
handler. Defaults to the global namespace.
"""
namespace = namespace or '/'
def decorator(exception_handler):
if not callable(exception_handler):
raise ValueError('exception_handler must be callable')
self.exception_handlers[namespace] = exception_handler
return exception_handler
return decorator
def on_error_default(self, exception_handler):
"""Decorator to define a default error handler for SocketIO events.
This decorator can be applied to a function that acts as a default
error handler for any namespaces that do not have a specific handler.
Example::
@socketio.on_error_default
def error_handler(e):
print('An error has occurred: ' + str(e))
"""
if not callable(exception_handler):
raise ValueError('exception_handler must be callable')
self.default_exception_handler = exception_handler
return exception_handler
def on_event(self, message, handler, namespace=None):
"""Register a SocketIO event handler.
``on_event`` is the non-decorator version of ``'on'``.
Example::
def on_foo_event(json):
print('received json: ' + str(json))
socketio.on_event('my event', on_foo_event, namespace='/chat')
:param message: The name of the event. This is normally a user defined
string, but a few event names are already defined. Use
``'message'`` to define a handler that takes a string
payload, ``'json'`` to define a handler that takes a
JSON blob payload, ``'connect'`` or ``'disconnect'``
to create handlers for connection and disconnection
events.
:param handler: The function that handles the event.
:param namespace: The namespace on which the handler is to be
registered. Defaults to the global namespace.
"""
self.on(message, namespace=namespace)(handler)
def on_namespace(self, namespace_handler):
if not isinstance(namespace_handler, Namespace):
raise ValueError('Not a namespace instance.')
namespace_handler._set_socketio(self)
if self.server:
self.server.register_namespace(namespace_handler)
else:
self.namespace_handlers.append(namespace_handler)
def send(self, data, json=False, namespace=None, room=None,
callback=None, include_self=True, skip_sid=None, **kwargs):
"""Send a server-generated SocketIO message.
This function sends a simple SocketIO message to one or more connected
clients. The message can be a string or a JSON blob. This is a simpler
version of ``emit()``, which should be preferred. This function can be
used outside of a SocketIO event context, so it is appropriate to use
when the server is the originator of an event.
:param data: The message to send, either a string or a JSON blob.
:param json: ``True`` if ``message`` is a JSON blob, ``False``
otherwise.
:param namespace: The namespace under which the message is to be sent.
Defaults to the global namespace.
:param room: Send the message only to the users in the given room. If
this parameter is not included, the message is sent to
all connected users.
:param skip_sid: The session id of a client to ignore when broadcasting
or addressing a room. This is typically set to the
originator of the message, so that everyone except
that client receive the message.
:param callback: If given, this function will be called to acknowledge
that the client has received the message. The
arguments that will be passed to the function are
those provided by the client. Callback functions can
only be used when addressing an individual client.
"""
skip_sid = flask.request.sid if not include_self else skip_sid
if json:
self.emit('json', data, namespace=namespace, room=room,
skip_sid=skip_sid, callback=callback, **kwargs)
else:
self.emit('message', data, namespace=namespace, room=room,
skip_sid=skip_sid, callback=callback, **kwargs)
def close_room(self, room, namespace=None):
"""Close a room.
This function removes any users that are in the given room and then
deletes the room from the server. This function can be used outside
of a SocketIO event context.
:param room: The name of the room to close.
:param namespace: The namespace under which the room exists. Defaults
to the global namespace.
"""
self.server.close_room(room, namespace)
def run(self, app, host=None, port=None, **kwargs):
"""Run the SocketIO web server.
:param app: The Flask application instance.
:param host: The hostname or IP address for the server to listen on.
Defaults to 127.0.0.1.
:param port: The port number for the server to listen on. Defaults to
5000.
:param debug: ``True`` to start the server in debug mode, ``False`` to
start in normal mode.
:param use_reloader: ``True`` to enable the Flask reloader, ``False``
to disable it.
:param extra_files: A list of additional files that the Flask
reloader should watch. Defaults to ``None``
        :param log_output: If ``True``, the server logs all incoming
connections. If ``False`` logging is disabled.
Defaults to ``True`` in debug mode, ``False``
in normal mode. Unused when the threading async
mode is used.
:param kwargs: Additional web server options. The web server options
are specific to the server used in each of the supported
async modes. Note that options provided here will
not be seen when using an external web server such
as gunicorn, since this method is not called in that
case.
"""
if host is None:
host = '127.0.0.1'
if port is None:
server_name = app.config['SERVER_NAME']
if server_name and ':' in server_name:
port = int(server_name.rsplit(':', 1)[1])
else:
port = 5000
debug = kwargs.pop('debug', app.debug)
log_output = kwargs.pop('log_output', debug)
use_reloader = kwargs.pop('use_reloader', debug)
extra_files = kwargs.pop('extra_files', None)
app.debug = debug
if app.debug and self.server.eio.async_mode != 'threading':
# put the debug middleware between the SocketIO middleware
# and the Flask application instance
#
# mw1 mw2 mw3 Flask app
# o ---- o ---- o ---- o
# /
# o Flask-SocketIO
# \ middleware
# o
# Flask-SocketIO WebSocket handler
#
# BECOMES
#
# dbg-mw mw1 mw2 mw3 Flask app
# o ---- o ---- o ---- o ---- o
# /
# o Flask-SocketIO
# \ middleware
# o
# Flask-SocketIO WebSocket handler
#
self.sockio_mw.wsgi_app = DebuggedApplication(self.sockio_mw.wsgi_app,
evalex=True)
if self.server.eio.async_mode == 'threading':
from werkzeug._internal import _log
_log('warning', 'WebSocket transport not available. Install '
'eventlet or gevent and gevent-websocket for '
'improved performance.')
app.run(host=host, port=port, threaded=True,
use_reloader=use_reloader, **kwargs)
elif self.server.eio.async_mode == 'eventlet':
def run_server():
import eventlet
import eventlet.wsgi
import eventlet.green
addresses = eventlet.green.socket.getaddrinfo(host, port)
if not addresses:
raise RuntimeError('Could not resolve host to a valid address')
eventlet_socket = eventlet.listen(addresses[0][4], addresses[0][0])
# If provided an SSL argument, use an SSL socket
ssl_args = ['keyfile', 'certfile', 'server_side', 'cert_reqs',
'ssl_version', 'ca_certs',
'do_handshake_on_connect', 'suppress_ragged_eofs',
'ciphers']
ssl_params = {k: kwargs[k] for k in kwargs if k in ssl_args}
if len(ssl_params) > 0:
for k in ssl_params:
kwargs.pop(k)
ssl_params['server_side'] = True # Listening requires true
eventlet_socket = eventlet.wrap_ssl(eventlet_socket,
**ssl_params)
eventlet.wsgi.server(eventlet_socket, app,
log_output=log_output, **kwargs)
if use_reloader:
run_with_reloader(run_server, extra_files=extra_files)
else:
run_server()
elif self.server.eio.async_mode == 'gevent':
from gevent import pywsgi
try:
from geventwebsocket.handler import WebSocketHandler
websocket = True
except ImportError:
websocket = False
log = 'default'
if not log_output:
log = None
if websocket:
self.wsgi_server = pywsgi.WSGIServer(
(host, port), app, handler_class=WebSocketHandler,
log=log, **kwargs)
else:
self.wsgi_server = pywsgi.WSGIServer((host, port), app,
log=log, **kwargs)
if use_reloader:
# monkey patching is required by the reloader
from gevent import monkey
monkey.patch_all()
def run_server():
self.wsgi_server.serve_forever()
run_with_reloader(run_server, extra_files=extra_files)
else:
self.wsgi_server.serve_forever()
def stop(self):
"""Stop a running SocketIO web server.
This method must be called from a HTTP or SocketIO handler function.
"""
if self.server.eio.async_mode == 'threading':
func = flask.request.environ.get('werkzeug.server.shutdown')
if func:
func()
else:
raise RuntimeError('Cannot stop unknown web server')
elif self.server.eio.async_mode == 'eventlet':
raise SystemExit
elif self.server.eio.async_mode == 'gevent':
self.wsgi_server.stop()
def start_background_task(self, target, *args, **kwargs):
"""Start a background task using the appropriate async model.
This is a utility function that applications can use to start a
background task using the method that is compatible with the
selected async mode.
:param target: the target function to execute.
:param args: arguments to pass to the function.
:param kwargs: keyword arguments to pass to the function.
This function returns an object compatible with the `Thread` class in
the Python standard library. The `start()` method on this object is
already called by this function.
"""
return self.server.start_background_task(target, *args, **kwargs)
def sleep(self, seconds=0):
"""Sleep for the requested amount of time using the appropriate async
model.
This is a utility function that applications can use to put a task to
sleep without having to worry about using the correct call for the
selected async mode.
"""
return self.server.sleep(seconds)
def test_client(self, app, namespace=None, query_string=None,
headers=None, flask_test_client=None):
"""The Socket.IO test client is useful for testing a Flask-SocketIO
server. It works in a similar way to the Flask Test Client, but
adapted to the Socket.IO server.
:param app: The Flask application instance.
:param namespace: The namespace for the client. If not provided, the
client connects to the server on the global
namespace.
:param query_string: A string with custom query string arguments.
:param headers: A dictionary with custom HTTP headers.
:param flask_test_client: The instance of the Flask test client
currently in use. Passing the Flask test
client is optional, but is necessary if you
want the Flask user session and any other
cookies set in HTTP routes accessible from
Socket.IO events.
"""
return SocketIOTestClient(app, self, namespace=namespace,
query_string=query_string, headers=headers,
flask_test_client=flask_test_client)
def _handle_event(self, handler, message, namespace, sid, *args):
if sid not in self.server.environ:
            # we don't have a record of this client, ignore this event
return '', 400
app = self.server.environ[sid]['flask.app']
with app.request_context(self.server.environ[sid]):
if self.manage_session:
# manage a separate session for this client's Socket.IO events
# created as a copy of the regular user session
if 'saved_session' not in self.server.environ[sid]:
self.server.environ[sid]['saved_session'] = \
_ManagedSession(flask.session)
session_obj = self.server.environ[sid]['saved_session']
else:
# let Flask handle the user session
# for cookie based sessions, this effectively freezes the
# session to its state at connection time
# for server-side sessions, this allows HTTP and Socket.IO to
# share the session, with both having read/write access to it
session_obj = flask.session._get_current_object()
_request_ctx_stack.top.session = session_obj
flask.request.sid = sid
flask.request.namespace = namespace
flask.request.event = {'message': message, 'args': args}
try:
if message == 'connect':
ret = handler()
else:
ret = handler(*args)
except:
err_handler = self.exception_handlers.get(
namespace, self.default_exception_handler)
if err_handler is None:
raise
type, value, traceback = sys.exc_info()
return err_handler(value)
if not self.manage_session:
# when Flask is managing the user session, it needs to save it
if not hasattr(session_obj, 'modified') or session_obj.modified:
resp = app.response_class()
app.session_interface.save_session(app, session_obj, resp)
return ret
|
miguelgrinberg/Flask-SocketIO | flask_socketio/__init__.py | SocketIO.send | python | def send(self, data, json=False, namespace=None, room=None,
callback=None, include_self=True, skip_sid=None, **kwargs):
skip_sid = flask.request.sid if not include_self else skip_sid
if json:
self.emit('json', data, namespace=namespace, room=room,
skip_sid=skip_sid, callback=callback, **kwargs)
else:
self.emit('message', data, namespace=namespace, room=room,
skip_sid=skip_sid, callback=callback, **kwargs) | Send a server-generated SocketIO message.
This function sends a simple SocketIO message to one or more connected
clients. The message can be a string or a JSON blob. This is a simpler
version of ``emit()``, which should be preferred. This function can be
used outside of a SocketIO event context, so it is appropriate to use
when the server is the originator of an event.
:param data: The message to send, either a string or a JSON blob.
:param json: ``True`` if ``message`` is a JSON blob, ``False``
otherwise.
:param namespace: The namespace under which the message is to be sent.
Defaults to the global namespace.
:param room: Send the message only to the users in the given room. If
this parameter is not included, the message is sent to
all connected users.
:param skip_sid: The session id of a client to ignore when broadcasting
or addressing a room. This is typically set to the
originator of the message, so that everyone except
that client receive the message.
:param callback: If given, this function will be called to acknowledge
that the client has received the message. The
arguments that will be passed to the function are
those provided by the client. Callback functions can
only be used when addressing an individual client. | train | https://github.com/miguelgrinberg/Flask-SocketIO/blob/4bef800d5e7ba7d98a6f4cd94191ff0b4496c334/flask_socketio/__init__.py#L383-L417 | [
"def emit(self, event, *args, **kwargs):\n \"\"\"Emit a server generated SocketIO event.\n\n This function emits a SocketIO event to one or more connected clients.\n A JSON blob can be attached to the event as payload. This function can\n be used outside of a SocketIO event context, so it is appropriate to\n use when the server is the originator of an event, outside of any\n client context, such as in a regular HTTP request handler or a\n background task. Example::\n\n @app.route('/ping')\n def ping():\n socketio.emit('ping event', {'data': 42}, namespace='/chat')\n\n :param event: The name of the user event to emit.\n :param args: A dictionary with the JSON data to send as payload.\n :param namespace: The namespace under which the message is to be sent.\n Defaults to the global namespace.\n :param room: Send the message to all the users in the given room. If\n this parameter is not included, the event is sent to\n all connected users.\n :param skip_sid: The session id of a client to ignore when broadcasting\n or addressing a room. This is typically set to the\n originator of the message, so that everyone except\n that client receive the message.\n :param callback: If given, this function will be called to acknowledge\n that the client has received the message. The\n arguments that will be passed to the function are\n those provided by the client. Callback functions can\n only be used when addressing an individual client.\n \"\"\"\n namespace = kwargs.pop('namespace', '/')\n room = kwargs.pop('room', None)\n include_self = kwargs.pop('include_self', True)\n skip_sid = kwargs.pop('skip_sid', None)\n if not include_self and not skip_sid:\n skip_sid = flask.request.sid\n callback = kwargs.pop('callback', None)\n self.server.emit(event, *args, namespace=namespace, room=room,\n skip_sid=skip_sid, callback=callback, **kwargs)\n"
] | class SocketIO(object):
"""Create a Flask-SocketIO server.
:param app: The flask application instance. If the application instance
isn't known at the time this class is instantiated, then call
``socketio.init_app(app)`` once the application instance is
available.
:param manage_session: If set to ``True``, this extension manages the user
session for Socket.IO events. If set to ``False``,
Flask's own session management is used. When using
Flask's cookie based sessions it is recommended that
you leave this set to the default of ``True``. When
using server-side sessions, a ``False`` setting
enables sharing the user session between HTTP routes
and Socket.IO events.
:param message_queue: A connection URL for a message queue service the
server can use for multi-process communication. A
message queue is not required when using a single
server process.
:param channel: The channel name, when using a message queue. If a channel
isn't specified, a default channel will be used. If
multiple clusters of SocketIO processes need to use the
same message queue without interfering with each other, then
each cluster should use a different channel.
:param path: The path where the Socket.IO server is exposed. Defaults to
``'socket.io'``. Leave this as is unless you know what you are
doing.
:param resource: Alias to ``path``.
:param kwargs: Socket.IO and Engine.IO server options.
The Socket.IO server options are detailed below:
:param client_manager: The client manager instance that will manage the
client list. When this is omitted, the client list
is stored in an in-memory structure, so the use of
multiple connected servers is not possible. In most
cases, this argument does not need to be set
explicitly.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``.
:param binary: ``True`` to support binary payloads, ``False`` to treat all
payloads as text. On Python 2, if this is set to ``True``,
``unicode`` values are treated as text, and ``str`` and
``bytes`` values are treated as binary. This option has no
effect on Python 3, where text and binary payloads are
always automatically discovered.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions. To use the same json encoder and decoder as a Flask
application, use ``flask.json``.
The Engine.IO server configuration supports the following settings:
:param async_mode: The asynchronous model to use. See the Deployment
section in the documentation for a description of the
available options. Valid async modes are
``threading``, ``eventlet``, ``gevent`` and
``gevent_uwsgi``. If this argument is not given,
``eventlet`` is tried first, then ``gevent_uwsgi``,
then ``gevent``, and finally ``threading``. The
first async mode that has all its dependencies installed
is then one that is chosen.
:param ping_timeout: The time in seconds that the client waits for the
server to respond before disconnecting. The default is
60 seconds.
:param ping_interval: The interval in seconds at which the client pings
the server. The default is 25 seconds.
:param max_http_buffer_size: The maximum size of a message when using the
polling transport. The default is 100,000,000
bytes.
:param allow_upgrades: Whether to allow transport upgrades or not. The
default is ``True``.
:param http_compression: Whether to compress packages when using the
polling transport. The default is ``True``.
:param compression_threshold: Only compress messages when their byte size
is greater than this value. The default is
1024 bytes.
:param cookie: Name of the HTTP cookie that contains the client session
id. If set to ``None``, a cookie is not sent to the client.
The default is ``'io'``.
:param cors_allowed_origins: List of origins that are allowed to connect
to this server. All origins are allowed by
default.
:param cors_credentials: Whether credentials (cookies, authentication) are
allowed in requests to this server. The default is
``True``.
:param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
a logger object to use. To disable logging set to
``False``. The default is ``False``.
"""
def __init__(self, app=None, **kwargs):
self.server = None
self.server_options = {}
self.wsgi_server = None
self.handlers = []
self.namespace_handlers = []
self.exception_handlers = {}
self.default_exception_handler = None
self.manage_session = True
# We can call init_app when:
# - we were given the Flask app instance (standard initialization)
# - we were not given the app, but we were given a message_queue
# (standard initialization for auxiliary process)
# In all other cases we collect the arguments and assume the client
# will call init_app from an app factory function.
if app is not None or 'message_queue' in kwargs:
self.init_app(app, **kwargs)
else:
self.server_options.update(kwargs)
def init_app(self, app, **kwargs):
if app is not None:
if not hasattr(app, 'extensions'):
app.extensions = {} # pragma: no cover
app.extensions['socketio'] = self
self.server_options.update(kwargs)
self.manage_session = self.server_options.pop('manage_session',
self.manage_session)
if 'client_manager' not in self.server_options:
url = self.server_options.pop('message_queue', None)
channel = self.server_options.pop('channel', 'flask-socketio')
write_only = app is None
if url:
if url.startswith(('redis://', "rediss://")):
queue_class = socketio.RedisManager
elif url.startswith('zmq'):
queue_class = socketio.ZmqManager
else:
queue_class = socketio.KombuManager
queue = queue_class(url, channel=channel,
write_only=write_only)
self.server_options['client_manager'] = queue
if 'json' in self.server_options and \
self.server_options['json'] == flask_json:
# flask's json module is tricky to use because its output
# changes when it is invoked inside or outside the app context
# so here to prevent any ambiguities we replace it with wrappers
# that ensure that the app context is always present
class FlaskSafeJSON(object):
@staticmethod
def dumps(*args, **kwargs):
with app.app_context():
return flask_json.dumps(*args, **kwargs)
@staticmethod
def loads(*args, **kwargs):
with app.app_context():
return flask_json.loads(*args, **kwargs)
self.server_options['json'] = FlaskSafeJSON
resource = self.server_options.pop('path', None) or \
self.server_options.pop('resource', None) or 'socket.io'
if resource.startswith('/'):
resource = resource[1:]
if os.environ.get('FLASK_RUN_FROM_CLI'):
if self.server_options.get('async_mode') is None:
if app is not None:
app.logger.warning(
'Flask-SocketIO is Running under Werkzeug, WebSocket '
'is not available.')
self.server_options['async_mode'] = 'threading'
self.server = socketio.Server(**self.server_options)
self.async_mode = self.server.async_mode
for handler in self.handlers:
self.server.on(handler[0], handler[1], namespace=handler[2])
for namespace_handler in self.namespace_handlers:
self.server.register_namespace(namespace_handler)
if app is not None:
# here we attach the SocketIO middlware to the SocketIO object so it
# can be referenced later if debug middleware needs to be inserted
self.sockio_mw = _SocketIOMiddleware(self.server, app,
socketio_path=resource)
app.wsgi_app = self.sockio_mw
def on(self, message, namespace=None):
"""Decorator to register a SocketIO event handler.
This decorator must be applied to SocketIO event handlers. Example::
@socketio.on('my event', namespace='/chat')
def handle_my_custom_event(json):
print('received json: ' + str(json))
:param message: The name of the event. This is normally a user defined
string, but a few event names are already defined. Use
``'message'`` to define a handler that takes a string
payload, ``'json'`` to define a handler that takes a
JSON blob payload, ``'connect'`` or ``'disconnect'``
to create handlers for connection and disconnection
events.
:param namespace: The namespace on which the handler is to be
registered. Defaults to the global namespace.
"""
namespace = namespace or '/'
def decorator(handler):
def _handler(sid, *args):
return self._handle_event(handler, message, namespace, sid,
*args)
if self.server:
self.server.on(message, _handler, namespace=namespace)
else:
self.handlers.append((message, _handler, namespace))
return handler
return decorator
def on_error(self, namespace=None):
"""Decorator to define a custom error handler for SocketIO events.
This decorator can be applied to a function that acts as an error
handler for a namespace. This handler will be invoked when a SocketIO
event handler raises an exception. The handler function must accept one
argument, which is the exception raised. Example::
@socketio.on_error(namespace='/chat')
def chat_error_handler(e):
print('An error has occurred: ' + str(e))
:param namespace: The namespace for which to register the error
handler. Defaults to the global namespace.
"""
namespace = namespace or '/'
def decorator(exception_handler):
if not callable(exception_handler):
raise ValueError('exception_handler must be callable')
self.exception_handlers[namespace] = exception_handler
return exception_handler
return decorator
def on_error_default(self, exception_handler):
"""Decorator to define a default error handler for SocketIO events.
This decorator can be applied to a function that acts as a default
error handler for any namespaces that do not have a specific handler.
Example::
@socketio.on_error_default
def error_handler(e):
print('An error has occurred: ' + str(e))
"""
if not callable(exception_handler):
raise ValueError('exception_handler must be callable')
self.default_exception_handler = exception_handler
return exception_handler
def on_event(self, message, handler, namespace=None):
"""Register a SocketIO event handler.
``on_event`` is the non-decorator version of ``'on'``.
Example::
def on_foo_event(json):
print('received json: ' + str(json))
socketio.on_event('my event', on_foo_event, namespace='/chat')
:param message: The name of the event. This is normally a user defined
string, but a few event names are already defined. Use
``'message'`` to define a handler that takes a string
payload, ``'json'`` to define a handler that takes a
JSON blob payload, ``'connect'`` or ``'disconnect'``
to create handlers for connection and disconnection
events.
:param handler: The function that handles the event.
:param namespace: The namespace on which the handler is to be
registered. Defaults to the global namespace.
"""
self.on(message, namespace=namespace)(handler)
def on_namespace(self, namespace_handler):
if not isinstance(namespace_handler, Namespace):
raise ValueError('Not a namespace instance.')
namespace_handler._set_socketio(self)
if self.server:
self.server.register_namespace(namespace_handler)
else:
self.namespace_handlers.append(namespace_handler)
def emit(self, event, *args, **kwargs):
"""Emit a server generated SocketIO event.
This function emits a SocketIO event to one or more connected clients.
A JSON blob can be attached to the event as payload. This function can
be used outside of a SocketIO event context, so it is appropriate to
use when the server is the originator of an event, outside of any
client context, such as in a regular HTTP request handler or a
background task. Example::
@app.route('/ping')
def ping():
socketio.emit('ping event', {'data': 42}, namespace='/chat')
:param event: The name of the user event to emit.
:param args: A dictionary with the JSON data to send as payload.
:param namespace: The namespace under which the message is to be sent.
Defaults to the global namespace.
:param room: Send the message to all the users in the given room. If
this parameter is not included, the event is sent to
all connected users.
:param skip_sid: The session id of a client to ignore when broadcasting
or addressing a room. This is typically set to the
originator of the message, so that everyone except
that client receive the message.
:param callback: If given, this function will be called to acknowledge
that the client has received the message. The
arguments that will be passed to the function are
those provided by the client. Callback functions can
only be used when addressing an individual client.
"""
namespace = kwargs.pop('namespace', '/')
room = kwargs.pop('room', None)
include_self = kwargs.pop('include_self', True)
skip_sid = kwargs.pop('skip_sid', None)
if not include_self and not skip_sid:
skip_sid = flask.request.sid
callback = kwargs.pop('callback', None)
self.server.emit(event, *args, namespace=namespace, room=room,
skip_sid=skip_sid, callback=callback, **kwargs)
def close_room(self, room, namespace=None):
"""Close a room.
This function removes any users that are in the given room and then
deletes the room from the server. This function can be used outside
of a SocketIO event context.
:param room: The name of the room to close.
:param namespace: The namespace under which the room exists. Defaults
to the global namespace.
"""
self.server.close_room(room, namespace)
def run(self, app, host=None, port=None, **kwargs):
"""Run the SocketIO web server.
:param app: The Flask application instance.
:param host: The hostname or IP address for the server to listen on.
Defaults to 127.0.0.1.
:param port: The port number for the server to listen on. Defaults to
5000.
:param debug: ``True`` to start the server in debug mode, ``False`` to
start in normal mode.
:param use_reloader: ``True`` to enable the Flask reloader, ``False``
to disable it.
:param extra_files: A list of additional files that the Flask
reloader should watch. Defaults to ``None``
:param log_output: If ``True``, the server logs all incomming
connections. If ``False`` logging is disabled.
Defaults to ``True`` in debug mode, ``False``
in normal mode. Unused when the threading async
mode is used.
:param kwargs: Additional web server options. The web server options
are specific to the server used in each of the supported
async modes. Note that options provided here will
not be seen when using an external web server such
as gunicorn, since this method is not called in that
case.
"""
if host is None:
host = '127.0.0.1'
if port is None:
server_name = app.config['SERVER_NAME']
if server_name and ':' in server_name:
port = int(server_name.rsplit(':', 1)[1])
else:
port = 5000
debug = kwargs.pop('debug', app.debug)
log_output = kwargs.pop('log_output', debug)
use_reloader = kwargs.pop('use_reloader', debug)
extra_files = kwargs.pop('extra_files', None)
app.debug = debug
if app.debug and self.server.eio.async_mode != 'threading':
# put the debug middleware between the SocketIO middleware
# and the Flask application instance
#
# mw1 mw2 mw3 Flask app
# o ---- o ---- o ---- o
# /
# o Flask-SocketIO
# \ middleware
# o
# Flask-SocketIO WebSocket handler
#
# BECOMES
#
# dbg-mw mw1 mw2 mw3 Flask app
# o ---- o ---- o ---- o ---- o
# /
# o Flask-SocketIO
# \ middleware
# o
# Flask-SocketIO WebSocket handler
#
self.sockio_mw.wsgi_app = DebuggedApplication(self.sockio_mw.wsgi_app,
evalex=True)
if self.server.eio.async_mode == 'threading':
from werkzeug._internal import _log
_log('warning', 'WebSocket transport not available. Install '
'eventlet or gevent and gevent-websocket for '
'improved performance.')
app.run(host=host, port=port, threaded=True,
use_reloader=use_reloader, **kwargs)
elif self.server.eio.async_mode == 'eventlet':
def run_server():
import eventlet
import eventlet.wsgi
import eventlet.green
addresses = eventlet.green.socket.getaddrinfo(host, port)
if not addresses:
raise RuntimeError('Could not resolve host to a valid address')
eventlet_socket = eventlet.listen(addresses[0][4], addresses[0][0])
# If provided an SSL argument, use an SSL socket
ssl_args = ['keyfile', 'certfile', 'server_side', 'cert_reqs',
'ssl_version', 'ca_certs',
'do_handshake_on_connect', 'suppress_ragged_eofs',
'ciphers']
ssl_params = {k: kwargs[k] for k in kwargs if k in ssl_args}
if len(ssl_params) > 0:
for k in ssl_params:
kwargs.pop(k)
ssl_params['server_side'] = True # Listening requires true
eventlet_socket = eventlet.wrap_ssl(eventlet_socket,
**ssl_params)
eventlet.wsgi.server(eventlet_socket, app,
log_output=log_output, **kwargs)
if use_reloader:
run_with_reloader(run_server, extra_files=extra_files)
else:
run_server()
elif self.server.eio.async_mode == 'gevent':
from gevent import pywsgi
try:
from geventwebsocket.handler import WebSocketHandler
websocket = True
except ImportError:
websocket = False
log = 'default'
if not log_output:
log = None
if websocket:
self.wsgi_server = pywsgi.WSGIServer(
(host, port), app, handler_class=WebSocketHandler,
log=log, **kwargs)
else:
self.wsgi_server = pywsgi.WSGIServer((host, port), app,
log=log, **kwargs)
if use_reloader:
# monkey patching is required by the reloader
from gevent import monkey
monkey.patch_all()
def run_server():
self.wsgi_server.serve_forever()
run_with_reloader(run_server, extra_files=extra_files)
else:
self.wsgi_server.serve_forever()
def stop(self):
"""Stop a running SocketIO web server.
This method must be called from a HTTP or SocketIO handler function.
"""
if self.server.eio.async_mode == 'threading':
func = flask.request.environ.get('werkzeug.server.shutdown')
if func:
func()
else:
raise RuntimeError('Cannot stop unknown web server')
elif self.server.eio.async_mode == 'eventlet':
raise SystemExit
elif self.server.eio.async_mode == 'gevent':
self.wsgi_server.stop()
def start_background_task(self, target, *args, **kwargs):
"""Start a background task using the appropriate async model.
This is a utility function that applications can use to start a
background task using the method that is compatible with the
selected async mode.
:param target: the target function to execute.
:param args: arguments to pass to the function.
:param kwargs: keyword arguments to pass to the function.
This function returns an object compatible with the `Thread` class in
the Python standard library. The `start()` method on this object is
already called by this function.
"""
return self.server.start_background_task(target, *args, **kwargs)
def sleep(self, seconds=0):
"""Sleep for the requested amount of time using the appropriate async
model.
This is a utility function that applications can use to put a task to
sleep without having to worry about using the correct call for the
selected async mode.
"""
return self.server.sleep(seconds)
def test_client(self, app, namespace=None, query_string=None,
headers=None, flask_test_client=None):
"""The Socket.IO test client is useful for testing a Flask-SocketIO
server. It works in a similar way to the Flask Test Client, but
adapted to the Socket.IO server.
:param app: The Flask application instance.
:param namespace: The namespace for the client. If not provided, the
client connects to the server on the global
namespace.
:param query_string: A string with custom query string arguments.
:param headers: A dictionary with custom HTTP headers.
:param flask_test_client: The instance of the Flask test client
currently in use. Passing the Flask test
client is optional, but is necessary if you
want the Flask user session and any other
cookies set in HTTP routes accessible from
Socket.IO events.
"""
return SocketIOTestClient(app, self, namespace=namespace,
query_string=query_string, headers=headers,
flask_test_client=flask_test_client)
def _handle_event(self, handler, message, namespace, sid, *args):
if sid not in self.server.environ:
# we don't have record of this client, ignore this event
return '', 400
app = self.server.environ[sid]['flask.app']
with app.request_context(self.server.environ[sid]):
if self.manage_session:
# manage a separate session for this client's Socket.IO events
# created as a copy of the regular user session
if 'saved_session' not in self.server.environ[sid]:
self.server.environ[sid]['saved_session'] = \
_ManagedSession(flask.session)
session_obj = self.server.environ[sid]['saved_session']
else:
# let Flask handle the user session
# for cookie based sessions, this effectively freezes the
# session to its state at connection time
# for server-side sessions, this allows HTTP and Socket.IO to
# share the session, with both having read/write access to it
session_obj = flask.session._get_current_object()
_request_ctx_stack.top.session = session_obj
flask.request.sid = sid
flask.request.namespace = namespace
flask.request.event = {'message': message, 'args': args}
try:
if message == 'connect':
ret = handler()
else:
ret = handler(*args)
except:
err_handler = self.exception_handlers.get(
namespace, self.default_exception_handler)
if err_handler is None:
raise
type, value, traceback = sys.exc_info()
return err_handler(value)
if not self.manage_session:
# when Flask is managing the user session, it needs to save it
if not hasattr(session_obj, 'modified') or session_obj.modified:
resp = app.response_class()
app.session_interface.save_session(app, session_obj, resp)
return ret
|
miguelgrinberg/Flask-SocketIO | flask_socketio/__init__.py | SocketIO.run | python | def run(self, app, host=None, port=None, **kwargs):
if host is None:
host = '127.0.0.1'
if port is None:
server_name = app.config['SERVER_NAME']
if server_name and ':' in server_name:
port = int(server_name.rsplit(':', 1)[1])
else:
port = 5000
debug = kwargs.pop('debug', app.debug)
log_output = kwargs.pop('log_output', debug)
use_reloader = kwargs.pop('use_reloader', debug)
extra_files = kwargs.pop('extra_files', None)
app.debug = debug
if app.debug and self.server.eio.async_mode != 'threading':
# put the debug middleware between the SocketIO middleware
# and the Flask application instance
#
# mw1 mw2 mw3 Flask app
# o ---- o ---- o ---- o
# /
# o Flask-SocketIO
# \ middleware
# o
# Flask-SocketIO WebSocket handler
#
# BECOMES
#
# dbg-mw mw1 mw2 mw3 Flask app
# o ---- o ---- o ---- o ---- o
# /
# o Flask-SocketIO
# \ middleware
# o
# Flask-SocketIO WebSocket handler
#
self.sockio_mw.wsgi_app = DebuggedApplication(self.sockio_mw.wsgi_app,
evalex=True)
if self.server.eio.async_mode == 'threading':
from werkzeug._internal import _log
_log('warning', 'WebSocket transport not available. Install '
'eventlet or gevent and gevent-websocket for '
'improved performance.')
app.run(host=host, port=port, threaded=True,
use_reloader=use_reloader, **kwargs)
elif self.server.eio.async_mode == 'eventlet':
def run_server():
import eventlet
import eventlet.wsgi
import eventlet.green
addresses = eventlet.green.socket.getaddrinfo(host, port)
if not addresses:
raise RuntimeError('Could not resolve host to a valid address')
eventlet_socket = eventlet.listen(addresses[0][4], addresses[0][0])
# If provided an SSL argument, use an SSL socket
ssl_args = ['keyfile', 'certfile', 'server_side', 'cert_reqs',
'ssl_version', 'ca_certs',
'do_handshake_on_connect', 'suppress_ragged_eofs',
'ciphers']
ssl_params = {k: kwargs[k] for k in kwargs if k in ssl_args}
if len(ssl_params) > 0:
for k in ssl_params:
kwargs.pop(k)
ssl_params['server_side'] = True # Listening requires true
eventlet_socket = eventlet.wrap_ssl(eventlet_socket,
**ssl_params)
eventlet.wsgi.server(eventlet_socket, app,
log_output=log_output, **kwargs)
if use_reloader:
run_with_reloader(run_server, extra_files=extra_files)
else:
run_server()
elif self.server.eio.async_mode == 'gevent':
from gevent import pywsgi
try:
from geventwebsocket.handler import WebSocketHandler
websocket = True
except ImportError:
websocket = False
log = 'default'
if not log_output:
log = None
if websocket:
self.wsgi_server = pywsgi.WSGIServer(
(host, port), app, handler_class=WebSocketHandler,
log=log, **kwargs)
else:
self.wsgi_server = pywsgi.WSGIServer((host, port), app,
log=log, **kwargs)
if use_reloader:
# monkey patching is required by the reloader
from gevent import monkey
monkey.patch_all()
def run_server():
self.wsgi_server.serve_forever()
run_with_reloader(run_server, extra_files=extra_files)
else:
self.wsgi_server.serve_forever() | Run the SocketIO web server.
:param app: The Flask application instance.
:param host: The hostname or IP address for the server to listen on.
Defaults to 127.0.0.1.
:param port: The port number for the server to listen on. Defaults to
5000.
:param debug: ``True`` to start the server in debug mode, ``False`` to
start in normal mode.
:param use_reloader: ``True`` to enable the Flask reloader, ``False``
to disable it.
:param extra_files: A list of additional files that the Flask
reloader should watch. Defaults to ``None``
:param log_output: If ``True``, the server logs all incomming
connections. If ``False`` logging is disabled.
Defaults to ``True`` in debug mode, ``False``
in normal mode. Unused when the threading async
mode is used.
:param kwargs: Additional web server options. The web server options
are specific to the server used in each of the supported
async modes. Note that options provided here will
not be seen when using an external web server such
as gunicorn, since this method is not called in that
case. | train | https://github.com/miguelgrinberg/Flask-SocketIO/blob/4bef800d5e7ba7d98a6f4cd94191ff0b4496c334/flask_socketio/__init__.py#L432-L564 | [
"def run_server():\n import eventlet\n import eventlet.wsgi\n import eventlet.green\n addresses = eventlet.green.socket.getaddrinfo(host, port)\n if not addresses:\n raise RuntimeError('Could not resolve host to a valid address')\n eventlet_socket = eventlet.listen(addresses[0][4], addresses[0][0])\n\n # If provided an SSL argument, use an SSL socket\n ssl_args = ['keyfile', 'certfile', 'server_side', 'cert_reqs',\n 'ssl_version', 'ca_certs',\n 'do_handshake_on_connect', 'suppress_ragged_eofs',\n 'ciphers']\n ssl_params = {k: kwargs[k] for k in kwargs if k in ssl_args}\n if len(ssl_params) > 0:\n for k in ssl_params:\n kwargs.pop(k)\n ssl_params['server_side'] = True # Listening requires true\n eventlet_socket = eventlet.wrap_ssl(eventlet_socket,\n **ssl_params)\n\n eventlet.wsgi.server(eventlet_socket, app,\n log_output=log_output, **kwargs)\n"
] | class SocketIO(object):
"""Create a Flask-SocketIO server.
:param app: The flask application instance. If the application instance
isn't known at the time this class is instantiated, then call
``socketio.init_app(app)`` once the application instance is
available.
:param manage_session: If set to ``True``, this extension manages the user
session for Socket.IO events. If set to ``False``,
Flask's own session management is used. When using
Flask's cookie based sessions it is recommended that
you leave this set to the default of ``True``. When
using server-side sessions, a ``False`` setting
enables sharing the user session between HTTP routes
and Socket.IO events.
:param message_queue: A connection URL for a message queue service the
server can use for multi-process communication. A
message queue is not required when using a single
server process.
:param channel: The channel name, when using a message queue. If a channel
isn't specified, a default channel will be used. If
multiple clusters of SocketIO processes need to use the
same message queue without interfering with each other, then
each cluster should use a different channel.
:param path: The path where the Socket.IO server is exposed. Defaults to
``'socket.io'``. Leave this as is unless you know what you are
doing.
:param resource: Alias to ``path``.
:param kwargs: Socket.IO and Engine.IO server options.
The Socket.IO server options are detailed below:
:param client_manager: The client manager instance that will manage the
client list. When this is omitted, the client list
is stored in an in-memory structure, so the use of
multiple connected servers is not possible. In most
cases, this argument does not need to be set
explicitly.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``.
:param binary: ``True`` to support binary payloads, ``False`` to treat all
payloads as text. On Python 2, if this is set to ``True``,
``unicode`` values are treated as text, and ``str`` and
``bytes`` values are treated as binary. This option has no
effect on Python 3, where text and binary payloads are
always automatically discovered.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions. To use the same json encoder and decoder as a Flask
application, use ``flask.json``.
The Engine.IO server configuration supports the following settings:
:param async_mode: The asynchronous model to use. See the Deployment
section in the documentation for a description of the
available options. Valid async modes are
``threading``, ``eventlet``, ``gevent`` and
``gevent_uwsgi``. If this argument is not given,
``eventlet`` is tried first, then ``gevent_uwsgi``,
then ``gevent``, and finally ``threading``. The
first async mode that has all its dependencies installed
is then one that is chosen.
:param ping_timeout: The time in seconds that the client waits for the
server to respond before disconnecting. The default is
60 seconds.
:param ping_interval: The interval in seconds at which the client pings
the server. The default is 25 seconds.
:param max_http_buffer_size: The maximum size of a message when using the
polling transport. The default is 100,000,000
bytes.
:param allow_upgrades: Whether to allow transport upgrades or not. The
default is ``True``.
:param http_compression: Whether to compress packages when using the
polling transport. The default is ``True``.
:param compression_threshold: Only compress messages when their byte size
is greater than this value. The default is
1024 bytes.
:param cookie: Name of the HTTP cookie that contains the client session
id. If set to ``None``, a cookie is not sent to the client.
The default is ``'io'``.
:param cors_allowed_origins: List of origins that are allowed to connect
to this server. All origins are allowed by
default.
:param cors_credentials: Whether credentials (cookies, authentication) are
allowed in requests to this server. The default is
``True``.
:param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
a logger object to use. To disable logging set to
``False``. The default is ``False``.
"""
def __init__(self, app=None, **kwargs):
self.server = None
self.server_options = {}
self.wsgi_server = None
self.handlers = []
self.namespace_handlers = []
self.exception_handlers = {}
self.default_exception_handler = None
self.manage_session = True
# We can call init_app when:
# - we were given the Flask app instance (standard initialization)
# - we were not given the app, but we were given a message_queue
# (standard initialization for auxiliary process)
# In all other cases we collect the arguments and assume the client
# will call init_app from an app factory function.
if app is not None or 'message_queue' in kwargs:
self.init_app(app, **kwargs)
else:
self.server_options.update(kwargs)
    def init_app(self, app, **kwargs):
        """Initialize the extension for the given application.

        May be called from an application factory; options given here are
        merged with the ones given to the constructor before the
        ``socketio.Server`` instance is created.
        """
        if app is not None:
            if not hasattr(app, 'extensions'):
                app.extensions = {}  # pragma: no cover
            app.extensions['socketio'] = self
        self.server_options.update(kwargs)
        # pop() consumes the extension-only options so that only genuine
        # Socket.IO/Engine.IO options reach socketio.Server() below
        self.manage_session = self.server_options.pop('manage_session',
                                                      self.manage_session)
        if 'client_manager' not in self.server_options:
            url = self.server_options.pop('message_queue', None)
            channel = self.server_options.pop('channel', 'flask-socketio')
            # without an app this is a write-only (emit-only) connection
            write_only = app is None
            if url:
                if url.startswith(('redis://', "rediss://")):
                    queue_class = socketio.RedisManager
                elif url.startswith('zmq'):
                    queue_class = socketio.ZmqManager
                else:
                    queue_class = socketio.KombuManager
                queue = queue_class(url, channel=channel,
                                    write_only=write_only)
                self.server_options['client_manager'] = queue
        if 'json' in self.server_options and \
                self.server_options['json'] == flask_json:
            # flask's json module is tricky to use because its output
            # changes when it is invoked inside or outside the app context
            # so here to prevent any ambiguities we replace it with wrappers
            # that ensure that the app context is always present
            class FlaskSafeJSON(object):
                @staticmethod
                def dumps(*args, **kwargs):
                    with app.app_context():
                        return flask_json.dumps(*args, **kwargs)
                @staticmethod
                def loads(*args, **kwargs):
                    with app.app_context():
                        return flask_json.loads(*args, **kwargs)
            self.server_options['json'] = FlaskSafeJSON
        resource = self.server_options.pop('path', None) or \
            self.server_options.pop('resource', None) or 'socket.io'
        if resource.startswith('/'):
            resource = resource[1:]
        if os.environ.get('FLASK_RUN_FROM_CLI'):
            if self.server_options.get('async_mode') is None:
                if app is not None:
                    app.logger.warning(
                        'Flask-SocketIO is Running under Werkzeug, WebSocket '
                        'is not available.')
                self.server_options['async_mode'] = 'threading'
        self.server = socketio.Server(**self.server_options)
        self.async_mode = self.server.async_mode
        # flush any handlers/namespaces registered before the server existed
        for handler in self.handlers:
            self.server.on(handler[0], handler[1], namespace=handler[2])
        for namespace_handler in self.namespace_handlers:
            self.server.register_namespace(namespace_handler)
        if app is not None:
            # here we attach the SocketIO middleware to the SocketIO object so
            # it can be referenced later if debug middleware needs to be
            # inserted
            self.sockio_mw = _SocketIOMiddleware(self.server, app,
                                                 socketio_path=resource)
            app.wsgi_app = self.sockio_mw
    def on(self, message, namespace=None):
        """Decorator to register a SocketIO event handler.
        This decorator must be applied to SocketIO event handlers. Example::
            @socketio.on('my event', namespace='/chat')
            def handle_my_custom_event(json):
                print('received json: ' + str(json))
        :param message: The name of the event. This is normally a user defined
                        string, but a few event names are already defined. Use
                        ``'message'`` to define a handler that takes a string
                        payload, ``'json'`` to define a handler that takes a
                        JSON blob payload, ``'connect'`` or ``'disconnect'``
                        to create handlers for connection and disconnection
                        events.
        :param namespace: The namespace on which the handler is to be
                          registered. Defaults to the global namespace.
        """
        namespace = namespace or '/'
        def decorator(handler):
            def _handler(sid, *args):
                # bridge the python-socketio callback signature (sid first)
                # to the Flask-style handler, run inside a request context
                return self._handle_event(handler, message, namespace, sid,
                                          *args)
            if self.server:
                self.server.on(message, _handler, namespace=namespace)
            else:
                # server not created yet (init_app pending): queue for later
                self.handlers.append((message, _handler, namespace))
            return handler
        return decorator
def on_error(self, namespace=None):
"""Decorator to define a custom error handler for SocketIO events.
This decorator can be applied to a function that acts as an error
handler for a namespace. This handler will be invoked when a SocketIO
event handler raises an exception. The handler function must accept one
argument, which is the exception raised. Example::
@socketio.on_error(namespace='/chat')
def chat_error_handler(e):
print('An error has occurred: ' + str(e))
:param namespace: The namespace for which to register the error
handler. Defaults to the global namespace.
"""
namespace = namespace or '/'
def decorator(exception_handler):
if not callable(exception_handler):
raise ValueError('exception_handler must be callable')
self.exception_handlers[namespace] = exception_handler
return exception_handler
return decorator
def on_error_default(self, exception_handler):
"""Decorator to define a default error handler for SocketIO events.
This decorator can be applied to a function that acts as a default
error handler for any namespaces that do not have a specific handler.
Example::
@socketio.on_error_default
def error_handler(e):
print('An error has occurred: ' + str(e))
"""
if not callable(exception_handler):
raise ValueError('exception_handler must be callable')
self.default_exception_handler = exception_handler
return exception_handler
def on_event(self, message, handler, namespace=None):
"""Register a SocketIO event handler.
``on_event`` is the non-decorator version of ``'on'``.
Example::
def on_foo_event(json):
print('received json: ' + str(json))
socketio.on_event('my event', on_foo_event, namespace='/chat')
:param message: The name of the event. This is normally a user defined
string, but a few event names are already defined. Use
``'message'`` to define a handler that takes a string
payload, ``'json'`` to define a handler that takes a
JSON blob payload, ``'connect'`` or ``'disconnect'``
to create handlers for connection and disconnection
events.
:param handler: The function that handles the event.
:param namespace: The namespace on which the handler is to be
registered. Defaults to the global namespace.
"""
self.on(message, namespace=namespace)(handler)
def on_namespace(self, namespace_handler):
if not isinstance(namespace_handler, Namespace):
raise ValueError('Not a namespace instance.')
namespace_handler._set_socketio(self)
if self.server:
self.server.register_namespace(namespace_handler)
else:
self.namespace_handlers.append(namespace_handler)
    def emit(self, event, *args, **kwargs):
        """Emit a server generated SocketIO event.
        This function emits a SocketIO event to one or more connected clients.
        A JSON blob can be attached to the event as payload. This function can
        be used outside of a SocketIO event context, so it is appropriate to
        use when the server is the originator of an event, outside of any
        client context, such as in a regular HTTP request handler or a
        background task. Example::
            @app.route('/ping')
            def ping():
                socketio.emit('ping event', {'data': 42}, namespace='/chat')
        :param event: The name of the user event to emit.
        :param args: A dictionary with the JSON data to send as payload.
        :param namespace: The namespace under which the message is to be sent.
                          Defaults to the global namespace.
        :param room: Send the message to all the users in the given room. If
                     this parameter is not included, the event is sent to
                     all connected users.
        :param skip_sid: The session id of a client to ignore when broadcasting
                         or addressing a room. This is typically set to the
                         originator of the message, so that everyone except
                         that client receive the message.
        :param callback: If given, this function will be called to acknowledge
                         that the client has received the message. The
                         arguments that will be passed to the function are
                         those provided by the client. Callback functions can
                         only be used when addressing an individual client.
        """
        # extension-specific options are consumed here; whatever remains in
        # kwargs is forwarded untouched to the underlying server's emit()
        namespace = kwargs.pop('namespace', '/')
        room = kwargs.pop('room', None)
        include_self = kwargs.pop('include_self', True)
        skip_sid = kwargs.pop('skip_sid', None)
        if not include_self and not skip_sid:
            # exclude the calling client; reads flask.request, so this path
            # is only valid from inside an event context
            skip_sid = flask.request.sid
        callback = kwargs.pop('callback', None)
        self.server.emit(event, *args, namespace=namespace, room=room,
                         skip_sid=skip_sid, callback=callback, **kwargs)
def send(self, data, json=False, namespace=None, room=None,
callback=None, include_self=True, skip_sid=None, **kwargs):
"""Send a server-generated SocketIO message.
This function sends a simple SocketIO message to one or more connected
clients. The message can be a string or a JSON blob. This is a simpler
version of ``emit()``, which should be preferred. This function can be
used outside of a SocketIO event context, so it is appropriate to use
when the server is the originator of an event.
:param data: The message to send, either a string or a JSON blob.
:param json: ``True`` if ``message`` is a JSON blob, ``False``
otherwise.
:param namespace: The namespace under which the message is to be sent.
Defaults to the global namespace.
:param room: Send the message only to the users in the given room. If
this parameter is not included, the message is sent to
all connected users.
:param skip_sid: The session id of a client to ignore when broadcasting
or addressing a room. This is typically set to the
originator of the message, so that everyone except
that client receive the message.
:param callback: If given, this function will be called to acknowledge
that the client has received the message. The
arguments that will be passed to the function are
those provided by the client. Callback functions can
only be used when addressing an individual client.
"""
skip_sid = flask.request.sid if not include_self else skip_sid
if json:
self.emit('json', data, namespace=namespace, room=room,
skip_sid=skip_sid, callback=callback, **kwargs)
else:
self.emit('message', data, namespace=namespace, room=room,
skip_sid=skip_sid, callback=callback, **kwargs)
def close_room(self, room, namespace=None):
"""Close a room.
This function removes any users that are in the given room and then
deletes the room from the server. This function can be used outside
of a SocketIO event context.
:param room: The name of the room to close.
:param namespace: The namespace under which the room exists. Defaults
to the global namespace.
"""
self.server.close_room(room, namespace)
def stop(self):
"""Stop a running SocketIO web server.
This method must be called from a HTTP or SocketIO handler function.
"""
if self.server.eio.async_mode == 'threading':
func = flask.request.environ.get('werkzeug.server.shutdown')
if func:
func()
else:
raise RuntimeError('Cannot stop unknown web server')
elif self.server.eio.async_mode == 'eventlet':
raise SystemExit
elif self.server.eio.async_mode == 'gevent':
self.wsgi_server.stop()
def start_background_task(self, target, *args, **kwargs):
"""Start a background task using the appropriate async model.
This is a utility function that applications can use to start a
background task using the method that is compatible with the
selected async mode.
:param target: the target function to execute.
:param args: arguments to pass to the function.
:param kwargs: keyword arguments to pass to the function.
This function returns an object compatible with the `Thread` class in
the Python standard library. The `start()` method on this object is
already called by this function.
"""
return self.server.start_background_task(target, *args, **kwargs)
def sleep(self, seconds=0):
"""Sleep for the requested amount of time using the appropriate async
model.
This is a utility function that applications can use to put a task to
sleep without having to worry about using the correct call for the
selected async mode.
"""
return self.server.sleep(seconds)
def test_client(self, app, namespace=None, query_string=None,
headers=None, flask_test_client=None):
"""The Socket.IO test client is useful for testing a Flask-SocketIO
server. It works in a similar way to the Flask Test Client, but
adapted to the Socket.IO server.
:param app: The Flask application instance.
:param namespace: The namespace for the client. If not provided, the
client connects to the server on the global
namespace.
:param query_string: A string with custom query string arguments.
:param headers: A dictionary with custom HTTP headers.
:param flask_test_client: The instance of the Flask test client
currently in use. Passing the Flask test
client is optional, but is necessary if you
want the Flask user session and any other
cookies set in HTTP routes accessible from
Socket.IO events.
"""
return SocketIOTestClient(app, self, namespace=namespace,
query_string=query_string, headers=headers,
flask_test_client=flask_test_client)
    def _handle_event(self, handler, message, namespace, sid, *args):
        """Invoke a user event handler inside a Flask request context.

        Restores (or creates) the per-client session, attaches Socket.IO
        metadata to the request object and routes exceptions to the
        registered error handlers.

        :param handler: the user's event handler function.
        :param message: the event name (``'connect'``, ``'message'``, ...).
        :param namespace: the namespace the event arrived on.
        :param sid: the Socket.IO session id of the client.
        :param args: the payload arguments sent by the client.
        """
        if sid not in self.server.environ:
            # we don't have record of this client, ignore this event
            return '', 400
        app = self.server.environ[sid]['flask.app']
        with app.request_context(self.server.environ[sid]):
            if self.manage_session:
                # manage a separate session for this client's Socket.IO events
                # created as a copy of the regular user session
                if 'saved_session' not in self.server.environ[sid]:
                    self.server.environ[sid]['saved_session'] = \
                        _ManagedSession(flask.session)
                session_obj = self.server.environ[sid]['saved_session']
            else:
                # let Flask handle the user session
                # for cookie based sessions, this effectively freezes the
                # session to its state at connection time
                # for server-side sessions, this allows HTTP and Socket.IO to
                # share the session, with both having read/write access to it
                session_obj = flask.session._get_current_object()
            _request_ctx_stack.top.session = session_obj
            # expose Socket.IO metadata on the request object
            flask.request.sid = sid
            flask.request.namespace = namespace
            flask.request.event = {'message': message, 'args': args}
            try:
                if message == 'connect':
                    # connect handlers never receive a payload
                    ret = handler()
                else:
                    ret = handler(*args)
            except:
                # NOTE(review): bare except also traps BaseException (e.g.
                # the SystemExit raised by stop()), so error handlers may
                # receive it as well — confirm this is intended
                err_handler = self.exception_handlers.get(
                    namespace, self.default_exception_handler)
                if err_handler is None:
                    raise
                type, value, traceback = sys.exc_info()
                return err_handler(value)
            if not self.manage_session:
                # when Flask is managing the user session, it needs to save it
                if not hasattr(session_obj, 'modified') or session_obj.modified:
                    resp = app.response_class()
                    app.session_interface.save_session(app, session_obj, resp)
            return ret
|
miguelgrinberg/Flask-SocketIO | flask_socketio/__init__.py | SocketIO.stop | python | def stop(self):
if self.server.eio.async_mode == 'threading':
func = flask.request.environ.get('werkzeug.server.shutdown')
if func:
func()
else:
raise RuntimeError('Cannot stop unknown web server')
elif self.server.eio.async_mode == 'eventlet':
raise SystemExit
elif self.server.eio.async_mode == 'gevent':
self.wsgi_server.stop() | Stop a running SocketIO web server.
This method must be called from a HTTP or SocketIO handler function. | train | https://github.com/miguelgrinberg/Flask-SocketIO/blob/4bef800d5e7ba7d98a6f4cd94191ff0b4496c334/flask_socketio/__init__.py#L566-L580 | null | class SocketIO(object):
"""Create a Flask-SocketIO server.
:param app: The flask application instance. If the application instance
isn't known at the time this class is instantiated, then call
``socketio.init_app(app)`` once the application instance is
available.
:param manage_session: If set to ``True``, this extension manages the user
session for Socket.IO events. If set to ``False``,
Flask's own session management is used. When using
Flask's cookie based sessions it is recommended that
you leave this set to the default of ``True``. When
using server-side sessions, a ``False`` setting
enables sharing the user session between HTTP routes
and Socket.IO events.
:param message_queue: A connection URL for a message queue service the
server can use for multi-process communication. A
message queue is not required when using a single
server process.
:param channel: The channel name, when using a message queue. If a channel
isn't specified, a default channel will be used. If
multiple clusters of SocketIO processes need to use the
same message queue without interfering with each other, then
each cluster should use a different channel.
:param path: The path where the Socket.IO server is exposed. Defaults to
``'socket.io'``. Leave this as is unless you know what you are
doing.
:param resource: Alias to ``path``.
:param kwargs: Socket.IO and Engine.IO server options.
The Socket.IO server options are detailed below:
:param client_manager: The client manager instance that will manage the
client list. When this is omitted, the client list
is stored in an in-memory structure, so the use of
multiple connected servers is not possible. In most
cases, this argument does not need to be set
explicitly.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``.
:param binary: ``True`` to support binary payloads, ``False`` to treat all
payloads as text. On Python 2, if this is set to ``True``,
``unicode`` values are treated as text, and ``str`` and
``bytes`` values are treated as binary. This option has no
effect on Python 3, where text and binary payloads are
always automatically discovered.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions. To use the same json encoder and decoder as a Flask
application, use ``flask.json``.
The Engine.IO server configuration supports the following settings:
:param async_mode: The asynchronous model to use. See the Deployment
section in the documentation for a description of the
available options. Valid async modes are
``threading``, ``eventlet``, ``gevent`` and
``gevent_uwsgi``. If this argument is not given,
``eventlet`` is tried first, then ``gevent_uwsgi``,
                       then ``gevent``, and finally ``threading``. The
                       first async mode that has all its dependencies
                       installed is then the one that is chosen.
:param ping_timeout: The time in seconds that the client waits for the
server to respond before disconnecting. The default is
60 seconds.
:param ping_interval: The interval in seconds at which the client pings
the server. The default is 25 seconds.
:param max_http_buffer_size: The maximum size of a message when using the
polling transport. The default is 100,000,000
bytes.
:param allow_upgrades: Whether to allow transport upgrades or not. The
default is ``True``.
:param http_compression: Whether to compress packages when using the
polling transport. The default is ``True``.
:param compression_threshold: Only compress messages when their byte size
is greater than this value. The default is
1024 bytes.
:param cookie: Name of the HTTP cookie that contains the client session
id. If set to ``None``, a cookie is not sent to the client.
The default is ``'io'``.
:param cors_allowed_origins: List of origins that are allowed to connect
to this server. All origins are allowed by
default.
:param cors_credentials: Whether credentials (cookies, authentication) are
allowed in requests to this server. The default is
``True``.
:param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
a logger object to use. To disable logging set to
``False``. The default is ``False``.
"""
    def __init__(self, app=None, **kwargs):
        """Store the constructor arguments and optionally initialize.

        :param app: the optional Flask application instance.
        :param kwargs: Socket.IO and Engine.IO server options (see the
                       class docstring for the complete list).
        """
        self.server = None  # socketio.Server, created in init_app()
        self.server_options = {}  # accumulated options for the server
        self.wsgi_server = None  # WSGI server instance (gevent mode only)
        self.handlers = []  # (event, handler, namespace) queued until init_app
        self.namespace_handlers = []  # Namespace objects queued until init_app
        self.exception_handlers = {}  # per-namespace error handlers
        self.default_exception_handler = None  # fallback error handler
        self.manage_session = True  # manage a Socket.IO specific user session
        # We can call init_app when:
        # - we were given the Flask app instance (standard initialization)
        # - we were not given the app, but we were given a message_queue
        #   (standard initialization for auxiliary process)
        # In all other cases we collect the arguments and assume the client
        # will call init_app from an app factory function.
        if app is not None or 'message_queue' in kwargs:
            self.init_app(app, **kwargs)
        else:
            self.server_options.update(kwargs)
    def init_app(self, app, **kwargs):
        """Initialize the extension for the given application.

        May be called from an application factory; options given here are
        merged with the ones given to the constructor before the
        ``socketio.Server`` instance is created.
        """
        if app is not None:
            if not hasattr(app, 'extensions'):
                app.extensions = {}  # pragma: no cover
            app.extensions['socketio'] = self
        self.server_options.update(kwargs)
        # pop() consumes the extension-only options so that only genuine
        # Socket.IO/Engine.IO options reach socketio.Server() below
        self.manage_session = self.server_options.pop('manage_session',
                                                      self.manage_session)
        if 'client_manager' not in self.server_options:
            url = self.server_options.pop('message_queue', None)
            channel = self.server_options.pop('channel', 'flask-socketio')
            # without an app this is a write-only (emit-only) connection
            write_only = app is None
            if url:
                if url.startswith(('redis://', "rediss://")):
                    queue_class = socketio.RedisManager
                elif url.startswith('zmq'):
                    queue_class = socketio.ZmqManager
                else:
                    queue_class = socketio.KombuManager
                queue = queue_class(url, channel=channel,
                                    write_only=write_only)
                self.server_options['client_manager'] = queue
        if 'json' in self.server_options and \
                self.server_options['json'] == flask_json:
            # flask's json module is tricky to use because its output
            # changes when it is invoked inside or outside the app context
            # so here to prevent any ambiguities we replace it with wrappers
            # that ensure that the app context is always present
            class FlaskSafeJSON(object):
                @staticmethod
                def dumps(*args, **kwargs):
                    with app.app_context():
                        return flask_json.dumps(*args, **kwargs)
                @staticmethod
                def loads(*args, **kwargs):
                    with app.app_context():
                        return flask_json.loads(*args, **kwargs)
            self.server_options['json'] = FlaskSafeJSON
        resource = self.server_options.pop('path', None) or \
            self.server_options.pop('resource', None) or 'socket.io'
        if resource.startswith('/'):
            resource = resource[1:]
        if os.environ.get('FLASK_RUN_FROM_CLI'):
            if self.server_options.get('async_mode') is None:
                if app is not None:
                    app.logger.warning(
                        'Flask-SocketIO is Running under Werkzeug, WebSocket '
                        'is not available.')
                self.server_options['async_mode'] = 'threading'
        self.server = socketio.Server(**self.server_options)
        self.async_mode = self.server.async_mode
        # flush any handlers/namespaces registered before the server existed
        for handler in self.handlers:
            self.server.on(handler[0], handler[1], namespace=handler[2])
        for namespace_handler in self.namespace_handlers:
            self.server.register_namespace(namespace_handler)
        if app is not None:
            # here we attach the SocketIO middleware to the SocketIO object so
            # it can be referenced later if debug middleware needs to be
            # inserted
            self.sockio_mw = _SocketIOMiddleware(self.server, app,
                                                 socketio_path=resource)
            app.wsgi_app = self.sockio_mw
    def on(self, message, namespace=None):
        """Decorator to register a SocketIO event handler.
        This decorator must be applied to SocketIO event handlers. Example::
            @socketio.on('my event', namespace='/chat')
            def handle_my_custom_event(json):
                print('received json: ' + str(json))
        :param message: The name of the event. This is normally a user defined
                        string, but a few event names are already defined. Use
                        ``'message'`` to define a handler that takes a string
                        payload, ``'json'`` to define a handler that takes a
                        JSON blob payload, ``'connect'`` or ``'disconnect'``
                        to create handlers for connection and disconnection
                        events.
        :param namespace: The namespace on which the handler is to be
                          registered. Defaults to the global namespace.
        """
        namespace = namespace or '/'
        def decorator(handler):
            def _handler(sid, *args):
                # bridge the python-socketio callback signature (sid first)
                # to the Flask-style handler, run inside a request context
                return self._handle_event(handler, message, namespace, sid,
                                          *args)
            if self.server:
                self.server.on(message, _handler, namespace=namespace)
            else:
                # server not created yet (init_app pending): queue for later
                self.handlers.append((message, _handler, namespace))
            return handler
        return decorator
def on_error(self, namespace=None):
"""Decorator to define a custom error handler for SocketIO events.
This decorator can be applied to a function that acts as an error
handler for a namespace. This handler will be invoked when a SocketIO
event handler raises an exception. The handler function must accept one
argument, which is the exception raised. Example::
@socketio.on_error(namespace='/chat')
def chat_error_handler(e):
print('An error has occurred: ' + str(e))
:param namespace: The namespace for which to register the error
handler. Defaults to the global namespace.
"""
namespace = namespace or '/'
def decorator(exception_handler):
if not callable(exception_handler):
raise ValueError('exception_handler must be callable')
self.exception_handlers[namespace] = exception_handler
return exception_handler
return decorator
def on_error_default(self, exception_handler):
"""Decorator to define a default error handler for SocketIO events.
This decorator can be applied to a function that acts as a default
error handler for any namespaces that do not have a specific handler.
Example::
@socketio.on_error_default
def error_handler(e):
print('An error has occurred: ' + str(e))
"""
if not callable(exception_handler):
raise ValueError('exception_handler must be callable')
self.default_exception_handler = exception_handler
return exception_handler
def on_event(self, message, handler, namespace=None):
"""Register a SocketIO event handler.
``on_event`` is the non-decorator version of ``'on'``.
Example::
def on_foo_event(json):
print('received json: ' + str(json))
socketio.on_event('my event', on_foo_event, namespace='/chat')
:param message: The name of the event. This is normally a user defined
string, but a few event names are already defined. Use
``'message'`` to define a handler that takes a string
payload, ``'json'`` to define a handler that takes a
JSON blob payload, ``'connect'`` or ``'disconnect'``
to create handlers for connection and disconnection
events.
:param handler: The function that handles the event.
:param namespace: The namespace on which the handler is to be
registered. Defaults to the global namespace.
"""
self.on(message, namespace=namespace)(handler)
def on_namespace(self, namespace_handler):
if not isinstance(namespace_handler, Namespace):
raise ValueError('Not a namespace instance.')
namespace_handler._set_socketio(self)
if self.server:
self.server.register_namespace(namespace_handler)
else:
self.namespace_handlers.append(namespace_handler)
    def emit(self, event, *args, **kwargs):
        """Emit a server generated SocketIO event.
        This function emits a SocketIO event to one or more connected clients.
        A JSON blob can be attached to the event as payload. This function can
        be used outside of a SocketIO event context, so it is appropriate to
        use when the server is the originator of an event, outside of any
        client context, such as in a regular HTTP request handler or a
        background task. Example::
            @app.route('/ping')
            def ping():
                socketio.emit('ping event', {'data': 42}, namespace='/chat')
        :param event: The name of the user event to emit.
        :param args: A dictionary with the JSON data to send as payload.
        :param namespace: The namespace under which the message is to be sent.
                          Defaults to the global namespace.
        :param room: Send the message to all the users in the given room. If
                     this parameter is not included, the event is sent to
                     all connected users.
        :param skip_sid: The session id of a client to ignore when broadcasting
                         or addressing a room. This is typically set to the
                         originator of the message, so that everyone except
                         that client receive the message.
        :param callback: If given, this function will be called to acknowledge
                         that the client has received the message. The
                         arguments that will be passed to the function are
                         those provided by the client. Callback functions can
                         only be used when addressing an individual client.
        """
        # extension-specific options are consumed here; whatever remains in
        # kwargs is forwarded untouched to the underlying server's emit()
        namespace = kwargs.pop('namespace', '/')
        room = kwargs.pop('room', None)
        include_self = kwargs.pop('include_self', True)
        skip_sid = kwargs.pop('skip_sid', None)
        if not include_self and not skip_sid:
            # exclude the calling client; reads flask.request, so this path
            # is only valid from inside an event context
            skip_sid = flask.request.sid
        callback = kwargs.pop('callback', None)
        self.server.emit(event, *args, namespace=namespace, room=room,
                         skip_sid=skip_sid, callback=callback, **kwargs)
def send(self, data, json=False, namespace=None, room=None,
callback=None, include_self=True, skip_sid=None, **kwargs):
"""Send a server-generated SocketIO message.
This function sends a simple SocketIO message to one or more connected
clients. The message can be a string or a JSON blob. This is a simpler
version of ``emit()``, which should be preferred. This function can be
used outside of a SocketIO event context, so it is appropriate to use
when the server is the originator of an event.
:param data: The message to send, either a string or a JSON blob.
:param json: ``True`` if ``message`` is a JSON blob, ``False``
otherwise.
:param namespace: The namespace under which the message is to be sent.
Defaults to the global namespace.
:param room: Send the message only to the users in the given room. If
this parameter is not included, the message is sent to
all connected users.
:param skip_sid: The session id of a client to ignore when broadcasting
or addressing a room. This is typically set to the
originator of the message, so that everyone except
that client receive the message.
:param callback: If given, this function will be called to acknowledge
that the client has received the message. The
arguments that will be passed to the function are
those provided by the client. Callback functions can
only be used when addressing an individual client.
"""
skip_sid = flask.request.sid if not include_self else skip_sid
if json:
self.emit('json', data, namespace=namespace, room=room,
skip_sid=skip_sid, callback=callback, **kwargs)
else:
self.emit('message', data, namespace=namespace, room=room,
skip_sid=skip_sid, callback=callback, **kwargs)
def close_room(self, room, namespace=None):
"""Close a room.
This function removes any users that are in the given room and then
deletes the room from the server. This function can be used outside
of a SocketIO event context.
:param room: The name of the room to close.
:param namespace: The namespace under which the room exists. Defaults
to the global namespace.
"""
self.server.close_room(room, namespace)
    def run(self, app, host=None, port=None, **kwargs):
        """Run the SocketIO web server.
        :param app: The Flask application instance.
        :param host: The hostname or IP address for the server to listen on.
                     Defaults to 127.0.0.1.
        :param port: The port number for the server to listen on. Defaults to
                     5000.
        :param debug: ``True`` to start the server in debug mode, ``False`` to
                      start in normal mode.
        :param use_reloader: ``True`` to enable the Flask reloader, ``False``
                             to disable it.
        :param extra_files: A list of additional files that the Flask
                            reloader should watch. Defaults to ``None``
        :param log_output: If ``True``, the server logs all incoming
                           connections. If ``False`` logging is disabled.
                           Defaults to ``True`` in debug mode, ``False``
                           in normal mode. Unused when the threading async
                           mode is used.
        :param kwargs: Additional web server options. The web server options
                       are specific to the server used in each of the supported
                       async modes. Note that options provided here will
                       not be seen when using an external web server such
                       as gunicorn, since this method is not called in that
                       case.
        """
        if host is None:
            host = '127.0.0.1'
        if port is None:
            # derive the port from SERVER_NAME when it carries one
            server_name = app.config['SERVER_NAME']
            if server_name and ':' in server_name:
                port = int(server_name.rsplit(':', 1)[1])
            else:
                port = 5000
        # extension-specific options; the rest of kwargs goes to the server
        debug = kwargs.pop('debug', app.debug)
        log_output = kwargs.pop('log_output', debug)
        use_reloader = kwargs.pop('use_reloader', debug)
        extra_files = kwargs.pop('extra_files', None)
        app.debug = debug
        if app.debug and self.server.eio.async_mode != 'threading':
            # in debug mode, put the Werkzeug debug middleware between the
            # Flask-SocketIO middleware and the Flask application instance,
            # so that both HTTP and Socket.IO traffic pass through it
            self.sockio_mw.wsgi_app = DebuggedApplication(self.sockio_mw.wsgi_app,
                                                          evalex=True)
        if self.server.eio.async_mode == 'threading':
            # plain Werkzeug server: no WebSocket transport, polling only
            from werkzeug._internal import _log
            _log('warning', 'WebSocket transport not available. Install '
                            'eventlet or gevent and gevent-websocket for '
                            'improved performance.')
            app.run(host=host, port=port, threaded=True,
                    use_reloader=use_reloader, **kwargs)
        elif self.server.eio.async_mode == 'eventlet':
            def run_server():
                import eventlet
                import eventlet.wsgi
                import eventlet.green
                addresses = eventlet.green.socket.getaddrinfo(host, port)
                if not addresses:
                    raise RuntimeError('Could not resolve host to a valid address')
                eventlet_socket = eventlet.listen(addresses[0][4], addresses[0][0])
                # If provided an SSL argument, use an SSL socket
                ssl_args = ['keyfile', 'certfile', 'server_side', 'cert_reqs',
                            'ssl_version', 'ca_certs',
                            'do_handshake_on_connect', 'suppress_ragged_eofs',
                            'ciphers']
                ssl_params = {k: kwargs[k] for k in kwargs if k in ssl_args}
                if len(ssl_params) > 0:
                    for k in ssl_params:
                        kwargs.pop(k)
                    ssl_params['server_side'] = True  # Listening requires true
                    eventlet_socket = eventlet.wrap_ssl(eventlet_socket,
                                                        **ssl_params)
                eventlet.wsgi.server(eventlet_socket, app,
                                     log_output=log_output, **kwargs)
            if use_reloader:
                run_with_reloader(run_server, extra_files=extra_files)
            else:
                run_server()
        elif self.server.eio.async_mode == 'gevent':
            from gevent import pywsgi
            try:
                from geventwebsocket.handler import WebSocketHandler
                websocket = True
            except ImportError:
                # gevent-websocket not installed: fall back to polling only
                websocket = False
            log = 'default'
            if not log_output:
                log = None
            if websocket:
                self.wsgi_server = pywsgi.WSGIServer(
                    (host, port), app, handler_class=WebSocketHandler,
                    log=log, **kwargs)
            else:
                self.wsgi_server = pywsgi.WSGIServer((host, port), app,
                                                     log=log, **kwargs)
            if use_reloader:
                # monkey patching is required by the reloader
                from gevent import monkey
                monkey.patch_all()
                def run_server():
                    self.wsgi_server.serve_forever()
                run_with_reloader(run_server, extra_files=extra_files)
            else:
                self.wsgi_server.serve_forever()
def start_background_task(self, target, *args, **kwargs):
    """Launch *target* as a background task.

    Delegates to the underlying Socket.IO server so the task is started
    with whatever async model is active (threading, eventlet or gevent),
    without the application having to know which one was selected.

    :param target: the callable to execute in the background.
    :param args: positional arguments forwarded to ``target``.
    :param kwargs: keyword arguments forwarded to ``target``.

    Returns an object compatible with the standard library ``Thread``
    class; its ``start()`` method has already been invoked.
    """
    server = self.server
    return server.start_background_task(target, *args, **kwargs)
def sleep(self, seconds=0):
    """Suspend the calling task for *seconds*.

    Uses the sleep primitive of the active async mode, so application
    code can pause without knowing which async model was selected.
    """
    duration = seconds
    return self.server.sleep(duration)
def test_client(self, app, namespace=None, query_string=None,
                headers=None, flask_test_client=None):
    """Create a Socket.IO test client bound to this server.

    Works much like the Flask test client, but speaks the Socket.IO
    protocol instead of plain HTTP.

    :param app: the Flask application instance.
    :param namespace: the namespace the client connects to; the global
                      namespace is used when not given.
    :param query_string: a string with custom query string arguments.
    :param headers: a dictionary with custom HTTP headers.
    :param flask_test_client: the Flask test client currently in use.
                              Optional, but required when the Flask user
                              session or cookies set by HTTP routes must
                              be visible to Socket.IO events.
    """
    return SocketIOTestClient(
        app,
        self,
        namespace=namespace,
        query_string=query_string,
        headers=headers,
        flask_test_client=flask_test_client,
    )
def _handle_event(self, handler, message, namespace, sid, *args):
    """Invoke a user event handler inside a Flask request context.

    Rebuilds a request context from the WSGI environ captured at
    connection time, installs the per-client session object, then calls
    *handler*.  Exceptions raised by the handler are routed to the error
    handler registered for *namespace* (or the default one).

    :param handler: the user-supplied event handler to invoke.
    :param message: the event name ('connect', 'message', 'json', or a
                    custom event name).
    :param namespace: the namespace the event was received on.
    :param sid: the Socket.IO session id of the client.
    :param args: payload arguments passed through to the handler.
    """
    if sid not in self.server.environ:
        # we don't have record of this client, ignore this event
        return '', 400
    app = self.server.environ[sid]['flask.app']
    with app.request_context(self.server.environ[sid]):
        if self.manage_session:
            # manage a separate session for this client's Socket.IO events
            # created as a copy of the regular user session
            if 'saved_session' not in self.server.environ[sid]:
                self.server.environ[sid]['saved_session'] = \
                    _ManagedSession(flask.session)
            session_obj = self.server.environ[sid]['saved_session']
        else:
            # let Flask handle the user session
            # for cookie based sessions, this effectively freezes the
            # session to its state at connection time
            # for server-side sessions, this allows HTTP and Socket.IO to
            # share the session, with both having read/write access to it
            session_obj = flask.session._get_current_object()
        _request_ctx_stack.top.session = session_obj
        # expose Socket.IO specific attributes on the request object so
        # handlers can inspect the client and the triggering event
        flask.request.sid = sid
        flask.request.namespace = namespace
        flask.request.event = {'message': message, 'args': args}
        try:
            if message == 'connect':
                # connect handlers take no payload arguments
                ret = handler()
            else:
                ret = handler(*args)
        except:
            # NOTE(review): bare except also catches non-Exception errors
            # (e.g. SystemExit); presumably intentional so every failure
            # reaches the registered error handler -- confirm
            err_handler = self.exception_handlers.get(
                namespace, self.default_exception_handler)
            if err_handler is None:
                raise
            type, value, traceback = sys.exc_info()
            return err_handler(value)
        if not self.manage_session:
            # when Flask is managing the user session, it needs to save it
            if not hasattr(session_obj, 'modified') or session_obj.modified:
                resp = app.response_class()
                app.session_interface.save_session(app, session_obj, resp)
        return ret
|
miguelgrinberg/Flask-SocketIO | flask_socketio/__init__.py | SocketIO.start_background_task | python | def start_background_task(self, target, *args, **kwargs):
return self.server.start_background_task(target, *args, **kwargs) | Start a background task using the appropriate async model.
This is a utility function that applications can use to start a
background task using the method that is compatible with the
selected async mode.
:param target: the target function to execute.
:param args: arguments to pass to the function.
:param kwargs: keyword arguments to pass to the function.
This function returns an object compatible with the `Thread` class in
the Python standard library. The `start()` method on this object is
already called by this function. | train | https://github.com/miguelgrinberg/Flask-SocketIO/blob/4bef800d5e7ba7d98a6f4cd94191ff0b4496c334/flask_socketio/__init__.py#L582-L597 | null | class SocketIO(object):
"""Create a Flask-SocketIO server.
:param app: The flask application instance. If the application instance
isn't known at the time this class is instantiated, then call
``socketio.init_app(app)`` once the application instance is
available.
:param manage_session: If set to ``True``, this extension manages the user
session for Socket.IO events. If set to ``False``,
Flask's own session management is used. When using
Flask's cookie based sessions it is recommended that
you leave this set to the default of ``True``. When
using server-side sessions, a ``False`` setting
enables sharing the user session between HTTP routes
and Socket.IO events.
:param message_queue: A connection URL for a message queue service the
server can use for multi-process communication. A
message queue is not required when using a single
server process.
:param channel: The channel name, when using a message queue. If a channel
isn't specified, a default channel will be used. If
multiple clusters of SocketIO processes need to use the
same message queue without interfering with each other, then
each cluster should use a different channel.
:param path: The path where the Socket.IO server is exposed. Defaults to
``'socket.io'``. Leave this as is unless you know what you are
doing.
:param resource: Alias to ``path``.
:param kwargs: Socket.IO and Engine.IO server options.
The Socket.IO server options are detailed below:
:param client_manager: The client manager instance that will manage the
client list. When this is omitted, the client list
is stored in an in-memory structure, so the use of
multiple connected servers is not possible. In most
cases, this argument does not need to be set
explicitly.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``.
:param binary: ``True`` to support binary payloads, ``False`` to treat all
payloads as text. On Python 2, if this is set to ``True``,
``unicode`` values are treated as text, and ``str`` and
``bytes`` values are treated as binary. This option has no
effect on Python 3, where text and binary payloads are
always automatically discovered.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions. To use the same json encoder and decoder as a Flask
application, use ``flask.json``.
The Engine.IO server configuration supports the following settings:
:param async_mode: The asynchronous model to use. See the Deployment
section in the documentation for a description of the
available options. Valid async modes are
``threading``, ``eventlet``, ``gevent`` and
``gevent_uwsgi``. If this argument is not given,
``eventlet`` is tried first, then ``gevent_uwsgi``,
then ``gevent``, and finally ``threading``. The
first async mode that has all its dependencies installed
is then one that is chosen.
:param ping_timeout: The time in seconds that the client waits for the
server to respond before disconnecting. The default is
60 seconds.
:param ping_interval: The interval in seconds at which the client pings
the server. The default is 25 seconds.
:param max_http_buffer_size: The maximum size of a message when using the
polling transport. The default is 100,000,000
bytes.
:param allow_upgrades: Whether to allow transport upgrades or not. The
default is ``True``.
:param http_compression: Whether to compress packages when using the
polling transport. The default is ``True``.
:param compression_threshold: Only compress messages when their byte size
is greater than this value. The default is
1024 bytes.
:param cookie: Name of the HTTP cookie that contains the client session
id. If set to ``None``, a cookie is not sent to the client.
The default is ``'io'``.
:param cors_allowed_origins: List of origins that are allowed to connect
to this server. All origins are allowed by
default.
:param cors_credentials: Whether credentials (cookies, authentication) are
allowed in requests to this server. The default is
``True``.
:param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
a logger object to use. To disable logging set to
``False``. The default is ``False``.
"""
def __init__(self, app=None, **kwargs):
    """Collect configuration and, when possible, initialize the server.

    :param app: the Flask application instance, or ``None`` when using
                the application factory pattern (``init_app()`` is then
                called later with the real app).
    :param kwargs: Socket.IO / Engine.IO server options.
    """
    # State that init_app() fills in once the Flask app is known.
    self.server = None
    self.server_options = {}
    self.wsgi_server = None
    self.handlers = []
    self.namespace_handlers = []
    self.exception_handlers = {}
    self.default_exception_handler = None
    self.manage_session = True
    # We can call init_app when:
    # - we were given the Flask app instance (standard initialization)
    # - we were not given the app, but we were given a message_queue
    #   (standard initialization for auxiliary process)
    # In all other cases we collect the arguments and assume the client
    # will call init_app from an app factory function.
    if app is not None or 'message_queue' in kwargs:
        self.init_app(app, **kwargs)
    else:
        self.server_options.update(kwargs)
def init_app(self, app, **kwargs):
    """Attach this extension to a Flask application.

    Merges *kwargs* into the stored server options, sets up the optional
    message queue for multi-process deployments, creates the Socket.IO
    server, and installs the Socket.IO WSGI middleware on *app*.

    :param app: the Flask application instance, or ``None`` for a
                write-only auxiliary process that only posts to the
                message queue.
    :param kwargs: Socket.IO / Engine.IO server options.
    """
    if app is not None:
        if not hasattr(app, 'extensions'):
            app.extensions = {}  # pragma: no cover
        app.extensions['socketio'] = self
    self.server_options.update(kwargs)
    self.manage_session = self.server_options.pop('manage_session',
                                                  self.manage_session)
    if 'client_manager' not in self.server_options:
        url = self.server_options.pop('message_queue', None)
        channel = self.server_options.pop('channel', 'flask-socketio')
        write_only = app is None
        if url:
            # select the queue backend from the URL scheme
            if url.startswith(('redis://', "rediss://")):
                queue_class = socketio.RedisManager
            elif url.startswith('zmq'):
                queue_class = socketio.ZmqManager
            else:
                queue_class = socketio.KombuManager
            queue = queue_class(url, channel=channel,
                                write_only=write_only)
            self.server_options['client_manager'] = queue
    if 'json' in self.server_options and \
            self.server_options['json'] == flask_json:
        # flask's json module is tricky to use because its output
        # changes when it is invoked inside or outside the app context
        # so here to prevent any ambiguities we replace it with wrappers
        # that ensure that the app context is always present
        class FlaskSafeJSON(object):
            @staticmethod
            def dumps(*args, **kwargs):
                with app.app_context():
                    return flask_json.dumps(*args, **kwargs)

            @staticmethod
            def loads(*args, **kwargs):
                with app.app_context():
                    return flask_json.loads(*args, **kwargs)
        self.server_options['json'] = FlaskSafeJSON
    resource = self.server_options.pop('path', None) or \
        self.server_options.pop('resource', None) or 'socket.io'
    if resource.startswith('/'):
        # strip a leading slash from the resource path
        resource = resource[1:]
    if os.environ.get('FLASK_RUN_FROM_CLI'):
        if self.server_options.get('async_mode') is None:
            if app is not None:
                app.logger.warning(
                    'Flask-SocketIO is Running under Werkzeug, WebSocket '
                    'is not available.')
            self.server_options['async_mode'] = 'threading'
    self.server = socketio.Server(**self.server_options)
    self.async_mode = self.server.async_mode
    # replay any registrations that were made before the server existed
    for handler in self.handlers:
        self.server.on(handler[0], handler[1], namespace=handler[2])
    for namespace_handler in self.namespace_handlers:
        self.server.register_namespace(namespace_handler)
    if app is not None:
        # here we attach the SocketIO middlware to the SocketIO object so it
        # can be referenced later if debug middleware needs to be inserted
        self.sockio_mw = _SocketIOMiddleware(self.server, app,
                                             socketio_path=resource)
        app.wsgi_app = self.sockio_mw
def on(self, message, namespace=None):
    """Decorator that registers a Socket.IO event handler.

    Example::

        @socketio.on('my event', namespace='/chat')
        def handle_my_custom_event(json):
            print('received json: ' + str(json))

    :param message: the event name. Usually a user defined string, but
                    ``'message'``, ``'json'``, ``'connect'`` and
                    ``'disconnect'`` have predefined meanings.
    :param namespace: the namespace the handler is registered on;
                      defaults to the global namespace.
    """
    ns = namespace or '/'

    def register(user_handler):
        def dispatch(sid, *event_args):
            # bridge from the raw Socket.IO callback into the Flask
            # request-context aware dispatcher
            return self._handle_event(user_handler, message, ns, sid,
                                      *event_args)

        if self.server:
            self.server.on(message, dispatch, namespace=ns)
        else:
            # server not created yet: queue for init_app() to replay
            self.handlers.append((message, dispatch, ns))
        return user_handler

    return register
def on_error(self, namespace=None):
    """Decorator that installs an error handler for one namespace.

    The decorated function is invoked whenever a Socket.IO event handler
    in that namespace raises an exception; it receives the exception as
    its only argument. Example::

        @socketio.on_error(namespace='/chat')
        def chat_error_handler(e):
            print('An error has occurred: ' + str(e))

    :param namespace: the namespace to register the error handler on;
                      defaults to the global namespace.
    """
    ns = namespace or '/'

    def register(handler):
        if not callable(handler):
            raise ValueError('exception_handler must be callable')
        self.exception_handlers[ns] = handler
        return handler

    return register
def on_error_default(self, exception_handler):
    """Decorator that installs the fallback error handler.

    The decorated function is invoked for exceptions raised in any
    namespace that has no specific error handler of its own. Example::

        @socketio.on_error_default
        def error_handler(e):
            print('An error has occurred: ' + str(e))

    :raises ValueError: if the decorated object is not callable.
    """
    if callable(exception_handler):
        self.default_exception_handler = exception_handler
        return exception_handler
    raise ValueError('exception_handler must be callable')
def on_event(self, message, handler, namespace=None):
    """Register a Socket.IO event handler without using a decorator.

    Non-decorator equivalent of :meth:`on`. Example::

        def on_foo_event(json):
            print('received json: ' + str(json))

        socketio.on_event('my event', on_foo_event, namespace='/chat')

    :param message: the event name. Usually a user defined string, but
                    ``'message'``, ``'json'``, ``'connect'`` and
                    ``'disconnect'`` have predefined meanings.
    :param handler: the function that handles the event.
    :param namespace: the namespace the handler is registered on;
                      defaults to the global namespace.
    """
    decorator = self.on(message, namespace=namespace)
    decorator(handler)
def on_namespace(self, namespace_handler):
    """Register a class-based namespace handler.

    :param namespace_handler: a ``Namespace`` subclass instance.
    :raises ValueError: if the argument is not a ``Namespace`` instance.
    """
    if not isinstance(namespace_handler, Namespace):
        raise ValueError('Not a namespace instance.')
    # give the namespace a back-reference to this SocketIO instance
    namespace_handler._set_socketio(self)
    if self.server:
        self.server.register_namespace(namespace_handler)
    else:
        # server not created yet; defer registration until init_app()
        self.namespace_handlers.append(namespace_handler)
def emit(self, event, *args, **kwargs):
    """Emit a server generated Socket.IO event.

    Sends an event, optionally carrying a JSON payload, to one or more
    connected clients. Usable outside of a Socket.IO event context, e.g.
    from an HTTP route or a background task::

        @app.route('/ping')
        def ping():
            socketio.emit('ping event', {'data': 42}, namespace='/chat')

    :param event: the name of the user event to emit.
    :param args: a dictionary with the JSON data to send as payload.
    :param namespace: the namespace to send under; defaults to the
                      global namespace.
    :param room: send only to users in this room; all connected users
                 when omitted.
    :param skip_sid: session id of a client to exclude, typically the
                     originator of the message.
    :param callback: function invoked when the client acknowledges
                     receipt; only usable when addressing an individual
                     client.
    """
    ns = kwargs.pop('namespace', '/')
    room = kwargs.pop('room', None)
    include_self = kwargs.pop('include_self', True)
    skip_sid = kwargs.pop('skip_sid', None)
    callback = kwargs.pop('callback', None)
    if not (include_self or skip_sid):
        # exclude the originating client of the current request
        skip_sid = flask.request.sid
    self.server.emit(event, *args, namespace=ns, room=room,
                     skip_sid=skip_sid, callback=callback, **kwargs)
def send(self, data, json=False, namespace=None, room=None,
         callback=None, include_self=True, skip_sid=None, **kwargs):
    """Send a server-generated Socket.IO message.

    Simpler cousin of :meth:`emit` that sends a plain ``'message'`` (or
    ``'json'``) event. Usable outside of a Socket.IO event context.

    :param data: the message to send, either a string or a JSON blob.
    :param json: ``True`` when *data* is a JSON blob.
    :param namespace: the namespace to send under; defaults to the
                      global namespace.
    :param room: send only to users in this room; all connected users
                 when omitted.
    :param skip_sid: session id of a client to exclude, typically the
                     originator of the message.
    :param callback: function invoked when the client acknowledges
                     receipt; only usable when addressing an individual
                     client.
    """
    if not include_self:
        # exclude the originating client of the current request
        skip_sid = flask.request.sid
    event = 'json' if json else 'message'
    self.emit(event, data, namespace=namespace, room=room,
              skip_sid=skip_sid, callback=callback, **kwargs)
def close_room(self, room, namespace=None):
    """Close a room.

    Removes any users currently in *room* and then deletes the room on
    the server. Usable outside of a Socket.IO event context.

    :param room: the name of the room to close.
    :param namespace: the namespace the room belongs to; defaults to
                      the global namespace.
    """
    srv = self.server
    srv.close_room(room, namespace)
def run(self, app, host=None, port=None, **kwargs):
    """Run the SocketIO web server.

    :param app: The Flask application instance.
    :param host: The hostname or IP address for the server to listen on.
                 Defaults to 127.0.0.1.
    :param port: The port number for the server to listen on. Defaults to
                 5000.
    :param debug: ``True`` to start the server in debug mode, ``False`` to
                  start in normal mode.
    :param use_reloader: ``True`` to enable the Flask reloader, ``False``
                         to disable it.
    :param extra_files: A list of additional files that the Flask
                        reloader should watch. Defaults to ``None``.
    :param log_output: If ``True``, the server logs all incoming
                       connections. If ``False`` logging is disabled.
                       Defaults to ``True`` in debug mode, ``False``
                       in normal mode. Unused when the threading async
                       mode is used.
    :param kwargs: Additional web server options, specific to the server
                   used in each of the supported async modes. Note that
                   options provided here will not be seen when using an
                   external web server such as gunicorn, since this
                   method is not called in that case.
    """
    if host is None:
        host = '127.0.0.1'
    if port is None:
        # derive the port from SERVER_NAME when it carries one
        server_name = app.config['SERVER_NAME']
        if server_name and ':' in server_name:
            port = int(server_name.rsplit(':', 1)[1])
        else:
            port = 5000
    debug = kwargs.pop('debug', app.debug)
    log_output = kwargs.pop('log_output', debug)
    use_reloader = kwargs.pop('use_reloader', debug)
    extra_files = kwargs.pop('extra_files', None)
    app.debug = debug
    if app.debug and self.server.eio.async_mode != 'threading':
        # put the debug middleware between the SocketIO middleware
        # and the Flask application instance
        #
        #    mw1    mw2    mw3   Flask app
        #     o ---- o ---- o ---- o
        #    /
        #   o Flask-SocketIO
        #    \  middleware
        #     o
        # Flask-SocketIO WebSocket handler
        #
        # BECOMES
        #
        #  dbg-mw  mw1    mw2    mw3   Flask app
        #     o ---- o ---- o ---- o ---- o
        #    /
        #   o Flask-SocketIO
        #    \  middleware
        #     o
        # Flask-SocketIO WebSocket handler
        #
        self.sockio_mw.wsgi_app = DebuggedApplication(self.sockio_mw.wsgi_app,
                                                      evalex=True)
    if self.server.eio.async_mode == 'threading':
        from werkzeug._internal import _log
        _log('warning', 'WebSocket transport not available. Install '
             'eventlet or gevent and gevent-websocket for '
             'improved performance.')
        app.run(host=host, port=port, threaded=True,
                use_reloader=use_reloader, **kwargs)
    elif self.server.eio.async_mode == 'eventlet':
        def run_server():
            import eventlet
            import eventlet.wsgi
            import eventlet.green
            addresses = eventlet.green.socket.getaddrinfo(host, port)
            if not addresses:
                raise RuntimeError('Could not resolve host to a valid address')
            eventlet_socket = eventlet.listen(addresses[0][4], addresses[0][0])
            # If provided an SSL argument, use an SSL socket
            ssl_args = ['keyfile', 'certfile', 'server_side', 'cert_reqs',
                        'ssl_version', 'ca_certs',
                        'do_handshake_on_connect', 'suppress_ragged_eofs',
                        'ciphers']
            ssl_params = {k: kwargs[k] for k in kwargs if k in ssl_args}
            if len(ssl_params) > 0:
                # SSL options must not leak into the wsgi server kwargs
                for k in ssl_params:
                    kwargs.pop(k)
                ssl_params['server_side'] = True  # Listening requires true
                eventlet_socket = eventlet.wrap_ssl(eventlet_socket,
                                                    **ssl_params)
            eventlet.wsgi.server(eventlet_socket, app,
                                 log_output=log_output, **kwargs)

        if use_reloader:
            run_with_reloader(run_server, extra_files=extra_files)
        else:
            run_server()
    elif self.server.eio.async_mode == 'gevent':
        from gevent import pywsgi
        try:
            from geventwebsocket.handler import WebSocketHandler
            websocket = True
        except ImportError:
            # gevent-websocket not installed: fall back to polling only
            websocket = False

        log = 'default'
        if not log_output:
            log = None
        if websocket:
            self.wsgi_server = pywsgi.WSGIServer(
                (host, port), app, handler_class=WebSocketHandler,
                log=log, **kwargs)
        else:
            self.wsgi_server = pywsgi.WSGIServer((host, port), app,
                                                 log=log, **kwargs)

        if use_reloader:
            # monkey patching is required by the reloader
            from gevent import monkey
            monkey.patch_all()

            def run_server():
                self.wsgi_server.serve_forever()

            run_with_reloader(run_server, extra_files=extra_files)
        else:
            self.wsgi_server.serve_forever()
def stop(self):
    """Shut down a running Socket.IO web server.

    Must be invoked from within an HTTP or Socket.IO handler function.
    """
    mode = self.server.eio.async_mode
    if mode == 'threading':
        shutdown = flask.request.environ.get('werkzeug.server.shutdown')
        if not shutdown:
            raise RuntimeError('Cannot stop unknown web server')
        shutdown()
    elif mode == 'eventlet':
        raise SystemExit
    elif mode == 'gevent':
        self.wsgi_server.stop()
def sleep(self, seconds=0):
"""Sleep for the requested amount of time using the appropriate async
model.
This is a utility function that applications can use to put a task to
sleep without having to worry about using the correct call for the
selected async mode.
"""
return self.server.sleep(seconds)
def test_client(self, app, namespace=None, query_string=None,
headers=None, flask_test_client=None):
"""The Socket.IO test client is useful for testing a Flask-SocketIO
server. It works in a similar way to the Flask Test Client, but
adapted to the Socket.IO server.
:param app: The Flask application instance.
:param namespace: The namespace for the client. If not provided, the
client connects to the server on the global
namespace.
:param query_string: A string with custom query string arguments.
:param headers: A dictionary with custom HTTP headers.
:param flask_test_client: The instance of the Flask test client
currently in use. Passing the Flask test
client is optional, but is necessary if you
want the Flask user session and any other
cookies set in HTTP routes accessible from
Socket.IO events.
"""
return SocketIOTestClient(app, self, namespace=namespace,
query_string=query_string, headers=headers,
flask_test_client=flask_test_client)
def _handle_event(self, handler, message, namespace, sid, *args):
if sid not in self.server.environ:
# we don't have record of this client, ignore this event
return '', 400
app = self.server.environ[sid]['flask.app']
with app.request_context(self.server.environ[sid]):
if self.manage_session:
# manage a separate session for this client's Socket.IO events
# created as a copy of the regular user session
if 'saved_session' not in self.server.environ[sid]:
self.server.environ[sid]['saved_session'] = \
_ManagedSession(flask.session)
session_obj = self.server.environ[sid]['saved_session']
else:
# let Flask handle the user session
# for cookie based sessions, this effectively freezes the
# session to its state at connection time
# for server-side sessions, this allows HTTP and Socket.IO to
# share the session, with both having read/write access to it
session_obj = flask.session._get_current_object()
_request_ctx_stack.top.session = session_obj
flask.request.sid = sid
flask.request.namespace = namespace
flask.request.event = {'message': message, 'args': args}
try:
if message == 'connect':
ret = handler()
else:
ret = handler(*args)
except:
err_handler = self.exception_handlers.get(
namespace, self.default_exception_handler)
if err_handler is None:
raise
type, value, traceback = sys.exc_info()
return err_handler(value)
if not self.manage_session:
# when Flask is managing the user session, it needs to save it
if not hasattr(session_obj, 'modified') or session_obj.modified:
resp = app.response_class()
app.session_interface.save_session(app, session_obj, resp)
return ret
|
miguelgrinberg/Flask-SocketIO | flask_socketio/namespace.py | Namespace.trigger_event | python | def trigger_event(self, event, *args):
handler_name = 'on_' + event
if not hasattr(self, handler_name):
# there is no handler for this event, so we ignore it
return
handler = getattr(self, handler_name)
return self.socketio._handle_event(handler, event, self.namespace,
*args) | Dispatch an event to the proper handler method.
In the most common usage, this method is not overloaded by subclasses,
as it performs the routing of events to methods. However, this
method can be overriden if special dispatching rules are needed, or if
having a single method that catches all events is desired. | train | https://github.com/miguelgrinberg/Flask-SocketIO/blob/4bef800d5e7ba7d98a6f4cd94191ff0b4496c334/flask_socketio/namespace.py#L12-L26 | null | class Namespace(_Namespace):
def __init__(self, namespace=None):
    """Create a class-based namespace handler.

    :param namespace: the Socket.IO namespace this handler serves.
    """
    super(Namespace, self).__init__(namespace)
    # back-reference to the owning SocketIO instance; attached later by
    # SocketIO.on_namespace() via _set_socketio()
    self.socketio = None
def _set_socketio(self, socketio):
    # Attach the parent SocketIO instance (called by
    # SocketIO.on_namespace() when this handler is registered).
    self.socketio = socketio
def emit(self, event, data=None, room=None, include_self=True,
         namespace=None, callback=None):
    """Emit a custom event to one or more connected clients, defaulting
    to this handler's own namespace."""
    target_ns = namespace or self.namespace
    return self.socketio.emit(event, data, room=room,
                              include_self=include_self,
                              namespace=target_ns,
                              callback=callback)
def send(self, data, room=None, include_self=True, namespace=None,
         callback=None):
    """Send a plain message to one or more connected clients, defaulting
    to this handler's own namespace."""
    target_ns = namespace or self.namespace
    return self.socketio.send(data, room=room, include_self=include_self,
                              namespace=target_ns, callback=callback)
def close_room(self, room, namespace=None):
    """Remove all clients from *room* and delete it, defaulting to this
    handler's own namespace."""
    return self.socketio.close_room(
        room=room, namespace=namespace or self.namespace)
|
miguelgrinberg/Flask-SocketIO | flask_socketio/namespace.py | Namespace.emit | python | def emit(self, event, data=None, room=None, include_self=True,
namespace=None, callback=None):
return self.socketio.emit(event, data, room=room,
include_self=include_self,
namespace=namespace or self.namespace,
callback=callback) | Emit a custom event to one or more connected clients. | train | https://github.com/miguelgrinberg/Flask-SocketIO/blob/4bef800d5e7ba7d98a6f4cd94191ff0b4496c334/flask_socketio/namespace.py#L28-L34 | null | class Namespace(_Namespace):
def __init__(self, namespace=None):
super(Namespace, self).__init__(namespace)
self.socketio = None
def _set_socketio(self, socketio):
self.socketio = socketio
def trigger_event(self, event, *args):
    """Route *event* to the matching ``on_<event>`` method.

    Subclasses normally rely on this default routing, but may override
    this method to implement custom dispatch rules or a single catch-all
    handler.
    """
    name = 'on_' + event
    if not hasattr(self, name):
        # no handler defined for this event: silently ignore it
        return
    return self.socketio._handle_event(getattr(self, name), event,
                                       self.namespace, *args)
def send(self, data, room=None, include_self=True, namespace=None,
callback=None):
"""Send a message to one or more connected clients."""
return self.socketio.send(data, room=room, include_self=include_self,
namespace=namespace or self.namespace,
callback=callback)
def close_room(self, room, namespace=None):
"""Close a room."""
return self.socketio.close_room(room=room,
namespace=namespace or self.namespace)
|
miguelgrinberg/Flask-SocketIO | flask_socketio/namespace.py | Namespace.send | python | def send(self, data, room=None, include_self=True, namespace=None,
callback=None):
return self.socketio.send(data, room=room, include_self=include_self,
namespace=namespace or self.namespace,
callback=callback) | Send a message to one or more connected clients. | train | https://github.com/miguelgrinberg/Flask-SocketIO/blob/4bef800d5e7ba7d98a6f4cd94191ff0b4496c334/flask_socketio/namespace.py#L36-L41 | null | class Namespace(_Namespace):
def __init__(self, namespace=None):
super(Namespace, self).__init__(namespace)
self.socketio = None
def _set_socketio(self, socketio):
self.socketio = socketio
def trigger_event(self, event, *args):
"""Dispatch an event to the proper handler method.
In the most common usage, this method is not overloaded by subclasses,
as it performs the routing of events to methods. However, this
method can be overriden if special dispatching rules are needed, or if
having a single method that catches all events is desired.
"""
handler_name = 'on_' + event
if not hasattr(self, handler_name):
# there is no handler for this event, so we ignore it
return
handler = getattr(self, handler_name)
return self.socketio._handle_event(handler, event, self.namespace,
*args)
def emit(self, event, data=None, room=None, include_self=True,
namespace=None, callback=None):
"""Emit a custom event to one or more connected clients."""
return self.socketio.emit(event, data, room=room,
include_self=include_self,
namespace=namespace or self.namespace,
callback=callback)
def close_room(self, room, namespace=None):
"""Close a room."""
return self.socketio.close_room(room=room,
namespace=namespace or self.namespace)
|
miguelgrinberg/Flask-SocketIO | flask_socketio/namespace.py | Namespace.close_room | python | def close_room(self, room, namespace=None):
return self.socketio.close_room(room=room,
namespace=namespace or self.namespace) | Close a room. | train | https://github.com/miguelgrinberg/Flask-SocketIO/blob/4bef800d5e7ba7d98a6f4cd94191ff0b4496c334/flask_socketio/namespace.py#L43-L46 | null | class Namespace(_Namespace):
def __init__(self, namespace=None):
super(Namespace, self).__init__(namespace)
self.socketio = None
def _set_socketio(self, socketio):
    # Internal hook: store the SocketIO server this namespace delegates to.
    self.socketio = socketio
def trigger_event(self, event, *args):
    """Dispatch an event to the proper handler method.

    In the most common usage, this method is not overloaded by subclasses,
    as it performs the routing of events to methods. However, this
    method can be overridden if special dispatching rules are needed, or if
    having a single method that catches all events is desired.
    """
    handler_name = 'on_' + event
    if not hasattr(self, handler_name):
        # there is no handler for this event, so we ignore it
        return
    handler = getattr(self, handler_name)
    # Delegate execution to the server so error handling stays centralized.
    return self.socketio._handle_event(handler, event, self.namespace,
                                       *args)
def emit(self, event, data=None, room=None, include_self=True,
         namespace=None, callback=None):
    """Emit a custom event to one or more connected clients.

    :param event: event name.
    :param data: payload attached to the event.
    :param room: deliver only to clients in this room, when given.
    :param include_self: when False, skip the originating client.
    :param namespace: override namespace; defaults to this namespace's route.
    :param callback: invoked with the client's acknowledgement, if any.
    """
    return self.socketio.emit(event, data, room=room,
                              include_self=include_self,
                              namespace=namespace or self.namespace,
                              callback=callback)
def send(self, data, room=None, include_self=True, namespace=None,
         callback=None):
    """Send a message to one or more connected clients.

    The attached SocketIO server performs the actual delivery; the
    namespace defaults to the one this class was registered under.
    """
    target_namespace = namespace or self.namespace
    return self.socketio.send(data, room=room,
                              include_self=include_self,
                              namespace=target_namespace,
                              callback=callback)
|
miguelgrinberg/Flask-SocketIO | example/app.py | background_thread | python | def background_thread():
count = 0
while True:
socketio.sleep(10)
count += 1
socketio.emit('my_response',
{'data': 'Server generated event', 'count': count},
namespace='/test') | Example of how to send server generated events to clients. | train | https://github.com/miguelgrinberg/Flask-SocketIO/blob/4bef800d5e7ba7d98a6f4cd94191ff0b4496c334/example/app.py#L19-L27 | [
"def emit(self, event, *args, **kwargs):\n \"\"\"Emit a server generated SocketIO event.\n\n This function emits a SocketIO event to one or more connected clients.\n A JSON blob can be attached to the event as payload. This function can\n be used outside of a SocketIO event context, so it is appropriate to\n use when the server is the originator of an event, outside of any\n client context, such as in a regular HTTP request handler or a\n background task. Example::\n\n @app.route('/ping')\n def ping():\n socketio.emit('ping event', {'data': 42}, namespace='/chat')\n\n :param event: The name of the user event to emit.\n :param args: A dictionary with the JSON data to send as payload.\n :param namespace: The namespace under which the message is to be sent.\n Defaults to the global namespace.\n :param room: Send the message to all the users in the given room. If\n this parameter is not included, the event is sent to\n all connected users.\n :param skip_sid: The session id of a client to ignore when broadcasting\n or addressing a room. This is typically set to the\n originator of the message, so that everyone except\n that client receive the message.\n :param callback: If given, this function will be called to acknowledge\n that the client has received the message. The\n arguments that will be passed to the function are\n those provided by the client. Callback functions can\n only be used when addressing an individual client.\n \"\"\"\n namespace = kwargs.pop('namespace', '/')\n room = kwargs.pop('room', None)\n include_self = kwargs.pop('include_self', True)\n skip_sid = kwargs.pop('skip_sid', None)\n if not include_self and not skip_sid:\n skip_sid = flask.request.sid\n callback = kwargs.pop('callback', None)\n self.server.emit(event, *args, namespace=namespace, room=room,\n skip_sid=skip_sid, callback=callback, **kwargs)\n",
"def sleep(self, seconds=0):\n \"\"\"Sleep for the requested amount of time using the appropriate async\n model.\n\n This is a utility function that applications can use to put a task to\n sleep without having to worry about using the correct call for the\n selected async mode.\n \"\"\"\n return self.server.sleep(seconds)\n"
] | #!/usr/bin/env python
from threading import Lock
from flask import Flask, render_template, session, request
from flask_socketio import SocketIO, emit, join_room, leave_room, \
close_room, rooms, disconnect
# Set this variable to "threading", "eventlet" or "gevent" to test the
# different async modes, or leave it set to None for the application to choose
# the best option based on installed packages.
async_mode = None

app = Flask(__name__)
# NOTE(review): demo-only secret; replace with a real value in production.
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app, async_mode=async_mode)
# Handle of the single background broadcaster task, guarded by thread_lock.
thread = None
thread_lock = Lock()
@app.route('/')
def index():
    # Serve the demo page; the template shows which async mode the
    # server actually selected.
    return render_template('index.html', async_mode=socketio.async_mode)
@socketio.on('my_event', namespace='/test')
def test_message(message):
    """Echo a client event back to the sender with a per-session counter."""
    count = session.get('receive_count', 0) + 1
    session['receive_count'] = count
    emit('my_response', {'data': message['data'], 'count': count})
@socketio.on('my_broadcast_event', namespace='/test')
def test_broadcast_message(message):
    # Like test_message, but broadcast=True delivers the response to every
    # connected client in the namespace, not just the sender.
    session['receive_count'] = session.get('receive_count', 0) + 1
    emit('my_response',
         {'data': message['data'], 'count': session['receive_count']},
         broadcast=True)
@socketio.on('join', namespace='/test')
def join(message):
    """Add the client to the requested room and confirm its memberships."""
    join_room(message['room'])
    count = session.get('receive_count', 0) + 1
    session['receive_count'] = count
    room_list = ', '.join(rooms())
    emit('my_response',
         {'data': 'In rooms: ' + room_list, 'count': count})
@socketio.on('leave', namespace='/test')
def leave(message):
    # Mirror of join(): remove the client from the room, then report the
    # rooms the client still belongs to.
    leave_room(message['room'])
    session['receive_count'] = session.get('receive_count', 0) + 1
    emit('my_response',
         {'data': 'In rooms: ' + ', '.join(rooms()),
          'count': session['receive_count']})
@socketio.on('close_room', namespace='/test')
def close(message):
    # Notify the room's members *before* closing the room, since nobody
    # would receive the message afterwards.
    session['receive_count'] = session.get('receive_count', 0) + 1
    emit('my_response', {'data': 'Room ' + message['room'] + ' is closing.',
                         'count': session['receive_count']},
         room=message['room'])
    close_room(message['room'])
@socketio.on('my_room_event', namespace='/test')
def send_room_message(message):
    # Relay a client message to everyone currently in the given room.
    session['receive_count'] = session.get('receive_count', 0) + 1
    emit('my_response',
         {'data': message['data'], 'count': session['receive_count']},
         room=message['room'])
@socketio.on('disconnect_request', namespace='/test')
def disconnect_request():
    # Acknowledge the request first, then close the connection server-side.
    session['receive_count'] = session.get('receive_count', 0) + 1
    emit('my_response',
         {'data': 'Disconnected!', 'count': session['receive_count']})
    disconnect()
@socketio.on('my_ping', namespace='/test')
def ping_pong():
    # Latency probe: the client timestamps my_ping and measures the
    # round trip when my_pong arrives.
    emit('my_pong')
@socketio.on('connect', namespace='/test')
def test_connect():
    global thread
    # Start the periodic broadcaster exactly once, on the first client
    # connection; the lock prevents concurrent connects from racing to
    # spawn duplicate background tasks.
    with thread_lock:
        if thread is None:
            thread = socketio.start_background_task(background_thread)
    emit('my_response', {'data': 'Connected', 'count': 0})
@socketio.on('disconnect', namespace='/test')
def test_disconnect():
    # Log the departing client's session id on the server console.
    print('Client disconnected', request.sid)
if __name__ == '__main__':
    # socketio.run() wraps app.run() and picks the web server appropriate
    # for the selected async mode.
    socketio.run(app, debug=True)
|
JonathonReinhart/scuba | scuba/__main__.py | ScubaDive.prepare | python | def prepare(self):
'''Prepare to run the docker command'''
self.__make_scubadir()
if self.is_remote_docker:
'''
Docker is running remotely (e.g. boot2docker on OSX).
We don't need to do any user setup whatsoever.
TODO: For now, remote instances won't have any .scubainit
See:
https://github.com/JonathonReinhart/scuba/issues/17
'''
raise ScubaError('Remote docker not supported (DOCKER_HOST is set)')
# Docker is running natively
self.__setup_native_run()
# Apply environment vars from .scuba.yml
self.env_vars.update(self.context.environment) | Prepare to run the docker command | train | https://github.com/JonathonReinhart/scuba/blob/0244c81ec482d3c60202028bc075621447bc3ad1/scuba/__main__.py#L119-L139 | [
"def __make_scubadir(self):\n '''Make temp directory where all ancillary files are bind-mounted\n '''\n self.__scubadir_hostpath = tempfile.mkdtemp(prefix='scubadir')\n self.__scubadir_contpath = '/.scuba'\n self.add_volume(self.__scubadir_hostpath, self.__scubadir_contpath)\n",
"def __setup_native_run(self):\n # These options are appended to mounted volume arguments\n # NOTE: This tells Docker to re-label the directory for compatibility\n # with SELinux. See `man docker-run` for more information.\n self.vol_opts = ['z']\n\n\n # Pass variables to scubainit\n self.add_env('SCUBAINIT_UMASK', '{:04o}'.format(get_umask()))\n\n if not self.as_root:\n self.add_env('SCUBAINIT_UID', os.getuid())\n self.add_env('SCUBAINIT_GID', os.getgid())\n\n if self.verbose:\n self.add_env('SCUBAINIT_VERBOSE', 1)\n\n\n # Copy scubainit into the container\n # We make a copy because Docker 1.13 gets pissed if we try to re-label\n # /usr, and Fedora 28 gives an AVC denial.\n scubainit_cpath = self.copy_scubadir_file('scubainit', self.scubainit_path)\n\n # Hooks\n for name in ('root', 'user', ):\n self.__generate_hook_script(name)\n\n # allocate TTY if scuba's output is going to a terminal\n # and stdin is not redirected\n if sys.stdout.isatty() and sys.stdin.isatty():\n self.add_option('--tty')\n\n # Process any aliases\n try:\n context = self.config.process_command(self.user_command)\n except ConfigError as cfgerr:\n raise ScubaError(str(cfgerr))\n\n if self.image_override:\n context.image = self.image_override\n\n '''\n Normally, if the user provides no command to \"docker run\", the image's\n default CMD is run. 
Because we set the entrypiont, scuba must emulate the\n default behavior itself.\n '''\n if not context.script:\n # No user-provided command; we want to run the image's default command\n verbose_msg('No user command; getting command from image')\n default_cmd = get_image_command(context.image)\n if not default_cmd:\n raise ScubaError('No command given and no image-specified command')\n verbose_msg('{} Cmd: \"{}\"'.format(context.image, default_cmd))\n context.script = [shell_quote_cmd(default_cmd)]\n\n # Make scubainit the real entrypoint, and use the defined entrypoint as\n # the docker command (if it exists)\n self.add_option('--entrypoint={}'.format(scubainit_cpath))\n\n self.docker_cmd = []\n if self.entrypoint_override is not None:\n # --entrypoint takes precedence\n if self.entrypoint_override != '':\n self.docker_cmd = [self.entrypoint_override]\n elif context.entrypoint is not None:\n # then .scuba.yml\n if context.entrypoint != '':\n self.docker_cmd = [context.entrypoint]\n else:\n ep = get_image_entrypoint(context.image)\n if ep:\n self.docker_cmd = ep\n\n # The user command is executed via a generated shell script\n with self.open_scubadir_file('command.sh', 'wt') as f:\n self.docker_cmd += ['/bin/sh', f.container_path]\n writeln(f, '#!/bin/sh')\n writeln(f, '# Auto-generated from scuba')\n writeln(f, 'set -e')\n for cmd in context.script:\n writeln(f, cmd)\n\n self.context = context\n"
] | class ScubaDive(object):
def __init__(self, user_command, docker_args=None, env=None, as_root=False, verbose=False,
             image_override=None, entrypoint=None):
    """Collect everything needed to run *user_command* in a container.

    :param user_command: command the user wants to run inside the image.
    :param docker_args: extra options forwarded verbatim to ``docker run``.
    :param env: mapping of environment variables to set in the container.
    :param as_root: when True, skip the uid/gid mapping done by scubainit.
    :param verbose: enable verbose output (also passed on to scubainit).
    :param image_override: use this image instead of the .scuba.yml one.
    :param entrypoint: override the entrypoint ('' disables it entirely).
    :raises ValueError: if *env* is not a mapping.
    :raises ScubaError: if scubainit or the configuration cannot be found.
    """
    # collections.Mapping moved to collections.abc in Python 3.3 and the
    # old alias was removed in 3.10; import whichever location exists so
    # this works on both old and current interpreters.
    try:
        from collections.abc import Mapping
    except ImportError:        # Python 2
        from collections import Mapping

    env = env or {}
    if not isinstance(env, Mapping):
        raise ValueError('Argument env must be dict-like')

    self.user_command = user_command
    self.as_root = as_root
    self.verbose = verbose
    self.image_override = image_override
    self.entrypoint_override = entrypoint

    # These will be added to docker run cmdline
    self.env_vars = env
    self.volumes = []
    self.options = docker_args or []
    self.workdir = None

    self.__locate_scubainit()
    self.__load_config()
def __str__(self):
    # Debug-friendly multi-line dump of the dive's configuration; each
    # writeln() call appends one line to the in-memory buffer.
    s = StringIO()
    writeln(s, 'ScubaDive')
    writeln(s, ' verbose: {}'.format(self.verbose))
    writeln(s, ' as_root: {}'.format(self.as_root))
    writeln(s, ' workdir: {}'.format(self.workdir))

    writeln(s, ' options:')
    for a in self.options:
        writeln(s, ' ' + a)

    writeln(s, ' env_vars:')
    for k,v in self.env_vars.items():
        writeln(s, ' {}={}'.format(k, v))

    # Volumes are shown with the global vol_opts already appended,
    # exactly as they will be passed to docker.
    writeln(s, ' volumes:')
    for hostpath, contpath, options in self.__get_vol_opts():
        writeln(s, ' {} => {} {}'.format(hostpath, contpath, options))

    writeln(s, ' user_command: {}'.format(self.user_command))

    writeln(s, ' context:')
    writeln(s, ' script: ' + str(self.context.script))
    writeln(s, ' image: ' + str(self.context.image))

    return s.getvalue()
def cleanup_tempfiles(self):
    # Remove the temporary 'scubadir' tree created by __make_scubadir();
    # intended to be called after the docker process has exited.
    shutil.rmtree(self.__scubadir_hostpath)
@property
def is_remote_docker(self):
    # DOCKER_HOST being set means the docker client talks to a remote
    # daemon (e.g. boot2docker), where local bind mounts would not apply.
    return 'DOCKER_HOST' in os.environ
def add_env(self, name, val):
    """Register an environment variable for the docker run invocation.

    Duplicate names are rejected rather than silently overwritten.

    :raises KeyError: if *name* is already registered.
    """
    existing = self.env_vars
    if name in existing:
        raise KeyError(name)
    existing[name] = val
def add_volume(self, hostpath, contpath, options=None):
    """Record a bind-mount for the docker run invocation.

    :param hostpath: path on the host to mount.
    :param contpath: path inside the container where it appears.
    :param options: optional list of mount options; a fresh empty list
        is used when omitted (avoids a shared mutable default).
    """
    entry = (hostpath, contpath, [] if options is None else options)
    self.volumes.append(entry)
def add_option(self, option):
    '''Add another option to the docker run invocation
    '''
    self.options.append(option)
def set_workdir(self, workdir):
    # Working directory passed to docker run via '-w' (see
    # get_docker_cmdline()).
    self.workdir = workdir
def __locate_scubainit(self):
    '''Determine path to scubainit binary

    scubainit ships alongside this package and is later copied into the
    container to act as the real entrypoint.

    :raises ScubaError: if the binary is not found next to this module.
    '''
    pkg_path = os.path.dirname(__file__)

    self.scubainit_path = os.path.join(pkg_path, 'scubainit')
    if not os.path.isfile(self.scubainit_path):
        raise ScubaError('scubainit not found at "{}"'.format(self.scubainit_path))
def __load_config(self):
    '''Find and load .scuba.yml

    Also registers the project-root bind mount, the container working
    directory, and the SCUBA_ROOT environment variable.

    :raises ScubaError: if no config is found (and no --image override
        was given), or if the config is invalid.
    '''

    # top_path is where .scuba.yml is found, and becomes the top of our bind mount.
    # top_rel is the relative path from top_path to the current working directory,
    # and is where we'll set the working directory in the container (relative to
    # the bind mount point).
    try:
        top_path, top_rel = find_config()
        self.config = load_config(os.path.join(top_path, SCUBA_YML))
    except ConfigNotFoundError as cfgerr:
        # SCUBA_YML can be missing if --image was given.
        # In this case, we assume a default config
        if not self.image_override:
            raise ScubaError(str(cfgerr))
        top_path, top_rel = os.getcwd(), ''
        self.config = ScubaConfig(image=None)
    except ConfigError as cfgerr:
        # Config exists but is invalid: surface as a user-facing error.
        raise ScubaError(str(cfgerr))

    # Mount scuba root directory at the same path in the container...
    self.add_volume(top_path, top_path)

    # ...and set the working dir relative to it
    self.set_workdir(os.path.join(top_path, top_rel))

    self.add_env('SCUBA_ROOT', top_path)
def __make_scubadir(self):
    '''Make temp directory where all ancillary files are bind-mounted
    '''
    self.__scubadir_hostpath = tempfile.mkdtemp(prefix='scubadir')
    self.__scubadir_contpath = '/.scuba'
    # Expose the whole scratch directory inside the container at /.scuba.
    self.add_volume(self.__scubadir_hostpath, self.__scubadir_contpath)
def __setup_native_run(self):
    '''Assemble everything needed to run against a local docker daemon.

    Populates vol_opts, the scubainit environment variables, hook
    scripts, the resolved command context (self.context), and the
    command list (self.docker_cmd). Statement order matters here:
    scubainit must be copied before the entrypoint option referencing
    it is added, and the command script is written last.
    '''
    # These options are appended to mounted volume arguments
    # NOTE: This tells Docker to re-label the directory for compatibility
    # with SELinux. See `man docker-run` for more information.
    self.vol_opts = ['z']

    # Pass variables to scubainit
    self.add_env('SCUBAINIT_UMASK', '{:04o}'.format(get_umask()))

    if not self.as_root:
        self.add_env('SCUBAINIT_UID', os.getuid())
        self.add_env('SCUBAINIT_GID', os.getgid())

    if self.verbose:
        self.add_env('SCUBAINIT_VERBOSE', 1)

    # Copy scubainit into the container
    # We make a copy because Docker 1.13 gets pissed if we try to re-label
    # /usr, and Fedora 28 gives an AVC denial.
    scubainit_cpath = self.copy_scubadir_file('scubainit', self.scubainit_path)

    # Hooks
    for name in ('root', 'user', ):
        self.__generate_hook_script(name)

    # allocate TTY if scuba's output is going to a terminal
    # and stdin is not redirected
    if sys.stdout.isatty() and sys.stdin.isatty():
        self.add_option('--tty')

    # Process any aliases
    try:
        context = self.config.process_command(self.user_command)
    except ConfigError as cfgerr:
        raise ScubaError(str(cfgerr))

    if self.image_override:
        context.image = self.image_override

    '''
    Normally, if the user provides no command to "docker run", the image's
    default CMD is run. Because we set the entrypiont, scuba must emulate the
    default behavior itself.
    '''
    if not context.script:
        # No user-provided command; we want to run the image's default command
        verbose_msg('No user command; getting command from image')
        default_cmd = get_image_command(context.image)
        if not default_cmd:
            raise ScubaError('No command given and no image-specified command')
        verbose_msg('{} Cmd: "{}"'.format(context.image, default_cmd))
        context.script = [shell_quote_cmd(default_cmd)]

    # Make scubainit the real entrypoint, and use the defined entrypoint as
    # the docker command (if it exists)
    self.add_option('--entrypoint={}'.format(scubainit_cpath))

    # Precedence for the in-container command prefix:
    # --entrypoint flag, then .scuba.yml, then the image's own entrypoint.
    self.docker_cmd = []
    if self.entrypoint_override is not None:
        # --entrypoint takes precedence
        if self.entrypoint_override != '':
            self.docker_cmd = [self.entrypoint_override]
    elif context.entrypoint is not None:
        # then .scuba.yml
        if context.entrypoint != '':
            self.docker_cmd = [context.entrypoint]
    else:
        ep = get_image_entrypoint(context.image)
        if ep:
            self.docker_cmd = ep

    # The user command is executed via a generated shell script
    with self.open_scubadir_file('command.sh', 'wt') as f:
        self.docker_cmd += ['/bin/sh', f.container_path]
        writeln(f, '#!/bin/sh')
        writeln(f, '# Auto-generated from scuba')
        writeln(f, 'set -e')
        for cmd in context.script:
            writeln(f, cmd)

    self.context = context
def open_scubadir_file(self, name, mode):
    '''Opens a file in the 'scubadir'

    This file will automatically be bind-mounted into the container,
    at a path given by the 'container_path' property on the returned file object.
    '''
    path = os.path.join(self.__scubadir_hostpath, name)
    assert not os.path.exists(path)   # internal invariant: names are unique per run

    # Make any directories required
    mkdir_p(os.path.dirname(path))

    f = File(path, mode)
    # Path at which the file will appear from inside the container.
    f.container_path = os.path.join(self.__scubadir_contpath, name)
    return f
def copy_scubadir_file(self, name, source):
    '''Copies source into the scubadir

    Returns the container-path of the copied file
    '''
    dest = os.path.join(self.__scubadir_hostpath, name)
    assert not os.path.exists(dest)   # internal invariant: names are unique per run
    # copy2 preserves mode bits, so executables (e.g. scubainit) stay executable.
    shutil.copy2(source, dest)

    return os.path.join(self.__scubadir_contpath, name)
def __generate_hook_script(self, name):
    # Emit a shell script for the named hook if the config defines one,
    # and point scubainit at it via a SCUBAINIT_HOOK_* env var.
    script = self.config.hooks.get(name)
    if not script:
        return

    # Generate the hook script, mount it into the container, and tell scubainit
    with self.open_scubadir_file('hooks/{}.sh'.format(name), 'wt') as f:
        self.add_env('SCUBAINIT_HOOK_{}'.format(name.upper()), f.container_path)

        writeln(f, '#!/bin/sh')
        writeln(f, '# Auto-generated from .scuba.yml')
        writeln(f, 'set -e')
        for cmd in script:
            writeln(f, cmd)
def __get_vol_opts(self):
    # Yield each recorded volume with the global vol_opts (set in
    # __setup_native_run, e.g. the SELinux 'z' option) appended.
    for hostpath, contpath, options in self.volumes:
        yield hostpath, contpath, options + self.vol_opts
def get_docker_cmdline(self):
    '''Build the full ``docker run`` argv for this dive.

    Relies on self.context and self.docker_cmd having been produced by
    __setup_native_run().
    '''
    args = ['docker', 'run',
            # interactive: keep STDIN open
            '-i',

            # remove container after exit
            '--rm',
           ]

    for name,val in self.env_vars.items():
        args.append('--env={}={}'.format(name, val))

    for hostpath, contpath, options in self.__get_vol_opts():
        args.append(make_vol_opt(hostpath, contpath, options))

    if self.workdir:
        args += ['-w', self.workdir]

    # User-supplied and internally-added docker options come last so
    # they can override the defaults above.
    args += self.options

    # Docker image
    args.append(self.context.image)

    # Command to run in container
    args += self.docker_cmd

    return args
|
JonathonReinhart/scuba | scuba/__main__.py | ScubaDive.add_env | python | def add_env(self, name, val):
'''Add an environment variable to the docker run invocation
'''
if name in self.env_vars:
raise KeyError(name)
self.env_vars[name] = val | Add an environment variable to the docker run invocation | train | https://github.com/JonathonReinhart/scuba/blob/0244c81ec482d3c60202028bc075621447bc3ad1/scuba/__main__.py#L176-L181 | null | class ScubaDive(object):
def __init__(self, user_command, docker_args=None, env=None, as_root=False, verbose=False,
             image_override=None, entrypoint=None):
    """Collect everything needed to run *user_command* in a container.

    :param user_command: command the user wants to run inside the image.
    :param docker_args: extra options forwarded verbatim to ``docker run``.
    :param env: mapping of environment variables to set in the container.
    :param as_root: when True, skip the uid/gid mapping done by scubainit.
    :param verbose: enable verbose output (also passed on to scubainit).
    :param image_override: use this image instead of the .scuba.yml one.
    :param entrypoint: override the entrypoint ('' disables it entirely).
    :raises ValueError: if *env* is not a mapping.
    :raises ScubaError: if scubainit or the configuration cannot be found.
    """
    # collections.Mapping moved to collections.abc in Python 3.3 and the
    # old alias was removed in 3.10; import whichever location exists.
    try:
        from collections.abc import Mapping
    except ImportError:        # Python 2
        from collections import Mapping

    env = env or {}
    if not isinstance(env, Mapping):
        raise ValueError('Argument env must be dict-like')

    self.user_command = user_command
    self.as_root = as_root
    self.verbose = verbose
    self.image_override = image_override
    self.entrypoint_override = entrypoint

    # These will be added to docker run cmdline
    self.env_vars = env
    self.volumes = []
    self.options = docker_args or []
    self.workdir = None

    self.__locate_scubainit()
    self.__load_config()
def prepare(self):
    '''Prepare to run the docker command.

    Creates the temporary scubadir, rejects remote docker daemons
    (which cannot see our bind mounts), performs the native-run setup,
    and finally layers the .scuba.yml environment over any
    user-supplied variables.

    :raises ScubaError: if DOCKER_HOST points at a remote docker daemon.
    '''
    self.__make_scubadir()

    if self.is_remote_docker:
        '''
        Docker is running remotely (e.g. boot2docker on OSX).
        We don't need to do any user setup whatsoever.
        TODO: For now, remote instances won't have any .scubainit
        See:
        https://github.com/JonathonReinhart/scuba/issues/17
        '''
        raise ScubaError('Remote docker not supported (DOCKER_HOST is set)')

    # Docker is running natively
    self.__setup_native_run()

    # Apply environment vars from .scuba.yml
    self.env_vars.update(self.context.environment)
def __str__(self):
s = StringIO()
writeln(s, 'ScubaDive')
writeln(s, ' verbose: {}'.format(self.verbose))
writeln(s, ' as_root: {}'.format(self.as_root))
writeln(s, ' workdir: {}'.format(self.workdir))
writeln(s, ' options:')
for a in self.options:
writeln(s, ' ' + a)
writeln(s, ' env_vars:')
for k,v in self.env_vars.items():
writeln(s, ' {}={}'.format(k, v))
writeln(s, ' volumes:')
for hostpath, contpath, options in self.__get_vol_opts():
writeln(s, ' {} => {} {}'.format(hostpath, contpath, options))
writeln(s, ' user_command: {}'.format(self.user_command))
writeln(s, ' context:')
writeln(s, ' script: ' + str(self.context.script))
writeln(s, ' image: ' + str(self.context.image))
return s.getvalue()
def cleanup_tempfiles(self):
shutil.rmtree(self.__scubadir_hostpath)
@property
def is_remote_docker(self):
return 'DOCKER_HOST' in os.environ
def add_volume(self, hostpath, contpath, options=None):
'''Add a volume (bind-mount) to the docker run invocation
'''
if options is None:
options = []
self.volumes.append((hostpath, contpath, options))
def add_option(self, option):
'''Add another option to the docker run invocation
'''
self.options.append(option)
def set_workdir(self, workdir):
self.workdir = workdir
def __locate_scubainit(self):
'''Determine path to scubainit binary
'''
pkg_path = os.path.dirname(__file__)
self.scubainit_path = os.path.join(pkg_path, 'scubainit')
if not os.path.isfile(self.scubainit_path):
raise ScubaError('scubainit not found at "{}"'.format(self.scubainit_path))
def __load_config(self):
'''Find and load .scuba.yml
'''
# top_path is where .scuba.yml is found, and becomes the top of our bind mount.
# top_rel is the relative path from top_path to the current working directory,
# and is where we'll set the working directory in the container (relative to
# the bind mount point).
try:
top_path, top_rel = find_config()
self.config = load_config(os.path.join(top_path, SCUBA_YML))
except ConfigNotFoundError as cfgerr:
# SCUBA_YML can be missing if --image was given.
# In this case, we assume a default config
if not self.image_override:
raise ScubaError(str(cfgerr))
top_path, top_rel = os.getcwd(), ''
self.config = ScubaConfig(image=None)
except ConfigError as cfgerr:
raise ScubaError(str(cfgerr))
# Mount scuba root directory at the same path in the container...
self.add_volume(top_path, top_path)
# ...and set the working dir relative to it
self.set_workdir(os.path.join(top_path, top_rel))
self.add_env('SCUBA_ROOT', top_path)
def __make_scubadir(self):
'''Make temp directory where all ancillary files are bind-mounted
'''
self.__scubadir_hostpath = tempfile.mkdtemp(prefix='scubadir')
self.__scubadir_contpath = '/.scuba'
self.add_volume(self.__scubadir_hostpath, self.__scubadir_contpath)
def __setup_native_run(self):
# These options are appended to mounted volume arguments
# NOTE: This tells Docker to re-label the directory for compatibility
# with SELinux. See `man docker-run` for more information.
self.vol_opts = ['z']
# Pass variables to scubainit
self.add_env('SCUBAINIT_UMASK', '{:04o}'.format(get_umask()))
if not self.as_root:
self.add_env('SCUBAINIT_UID', os.getuid())
self.add_env('SCUBAINIT_GID', os.getgid())
if self.verbose:
self.add_env('SCUBAINIT_VERBOSE', 1)
# Copy scubainit into the container
# We make a copy because Docker 1.13 gets pissed if we try to re-label
# /usr, and Fedora 28 gives an AVC denial.
scubainit_cpath = self.copy_scubadir_file('scubainit', self.scubainit_path)
# Hooks
for name in ('root', 'user', ):
self.__generate_hook_script(name)
# allocate TTY if scuba's output is going to a terminal
# and stdin is not redirected
if sys.stdout.isatty() and sys.stdin.isatty():
self.add_option('--tty')
# Process any aliases
try:
context = self.config.process_command(self.user_command)
except ConfigError as cfgerr:
raise ScubaError(str(cfgerr))
if self.image_override:
context.image = self.image_override
'''
Normally, if the user provides no command to "docker run", the image's
default CMD is run. Because we set the entrypiont, scuba must emulate the
default behavior itself.
'''
if not context.script:
# No user-provided command; we want to run the image's default command
verbose_msg('No user command; getting command from image')
default_cmd = get_image_command(context.image)
if not default_cmd:
raise ScubaError('No command given and no image-specified command')
verbose_msg('{} Cmd: "{}"'.format(context.image, default_cmd))
context.script = [shell_quote_cmd(default_cmd)]
# Make scubainit the real entrypoint, and use the defined entrypoint as
# the docker command (if it exists)
self.add_option('--entrypoint={}'.format(scubainit_cpath))
self.docker_cmd = []
if self.entrypoint_override is not None:
# --entrypoint takes precedence
if self.entrypoint_override != '':
self.docker_cmd = [self.entrypoint_override]
elif context.entrypoint is not None:
# then .scuba.yml
if context.entrypoint != '':
self.docker_cmd = [context.entrypoint]
else:
ep = get_image_entrypoint(context.image)
if ep:
self.docker_cmd = ep
# The user command is executed via a generated shell script
with self.open_scubadir_file('command.sh', 'wt') as f:
self.docker_cmd += ['/bin/sh', f.container_path]
writeln(f, '#!/bin/sh')
writeln(f, '# Auto-generated from scuba')
writeln(f, 'set -e')
for cmd in context.script:
writeln(f, cmd)
self.context = context
def open_scubadir_file(self, name, mode):
'''Opens a file in the 'scubadir'
This file will automatically be bind-mounted into the container,
at a path given by the 'container_path' property on the returned file object.
'''
path = os.path.join(self.__scubadir_hostpath, name)
assert not os.path.exists(path)
# Make any directories required
mkdir_p(os.path.dirname(path))
f = File(path, mode)
f.container_path = os.path.join(self.__scubadir_contpath, name)
return f
def copy_scubadir_file(self, name, source):
'''Copies source into the scubadir
Returns the container-path of the copied file
'''
dest = os.path.join(self.__scubadir_hostpath, name)
assert not os.path.exists(dest)
shutil.copy2(source, dest)
return os.path.join(self.__scubadir_contpath, name)
def __generate_hook_script(self, name):
script = self.config.hooks.get(name)
if not script:
return
# Generate the hook script, mount it into the container, and tell scubainit
with self.open_scubadir_file('hooks/{}.sh'.format(name), 'wt') as f:
self.add_env('SCUBAINIT_HOOK_{}'.format(name.upper()), f.container_path)
writeln(f, '#!/bin/sh')
writeln(f, '# Auto-generated from .scuba.yml')
writeln(f, 'set -e')
for cmd in script:
writeln(f, cmd)
def __get_vol_opts(self):
for hostpath, contpath, options in self.volumes:
yield hostpath, contpath, options + self.vol_opts
def get_docker_cmdline(self):
args = ['docker', 'run',
# interactive: keep STDIN open
'-i',
# remove container after exit
'--rm',
]
for name,val in self.env_vars.items():
args.append('--env={}={}'.format(name, val))
for hostpath, contpath, options in self.__get_vol_opts():
args.append(make_vol_opt(hostpath, contpath, options))
if self.workdir:
args += ['-w', self.workdir]
args += self.options
# Docker image
args.append(self.context.image)
# Command to run in container
args += self.docker_cmd
return args
|
JonathonReinhart/scuba | scuba/__main__.py | ScubaDive.add_volume | python | def add_volume(self, hostpath, contpath, options=None):
'''Add a volume (bind-mount) to the docker run invocation
'''
if options is None:
options = []
self.volumes.append((hostpath, contpath, options)) | Add a volume (bind-mount) to the docker run invocation | train | https://github.com/JonathonReinhart/scuba/blob/0244c81ec482d3c60202028bc075621447bc3ad1/scuba/__main__.py#L183-L188 | null | class ScubaDive(object):
def __init__(self, user_command, docker_args=None, env=None, as_root=False, verbose=False,
             image_override=None, entrypoint=None):
    """Collect everything needed to run *user_command* in a container.

    :raises ValueError: if *env* is not a mapping.
    :raises ScubaError: if scubainit or the configuration cannot be found.
    """
    # collections.Mapping moved to collections.abc in Python 3.3 and the
    # old alias was removed in 3.10; import whichever location exists.
    try:
        from collections.abc import Mapping
    except ImportError:        # Python 2
        from collections import Mapping

    env = env or {}
    if not isinstance(env, Mapping):
        raise ValueError('Argument env must be dict-like')

    self.user_command = user_command
    self.as_root = as_root
    self.verbose = verbose
    self.image_override = image_override
    self.entrypoint_override = entrypoint

    # These will be added to docker run cmdline
    self.env_vars = env
    self.volumes = []
    self.options = docker_args or []
    self.workdir = None

    self.__locate_scubainit()
    self.__load_config()
def prepare(self):
    '''Prepare to run the docker command.

    Creates the temporary scubadir, rejects remote docker daemons, and
    performs the native-run setup before merging the .scuba.yml
    environment into env_vars.

    :raises ScubaError: if DOCKER_HOST points at a remote docker daemon.
    '''
    self.__make_scubadir()

    if self.is_remote_docker:
        '''
        Docker is running remotely (e.g. boot2docker on OSX).
        We don't need to do any user setup whatsoever.
        TODO: For now, remote instances won't have any .scubainit
        See:
        https://github.com/JonathonReinhart/scuba/issues/17
        '''
        raise ScubaError('Remote docker not supported (DOCKER_HOST is set)')

    # Docker is running natively
    self.__setup_native_run()

    # Apply environment vars from .scuba.yml
    self.env_vars.update(self.context.environment)
def __str__(self):
s = StringIO()
writeln(s, 'ScubaDive')
writeln(s, ' verbose: {}'.format(self.verbose))
writeln(s, ' as_root: {}'.format(self.as_root))
writeln(s, ' workdir: {}'.format(self.workdir))
writeln(s, ' options:')
for a in self.options:
writeln(s, ' ' + a)
writeln(s, ' env_vars:')
for k,v in self.env_vars.items():
writeln(s, ' {}={}'.format(k, v))
writeln(s, ' volumes:')
for hostpath, contpath, options in self.__get_vol_opts():
writeln(s, ' {} => {} {}'.format(hostpath, contpath, options))
writeln(s, ' user_command: {}'.format(self.user_command))
writeln(s, ' context:')
writeln(s, ' script: ' + str(self.context.script))
writeln(s, ' image: ' + str(self.context.image))
return s.getvalue()
def cleanup_tempfiles(self):
shutil.rmtree(self.__scubadir_hostpath)
@property
def is_remote_docker(self):
return 'DOCKER_HOST' in os.environ
def add_env(self, name, val):
    """Add an environment variable to the docker run invocation.

    :raises KeyError: if *name* was already registered (duplicates are
        rejected rather than overwritten).
    """
    registry = self.env_vars
    if name in registry:
        raise KeyError(name)
    registry[name] = val
def add_option(self, option):
'''Add another option to the docker run invocation
'''
self.options.append(option)
def set_workdir(self, workdir):
self.workdir = workdir
def __locate_scubainit(self):
'''Determine path to scubainit binary
'''
pkg_path = os.path.dirname(__file__)
self.scubainit_path = os.path.join(pkg_path, 'scubainit')
if not os.path.isfile(self.scubainit_path):
raise ScubaError('scubainit not found at "{}"'.format(self.scubainit_path))
def __load_config(self):
'''Find and load .scuba.yml
'''
# top_path is where .scuba.yml is found, and becomes the top of our bind mount.
# top_rel is the relative path from top_path to the current working directory,
# and is where we'll set the working directory in the container (relative to
# the bind mount point).
try:
top_path, top_rel = find_config()
self.config = load_config(os.path.join(top_path, SCUBA_YML))
except ConfigNotFoundError as cfgerr:
# SCUBA_YML can be missing if --image was given.
# In this case, we assume a default config
if not self.image_override:
raise ScubaError(str(cfgerr))
top_path, top_rel = os.getcwd(), ''
self.config = ScubaConfig(image=None)
except ConfigError as cfgerr:
raise ScubaError(str(cfgerr))
# Mount scuba root directory at the same path in the container...
self.add_volume(top_path, top_path)
# ...and set the working dir relative to it
self.set_workdir(os.path.join(top_path, top_rel))
self.add_env('SCUBA_ROOT', top_path)
def __make_scubadir(self):
'''Make temp directory where all ancillary files are bind-mounted
'''
self.__scubadir_hostpath = tempfile.mkdtemp(prefix='scubadir')
self.__scubadir_contpath = '/.scuba'
self.add_volume(self.__scubadir_hostpath, self.__scubadir_contpath)
def __setup_native_run(self):
# These options are appended to mounted volume arguments
# NOTE: This tells Docker to re-label the directory for compatibility
# with SELinux. See `man docker-run` for more information.
self.vol_opts = ['z']
# Pass variables to scubainit
self.add_env('SCUBAINIT_UMASK', '{:04o}'.format(get_umask()))
if not self.as_root:
self.add_env('SCUBAINIT_UID', os.getuid())
self.add_env('SCUBAINIT_GID', os.getgid())
if self.verbose:
self.add_env('SCUBAINIT_VERBOSE', 1)
# Copy scubainit into the container
# We make a copy because Docker 1.13 gets pissed if we try to re-label
# /usr, and Fedora 28 gives an AVC denial.
scubainit_cpath = self.copy_scubadir_file('scubainit', self.scubainit_path)
# Hooks
for name in ('root', 'user', ):
self.__generate_hook_script(name)
# allocate TTY if scuba's output is going to a terminal
# and stdin is not redirected
if sys.stdout.isatty() and sys.stdin.isatty():
self.add_option('--tty')
# Process any aliases
try:
context = self.config.process_command(self.user_command)
except ConfigError as cfgerr:
raise ScubaError(str(cfgerr))
if self.image_override:
context.image = self.image_override
'''
Normally, if the user provides no command to "docker run", the image's
default CMD is run. Because we set the entrypiont, scuba must emulate the
default behavior itself.
'''
if not context.script:
# No user-provided command; we want to run the image's default command
verbose_msg('No user command; getting command from image')
default_cmd = get_image_command(context.image)
if not default_cmd:
raise ScubaError('No command given and no image-specified command')
verbose_msg('{} Cmd: "{}"'.format(context.image, default_cmd))
context.script = [shell_quote_cmd(default_cmd)]
# Make scubainit the real entrypoint, and use the defined entrypoint as
# the docker command (if it exists)
self.add_option('--entrypoint={}'.format(scubainit_cpath))
self.docker_cmd = []
if self.entrypoint_override is not None:
# --entrypoint takes precedence
if self.entrypoint_override != '':
self.docker_cmd = [self.entrypoint_override]
elif context.entrypoint is not None:
# then .scuba.yml
if context.entrypoint != '':
self.docker_cmd = [context.entrypoint]
else:
ep = get_image_entrypoint(context.image)
if ep:
self.docker_cmd = ep
# The user command is executed via a generated shell script
with self.open_scubadir_file('command.sh', 'wt') as f:
self.docker_cmd += ['/bin/sh', f.container_path]
writeln(f, '#!/bin/sh')
writeln(f, '# Auto-generated from scuba')
writeln(f, 'set -e')
for cmd in context.script:
writeln(f, cmd)
self.context = context
def open_scubadir_file(self, name, mode):
'''Opens a file in the 'scubadir'
This file will automatically be bind-mounted into the container,
at a path given by the 'container_path' property on the returned file object.
'''
path = os.path.join(self.__scubadir_hostpath, name)
assert not os.path.exists(path)
# Make any directories required
mkdir_p(os.path.dirname(path))
f = File(path, mode)
f.container_path = os.path.join(self.__scubadir_contpath, name)
return f
def copy_scubadir_file(self, name, source):
'''Copies source into the scubadir
Returns the container-path of the copied file
'''
dest = os.path.join(self.__scubadir_hostpath, name)
assert not os.path.exists(dest)
shutil.copy2(source, dest)
return os.path.join(self.__scubadir_contpath, name)
def __generate_hook_script(self, name):
script = self.config.hooks.get(name)
if not script:
return
# Generate the hook script, mount it into the container, and tell scubainit
with self.open_scubadir_file('hooks/{}.sh'.format(name), 'wt') as f:
self.add_env('SCUBAINIT_HOOK_{}'.format(name.upper()), f.container_path)
writeln(f, '#!/bin/sh')
writeln(f, '# Auto-generated from .scuba.yml')
writeln(f, 'set -e')
for cmd in script:
writeln(f, cmd)
def __get_vol_opts(self):
for hostpath, contpath, options in self.volumes:
yield hostpath, contpath, options + self.vol_opts
def get_docker_cmdline(self):
args = ['docker', 'run',
# interactive: keep STDIN open
'-i',
# remove container after exit
'--rm',
]
for name,val in self.env_vars.items():
args.append('--env={}={}'.format(name, val))
for hostpath, contpath, options in self.__get_vol_opts():
args.append(make_vol_opt(hostpath, contpath, options))
if self.workdir:
args += ['-w', self.workdir]
args += self.options
# Docker image
args.append(self.context.image)
# Command to run in container
args += self.docker_cmd
return args
|
JonathonReinhart/scuba | scuba/__main__.py | ScubaDive.__locate_scubainit | python | def __locate_scubainit(self):
'''Determine path to scubainit binary
'''
pkg_path = os.path.dirname(__file__)
self.scubainit_path = os.path.join(pkg_path, 'scubainit')
if not os.path.isfile(self.scubainit_path):
raise ScubaError('scubainit not found at "{}"'.format(self.scubainit_path)) | Determine path to scubainit binary | train | https://github.com/JonathonReinhart/scuba/blob/0244c81ec482d3c60202028bc075621447bc3ad1/scuba/__main__.py#L198-L205 | null | class ScubaDive(object):
def __init__(self, user_command, docker_args=None, env=None, as_root=False, verbose=False,
image_override=None, entrypoint=None):
env = env or {}
if not isinstance(env, collections.Mapping):
raise ValueError('Argument env must be dict-like')
self.user_command = user_command
self.as_root = as_root
self.verbose = verbose
self.image_override = image_override
self.entrypoint_override = entrypoint
# These will be added to docker run cmdline
self.env_vars = env
self.volumes = []
self.options = docker_args or []
self.workdir = None
self.__locate_scubainit()
self.__load_config()
def prepare(self):
'''Prepare to run the docker command'''
self.__make_scubadir()
if self.is_remote_docker:
'''
Docker is running remotely (e.g. boot2docker on OSX).
We don't need to do any user setup whatsoever.
TODO: For now, remote instances won't have any .scubainit
See:
https://github.com/JonathonReinhart/scuba/issues/17
'''
raise ScubaError('Remote docker not supported (DOCKER_HOST is set)')
# Docker is running natively
self.__setup_native_run()
# Apply environment vars from .scuba.yml
self.env_vars.update(self.context.environment)
def __str__(self):
s = StringIO()
writeln(s, 'ScubaDive')
writeln(s, ' verbose: {}'.format(self.verbose))
writeln(s, ' as_root: {}'.format(self.as_root))
writeln(s, ' workdir: {}'.format(self.workdir))
writeln(s, ' options:')
for a in self.options:
writeln(s, ' ' + a)
writeln(s, ' env_vars:')
for k,v in self.env_vars.items():
writeln(s, ' {}={}'.format(k, v))
writeln(s, ' volumes:')
for hostpath, contpath, options in self.__get_vol_opts():
writeln(s, ' {} => {} {}'.format(hostpath, contpath, options))
writeln(s, ' user_command: {}'.format(self.user_command))
writeln(s, ' context:')
writeln(s, ' script: ' + str(self.context.script))
writeln(s, ' image: ' + str(self.context.image))
return s.getvalue()
def cleanup_tempfiles(self):
shutil.rmtree(self.__scubadir_hostpath)
@property
def is_remote_docker(self):
return 'DOCKER_HOST' in os.environ
def add_env(self, name, val):
'''Add an environment variable to the docker run invocation
'''
if name in self.env_vars:
raise KeyError(name)
self.env_vars[name] = val
def add_volume(self, hostpath, contpath, options=None):
'''Add a volume (bind-mount) to the docker run invocation
'''
if options is None:
options = []
self.volumes.append((hostpath, contpath, options))
def add_option(self, option):
'''Add another option to the docker run invocation
'''
self.options.append(option)
def set_workdir(self, workdir):
self.workdir = workdir
def __load_config(self):
'''Find and load .scuba.yml
'''
# top_path is where .scuba.yml is found, and becomes the top of our bind mount.
# top_rel is the relative path from top_path to the current working directory,
# and is where we'll set the working directory in the container (relative to
# the bind mount point).
try:
top_path, top_rel = find_config()
self.config = load_config(os.path.join(top_path, SCUBA_YML))
except ConfigNotFoundError as cfgerr:
# SCUBA_YML can be missing if --image was given.
# In this case, we assume a default config
if not self.image_override:
raise ScubaError(str(cfgerr))
top_path, top_rel = os.getcwd(), ''
self.config = ScubaConfig(image=None)
except ConfigError as cfgerr:
raise ScubaError(str(cfgerr))
# Mount scuba root directory at the same path in the container...
self.add_volume(top_path, top_path)
# ...and set the working dir relative to it
self.set_workdir(os.path.join(top_path, top_rel))
self.add_env('SCUBA_ROOT', top_path)
def __make_scubadir(self):
'''Make temp directory where all ancillary files are bind-mounted
'''
self.__scubadir_hostpath = tempfile.mkdtemp(prefix='scubadir')
self.__scubadir_contpath = '/.scuba'
self.add_volume(self.__scubadir_hostpath, self.__scubadir_contpath)
def __setup_native_run(self):
# These options are appended to mounted volume arguments
# NOTE: This tells Docker to re-label the directory for compatibility
# with SELinux. See `man docker-run` for more information.
self.vol_opts = ['z']
# Pass variables to scubainit
self.add_env('SCUBAINIT_UMASK', '{:04o}'.format(get_umask()))
if not self.as_root:
self.add_env('SCUBAINIT_UID', os.getuid())
self.add_env('SCUBAINIT_GID', os.getgid())
if self.verbose:
self.add_env('SCUBAINIT_VERBOSE', 1)
# Copy scubainit into the container
# We make a copy because Docker 1.13 gets pissed if we try to re-label
# /usr, and Fedora 28 gives an AVC denial.
scubainit_cpath = self.copy_scubadir_file('scubainit', self.scubainit_path)
# Hooks
for name in ('root', 'user', ):
self.__generate_hook_script(name)
# allocate TTY if scuba's output is going to a terminal
# and stdin is not redirected
if sys.stdout.isatty() and sys.stdin.isatty():
self.add_option('--tty')
# Process any aliases
try:
context = self.config.process_command(self.user_command)
except ConfigError as cfgerr:
raise ScubaError(str(cfgerr))
if self.image_override:
context.image = self.image_override
'''
Normally, if the user provides no command to "docker run", the image's
default CMD is run. Because we set the entrypiont, scuba must emulate the
default behavior itself.
'''
if not context.script:
# No user-provided command; we want to run the image's default command
verbose_msg('No user command; getting command from image')
default_cmd = get_image_command(context.image)
if not default_cmd:
raise ScubaError('No command given and no image-specified command')
verbose_msg('{} Cmd: "{}"'.format(context.image, default_cmd))
context.script = [shell_quote_cmd(default_cmd)]
# Make scubainit the real entrypoint, and use the defined entrypoint as
# the docker command (if it exists)
self.add_option('--entrypoint={}'.format(scubainit_cpath))
self.docker_cmd = []
if self.entrypoint_override is not None:
# --entrypoint takes precedence
if self.entrypoint_override != '':
self.docker_cmd = [self.entrypoint_override]
elif context.entrypoint is not None:
# then .scuba.yml
if context.entrypoint != '':
self.docker_cmd = [context.entrypoint]
else:
ep = get_image_entrypoint(context.image)
if ep:
self.docker_cmd = ep
# The user command is executed via a generated shell script
with self.open_scubadir_file('command.sh', 'wt') as f:
self.docker_cmd += ['/bin/sh', f.container_path]
writeln(f, '#!/bin/sh')
writeln(f, '# Auto-generated from scuba')
writeln(f, 'set -e')
for cmd in context.script:
writeln(f, cmd)
self.context = context
def open_scubadir_file(self, name, mode):
'''Opens a file in the 'scubadir'
This file will automatically be bind-mounted into the container,
at a path given by the 'container_path' property on the returned file object.
'''
path = os.path.join(self.__scubadir_hostpath, name)
assert not os.path.exists(path)
# Make any directories required
mkdir_p(os.path.dirname(path))
f = File(path, mode)
f.container_path = os.path.join(self.__scubadir_contpath, name)
return f
def copy_scubadir_file(self, name, source):
'''Copies source into the scubadir
Returns the container-path of the copied file
'''
dest = os.path.join(self.__scubadir_hostpath, name)
assert not os.path.exists(dest)
shutil.copy2(source, dest)
return os.path.join(self.__scubadir_contpath, name)
def __generate_hook_script(self, name):
script = self.config.hooks.get(name)
if not script:
return
# Generate the hook script, mount it into the container, and tell scubainit
with self.open_scubadir_file('hooks/{}.sh'.format(name), 'wt') as f:
self.add_env('SCUBAINIT_HOOK_{}'.format(name.upper()), f.container_path)
writeln(f, '#!/bin/sh')
writeln(f, '# Auto-generated from .scuba.yml')
writeln(f, 'set -e')
for cmd in script:
writeln(f, cmd)
def __get_vol_opts(self):
for hostpath, contpath, options in self.volumes:
yield hostpath, contpath, options + self.vol_opts
def get_docker_cmdline(self):
args = ['docker', 'run',
# interactive: keep STDIN open
'-i',
# remove container after exit
'--rm',
]
for name,val in self.env_vars.items():
args.append('--env={}={}'.format(name, val))
for hostpath, contpath, options in self.__get_vol_opts():
args.append(make_vol_opt(hostpath, contpath, options))
if self.workdir:
args += ['-w', self.workdir]
args += self.options
# Docker image
args.append(self.context.image)
# Command to run in container
args += self.docker_cmd
return args
|
JonathonReinhart/scuba | scuba/__main__.py | ScubaDive.__load_config | python | def __load_config(self):
'''Find and load .scuba.yml
'''
# top_path is where .scuba.yml is found, and becomes the top of our bind mount.
# top_rel is the relative path from top_path to the current working directory,
# and is where we'll set the working directory in the container (relative to
# the bind mount point).
try:
top_path, top_rel = find_config()
self.config = load_config(os.path.join(top_path, SCUBA_YML))
except ConfigNotFoundError as cfgerr:
# SCUBA_YML can be missing if --image was given.
# In this case, we assume a default config
if not self.image_override:
raise ScubaError(str(cfgerr))
top_path, top_rel = os.getcwd(), ''
self.config = ScubaConfig(image=None)
except ConfigError as cfgerr:
raise ScubaError(str(cfgerr))
# Mount scuba root directory at the same path in the container...
self.add_volume(top_path, top_path)
# ...and set the working dir relative to it
self.set_workdir(os.path.join(top_path, top_rel))
self.add_env('SCUBA_ROOT', top_path) | Find and load .scuba.yml | train | https://github.com/JonathonReinhart/scuba/blob/0244c81ec482d3c60202028bc075621447bc3ad1/scuba/__main__.py#L208-L235 | null | class ScubaDive(object):
def __init__(self, user_command, docker_args=None, env=None, as_root=False, verbose=False,
image_override=None, entrypoint=None):
env = env or {}
if not isinstance(env, collections.Mapping):
raise ValueError('Argument env must be dict-like')
self.user_command = user_command
self.as_root = as_root
self.verbose = verbose
self.image_override = image_override
self.entrypoint_override = entrypoint
# These will be added to docker run cmdline
self.env_vars = env
self.volumes = []
self.options = docker_args or []
self.workdir = None
self.__locate_scubainit()
self.__load_config()
def prepare(self):
'''Prepare to run the docker command'''
self.__make_scubadir()
if self.is_remote_docker:
'''
Docker is running remotely (e.g. boot2docker on OSX).
We don't need to do any user setup whatsoever.
TODO: For now, remote instances won't have any .scubainit
See:
https://github.com/JonathonReinhart/scuba/issues/17
'''
raise ScubaError('Remote docker not supported (DOCKER_HOST is set)')
# Docker is running natively
self.__setup_native_run()
# Apply environment vars from .scuba.yml
self.env_vars.update(self.context.environment)
def __str__(self):
s = StringIO()
writeln(s, 'ScubaDive')
writeln(s, ' verbose: {}'.format(self.verbose))
writeln(s, ' as_root: {}'.format(self.as_root))
writeln(s, ' workdir: {}'.format(self.workdir))
writeln(s, ' options:')
for a in self.options:
writeln(s, ' ' + a)
writeln(s, ' env_vars:')
for k,v in self.env_vars.items():
writeln(s, ' {}={}'.format(k, v))
writeln(s, ' volumes:')
for hostpath, contpath, options in self.__get_vol_opts():
writeln(s, ' {} => {} {}'.format(hostpath, contpath, options))
writeln(s, ' user_command: {}'.format(self.user_command))
writeln(s, ' context:')
writeln(s, ' script: ' + str(self.context.script))
writeln(s, ' image: ' + str(self.context.image))
return s.getvalue()
def cleanup_tempfiles(self):
shutil.rmtree(self.__scubadir_hostpath)
@property
def is_remote_docker(self):
return 'DOCKER_HOST' in os.environ
def add_env(self, name, val):
'''Add an environment variable to the docker run invocation
'''
if name in self.env_vars:
raise KeyError(name)
self.env_vars[name] = val
def add_volume(self, hostpath, contpath, options=None):
'''Add a volume (bind-mount) to the docker run invocation
'''
if options is None:
options = []
self.volumes.append((hostpath, contpath, options))
def add_option(self, option):
'''Add another option to the docker run invocation
'''
self.options.append(option)
def set_workdir(self, workdir):
self.workdir = workdir
def __locate_scubainit(self):
'''Determine path to scubainit binary
'''
pkg_path = os.path.dirname(__file__)
self.scubainit_path = os.path.join(pkg_path, 'scubainit')
if not os.path.isfile(self.scubainit_path):
raise ScubaError('scubainit not found at "{}"'.format(self.scubainit_path))
def __make_scubadir(self):
'''Make temp directory where all ancillary files are bind-mounted
'''
self.__scubadir_hostpath = tempfile.mkdtemp(prefix='scubadir')
self.__scubadir_contpath = '/.scuba'
self.add_volume(self.__scubadir_hostpath, self.__scubadir_contpath)
def __setup_native_run(self):
# These options are appended to mounted volume arguments
# NOTE: This tells Docker to re-label the directory for compatibility
# with SELinux. See `man docker-run` for more information.
self.vol_opts = ['z']
# Pass variables to scubainit
self.add_env('SCUBAINIT_UMASK', '{:04o}'.format(get_umask()))
if not self.as_root:
self.add_env('SCUBAINIT_UID', os.getuid())
self.add_env('SCUBAINIT_GID', os.getgid())
if self.verbose:
self.add_env('SCUBAINIT_VERBOSE', 1)
# Copy scubainit into the container
# We make a copy because Docker 1.13 gets pissed if we try to re-label
# /usr, and Fedora 28 gives an AVC denial.
scubainit_cpath = self.copy_scubadir_file('scubainit', self.scubainit_path)
# Hooks
for name in ('root', 'user', ):
self.__generate_hook_script(name)
# allocate TTY if scuba's output is going to a terminal
# and stdin is not redirected
if sys.stdout.isatty() and sys.stdin.isatty():
self.add_option('--tty')
# Process any aliases
try:
context = self.config.process_command(self.user_command)
except ConfigError as cfgerr:
raise ScubaError(str(cfgerr))
if self.image_override:
context.image = self.image_override
'''
Normally, if the user provides no command to "docker run", the image's
default CMD is run. Because we set the entrypiont, scuba must emulate the
default behavior itself.
'''
if not context.script:
# No user-provided command; we want to run the image's default command
verbose_msg('No user command; getting command from image')
default_cmd = get_image_command(context.image)
if not default_cmd:
raise ScubaError('No command given and no image-specified command')
verbose_msg('{} Cmd: "{}"'.format(context.image, default_cmd))
context.script = [shell_quote_cmd(default_cmd)]
# Make scubainit the real entrypoint, and use the defined entrypoint as
# the docker command (if it exists)
self.add_option('--entrypoint={}'.format(scubainit_cpath))
self.docker_cmd = []
if self.entrypoint_override is not None:
# --entrypoint takes precedence
if self.entrypoint_override != '':
self.docker_cmd = [self.entrypoint_override]
elif context.entrypoint is not None:
# then .scuba.yml
if context.entrypoint != '':
self.docker_cmd = [context.entrypoint]
else:
ep = get_image_entrypoint(context.image)
if ep:
self.docker_cmd = ep
# The user command is executed via a generated shell script
with self.open_scubadir_file('command.sh', 'wt') as f:
self.docker_cmd += ['/bin/sh', f.container_path]
writeln(f, '#!/bin/sh')
writeln(f, '# Auto-generated from scuba')
writeln(f, 'set -e')
for cmd in context.script:
writeln(f, cmd)
self.context = context
def open_scubadir_file(self, name, mode):
'''Opens a file in the 'scubadir'
This file will automatically be bind-mounted into the container,
at a path given by the 'container_path' property on the returned file object.
'''
path = os.path.join(self.__scubadir_hostpath, name)
assert not os.path.exists(path)
# Make any directories required
mkdir_p(os.path.dirname(path))
f = File(path, mode)
f.container_path = os.path.join(self.__scubadir_contpath, name)
return f
def copy_scubadir_file(self, name, source):
'''Copies source into the scubadir
Returns the container-path of the copied file
'''
dest = os.path.join(self.__scubadir_hostpath, name)
assert not os.path.exists(dest)
shutil.copy2(source, dest)
return os.path.join(self.__scubadir_contpath, name)
def __generate_hook_script(self, name):
script = self.config.hooks.get(name)
if not script:
return
# Generate the hook script, mount it into the container, and tell scubainit
with self.open_scubadir_file('hooks/{}.sh'.format(name), 'wt') as f:
self.add_env('SCUBAINIT_HOOK_{}'.format(name.upper()), f.container_path)
writeln(f, '#!/bin/sh')
writeln(f, '# Auto-generated from .scuba.yml')
writeln(f, 'set -e')
for cmd in script:
writeln(f, cmd)
def __get_vol_opts(self):
for hostpath, contpath, options in self.volumes:
yield hostpath, contpath, options + self.vol_opts
def get_docker_cmdline(self):
args = ['docker', 'run',
# interactive: keep STDIN open
'-i',
# remove container after exit
'--rm',
]
for name,val in self.env_vars.items():
args.append('--env={}={}'.format(name, val))
for hostpath, contpath, options in self.__get_vol_opts():
args.append(make_vol_opt(hostpath, contpath, options))
if self.workdir:
args += ['-w', self.workdir]
args += self.options
# Docker image
args.append(self.context.image)
# Command to run in container
args += self.docker_cmd
return args
|
JonathonReinhart/scuba | scuba/__main__.py | ScubaDive.__make_scubadir | python | def __make_scubadir(self):
'''Make temp directory where all ancillary files are bind-mounted
'''
self.__scubadir_hostpath = tempfile.mkdtemp(prefix='scubadir')
self.__scubadir_contpath = '/.scuba'
self.add_volume(self.__scubadir_hostpath, self.__scubadir_contpath) | Make temp directory where all ancillary files are bind-mounted | train | https://github.com/JonathonReinhart/scuba/blob/0244c81ec482d3c60202028bc075621447bc3ad1/scuba/__main__.py#L237-L242 | [
"def add_volume(self, hostpath, contpath, options=None):\n '''Add a volume (bind-mount) to the docker run invocation\n '''\n if options is None:\n options = []\n self.volumes.append((hostpath, contpath, options))\n"
] | class ScubaDive(object):
def __init__(self, user_command, docker_args=None, env=None, as_root=False, verbose=False,
image_override=None, entrypoint=None):
env = env or {}
if not isinstance(env, collections.Mapping):
raise ValueError('Argument env must be dict-like')
self.user_command = user_command
self.as_root = as_root
self.verbose = verbose
self.image_override = image_override
self.entrypoint_override = entrypoint
# These will be added to docker run cmdline
self.env_vars = env
self.volumes = []
self.options = docker_args or []
self.workdir = None
self.__locate_scubainit()
self.__load_config()
def prepare(self):
'''Prepare to run the docker command'''
self.__make_scubadir()
if self.is_remote_docker:
'''
Docker is running remotely (e.g. boot2docker on OSX).
We don't need to do any user setup whatsoever.
TODO: For now, remote instances won't have any .scubainit
See:
https://github.com/JonathonReinhart/scuba/issues/17
'''
raise ScubaError('Remote docker not supported (DOCKER_HOST is set)')
# Docker is running natively
self.__setup_native_run()
# Apply environment vars from .scuba.yml
self.env_vars.update(self.context.environment)
def __str__(self):
s = StringIO()
writeln(s, 'ScubaDive')
writeln(s, ' verbose: {}'.format(self.verbose))
writeln(s, ' as_root: {}'.format(self.as_root))
writeln(s, ' workdir: {}'.format(self.workdir))
writeln(s, ' options:')
for a in self.options:
writeln(s, ' ' + a)
writeln(s, ' env_vars:')
for k,v in self.env_vars.items():
writeln(s, ' {}={}'.format(k, v))
writeln(s, ' volumes:')
for hostpath, contpath, options in self.__get_vol_opts():
writeln(s, ' {} => {} {}'.format(hostpath, contpath, options))
writeln(s, ' user_command: {}'.format(self.user_command))
writeln(s, ' context:')
writeln(s, ' script: ' + str(self.context.script))
writeln(s, ' image: ' + str(self.context.image))
return s.getvalue()
def cleanup_tempfiles(self):
shutil.rmtree(self.__scubadir_hostpath)
@property
def is_remote_docker(self):
return 'DOCKER_HOST' in os.environ
def add_env(self, name, val):
'''Add an environment variable to the docker run invocation
'''
if name in self.env_vars:
raise KeyError(name)
self.env_vars[name] = val
def add_volume(self, hostpath, contpath, options=None):
'''Add a volume (bind-mount) to the docker run invocation
'''
if options is None:
options = []
self.volumes.append((hostpath, contpath, options))
def add_option(self, option):
'''Add another option to the docker run invocation
'''
self.options.append(option)
def set_workdir(self, workdir):
self.workdir = workdir
def __locate_scubainit(self):
'''Determine path to scubainit binary
'''
pkg_path = os.path.dirname(__file__)
self.scubainit_path = os.path.join(pkg_path, 'scubainit')
if not os.path.isfile(self.scubainit_path):
raise ScubaError('scubainit not found at "{}"'.format(self.scubainit_path))
def __load_config(self):
'''Find and load .scuba.yml
'''
# top_path is where .scuba.yml is found, and becomes the top of our bind mount.
# top_rel is the relative path from top_path to the current working directory,
# and is where we'll set the working directory in the container (relative to
# the bind mount point).
try:
top_path, top_rel = find_config()
self.config = load_config(os.path.join(top_path, SCUBA_YML))
except ConfigNotFoundError as cfgerr:
# SCUBA_YML can be missing if --image was given.
# In this case, we assume a default config
if not self.image_override:
raise ScubaError(str(cfgerr))
top_path, top_rel = os.getcwd(), ''
self.config = ScubaConfig(image=None)
except ConfigError as cfgerr:
raise ScubaError(str(cfgerr))
# Mount scuba root directory at the same path in the container...
self.add_volume(top_path, top_path)
# ...and set the working dir relative to it
self.set_workdir(os.path.join(top_path, top_rel))
self.add_env('SCUBA_ROOT', top_path)
def __setup_native_run(self):
# These options are appended to mounted volume arguments
# NOTE: This tells Docker to re-label the directory for compatibility
# with SELinux. See `man docker-run` for more information.
self.vol_opts = ['z']
# Pass variables to scubainit
self.add_env('SCUBAINIT_UMASK', '{:04o}'.format(get_umask()))
if not self.as_root:
self.add_env('SCUBAINIT_UID', os.getuid())
self.add_env('SCUBAINIT_GID', os.getgid())
if self.verbose:
self.add_env('SCUBAINIT_VERBOSE', 1)
# Copy scubainit into the container
# We make a copy because Docker 1.13 gets pissed if we try to re-label
# /usr, and Fedora 28 gives an AVC denial.
scubainit_cpath = self.copy_scubadir_file('scubainit', self.scubainit_path)
# Hooks
for name in ('root', 'user', ):
self.__generate_hook_script(name)
# allocate TTY if scuba's output is going to a terminal
# and stdin is not redirected
if sys.stdout.isatty() and sys.stdin.isatty():
self.add_option('--tty')
# Process any aliases
try:
context = self.config.process_command(self.user_command)
except ConfigError as cfgerr:
raise ScubaError(str(cfgerr))
if self.image_override:
context.image = self.image_override
'''
Normally, if the user provides no command to "docker run", the image's
default CMD is run. Because we set the entrypiont, scuba must emulate the
default behavior itself.
'''
if not context.script:
# No user-provided command; we want to run the image's default command
verbose_msg('No user command; getting command from image')
default_cmd = get_image_command(context.image)
if not default_cmd:
raise ScubaError('No command given and no image-specified command')
verbose_msg('{} Cmd: "{}"'.format(context.image, default_cmd))
context.script = [shell_quote_cmd(default_cmd)]
# Make scubainit the real entrypoint, and use the defined entrypoint as
# the docker command (if it exists)
self.add_option('--entrypoint={}'.format(scubainit_cpath))
self.docker_cmd = []
if self.entrypoint_override is not None:
# --entrypoint takes precedence
if self.entrypoint_override != '':
self.docker_cmd = [self.entrypoint_override]
elif context.entrypoint is not None:
# then .scuba.yml
if context.entrypoint != '':
self.docker_cmd = [context.entrypoint]
else:
ep = get_image_entrypoint(context.image)
if ep:
self.docker_cmd = ep
# The user command is executed via a generated shell script
with self.open_scubadir_file('command.sh', 'wt') as f:
self.docker_cmd += ['/bin/sh', f.container_path]
writeln(f, '#!/bin/sh')
writeln(f, '# Auto-generated from scuba')
writeln(f, 'set -e')
for cmd in context.script:
writeln(f, cmd)
self.context = context
def open_scubadir_file(self, name, mode):
'''Opens a file in the 'scubadir'
This file will automatically be bind-mounted into the container,
at a path given by the 'container_path' property on the returned file object.
'''
path = os.path.join(self.__scubadir_hostpath, name)
assert not os.path.exists(path)
# Make any directories required
mkdir_p(os.path.dirname(path))
f = File(path, mode)
f.container_path = os.path.join(self.__scubadir_contpath, name)
return f
def copy_scubadir_file(self, name, source):
'''Copies source into the scubadir
Returns the container-path of the copied file
'''
dest = os.path.join(self.__scubadir_hostpath, name)
assert not os.path.exists(dest)
shutil.copy2(source, dest)
return os.path.join(self.__scubadir_contpath, name)
def __generate_hook_script(self, name):
script = self.config.hooks.get(name)
if not script:
return
# Generate the hook script, mount it into the container, and tell scubainit
with self.open_scubadir_file('hooks/{}.sh'.format(name), 'wt') as f:
self.add_env('SCUBAINIT_HOOK_{}'.format(name.upper()), f.container_path)
writeln(f, '#!/bin/sh')
writeln(f, '# Auto-generated from .scuba.yml')
writeln(f, 'set -e')
for cmd in script:
writeln(f, cmd)
def __get_vol_opts(self):
for hostpath, contpath, options in self.volumes:
yield hostpath, contpath, options + self.vol_opts
def get_docker_cmdline(self):
args = ['docker', 'run',
# interactive: keep STDIN open
'-i',
# remove container after exit
'--rm',
]
for name,val in self.env_vars.items():
args.append('--env={}={}'.format(name, val))
for hostpath, contpath, options in self.__get_vol_opts():
args.append(make_vol_opt(hostpath, contpath, options))
if self.workdir:
args += ['-w', self.workdir]
args += self.options
# Docker image
args.append(self.context.image)
# Command to run in container
args += self.docker_cmd
return args
|
JonathonReinhart/scuba | scuba/__main__.py | ScubaDive.__setup_native_run | python | def __setup_native_run(self):
# These options are appended to mounted volume arguments
# NOTE: This tells Docker to re-label the directory for compatibility
# with SELinux. See `man docker-run` for more information.
self.vol_opts = ['z']
# Pass variables to scubainit
self.add_env('SCUBAINIT_UMASK', '{:04o}'.format(get_umask()))
if not self.as_root:
self.add_env('SCUBAINIT_UID', os.getuid())
self.add_env('SCUBAINIT_GID', os.getgid())
if self.verbose:
self.add_env('SCUBAINIT_VERBOSE', 1)
# Copy scubainit into the container
# We make a copy because Docker 1.13 gets pissed if we try to re-label
# /usr, and Fedora 28 gives an AVC denial.
scubainit_cpath = self.copy_scubadir_file('scubainit', self.scubainit_path)
# Hooks
for name in ('root', 'user', ):
self.__generate_hook_script(name)
# allocate TTY if scuba's output is going to a terminal
# and stdin is not redirected
if sys.stdout.isatty() and sys.stdin.isatty():
self.add_option('--tty')
# Process any aliases
try:
context = self.config.process_command(self.user_command)
except ConfigError as cfgerr:
raise ScubaError(str(cfgerr))
if self.image_override:
context.image = self.image_override
'''
Normally, if the user provides no command to "docker run", the image's
default CMD is run. Because we set the entrypiont, scuba must emulate the
default behavior itself.
'''
if not context.script:
# No user-provided command; we want to run the image's default command
verbose_msg('No user command; getting command from image')
default_cmd = get_image_command(context.image)
if not default_cmd:
raise ScubaError('No command given and no image-specified command')
verbose_msg('{} Cmd: "{}"'.format(context.image, default_cmd))
context.script = [shell_quote_cmd(default_cmd)]
# Make scubainit the real entrypoint, and use the defined entrypoint as
# the docker command (if it exists)
self.add_option('--entrypoint={}'.format(scubainit_cpath))
self.docker_cmd = []
if self.entrypoint_override is not None:
# --entrypoint takes precedence
if self.entrypoint_override != '':
self.docker_cmd = [self.entrypoint_override]
elif context.entrypoint is not None:
# then .scuba.yml
if context.entrypoint != '':
self.docker_cmd = [context.entrypoint]
else:
ep = get_image_entrypoint(context.image)
if ep:
self.docker_cmd = ep
# The user command is executed via a generated shell script
with self.open_scubadir_file('command.sh', 'wt') as f:
self.docker_cmd += ['/bin/sh', f.container_path]
writeln(f, '#!/bin/sh')
writeln(f, '# Auto-generated from scuba')
writeln(f, 'set -e')
for cmd in context.script:
writeln(f, cmd)
self.context = context | Normally, if the user provides no command to "docker run", the image's
default CMD is run. Because we set the entrypiont, scuba must emulate the
default behavior itself. | train | https://github.com/JonathonReinhart/scuba/blob/0244c81ec482d3c60202028bc075621447bc3ad1/scuba/__main__.py#L244-L326 | [
"def get_umask():\n # Same logic as bash/builtins/umask.def\n val = os.umask(0o22)\n os.umask(val)\n return val\n",
"def add_env(self, name, val):\n '''Add an environment variable to the docker run invocation\n '''\n if name in self.env_vars:\n raise KeyError(name)\n self.env_vars[name] = val\n",
"def copy_scubadir_file(self, name, source):\n '''Copies source into the scubadir\n\n Returns the container-path of the copied file\n '''\n dest = os.path.join(self.__scubadir_hostpath, name)\n assert not os.path.exists(dest)\n shutil.copy2(source, dest)\n\n return os.path.join(self.__scubadir_contpath, name)\n",
"def __generate_hook_script(self, name):\n script = self.config.hooks.get(name)\n if not script:\n return\n\n # Generate the hook script, mount it into the container, and tell scubainit\n with self.open_scubadir_file('hooks/{}.sh'.format(name), 'wt') as f:\n\n self.add_env('SCUBAINIT_HOOK_{}'.format(name.upper()), f.container_path)\n\n writeln(f, '#!/bin/sh')\n writeln(f, '# Auto-generated from .scuba.yml')\n writeln(f, 'set -e')\n for cmd in script:\n writeln(f, cmd)\n"
] | class ScubaDive(object):
def __init__(self, user_command, docker_args=None, env=None, as_root=False, verbose=False,
image_override=None, entrypoint=None):
env = env or {}
if not isinstance(env, collections.Mapping):
raise ValueError('Argument env must be dict-like')
self.user_command = user_command
self.as_root = as_root
self.verbose = verbose
self.image_override = image_override
self.entrypoint_override = entrypoint
# These will be added to docker run cmdline
self.env_vars = env
self.volumes = []
self.options = docker_args or []
self.workdir = None
self.__locate_scubainit()
self.__load_config()
def prepare(self):
'''Prepare to run the docker command'''
self.__make_scubadir()
if self.is_remote_docker:
'''
Docker is running remotely (e.g. boot2docker on OSX).
We don't need to do any user setup whatsoever.
TODO: For now, remote instances won't have any .scubainit
See:
https://github.com/JonathonReinhart/scuba/issues/17
'''
raise ScubaError('Remote docker not supported (DOCKER_HOST is set)')
# Docker is running natively
self.__setup_native_run()
# Apply environment vars from .scuba.yml
self.env_vars.update(self.context.environment)
def __str__(self):
s = StringIO()
writeln(s, 'ScubaDive')
writeln(s, ' verbose: {}'.format(self.verbose))
writeln(s, ' as_root: {}'.format(self.as_root))
writeln(s, ' workdir: {}'.format(self.workdir))
writeln(s, ' options:')
for a in self.options:
writeln(s, ' ' + a)
writeln(s, ' env_vars:')
for k,v in self.env_vars.items():
writeln(s, ' {}={}'.format(k, v))
writeln(s, ' volumes:')
for hostpath, contpath, options in self.__get_vol_opts():
writeln(s, ' {} => {} {}'.format(hostpath, contpath, options))
writeln(s, ' user_command: {}'.format(self.user_command))
writeln(s, ' context:')
writeln(s, ' script: ' + str(self.context.script))
writeln(s, ' image: ' + str(self.context.image))
return s.getvalue()
def cleanup_tempfiles(self):
shutil.rmtree(self.__scubadir_hostpath)
@property
def is_remote_docker(self):
return 'DOCKER_HOST' in os.environ
def add_env(self, name, val):
'''Add an environment variable to the docker run invocation
'''
if name in self.env_vars:
raise KeyError(name)
self.env_vars[name] = val
def add_volume(self, hostpath, contpath, options=None):
'''Add a volume (bind-mount) to the docker run invocation
'''
if options is None:
options = []
self.volumes.append((hostpath, contpath, options))
def add_option(self, option):
'''Add another option to the docker run invocation
'''
self.options.append(option)
def set_workdir(self, workdir):
self.workdir = workdir
def __locate_scubainit(self):
'''Determine path to scubainit binary
'''
pkg_path = os.path.dirname(__file__)
self.scubainit_path = os.path.join(pkg_path, 'scubainit')
if not os.path.isfile(self.scubainit_path):
raise ScubaError('scubainit not found at "{}"'.format(self.scubainit_path))
def __load_config(self):
'''Find and load .scuba.yml
'''
# top_path is where .scuba.yml is found, and becomes the top of our bind mount.
# top_rel is the relative path from top_path to the current working directory,
# and is where we'll set the working directory in the container (relative to
# the bind mount point).
try:
top_path, top_rel = find_config()
self.config = load_config(os.path.join(top_path, SCUBA_YML))
except ConfigNotFoundError as cfgerr:
# SCUBA_YML can be missing if --image was given.
# In this case, we assume a default config
if not self.image_override:
raise ScubaError(str(cfgerr))
top_path, top_rel = os.getcwd(), ''
self.config = ScubaConfig(image=None)
except ConfigError as cfgerr:
raise ScubaError(str(cfgerr))
# Mount scuba root directory at the same path in the container...
self.add_volume(top_path, top_path)
# ...and set the working dir relative to it
self.set_workdir(os.path.join(top_path, top_rel))
self.add_env('SCUBA_ROOT', top_path)
def __make_scubadir(self):
'''Make temp directory where all ancillary files are bind-mounted
'''
self.__scubadir_hostpath = tempfile.mkdtemp(prefix='scubadir')
self.__scubadir_contpath = '/.scuba'
self.add_volume(self.__scubadir_hostpath, self.__scubadir_contpath)
def open_scubadir_file(self, name, mode):
'''Opens a file in the 'scubadir'
This file will automatically be bind-mounted into the container,
at a path given by the 'container_path' property on the returned file object.
'''
path = os.path.join(self.__scubadir_hostpath, name)
assert not os.path.exists(path)
# Make any directories required
mkdir_p(os.path.dirname(path))
f = File(path, mode)
f.container_path = os.path.join(self.__scubadir_contpath, name)
return f
def copy_scubadir_file(self, name, source):
'''Copies source into the scubadir
Returns the container-path of the copied file
'''
dest = os.path.join(self.__scubadir_hostpath, name)
assert not os.path.exists(dest)
shutil.copy2(source, dest)
return os.path.join(self.__scubadir_contpath, name)
def __generate_hook_script(self, name):
script = self.config.hooks.get(name)
if not script:
return
# Generate the hook script, mount it into the container, and tell scubainit
with self.open_scubadir_file('hooks/{}.sh'.format(name), 'wt') as f:
self.add_env('SCUBAINIT_HOOK_{}'.format(name.upper()), f.container_path)
writeln(f, '#!/bin/sh')
writeln(f, '# Auto-generated from .scuba.yml')
writeln(f, 'set -e')
for cmd in script:
writeln(f, cmd)
def __get_vol_opts(self):
for hostpath, contpath, options in self.volumes:
yield hostpath, contpath, options + self.vol_opts
def get_docker_cmdline(self):
args = ['docker', 'run',
# interactive: keep STDIN open
'-i',
# remove container after exit
'--rm',
]
for name,val in self.env_vars.items():
args.append('--env={}={}'.format(name, val))
for hostpath, contpath, options in self.__get_vol_opts():
args.append(make_vol_opt(hostpath, contpath, options))
if self.workdir:
args += ['-w', self.workdir]
args += self.options
# Docker image
args.append(self.context.image)
# Command to run in container
args += self.docker_cmd
return args
|
JonathonReinhart/scuba | scuba/__main__.py | ScubaDive.open_scubadir_file | python | def open_scubadir_file(self, name, mode):
'''Opens a file in the 'scubadir'
This file will automatically be bind-mounted into the container,
at a path given by the 'container_path' property on the returned file object.
'''
path = os.path.join(self.__scubadir_hostpath, name)
assert not os.path.exists(path)
# Make any directories required
mkdir_p(os.path.dirname(path))
f = File(path, mode)
f.container_path = os.path.join(self.__scubadir_contpath, name)
return f | Opens a file in the 'scubadir'
This file will automatically be bind-mounted into the container,
at a path given by the 'container_path' property on the returned file object. | train | https://github.com/JonathonReinhart/scuba/blob/0244c81ec482d3c60202028bc075621447bc3ad1/scuba/__main__.py#L330-L344 | [
"def mkdir_p(path):\n # http://stackoverflow.com/a/600612/119527\n try:\n os.makedirs(path)\n except OSError as exc:\n if not (exc.errno == errno.EEXIST and os.path.isdir(path)):\n raise\n"
] | class ScubaDive(object):
def __init__(self, user_command, docker_args=None, env=None, as_root=False, verbose=False,
image_override=None, entrypoint=None):
env = env or {}
if not isinstance(env, collections.Mapping):
raise ValueError('Argument env must be dict-like')
self.user_command = user_command
self.as_root = as_root
self.verbose = verbose
self.image_override = image_override
self.entrypoint_override = entrypoint
# These will be added to docker run cmdline
self.env_vars = env
self.volumes = []
self.options = docker_args or []
self.workdir = None
self.__locate_scubainit()
self.__load_config()
def prepare(self):
'''Prepare to run the docker command'''
self.__make_scubadir()
if self.is_remote_docker:
'''
Docker is running remotely (e.g. boot2docker on OSX).
We don't need to do any user setup whatsoever.
TODO: For now, remote instances won't have any .scubainit
See:
https://github.com/JonathonReinhart/scuba/issues/17
'''
raise ScubaError('Remote docker not supported (DOCKER_HOST is set)')
# Docker is running natively
self.__setup_native_run()
# Apply environment vars from .scuba.yml
self.env_vars.update(self.context.environment)
def __str__(self):
s = StringIO()
writeln(s, 'ScubaDive')
writeln(s, ' verbose: {}'.format(self.verbose))
writeln(s, ' as_root: {}'.format(self.as_root))
writeln(s, ' workdir: {}'.format(self.workdir))
writeln(s, ' options:')
for a in self.options:
writeln(s, ' ' + a)
writeln(s, ' env_vars:')
for k,v in self.env_vars.items():
writeln(s, ' {}={}'.format(k, v))
writeln(s, ' volumes:')
for hostpath, contpath, options in self.__get_vol_opts():
writeln(s, ' {} => {} {}'.format(hostpath, contpath, options))
writeln(s, ' user_command: {}'.format(self.user_command))
writeln(s, ' context:')
writeln(s, ' script: ' + str(self.context.script))
writeln(s, ' image: ' + str(self.context.image))
return s.getvalue()
def cleanup_tempfiles(self):
shutil.rmtree(self.__scubadir_hostpath)
@property
def is_remote_docker(self):
return 'DOCKER_HOST' in os.environ
def add_env(self, name, val):
'''Add an environment variable to the docker run invocation
'''
if name in self.env_vars:
raise KeyError(name)
self.env_vars[name] = val
def add_volume(self, hostpath, contpath, options=None):
'''Add a volume (bind-mount) to the docker run invocation
'''
if options is None:
options = []
self.volumes.append((hostpath, contpath, options))
def add_option(self, option):
'''Add another option to the docker run invocation
'''
self.options.append(option)
def set_workdir(self, workdir):
self.workdir = workdir
def __locate_scubainit(self):
'''Determine path to scubainit binary
'''
pkg_path = os.path.dirname(__file__)
self.scubainit_path = os.path.join(pkg_path, 'scubainit')
if not os.path.isfile(self.scubainit_path):
raise ScubaError('scubainit not found at "{}"'.format(self.scubainit_path))
def __load_config(self):
'''Find and load .scuba.yml
'''
# top_path is where .scuba.yml is found, and becomes the top of our bind mount.
# top_rel is the relative path from top_path to the current working directory,
# and is where we'll set the working directory in the container (relative to
# the bind mount point).
try:
top_path, top_rel = find_config()
self.config = load_config(os.path.join(top_path, SCUBA_YML))
except ConfigNotFoundError as cfgerr:
# SCUBA_YML can be missing if --image was given.
# In this case, we assume a default config
if not self.image_override:
raise ScubaError(str(cfgerr))
top_path, top_rel = os.getcwd(), ''
self.config = ScubaConfig(image=None)
except ConfigError as cfgerr:
raise ScubaError(str(cfgerr))
# Mount scuba root directory at the same path in the container...
self.add_volume(top_path, top_path)
# ...and set the working dir relative to it
self.set_workdir(os.path.join(top_path, top_rel))
self.add_env('SCUBA_ROOT', top_path)
def __make_scubadir(self):
'''Make temp directory where all ancillary files are bind-mounted
'''
self.__scubadir_hostpath = tempfile.mkdtemp(prefix='scubadir')
self.__scubadir_contpath = '/.scuba'
self.add_volume(self.__scubadir_hostpath, self.__scubadir_contpath)
def __setup_native_run(self):
# These options are appended to mounted volume arguments
# NOTE: This tells Docker to re-label the directory for compatibility
# with SELinux. See `man docker-run` for more information.
self.vol_opts = ['z']
# Pass variables to scubainit
self.add_env('SCUBAINIT_UMASK', '{:04o}'.format(get_umask()))
if not self.as_root:
self.add_env('SCUBAINIT_UID', os.getuid())
self.add_env('SCUBAINIT_GID', os.getgid())
if self.verbose:
self.add_env('SCUBAINIT_VERBOSE', 1)
# Copy scubainit into the container
# We make a copy because Docker 1.13 gets pissed if we try to re-label
# /usr, and Fedora 28 gives an AVC denial.
scubainit_cpath = self.copy_scubadir_file('scubainit', self.scubainit_path)
# Hooks
for name in ('root', 'user', ):
self.__generate_hook_script(name)
# allocate TTY if scuba's output is going to a terminal
# and stdin is not redirected
if sys.stdout.isatty() and sys.stdin.isatty():
self.add_option('--tty')
# Process any aliases
try:
context = self.config.process_command(self.user_command)
except ConfigError as cfgerr:
raise ScubaError(str(cfgerr))
if self.image_override:
context.image = self.image_override
'''
Normally, if the user provides no command to "docker run", the image's
default CMD is run. Because we set the entrypiont, scuba must emulate the
default behavior itself.
'''
if not context.script:
# No user-provided command; we want to run the image's default command
verbose_msg('No user command; getting command from image')
default_cmd = get_image_command(context.image)
if not default_cmd:
raise ScubaError('No command given and no image-specified command')
verbose_msg('{} Cmd: "{}"'.format(context.image, default_cmd))
context.script = [shell_quote_cmd(default_cmd)]
# Make scubainit the real entrypoint, and use the defined entrypoint as
# the docker command (if it exists)
self.add_option('--entrypoint={}'.format(scubainit_cpath))
self.docker_cmd = []
if self.entrypoint_override is not None:
# --entrypoint takes precedence
if self.entrypoint_override != '':
self.docker_cmd = [self.entrypoint_override]
elif context.entrypoint is not None:
# then .scuba.yml
if context.entrypoint != '':
self.docker_cmd = [context.entrypoint]
else:
ep = get_image_entrypoint(context.image)
if ep:
self.docker_cmd = ep
# The user command is executed via a generated shell script
with self.open_scubadir_file('command.sh', 'wt') as f:
self.docker_cmd += ['/bin/sh', f.container_path]
writeln(f, '#!/bin/sh')
writeln(f, '# Auto-generated from scuba')
writeln(f, 'set -e')
for cmd in context.script:
writeln(f, cmd)
self.context = context
def copy_scubadir_file(self, name, source):
'''Copies source into the scubadir
Returns the container-path of the copied file
'''
dest = os.path.join(self.__scubadir_hostpath, name)
assert not os.path.exists(dest)
shutil.copy2(source, dest)
return os.path.join(self.__scubadir_contpath, name)
def __generate_hook_script(self, name):
script = self.config.hooks.get(name)
if not script:
return
# Generate the hook script, mount it into the container, and tell scubainit
with self.open_scubadir_file('hooks/{}.sh'.format(name), 'wt') as f:
self.add_env('SCUBAINIT_HOOK_{}'.format(name.upper()), f.container_path)
writeln(f, '#!/bin/sh')
writeln(f, '# Auto-generated from .scuba.yml')
writeln(f, 'set -e')
for cmd in script:
writeln(f, cmd)
def __get_vol_opts(self):
for hostpath, contpath, options in self.volumes:
yield hostpath, contpath, options + self.vol_opts
def get_docker_cmdline(self):
args = ['docker', 'run',
# interactive: keep STDIN open
'-i',
# remove container after exit
'--rm',
]
for name,val in self.env_vars.items():
args.append('--env={}={}'.format(name, val))
for hostpath, contpath, options in self.__get_vol_opts():
args.append(make_vol_opt(hostpath, contpath, options))
if self.workdir:
args += ['-w', self.workdir]
args += self.options
# Docker image
args.append(self.context.image)
# Command to run in container
args += self.docker_cmd
return args
|
JonathonReinhart/scuba | scuba/__main__.py | ScubaDive.copy_scubadir_file | python | def copy_scubadir_file(self, name, source):
'''Copies source into the scubadir
Returns the container-path of the copied file
'''
dest = os.path.join(self.__scubadir_hostpath, name)
assert not os.path.exists(dest)
shutil.copy2(source, dest)
return os.path.join(self.__scubadir_contpath, name) | Copies source into the scubadir
Returns the container-path of the copied file | train | https://github.com/JonathonReinhart/scuba/blob/0244c81ec482d3c60202028bc075621447bc3ad1/scuba/__main__.py#L347-L356 | null | class ScubaDive(object):
def __init__(self, user_command, docker_args=None, env=None, as_root=False, verbose=False,
image_override=None, entrypoint=None):
env = env or {}
if not isinstance(env, collections.Mapping):
raise ValueError('Argument env must be dict-like')
self.user_command = user_command
self.as_root = as_root
self.verbose = verbose
self.image_override = image_override
self.entrypoint_override = entrypoint
# These will be added to docker run cmdline
self.env_vars = env
self.volumes = []
self.options = docker_args or []
self.workdir = None
self.__locate_scubainit()
self.__load_config()
def prepare(self):
'''Prepare to run the docker command'''
self.__make_scubadir()
if self.is_remote_docker:
'''
Docker is running remotely (e.g. boot2docker on OSX).
We don't need to do any user setup whatsoever.
TODO: For now, remote instances won't have any .scubainit
See:
https://github.com/JonathonReinhart/scuba/issues/17
'''
raise ScubaError('Remote docker not supported (DOCKER_HOST is set)')
# Docker is running natively
self.__setup_native_run()
# Apply environment vars from .scuba.yml
self.env_vars.update(self.context.environment)
def __str__(self):
s = StringIO()
writeln(s, 'ScubaDive')
writeln(s, ' verbose: {}'.format(self.verbose))
writeln(s, ' as_root: {}'.format(self.as_root))
writeln(s, ' workdir: {}'.format(self.workdir))
writeln(s, ' options:')
for a in self.options:
writeln(s, ' ' + a)
writeln(s, ' env_vars:')
for k,v in self.env_vars.items():
writeln(s, ' {}={}'.format(k, v))
writeln(s, ' volumes:')
for hostpath, contpath, options in self.__get_vol_opts():
writeln(s, ' {} => {} {}'.format(hostpath, contpath, options))
writeln(s, ' user_command: {}'.format(self.user_command))
writeln(s, ' context:')
writeln(s, ' script: ' + str(self.context.script))
writeln(s, ' image: ' + str(self.context.image))
return s.getvalue()
def cleanup_tempfiles(self):
shutil.rmtree(self.__scubadir_hostpath)
@property
def is_remote_docker(self):
return 'DOCKER_HOST' in os.environ
def add_env(self, name, val):
'''Add an environment variable to the docker run invocation
'''
if name in self.env_vars:
raise KeyError(name)
self.env_vars[name] = val
def add_volume(self, hostpath, contpath, options=None):
'''Add a volume (bind-mount) to the docker run invocation
'''
if options is None:
options = []
self.volumes.append((hostpath, contpath, options))
def add_option(self, option):
'''Add another option to the docker run invocation
'''
self.options.append(option)
def set_workdir(self, workdir):
self.workdir = workdir
def __locate_scubainit(self):
'''Determine path to scubainit binary
'''
pkg_path = os.path.dirname(__file__)
self.scubainit_path = os.path.join(pkg_path, 'scubainit')
if not os.path.isfile(self.scubainit_path):
raise ScubaError('scubainit not found at "{}"'.format(self.scubainit_path))
def __load_config(self):
'''Find and load .scuba.yml
'''
# top_path is where .scuba.yml is found, and becomes the top of our bind mount.
# top_rel is the relative path from top_path to the current working directory,
# and is where we'll set the working directory in the container (relative to
# the bind mount point).
try:
top_path, top_rel = find_config()
self.config = load_config(os.path.join(top_path, SCUBA_YML))
except ConfigNotFoundError as cfgerr:
# SCUBA_YML can be missing if --image was given.
# In this case, we assume a default config
if not self.image_override:
raise ScubaError(str(cfgerr))
top_path, top_rel = os.getcwd(), ''
self.config = ScubaConfig(image=None)
except ConfigError as cfgerr:
raise ScubaError(str(cfgerr))
# Mount scuba root directory at the same path in the container...
self.add_volume(top_path, top_path)
# ...and set the working dir relative to it
self.set_workdir(os.path.join(top_path, top_rel))
self.add_env('SCUBA_ROOT', top_path)
def __make_scubadir(self):
'''Make temp directory where all ancillary files are bind-mounted
'''
self.__scubadir_hostpath = tempfile.mkdtemp(prefix='scubadir')
self.__scubadir_contpath = '/.scuba'
self.add_volume(self.__scubadir_hostpath, self.__scubadir_contpath)
def __setup_native_run(self):
# These options are appended to mounted volume arguments
# NOTE: This tells Docker to re-label the directory for compatibility
# with SELinux. See `man docker-run` for more information.
self.vol_opts = ['z']
# Pass variables to scubainit
self.add_env('SCUBAINIT_UMASK', '{:04o}'.format(get_umask()))
if not self.as_root:
self.add_env('SCUBAINIT_UID', os.getuid())
self.add_env('SCUBAINIT_GID', os.getgid())
if self.verbose:
self.add_env('SCUBAINIT_VERBOSE', 1)
# Copy scubainit into the container
# We make a copy because Docker 1.13 gets pissed if we try to re-label
# /usr, and Fedora 28 gives an AVC denial.
scubainit_cpath = self.copy_scubadir_file('scubainit', self.scubainit_path)
# Hooks
for name in ('root', 'user', ):
self.__generate_hook_script(name)
# allocate TTY if scuba's output is going to a terminal
# and stdin is not redirected
if sys.stdout.isatty() and sys.stdin.isatty():
self.add_option('--tty')
# Process any aliases
try:
context = self.config.process_command(self.user_command)
except ConfigError as cfgerr:
raise ScubaError(str(cfgerr))
if self.image_override:
context.image = self.image_override
'''
Normally, if the user provides no command to "docker run", the image's
default CMD is run. Because we set the entrypiont, scuba must emulate the
default behavior itself.
'''
if not context.script:
# No user-provided command; we want to run the image's default command
verbose_msg('No user command; getting command from image')
default_cmd = get_image_command(context.image)
if not default_cmd:
raise ScubaError('No command given and no image-specified command')
verbose_msg('{} Cmd: "{}"'.format(context.image, default_cmd))
context.script = [shell_quote_cmd(default_cmd)]
# Make scubainit the real entrypoint, and use the defined entrypoint as
# the docker command (if it exists)
self.add_option('--entrypoint={}'.format(scubainit_cpath))
self.docker_cmd = []
if self.entrypoint_override is not None:
# --entrypoint takes precedence
if self.entrypoint_override != '':
self.docker_cmd = [self.entrypoint_override]
elif context.entrypoint is not None:
# then .scuba.yml
if context.entrypoint != '':
self.docker_cmd = [context.entrypoint]
else:
ep = get_image_entrypoint(context.image)
if ep:
self.docker_cmd = ep
# The user command is executed via a generated shell script
with self.open_scubadir_file('command.sh', 'wt') as f:
self.docker_cmd += ['/bin/sh', f.container_path]
writeln(f, '#!/bin/sh')
writeln(f, '# Auto-generated from scuba')
writeln(f, 'set -e')
for cmd in context.script:
writeln(f, cmd)
self.context = context
def open_scubadir_file(self, name, mode):
'''Opens a file in the 'scubadir'
This file will automatically be bind-mounted into the container,
at a path given by the 'container_path' property on the returned file object.
'''
path = os.path.join(self.__scubadir_hostpath, name)
assert not os.path.exists(path)
# Make any directories required
mkdir_p(os.path.dirname(path))
f = File(path, mode)
f.container_path = os.path.join(self.__scubadir_contpath, name)
return f
def __generate_hook_script(self, name):
script = self.config.hooks.get(name)
if not script:
return
# Generate the hook script, mount it into the container, and tell scubainit
with self.open_scubadir_file('hooks/{}.sh'.format(name), 'wt') as f:
self.add_env('SCUBAINIT_HOOK_{}'.format(name.upper()), f.container_path)
writeln(f, '#!/bin/sh')
writeln(f, '# Auto-generated from .scuba.yml')
writeln(f, 'set -e')
for cmd in script:
writeln(f, cmd)
def __get_vol_opts(self):
for hostpath, contpath, options in self.volumes:
yield hostpath, contpath, options + self.vol_opts
def get_docker_cmdline(self):
args = ['docker', 'run',
# interactive: keep STDIN open
'-i',
# remove container after exit
'--rm',
]
for name,val in self.env_vars.items():
args.append('--env={}={}'.format(name, val))
for hostpath, contpath, options in self.__get_vol_opts():
args.append(make_vol_opt(hostpath, contpath, options))
if self.workdir:
args += ['-w', self.workdir]
args += self.options
# Docker image
args.append(self.context.image)
# Command to run in container
args += self.docker_cmd
return args
|
JonathonReinhart/scuba | scuba/utils.py | format_cmdline | python | def format_cmdline(args, maxwidth=80):
'''Format args into a shell-quoted command line.
The result will be wrapped to maxwidth characters where possible,
not breaking a single long argument.
'''
# Leave room for the space and backslash at the end of each line
maxwidth -= 2
def lines():
line = ''
for a in (shell_quote(a) for a in args):
# If adding this argument will make the line too long,
# yield the current line, and start a new one.
if len(line) + len(a) + 1 > maxwidth:
yield line
line = ''
# Append this argument to the current line, separating
# it by a space from the existing arguments.
if line:
line += ' ' + a
else:
line = a
yield line
return ' \\\n'.join(lines()) | Format args into a shell-quoted command line.
The result will be wrapped to maxwidth characters where possible,
not breaking a single long argument. | train | https://github.com/JonathonReinhart/scuba/blob/0244c81ec482d3c60202028bc075621447bc3ad1/scuba/utils.py#L13-L41 | [
"def lines():\n line = ''\n for a in (shell_quote(a) for a in args):\n # If adding this argument will make the line too long,\n # yield the current line, and start a new one.\n if len(line) + len(a) + 1 > maxwidth:\n yield line\n line = ''\n\n # Append this argument to the current line, separating\n # it by a space from the existing arguments.\n if line:\n line += ' ' + a\n else:\n line = a\n\n yield line\n"
] | import errno
import os
try:
from shlex import quote as shell_quote
except ImportError:
from pipes import quote as shell_quote
def shell_quote_cmd(cmdlist):
return ' '.join(map(shell_quote, cmdlist))
def mkdir_p(path):
# http://stackoverflow.com/a/600612/119527
try:
os.makedirs(path)
except OSError as exc:
if not (exc.errno == errno.EEXIST and os.path.isdir(path)):
raise
def parse_env_var(s):
"""Parse an environment variable string
Returns a key-value tuple
Apply the same logic as `docker run -e`:
"If the operator names an environment variable without specifying a value,
then the current value of the named variable is propagated into the
container's environment
"""
parts = s.split('=', 1)
if len(parts) == 2:
k, v = parts
return (k, v)
k = parts[0]
return (k, os.getenv(k, ''))
def flatten_list(x):
if not isinstance(x, list):
raise ValueError("argument is not a list")
result = []
for i in x:
if isinstance(i, list):
for j in flatten_list(i):
result.append(j)
else:
result.append(i)
return result
|
JonathonReinhart/scuba | scuba/utils.py | parse_env_var | python | def parse_env_var(s):
parts = s.split('=', 1)
if len(parts) == 2:
k, v = parts
return (k, v)
k = parts[0]
return (k, os.getenv(k, '')) | Parse an environment variable string
Returns a key-value tuple
Apply the same logic as `docker run -e`:
"If the operator names an environment variable without specifying a value,
then the current value of the named variable is propagated into the
container's environment | train | https://github.com/JonathonReinhart/scuba/blob/0244c81ec482d3c60202028bc075621447bc3ad1/scuba/utils.py#L53-L69 | null | import errno
import os
try:
from shlex import quote as shell_quote
except ImportError:
from pipes import quote as shell_quote
def shell_quote_cmd(cmdlist):
return ' '.join(map(shell_quote, cmdlist))
def format_cmdline(args, maxwidth=80):
'''Format args into a shell-quoted command line.
The result will be wrapped to maxwidth characters where possible,
not breaking a single long argument.
'''
# Leave room for the space and backslash at the end of each line
maxwidth -= 2
def lines():
line = ''
for a in (shell_quote(a) for a in args):
# If adding this argument will make the line too long,
# yield the current line, and start a new one.
if len(line) + len(a) + 1 > maxwidth:
yield line
line = ''
# Append this argument to the current line, separating
# it by a space from the existing arguments.
if line:
line += ' ' + a
else:
line = a
yield line
return ' \\\n'.join(lines())
def mkdir_p(path):
# http://stackoverflow.com/a/600612/119527
try:
os.makedirs(path)
except OSError as exc:
if not (exc.errno == errno.EEXIST and os.path.isdir(path)):
raise
def flatten_list(x):
if not isinstance(x, list):
raise ValueError("argument is not a list")
result = []
for i in x:
if isinstance(i, list):
for j in flatten_list(i):
result.append(j)
else:
result.append(i)
return result
|
JonathonReinhart/scuba | scuba/dockerutil.py | __wrap_docker_exec | python | def __wrap_docker_exec(func):
'''Wrap a function to raise DockerExecuteError on ENOENT'''
def call(*args, **kwargs):
try:
return func(*args, **kwargs)
except OSError as e:
if e.errno == errno.ENOENT:
raise DockerExecuteError('Failed to execute docker. Is it installed?')
raise
return call | Wrap a function to raise DockerExecuteError on ENOENT | train | https://github.com/JonathonReinhart/scuba/blob/0244c81ec482d3c60202028bc075621447bc3ad1/scuba/dockerutil.py#L19-L28 | null | import subprocess
import errno
import json
class DockerError(Exception):
pass
class DockerExecuteError(DockerError):
pass
class NoSuchImageError(DockerError):
def __init__(self, image):
self.image = image
def __str__(self):
return 'No such image: {}'.format(self.image)
Popen = __wrap_docker_exec(subprocess.Popen)
call = __wrap_docker_exec(subprocess.call)
def docker_inspect(image):
'''Inspects a docker image
Returns: Parsed JSON data
'''
args = ['docker', 'inspect', '--type', 'image', image]
p = Popen(args, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
stdout, stderr = p.communicate()
stdout = stdout.decode('utf-8')
stderr = stderr.decode('utf-8')
if not p.returncode == 0:
if 'no such image' in stderr.lower():
raise NoSuchImageError(image)
raise DockerError('Failed to inspect image: {}'.format(stderr.strip()))
return json.loads(stdout)[0]
def docker_pull(image):
'''Pulls an image'''
args = ['docker', 'pull', image]
# If this fails, the default docker stdout/stderr looks good to the user.
ret = call(args)
if ret != 0:
raise DockerError('Failed to pull image "{}"'.format(image))
def docker_inspect_or_pull(image):
'''Inspects a docker image, pulling it if it doesn't exist'''
try:
return docker_inspect(image)
except NoSuchImageError:
# If it doesn't exist yet, try to pull it now (#79)
docker_pull(image)
return docker_inspect(image)
def get_image_command(image):
'''Gets the default command for an image'''
info = docker_inspect_or_pull(image)
try:
return info['Config']['Cmd']
except KeyError as ke:
raise DockerError('Failed to inspect image: JSON result missing key {}'.format(ke))
def get_image_entrypoint(image):
'''Gets the image entrypoint'''
info = docker_inspect_or_pull(image)
try:
return info['Config']['Entrypoint']
except KeyError as ke:
raise DockerError('Failed to inspect image: JSON result missing key {}'.format(ke))
def make_vol_opt(hostdir, contdir, options=None):
'''Generate a docker volume option'''
vol = '--volume={}:{}'.format(hostdir, contdir)
if options != None:
if isinstance(options, str):
options = (options,)
vol += ':' + ','.join(options)
return vol
|
JonathonReinhart/scuba | scuba/dockerutil.py | docker_inspect | python | def docker_inspect(image):
'''Inspects a docker image
Returns: Parsed JSON data
'''
args = ['docker', 'inspect', '--type', 'image', image]
p = Popen(args, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
stdout, stderr = p.communicate()
stdout = stdout.decode('utf-8')
stderr = stderr.decode('utf-8')
if not p.returncode == 0:
if 'no such image' in stderr.lower():
raise NoSuchImageError(image)
raise DockerError('Failed to inspect image: {}'.format(stderr.strip()))
return json.loads(stdout)[0] | Inspects a docker image
Returns: Parsed JSON data | train | https://github.com/JonathonReinhart/scuba/blob/0244c81ec482d3c60202028bc075621447bc3ad1/scuba/dockerutil.py#L34-L51 | [
"def call(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except OSError as e:\n if e.errno == errno.ENOENT:\n raise DockerExecuteError('Failed to execute docker. Is it installed?')\n raise\n"
] | import subprocess
import errno
import json
class DockerError(Exception):
pass
class DockerExecuteError(DockerError):
pass
class NoSuchImageError(DockerError):
def __init__(self, image):
self.image = image
def __str__(self):
return 'No such image: {}'.format(self.image)
def __wrap_docker_exec(func):
'''Wrap a function to raise DockerExecuteError on ENOENT'''
def call(*args, **kwargs):
try:
return func(*args, **kwargs)
except OSError as e:
if e.errno == errno.ENOENT:
raise DockerExecuteError('Failed to execute docker. Is it installed?')
raise
return call
Popen = __wrap_docker_exec(subprocess.Popen)
call = __wrap_docker_exec(subprocess.call)
def docker_pull(image):
'''Pulls an image'''
args = ['docker', 'pull', image]
# If this fails, the default docker stdout/stderr looks good to the user.
ret = call(args)
if ret != 0:
raise DockerError('Failed to pull image "{}"'.format(image))
def docker_inspect_or_pull(image):
'''Inspects a docker image, pulling it if it doesn't exist'''
try:
return docker_inspect(image)
except NoSuchImageError:
# If it doesn't exist yet, try to pull it now (#79)
docker_pull(image)
return docker_inspect(image)
def get_image_command(image):
'''Gets the default command for an image'''
info = docker_inspect_or_pull(image)
try:
return info['Config']['Cmd']
except KeyError as ke:
raise DockerError('Failed to inspect image: JSON result missing key {}'.format(ke))
def get_image_entrypoint(image):
'''Gets the image entrypoint'''
info = docker_inspect_or_pull(image)
try:
return info['Config']['Entrypoint']
except KeyError as ke:
raise DockerError('Failed to inspect image: JSON result missing key {}'.format(ke))
def make_vol_opt(hostdir, contdir, options=None):
'''Generate a docker volume option'''
vol = '--volume={}:{}'.format(hostdir, contdir)
if options != None:
if isinstance(options, str):
options = (options,)
vol += ':' + ','.join(options)
return vol
|
JonathonReinhart/scuba | scuba/dockerutil.py | docker_pull | python | def docker_pull(image):
'''Pulls an image'''
args = ['docker', 'pull', image]
# If this fails, the default docker stdout/stderr looks good to the user.
ret = call(args)
if ret != 0:
raise DockerError('Failed to pull image "{}"'.format(image)) | Pulls an image | train | https://github.com/JonathonReinhart/scuba/blob/0244c81ec482d3c60202028bc075621447bc3ad1/scuba/dockerutil.py#L53-L60 | [
"def call(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except OSError as e:\n if e.errno == errno.ENOENT:\n raise DockerExecuteError('Failed to execute docker. Is it installed?')\n raise\n"
] | import subprocess
import errno
import json
class DockerError(Exception):
pass
class DockerExecuteError(DockerError):
pass
class NoSuchImageError(DockerError):
def __init__(self, image):
self.image = image
def __str__(self):
return 'No such image: {}'.format(self.image)
def __wrap_docker_exec(func):
'''Wrap a function to raise DockerExecuteError on ENOENT'''
def call(*args, **kwargs):
try:
return func(*args, **kwargs)
except OSError as e:
if e.errno == errno.ENOENT:
raise DockerExecuteError('Failed to execute docker. Is it installed?')
raise
return call
Popen = __wrap_docker_exec(subprocess.Popen)
call = __wrap_docker_exec(subprocess.call)
def docker_inspect(image):
'''Inspects a docker image
Returns: Parsed JSON data
'''
args = ['docker', 'inspect', '--type', 'image', image]
p = Popen(args, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
stdout, stderr = p.communicate()
stdout = stdout.decode('utf-8')
stderr = stderr.decode('utf-8')
if not p.returncode == 0:
if 'no such image' in stderr.lower():
raise NoSuchImageError(image)
raise DockerError('Failed to inspect image: {}'.format(stderr.strip()))
return json.loads(stdout)[0]
def docker_inspect_or_pull(image):
'''Inspects a docker image, pulling it if it doesn't exist'''
try:
return docker_inspect(image)
except NoSuchImageError:
# If it doesn't exist yet, try to pull it now (#79)
docker_pull(image)
return docker_inspect(image)
def get_image_command(image):
'''Gets the default command for an image'''
info = docker_inspect_or_pull(image)
try:
return info['Config']['Cmd']
except KeyError as ke:
raise DockerError('Failed to inspect image: JSON result missing key {}'.format(ke))
def get_image_entrypoint(image):
'''Gets the image entrypoint'''
info = docker_inspect_or_pull(image)
try:
return info['Config']['Entrypoint']
except KeyError as ke:
raise DockerError('Failed to inspect image: JSON result missing key {}'.format(ke))
def make_vol_opt(hostdir, contdir, options=None):
'''Generate a docker volume option'''
vol = '--volume={}:{}'.format(hostdir, contdir)
if options != None:
if isinstance(options, str):
options = (options,)
vol += ':' + ','.join(options)
return vol
|
JonathonReinhart/scuba | scuba/dockerutil.py | get_image_command | python | def get_image_command(image):
'''Gets the default command for an image'''
info = docker_inspect_or_pull(image)
try:
return info['Config']['Cmd']
except KeyError as ke:
raise DockerError('Failed to inspect image: JSON result missing key {}'.format(ke)) | Gets the default command for an image | train | https://github.com/JonathonReinhart/scuba/blob/0244c81ec482d3c60202028bc075621447bc3ad1/scuba/dockerutil.py#L71-L77 | [
"def docker_inspect_or_pull(image):\n '''Inspects a docker image, pulling it if it doesn't exist'''\n try:\n return docker_inspect(image)\n except NoSuchImageError:\n # If it doesn't exist yet, try to pull it now (#79)\n docker_pull(image)\n return docker_inspect(image)\n"
] | import subprocess
import errno
import json
class DockerError(Exception):
pass
class DockerExecuteError(DockerError):
pass
class NoSuchImageError(DockerError):
def __init__(self, image):
self.image = image
def __str__(self):
return 'No such image: {}'.format(self.image)
def __wrap_docker_exec(func):
'''Wrap a function to raise DockerExecuteError on ENOENT'''
def call(*args, **kwargs):
try:
return func(*args, **kwargs)
except OSError as e:
if e.errno == errno.ENOENT:
raise DockerExecuteError('Failed to execute docker. Is it installed?')
raise
return call
Popen = __wrap_docker_exec(subprocess.Popen)
call = __wrap_docker_exec(subprocess.call)
def docker_inspect(image):
'''Inspects a docker image
Returns: Parsed JSON data
'''
args = ['docker', 'inspect', '--type', 'image', image]
p = Popen(args, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
stdout, stderr = p.communicate()
stdout = stdout.decode('utf-8')
stderr = stderr.decode('utf-8')
if not p.returncode == 0:
if 'no such image' in stderr.lower():
raise NoSuchImageError(image)
raise DockerError('Failed to inspect image: {}'.format(stderr.strip()))
return json.loads(stdout)[0]
def docker_pull(image):
'''Pulls an image'''
args = ['docker', 'pull', image]
# If this fails, the default docker stdout/stderr looks good to the user.
ret = call(args)
if ret != 0:
raise DockerError('Failed to pull image "{}"'.format(image))
def docker_inspect_or_pull(image):
'''Inspects a docker image, pulling it if it doesn't exist'''
try:
return docker_inspect(image)
except NoSuchImageError:
# If it doesn't exist yet, try to pull it now (#79)
docker_pull(image)
return docker_inspect(image)
def get_image_entrypoint(image):
'''Gets the image entrypoint'''
info = docker_inspect_or_pull(image)
try:
return info['Config']['Entrypoint']
except KeyError as ke:
raise DockerError('Failed to inspect image: JSON result missing key {}'.format(ke))
def make_vol_opt(hostdir, contdir, options=None):
'''Generate a docker volume option'''
vol = '--volume={}:{}'.format(hostdir, contdir)
if options != None:
if isinstance(options, str):
options = (options,)
vol += ':' + ','.join(options)
return vol
|
JonathonReinhart/scuba | scuba/dockerutil.py | get_image_entrypoint | python | def get_image_entrypoint(image):
'''Gets the image entrypoint'''
info = docker_inspect_or_pull(image)
try:
return info['Config']['Entrypoint']
except KeyError as ke:
raise DockerError('Failed to inspect image: JSON result missing key {}'.format(ke)) | Gets the image entrypoint | train | https://github.com/JonathonReinhart/scuba/blob/0244c81ec482d3c60202028bc075621447bc3ad1/scuba/dockerutil.py#L79-L85 | [
"def docker_inspect_or_pull(image):\n '''Inspects a docker image, pulling it if it doesn't exist'''\n try:\n return docker_inspect(image)\n except NoSuchImageError:\n # If it doesn't exist yet, try to pull it now (#79)\n docker_pull(image)\n return docker_inspect(image)\n"
] | import subprocess
import errno
import json
class DockerError(Exception):
pass
class DockerExecuteError(DockerError):
pass
class NoSuchImageError(DockerError):
def __init__(self, image):
self.image = image
def __str__(self):
return 'No such image: {}'.format(self.image)
def __wrap_docker_exec(func):
'''Wrap a function to raise DockerExecuteError on ENOENT'''
def call(*args, **kwargs):
try:
return func(*args, **kwargs)
except OSError as e:
if e.errno == errno.ENOENT:
raise DockerExecuteError('Failed to execute docker. Is it installed?')
raise
return call
Popen = __wrap_docker_exec(subprocess.Popen)
call = __wrap_docker_exec(subprocess.call)
def docker_inspect(image):
'''Inspects a docker image
Returns: Parsed JSON data
'''
args = ['docker', 'inspect', '--type', 'image', image]
p = Popen(args, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
stdout, stderr = p.communicate()
stdout = stdout.decode('utf-8')
stderr = stderr.decode('utf-8')
if not p.returncode == 0:
if 'no such image' in stderr.lower():
raise NoSuchImageError(image)
raise DockerError('Failed to inspect image: {}'.format(stderr.strip()))
return json.loads(stdout)[0]
def docker_pull(image):
'''Pulls an image'''
args = ['docker', 'pull', image]
# If this fails, the default docker stdout/stderr looks good to the user.
ret = call(args)
if ret != 0:
raise DockerError('Failed to pull image "{}"'.format(image))
def docker_inspect_or_pull(image):
'''Inspects a docker image, pulling it if it doesn't exist'''
try:
return docker_inspect(image)
except NoSuchImageError:
# If it doesn't exist yet, try to pull it now (#79)
docker_pull(image)
return docker_inspect(image)
def get_image_command(image):
'''Gets the default command for an image'''
info = docker_inspect_or_pull(image)
try:
return info['Config']['Cmd']
except KeyError as ke:
raise DockerError('Failed to inspect image: JSON result missing key {}'.format(ke))
def make_vol_opt(hostdir, contdir, options=None):
'''Generate a docker volume option'''
vol = '--volume={}:{}'.format(hostdir, contdir)
if options != None:
if isinstance(options, str):
options = (options,)
vol += ':' + ','.join(options)
return vol
|
JonathonReinhart/scuba | scuba/dockerutil.py | make_vol_opt | python | def make_vol_opt(hostdir, contdir, options=None):
'''Generate a docker volume option'''
vol = '--volume={}:{}'.format(hostdir, contdir)
if options != None:
if isinstance(options, str):
options = (options,)
vol += ':' + ','.join(options)
return vol | Generate a docker volume option | train | https://github.com/JonathonReinhart/scuba/blob/0244c81ec482d3c60202028bc075621447bc3ad1/scuba/dockerutil.py#L88-L95 | null | import subprocess
import errno
import json
class DockerError(Exception):
pass
class DockerExecuteError(DockerError):
pass
class NoSuchImageError(DockerError):
def __init__(self, image):
self.image = image
def __str__(self):
return 'No such image: {}'.format(self.image)
def __wrap_docker_exec(func):
'''Wrap a function to raise DockerExecuteError on ENOENT'''
def call(*args, **kwargs):
try:
return func(*args, **kwargs)
except OSError as e:
if e.errno == errno.ENOENT:
raise DockerExecuteError('Failed to execute docker. Is it installed?')
raise
return call
Popen = __wrap_docker_exec(subprocess.Popen)
call = __wrap_docker_exec(subprocess.call)
def docker_inspect(image):
'''Inspects a docker image
Returns: Parsed JSON data
'''
args = ['docker', 'inspect', '--type', 'image', image]
p = Popen(args, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
stdout, stderr = p.communicate()
stdout = stdout.decode('utf-8')
stderr = stderr.decode('utf-8')
if not p.returncode == 0:
if 'no such image' in stderr.lower():
raise NoSuchImageError(image)
raise DockerError('Failed to inspect image: {}'.format(stderr.strip()))
return json.loads(stdout)[0]
def docker_pull(image):
'''Pulls an image'''
args = ['docker', 'pull', image]
# If this fails, the default docker stdout/stderr looks good to the user.
ret = call(args)
if ret != 0:
raise DockerError('Failed to pull image "{}"'.format(image))
def docker_inspect_or_pull(image):
'''Inspects a docker image, pulling it if it doesn't exist'''
try:
return docker_inspect(image)
except NoSuchImageError:
# If it doesn't exist yet, try to pull it now (#79)
docker_pull(image)
return docker_inspect(image)
def get_image_command(image):
'''Gets the default command for an image'''
info = docker_inspect_or_pull(image)
try:
return info['Config']['Cmd']
except KeyError as ke:
raise DockerError('Failed to inspect image: JSON result missing key {}'.format(ke))
def get_image_entrypoint(image):
'''Gets the image entrypoint'''
info = docker_inspect_or_pull(image)
try:
return info['Config']['Entrypoint']
except KeyError as ke:
raise DockerError('Failed to inspect image: JSON result missing key {}'.format(ke))
|
JonathonReinhart/scuba | scuba/config.py | find_config | python | def find_config():
'''Search up the diretcory hierarchy for .scuba.yml
Returns: path, rel on success, or None if not found
path The absolute path of the directory where .scuba.yml was found
rel The relative path from the directory where .scuba.yml was found
to the current directory
'''
cross_fs = 'SCUBA_DISCOVERY_ACROSS_FILESYSTEM' in os.environ
path = os.getcwd()
rel = ''
while True:
if os.path.exists(os.path.join(path, SCUBA_YML)):
return path, rel
if not cross_fs and os.path.ismount(path):
msg = '{} not found here or any parent up to mount point {}'.format(SCUBA_YML, path) \
+ '\nStopping at filesystem boundary (SCUBA_DISCOVERY_ACROSS_FILESYSTEM not set).'
raise ConfigNotFoundError(msg)
# Traverse up directory hierarchy
path, rest = os.path.split(path)
if not rest:
raise ConfigNotFoundError('{} not found here or any parent directories'.format(SCUBA_YML))
# Accumulate the relative path back to where we started
rel = os.path.join(rest, rel) | Search up the diretcory hierarchy for .scuba.yml
Returns: path, rel on success, or None if not found
path The absolute path of the directory where .scuba.yml was found
rel The relative path from the directory where .scuba.yml was found
to the current directory | train | https://github.com/JonathonReinhart/scuba/blob/0244c81ec482d3c60202028bc075621447bc3ad1/scuba/config.py#L74-L101 | null | from __future__ import print_function
import os
import yaml
import shlex
try:
basestring
except NameError:
basestring = str # Python 3
from .constants import *
from .utils import *
class ConfigError(Exception):
pass
class ConfigNotFoundError(ConfigError):
pass
# http://stackoverflow.com/a/9577670
class Loader(yaml.SafeLoader):
def __init__(self, stream):
self._root = os.path.split(stream.name)[0]
super(Loader, self).__init__(stream)
def from_yaml(self, node):
'''
Implementes a !from_yaml constructor with the following syntax:
!from_yaml filename key
Arguments:
filename: Filename of external YAML document from which to load,
relative to the current YAML file.
key: Key from external YAML document to return,
using a dot-separated syntax for nested keys.
Examples:
!from_yaml external.yml pop
!from_yaml external.yml foo.bar.pop
!from_yaml "another file.yml" "foo bar.snap crackle.pop"
'''
# Load the content from the node, as a scalar
content = self.construct_scalar(node)
# Split on unquoted spaces
try:
parts = shlex.split(content)
except UnicodeEncodeError:
raise yaml.YAMLError('Non-ASCII arguments to !from_yaml are unsupported')
if len(parts) != 2:
raise yaml.YAMLError('Two arguments expected to !from_yaml')
filename, key = parts
# path is relative to the current YAML document
path = os.path.join(self._root, filename)
# Load the other YAML document
with open(path, 'r') as f:
doc = yaml.load(f, self.__class__)
# Retrieve the key
try:
cur = doc
for k in key.split('.'):
cur = cur[k]
except KeyError:
raise yaml.YAMLError('Key "{}" not found in {}'.format(key, filename))
return cur
Loader.add_constructor('!from_yaml', Loader.from_yaml)
def _process_script_node(node, name):
'''Process a script-type node
This handles nodes that follow the *Common script schema*,
as outlined in doc/yaml-reference.md.
'''
if isinstance(node, basestring):
# The script is just the text itself
return [node]
if isinstance(node, dict):
# There must be a "script" key, which must be a list of strings
script = node.get('script')
if not script:
raise ConfigError("{}: must have a 'script' subkey".format(name))
if isinstance(script, list):
return script
if isinstance(script, basestring):
return [script]
raise ConfigError("{}.script: must be a string or list".format(name))
raise ConfigError("{}: must be string or dict".format(name))
def _process_environment(node, name):
# Environment can be either a list of strings ("KEY=VALUE") or a mapping
# Environment keys and values are always strings
result = {}
if not node:
pass
elif isinstance(node, dict):
for k, v in node.items():
if v is None:
v = os.getenv(k, '')
result[k] = str(v)
elif isinstance(node, list):
for e in node:
k, v = parse_env_var(e)
result[k] = v
else:
raise ConfigError("'{}' must be list or mapping, not {}".format(
name, type(node).__name__))
return result
def _get_entrypoint(data):
# N.B. We can't use data.get() here, because that might return
# None, leading to ambiguity between entrypoint being absent or set
# to a null value.
#
# "Note that a null is different from an empty string and that a
# mapping entry with some key and a null value is valid and
# different from not having that key in the mapping."
# - http://yaml.org/type/null.html
key = 'entrypoint'
if not key in data:
return None
ep = data[key]
# We represent a null value as an empty string.
if ep is None:
ep = ''
if not isinstance(ep, basestring):
raise ConfigError("'{}' must be a string, not {}".format(
key, type(ep).__name__))
return ep
class ScubaAlias(object):
def __init__(self, name, script, image, entrypoint, environment):
self.name = name
self.script = script
self.image = image
self.entrypoint = entrypoint
self.environment = environment
@classmethod
def from_dict(cls, name, node):
script = _process_script_node(node, name)
image = None
entrypoint = None
environment = None
if isinstance(node, dict): # Rich alias
image = node.get('image')
entrypoint = _get_entrypoint(node)
environment = _process_environment(
node.get('environment'),
'{}.{}'.format(name, 'environment'))
return cls(name, script, image, entrypoint, environment)
class ScubaContext(object):
pass
class ScubaConfig(object):
def __init__(self, **data):
required_nodes = ('image',)
optional_nodes = ('aliases','hooks','entrypoint','environment')
# Check for missing required nodes
missing = [n for n in required_nodes if not n in data]
if missing:
raise ConfigError('{}: Required node{} missing: {}'.format(SCUBA_YML,
's' if len(missing) > 1 else '', ', '.join(missing)))
# Check for unrecognized nodes
extra = [n for n in data if not n in required_nodes + optional_nodes]
if extra:
raise ConfigError('{}: Unrecognized node{}: {}'.format(SCUBA_YML,
's' if len(extra) > 1 else '', ', '.join(extra)))
self._image = data['image']
self._entrypoint = _get_entrypoint(data)
self._load_aliases(data)
self._load_hooks(data)
self._environment = self._load_environment(data)
def _load_aliases(self, data):
self._aliases = {}
for name, node in data.get('aliases', {}).items():
if ' ' in name:
raise ConfigError('Alias names cannot contain spaces')
self._aliases[name] = ScubaAlias.from_dict(name, node)
def _load_hooks(self, data):
self._hooks = {}
for name in ('user', 'root',):
node = data.get('hooks', {}).get(name)
if node:
hook = _process_script_node(node, name)
self._hooks[name] = hook
def _load_environment(self, data):
return _process_environment(data.get('environment'), 'environment')
@property
def image(self):
return self._image
@property
def entrypoint(self):
return self._entrypoint
@property
def aliases(self):
return self._aliases
@property
def hooks(self):
return self._hooks
@property
def environment(self):
return self._environment
def process_command(self, command):
'''Processes a user command using aliases
Arguments:
command A user command list (e.g. argv)
Returns: A ScubaContext object with the following attributes:
script: a list of command line strings
image: the docker image name to use
'''
result = ScubaContext()
result.script = None
result.image = self.image
result.entrypoint = self.entrypoint
result.environment = self.environment.copy()
if command:
alias = self.aliases.get(command[0])
if not alias:
# Command is not an alias; use it as-is.
result.script = [shell_quote_cmd(command)]
else:
# Using an alias
# Does this alias override the image and/or entrypoint?
if alias.image:
result.image = alias.image
if alias.entrypoint is not None:
result.entrypoint = alias.entrypoint
# Merge/override the environment
if alias.environment:
result.environment.update(alias.environment)
if len(alias.script) > 1:
# Alias is a multiline script; no additional
# arguments are allowed in the scuba invocation.
if len(command) > 1:
raise ConfigError('Additional arguments not allowed with multi-line aliases')
result.script = alias.script
else:
# Alias is a single-line script; perform substituion
# and add user arguments.
command.pop(0)
result.script = [alias.script[0] + ' ' + shell_quote_cmd(command)]
result.script = flatten_list(result.script)
return result
def load_config(path):
try:
with open(path) as f:
data = yaml.load(f, Loader)
except IOError as e:
raise ConfigError('Error opening {}: {}'.format(SCUBA_YML, e))
except yaml.YAMLError as e:
raise ConfigError('Error loading {}: {}'.format(SCUBA_YML, e))
return ScubaConfig(**(data or {}))
|
JonathonReinhart/scuba | scuba/config.py | _process_script_node | python | def _process_script_node(node, name):
'''Process a script-type node
This handles nodes that follow the *Common script schema*,
as outlined in doc/yaml-reference.md.
'''
if isinstance(node, basestring):
# The script is just the text itself
return [node]
if isinstance(node, dict):
# There must be a "script" key, which must be a list of strings
script = node.get('script')
if not script:
raise ConfigError("{}: must have a 'script' subkey".format(name))
if isinstance(script, list):
return script
if isinstance(script, basestring):
return [script]
raise ConfigError("{}.script: must be a string or list".format(name))
raise ConfigError("{}: must be string or dict".format(name)) | Process a script-type node
This handles nodes that follow the *Common script schema*,
as outlined in doc/yaml-reference.md. | train | https://github.com/JonathonReinhart/scuba/blob/0244c81ec482d3c60202028bc075621447bc3ad1/scuba/config.py#L104-L129 | null | from __future__ import print_function
import os
import yaml
import shlex
try:
basestring
except NameError:
basestring = str # Python 3
from .constants import *
from .utils import *
class ConfigError(Exception):
pass
class ConfigNotFoundError(ConfigError):
pass
# http://stackoverflow.com/a/9577670
class Loader(yaml.SafeLoader):
def __init__(self, stream):
self._root = os.path.split(stream.name)[0]
super(Loader, self).__init__(stream)
def from_yaml(self, node):
'''
Implementes a !from_yaml constructor with the following syntax:
!from_yaml filename key
Arguments:
filename: Filename of external YAML document from which to load,
relative to the current YAML file.
key: Key from external YAML document to return,
using a dot-separated syntax for nested keys.
Examples:
!from_yaml external.yml pop
!from_yaml external.yml foo.bar.pop
!from_yaml "another file.yml" "foo bar.snap crackle.pop"
'''
# Load the content from the node, as a scalar
content = self.construct_scalar(node)
# Split on unquoted spaces
try:
parts = shlex.split(content)
except UnicodeEncodeError:
raise yaml.YAMLError('Non-ASCII arguments to !from_yaml are unsupported')
if len(parts) != 2:
raise yaml.YAMLError('Two arguments expected to !from_yaml')
filename, key = parts
# path is relative to the current YAML document
path = os.path.join(self._root, filename)
# Load the other YAML document
with open(path, 'r') as f:
doc = yaml.load(f, self.__class__)
# Retrieve the key
try:
cur = doc
for k in key.split('.'):
cur = cur[k]
except KeyError:
raise yaml.YAMLError('Key "{}" not found in {}'.format(key, filename))
return cur
Loader.add_constructor('!from_yaml', Loader.from_yaml)
def find_config():
'''Search up the diretcory hierarchy for .scuba.yml
Returns: path, rel on success, or None if not found
path The absolute path of the directory where .scuba.yml was found
rel The relative path from the directory where .scuba.yml was found
to the current directory
'''
cross_fs = 'SCUBA_DISCOVERY_ACROSS_FILESYSTEM' in os.environ
path = os.getcwd()
rel = ''
while True:
if os.path.exists(os.path.join(path, SCUBA_YML)):
return path, rel
if not cross_fs and os.path.ismount(path):
msg = '{} not found here or any parent up to mount point {}'.format(SCUBA_YML, path) \
+ '\nStopping at filesystem boundary (SCUBA_DISCOVERY_ACROSS_FILESYSTEM not set).'
raise ConfigNotFoundError(msg)
# Traverse up directory hierarchy
path, rest = os.path.split(path)
if not rest:
raise ConfigNotFoundError('{} not found here or any parent directories'.format(SCUBA_YML))
# Accumulate the relative path back to where we started
rel = os.path.join(rest, rel)
def _process_environment(node, name):
# Environment can be either a list of strings ("KEY=VALUE") or a mapping
# Environment keys and values are always strings
result = {}
if not node:
pass
elif isinstance(node, dict):
for k, v in node.items():
if v is None:
v = os.getenv(k, '')
result[k] = str(v)
elif isinstance(node, list):
for e in node:
k, v = parse_env_var(e)
result[k] = v
else:
raise ConfigError("'{}' must be list or mapping, not {}".format(
name, type(node).__name__))
return result
def _get_entrypoint(data):
# N.B. We can't use data.get() here, because that might return
# None, leading to ambiguity between entrypoint being absent or set
# to a null value.
#
# "Note that a null is different from an empty string and that a
# mapping entry with some key and a null value is valid and
# different from not having that key in the mapping."
# - http://yaml.org/type/null.html
key = 'entrypoint'
if not key in data:
return None
ep = data[key]
# We represent a null value as an empty string.
if ep is None:
ep = ''
if not isinstance(ep, basestring):
raise ConfigError("'{}' must be a string, not {}".format(
key, type(ep).__name__))
return ep
class ScubaAlias(object):
def __init__(self, name, script, image, entrypoint, environment):
self.name = name
self.script = script
self.image = image
self.entrypoint = entrypoint
self.environment = environment
@classmethod
def from_dict(cls, name, node):
script = _process_script_node(node, name)
image = None
entrypoint = None
environment = None
if isinstance(node, dict): # Rich alias
image = node.get('image')
entrypoint = _get_entrypoint(node)
environment = _process_environment(
node.get('environment'),
'{}.{}'.format(name, 'environment'))
return cls(name, script, image, entrypoint, environment)
class ScubaContext(object):
pass
class ScubaConfig(object):
    """Parsed representation of a .scuba.yml configuration file.

    Validates the top-level node set, then exposes the image, entrypoint,
    aliases, hooks, and environment via read-only properties.
    """

    def __init__(self, **data):
        required_nodes = ('image',)
        optional_nodes = ('aliases', 'hooks', 'entrypoint', 'environment')

        # Check for missing required nodes
        missing = [n for n in required_nodes if n not in data]
        if missing:
            raise ConfigError('{}: Required node{} missing: {}'.format(SCUBA_YML,
                's' if len(missing) > 1 else '', ', '.join(missing)))

        # Check for unrecognized nodes
        extra = [n for n in data if n not in required_nodes + optional_nodes]
        if extra:
            raise ConfigError('{}: Unrecognized node{}: {}'.format(SCUBA_YML,
                's' if len(extra) > 1 else '', ', '.join(extra)))

        self._image = data['image']
        self._entrypoint = _get_entrypoint(data)
        self._load_aliases(data)
        self._load_hooks(data)
        self._environment = self._load_environment(data)

    def _load_aliases(self, data):
        # Alias names become argv[0], so they must not contain whitespace.
        self._aliases = {}
        for name, node in data.get('aliases', {}).items():
            if ' ' in name:
                raise ConfigError('Alias names cannot contain spaces')
            self._aliases[name] = ScubaAlias.from_dict(name, node)

    def _load_hooks(self, data):
        # Only the 'user' and 'root' hooks are recognized.
        self._hooks = {}
        for name in ('user', 'root',):
            node = data.get('hooks', {}).get(name)
            if node:
                hook = _process_script_node(node, name)
                self._hooks[name] = hook

    def _load_environment(self, data):
        return _process_environment(data.get('environment'), 'environment')

    @property
    def image(self):
        return self._image

    @property
    def entrypoint(self):
        return self._entrypoint

    @property
    def aliases(self):
        return self._aliases

    @property
    def hooks(self):
        return self._hooks

    @property
    def environment(self):
        return self._environment

    def process_command(self, command):
        '''Processes a user command using aliases

        Arguments:
          command      A user command list (e.g. argv)

        Returns: A ScubaContext object with the following attributes:
          script: a list of command line strings
          image: the docker image name to use
        '''
        result = ScubaContext()
        result.script = None
        result.image = self.image
        result.entrypoint = self.entrypoint
        result.environment = self.environment.copy()

        if command:
            # Work on a copy so the caller's argv list is never mutated
            # (the original popped command[0] in place).
            command = list(command)
            alias = self.aliases.get(command[0])
            if not alias:
                # Command is not an alias; use it as-is.
                result.script = [shell_quote_cmd(command)]
            else:
                # Using an alias
                # Does this alias override the image and/or entrypoint?
                if alias.image:
                    result.image = alias.image
                if alias.entrypoint is not None:
                    result.entrypoint = alias.entrypoint

                # Merge/override the environment
                if alias.environment:
                    result.environment.update(alias.environment)

                if len(alias.script) > 1:
                    # Alias is a multiline script; no additional
                    # arguments are allowed in the scuba invocation.
                    if len(command) > 1:
                        raise ConfigError('Additional arguments not allowed with multi-line aliases')
                    result.script = alias.script
                else:
                    # Alias is a single-line script; perform substitution
                    # and add user arguments.
                    command.pop(0)
                    result.script = [alias.script[0] + ' ' + shell_quote_cmd(command)]
            result.script = flatten_list(result.script)

        return result
def load_config(path):
    """Load *path* as YAML and return the resulting ScubaConfig.

    Any I/O or YAML parsing problem is reported as a ConfigError.
    """
    try:
        with open(path) as stream:
            data = yaml.load(stream, Loader)
    except IOError as err:
        raise ConfigError('Error opening {}: {}'.format(SCUBA_YML, err))
    except yaml.YAMLError as err:
        raise ConfigError('Error loading {}: {}'.format(SCUBA_YML, err))
    if not data:
        data = {}
    return ScubaConfig(**data)
|
JonathonReinhart/scuba | scuba/config.py | Loader.from_yaml | python | def from_yaml(self, node):
'''
Implements a !from_yaml constructor with the following syntax:
!from_yaml filename key
Arguments:
filename: Filename of external YAML document from which to load,
relative to the current YAML file.
key: Key from external YAML document to return,
using a dot-separated syntax for nested keys.
Examples:
!from_yaml external.yml pop
!from_yaml external.yml foo.bar.pop
!from_yaml "another file.yml" "foo bar.snap crackle.pop"
'''
# Load the content from the node, as a scalar
content = self.construct_scalar(node)
# Split on unquoted spaces
try:
parts = shlex.split(content)
except UnicodeEncodeError:
raise yaml.YAMLError('Non-ASCII arguments to !from_yaml are unsupported')
if len(parts) != 2:
raise yaml.YAMLError('Two arguments expected to !from_yaml')
filename, key = parts
# path is relative to the current YAML document
path = os.path.join(self._root, filename)
# Load the other YAML document
with open(path, 'r') as f:
doc = yaml.load(f, self.__class__)
# Retrieve the key
try:
cur = doc
for k in key.split('.'):
cur = cur[k]
except KeyError:
raise yaml.YAMLError('Key "{}" not found in {}'.format(key, filename))
return cur | Implements a !from_yaml constructor with the following syntax:
!from_yaml filename key
Arguments:
filename: Filename of external YAML document from which to load,
relative to the current YAML file.
key: Key from external YAML document to return,
using a dot-separated syntax for nested keys.
Examples:
!from_yaml external.yml pop
!from_yaml external.yml foo.bar.pop
!from_yaml "another file.yml" "foo bar.snap crackle.pop" | train | https://github.com/JonathonReinhart/scuba/blob/0244c81ec482d3c60202028bc075621447bc3ad1/scuba/config.py#L25-L69 | null | class Loader(yaml.SafeLoader):
def __init__(self, stream):
self._root = os.path.split(stream.name)[0]
super(Loader, self).__init__(stream)
|
JonathonReinhart/scuba | scuba/config.py | ScubaConfig.process_command | python | def process_command(self, command):
'''Processes a user command using aliases
Arguments:
command A user command list (e.g. argv)
Returns: A ScubaContext object with the following attributes:
script: a list of command line strings
image: the docker image name to use
'''
result = ScubaContext()
result.script = None
result.image = self.image
result.entrypoint = self.entrypoint
result.environment = self.environment.copy()
if command:
alias = self.aliases.get(command[0])
if not alias:
# Command is not an alias; use it as-is.
result.script = [shell_quote_cmd(command)]
else:
# Using an alias
# Does this alias override the image and/or entrypoint?
if alias.image:
result.image = alias.image
if alias.entrypoint is not None:
result.entrypoint = alias.entrypoint
# Merge/override the environment
if alias.environment:
result.environment.update(alias.environment)
if len(alias.script) > 1:
# Alias is a multiline script; no additional
# arguments are allowed in the scuba invocation.
if len(command) > 1:
raise ConfigError('Additional arguments not allowed with multi-line aliases')
result.script = alias.script
else:
# Alias is a single-line script; perform substituion
# and add user arguments.
command.pop(0)
result.script = [alias.script[0] + ' ' + shell_quote_cmd(command)]
result.script = flatten_list(result.script)
return result | Processes a user command using aliases
Arguments:
command A user command list (e.g. argv)
Returns: A ScubaContext object with the following attributes:
script: a list of command line strings
image: the docker image name to use | train | https://github.com/JonathonReinhart/scuba/blob/0244c81ec482d3c60202028bc075621447bc3ad1/scuba/config.py#L276-L324 | [
"def shell_quote_cmd(cmdlist):\n return ' '.join(map(shell_quote, cmdlist))\n",
"def flatten_list(x):\n if not isinstance(x, list):\n raise ValueError(\"argument is not a list\")\n result = []\n for i in x:\n if isinstance(i, list):\n for j in flatten_list(i):\n result.append(j)\n else:\n result.append(i)\n return result\n"
] | class ScubaConfig(object):
def __init__(self, **data):
required_nodes = ('image',)
optional_nodes = ('aliases','hooks','entrypoint','environment')
# Check for missing required nodes
missing = [n for n in required_nodes if not n in data]
if missing:
raise ConfigError('{}: Required node{} missing: {}'.format(SCUBA_YML,
's' if len(missing) > 1 else '', ', '.join(missing)))
# Check for unrecognized nodes
extra = [n for n in data if not n in required_nodes + optional_nodes]
if extra:
raise ConfigError('{}: Unrecognized node{}: {}'.format(SCUBA_YML,
's' if len(extra) > 1 else '', ', '.join(extra)))
self._image = data['image']
self._entrypoint = _get_entrypoint(data)
self._load_aliases(data)
self._load_hooks(data)
self._environment = self._load_environment(data)
def _load_aliases(self, data):
self._aliases = {}
for name, node in data.get('aliases', {}).items():
if ' ' in name:
raise ConfigError('Alias names cannot contain spaces')
self._aliases[name] = ScubaAlias.from_dict(name, node)
def _load_hooks(self, data):
self._hooks = {}
for name in ('user', 'root',):
node = data.get('hooks', {}).get(name)
if node:
hook = _process_script_node(node, name)
self._hooks[name] = hook
def _load_environment(self, data):
return _process_environment(data.get('environment'), 'environment')
@property
def image(self):
return self._image
@property
def entrypoint(self):
return self._entrypoint
@property
def aliases(self):
return self._aliases
@property
def hooks(self):
return self._hooks
@property
def environment(self):
return self._environment
|
SpheMakh/Stimela | stimela/recipe.py | StimelaJob.python_job | python | def python_job(self, function, parameters=None):
if not callable(function):
raise utils.StimelaCabRuntimeError('Object given as function is not callable')
if self.name is None:
self.name = function.__name__
self.job = {
'function' : function,
'parameters': parameters,
}
return 0 | Run python function
function : Python callable to execute
name : Name of function (if not given, will used function.__name__)
parameters : Parameters to parse to function
label : Function label; for logging purposes | train | https://github.com/SpheMakh/Stimela/blob/292e80461a0c3498da8e7e987e2891d3ae5981ad/stimela/recipe.py#L105-L126 | null | class StimelaJob(object):
def __init__(self, name, recipe, label=None,
jtype='docker', cpus=None, memory_limit=None,
singularity_dir=None,
time_out=-1,
log_dir=None):
self.name = name
self.recipe = recipe
self.label = label or '{0}_{1}'.format(name, id(name))
self.log = recipe.log
self.active = False
self.jtype = 'docker' # ['docker', 'python', or 'singularity']
self.job = None
self.created = False
self.args = ['--user {}:{}'.format(UID, GID)]
if cpus:
self.args.append("--cpus {0:f}".format(cpus))
if memory_limit:
self.args.append("--memory {0:s}".format(memory_limit))
self.time_out = time_out
self.log_dir = log_dir
def run_python_job(self):
function = self.job['function']
options = self.job['parameters']
function(**options)
return 0
def run_docker_job(self):
if hasattr(self.job, '_cab'):
self.job._cab.update(self.job.config,
self.job.parameter_file_name)
self.created = False
self.job.create(*self.args)
self.created = True
self.job.start()
return 0
def run_singularity_job(self):
if hasattr(self.job, '_cab'):
self.job._cab.update(self.job.config,
self.job.parameter_file_name)
self.created = False
self.job.start()
self.created = True
self.job.run()
return 0
def python_job(self, function, parameters=None):
"""
Run python function
function : Python callable to execute
name : Name of function (if not given, will used function.__name__)
parameters : Parameters to parse to function
label : Function label; for logging purposes
"""
if not callable(function):
raise utils.StimelaCabRuntimeError('Object given as function is not callable')
if self.name is None:
self.name = function.__name__
self.job = {
'function' : function,
'parameters': parameters,
}
return 0
def singularity_job(self, image, config, singularity_image_dir,
input=None, output=None, msdir=None,
**kw):
"""
Run task in singularity
image : stimela cab name, e.g. 'cab/simms'
name : This name will be part of the name of the contaier that will
execute the task (now optional)
config : Dictionary of options to parse to the task. This will modify
the parameters in the default parameter file which
can be viewd by running 'stimela cabs -i <cab name>', e.g 'stimela cabs -i simms'
input : input dirctory for cab
output : output directory for cab
msdir : MS directory for cab. Only specify if different from recipe ms_dir
"""
# check if name has any offending charecters
offenders = re.findall('\W', self.name)
if offenders:
raise StimelaCabParameterError('The cab name \'{:s}\' has some non-alphanumeric characters.'
' Charecters making up this name must be in [a-z,A-Z,0-9,_]'.format(self.name))
## Update I/O with values specified on command line
# TODO (sphe) I think this feature should be removed
script_context = self.recipe.stimela_context
input = script_context.get('_STIMELA_INPUT', None) or input
output = script_context.get('_STIMELA_OUTPUT', None) or output
msdir = script_context.get('_STIMELA_MSDIR', None) or msdir
# Get location of template parameters file
cabpath = self.recipe.stimela_path + "/cargo/cab/{0:s}/".format(image.split("/")[1])
parameter_file = cabpath+'/parameters.json'
name = '{0}-{1}{2}'.format(self.name, id(image), str(time.time()).replace('.', ''))
_name = '{0}-{1}{2}'.format(self.name, id(image), str(time.time()).replace('.', ''))
name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(3))
_cab = cab.CabDefinition(indir=input, outdir=output,
msdir=msdir, parameter_file=parameter_file)
cab.IODEST = {
"input" : "/scratch/input",
"output" : "/scratch/output",
"msfile" : "/scratch/msdir",
}
cont = singularity.Container(image, name,
logger=self.log, time_out=self.time_out)
# Container parameter file will be updated and validated before the container is executed
cont._cab = _cab
cont.parameter_file_name = '{0}/{1}.json'.format(self.recipe.parameter_file_dir, name)
# Remove dismissable kw arguments:
ops_to_pop = []
for op in config:
if isinstance(config[op], dismissable):
ops_to_pop.append(op)
for op in ops_to_pop:
arg = config.pop(op)()
if arg is not None:
config[op] = arg
cont.config = config
# These are standard volumes and
# environmental variables. These will be
# always exist in a cab container
cont.add_volume(self.recipe.stimela_path, '/scratch/stimela', perm='ro')
cont.add_volume(cont.parameter_file_name, '/scratch/configfile', perm='ro', noverify=True)
cont.add_volume("{0:s}/cargo/cab/{1:s}/src/".format(
self.recipe.stimela_path, _cab.task), "/scratch/code", "ro")
cont.add_volume("{0:s}/cargo/cab/singularity_run".format(self.recipe.stimela_path,
_cab.task), "/singularity")
if msdir:
md = '/scratch/msdir'
cont.add_volume(msdir, md)
# Keep a record of the content of the
# volume
dirname, dirs, files = [a for a in next(os.walk(msdir))]
cont.msdir_content = {
"volume" : dirname,
"dirs" : dirs,
"files" : files,
}
self.log.debug('Mounting volume \'{0}\' from local file system to \'{1}\' in the container'.format(msdir, md))
if input:
cont.add_volume( input,'/scratch/input', perm='ro')
# Keep a record of the content of the
# volume
dirname, dirs, files = [a for a in next(os.walk(input))]
cont.input_content = {
"volume" : dirname,
"dirs" : dirs,
"files" : files,
}
self.log.debug('Mounting volume \'{0}\' from local file system to \'{1}\' in the container'.format(input, '/scratch/input'))
if not os.path.exists(output):
os.mkdir(output)
od = '/scratch/output'
self.logfile = cont.logfile = '{0}/log-{1}.txt'.format(self.log_dir, _name.split('-')[0])
if not os.path.exists(self.logfile):
with open(self.logfile, 'w') as std:
pass
cont.add_volume(self.log_dir, "/scratch/logs/logfile")
cont.add_volume(output, od)
self.log.debug('Mounting volume \'{0}\' from local file system to \'{1}\' in the container'.format(output, od))
simage = _cab.base.replace("/", "_")
cont.image = '{0:s}/{1:s}_{2:s}.img'.format(singularity_image_dir, simage, _cab.tag)
# Added and ready for execution
self.job = cont
return 0
def docker_job(self, image, config=None,
input=None, output=None, msdir=None,
shared_memory='1gb', build_label=None,
**kw):
"""
Add a task to a stimela recipe
image : stimela cab name, e.g. 'cab/simms'
name : This name will be part of the name of the contaier that will
execute the task (now optional)
config : Dictionary of options to parse to the task. This will modify
the parameters in the default parameter file which
can be viewd by running 'stimela cabs -i <cab name>', e.g 'stimela cabs -i simms'
input : input dirctory for cab
output : output directory for cab
msdir : MS directory for cab. Only specify if different from recipe ms_dir
"""
# check if name has any offending charecters
offenders = re.findall('\W', self.name)
if offenders:
raise StimelaCabParameterError('The cab name \'{:s}\' has some non-alphanumeric characters.'
' Charecters making up this name must be in [a-z,A-Z,0-9,_]'.format(self.name))
## Update I/O with values specified on command line
# TODO (sphe) I think this feature should be removed
script_context = self.recipe.stimela_context
input = script_context.get('_STIMELA_INPUT', None) or input
output = script_context.get('_STIMELA_OUTPUT', None) or output
msdir = script_context.get('_STIMELA_MSDIR', None) or msdir
build_label = script_context.get('_STIMELA_BUILD_LABEL', None) or build_label
# Get location of template parameters file
cabs_logger = stimela.get_cabs('{0:s}/{1:s}_stimela_logfile.json'.format(stimela.LOG_HOME, build_label))
try:
cabpath = cabs_logger['{0:s}_{1:s}'.format(build_label, image)]['DIR']
except KeyError:
raise StimelaCabParameterError('Cab {} has is uknown to stimela. Was it built?'.format(image))
parameter_file = cabpath+'/parameters.json'
name = '{0}-{1}{2}'.format(self.name, id(image), str(time.time()).replace('.', ''))
_cab = cab.CabDefinition(indir=input, outdir=output,
msdir=msdir, parameter_file=parameter_file)
cont = docker.Container(image, name,
label=self.label, logger=self.log,
shared_memory=shared_memory,
log_container=stimela.LOG_FILE,
time_out=self.time_out)
# Container parameter file will be updated and validated before the container is executed
cont._cab = _cab
cont.parameter_file_name = '{0}/{1}.json'.format(self.recipe.parameter_file_dir, name)
# Remove dismissable kw arguments:
ops_to_pop = []
for op in config:
if isinstance(config[op], dismissable):
ops_to_pop.append(op)
for op in ops_to_pop:
arg = config.pop(op)()
if arg is not None:
config[op] = arg
cont.config = config
# These are standard volumes and
# environmental variables. These will be
# always exist in a cab container
cont.add_volume(self.recipe.stimela_path, '/scratch/stimela', perm='ro')
cont.add_volume(self.recipe.parameter_file_dir, '/configs', perm='ro')
cont.add_environ('CONFIG', '/configs/{}.json'.format(name))
if msdir:
md = '/home/{0:s}/msdir'.format(USER)
cont.add_volume(msdir, md)
cont.add_environ('MSDIR', md)
# Keep a record of the content of the
# volume
dirname, dirs, files = [a for a in next(os.walk(msdir))]
cont.msdir_content = {
"volume" : dirname,
"dirs" : dirs,
"files" : files,
}
self.log.debug('Mounting volume \'{0}\' from local file system to \'{1}\' in the container'.format(msdir, md))
if input:
cont.add_volume( input,'/input', perm='ro')
cont.add_environ('INPUT', '/input')
# Keep a record of the content of the
# volume
dirname, dirs, files = [a for a in next(os.walk(input))]
cont.input_content = {
"volume" : dirname,
"dirs" : dirs,
"files" : files,
}
self.log.debug('Mounting volume \'{0}\' from local file system to \'{1}\' in the container'.format(input, '/input'))
if not os.path.exists(output):
os.mkdir(output)
od = '/home/{0:s}/output'.format(USER)
cont.add_environ('HOME', od)
self.logfile = cont.logfile = '{0:s}/log-{1:s}.txt'.format(self.log_dir, name.split('-')[0])
cont.add_volume(output, od)
if not os.path.exists(cont.logfile):
with open(cont.logfile, "w") as std:
pass
logfile_cont = '/home/{0:s}/{1:s}/log-{2:s}.txt'.format(USER, self.log_dir, name.split('-')[0])
cont.add_volume(cont.logfile, logfile_cont, "rw")
cont.add_environ('OUTPUT', od)
cont.add_environ('LOGFILE', logfile_cont)
self.log.debug('Mounting volume \'{0}\' from local file system to \'{1}\' in the container'.format(output, od))
cont.image = '{0}_{1}'.format(build_label, image)
# Added and ready for execution
self.job = cont
return 0
|
SpheMakh/Stimela | stimela/recipe.py | StimelaJob.singularity_job | python | def singularity_job(self, image, config, singularity_image_dir,
input=None, output=None, msdir=None,
**kw):
# check if name has any offending charecters
offenders = re.findall('\W', self.name)
if offenders:
raise StimelaCabParameterError('The cab name \'{:s}\' has some non-alphanumeric characters.'
' Charecters making up this name must be in [a-z,A-Z,0-9,_]'.format(self.name))
## Update I/O with values specified on command line
# TODO (sphe) I think this feature should be removed
script_context = self.recipe.stimela_context
input = script_context.get('_STIMELA_INPUT', None) or input
output = script_context.get('_STIMELA_OUTPUT', None) or output
msdir = script_context.get('_STIMELA_MSDIR', None) or msdir
# Get location of template parameters file
cabpath = self.recipe.stimela_path + "/cargo/cab/{0:s}/".format(image.split("/")[1])
parameter_file = cabpath+'/parameters.json'
name = '{0}-{1}{2}'.format(self.name, id(image), str(time.time()).replace('.', ''))
_name = '{0}-{1}{2}'.format(self.name, id(image), str(time.time()).replace('.', ''))
name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(3))
_cab = cab.CabDefinition(indir=input, outdir=output,
msdir=msdir, parameter_file=parameter_file)
cab.IODEST = {
"input" : "/scratch/input",
"output" : "/scratch/output",
"msfile" : "/scratch/msdir",
}
cont = singularity.Container(image, name,
logger=self.log, time_out=self.time_out)
# Container parameter file will be updated and validated before the container is executed
cont._cab = _cab
cont.parameter_file_name = '{0}/{1}.json'.format(self.recipe.parameter_file_dir, name)
# Remove dismissable kw arguments:
ops_to_pop = []
for op in config:
if isinstance(config[op], dismissable):
ops_to_pop.append(op)
for op in ops_to_pop:
arg = config.pop(op)()
if arg is not None:
config[op] = arg
cont.config = config
# These are standard volumes and
# environmental variables. These will be
# always exist in a cab container
cont.add_volume(self.recipe.stimela_path, '/scratch/stimela', perm='ro')
cont.add_volume(cont.parameter_file_name, '/scratch/configfile', perm='ro', noverify=True)
cont.add_volume("{0:s}/cargo/cab/{1:s}/src/".format(
self.recipe.stimela_path, _cab.task), "/scratch/code", "ro")
cont.add_volume("{0:s}/cargo/cab/singularity_run".format(self.recipe.stimela_path,
_cab.task), "/singularity")
if msdir:
md = '/scratch/msdir'
cont.add_volume(msdir, md)
# Keep a record of the content of the
# volume
dirname, dirs, files = [a for a in next(os.walk(msdir))]
cont.msdir_content = {
"volume" : dirname,
"dirs" : dirs,
"files" : files,
}
self.log.debug('Mounting volume \'{0}\' from local file system to \'{1}\' in the container'.format(msdir, md))
if input:
cont.add_volume( input,'/scratch/input', perm='ro')
# Keep a record of the content of the
# volume
dirname, dirs, files = [a for a in next(os.walk(input))]
cont.input_content = {
"volume" : dirname,
"dirs" : dirs,
"files" : files,
}
self.log.debug('Mounting volume \'{0}\' from local file system to \'{1}\' in the container'.format(input, '/scratch/input'))
if not os.path.exists(output):
os.mkdir(output)
od = '/scratch/output'
self.logfile = cont.logfile = '{0}/log-{1}.txt'.format(self.log_dir, _name.split('-')[0])
if not os.path.exists(self.logfile):
with open(self.logfile, 'w') as std:
pass
cont.add_volume(self.log_dir, "/scratch/logs/logfile")
cont.add_volume(output, od)
self.log.debug('Mounting volume \'{0}\' from local file system to \'{1}\' in the container'.format(output, od))
simage = _cab.base.replace("/", "_")
cont.image = '{0:s}/{1:s}_{2:s}.img'.format(singularity_image_dir, simage, _cab.tag)
# Added and ready for execution
self.job = cont
return 0 | Run task in singularity
image : stimela cab name, e.g. 'cab/simms'
name : This name will be part of the name of the contaier that will
execute the task (now optional)
config : Dictionary of options to parse to the task. This will modify
the parameters in the default parameter file which
can be viewd by running 'stimela cabs -i <cab name>', e.g 'stimela cabs -i simms'
input : input dirctory for cab
output : output directory for cab
msdir : MS directory for cab. Only specify if different from recipe ms_dir | train | https://github.com/SpheMakh/Stimela/blob/292e80461a0c3498da8e7e987e2891d3ae5981ad/stimela/recipe.py#L130-L252 | [
"def add_volume(self, host, container, perm=\"rw\", noverify=False):\n\n if os.path.exists(host) or noverify:\n if self.logger:\n self.logger.debug(\"Mounting volume [{0}] in container [{1}] at [{2}]\".format(host, self.name, container))\n host = os.path.abspath(host)\n else:\n raise IOError(\"Path {0} cannot be mounted on container: File doesn't exist\".format(host))\n\n self.volumes.append(\":\".join([host,container,perm]))\n\n return 0\n"
] | class StimelaJob(object):
def __init__(self, name, recipe, label=None,
jtype='docker', cpus=None, memory_limit=None,
singularity_dir=None,
time_out=-1,
log_dir=None):
self.name = name
self.recipe = recipe
self.label = label or '{0}_{1}'.format(name, id(name))
self.log = recipe.log
self.active = False
self.jtype = 'docker' # ['docker', 'python', or 'singularity']
self.job = None
self.created = False
self.args = ['--user {}:{}'.format(UID, GID)]
if cpus:
self.args.append("--cpus {0:f}".format(cpus))
if memory_limit:
self.args.append("--memory {0:s}".format(memory_limit))
self.time_out = time_out
self.log_dir = log_dir
def run_python_job(self):
function = self.job['function']
options = self.job['parameters']
function(**options)
return 0
def run_docker_job(self):
if hasattr(self.job, '_cab'):
self.job._cab.update(self.job.config,
self.job.parameter_file_name)
self.created = False
self.job.create(*self.args)
self.created = True
self.job.start()
return 0
def run_singularity_job(self):
if hasattr(self.job, '_cab'):
self.job._cab.update(self.job.config,
self.job.parameter_file_name)
self.created = False
self.job.start()
self.created = True
self.job.run()
return 0
def python_job(self, function, parameters=None):
"""
Run python function
function : Python callable to execute
name : Name of function (if not given, will used function.__name__)
parameters : Parameters to parse to function
label : Function label; for logging purposes
"""
if not callable(function):
raise utils.StimelaCabRuntimeError('Object given as function is not callable')
if self.name is None:
self.name = function.__name__
self.job = {
'function' : function,
'parameters': parameters,
}
return 0
def singularity_job(self, image, config, singularity_image_dir,
input=None, output=None, msdir=None,
**kw):
"""
Run task in singularity
image : stimela cab name, e.g. 'cab/simms'
name : This name will be part of the name of the contaier that will
execute the task (now optional)
config : Dictionary of options to parse to the task. This will modify
the parameters in the default parameter file which
can be viewd by running 'stimela cabs -i <cab name>', e.g 'stimela cabs -i simms'
input : input dirctory for cab
output : output directory for cab
msdir : MS directory for cab. Only specify if different from recipe ms_dir
"""
# check if name has any offending charecters
offenders = re.findall('\W', self.name)
if offenders:
raise StimelaCabParameterError('The cab name \'{:s}\' has some non-alphanumeric characters.'
' Charecters making up this name must be in [a-z,A-Z,0-9,_]'.format(self.name))
## Update I/O with values specified on command line
# TODO (sphe) I think this feature should be removed
script_context = self.recipe.stimela_context
input = script_context.get('_STIMELA_INPUT', None) or input
output = script_context.get('_STIMELA_OUTPUT', None) or output
msdir = script_context.get('_STIMELA_MSDIR', None) or msdir
# Get location of template parameters file
cabpath = self.recipe.stimela_path + "/cargo/cab/{0:s}/".format(image.split("/")[1])
parameter_file = cabpath+'/parameters.json'
name = '{0}-{1}{2}'.format(self.name, id(image), str(time.time()).replace('.', ''))
_name = '{0}-{1}{2}'.format(self.name, id(image), str(time.time()).replace('.', ''))
name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(3))
_cab = cab.CabDefinition(indir=input, outdir=output,
msdir=msdir, parameter_file=parameter_file)
cab.IODEST = {
"input" : "/scratch/input",
"output" : "/scratch/output",
"msfile" : "/scratch/msdir",
}
cont = singularity.Container(image, name,
logger=self.log, time_out=self.time_out)
# Container parameter file will be updated and validated before the container is executed
cont._cab = _cab
cont.parameter_file_name = '{0}/{1}.json'.format(self.recipe.parameter_file_dir, name)
# Remove dismissable kw arguments:
ops_to_pop = []
for op in config:
if isinstance(config[op], dismissable):
ops_to_pop.append(op)
for op in ops_to_pop:
arg = config.pop(op)()
if arg is not None:
config[op] = arg
cont.config = config
# These are standard volumes and
# environmental variables. These will be
# always exist in a cab container
cont.add_volume(self.recipe.stimela_path, '/scratch/stimela', perm='ro')
cont.add_volume(cont.parameter_file_name, '/scratch/configfile', perm='ro', noverify=True)
cont.add_volume("{0:s}/cargo/cab/{1:s}/src/".format(
self.recipe.stimela_path, _cab.task), "/scratch/code", "ro")
cont.add_volume("{0:s}/cargo/cab/singularity_run".format(self.recipe.stimela_path,
_cab.task), "/singularity")
if msdir:
md = '/scratch/msdir'
cont.add_volume(msdir, md)
# Keep a record of the content of the
# volume
dirname, dirs, files = [a for a in next(os.walk(msdir))]
cont.msdir_content = {
"volume" : dirname,
"dirs" : dirs,
"files" : files,
}
self.log.debug('Mounting volume \'{0}\' from local file system to \'{1}\' in the container'.format(msdir, md))
if input:
cont.add_volume( input,'/scratch/input', perm='ro')
# Keep a record of the content of the
# volume
dirname, dirs, files = [a for a in next(os.walk(input))]
cont.input_content = {
"volume" : dirname,
"dirs" : dirs,
"files" : files,
}
self.log.debug('Mounting volume \'{0}\' from local file system to \'{1}\' in the container'.format(input, '/scratch/input'))
if not os.path.exists(output):
os.mkdir(output)
od = '/scratch/output'
self.logfile = cont.logfile = '{0}/log-{1}.txt'.format(self.log_dir, _name.split('-')[0])
if not os.path.exists(self.logfile):
with open(self.logfile, 'w') as std:
pass
cont.add_volume(self.log_dir, "/scratch/logs/logfile")
cont.add_volume(output, od)
self.log.debug('Mounting volume \'{0}\' from local file system to \'{1}\' in the container'.format(output, od))
simage = _cab.base.replace("/", "_")
cont.image = '{0:s}/{1:s}_{2:s}.img'.format(singularity_image_dir, simage, _cab.tag)
# Added and ready for execution
self.job = cont
return 0
def docker_job(self, image, config=None,
               input=None, output=None, msdir=None,
               shared_memory='1gb', build_label=None,
               **kw):
    """
    Add a docker task to a stimela recipe.

    image : stimela cab name, e.g. 'cab/simms'
    config : Dictionary of options to parse to the task. This will modify
        the parameters in the default parameter file which
        can be viewed by running 'stimela cabs -i <cab name>', e.g 'stimela cabs -i simms'
    input : input directory for cab
    output : output directory for cab
    msdir : MS directory for cab. Only specify if different from recipe ms_dir
    shared_memory : shared memory allocated to the container
    build_label : label of the cab build to use

    Returns 0 once the container is configured and stored in ``self.job``.
    Raises StimelaCabParameterError for a bad job name or an unbuilt cab.
    """
    # config defaults to None but is iterated below -- normalise it so the
    # default no longer crashes with a TypeError
    config = config or {}
    # check if the job name has any offending characters
    # (raw string: '\W' is an invalid escape sequence in modern Python)
    offenders = re.findall(r'\W', self.name)
    if offenders:
        raise StimelaCabParameterError('The cab name \'{:s}\' has some non-alphanumeric characters.'
                                       ' Characters making up this name must be in [a-z,A-Z,0-9,_]'.format(self.name))
    ## Update I/O with values specified on command line
    # TODO (sphe) I think this feature should be removed
    script_context = self.recipe.stimela_context
    input = script_context.get('_STIMELA_INPUT', None) or input
    output = script_context.get('_STIMELA_OUTPUT', None) or output
    msdir = script_context.get('_STIMELA_MSDIR', None) or msdir
    build_label = script_context.get('_STIMELA_BUILD_LABEL', None) or build_label
    # Get location of template parameters file
    cabs_logger = stimela.get_cabs('{0:s}/{1:s}_stimela_logfile.json'.format(stimela.LOG_HOME, build_label))
    try:
        cabpath = cabs_logger['{0:s}_{1:s}'.format(build_label, image)]['DIR']
    except KeyError:
        raise StimelaCabParameterError('Cab {} is unknown to stimela. Was it built?'.format(image))
    parameter_file = cabpath + '/parameters.json'
    # Unique container name: job name + object id + timestamp (dots removed)
    name = '{0}-{1}{2}'.format(self.name, id(image), str(time.time()).replace('.', ''))
    _cab = cab.CabDefinition(indir=input, outdir=output,
                             msdir=msdir, parameter_file=parameter_file)
    cont = docker.Container(image, name,
                            label=self.label, logger=self.log,
                            shared_memory=shared_memory,
                            log_container=stimela.LOG_FILE,
                            time_out=self.time_out)
    # Container parameter file will be updated and validated before the container is executed
    cont._cab = _cab
    cont.parameter_file_name = '{0}/{1}.json'.format(self.recipe.parameter_file_dir, name)
    # Resolve dismissable kw arguments: call each one and keep it only when
    # it evaluates to something other than None
    ops_to_pop = [op for op in config if isinstance(config[op], dismissable)]
    for op in ops_to_pop:
        arg = config.pop(op)()
        if arg is not None:
            config[op] = arg
    cont.config = config
    # Standard volumes and environment variables that exist in every cab container
    cont.add_volume(self.recipe.stimela_path, '/scratch/stimela', perm='ro')
    cont.add_volume(self.recipe.parameter_file_dir, '/configs', perm='ro')
    cont.add_environ('CONFIG', '/configs/{}.json'.format(name))
    if msdir:
        md = '/home/{0:s}/msdir'.format(USER)
        cont.add_volume(msdir, md)
        cont.add_environ('MSDIR', md)
        # Keep a record of the content of the volume
        dirname, dirs, files = next(os.walk(msdir))
        cont.msdir_content = {
            "volume": dirname,
            "dirs": dirs,
            "files": files,
        }
        self.log.debug('Mounting volume \'{0}\' from local file system to \'{1}\' in the container'.format(msdir, md))
    if input:
        cont.add_volume(input, '/input', perm='ro')
        cont.add_environ('INPUT', '/input')
        # Keep a record of the content of the volume
        dirname, dirs, files = next(os.walk(input))
        cont.input_content = {
            "volume": dirname,
            "dirs": dirs,
            "files": files,
        }
        self.log.debug('Mounting volume \'{0}\' from local file system to \'{1}\' in the container'.format(input, '/input'))
    if not os.path.exists(output):
        os.mkdir(output)
    od = '/home/{0:s}/output'.format(USER)
    cont.add_environ('HOME', od)
    self.logfile = cont.logfile = '{0:s}/log-{1:s}.txt'.format(self.log_dir, name.split('-')[0])
    cont.add_volume(output, od)
    # Touch the log file so it exists before being mounted into the container
    if not os.path.exists(cont.logfile):
        with open(cont.logfile, "w"):
            pass
    logfile_cont = '/home/{0:s}/{1:s}/log-{2:s}.txt'.format(USER, self.log_dir, name.split('-')[0])
    cont.add_volume(cont.logfile, logfile_cont, "rw")
    cont.add_environ('OUTPUT', od)
    cont.add_environ('LOGFILE', logfile_cont)
    self.log.debug('Mounting volume \'{0}\' from local file system to \'{1}\' in the container'.format(output, od))
    cont.image = '{0}_{1}'.format(build_label, image)
    # Added and ready for execution
    self.job = cont
    return 0
|
SpheMakh/Stimela | stimela/recipe.py | StimelaJob.docker_job | python | def docker_job(self, image, config=None,
input=None, output=None, msdir=None,
shared_memory='1gb', build_label=None,
**kw):
# check if name has any offending charecters
offenders = re.findall('\W', self.name)
if offenders:
raise StimelaCabParameterError('The cab name \'{:s}\' has some non-alphanumeric characters.'
' Charecters making up this name must be in [a-z,A-Z,0-9,_]'.format(self.name))
## Update I/O with values specified on command line
# TODO (sphe) I think this feature should be removed
script_context = self.recipe.stimela_context
input = script_context.get('_STIMELA_INPUT', None) or input
output = script_context.get('_STIMELA_OUTPUT', None) or output
msdir = script_context.get('_STIMELA_MSDIR', None) or msdir
build_label = script_context.get('_STIMELA_BUILD_LABEL', None) or build_label
# Get location of template parameters file
cabs_logger = stimela.get_cabs('{0:s}/{1:s}_stimela_logfile.json'.format(stimela.LOG_HOME, build_label))
try:
cabpath = cabs_logger['{0:s}_{1:s}'.format(build_label, image)]['DIR']
except KeyError:
raise StimelaCabParameterError('Cab {} has is uknown to stimela. Was it built?'.format(image))
parameter_file = cabpath+'/parameters.json'
name = '{0}-{1}{2}'.format(self.name, id(image), str(time.time()).replace('.', ''))
_cab = cab.CabDefinition(indir=input, outdir=output,
msdir=msdir, parameter_file=parameter_file)
cont = docker.Container(image, name,
label=self.label, logger=self.log,
shared_memory=shared_memory,
log_container=stimela.LOG_FILE,
time_out=self.time_out)
# Container parameter file will be updated and validated before the container is executed
cont._cab = _cab
cont.parameter_file_name = '{0}/{1}.json'.format(self.recipe.parameter_file_dir, name)
# Remove dismissable kw arguments:
ops_to_pop = []
for op in config:
if isinstance(config[op], dismissable):
ops_to_pop.append(op)
for op in ops_to_pop:
arg = config.pop(op)()
if arg is not None:
config[op] = arg
cont.config = config
# These are standard volumes and
# environmental variables. These will be
# always exist in a cab container
cont.add_volume(self.recipe.stimela_path, '/scratch/stimela', perm='ro')
cont.add_volume(self.recipe.parameter_file_dir, '/configs', perm='ro')
cont.add_environ('CONFIG', '/configs/{}.json'.format(name))
if msdir:
md = '/home/{0:s}/msdir'.format(USER)
cont.add_volume(msdir, md)
cont.add_environ('MSDIR', md)
# Keep a record of the content of the
# volume
dirname, dirs, files = [a for a in next(os.walk(msdir))]
cont.msdir_content = {
"volume" : dirname,
"dirs" : dirs,
"files" : files,
}
self.log.debug('Mounting volume \'{0}\' from local file system to \'{1}\' in the container'.format(msdir, md))
if input:
cont.add_volume( input,'/input', perm='ro')
cont.add_environ('INPUT', '/input')
# Keep a record of the content of the
# volume
dirname, dirs, files = [a for a in next(os.walk(input))]
cont.input_content = {
"volume" : dirname,
"dirs" : dirs,
"files" : files,
}
self.log.debug('Mounting volume \'{0}\' from local file system to \'{1}\' in the container'.format(input, '/input'))
if not os.path.exists(output):
os.mkdir(output)
od = '/home/{0:s}/output'.format(USER)
cont.add_environ('HOME', od)
self.logfile = cont.logfile = '{0:s}/log-{1:s}.txt'.format(self.log_dir, name.split('-')[0])
cont.add_volume(output, od)
if not os.path.exists(cont.logfile):
with open(cont.logfile, "w") as std:
pass
logfile_cont = '/home/{0:s}/{1:s}/log-{2:s}.txt'.format(USER, self.log_dir, name.split('-')[0])
cont.add_volume(cont.logfile, logfile_cont, "rw")
cont.add_environ('OUTPUT', od)
cont.add_environ('LOGFILE', logfile_cont)
self.log.debug('Mounting volume \'{0}\' from local file system to \'{1}\' in the container'.format(output, od))
cont.image = '{0}_{1}'.format(build_label, image)
# Added and ready for execution
self.job = cont
return 0 | Add a task to a stimela recipe
image : stimela cab name, e.g. 'cab/simms'
name : This name will be part of the name of the container that will
execute the task (now optional)
config : Dictionary of options to parse to the task. This will modify
the parameters in the default parameter file which
can be viewed by running 'stimela cabs -i <cab name>', e.g 'stimela cabs -i simms'
input : input directory for cab
output : output directory for cab
msdir : MS directory for cab. Only specify if different from recipe ms_dir
"def get_cabs(logfile):\n log = logger.StimelaLogger(logfile)\n cabs_ = log.read()['images']\n\n # Remove images that are not cabs\n for key in cabs_.keys():\n if not cabs_[key]['CAB']:\n del cabs_[key]\n\n return cabs_\n",
"def add_volume(self, host, container, perm=\"rw\"):\n\n if os.path.exists(host):\n if self.logger:\n self.logger.debug(\"Mounting volume [{0}] in container [{1}] at [{2}]\".format(host, self.name, container))\n host = os.path.abspath(host)\n else:\n raise IOError(\"Directory {0} cannot be mounted on container: File doesn't exist\".format(host))\n\n self.volumes.append(\":\".join([host,container,perm]))\n",
"def add_environ(self, key, value):\n if self.logger:\n self.logger.debug(\"Adding environ varaible [{0}={1}] in container {2}\".format(key, value, self.name))\n self.environs.append(\"=\".join([key, value]))\n"
] | class StimelaJob(object):
def __init__(self, name, recipe, label=None,
             jtype='docker', cpus=None, memory_limit=None,
             singularity_dir=None,
             time_out=-1,
             log_dir=None):
    """
    A single task (job) in a stimela recipe.

    name : job name; used to derive container and label names
    recipe : the parent Recipe instance
    label : human-readable label for logging (defaults to '<name>_<id>')
    jtype : job type, one of 'docker', 'singularity' or 'function'
    cpus : CPU limit forwarded to docker as '--cpus'
    memory_limit : memory limit forwarded to docker as '--memory'
    singularity_dir : unused here; kept for interface compatibility
    time_out : job timeout in seconds (-1 means no timeout)
    log_dir : directory for job log files
    """
    self.name = name
    self.recipe = recipe
    self.label = label or '{0}_{1}'.format(name, id(name))
    self.log = recipe.log
    self.active = False
    # honour the requested job type instead of hard-coding 'docker'
    # (the default is still 'docker'; callers such as Recipe.add() may
    # overwrite this attribute after construction)
    self.jtype = jtype  # ['docker', 'singularity', or 'function']
    self.job = None
    self.created = False
    # run containers as the invoking user so output files are owned by them
    self.args = ['--user {}:{}'.format(UID, GID)]
    if cpus:
        self.args.append("--cpus {0:f}".format(cpus))
    if memory_limit:
        self.args.append("--memory {0:s}".format(memory_limit))
    self.time_out = time_out
    self.log_dir = log_dir
def run_python_job(self):
    """Execute a python-function job: call the stored callable with its stored parameters."""
    job_spec = self.job
    job_spec['function'](**job_spec['parameters'])
    return 0
def run_docker_job(self):
    """Create and start the docker container for this job.

    If the container carries a cab definition, its parameter file is
    refreshed from the job config first. ``self.created`` records whether
    ``create`` succeeded so cleanup code knows what to tear down.
    """
    container = self.job
    if hasattr(container, '_cab'):
        container._cab.update(container.config,
                              container.parameter_file_name)
    self.created = False
    container.create(*self.args)
    self.created = True
    container.start()
    return 0
def run_singularity_job(self):
    """Start and run the singularity container for this job.

    A cab definition attached to the container is refreshed from the job
    config before execution. ``self.created`` records whether ``start``
    succeeded so cleanup code knows whether a stop is needed.
    """
    container = self.job
    if hasattr(container, '_cab'):
        container._cab.update(container.config,
                              container.parameter_file_name)
    self.created = False
    container.start()
    self.created = True
    container.run()
    return 0
def python_job(self, function, parameters=None):
    """Register a python callable as this job's payload.

    function : callable executed later by run_python_job()
    parameters : keyword arguments to pass to the callable
    label : job label is taken from the instance; the name defaults to
        function.__name__ when not already set

    Raises utils.StimelaCabRuntimeError when *function* is not callable.
    """
    if not callable(function):
        raise utils.StimelaCabRuntimeError('Object given as function is not callable')
    # fall back to the callable's own name when no job name was given
    self.name = self.name if self.name is not None else function.__name__
    self.job = dict(function=function, parameters=parameters)
    return 0
def singularity_job(self, image, config, singularity_image_dir,
                    input=None, output=None, msdir=None,
                    **kw):
    """
    Run a task in singularity.

    image : stimela cab name, e.g. 'cab/simms'
    config : Dictionary of options to parse to the task. This will modify
        the parameters in the default parameter file which
        can be viewed by running 'stimela cabs -i <cab name>', e.g 'stimela cabs -i simms'
    singularity_image_dir : directory holding the singularity image files
    input : input directory for cab
    output : output directory for cab
    msdir : MS directory for cab. Only specify if different from recipe ms_dir

    Returns 0 once the container is configured and stored in ``self.job``.
    """
    # check if the job name has any offending characters
    # (raw string: '\W' is an invalid escape sequence in modern Python)
    offenders = re.findall(r'\W', self.name)
    if offenders:
        raise StimelaCabParameterError('The cab name \'{:s}\' has some non-alphanumeric characters.'
                                       ' Characters making up this name must be in [a-z,A-Z,0-9,_]'.format(self.name))
    ## Update I/O with values specified on command line
    # TODO (sphe) I think this feature should be removed
    script_context = self.recipe.stimela_context
    input = script_context.get('_STIMELA_INPUT', None) or input
    output = script_context.get('_STIMELA_OUTPUT', None) or output
    msdir = script_context.get('_STIMELA_MSDIR', None) or msdir
    # Get location of template parameters file
    cabpath = self.recipe.stimela_path + "/cargo/cab/{0:s}/".format(image.split("/")[1])
    parameter_file = cabpath + '/parameters.json'
    # _name (deterministic) is used for the log file; the container itself
    # gets a short random name. (The original computed a timestamped `name`
    # and immediately overwrote it -- that dead assignment is removed.)
    _name = '{0}-{1}{2}'.format(self.name, id(image), str(time.time()).replace('.', ''))
    name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(3))
    _cab = cab.CabDefinition(indir=input, outdir=output,
                             msdir=msdir, parameter_file=parameter_file)
    # NOTE(review): this mutates module-level cab.IODEST for every job --
    # confirm this global rebinding is intended
    cab.IODEST = {
        "input": "/scratch/input",
        "output": "/scratch/output",
        "msfile": "/scratch/msdir",
    }
    cont = singularity.Container(image, name,
                                 logger=self.log, time_out=self.time_out)
    # Container parameter file will be updated and validated before the container is executed
    cont._cab = _cab
    cont.parameter_file_name = '{0}/{1}.json'.format(self.recipe.parameter_file_dir, name)
    # Resolve dismissable kw arguments: call each one and keep it only when
    # it evaluates to something other than None
    ops_to_pop = [op for op in config if isinstance(config[op], dismissable)]
    for op in ops_to_pop:
        arg = config.pop(op)()
        if arg is not None:
            config[op] = arg
    cont.config = config
    # Standard volumes that exist in every cab container
    cont.add_volume(self.recipe.stimela_path, '/scratch/stimela', perm='ro')
    cont.add_volume(cont.parameter_file_name, '/scratch/configfile', perm='ro', noverify=True)
    cont.add_volume("{0:s}/cargo/cab/{1:s}/src/".format(
        self.recipe.stimela_path, _cab.task), "/scratch/code", "ro")
    cont.add_volume("{0:s}/cargo/cab/singularity_run".format(self.recipe.stimela_path),
                    "/singularity")
    if msdir:
        md = '/scratch/msdir'
        cont.add_volume(msdir, md)
        # Keep a record of the content of the volume
        dirname, dirs, files = next(os.walk(msdir))
        cont.msdir_content = {
            "volume": dirname,
            "dirs": dirs,
            "files": files,
        }
        self.log.debug('Mounting volume \'{0}\' from local file system to \'{1}\' in the container'.format(msdir, md))
    if input:
        cont.add_volume(input, '/scratch/input', perm='ro')
        # Keep a record of the content of the volume
        dirname, dirs, files = next(os.walk(input))
        cont.input_content = {
            "volume": dirname,
            "dirs": dirs,
            "files": files,
        }
        self.log.debug('Mounting volume \'{0}\' from local file system to \'{1}\' in the container'.format(input, '/scratch/input'))
    if not os.path.exists(output):
        os.mkdir(output)
    od = '/scratch/output'
    self.logfile = cont.logfile = '{0}/log-{1}.txt'.format(self.log_dir, _name.split('-')[0])
    # Touch the log file so it exists before the log dir is mounted
    if not os.path.exists(self.logfile):
        with open(self.logfile, 'w'):
            pass
    cont.add_volume(self.log_dir, "/scratch/logs/logfile")
    cont.add_volume(output, od)
    self.log.debug('Mounting volume \'{0}\' from local file system to \'{1}\' in the container'.format(output, od))
    # Image file name is derived from the cab's base image and tag
    simage = _cab.base.replace("/", "_")
    cont.image = '{0:s}/{1:s}_{2:s}.img'.format(singularity_image_dir, simage, _cab.tag)
    # Added and ready for execution
    self.job = cont
    return 0
def docker_job(self, image, config=None,
               input=None, output=None, msdir=None,
               shared_memory='1gb', build_label=None,
               **kw):
    """
    Add a docker task to a stimela recipe.

    image : stimela cab name, e.g. 'cab/simms'
    config : Dictionary of options to parse to the task. This will modify
        the parameters in the default parameter file which
        can be viewed by running 'stimela cabs -i <cab name>', e.g 'stimela cabs -i simms'
    input : input directory for cab
    output : output directory for cab
    msdir : MS directory for cab. Only specify if different from recipe ms_dir
    shared_memory : shared memory allocated to the container
    build_label : label of the cab build to use

    Returns 0 once the container is configured and stored in ``self.job``.
    Raises StimelaCabParameterError for a bad job name or an unbuilt cab.
    """
    # config defaults to None but is iterated below -- normalise it so the
    # default no longer crashes with a TypeError
    config = config or {}
    # check if the job name has any offending characters
    # (raw string: '\W' is an invalid escape sequence in modern Python)
    offenders = re.findall(r'\W', self.name)
    if offenders:
        raise StimelaCabParameterError('The cab name \'{:s}\' has some non-alphanumeric characters.'
                                       ' Characters making up this name must be in [a-z,A-Z,0-9,_]'.format(self.name))
    ## Update I/O with values specified on command line
    # TODO (sphe) I think this feature should be removed
    script_context = self.recipe.stimela_context
    input = script_context.get('_STIMELA_INPUT', None) or input
    output = script_context.get('_STIMELA_OUTPUT', None) or output
    msdir = script_context.get('_STIMELA_MSDIR', None) or msdir
    build_label = script_context.get('_STIMELA_BUILD_LABEL', None) or build_label
    # Get location of template parameters file
    cabs_logger = stimela.get_cabs('{0:s}/{1:s}_stimela_logfile.json'.format(stimela.LOG_HOME, build_label))
    try:
        cabpath = cabs_logger['{0:s}_{1:s}'.format(build_label, image)]['DIR']
    except KeyError:
        raise StimelaCabParameterError('Cab {} is unknown to stimela. Was it built?'.format(image))
    parameter_file = cabpath + '/parameters.json'
    # Unique container name: job name + object id + timestamp (dots removed)
    name = '{0}-{1}{2}'.format(self.name, id(image), str(time.time()).replace('.', ''))
    _cab = cab.CabDefinition(indir=input, outdir=output,
                             msdir=msdir, parameter_file=parameter_file)
    cont = docker.Container(image, name,
                            label=self.label, logger=self.log,
                            shared_memory=shared_memory,
                            log_container=stimela.LOG_FILE,
                            time_out=self.time_out)
    # Container parameter file will be updated and validated before the container is executed
    cont._cab = _cab
    cont.parameter_file_name = '{0}/{1}.json'.format(self.recipe.parameter_file_dir, name)
    # Resolve dismissable kw arguments: call each one and keep it only when
    # it evaluates to something other than None
    ops_to_pop = [op for op in config if isinstance(config[op], dismissable)]
    for op in ops_to_pop:
        arg = config.pop(op)()
        if arg is not None:
            config[op] = arg
    cont.config = config
    # Standard volumes and environment variables that exist in every cab container
    cont.add_volume(self.recipe.stimela_path, '/scratch/stimela', perm='ro')
    cont.add_volume(self.recipe.parameter_file_dir, '/configs', perm='ro')
    cont.add_environ('CONFIG', '/configs/{}.json'.format(name))
    if msdir:
        md = '/home/{0:s}/msdir'.format(USER)
        cont.add_volume(msdir, md)
        cont.add_environ('MSDIR', md)
        # Keep a record of the content of the volume
        dirname, dirs, files = next(os.walk(msdir))
        cont.msdir_content = {
            "volume": dirname,
            "dirs": dirs,
            "files": files,
        }
        self.log.debug('Mounting volume \'{0}\' from local file system to \'{1}\' in the container'.format(msdir, md))
    if input:
        cont.add_volume(input, '/input', perm='ro')
        cont.add_environ('INPUT', '/input')
        # Keep a record of the content of the volume
        dirname, dirs, files = next(os.walk(input))
        cont.input_content = {
            "volume": dirname,
            "dirs": dirs,
            "files": files,
        }
        self.log.debug('Mounting volume \'{0}\' from local file system to \'{1}\' in the container'.format(input, '/input'))
    if not os.path.exists(output):
        os.mkdir(output)
    od = '/home/{0:s}/output'.format(USER)
    cont.add_environ('HOME', od)
    self.logfile = cont.logfile = '{0:s}/log-{1:s}.txt'.format(self.log_dir, name.split('-')[0])
    cont.add_volume(output, od)
    # Touch the log file so it exists before being mounted into the container
    if not os.path.exists(cont.logfile):
        with open(cont.logfile, "w"):
            pass
    logfile_cont = '/home/{0:s}/{1:s}/log-{2:s}.txt'.format(USER, self.log_dir, name.split('-')[0])
    cont.add_volume(cont.logfile, logfile_cont, "rw")
    cont.add_environ('OUTPUT', od)
    cont.add_environ('LOGFILE', logfile_cont)
    self.log.debug('Mounting volume \'{0}\' from local file system to \'{1}\' in the container'.format(output, od))
    cont.image = '{0}_{1}'.format(build_label, image)
    # Added and ready for execution
    self.job = cont
    return 0
|
SpheMakh/Stimela | stimela/recipe.py | Recipe.run | python | def run(self, steps=None, resume=False, redo=None):
recipe = {
"name" : self.name,
"steps" : []
}
start_at = 0
if redo:
recipe = utils.readJson(redo)
self.log.info('Rerunning recipe {0} from {1}'.format(recipe['name'], redo))
self.log.info('Recreating recipe instance..')
self.jobs = []
for step in recipe['steps']:
# add I/O folders to the json file
# add a string describing the contents of these folders
# The user has to ensure that these folders exist, and have the required content
if step['jtype'] == 'docker':
self.log.info('Adding job \'{0}\' to recipe. The container will be named \'{1}\''.format(step['cab'], step['name']))
cont = docker.Container(step['cab'], step['name'],
label=step['label'], logger=self.log,
shared_memory=step['shared_memory'])
self.log.debug('Adding volumes {0} and environmental variables {1}'.format(step['volumes'], step['environs']))
cont.volumes = step['volumes']
cont.environs = step['environs']
cont.shared_memory = step['shared_memory']
cont.input_content = step['input_content']
cont.msdir_content = step['msdir_content']
cont.logfile = step['logfile']
job = StimelaJob(step['name'], recipe=self, label=step['label'])
job.job = cont
job.jtype = 'docker'
elif step['jtype'] == 'function':
name = step['name']
func = inspect.currentframe().f_back.f_locals[step['function']]
job = StimelaJob(name, recipe=self, label=step['label'])
job.python_job(func, step['parameters'])
job.jtype = 'function'
self.jobs.append(job)
elif resume:
self.log.info("Resuming recipe from last run.")
try:
recipe = utils.readJson(self.resume_file)
except IOError:
raise StimelaRecipeExecutionError("Cannot resume pipeline, resume file '{}' not found".format(self.resume_file))
steps_ = recipe.pop('steps')
recipe['steps'] = []
_steps = []
for step in steps_:
if step['status'] == 'completed':
recipe['steps'].append(step)
continue
label = step['label']
number = step['number']
# Check if the recipe flow has changed
if label == self.jobs[number-1].label:
self.log.info('recipe step \'{0}\' is fit for re-execution. Label = {1}'.format(number, label))
_steps.append(number)
else:
raise StimelaRecipeExecutionError('Recipe flow, or task scheduling has changed. Cannot resume recipe. Label = {0}'.format(label))
# Check whether there are steps to resume
if len(_steps)==0:
self.log.info('All the steps were completed. No steps to resume')
sys.exit(0)
steps = _steps
if getattr(steps, '__iter__', False):
_steps = []
if isinstance(steps[0], str):
labels = [ job.label.split('::')[0] for job in self.jobs]
for step in steps:
try:
_steps.append(labels.index(step)+1)
except ValueError:
raise StimelaCabParameterError('Recipe label ID [{0}] doesn\'t exist'.format(step))
steps = _steps
else:
steps = range(1, len(self.jobs)+1)
jobs = [(step, self.jobs[step-1]) for step in steps]
for i, (step, job) in enumerate(jobs):
self.log.info('Running job {}'.format(job.name))
self.log.info('STEP {0} :: {1}'.format(i+1, job.label))
self.active = job
try:
if job.jtype == 'function':
job.run_python_job()
elif job.jtype in ['docker', 'singularity']:
with open(job.job.logfile, 'a') as astd:
astd.write('\n-----------------------------------\n')
astd.write('Stimela version : {}\n'.format(version.version))
astd.write('Cab name : {}\n'.format(job.job.image))
astd.write('-------------------------------------\n')
run_job = getattr(job, "run_{0:s}_job".format(job.jtype))
run_job()
self.log2recipe(job, recipe, step, 'completed')
except (utils.StimelaCabRuntimeError,
StimelaRecipeExecutionError,
StimelaCabParameterError) as e:
self.completed = [jb[1] for jb in jobs[:i]]
self.remaining = [jb[1] for jb in jobs[i+1:]]
self.failed = job
self.log.info('Recipe execution failed while running job {}'.format(job.name))
self.log.info('Completed jobs : {}'.format([c.name for c in self.completed]))
self.log.info('Remaining jobs : {}'.format([c.name for c in self.remaining]))
self.log2recipe(job, recipe, step, 'failed')
for step, jb in jobs[i+1:]:
self.log.info('Logging remaining task: {}'.format(jb.label))
self.log2recipe(jb, recipe, step, 'remaining')
self.log.info('Saving pipeline information in {}'.format(self.resume_file))
utils.writeJson(self.resume_file, recipe)
pe = PipelineException(e, self.completed, job, self.remaining)
raise_(pe, None, sys.exc_info()[2])
except:
import traceback
traceback.print_exc()
raise RuntimeError("An unhandled exception has occured. This is a bug, please report")
finally:
if job.jtype == 'docker' and job.created:
job.job.stop()
job.job.remove()
if job.jtype == 'singularity' and job.created:
job.job.stop()
self.log.info('Saving pipeline information in {}'.format(self.resume_file))
utils.writeJson(self.resume_file, recipe)
self.log.info('Recipe executed successfully')
return 0 | Run a Stimela recipe.
steps : recipe steps to run
resume : resume recipe from last run
redo : Re-run an old recipe from a .last file | train | https://github.com/SpheMakh/Stimela/blob/292e80461a0c3498da8e7e987e2891d3ae5981ad/stimela/recipe.py#L539-L697 | [
"def readJson(conf):\n with open(conf) as _std:\n jdict = yaml.safe_load(_std)\n return jdict\n",
"def writeJson(config, dictionary):\n with codecs.open(config, 'w', 'utf8') as std:\n std.write(json.dumps(dictionary, ensure_ascii=False))\n",
"def python_job(self, function, parameters=None):\n \"\"\"\n Run python function\n\n function : Python callable to execute\n name : Name of function (if not given, will used function.__name__)\n parameters : Parameters to parse to function\n label : Function label; for logging purposes\n \"\"\"\n\n if not callable(function):\n raise utils.StimelaCabRuntimeError('Object given as function is not callable')\n\n if self.name is None:\n self.name = function.__name__\n\n self.job = {\n 'function' : function,\n 'parameters': parameters,\n }\n\n return 0\n",
"def log2recipe(self, job, recipe, num, status):\n\n if job.jtype in ['docker', 'singularity']:\n cont = job.job\n step = {\n \"name\" : cont.name,\n \"number\" : num,\n \"cab\" : cont.image,\n \"volumes\" : cont.volumes,\n \"environs\" : getattr(cont, \"environs\", None),\n \"shared_memory\" : getattr(cont, \"shared_memory\", None),\n \"input_content\" : cont.input_content,\n \"msdir_content\" : cont.msdir_content,\n \"label\" : getattr(cont, \"label\", \"\"),\n \"logfile\" : cont.logfile,\n \"status\" : status,\n \"jtype\" : 'docker',\n }\n else:\n step = {\n \"name\" : job.name,\n \"number\" : num,\n \"label\" : job.label,\n \"status\" : status,\n \"function\" : job.job['function'].__name__,\n \"jtype\" : 'function',\n \"parameters\" : job.job['parameters'],\n }\n\n recipe['steps'].append(step)\n\n return 0\n"
] | class Recipe(object):
def __init__(self, name, data=None,
             parameter_file_dir=None, ms_dir=None,
             tag=None, build_label=None, loglevel='INFO',
             loggername='STIMELA', singularity_image_dir=None, log_dir=None):
    """
    Define and manage a stimela recipe instance.

    name : Name of stimela recipe
    data : Path of stimela data. The data is assumed to be at Stimela/stimela/cargo/data
    msdir : Path of MSs to be used during the execution of the recipe
    tag : Use cabs with a specific tag
    parameter_file_dir : Will store task specific parameter files here
    """
    self.log = logging.getLogger(loggername)
    self.log.setLevel(getattr(logging, loglevel))
    # sanitised recipe name used in file names (lower case, no spaces)
    name_ = name.lower().replace(' ', '_')
    self.log_dir = log_dir
    if self.log_dir:
        if not os.path.exists(self.log_dir):
            self.log.info('The Log directory \'{0:s}\' cannot be found. Will create it'.format(self.log_dir))
            os.mkdir(self.log_dir)
        self.logfile = '{0:s}/log-{1:s}.txt'.format(log_dir, name_)
    else:
        self.logfile = 'log-{}.txt'.format(name_)
    # Create file handler which logs even debug
    # messages
    # resume file records completed steps so a failed run can be resumed
    self.resume_file = '.last_{}.json'.format(name_)
    fh = logging.FileHandler(self.logfile, 'w')
    fh.setLevel(logging.DEBUG)
    # Create console handler with a higher log level
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(getattr(logging, loglevel))
    # Create formatter and add it to the handlers
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    fh.setFormatter(formatter)
    # Add the handlers to logger, but only once per handler type so that
    # repeated Recipe construction does not duplicate log output
    len(list(filter(lambda x: isinstance(x, logging.StreamHandler), self.log.handlers))) == 0 and self.log.addHandler(ch)
    len(list(filter(lambda x: isinstance(x, logging.FileHandler), self.log.handlers))) == 0 and self.log.addHandler(fh)
    # globals of the calling script; used to pick up _STIMELA_* overrides
    self.stimela_context = inspect.currentframe().f_back.f_globals
    self.stimela_path = os.path.dirname(docker.__file__)
    self.name = name
    self.build_label = build_label or USER
    self.ms_dir = ms_dir
    # NOTE(review): ms_dir defaults to None, in which case os.path.exists
    # raises TypeError -- presumably callers always pass ms_dir; confirm
    if not os.path.exists(self.ms_dir):
        self.log.info('MS directory \'{}\' does not exist. Will create it'.format(self.ms_dir))
        os.mkdir(self.ms_dir)
    self.tag = tag
    # create a folder to store config files
    # if it doesn't exist. These config
    # files can be resued to re-run the
    # task
    self.parameter_file_dir = parameter_file_dir or "stimela_parameter_files"
    if not os.path.exists(self.parameter_file_dir):
        self.log.info('Config directory cannot be found. Will create ./{}'.format(self.parameter_file_dir))
        os.mkdir(self.parameter_file_dir)
    self.jobs = []
    self.completed = []
    self.failed = None
    self.remaining = []
    #self.proc_logger = utils.logger.StimelaLogger(stimela.LOG_FILE)
    self.pid = os.getpid()
    #self.proc_logger.log_process(self.pid, self.name)
    #self.proc_logger.write()
    self.singularity_image_dir = singularity_image_dir
    self.log.info('---------------------------------')
    self.log.info('Stimela version {0}'.format(version.version))
    self.log.info('Sphesihle Makhathini <sphemakh@gmail.com>')
    self.log.info('Running: {:s}'.format(self.name))
    self.log.info('---------------------------------')
def add(self, image, name, config=None,
        input=None, output=None, msdir=None,
        label=None, shared_memory='1gb',
        build_label=None,
        cpus=None, memory_limit=None,
        time_out=-1,
        log_dir=None):
    """
    Add a task to the recipe.

    image : stimela cab name (e.g. 'cab/simms') or a python callable
    name : job name; part of the container name
    config : options dict for the task (or callable parameters)
    input/output/msdir : I/O directories for the cab
    label : human-readable label for logging
    shared_memory : docker shared memory allocation
    build_label : cab build label (defaults to the recipe's)
    cpus / memory_limit : docker resource limits
    time_out : job timeout in seconds (-1 means no timeout)
    log_dir : per-job log directory (falls back to the recipe's)
    """
    if self.log_dir:
        if not os.path.exists(self.log_dir):
            self.log.info('The Log directory \'{0:s}\' cannot be found. Will create it'.format(self.log_dir))
            # create the directory we just checked; the original mistakenly
            # created the per-job `log_dir` argument, which may be None
            os.mkdir(self.log_dir)
    else:
        log_dir = self.log_dir
    job = StimelaJob(name, recipe=self, label=label,
                     cpus=cpus, memory_limit=memory_limit, time_out=time_out,
                     log_dir=self.log_dir or output)
    if callable(image):
        # python-function job: `image` is the callable, `config` its kwargs
        job.jtype = 'function'
        job.python_job(image, parameters=config)
        self.jobs.append(job)
        self.log.info('Adding Python job \'{0}\' to recipe.'.format(name))
    else:
        # container job: singularity when an image dir is configured, else docker
        job.jtype = 'singularity' if self.singularity_image_dir else 'docker'
        job_func = getattr(job, "{0:s}_job".format(job.jtype))
        job_func(image=image, config=config,
                 input=input, output=output, msdir=msdir or self.ms_dir,
                 shared_memory=shared_memory, build_label=build_label or self.build_label,
                 singularity_image_dir=self.singularity_image_dir,
                 time_out=time_out)
        self.log.info('Adding cab \'{0}\' to recipe. The container will be named \'{1}\''.format(job.job.image, name))
        self.jobs.append(job)
    return 0
def log2recipe(self, job, recipe, num, status):
    """Append a serializable record of *job* to ``recipe['steps']``.

    job : a StimelaJob (container or function job)
    recipe : the in-progress recipe dict being written to the resume file
    num : step number within the recipe
    status : step state, e.g. 'completed', 'failed' or 'remaining'
    """
    if job.jtype in ('docker', 'singularity'):
        cont = job.job
        entry = {
            "name": cont.name,
            "number": num,
            "cab": cont.image,
            "volumes": cont.volumes,
            "environs": getattr(cont, "environs", None),
            "shared_memory": getattr(cont, "shared_memory", None),
            "input_content": cont.input_content,
            "msdir_content": cont.msdir_content,
            "label": getattr(cont, "label", ""),
            "logfile": cont.logfile,
            "status": status,
            # container steps are always recorded as 'docker' for resume
            "jtype": 'docker',
        }
    else:
        entry = {
            "name": job.name,
            "number": num,
            "label": job.label,
            "status": status,
            "function": job.job['function'].__name__,
            "jtype": 'function',
            "parameters": job.job['parameters'],
        }
    recipe['steps'].append(entry)
    return 0
def run(self, steps=None, resume=False, redo=None):
    """
    Run a Stimela recipe.

    steps : recipe steps to run
    resume : resume recipe from last run
    redo : Re-run an old recipe from a .last file
    """
    # Fresh execution record; completed/failed/remaining steps are logged
    # into it via self.log2recipe and persisted to self.resume_file.
    recipe = {
        "name" : self.name,
        "steps" : []
    }
    start_at = 0  # NOTE(review): assigned but never read below

    if redo:
        # --- Redo mode: rebuild self.jobs from a previously saved recipe ---
        recipe = utils.readJson(redo)
        self.log.info('Rerunning recipe {0} from {1}'.format(recipe['name'], redo))
        self.log.info('Recreating recipe instance..')
        self.jobs = []
        for step in recipe['steps']:
            # add I/O folders to the json file
            # add a string describing the contents of these folders
            # The user has to ensure that these folders exist, and have the required content
            if step['jtype'] == 'docker':
                self.log.info('Adding job \'{0}\' to recipe. The container will be named \'{1}\''.format(step['cab'], step['name']))
                cont = docker.Container(step['cab'], step['name'],
                                        label=step['label'], logger=self.log,
                                        shared_memory=step['shared_memory'])
                self.log.debug('Adding volumes {0} and environmental variables {1}'.format(step['volumes'], step['environs']))
                cont.volumes = step['volumes']
                cont.environs = step['environs']
                cont.shared_memory = step['shared_memory']
                cont.input_content = step['input_content']
                cont.msdir_content = step['msdir_content']
                cont.logfile = step['logfile']
                job = StimelaJob(step['name'], recipe=self, label=step['label'])
                job.job = cont
                job.jtype = 'docker'
            elif step['jtype'] == 'function':
                name = step['name']
                # Look up the function object by name in the CALLER's local
                # scope — redo only works if the caller still defines it.
                func = inspect.currentframe().f_back.f_locals[step['function']]
                job = StimelaJob(name, recipe=self, label=step['label'])
                job.python_job(func, step['parameters'])
                job.jtype = 'function'
            self.jobs.append(job)
    elif resume:
        # --- Resume mode: re-run only the steps not yet 'completed' ---
        self.log.info("Resuming recipe from last run.")
        try:
            recipe = utils.readJson(self.resume_file)
        except IOError:
            raise StimelaRecipeExecutionError("Cannot resume pipeline, resume file '{}' not found".format(self.resume_file))
        steps_ = recipe.pop('steps')
        recipe['steps'] = []
        _steps = []
        for step in steps_:
            if step['status'] == 'completed':
                # Keep completed steps in the record; don't re-run them.
                recipe['steps'].append(step)
                continue
            label = step['label']
            number = step['number']
            # Check if the recipe flow has changed
            if label == self.jobs[number-1].label:
                self.log.info('recipe step \'{0}\' is fit for re-execution. Label = {1}'.format(number, label))
                _steps.append(number)
            else:
                raise StimelaRecipeExecutionError('Recipe flow, or task scheduling has changed. Cannot resume recipe. Label = {0}'.format(label))
        # Check whether there are steps to resume
        if len(_steps)==0:
            self.log.info('All the steps were completed. No steps to resume')
            sys.exit(0)
        steps = _steps

    # Normalise 'steps' to a sequence of 1-based job numbers. String entries
    # are matched against job labels (portion before '::').
    if getattr(steps, '__iter__', False):
        _steps = []
        if isinstance(steps[0], str):
            labels = [ job.label.split('::')[0] for job in self.jobs]
            for step in steps:
                try:
                    _steps.append(labels.index(step)+1)
                except ValueError:
                    raise StimelaCabParameterError('Recipe label ID [{0}] doesn\'t exist'.format(step))
            steps = _steps
    else:
        # steps=None (or non-iterable): run everything.
        steps = range(1, len(self.jobs)+1)
    jobs = [(step, self.jobs[step-1]) for step in steps]

    for i, (step, job) in enumerate(jobs):
        self.log.info('Running job {}'.format(job.name))
        self.log.info('STEP {0} :: {1}'.format(i+1, job.label))
        self.active = job
        try:
            if job.jtype == 'function':
                job.run_python_job()
            elif job.jtype in ['docker', 'singularity']:
                # Stamp a header into the container's logfile before running.
                with open(job.job.logfile, 'a') as astd:
                    astd.write('\n-----------------------------------\n')
                    astd.write('Stimela version : {}\n'.format(version.version))
                    astd.write('Cab name : {}\n'.format(job.job.image))
                    astd.write('-------------------------------------\n')
                run_job = getattr(job, "run_{0:s}_job".format(job.jtype))
                run_job()
            self.log2recipe(job, recipe, step, 'completed')
        except (utils.StimelaCabRuntimeError,
                StimelaRecipeExecutionError,
                StimelaCabParameterError) as e:
            # Known failure: record what completed / failed / remains,
            # persist the resume file, then re-raise as PipelineException
            # with the original traceback preserved.
            self.completed = [jb[1] for jb in jobs[:i]]
            self.remaining = [jb[1] for jb in jobs[i+1:]]
            self.failed = job

            self.log.info('Recipe execution failed while running job {}'.format(job.name))
            self.log.info('Completed jobs : {}'.format([c.name for c in self.completed]))
            self.log.info('Remaining jobs : {}'.format([c.name for c in self.remaining]))
            self.log2recipe(job, recipe, step, 'failed')
            for step, jb in jobs[i+1:]:
                self.log.info('Logging remaining task: {}'.format(jb.label))
                self.log2recipe(jb, recipe, step, 'remaining')
            self.log.info('Saving pipeline information in {}'.format(self.resume_file))
            utils.writeJson(self.resume_file, recipe)
            pe = PipelineException(e, self.completed, job, self.remaining)
            raise_(pe, None, sys.exc_info()[2])
        except:
            # Unknown failure — a bug; dump the traceback and abort.
            import traceback
            traceback.print_exc()
            raise RuntimeError("An unhandled exception has occured. This is a bug, please report")
        finally:
            # Always tear down containers, success or failure.
            if job.jtype == 'docker' and job.created:
                job.job.stop()
                job.job.remove()
            if job.jtype == 'singularity' and job.created:
                job.job.stop()

    self.log.info('Saving pipeline information in {}'.format(self.resume_file))
    utils.writeJson(self.resume_file, recipe)

    self.log.info('Recipe executed successfully')
    return 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.