repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
jmbeach/KEP.py | src/keppy/project.py | Project.parse_channels | python | def parse_channels(self):
channels = []
for channel in self._project_dict["channels"]:
channels.append(Channel(channel, self._is_sixteen_bit, self._ignore_list))
return channels | Creates an array of Channel objects from the project | train | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/project.py#L20-L25 | null | class Project(object):
"""Represents a Kepware project"""
def __init__(self, kepware_dict, is_sixteen_bit, ignore_list):
self._ignore_list = ignore_list
self._kepware_dict = kepware_dict
self._project_dict = kepware_dict["project"]
self._is_sixteen_bit = is_sixteen_bit
self._channels = self.parse_channels()
@property
def channels(self):
"""Gets the channels of the project"""
return self._channels
def as_json(self):
"""Returns the stringified JSON representation of the Kepware
project"""
return json.dumps(OrderedDict(self._kepware_dict))
def update(self):
"""Updates the dictionary of the project"""
for channel in self.channels:
channel.update()
for i in range(len(self._project_dict["channels"])):
channel_dict = self._project_dict["channels"][i]
for channel in self.channels:
if channel.name == channel_dict["common.ALLTYPES_NAME"]:
self._project_dict["channels"][i] = channel.as_dict()
|
jmbeach/KEP.py | src/keppy/project.py | Project.update | python | def update(self):
for channel in self.channels:
channel.update()
for i in range(len(self._project_dict["channels"])):
channel_dict = self._project_dict["channels"][i]
for channel in self.channels:
if channel.name == channel_dict["common.ALLTYPES_NAME"]:
self._project_dict["channels"][i] = channel.as_dict() | Updates the dictionary of the project | train | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/project.py#L32-L40 | null | class Project(object):
"""Represents a Kepware project"""
def __init__(self, kepware_dict, is_sixteen_bit, ignore_list):
self._ignore_list = ignore_list
self._kepware_dict = kepware_dict
self._project_dict = kepware_dict["project"]
self._is_sixteen_bit = is_sixteen_bit
self._channels = self.parse_channels()
@property
def channels(self):
"""Gets the channels of the project"""
return self._channels
def parse_channels(self):
"""Creates an array of Channel objects from the project"""
channels = []
for channel in self._project_dict["channels"]:
channels.append(Channel(channel, self._is_sixteen_bit, self._ignore_list))
return channels
def as_json(self):
"""Returns the stringified JSON representation of the Kepware
project"""
return json.dumps(OrderedDict(self._kepware_dict))
|
jmbeach/KEP.py | src/keppy/channel.py | Channel.parse_devices | python | def parse_devices(self):
devices = []
for device in self._channel_dict["devices"]:
devices.append(Device(device, self._is_sixteen_bit, self._ignore_list))
return devices | Creates an array of Device objects from the channel | train | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/channel.py#L19-L24 | null | class Channel(object):
"""Represents a Kepware channel"""
def __init__(self, channel_dict, is_sixteen_bit, ignore_list):
self._channel_dict = channel_dict
self._ignore_list = ignore_list
self.set_driver_simulated()
self._is_sixteen_bit = is_sixteen_bit
self._devices = self.parse_devices()
def each_device(self, work):
"""Perform work on each device.
Work is a function whcich takes a device as a parameter"""
for device in self._devices:
work(device)
def set_driver_simulated(self):
"""Sets the channel driver to simulator"""
self._channel_dict["servermain.MULTIPLE_TYPES_DEVICE_DRIVER"] = "Simulator"
@property
def devices(self):
"""Gets the channel devices"""
return self._devices
@property
def name(self):
"""Gets the name of the device"""
return self._channel_dict["common.ALLTYPES_NAME"]
def as_dict(self):
"""Returns dictionary representation of the channel"""
return self._channel_dict
def update(self):
"""Updates the dictionary of the channel"""
for device in self.devices:
device.update()
for i in range(len(self._channel_dict["devices"])):
device_dict = self._channel_dict["devices"][i]
for device in self._devices:
if device.name == device_dict["common.ALLTYPES_NAME"]:
self._channel_dict["devices"][i] = device.as_dict()
|
jmbeach/KEP.py | src/keppy/channel.py | Channel.update | python | def update(self):
for device in self.devices:
device.update()
for i in range(len(self._channel_dict["devices"])):
device_dict = self._channel_dict["devices"][i]
for device in self._devices:
if device.name == device_dict["common.ALLTYPES_NAME"]:
self._channel_dict["devices"][i] = device.as_dict() | Updates the dictionary of the channel | train | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/channel.py#L44-L52 | null | class Channel(object):
"""Represents a Kepware channel"""
def __init__(self, channel_dict, is_sixteen_bit, ignore_list):
self._channel_dict = channel_dict
self._ignore_list = ignore_list
self.set_driver_simulated()
self._is_sixteen_bit = is_sixteen_bit
self._devices = self.parse_devices()
def each_device(self, work):
"""Perform work on each device.
Work is a function whcich takes a device as a parameter"""
for device in self._devices:
work(device)
def parse_devices(self):
"""Creates an array of Device objects from the channel"""
devices = []
for device in self._channel_dict["devices"]:
devices.append(Device(device, self._is_sixteen_bit, self._ignore_list))
return devices
def set_driver_simulated(self):
"""Sets the channel driver to simulator"""
self._channel_dict["servermain.MULTIPLE_TYPES_DEVICE_DRIVER"] = "Simulator"
@property
def devices(self):
"""Gets the channel devices"""
return self._devices
@property
def name(self):
"""Gets the name of the device"""
return self._channel_dict["common.ALLTYPES_NAME"]
def as_dict(self):
"""Returns dictionary representation of the channel"""
return self._channel_dict
|
jmbeach/KEP.py | src/keppy/simulator_device.py | SimulatorDevice.process_tag | python | def process_tag(self, tag):
try:
if not self._is_function(tag):
self._tag_type_processor[tag.data_type](tag)
except KeyError as ex:
raise Exception('Tag type {0} not recognized for tag {1}'
.format(
tag.data_type,
tag.name),
ex) | Processes tag and detects which function to use | train | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/simulator_device.py#L48-L58 | [
"def _is_function(self, tag):\n function_names = ['USER', 'RAMP', 'SINE', 'RANDOM']\n for function_name in function_names:\n if function_name in tag.get_address():\n return True\n return False\n"
] | class SimulatorDevice(object):
"""Represents a simulator device"""
def __init__(self, is_16bit, normal_register_initial_address="K0000", string_register_initial_address="S000"):
self._normal_register = RegularRegister(is_16bit, normal_register_initial_address)
self._string_register = StringRegister(is_16bit, string_register_initial_address)
self._is_sixteen_bit = is_16bit
self._tag_type_processor = {
TagDataType.BOOLEAN: self.process_boolean,
TagDataType.BOOLEAN_ARRAY: self.process_boolean_array,
TagDataType.BYTE: self.process_byte,
TagDataType.DWORD: self.process_dword,
TagDataType.LONG: self.process_dword,
TagDataType.DWORD_ARRAY: self.process_dword_array,
TagDataType.FLOAT: self.process_float,
TagDataType.REAL_ARRAY: self.process_real_array,
TagDataType.SHORT: self.process_short,
TagDataType.SHORT_ARRAY: self.process_short_array,
TagDataType.STRING: self.process_string,
TagDataType.WORD: self.process_word,
TagDataType.WORD_ARRAY: self.process_word_array,
TagDataType.LLONG: self._process_64_bit_type,
TagDataType.QWORD: self._process_64_bit_type,
TagDataType.DOUBLE: self._process_64_bit_type
}
@property
def normal_register(self):
"""Gets the normal register"""
return self._normal_register
@property
def string_register(self):
"""Gets the string register"""
return self._string_register
@property
def is_sixteen_bit(self):
"""Returns True if device is 16 bit"""
return self._is_sixteen_bit
def process_boolean(self, tag):
"""Process Boolean type tags"""
tag.set_address(self.normal_register.current_bit_address)
self.normal_register.move_to_next_bit_address()
def process_boolean_array(self, tag):
"""Process Boolean array type tags"""
array_size = tag.get_array_size()
tag.set_address(self.normal_register.get_array(array_size))
if self.is_sixteen_bit:
# each boolean address needs 1/16 byte
self.normal_register.move_to_next_address((array_size / 16) + 1)
return
# each boolean address needs 1/8 byte
self.normal_register.move_to_next_address((array_size / 8) + 1)
def process_byte(self, tag):
"""Process byte type tags"""
tag.set_address(self.normal_register.current_address)
# each address needs 1 byte
self.normal_register.move_to_next_address(1)
def _process_64_bit_type(self, tag):
tag.set_address(self.normal_register.current_address)
if self.is_sixteen_bit:
# each qword address needs 8 bytes = 4 addresses
self.normal_register.move_to_next_address(4)
return
# each qword address needs 8 bytes = 8 addresses
self.normal_register.move_to_next_address(8)
def _process_32_bit_type(self, tag):
tag.set_address(self.normal_register.current_address)
if self.is_sixteen_bit:
# each dword address needs 4 bytes = 2 addresses
self.normal_register.move_to_next_address(2)
return
# each dword address needs 4 bytes = 4 addresses
self.normal_register.move_to_next_address(4)
def _process_16_bit_type(self, tag):
tag.set_address(self.normal_register.current_address)
if self.is_sixteen_bit:
# each short address needs 2 bytes = 1 address
self.normal_register.move_to_next_address(1)
return
# each short address needs 2 bytes = 2 addresses
self.normal_register.move_to_next_address(2)
def process_dword(self, tag):
"""Process dword type tags"""
self._process_32_bit_type(tag)
def process_float(self, tag):
"""Process float type tags"""
self._process_32_bit_type(tag)
def process_dword_array(self, tag):
"""Process dword array type tags"""
array_size = tag.get_array_size()
tag.set_address(self.normal_register.get_array(array_size))
if self.is_sixteen_bit:
# each double word address needs 4 bytes = 2 addresses
self.normal_register.move_to_next_address(array_size * 2)
return
# each double word address needs 4 bytes = 4 addresses
self.normal_register.move_to_next_address(array_size * 4)
def process_real_array(self, tag):
"""Process real array type tags"""
array_size = tag.get_array_size()
tag.set_address(self.normal_register.get_array(array_size))
if self.is_sixteen_bit:
# each real address needs 2 bytes = 1 address
self.normal_register.move_to_next_address(array_size)
return
# each real address needs 2 bytes = 2 addresses
self.normal_register.move_to_next_address(array_size * 2)
def process_short(self, tag):
"""Process short type tags"""
self._process_16_bit_type(tag)
def process_word(self, tag):
"""Process word type tags"""
self._process_16_bit_type(tag)
def _process_16_bit_array(self, tag):
array_size = tag.get_array_size()
tag.set_address(self.normal_register.get_array(array_size))
if self.is_sixteen_bit:
# each short address needs two bytes = 1 address
self.normal_register.move_to_next_address(array_size)
return
# each short address needs two bytes = 2 addresses
self.normal_register.move_to_next_address(array_size * 2)
def process_short_array(self, tag):
"""Process short array type tags"""
self._process_16_bit_array(tag)
def process_word_array(self, tag):
"""Process short array type tags"""
self._process_16_bit_array(tag)
def process_string(self, tag):
"""Process string type tags"""
tag.set_address(self.string_register.current_address)
if self.is_sixteen_bit:
# each string address needs 1 byte = 1/2 an address
self.string_register.move_to_next_address(1)
return
# each string address needs 1 byte = 1 address
self.string_register.move_to_next_address(1)
def _is_function(self, tag):
function_names = ['USER', 'RAMP', 'SINE', 'RANDOM']
for function_name in function_names:
if function_name in tag.get_address():
return True
return False
|
jmbeach/KEP.py | src/keppy/simulator_device.py | SimulatorDevice.process_boolean | python | def process_boolean(self, tag):
tag.set_address(self.normal_register.current_bit_address)
self.normal_register.move_to_next_bit_address() | Process Boolean type tags | train | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/simulator_device.py#L61-L64 | null | class SimulatorDevice(object):
"""Represents a simulator device"""
def __init__(self, is_16bit, normal_register_initial_address="K0000", string_register_initial_address="S000"):
self._normal_register = RegularRegister(is_16bit, normal_register_initial_address)
self._string_register = StringRegister(is_16bit, string_register_initial_address)
self._is_sixteen_bit = is_16bit
self._tag_type_processor = {
TagDataType.BOOLEAN: self.process_boolean,
TagDataType.BOOLEAN_ARRAY: self.process_boolean_array,
TagDataType.BYTE: self.process_byte,
TagDataType.DWORD: self.process_dword,
TagDataType.LONG: self.process_dword,
TagDataType.DWORD_ARRAY: self.process_dword_array,
TagDataType.FLOAT: self.process_float,
TagDataType.REAL_ARRAY: self.process_real_array,
TagDataType.SHORT: self.process_short,
TagDataType.SHORT_ARRAY: self.process_short_array,
TagDataType.STRING: self.process_string,
TagDataType.WORD: self.process_word,
TagDataType.WORD_ARRAY: self.process_word_array,
TagDataType.LLONG: self._process_64_bit_type,
TagDataType.QWORD: self._process_64_bit_type,
TagDataType.DOUBLE: self._process_64_bit_type
}
@property
def normal_register(self):
"""Gets the normal register"""
return self._normal_register
@property
def string_register(self):
"""Gets the string register"""
return self._string_register
@property
def is_sixteen_bit(self):
"""Returns True if device is 16 bit"""
return self._is_sixteen_bit
def process_tag(self, tag):
"""Processes tag and detects which function to use"""
try:
if not self._is_function(tag):
self._tag_type_processor[tag.data_type](tag)
except KeyError as ex:
raise Exception('Tag type {0} not recognized for tag {1}'
.format(
tag.data_type,
tag.name),
ex)
def process_boolean_array(self, tag):
"""Process Boolean array type tags"""
array_size = tag.get_array_size()
tag.set_address(self.normal_register.get_array(array_size))
if self.is_sixteen_bit:
# each boolean address needs 1/16 byte
self.normal_register.move_to_next_address((array_size / 16) + 1)
return
# each boolean address needs 1/8 byte
self.normal_register.move_to_next_address((array_size / 8) + 1)
def process_byte(self, tag):
"""Process byte type tags"""
tag.set_address(self.normal_register.current_address)
# each address needs 1 byte
self.normal_register.move_to_next_address(1)
def _process_64_bit_type(self, tag):
tag.set_address(self.normal_register.current_address)
if self.is_sixteen_bit:
# each qword address needs 8 bytes = 4 addresses
self.normal_register.move_to_next_address(4)
return
# each qword address needs 8 bytes = 8 addresses
self.normal_register.move_to_next_address(8)
def _process_32_bit_type(self, tag):
tag.set_address(self.normal_register.current_address)
if self.is_sixteen_bit:
# each dword address needs 4 bytes = 2 addresses
self.normal_register.move_to_next_address(2)
return
# each dword address needs 4 bytes = 4 addresses
self.normal_register.move_to_next_address(4)
def _process_16_bit_type(self, tag):
tag.set_address(self.normal_register.current_address)
if self.is_sixteen_bit:
# each short address needs 2 bytes = 1 address
self.normal_register.move_to_next_address(1)
return
# each short address needs 2 bytes = 2 addresses
self.normal_register.move_to_next_address(2)
def process_dword(self, tag):
"""Process dword type tags"""
self._process_32_bit_type(tag)
def process_float(self, tag):
"""Process float type tags"""
self._process_32_bit_type(tag)
def process_dword_array(self, tag):
"""Process dword array type tags"""
array_size = tag.get_array_size()
tag.set_address(self.normal_register.get_array(array_size))
if self.is_sixteen_bit:
# each double word address needs 4 bytes = 2 addresses
self.normal_register.move_to_next_address(array_size * 2)
return
# each double word address needs 4 bytes = 4 addresses
self.normal_register.move_to_next_address(array_size * 4)
def process_real_array(self, tag):
"""Process real array type tags"""
array_size = tag.get_array_size()
tag.set_address(self.normal_register.get_array(array_size))
if self.is_sixteen_bit:
# each real address needs 2 bytes = 1 address
self.normal_register.move_to_next_address(array_size)
return
# each real address needs 2 bytes = 2 addresses
self.normal_register.move_to_next_address(array_size * 2)
def process_short(self, tag):
"""Process short type tags"""
self._process_16_bit_type(tag)
def process_word(self, tag):
"""Process word type tags"""
self._process_16_bit_type(tag)
def _process_16_bit_array(self, tag):
array_size = tag.get_array_size()
tag.set_address(self.normal_register.get_array(array_size))
if self.is_sixteen_bit:
# each short address needs two bytes = 1 address
self.normal_register.move_to_next_address(array_size)
return
# each short address needs two bytes = 2 addresses
self.normal_register.move_to_next_address(array_size * 2)
def process_short_array(self, tag):
"""Process short array type tags"""
self._process_16_bit_array(tag)
def process_word_array(self, tag):
"""Process short array type tags"""
self._process_16_bit_array(tag)
def process_string(self, tag):
"""Process string type tags"""
tag.set_address(self.string_register.current_address)
if self.is_sixteen_bit:
# each string address needs 1 byte = 1/2 an address
self.string_register.move_to_next_address(1)
return
# each string address needs 1 byte = 1 address
self.string_register.move_to_next_address(1)
def _is_function(self, tag):
function_names = ['USER', 'RAMP', 'SINE', 'RANDOM']
for function_name in function_names:
if function_name in tag.get_address():
return True
return False
|
jmbeach/KEP.py | src/keppy/simulator_device.py | SimulatorDevice.process_boolean_array | python | def process_boolean_array(self, tag):
array_size = tag.get_array_size()
tag.set_address(self.normal_register.get_array(array_size))
if self.is_sixteen_bit:
# each boolean address needs 1/16 byte
self.normal_register.move_to_next_address((array_size / 16) + 1)
return
# each boolean address needs 1/8 byte
self.normal_register.move_to_next_address((array_size / 8) + 1) | Process Boolean array type tags | train | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/simulator_device.py#L66-L75 | null | class SimulatorDevice(object):
"""Represents a simulator device"""
def __init__(self, is_16bit, normal_register_initial_address="K0000", string_register_initial_address="S000"):
self._normal_register = RegularRegister(is_16bit, normal_register_initial_address)
self._string_register = StringRegister(is_16bit, string_register_initial_address)
self._is_sixteen_bit = is_16bit
self._tag_type_processor = {
TagDataType.BOOLEAN: self.process_boolean,
TagDataType.BOOLEAN_ARRAY: self.process_boolean_array,
TagDataType.BYTE: self.process_byte,
TagDataType.DWORD: self.process_dword,
TagDataType.LONG: self.process_dword,
TagDataType.DWORD_ARRAY: self.process_dword_array,
TagDataType.FLOAT: self.process_float,
TagDataType.REAL_ARRAY: self.process_real_array,
TagDataType.SHORT: self.process_short,
TagDataType.SHORT_ARRAY: self.process_short_array,
TagDataType.STRING: self.process_string,
TagDataType.WORD: self.process_word,
TagDataType.WORD_ARRAY: self.process_word_array,
TagDataType.LLONG: self._process_64_bit_type,
TagDataType.QWORD: self._process_64_bit_type,
TagDataType.DOUBLE: self._process_64_bit_type
}
@property
def normal_register(self):
"""Gets the normal register"""
return self._normal_register
@property
def string_register(self):
"""Gets the string register"""
return self._string_register
@property
def is_sixteen_bit(self):
"""Returns True if device is 16 bit"""
return self._is_sixteen_bit
def process_tag(self, tag):
"""Processes tag and detects which function to use"""
try:
if not self._is_function(tag):
self._tag_type_processor[tag.data_type](tag)
except KeyError as ex:
raise Exception('Tag type {0} not recognized for tag {1}'
.format(
tag.data_type,
tag.name),
ex)
def process_boolean(self, tag):
"""Process Boolean type tags"""
tag.set_address(self.normal_register.current_bit_address)
self.normal_register.move_to_next_bit_address()
def process_byte(self, tag):
"""Process byte type tags"""
tag.set_address(self.normal_register.current_address)
# each address needs 1 byte
self.normal_register.move_to_next_address(1)
def _process_64_bit_type(self, tag):
tag.set_address(self.normal_register.current_address)
if self.is_sixteen_bit:
# each qword address needs 8 bytes = 4 addresses
self.normal_register.move_to_next_address(4)
return
# each qword address needs 8 bytes = 8 addresses
self.normal_register.move_to_next_address(8)
def _process_32_bit_type(self, tag):
tag.set_address(self.normal_register.current_address)
if self.is_sixteen_bit:
# each dword address needs 4 bytes = 2 addresses
self.normal_register.move_to_next_address(2)
return
# each dword address needs 4 bytes = 4 addresses
self.normal_register.move_to_next_address(4)
def _process_16_bit_type(self, tag):
tag.set_address(self.normal_register.current_address)
if self.is_sixteen_bit:
# each short address needs 2 bytes = 1 address
self.normal_register.move_to_next_address(1)
return
# each short address needs 2 bytes = 2 addresses
self.normal_register.move_to_next_address(2)
def process_dword(self, tag):
"""Process dword type tags"""
self._process_32_bit_type(tag)
def process_float(self, tag):
"""Process float type tags"""
self._process_32_bit_type(tag)
def process_dword_array(self, tag):
"""Process dword array type tags"""
array_size = tag.get_array_size()
tag.set_address(self.normal_register.get_array(array_size))
if self.is_sixteen_bit:
# each double word address needs 4 bytes = 2 addresses
self.normal_register.move_to_next_address(array_size * 2)
return
# each double word address needs 4 bytes = 4 addresses
self.normal_register.move_to_next_address(array_size * 4)
def process_real_array(self, tag):
"""Process real array type tags"""
array_size = tag.get_array_size()
tag.set_address(self.normal_register.get_array(array_size))
if self.is_sixteen_bit:
# each real address needs 2 bytes = 1 address
self.normal_register.move_to_next_address(array_size)
return
# each real address needs 2 bytes = 2 addresses
self.normal_register.move_to_next_address(array_size * 2)
def process_short(self, tag):
"""Process short type tags"""
self._process_16_bit_type(tag)
def process_word(self, tag):
"""Process word type tags"""
self._process_16_bit_type(tag)
def _process_16_bit_array(self, tag):
array_size = tag.get_array_size()
tag.set_address(self.normal_register.get_array(array_size))
if self.is_sixteen_bit:
# each short address needs two bytes = 1 address
self.normal_register.move_to_next_address(array_size)
return
# each short address needs two bytes = 2 addresses
self.normal_register.move_to_next_address(array_size * 2)
def process_short_array(self, tag):
"""Process short array type tags"""
self._process_16_bit_array(tag)
def process_word_array(self, tag):
"""Process short array type tags"""
self._process_16_bit_array(tag)
def process_string(self, tag):
"""Process string type tags"""
tag.set_address(self.string_register.current_address)
if self.is_sixteen_bit:
# each string address needs 1 byte = 1/2 an address
self.string_register.move_to_next_address(1)
return
# each string address needs 1 byte = 1 address
self.string_register.move_to_next_address(1)
def _is_function(self, tag):
function_names = ['USER', 'RAMP', 'SINE', 'RANDOM']
for function_name in function_names:
if function_name in tag.get_address():
return True
return False
|
jmbeach/KEP.py | src/keppy/simulator_device.py | SimulatorDevice.process_byte | python | def process_byte(self, tag):
tag.set_address(self.normal_register.current_address)
# each address needs 1 byte
self.normal_register.move_to_next_address(1) | Process byte type tags | train | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/simulator_device.py#L77-L81 | null | class SimulatorDevice(object):
"""Represents a simulator device"""
def __init__(self, is_16bit, normal_register_initial_address="K0000", string_register_initial_address="S000"):
self._normal_register = RegularRegister(is_16bit, normal_register_initial_address)
self._string_register = StringRegister(is_16bit, string_register_initial_address)
self._is_sixteen_bit = is_16bit
self._tag_type_processor = {
TagDataType.BOOLEAN: self.process_boolean,
TagDataType.BOOLEAN_ARRAY: self.process_boolean_array,
TagDataType.BYTE: self.process_byte,
TagDataType.DWORD: self.process_dword,
TagDataType.LONG: self.process_dword,
TagDataType.DWORD_ARRAY: self.process_dword_array,
TagDataType.FLOAT: self.process_float,
TagDataType.REAL_ARRAY: self.process_real_array,
TagDataType.SHORT: self.process_short,
TagDataType.SHORT_ARRAY: self.process_short_array,
TagDataType.STRING: self.process_string,
TagDataType.WORD: self.process_word,
TagDataType.WORD_ARRAY: self.process_word_array,
TagDataType.LLONG: self._process_64_bit_type,
TagDataType.QWORD: self._process_64_bit_type,
TagDataType.DOUBLE: self._process_64_bit_type
}
@property
def normal_register(self):
"""Gets the normal register"""
return self._normal_register
@property
def string_register(self):
"""Gets the string register"""
return self._string_register
@property
def is_sixteen_bit(self):
"""Returns True if device is 16 bit"""
return self._is_sixteen_bit
def process_tag(self, tag):
"""Processes tag and detects which function to use"""
try:
if not self._is_function(tag):
self._tag_type_processor[tag.data_type](tag)
except KeyError as ex:
raise Exception('Tag type {0} not recognized for tag {1}'
.format(
tag.data_type,
tag.name),
ex)
def process_boolean(self, tag):
"""Process Boolean type tags"""
tag.set_address(self.normal_register.current_bit_address)
self.normal_register.move_to_next_bit_address()
def process_boolean_array(self, tag):
"""Process Boolean array type tags"""
array_size = tag.get_array_size()
tag.set_address(self.normal_register.get_array(array_size))
if self.is_sixteen_bit:
# each boolean address needs 1/16 byte
self.normal_register.move_to_next_address((array_size / 16) + 1)
return
# each boolean address needs 1/8 byte
self.normal_register.move_to_next_address((array_size / 8) + 1)
def _process_64_bit_type(self, tag):
tag.set_address(self.normal_register.current_address)
if self.is_sixteen_bit:
# each qword address needs 8 bytes = 4 addresses
self.normal_register.move_to_next_address(4)
return
# each qword address needs 8 bytes = 8 addresses
self.normal_register.move_to_next_address(8)
def _process_32_bit_type(self, tag):
tag.set_address(self.normal_register.current_address)
if self.is_sixteen_bit:
# each dword address needs 4 bytes = 2 addresses
self.normal_register.move_to_next_address(2)
return
# each dword address needs 4 bytes = 4 addresses
self.normal_register.move_to_next_address(4)
def _process_16_bit_type(self, tag):
tag.set_address(self.normal_register.current_address)
if self.is_sixteen_bit:
# each short address needs 2 bytes = 1 address
self.normal_register.move_to_next_address(1)
return
# each short address needs 2 bytes = 2 addresses
self.normal_register.move_to_next_address(2)
def process_dword(self, tag):
"""Process dword type tags"""
self._process_32_bit_type(tag)
def process_float(self, tag):
"""Process float type tags"""
self._process_32_bit_type(tag)
def process_dword_array(self, tag):
"""Process dword array type tags"""
array_size = tag.get_array_size()
tag.set_address(self.normal_register.get_array(array_size))
if self.is_sixteen_bit:
# each double word address needs 4 bytes = 2 addresses
self.normal_register.move_to_next_address(array_size * 2)
return
# each double word address needs 4 bytes = 4 addresses
self.normal_register.move_to_next_address(array_size * 4)
def process_real_array(self, tag):
"""Process real array type tags"""
array_size = tag.get_array_size()
tag.set_address(self.normal_register.get_array(array_size))
if self.is_sixteen_bit:
# each real address needs 2 bytes = 1 address
self.normal_register.move_to_next_address(array_size)
return
# each real address needs 2 bytes = 2 addresses
self.normal_register.move_to_next_address(array_size * 2)
def process_short(self, tag):
"""Process short type tags"""
self._process_16_bit_type(tag)
def process_word(self, tag):
"""Process word type tags"""
self._process_16_bit_type(tag)
def _process_16_bit_array(self, tag):
array_size = tag.get_array_size()
tag.set_address(self.normal_register.get_array(array_size))
if self.is_sixteen_bit:
# each short address needs two bytes = 1 address
self.normal_register.move_to_next_address(array_size)
return
# each short address needs two bytes = 2 addresses
self.normal_register.move_to_next_address(array_size * 2)
def process_short_array(self, tag):
"""Process short array type tags"""
self._process_16_bit_array(tag)
def process_word_array(self, tag):
"""Process short array type tags"""
self._process_16_bit_array(tag)
def process_string(self, tag):
"""Process string type tags"""
tag.set_address(self.string_register.current_address)
if self.is_sixteen_bit:
# each string address needs 1 byte = 1/2 an address
self.string_register.move_to_next_address(1)
return
# each string address needs 1 byte = 1 address
self.string_register.move_to_next_address(1)
def _is_function(self, tag):
function_names = ['USER', 'RAMP', 'SINE', 'RANDOM']
for function_name in function_names:
if function_name in tag.get_address():
return True
return False
|
jmbeach/KEP.py | src/keppy/simulator_device.py | SimulatorDevice.process_string | python | def process_string(self, tag):
tag.set_address(self.string_register.current_address)
if self.is_sixteen_bit:
# each string address needs 1 byte = 1/2 an address
self.string_register.move_to_next_address(1)
return
# each string address needs 1 byte = 1 address
self.string_register.move_to_next_address(1) | Process string type tags | train | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/simulator_device.py#L166-L174 | null | class SimulatorDevice(object):
"""Represents a simulator device"""
def __init__(self, is_16bit, normal_register_initial_address="K0000", string_register_initial_address="S000"):
self._normal_register = RegularRegister(is_16bit, normal_register_initial_address)
self._string_register = StringRegister(is_16bit, string_register_initial_address)
self._is_sixteen_bit = is_16bit
self._tag_type_processor = {
TagDataType.BOOLEAN: self.process_boolean,
TagDataType.BOOLEAN_ARRAY: self.process_boolean_array,
TagDataType.BYTE: self.process_byte,
TagDataType.DWORD: self.process_dword,
TagDataType.LONG: self.process_dword,
TagDataType.DWORD_ARRAY: self.process_dword_array,
TagDataType.FLOAT: self.process_float,
TagDataType.REAL_ARRAY: self.process_real_array,
TagDataType.SHORT: self.process_short,
TagDataType.SHORT_ARRAY: self.process_short_array,
TagDataType.STRING: self.process_string,
TagDataType.WORD: self.process_word,
TagDataType.WORD_ARRAY: self.process_word_array,
TagDataType.LLONG: self._process_64_bit_type,
TagDataType.QWORD: self._process_64_bit_type,
TagDataType.DOUBLE: self._process_64_bit_type
}
@property
def normal_register(self):
"""Gets the normal register"""
return self._normal_register
@property
def string_register(self):
"""Gets the string register"""
return self._string_register
@property
def is_sixteen_bit(self):
"""Returns True if device is 16 bit"""
return self._is_sixteen_bit
def process_tag(self, tag):
"""Processes tag and detects which function to use"""
try:
if not self._is_function(tag):
self._tag_type_processor[tag.data_type](tag)
except KeyError as ex:
raise Exception('Tag type {0} not recognized for tag {1}'
.format(
tag.data_type,
tag.name),
ex)
def process_boolean(self, tag):
"""Process Boolean type tags"""
tag.set_address(self.normal_register.current_bit_address)
self.normal_register.move_to_next_bit_address()
def process_boolean_array(self, tag):
"""Process Boolean array type tags"""
array_size = tag.get_array_size()
tag.set_address(self.normal_register.get_array(array_size))
if self.is_sixteen_bit:
# each boolean address needs 1/16 byte
self.normal_register.move_to_next_address((array_size / 16) + 1)
return
# each boolean address needs 1/8 byte
self.normal_register.move_to_next_address((array_size / 8) + 1)
def process_byte(self, tag):
"""Process byte type tags"""
tag.set_address(self.normal_register.current_address)
# each address needs 1 byte
self.normal_register.move_to_next_address(1)
def _process_64_bit_type(self, tag):
tag.set_address(self.normal_register.current_address)
if self.is_sixteen_bit:
# each qword address needs 8 bytes = 4 addresses
self.normal_register.move_to_next_address(4)
return
# each qword address needs 8 bytes = 8 addresses
self.normal_register.move_to_next_address(8)
def _process_32_bit_type(self, tag):
tag.set_address(self.normal_register.current_address)
if self.is_sixteen_bit:
# each dword address needs 4 bytes = 2 addresses
self.normal_register.move_to_next_address(2)
return
# each dword address needs 4 bytes = 4 addresses
self.normal_register.move_to_next_address(4)
def _process_16_bit_type(self, tag):
tag.set_address(self.normal_register.current_address)
if self.is_sixteen_bit:
# each short address needs 2 bytes = 1 address
self.normal_register.move_to_next_address(1)
return
# each short address needs 2 bytes = 2 addresses
self.normal_register.move_to_next_address(2)
def process_dword(self, tag):
"""Process dword type tags"""
self._process_32_bit_type(tag)
def process_float(self, tag):
"""Process float type tags"""
self._process_32_bit_type(tag)
def process_dword_array(self, tag):
"""Process dword array type tags"""
array_size = tag.get_array_size()
tag.set_address(self.normal_register.get_array(array_size))
if self.is_sixteen_bit:
# each double word address needs 4 bytes = 2 addresses
self.normal_register.move_to_next_address(array_size * 2)
return
# each double word address needs 4 bytes = 4 addresses
self.normal_register.move_to_next_address(array_size * 4)
def process_real_array(self, tag):
"""Process real array type tags"""
array_size = tag.get_array_size()
tag.set_address(self.normal_register.get_array(array_size))
if self.is_sixteen_bit:
# each real address needs 2 bytes = 1 address
self.normal_register.move_to_next_address(array_size)
return
# each real address needs 2 bytes = 2 addresses
self.normal_register.move_to_next_address(array_size * 2)
def process_short(self, tag):
"""Process short type tags"""
self._process_16_bit_type(tag)
def process_word(self, tag):
"""Process word type tags"""
self._process_16_bit_type(tag)
def _process_16_bit_array(self, tag):
array_size = tag.get_array_size()
tag.set_address(self.normal_register.get_array(array_size))
if self.is_sixteen_bit:
# each short address needs two bytes = 1 address
self.normal_register.move_to_next_address(array_size)
return
# each short address needs two bytes = 2 addresses
self.normal_register.move_to_next_address(array_size * 2)
def process_short_array(self, tag):
"""Process short array type tags"""
self._process_16_bit_array(tag)
def process_word_array(self, tag):
"""Process short array type tags"""
self._process_16_bit_array(tag)
def _is_function(self, tag):
function_names = ['USER', 'RAMP', 'SINE', 'RANDOM']
for function_name in function_names:
if function_name in tag.get_address():
return True
return False
|
jmbeach/KEP.py | src/keppy/tag_group.py | TagGroup.parse_tags | python | def parse_tags(self):
tags = []
try:
for tag in self._tag_group_dict["tags"]:
tags.append(Tag(tag))
except:
return tags
return tags | Parses tags in tag group | train | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/tag_group.py#L18-L26 | null | class TagGroup(object):
"""Represents a Kepware tag group"""
def __init__(self, tag_group_dict):
self._tag_group_dict = tag_group_dict
self._tags = self.parse_tags()
self._is_ignored = False
self._sub_groups = []
# process sub groups
if "tag_groups" in self._tag_group_dict:
for sub_group in self._tag_group_dict["tag_groups"]:
self._sub_groups.append(TagGroup(sub_group))
@property
def tags(self):
"""Gets the tags of the tag group"""
return self._tags
@property
def name(self):
"""Gets the name of the tag group"""
if self._is_ignored:
return ''
return self._tag_group_dict["common.ALLTYPES_NAME"]
@property
def sub_groups(self):
return self._sub_groups
def as_dict(self):
"""Returns dictionary representation of the tag group"""
return self._tag_group_dict
def as_json(self):
"""Returns the stringified JSON representation of the Kepware
tag group"""
return json.dumps(OrderedDict(self._tag_group_dict))
def set_name(self, name):
self._tag_group_dict["common.ALLTYPES_NAME"] = name
def update(self):
"""Updates the dictionary of the tag group"""
if self._is_ignored or "tags" not in self._tag_group_dict:
return
for i in range(len(self._tag_group_dict["tags"])):
tag_dict = self._tag_group_dict["tags"][i]
for tag in self._tags:
if tag.name == tag_dict["common.ALLTYPES_NAME"]:
self._tag_group_dict["tags"][i] = tag.as_dict()
break
for i in range(len(self._sub_groups)):
sub_group = self._sub_groups[i]
sub_group.update()
self._tag_group_dict["tag_groups"][i] = sub_group.as_dict()
|
jmbeach/KEP.py | src/keppy/tag_group.py | TagGroup.update | python | def update(self):
if self._is_ignored or "tags" not in self._tag_group_dict:
return
for i in range(len(self._tag_group_dict["tags"])):
tag_dict = self._tag_group_dict["tags"][i]
for tag in self._tags:
if tag.name == tag_dict["common.ALLTYPES_NAME"]:
self._tag_group_dict["tags"][i] = tag.as_dict()
break
for i in range(len(self._sub_groups)):
sub_group = self._sub_groups[i]
sub_group.update()
self._tag_group_dict["tag_groups"][i] = sub_group.as_dict() | Updates the dictionary of the tag group | train | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/tag_group.py#L56-L70 | null | class TagGroup(object):
"""Represents a Kepware tag group"""
def __init__(self, tag_group_dict):
self._tag_group_dict = tag_group_dict
self._tags = self.parse_tags()
self._is_ignored = False
self._sub_groups = []
# process sub groups
if "tag_groups" in self._tag_group_dict:
for sub_group in self._tag_group_dict["tag_groups"]:
self._sub_groups.append(TagGroup(sub_group))
def parse_tags(self):
"""Parses tags in tag group"""
tags = []
try:
for tag in self._tag_group_dict["tags"]:
tags.append(Tag(tag))
except:
return tags
return tags
@property
def tags(self):
"""Gets the tags of the tag group"""
return self._tags
@property
def name(self):
"""Gets the name of the tag group"""
if self._is_ignored:
return ''
return self._tag_group_dict["common.ALLTYPES_NAME"]
@property
def sub_groups(self):
return self._sub_groups
def as_dict(self):
"""Returns dictionary representation of the tag group"""
return self._tag_group_dict
def as_json(self):
"""Returns the stringified JSON representation of the Kepware
tag group"""
return json.dumps(OrderedDict(self._tag_group_dict))
def set_name(self, name):
self._tag_group_dict["common.ALLTYPES_NAME"] = name
|
jmbeach/KEP.py | src/keppy/tag.py | Tag.name_replace | python | def name_replace(self, to_replace, replacement):
self.name = self.name.replace(to_replace, replacement) | Replaces part of tag name with new value | train | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/tag.py#L31-L33 | null | class Tag(object):
"""Represents a kepware tag"""
def __init__(self, tag_dict):
self._tag_dict = tag_dict
self.address = property(self.get_address, self.set_address)
@property
def name(self):
"""The name of the tag"""
return self._tag_dict["common.ALLTYPES_NAME"]
@property
def data_type(self):
"""Gets tag data type"""
data_type = int(self._tag_dict["servermain.TAG_DATA_TYPE"]);
return TagDataType(data_type)
def get_address(self):
"""Gets tag address"""
return self._tag_dict["servermain.TAG_ADDRESS"]
def set_address(self, value):
"""Sets tag address"""
self._tag_dict["servermain.TAG_ADDRESS"] = value
def as_dict(self):
"""Returns dictionary representation of the tag"""
return self._tag_dict
def get_array_size(self):
"""Attempts to parse array size out of the address"""
match = re.search(r"(?<=\[)\d+(?=\])", self.get_address())
return int(match.group(0))
def get_string_length(self):
"""Attempts to parse array size out of the address"""
try:
return self.get_array_size()
except:
match = re.search(r"(?<=\.)\d+", self.get_address())
try:
return int(match.group(0))
except Exception as ex:
raise Exception('Could not get string size of {0} address {1}'
.format(
self.name,
self.get_address()),
ex)
|
jmbeach/KEP.py | src/keppy/tag.py | Tag.get_array_size | python | def get_array_size(self):
match = re.search(r"(?<=\[)\d+(?=\])", self.get_address())
return int(match.group(0)) | Attempts to parse array size out of the address | train | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/tag.py#L39-L42 | [
"def get_address(self):\n \"\"\"Gets tag address\"\"\"\n return self._tag_dict[\"servermain.TAG_ADDRESS\"]\n"
] | class Tag(object):
"""Represents a kepware tag"""
def __init__(self, tag_dict):
self._tag_dict = tag_dict
self.address = property(self.get_address, self.set_address)
@property
def name(self):
"""The name of the tag"""
return self._tag_dict["common.ALLTYPES_NAME"]
@property
def data_type(self):
"""Gets tag data type"""
data_type = int(self._tag_dict["servermain.TAG_DATA_TYPE"]);
return TagDataType(data_type)
def get_address(self):
"""Gets tag address"""
return self._tag_dict["servermain.TAG_ADDRESS"]
def set_address(self, value):
"""Sets tag address"""
self._tag_dict["servermain.TAG_ADDRESS"] = value
def name_replace(self, to_replace, replacement):
"""Replaces part of tag name with new value"""
self.name = self.name.replace(to_replace, replacement)
def as_dict(self):
"""Returns dictionary representation of the tag"""
return self._tag_dict
def get_string_length(self):
"""Attempts to parse array size out of the address"""
try:
return self.get_array_size()
except:
match = re.search(r"(?<=\.)\d+", self.get_address())
try:
return int(match.group(0))
except Exception as ex:
raise Exception('Could not get string size of {0} address {1}'
.format(
self.name,
self.get_address()),
ex)
|
jmbeach/KEP.py | src/keppy/tag.py | Tag.get_string_length | python | def get_string_length(self):
try:
return self.get_array_size()
except:
match = re.search(r"(?<=\.)\d+", self.get_address())
try:
return int(match.group(0))
except Exception as ex:
raise Exception('Could not get string size of {0} address {1}'
.format(
self.name,
self.get_address()),
ex) | Attempts to parse array size out of the address | train | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/tag.py#L44-L57 | [
"def get_address(self):\n \"\"\"Gets tag address\"\"\"\n return self._tag_dict[\"servermain.TAG_ADDRESS\"]\n",
"def get_array_size(self):\n \"\"\"Attempts to parse array size out of the address\"\"\"\n match = re.search(r\"(?<=\\[)\\d+(?=\\])\", self.get_address())\n return int(match.group(0))\n"
] | class Tag(object):
"""Represents a kepware tag"""
def __init__(self, tag_dict):
self._tag_dict = tag_dict
self.address = property(self.get_address, self.set_address)
@property
def name(self):
"""The name of the tag"""
return self._tag_dict["common.ALLTYPES_NAME"]
@property
def data_type(self):
"""Gets tag data type"""
data_type = int(self._tag_dict["servermain.TAG_DATA_TYPE"]);
return TagDataType(data_type)
def get_address(self):
"""Gets tag address"""
return self._tag_dict["servermain.TAG_ADDRESS"]
def set_address(self, value):
"""Sets tag address"""
self._tag_dict["servermain.TAG_ADDRESS"] = value
def name_replace(self, to_replace, replacement):
"""Replaces part of tag name with new value"""
self.name = self.name.replace(to_replace, replacement)
def as_dict(self):
"""Returns dictionary representation of the tag"""
return self._tag_dict
def get_array_size(self):
"""Attempts to parse array size out of the address"""
match = re.search(r"(?<=\[)\d+(?=\])", self.get_address())
return int(match.group(0))
|
jmbeach/KEP.py | src/keppy/device.py | Device.set_driver_simulated | python | def set_driver_simulated(self):
self._device_dict["servermain.MULTIPLE_TYPES_DEVICE_DRIVER"] = "Simulator"
if self._is_sixteen_bit:
self._device_dict["servermain.DEVICE_MODEL"] = 0
else:
self._device_dict["servermain.DEVICE_MODEL"] = 1
self._device_dict["servermain.DEVICE_ID_OCTAL"] = 1 | Sets the device driver type to simulated | train | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/device.py#L15-L22 | null | class Device(object):
"""Represents a Kepware device"""
def __init__(self, device_dict, is_sixteen_bit, ignore_list):
self._ignore_list = ignore_list
self._device_dict = device_dict
self._tag_groups = self.parse_tag_groups()
self._tags = self.parse_tags()
self._is_sixteen_bit = is_sixteen_bit
self.set_driver_simulated()
def parse_tag_groups(self):
"""Gets an array of TagGroup objects in the Kepware device"""
tag_groups = []
if 'tag_groups' not in self._device_dict:
return tag_groups
to_remove = []
for tag_group in self._device_dict['tag_groups']:
if tag_group['common.ALLTYPES_NAME'] in self._ignore_list:
to_remove.append(tag_group)
continue
tag_groups.append(TagGroup(tag_group))
for removable in to_remove:
self._device_dict['tag_groups'].remove(removable)
return tag_groups
def parse_tags(self):
tags = []
if 'tags' not in self._device_dict:
return tags
for tag in self._device_dict['tags']:
tags.append(Tag(tag))
return tags
@property
def tag_groups(self):
"""Gets the tag groups of the device"""
return self._tag_groups
@property
def tags(self):
return self._tags
@property
def is_sixteen_bit(self):
"""Returns true if is sixteen bit device"""
return self._is_sixteen_bit
@property
def name(self):
"""Gets the name of the device"""
return self._device_dict["common.ALLTYPES_NAME"]
def as_dict(self):
"""Returns dictionary representation of the device"""
return self._device_dict
def update(self):
"""Updates the dictionary of the device"""
if "tag_groups" not in self._device_dict:
return
for group in self.tag_groups:
group.update()
for i in range(len(self._device_dict["tag_groups"])):
tag_group_dict = self._device_dict["tag_groups"][i]
for group in self.tag_groups:
if group.name == tag_group_dict["common.ALLTYPES_NAME"]:
self._device_dict["tag_groups"][i] = group.as_dict()
|
jmbeach/KEP.py | src/keppy/device.py | Device.parse_tag_groups | python | def parse_tag_groups(self):
tag_groups = []
if 'tag_groups' not in self._device_dict:
return tag_groups
to_remove = []
for tag_group in self._device_dict['tag_groups']:
if tag_group['common.ALLTYPES_NAME'] in self._ignore_list:
to_remove.append(tag_group)
continue
tag_groups.append(TagGroup(tag_group))
for removable in to_remove:
self._device_dict['tag_groups'].remove(removable)
return tag_groups | Gets an array of TagGroup objects in the Kepware device | train | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/device.py#L24-L38 | null | class Device(object):
"""Represents a Kepware device"""
def __init__(self, device_dict, is_sixteen_bit, ignore_list):
self._ignore_list = ignore_list
self._device_dict = device_dict
self._tag_groups = self.parse_tag_groups()
self._tags = self.parse_tags()
self._is_sixteen_bit = is_sixteen_bit
self.set_driver_simulated()
def set_driver_simulated(self):
"""Sets the device driver type to simulated"""
self._device_dict["servermain.MULTIPLE_TYPES_DEVICE_DRIVER"] = "Simulator"
if self._is_sixteen_bit:
self._device_dict["servermain.DEVICE_MODEL"] = 0
else:
self._device_dict["servermain.DEVICE_MODEL"] = 1
self._device_dict["servermain.DEVICE_ID_OCTAL"] = 1
def parse_tags(self):
tags = []
if 'tags' not in self._device_dict:
return tags
for tag in self._device_dict['tags']:
tags.append(Tag(tag))
return tags
@property
def tag_groups(self):
"""Gets the tag groups of the device"""
return self._tag_groups
@property
def tags(self):
return self._tags
@property
def is_sixteen_bit(self):
"""Returns true if is sixteen bit device"""
return self._is_sixteen_bit
@property
def name(self):
"""Gets the name of the device"""
return self._device_dict["common.ALLTYPES_NAME"]
def as_dict(self):
"""Returns dictionary representation of the device"""
return self._device_dict
def update(self):
"""Updates the dictionary of the device"""
if "tag_groups" not in self._device_dict:
return
for group in self.tag_groups:
group.update()
for i in range(len(self._device_dict["tag_groups"])):
tag_group_dict = self._device_dict["tag_groups"][i]
for group in self.tag_groups:
if group.name == tag_group_dict["common.ALLTYPES_NAME"]:
self._device_dict["tag_groups"][i] = group.as_dict()
|
jmbeach/KEP.py | src/keppy/device.py | Device.update | python | def update(self):
if "tag_groups" not in self._device_dict:
return
for group in self.tag_groups:
group.update()
for i in range(len(self._device_dict["tag_groups"])):
tag_group_dict = self._device_dict["tag_groups"][i]
for group in self.tag_groups:
if group.name == tag_group_dict["common.ALLTYPES_NAME"]:
self._device_dict["tag_groups"][i] = group.as_dict() | Updates the dictionary of the device | train | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/device.py#L71-L81 | null | class Device(object):
"""Represents a Kepware device"""
def __init__(self, device_dict, is_sixteen_bit, ignore_list):
self._ignore_list = ignore_list
self._device_dict = device_dict
self._tag_groups = self.parse_tag_groups()
self._tags = self.parse_tags()
self._is_sixteen_bit = is_sixteen_bit
self.set_driver_simulated()
def set_driver_simulated(self):
"""Sets the device driver type to simulated"""
self._device_dict["servermain.MULTIPLE_TYPES_DEVICE_DRIVER"] = "Simulator"
if self._is_sixteen_bit:
self._device_dict["servermain.DEVICE_MODEL"] = 0
else:
self._device_dict["servermain.DEVICE_MODEL"] = 1
self._device_dict["servermain.DEVICE_ID_OCTAL"] = 1
def parse_tag_groups(self):
"""Gets an array of TagGroup objects in the Kepware device"""
tag_groups = []
if 'tag_groups' not in self._device_dict:
return tag_groups
to_remove = []
for tag_group in self._device_dict['tag_groups']:
if tag_group['common.ALLTYPES_NAME'] in self._ignore_list:
to_remove.append(tag_group)
continue
tag_groups.append(TagGroup(tag_group))
for removable in to_remove:
self._device_dict['tag_groups'].remove(removable)
return tag_groups
def parse_tags(self):
tags = []
if 'tags' not in self._device_dict:
return tags
for tag in self._device_dict['tags']:
tags.append(Tag(tag))
return tags
@property
def tag_groups(self):
"""Gets the tag groups of the device"""
return self._tag_groups
@property
def tags(self):
return self._tags
@property
def is_sixteen_bit(self):
"""Returns true if is sixteen bit device"""
return self._is_sixteen_bit
@property
def name(self):
"""Gets the name of the device"""
return self._device_dict["common.ALLTYPES_NAME"]
def as_dict(self):
"""Returns dictionary representation of the device"""
return self._device_dict
|
jmbeach/KEP.py | src/keppy/register.py | pad_zeroes | python | def pad_zeroes(addr, n_zeroes):
if len(addr) < n_zeroes:
return pad_zeroes("0" + addr, n_zeroes)
return addr | Padds the address with zeroes | train | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/register.py#L2-L6 | [
"def pad_zeroes(addr, n_zeroes):\n \"\"\"Padds the address with zeroes\"\"\"\n if len(addr) < n_zeroes:\n return pad_zeroes(\"0\" + addr, n_zeroes)\n return addr\n"
] | """Base register module"""
def int_addr(addr):
"""Gets the integer representation of an address"""
return int(addr[1:])
def next_addr(addr, i):
"""Gets address after the current + i"""
str_addr = pad_zeroes(str(int_addr(addr) + i), len(addr[1:]))
return addr[0] + str_addr
class Register(object):
"""Represents the register of a simulator"""
def __init__(self, is_16bit, initial_address):
self._is_16bit = is_16bit
self._register_size = 8
if self._is_16bit:
self._register_size = 16
self._current_address = initial_address
self._register_map = {}
self._size_of_current_register_address = 4
self.mark_address(initial_address, self._size_of_current_register_address)
self._current_bit_address = ""
self.move_to_next_bit_address()
@property
def current_address(self):
"""Gets the current constant address of the register."""
return self._current_address
@property
def is_16bit(self):
"""Gets whether the device is 16-bit
(or else 8-bit)"""
return self._is_16bit
@property
def current_bit_address(self):
"""Gets the current bit address"""
return self._current_bit_address
def get_register_letter(self):
"""Gets the letter representing the register (R, K, or S)
"""
return self._current_address[0]
def mark_address(self, addr, size):
"""Marks address as being used in simulator"""
i = 0
while i < size:
self._register_map[addr] = True
i += 1
def is_address_in_use(self, addr):
"""Returns value which determines if register address in use"""
return self._register_map.get(addr)
def next_address_avoid_collision(self, start_addr):
"""Finds the next address recursively which does not collide with any other address"""
i = 1
while self.is_address_in_use(next_addr(start_addr, i)):
i += 1
return next_addr(start_addr, i)
def next_address(self):
"""Returns the next address after the current"""
return self.next_address_avoid_collision(self._current_address)
def move_to_next_address(self, size_of_current):
"""Moves the register's current address to the next available.
size_of_current specifies how many bytes/words to skip"""
self._size_of_current_register_address = size_of_current
self._current_address = self.next_address()
self.mark_address(self._current_address, size_of_current)
def move_to_next_bit_address(self):
"""Moves to next available bit address position"""
self._current_bit_address = self.next_bit_address()
self.mark_address(self._current_bit_address.split('.')[0], self._size_of_current_register_address)
def get_array(self, array_size):
"""Gets an array address"""
return "{0}[{1}]".format(self._current_address, array_size)
def next_bit_address(self):
"""Gets the next boolean address"""
if self._current_bit_address == "":
if self._is_16bit:
return "{0}.{1}".format(
self.next_address(),
"00")
return "{0}.{1}".format(
self.next_address(),
"0")
if self._is_16bit:
bool_half = int(self._current_bit_address.split(".")[1])
if bool_half < 4:
register_half = self._current_bit_address.split(".")[0]
return "{0}.{1}".format(
register_half,
pad_zeroes(str(bool_half + 1), 2))
self.move_to_next_address(self._size_of_current_register_address)
return "{0}.{1}".format(
self.next_address(),
"00")
bool_half = int(self._current_bit_address.split(".")[1])
if bool_half < 3:
register_half = self._current_bit_address.split(".")[0]
return "{0}.{1}".format(
register_half,
bool_half + 1)
self.move_to_next_address(self._size_of_current_register_address)
return "{0}.{1}".format(
self.next_address(),
"0")
|
jmbeach/KEP.py | src/keppy/register.py | next_addr | python | def next_addr(addr, i):
str_addr = pad_zeroes(str(int_addr(addr) + i), len(addr[1:]))
return addr[0] + str_addr | Gets address after the current + i | train | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/register.py#L12-L15 | [
"def pad_zeroes(addr, n_zeroes):\n \"\"\"Padds the address with zeroes\"\"\"\n if len(addr) < n_zeroes:\n return pad_zeroes(\"0\" + addr, n_zeroes)\n return addr\n",
"def int_addr(addr):\n \"\"\"Gets the integer representation of an address\"\"\"\n return int(addr[1:])\n"
] | """Base register module"""
def pad_zeroes(addr, n_zeroes):
"""Padds the address with zeroes"""
if len(addr) < n_zeroes:
return pad_zeroes("0" + addr, n_zeroes)
return addr
def int_addr(addr):
"""Gets the integer representation of an address"""
return int(addr[1:])
class Register(object):
"""Represents the register of a simulator"""
def __init__(self, is_16bit, initial_address):
self._is_16bit = is_16bit
self._register_size = 8
if self._is_16bit:
self._register_size = 16
self._current_address = initial_address
self._register_map = {}
self._size_of_current_register_address = 4
self.mark_address(initial_address, self._size_of_current_register_address)
self._current_bit_address = ""
self.move_to_next_bit_address()
@property
def current_address(self):
"""Gets the current constant address of the register."""
return self._current_address
@property
def is_16bit(self):
"""Gets whether the device is 16-bit
(or else 8-bit)"""
return self._is_16bit
@property
def current_bit_address(self):
"""Gets the current bit address"""
return self._current_bit_address
def get_register_letter(self):
"""Gets the letter representing the register (R, K, or S)
"""
return self._current_address[0]
def mark_address(self, addr, size):
"""Marks address as being used in simulator"""
i = 0
while i < size:
self._register_map[addr] = True
i += 1
def is_address_in_use(self, addr):
"""Returns value which determines if register address in use"""
return self._register_map.get(addr)
def next_address_avoid_collision(self, start_addr):
"""Finds the next address recursively which does not collide with any other address"""
i = 1
while self.is_address_in_use(next_addr(start_addr, i)):
i += 1
return next_addr(start_addr, i)
def next_address(self):
"""Returns the next address after the current"""
return self.next_address_avoid_collision(self._current_address)
def move_to_next_address(self, size_of_current):
"""Moves the register's current address to the next available.
size_of_current specifies how many bytes/words to skip"""
self._size_of_current_register_address = size_of_current
self._current_address = self.next_address()
self.mark_address(self._current_address, size_of_current)
def move_to_next_bit_address(self):
"""Moves to next available bit address position"""
self._current_bit_address = self.next_bit_address()
self.mark_address(self._current_bit_address.split('.')[0], self._size_of_current_register_address)
def get_array(self, array_size):
"""Gets an array address"""
return "{0}[{1}]".format(self._current_address, array_size)
def next_bit_address(self):
"""Gets the next boolean address"""
if self._current_bit_address == "":
if self._is_16bit:
return "{0}.{1}".format(
self.next_address(),
"00")
return "{0}.{1}".format(
self.next_address(),
"0")
if self._is_16bit:
bool_half = int(self._current_bit_address.split(".")[1])
if bool_half < 4:
register_half = self._current_bit_address.split(".")[0]
return "{0}.{1}".format(
register_half,
pad_zeroes(str(bool_half + 1), 2))
self.move_to_next_address(self._size_of_current_register_address)
return "{0}.{1}".format(
self.next_address(),
"00")
bool_half = int(self._current_bit_address.split(".")[1])
if bool_half < 3:
register_half = self._current_bit_address.split(".")[0]
return "{0}.{1}".format(
register_half,
bool_half + 1)
self.move_to_next_address(self._size_of_current_register_address)
return "{0}.{1}".format(
self.next_address(),
"0")
|
jmbeach/KEP.py | src/keppy/register.py | Register.mark_address | python | def mark_address(self, addr, size):
i = 0
while i < size:
self._register_map[addr] = True
i += 1 | Marks address as being used in simulator | train | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/register.py#L53-L58 | null | class Register(object):
"""Represents the register of a simulator"""
def __init__(self, is_16bit, initial_address):
self._is_16bit = is_16bit
self._register_size = 8
if self._is_16bit:
self._register_size = 16
self._current_address = initial_address
self._register_map = {}
self._size_of_current_register_address = 4
self.mark_address(initial_address, self._size_of_current_register_address)
self._current_bit_address = ""
self.move_to_next_bit_address()
@property
def current_address(self):
"""Gets the current constant address of the register."""
return self._current_address
@property
def is_16bit(self):
"""Gets whether the device is 16-bit
(or else 8-bit)"""
return self._is_16bit
@property
def current_bit_address(self):
"""Gets the current bit address"""
return self._current_bit_address
def get_register_letter(self):
"""Gets the letter representing the register (R, K, or S)
"""
return self._current_address[0]
def is_address_in_use(self, addr):
"""Returns value which determines if register address in use"""
return self._register_map.get(addr)
def next_address_avoid_collision(self, start_addr):
"""Finds the next address recursively which does not collide with any other address"""
i = 1
while self.is_address_in_use(next_addr(start_addr, i)):
i += 1
return next_addr(start_addr, i)
def next_address(self):
"""Returns the next address after the current"""
return self.next_address_avoid_collision(self._current_address)
def move_to_next_address(self, size_of_current):
"""Moves the register's current address to the next available.
size_of_current specifies how many bytes/words to skip"""
self._size_of_current_register_address = size_of_current
self._current_address = self.next_address()
self.mark_address(self._current_address, size_of_current)
def move_to_next_bit_address(self):
"""Moves to next available bit address position"""
self._current_bit_address = self.next_bit_address()
self.mark_address(self._current_bit_address.split('.')[0], self._size_of_current_register_address)
def get_array(self, array_size):
"""Gets an array address"""
return "{0}[{1}]".format(self._current_address, array_size)
def next_bit_address(self):
"""Gets the next boolean address"""
if self._current_bit_address == "":
if self._is_16bit:
return "{0}.{1}".format(
self.next_address(),
"00")
return "{0}.{1}".format(
self.next_address(),
"0")
if self._is_16bit:
bool_half = int(self._current_bit_address.split(".")[1])
if bool_half < 4:
register_half = self._current_bit_address.split(".")[0]
return "{0}.{1}".format(
register_half,
pad_zeroes(str(bool_half + 1), 2))
self.move_to_next_address(self._size_of_current_register_address)
return "{0}.{1}".format(
self.next_address(),
"00")
bool_half = int(self._current_bit_address.split(".")[1])
if bool_half < 3:
register_half = self._current_bit_address.split(".")[0]
return "{0}.{1}".format(
register_half,
bool_half + 1)
self.move_to_next_address(self._size_of_current_register_address)
return "{0}.{1}".format(
self.next_address(),
"0")
|
jmbeach/KEP.py | src/keppy/register.py | Register.next_address_avoid_collision | python | def next_address_avoid_collision(self, start_addr):
i = 1
while self.is_address_in_use(next_addr(start_addr, i)):
i += 1
return next_addr(start_addr, i) | Finds the next address recursively which does not collide with any other address | train | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/register.py#L64-L69 | [
"def next_addr(addr, i):\n \"\"\"Gets address after the current + i\"\"\"\n str_addr = pad_zeroes(str(int_addr(addr) + i), len(addr[1:]))\n return addr[0] + str_addr\n",
"def is_address_in_use(self, addr):\n \"\"\"Returns value which determines if register address in use\"\"\"\n return self._regist... | class Register(object):
"""Represents the register of a simulator"""
def __init__(self, is_16bit, initial_address):
self._is_16bit = is_16bit
self._register_size = 8
if self._is_16bit:
self._register_size = 16
self._current_address = initial_address
self._register_map = {}
self._size_of_current_register_address = 4
self.mark_address(initial_address, self._size_of_current_register_address)
self._current_bit_address = ""
self.move_to_next_bit_address()
@property
def current_address(self):
"""Gets the current constant address of the register."""
return self._current_address
@property
def is_16bit(self):
"""Gets whether the device is 16-bit
(or else 8-bit)"""
return self._is_16bit
@property
def current_bit_address(self):
"""Gets the current bit address"""
return self._current_bit_address
def get_register_letter(self):
"""Gets the letter representing the register (R, K, or S)
"""
return self._current_address[0]
def mark_address(self, addr, size):
"""Marks address as being used in simulator"""
i = 0
while i < size:
self._register_map[addr] = True
i += 1
def is_address_in_use(self, addr):
"""Returns value which determines if register address in use"""
return self._register_map.get(addr)
def next_address(self):
"""Returns the next address after the current"""
return self.next_address_avoid_collision(self._current_address)
def move_to_next_address(self, size_of_current):
"""Moves the register's current address to the next available.
size_of_current specifies how many bytes/words to skip"""
self._size_of_current_register_address = size_of_current
self._current_address = self.next_address()
self.mark_address(self._current_address, size_of_current)
def move_to_next_bit_address(self):
"""Moves to next available bit address position"""
self._current_bit_address = self.next_bit_address()
self.mark_address(self._current_bit_address.split('.')[0], self._size_of_current_register_address)
def get_array(self, array_size):
"""Gets an array address"""
return "{0}[{1}]".format(self._current_address, array_size)
def next_bit_address(self):
"""Gets the next boolean address"""
if self._current_bit_address == "":
if self._is_16bit:
return "{0}.{1}".format(
self.next_address(),
"00")
return "{0}.{1}".format(
self.next_address(),
"0")
if self._is_16bit:
bool_half = int(self._current_bit_address.split(".")[1])
if bool_half < 4:
register_half = self._current_bit_address.split(".")[0]
return "{0}.{1}".format(
register_half,
pad_zeroes(str(bool_half + 1), 2))
self.move_to_next_address(self._size_of_current_register_address)
return "{0}.{1}".format(
self.next_address(),
"00")
bool_half = int(self._current_bit_address.split(".")[1])
if bool_half < 3:
register_half = self._current_bit_address.split(".")[0]
return "{0}.{1}".format(
register_half,
bool_half + 1)
self.move_to_next_address(self._size_of_current_register_address)
return "{0}.{1}".format(
self.next_address(),
"0")
|
jmbeach/KEP.py | src/keppy/register.py | Register.move_to_next_address | python | def move_to_next_address(self, size_of_current):
self._size_of_current_register_address = size_of_current
self._current_address = self.next_address()
self.mark_address(self._current_address, size_of_current) | Moves the register's current address to the next available.
size_of_current specifies how many bytes/words to skip | train | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/register.py#L75-L80 | [
"def mark_address(self, addr, size):\n \"\"\"Marks address as being used in simulator\"\"\"\n i = 0\n while i < size:\n self._register_map[addr] = True\n i += 1\n",
"def next_address(self):\n \"\"\"Returns the next address after the current\"\"\"\n return self.next_address_avoid_colli... | class Register(object):
"""Represents the register of a simulator"""
def __init__(self, is_16bit, initial_address):
self._is_16bit = is_16bit
self._register_size = 8
if self._is_16bit:
self._register_size = 16
self._current_address = initial_address
self._register_map = {}
self._size_of_current_register_address = 4
self.mark_address(initial_address, self._size_of_current_register_address)
self._current_bit_address = ""
self.move_to_next_bit_address()
@property
def current_address(self):
"""Gets the current constant address of the register."""
return self._current_address
@property
def is_16bit(self):
"""Gets whether the device is 16-bit
(or else 8-bit)"""
return self._is_16bit
@property
def current_bit_address(self):
"""Gets the current bit address"""
return self._current_bit_address
def get_register_letter(self):
"""Gets the letter representing the register (R, K, or S)
"""
return self._current_address[0]
def mark_address(self, addr, size):
"""Marks address as being used in simulator"""
i = 0
while i < size:
self._register_map[addr] = True
i += 1
def is_address_in_use(self, addr):
"""Returns value which determines if register address in use"""
return self._register_map.get(addr)
def next_address_avoid_collision(self, start_addr):
"""Finds the next address recursively which does not collide with any other address"""
i = 1
while self.is_address_in_use(next_addr(start_addr, i)):
i += 1
return next_addr(start_addr, i)
def next_address(self):
"""Returns the next address after the current"""
return self.next_address_avoid_collision(self._current_address)
def move_to_next_bit_address(self):
"""Moves to next available bit address position"""
self._current_bit_address = self.next_bit_address()
self.mark_address(self._current_bit_address.split('.')[0], self._size_of_current_register_address)
def get_array(self, array_size):
"""Gets an array address"""
return "{0}[{1}]".format(self._current_address, array_size)
def next_bit_address(self):
"""Gets the next boolean address"""
if self._current_bit_address == "":
if self._is_16bit:
return "{0}.{1}".format(
self.next_address(),
"00")
return "{0}.{1}".format(
self.next_address(),
"0")
if self._is_16bit:
bool_half = int(self._current_bit_address.split(".")[1])
if bool_half < 4:
register_half = self._current_bit_address.split(".")[0]
return "{0}.{1}".format(
register_half,
pad_zeroes(str(bool_half + 1), 2))
self.move_to_next_address(self._size_of_current_register_address)
return "{0}.{1}".format(
self.next_address(),
"00")
bool_half = int(self._current_bit_address.split(".")[1])
if bool_half < 3:
register_half = self._current_bit_address.split(".")[0]
return "{0}.{1}".format(
register_half,
bool_half + 1)
self.move_to_next_address(self._size_of_current_register_address)
return "{0}.{1}".format(
self.next_address(),
"0")
|
jmbeach/KEP.py | src/keppy/register.py | Register.move_to_next_bit_address | python | def move_to_next_bit_address(self):
self._current_bit_address = self.next_bit_address()
self.mark_address(self._current_bit_address.split('.')[0], self._size_of_current_register_address) | Moves to next available bit address position | train | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/register.py#L82-L85 | [
"def mark_address(self, addr, size):\n \"\"\"Marks address as being used in simulator\"\"\"\n i = 0\n while i < size:\n self._register_map[addr] = True\n i += 1\n",
"def next_bit_address(self):\n \"\"\"Gets the next boolean address\"\"\"\n if self._current_bit_address == \"\":\n ... | class Register(object):
"""Represents the register of a simulator"""
def __init__(self, is_16bit, initial_address):
self._is_16bit = is_16bit
self._register_size = 8
if self._is_16bit:
self._register_size = 16
self._current_address = initial_address
self._register_map = {}
self._size_of_current_register_address = 4
self.mark_address(initial_address, self._size_of_current_register_address)
self._current_bit_address = ""
self.move_to_next_bit_address()
@property
def current_address(self):
"""Gets the current constant address of the register."""
return self._current_address
@property
def is_16bit(self):
"""Gets whether the device is 16-bit
(or else 8-bit)"""
return self._is_16bit
@property
def current_bit_address(self):
"""Gets the current bit address"""
return self._current_bit_address
def get_register_letter(self):
"""Gets the letter representing the register (R, K, or S)
"""
return self._current_address[0]
def mark_address(self, addr, size):
"""Marks address as being used in simulator"""
i = 0
while i < size:
self._register_map[addr] = True
i += 1
def is_address_in_use(self, addr):
"""Returns value which determines if register address in use"""
return self._register_map.get(addr)
def next_address_avoid_collision(self, start_addr):
"""Finds the next address recursively which does not collide with any other address"""
i = 1
while self.is_address_in_use(next_addr(start_addr, i)):
i += 1
return next_addr(start_addr, i)
def next_address(self):
"""Returns the next address after the current"""
return self.next_address_avoid_collision(self._current_address)
def move_to_next_address(self, size_of_current):
"""Moves the register's current address to the next available.
size_of_current specifies how many bytes/words to skip"""
self._size_of_current_register_address = size_of_current
self._current_address = self.next_address()
self.mark_address(self._current_address, size_of_current)
def get_array(self, array_size):
"""Gets an array address"""
return "{0}[{1}]".format(self._current_address, array_size)
def next_bit_address(self):
    """Gets the next boolean address

    Bit addresses have the form "<register>.<bit>": 16-bit devices use a
    zero-padded two-digit bit index (e.g. "R0001.00"), 8-bit devices a
    single digit (e.g. "R0001.0").
    """
    # Nothing allocated yet: start at bit 0 of the next free register.
    if self._current_bit_address == "":
        if self._is_16bit:
            return "{0}.{1}".format(
                self.next_address(),
                "00")
        return "{0}.{1}".format(
            self.next_address(),
            "0")
    if self._is_16bit:
        bool_half = int(self._current_bit_address.split(".")[1])
        # Room left in the current register: bump only the bit index.
        # NOTE(review): "< 4" allows bit indices 00..04 (5 bits) per
        # 16-bit register, which looks low for a 16-bit word -- confirm.
        if bool_half < 4:
            register_half = self._current_bit_address.split(".")[0]
            return "{0}.{1}".format(
                register_half,
                pad_zeroes(str(bool_half + 1), 2))
        # Register full: reserve it and restart at bit 00 of a fresh one.
        self.move_to_next_address(self._size_of_current_register_address)
        return "{0}.{1}".format(
            self.next_address(),
            "00")
    # 8-bit device: same scheme with a single-digit, unpadded bit index.
    bool_half = int(self._current_bit_address.split(".")[1])
    # NOTE(review): "< 3" allows bit indices 0..3 (4 bits) per 8-bit
    # register -- confirm this bound is intended.
    if bool_half < 3:
        register_half = self._current_bit_address.split(".")[0]
        return "{0}.{1}".format(
            register_half,
            bool_half + 1)
    self.move_to_next_address(self._size_of_current_register_address)
    return "{0}.{1}".format(
        self.next_address(),
        "0")
|
jmbeach/KEP.py | src/keppy/register.py | Register.next_bit_address | python | def next_bit_address(self):
if self._current_bit_address == "":
if self._is_16bit:
return "{0}.{1}".format(
self.next_address(),
"00")
return "{0}.{1}".format(
self.next_address(),
"0")
if self._is_16bit:
bool_half = int(self._current_bit_address.split(".")[1])
if bool_half < 4:
register_half = self._current_bit_address.split(".")[0]
return "{0}.{1}".format(
register_half,
pad_zeroes(str(bool_half + 1), 2))
self.move_to_next_address(self._size_of_current_register_address)
return "{0}.{1}".format(
self.next_address(),
"00")
bool_half = int(self._current_bit_address.split(".")[1])
if bool_half < 3:
register_half = self._current_bit_address.split(".")[0]
return "{0}.{1}".format(
register_half,
bool_half + 1)
self.move_to_next_address(self._size_of_current_register_address)
return "{0}.{1}".format(
self.next_address(),
"0") | Gets the next boolean address | train | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/register.py#L91-L121 | [
"def next_address(self):\n \"\"\"Returns the next address after the current\"\"\"\n return self.next_address_avoid_collision(self._current_address)\n"
] | class Register(object):
"""Represents the register of a simulator"""
def __init__(self, is_16bit, initial_address):
    """Set up register bookkeeping.

    :param is_16bit: True for a 16-bit device, False for 8-bit.
    :param initial_address: first register address (e.g. "R0001");
        it is marked as in use immediately.
    """
    self._is_16bit = is_16bit
    # Word size of the device: 16 for 16-bit devices, otherwise 8.
    self._register_size = 8
    if self._is_16bit:
        self._register_size = 16
    self._current_address = initial_address
    # Maps address string -> True for every address handed out so far.
    self._register_map = {}
    # Width reserved for the current register value -- presumably the
    # number of consecutive addresses it occupies (TODO confirm).
    self._size_of_current_register_address = 4
    self.mark_address(initial_address, self._size_of_current_register_address)
    # Bit addresses look like "<register>.<bit>"; start empty so the
    # call below picks the first slot.
    self._current_bit_address = ""
    self.move_to_next_bit_address()
@property
def current_address(self):
    """Gets the current constant address of the register."""
    return self._current_address

@property
def is_16bit(self):
    """Gets whether the device is 16-bit
    (or else 8-bit)"""
    return self._is_16bit

@property
def current_bit_address(self):
    """Gets the current bit address"""
    return self._current_bit_address

def get_register_letter(self):
    """Gets the letter representing the register (R, K, or S)
    """
    # Addresses are letter-prefixed (e.g. "R0001"), so the first
    # character identifies the register type.
    return self._current_address[0]
def mark_address(self, addr, size):
    """Marks address as being used in simulator"""
    # NOTE(review): this loop writes the *same* key `size` times, so only
    # `addr` itself is ever marked.  It looks like the intent was to mark
    # `size` consecutive addresses (cf. the module-level next_addr helper)
    # -- confirm before relying on range reservation.  Also note that when
    # size == 0 the loop never runs and nothing is marked at all.
    i = 0
    while i < size:
        self._register_map[addr] = True
        i += 1
def is_address_in_use(self, addr):
    """Returns value which determines if register address in use"""
    # True when the address was marked; None (falsy) for unknown ones.
    return self._register_map.get(addr)
def next_address_avoid_collision(self, start_addr):
    """Return the first address after start_addr not already in use.

    Probes start_addr + 1, start_addr + 2, ... until an unmarked
    address is found.
    """
    offset = 1
    candidate = next_addr(start_addr, offset)
    while self.is_address_in_use(candidate):
        offset += 1
        candidate = next_addr(start_addr, offset)
    return candidate
def next_address(self):
    """Returns the next address after the current"""
    # Delegates to the collision-avoiding search, starting from the
    # current address.
    return self.next_address_avoid_collision(self._current_address)
def move_to_next_address(self, size_of_current):
    """Moves the register's current address to the next available.
    size_of_current specifies how many bytes/words to skip"""
    self._size_of_current_register_address = size_of_current
    # Pick the next free address, then reserve it in the map.
    self._current_address = self.next_address()
    self.mark_address(self._current_address, size_of_current)
def move_to_next_bit_address(self):
    """Moves to next available bit address position"""
    self._current_bit_address = self.next_bit_address()
    # Reserve the register word hosting this bit (the part before '.')
    # so plain-address allocation will not reuse it.
    self.mark_address(self._current_bit_address.split('.')[0], self._size_of_current_register_address)
def get_array(self, array_size):
    """Gets an array address"""
    # e.g. current address "R0001" with array_size 8 -> "R0001[8]".
    return "{0}[{1}]".format(self._current_address, array_size)
|
mlavin/argyle | argyle/nginx.py | upload_nginx_site_conf | python | def upload_nginx_site_conf(site_name, template_name=None, context=None, enable=True):
template_name = template_name or [u'nginx/%s.conf' % site_name, u'nginx/site.conf']
site_available = u'/etc/nginx/sites-available/%s' % site_name
upload_template(template_name, site_available, context=context, use_sudo=True)
if enable:
enable_site(site_name) | Upload Nginx site configuration from a template. | train | https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/nginx.py#L15-L22 | [
"def upload_template(filename, destination, context=None,\n use_sudo=False, backup=True, mode=None):\n func = use_sudo and sudo or run\n # Process template\n loaders = []\n template_dirs = getattr(env, 'ARGYLE_TEMPLATE_DIRS', ())\n if template_dirs:\n loaders.append(FileSystemLoader(templat... | from argyle.base import upload_template
from argyle.system import restart_service
from fabric.api import abort, sudo, task
from fabric.contrib import files
@task
def remove_default_site():
    """Remove the default Nginx site if it exists."""
    # "default" is the site the distribution package ships enabled.
    disable_site('default')
@task
def upload_nginx_site_conf(site_name, template_name=None, context=None, enable=True):
    """Render a template into sites-available for site_name.

    When no template_name is given, tries a site-specific template first
    and then the generic site template.  The site is enabled afterwards
    unless enable is false.
    """
    templates = template_name or [u'nginx/%s.conf' % site_name, u'nginx/site.conf']
    destination = u'/etc/nginx/sites-available/%s' % site_name
    upload_template(templates, destination, context=context, use_sudo=True)
    if enable:
        enable_site(site_name)
@task
def enable_site(site_name):
    """Symlink a sites-available config into sites-enabled and restart Nginx.

    Aborts when the named configuration has not been uploaded yet.
    """
    available = u'/etc/nginx/sites-available/%s' % site_name
    enabled = u'/etc/nginx/sites-enabled/%s' % site_name
    # Guard clause: abort() raises, so nothing below runs on failure.
    if not files.exists(available):
        abort(u'%s site configuration is not available' % site_name)
    sudo(u'ln -s -f %s %s' % (available, enabled))
    restart_service(u'nginx')
@task
def disable_site(site_name):
    """Drop a site's sites-enabled symlink and restart Nginx.

    A no-op when the site is not currently enabled.
    """
    enabled_link = u'/etc/nginx/sites-enabled/%s' % site_name
    if files.exists(enabled_link):
        sudo(u'rm %s' % enabled_link)
        restart_service(u'nginx')
|
mlavin/argyle | argyle/nginx.py | enable_site | python | def enable_site(site_name):
site_available = u'/etc/nginx/sites-available/%s' % site_name
site_enabled = u'/etc/nginx/sites-enabled/%s' % site_name
if files.exists(site_available):
sudo(u'ln -s -f %s %s' % (site_available, site_enabled))
restart_service(u'nginx')
else:
abort(u'%s site configuration is not available' % site_name) | Enable an available Nginx site. | train | https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/nginx.py#L26-L35 | null | from argyle.base import upload_template
from argyle.system import restart_service
from fabric.api import abort, sudo, task
from fabric.contrib import files
@task
def remove_default_site():
"""Remove the default Nginx site if it exists."""
disable_site('default')
@task
def upload_nginx_site_conf(site_name, template_name=None, context=None, enable=True):
"""Upload Nginx site configuration from a template."""
template_name = template_name or [u'nginx/%s.conf' % site_name, u'nginx/site.conf']
site_available = u'/etc/nginx/sites-available/%s' % site_name
upload_template(template_name, site_available, context=context, use_sudo=True)
if enable:
enable_site(site_name)
@task
@task
def disable_site(site_name):
"""Disables Nginx site configuration."""
site = u'/etc/nginx/sites-enabled/%s' % site_name
if files.exists(site):
sudo(u'rm %s' % site)
restart_service(u'nginx')
|
mlavin/argyle | argyle/nginx.py | disable_site | python | def disable_site(site_name):
site = u'/etc/nginx/sites-enabled/%s' % site_name
if files.exists(site):
sudo(u'rm %s' % site)
restart_service(u'nginx') | Disables Nginx site configuration. | train | https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/nginx.py#L39-L45 | null | from argyle.base import upload_template
from argyle.system import restart_service
from fabric.api import abort, sudo, task
from fabric.contrib import files
@task
def remove_default_site():
"""Remove the default Nginx site if it exists."""
disable_site('default')
@task
def upload_nginx_site_conf(site_name, template_name=None, context=None, enable=True):
"""Upload Nginx site configuration from a template."""
template_name = template_name or [u'nginx/%s.conf' % site_name, u'nginx/site.conf']
site_available = u'/etc/nginx/sites-available/%s' % site_name
upload_template(template_name, site_available, context=context, use_sudo=True)
if enable:
enable_site(site_name)
@task
def enable_site(site_name):
"""Enable an available Nginx site."""
site_available = u'/etc/nginx/sites-available/%s' % site_name
site_enabled = u'/etc/nginx/sites-enabled/%s' % site_name
if files.exists(site_available):
sudo(u'ln -s -f %s %s' % (site_available, site_enabled))
restart_service(u'nginx')
else:
abort(u'%s site configuration is not available' % site_name)
@task
|
mlavin/argyle | argyle/system.py | add_ppas_from_file | python | def add_ppas_from_file(file_name, update=True):
for ppa in _read_lines_from_file(file_name):
add_ppa(ppa, update=False)
if update:
update_apt_sources() | Add personal package archive from a file list. | train | https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/system.py#L66-L72 | [
"def _read_lines_from_file(file_name):\n with open(file_name) as f:\n packages = f.readlines()\n return map(lambda x: x.strip('\\n\\r'), packages)\n"
] | import re
from fabric.api import put, sudo, task, env, hide, settings, run
from fabric.contrib import files
def _read_lines_from_file(file_name):
    """Return the lines of file_name with line endings stripped.

    Returns a list rather than the previous map()/lambda result, which
    under Python 3 was a one-shot iterator; callers that iterate or
    star-unpack the result are unaffected, and the list can be reused.

    :param file_name: path of a local text file (one entry per line).
    """
    with open(file_name) as f:
        # Strip CR/LF from each line; blank lines stay as empty strings.
        return [line.strip('\n\r') for line in f]
def user_exists(username):
    """Return a truthy value when username exists on the remote host.

    The truthy value is the matching /etc/passwd line; an empty (falsy)
    result means no such user.
    """
    exists = False
    with settings(hide('everything'), warn_only=True):
        # Anchor on the trailing ':' (passwd fields are colon-delimited)
        # so e.g. "bob" does not match "bobby".  Note grep still treats
        # regex metacharacters in username specially.
        exists = run(u"grep ^%s: /etc/passwd" % username)
    return exists
def group_exists(name):
    """Return a truthy value when the group exists on the remote host.

    The truthy value is the matching /etc/group line; an empty (falsy)
    result means no such group.
    """
    exists = False
    with settings(hide('everything'), warn_only=True):
        # Anchor on the trailing ':' (group fields are colon-delimited)
        # so e.g. "adm" does not match "admin".
        exists = run(u"grep ^%s: /etc/group" % name)
    return exists
@task
def install_packages(*packages):
    """Install apt packages from a list."""
    # One apt-get invocation for all names; -y answers prompts
    # non-interactively.
    sudo(u"apt-get install -y %s" % u" ".join(packages))
@task
def install_packages_from_file(file_name):
    """Install apt packages from a file list."""
    # file_name holds one package name per line.
    install_packages(*_read_lines_from_file(file_name))
@task
def update_apt_sources():
    """Update apt source."""
    # Refreshes the package index from all configured sources.
    sudo(u"apt-get update")
@task
def upgrade_apt_packages():
    """Safe upgrade of all packages."""
    # Refresh the index first so the upgrade sees current versions.
    update_apt_sources()
    # Plain "upgrade" (not dist-upgrade): never removes packages.
    sudo(u"apt-get upgrade -y")
@task
def add_ppa(name, update=True):
    """Add personal package archive."""
    # name is passed straight to add-apt-repository
    # (e.g. a "ppa:user/archive" spec).
    sudo(u"add-apt-repository %s" % name)
    if update:
        update_apt_sources()
@task
@task
def add_apt_source(source, key=None, update=True):
    """Append a source line to apt's sources.list, optionally adding its key."""
    sources_path = u'/etc/apt/sources.list'
    # Back the list up before appending to it.
    sudo("cp %s{,.bak}" % sources_path)
    files.append(sources_path, source, use_sudo=True)
    if key:
        # Fetch the signing key from its url and hand it to apt-key.
        sudo(u"wget -q %s -O - | sudo apt-key add -" % key)
    if update:
        update_apt_sources()
@task
def add_sources_from_file(file_name, update=True):
    """
    Add apt sources (with optional key urls) read from file_name.

    Each line holds the source entry, optionally followed by its key url
    in parentheses, e.g.:

        deb http://example.com/deb lucid main (http://example.com/key)
    """
    line_format = re.compile(r'(?P<source>[^()]*)(\s+\((?P<key>.*)\))?$')
    for entry in _read_lines_from_file(file_name):
        parsed = line_format.match(entry).groupdict()
        # Defer the index refresh until every source has been added.
        parsed['update'] = False
        add_apt_source(**parsed)
    if update:
        update_apt_sources()
@task
def create_user(name, groups=None, key_file=None):
    """Create a user. Adds a key file to authorized_keys if given."""
    groups = groups or []
    if not user_exists(name):
        # Make sure every requested group exists before useradd sees it.
        for group in groups:
            if not group_exists(group):
                sudo(u"addgroup %s" % group)
        group_flag = u'-G %s' % u','.join(groups) if groups else ''
        sudo(u"useradd -m %s -s /bin/bash %s" % (group_flag, name))
        # Remove the password so only key-based logins work.
        sudo(u"passwd -d %s" % name)
    if key_file:
        sudo(u"mkdir -p /home/%s/.ssh" % name)
        put(key_file, u"/home/%s/.ssh/authorized_keys" % name, use_sudo=True)
        sudo(u"chown -R %(name)s:%(name)s /home/%(name)s/.ssh" % {'name': name})
@task
def service_command(name, command):
    """Run an init.d/upstart command."""
    # The command line can be overridden via
    # env.ARGYLE_SERVICE_COMMAND_TEMPLATE (a %-format template taking
    # "name" and "command").
    service_command_template = getattr(env, 'ARGYLE_SERVICE_COMMAND_TEMPLATE',
        u'/etc/init.d/%(name)s %(command)s')
    # pty=False keeps long-running daemons detached from the
    # pseudo-terminal Fabric would otherwise allocate.
    sudo(service_command_template % {'name': name,
        'command': command}, pty=False)
@task
def start_service(name):
    """Start an init.d service."""
    # Delegates to service_command, which honors the configurable
    # command template.
    service_command(name, u"start")
@task
def stop_service(name):
    """Stop an init.d service."""
    # Delegates to service_command, which honors the configurable
    # command template.
    service_command(name, u"stop")
@task
def restart_service(name):
    """Restart an init.d service."""
    # Delegates to service_command, which honors the configurable
    # command template.
    service_command(name, u"restart")
|
mlavin/argyle | argyle/system.py | add_apt_source | python | def add_apt_source(source, key=None, update=True):
# Make a backup of list
source_list = u'/etc/apt/sources.list'
sudo("cp %s{,.bak}" % source_list)
files.append(source_list, source, use_sudo=True)
if key:
# Fecth key from url and add
sudo(u"wget -q %s -O - | sudo apt-key add -" % key)
if update:
update_apt_sources() | Adds source url to apt sources.list. Optional to pass the key url. | train | https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/system.py#L76-L87 | null | import re
from fabric.api import put, sudo, task, env, hide, settings, run
from fabric.contrib import files
def _read_lines_from_file(file_name):
with open(file_name) as f:
packages = f.readlines()
return map(lambda x: x.strip('\n\r'), packages)
def user_exists(username):
exists = False
with settings(hide('everything'), warn_only=True):
exists = run(u"grep ^%s /etc/passwd" % username)
return exists
def group_exists(name):
exists = False
with settings(hide('everything'), warn_only=True):
exists = run(u"grep ^%s /etc/group" % name)
return exists
@task
def install_packages(*packages):
"""Install apt packages from a list."""
sudo(u"apt-get install -y %s" % u" ".join(packages))
@task
def install_packages_from_file(file_name):
"""Install apt packages from a file list."""
install_packages(*_read_lines_from_file(file_name))
@task
def update_apt_sources():
"""Update apt source."""
sudo(u"apt-get update")
@task
def upgrade_apt_packages():
"""Safe upgrade of all packages."""
update_apt_sources()
sudo(u"apt-get upgrade -y")
@task
def add_ppa(name, update=True):
"""Add personal package archive."""
sudo(u"add-apt-repository %s" % name)
if update:
update_apt_sources()
@task
def add_ppas_from_file(file_name, update=True):
"""Add personal package archive from a file list."""
for ppa in _read_lines_from_file(file_name):
add_ppa(ppa, update=False)
if update:
update_apt_sources()
@task
@task
def add_sources_from_file(file_name, update=True):
"""
Add source urls from a file list.
The file should contain the source line to add followed by the
key url, if any, enclosed in parentheses.
Ex:
deb http://example.com/deb lucid main (http://example.com/key)
"""
key_regex = re.compile(r'(?P<source>[^()]*)(\s+\((?P<key>.*)\))?$')
for line in _read_lines_from_file(file_name):
kwargs = key_regex.match(line).groupdict()
kwargs['update'] = False
add_apt_source(**kwargs)
if update:
update_apt_sources()
@task
def create_user(name, groups=None, key_file=None):
"""Create a user. Adds a key file to authorized_keys if given."""
groups = groups or []
if not user_exists(name):
for group in groups:
if not group_exists(group):
sudo(u"addgroup %s" % group)
groups = groups and u'-G %s' % u','.join(groups) or ''
sudo(u"useradd -m %s -s /bin/bash %s" % (groups, name))
sudo(u"passwd -d %s" % name)
if key_file:
sudo(u"mkdir -p /home/%s/.ssh" % name)
put(key_file, u"/home/%s/.ssh/authorized_keys" % name, use_sudo=True)
sudo(u"chown -R %(name)s:%(name)s /home/%(name)s/.ssh" % {'name': name})
@task
def service_command(name, command):
"""Run an init.d/upstart command."""
service_command_template = getattr(env, 'ARGYLE_SERVICE_COMMAND_TEMPLATE',
u'/etc/init.d/%(name)s %(command)s')
sudo(service_command_template % {'name': name,
'command': command}, pty=False)
@task
def start_service(name):
"""Start an init.d service."""
service_command(name, u"start")
@task
def stop_service(name):
"""Stop an init.d service."""
service_command(name, u"stop")
@task
def restart_service(name):
"""Restart an init.d service."""
service_command(name, u"restart")
|
mlavin/argyle | argyle/system.py | add_sources_from_file | python | def add_sources_from_file(file_name, update=True):
key_regex = re.compile(r'(?P<source>[^()]*)(\s+\((?P<key>.*)\))?$')
for line in _read_lines_from_file(file_name):
kwargs = key_regex.match(line).groupdict()
kwargs['update'] = False
add_apt_source(**kwargs)
if update:
update_apt_sources() | Add source urls from a file list.
The file should contain the source line to add followed by the
key url, if any, enclosed in parentheses.
Ex:
deb http://example.com/deb lucid main (http://example.com/key) | train | https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/system.py#L91-L107 | [
"def _read_lines_from_file(file_name):\n with open(file_name) as f:\n packages = f.readlines()\n return map(lambda x: x.strip('\\n\\r'), packages)\n"
] | import re
from fabric.api import put, sudo, task, env, hide, settings, run
from fabric.contrib import files
def _read_lines_from_file(file_name):
with open(file_name) as f:
packages = f.readlines()
return map(lambda x: x.strip('\n\r'), packages)
def user_exists(username):
exists = False
with settings(hide('everything'), warn_only=True):
exists = run(u"grep ^%s /etc/passwd" % username)
return exists
def group_exists(name):
exists = False
with settings(hide('everything'), warn_only=True):
exists = run(u"grep ^%s /etc/group" % name)
return exists
@task
def install_packages(*packages):
"""Install apt packages from a list."""
sudo(u"apt-get install -y %s" % u" ".join(packages))
@task
def install_packages_from_file(file_name):
"""Install apt packages from a file list."""
install_packages(*_read_lines_from_file(file_name))
@task
def update_apt_sources():
"""Update apt source."""
sudo(u"apt-get update")
@task
def upgrade_apt_packages():
"""Safe upgrade of all packages."""
update_apt_sources()
sudo(u"apt-get upgrade -y")
@task
def add_ppa(name, update=True):
"""Add personal package archive."""
sudo(u"add-apt-repository %s" % name)
if update:
update_apt_sources()
@task
def add_ppas_from_file(file_name, update=True):
"""Add personal package archive from a file list."""
for ppa in _read_lines_from_file(file_name):
add_ppa(ppa, update=False)
if update:
update_apt_sources()
@task
def add_apt_source(source, key=None, update=True):
"""Adds source url to apt sources.list. Optional to pass the key url."""
# Make a backup of list
source_list = u'/etc/apt/sources.list'
sudo("cp %s{,.bak}" % source_list)
files.append(source_list, source, use_sudo=True)
if key:
# Fecth key from url and add
sudo(u"wget -q %s -O - | sudo apt-key add -" % key)
if update:
update_apt_sources()
@task
@task
def create_user(name, groups=None, key_file=None):
"""Create a user. Adds a key file to authorized_keys if given."""
groups = groups or []
if not user_exists(name):
for group in groups:
if not group_exists(group):
sudo(u"addgroup %s" % group)
groups = groups and u'-G %s' % u','.join(groups) or ''
sudo(u"useradd -m %s -s /bin/bash %s" % (groups, name))
sudo(u"passwd -d %s" % name)
if key_file:
sudo(u"mkdir -p /home/%s/.ssh" % name)
put(key_file, u"/home/%s/.ssh/authorized_keys" % name, use_sudo=True)
sudo(u"chown -R %(name)s:%(name)s /home/%(name)s/.ssh" % {'name': name})
@task
def service_command(name, command):
"""Run an init.d/upstart command."""
service_command_template = getattr(env, 'ARGYLE_SERVICE_COMMAND_TEMPLATE',
u'/etc/init.d/%(name)s %(command)s')
sudo(service_command_template % {'name': name,
'command': command}, pty=False)
@task
def start_service(name):
"""Start an init.d service."""
service_command(name, u"start")
@task
def stop_service(name):
"""Stop an init.d service."""
service_command(name, u"stop")
@task
def restart_service(name):
"""Restart an init.d service."""
service_command(name, u"restart")
|
mlavin/argyle | argyle/system.py | create_user | python | def create_user(name, groups=None, key_file=None):
groups = groups or []
if not user_exists(name):
for group in groups:
if not group_exists(group):
sudo(u"addgroup %s" % group)
groups = groups and u'-G %s' % u','.join(groups) or ''
sudo(u"useradd -m %s -s /bin/bash %s" % (groups, name))
sudo(u"passwd -d %s" % name)
if key_file:
sudo(u"mkdir -p /home/%s/.ssh" % name)
put(key_file, u"/home/%s/.ssh/authorized_keys" % name, use_sudo=True)
sudo(u"chown -R %(name)s:%(name)s /home/%(name)s/.ssh" % {'name': name}) | Create a user. Adds a key file to authorized_keys if given. | train | https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/system.py#L111-L125 | [
"def user_exists(username):\n exists = False\n with settings(hide('everything'), warn_only=True):\n exists = run(u\"grep ^%s /etc/passwd\" % username)\n return exists\n",
"def group_exists(name):\n exists = False\n with settings(hide('everything'), warn_only=True):\n exists = run(u\"g... | import re
from fabric.api import put, sudo, task, env, hide, settings, run
from fabric.contrib import files
def _read_lines_from_file(file_name):
with open(file_name) as f:
packages = f.readlines()
return map(lambda x: x.strip('\n\r'), packages)
def user_exists(username):
exists = False
with settings(hide('everything'), warn_only=True):
exists = run(u"grep ^%s /etc/passwd" % username)
return exists
def group_exists(name):
exists = False
with settings(hide('everything'), warn_only=True):
exists = run(u"grep ^%s /etc/group" % name)
return exists
@task
def install_packages(*packages):
"""Install apt packages from a list."""
sudo(u"apt-get install -y %s" % u" ".join(packages))
@task
def install_packages_from_file(file_name):
"""Install apt packages from a file list."""
install_packages(*_read_lines_from_file(file_name))
@task
def update_apt_sources():
"""Update apt source."""
sudo(u"apt-get update")
@task
def upgrade_apt_packages():
"""Safe upgrade of all packages."""
update_apt_sources()
sudo(u"apt-get upgrade -y")
@task
def add_ppa(name, update=True):
"""Add personal package archive."""
sudo(u"add-apt-repository %s" % name)
if update:
update_apt_sources()
@task
def add_ppas_from_file(file_name, update=True):
"""Add personal package archive from a file list."""
for ppa in _read_lines_from_file(file_name):
add_ppa(ppa, update=False)
if update:
update_apt_sources()
@task
def add_apt_source(source, key=None, update=True):
"""Adds source url to apt sources.list. Optional to pass the key url."""
# Make a backup of list
source_list = u'/etc/apt/sources.list'
sudo("cp %s{,.bak}" % source_list)
files.append(source_list, source, use_sudo=True)
if key:
# Fecth key from url and add
sudo(u"wget -q %s -O - | sudo apt-key add -" % key)
if update:
update_apt_sources()
@task
def add_sources_from_file(file_name, update=True):
"""
Add source urls from a file list.
The file should contain the source line to add followed by the
key url, if any, enclosed in parentheses.
Ex:
deb http://example.com/deb lucid main (http://example.com/key)
"""
key_regex = re.compile(r'(?P<source>[^()]*)(\s+\((?P<key>.*)\))?$')
for line in _read_lines_from_file(file_name):
kwargs = key_regex.match(line).groupdict()
kwargs['update'] = False
add_apt_source(**kwargs)
if update:
update_apt_sources()
@task
@task
def service_command(name, command):
"""Run an init.d/upstart command."""
service_command_template = getattr(env, 'ARGYLE_SERVICE_COMMAND_TEMPLATE',
u'/etc/init.d/%(name)s %(command)s')
sudo(service_command_template % {'name': name,
'command': command}, pty=False)
@task
def start_service(name):
"""Start an init.d service."""
service_command(name, u"start")
@task
def stop_service(name):
"""Stop an init.d service."""
service_command(name, u"stop")
@task
def restart_service(name):
"""Restart an init.d service."""
service_command(name, u"restart")
|
mlavin/argyle | argyle/system.py | service_command | python | def service_command(name, command):
service_command_template = getattr(env, 'ARGYLE_SERVICE_COMMAND_TEMPLATE',
u'/etc/init.d/%(name)s %(command)s')
sudo(service_command_template % {'name': name,
'command': command}, pty=False) | Run an init.d/upstart command. | train | https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/system.py#L129-L135 | null | import re
from fabric.api import put, sudo, task, env, hide, settings, run
from fabric.contrib import files
def _read_lines_from_file(file_name):
with open(file_name) as f:
packages = f.readlines()
return map(lambda x: x.strip('\n\r'), packages)
def user_exists(username):
exists = False
with settings(hide('everything'), warn_only=True):
exists = run(u"grep ^%s /etc/passwd" % username)
return exists
def group_exists(name):
exists = False
with settings(hide('everything'), warn_only=True):
exists = run(u"grep ^%s /etc/group" % name)
return exists
@task
def install_packages(*packages):
"""Install apt packages from a list."""
sudo(u"apt-get install -y %s" % u" ".join(packages))
@task
def install_packages_from_file(file_name):
"""Install apt packages from a file list."""
install_packages(*_read_lines_from_file(file_name))
@task
def update_apt_sources():
"""Update apt source."""
sudo(u"apt-get update")
@task
def upgrade_apt_packages():
"""Safe upgrade of all packages."""
update_apt_sources()
sudo(u"apt-get upgrade -y")
@task
def add_ppa(name, update=True):
"""Add personal package archive."""
sudo(u"add-apt-repository %s" % name)
if update:
update_apt_sources()
@task
def add_ppas_from_file(file_name, update=True):
"""Add personal package archive from a file list."""
for ppa in _read_lines_from_file(file_name):
add_ppa(ppa, update=False)
if update:
update_apt_sources()
@task
def add_apt_source(source, key=None, update=True):
"""Adds source url to apt sources.list. Optional to pass the key url."""
# Make a backup of list
source_list = u'/etc/apt/sources.list'
sudo("cp %s{,.bak}" % source_list)
files.append(source_list, source, use_sudo=True)
if key:
# Fecth key from url and add
sudo(u"wget -q %s -O - | sudo apt-key add -" % key)
if update:
update_apt_sources()
@task
def add_sources_from_file(file_name, update=True):
"""
Add source urls from a file list.
The file should contain the source line to add followed by the
key url, if any, enclosed in parentheses.
Ex:
deb http://example.com/deb lucid main (http://example.com/key)
"""
key_regex = re.compile(r'(?P<source>[^()]*)(\s+\((?P<key>.*)\))?$')
for line in _read_lines_from_file(file_name):
kwargs = key_regex.match(line).groupdict()
kwargs['update'] = False
add_apt_source(**kwargs)
if update:
update_apt_sources()
@task
def create_user(name, groups=None, key_file=None):
"""Create a user. Adds a key file to authorized_keys if given."""
groups = groups or []
if not user_exists(name):
for group in groups:
if not group_exists(group):
sudo(u"addgroup %s" % group)
groups = groups and u'-G %s' % u','.join(groups) or ''
sudo(u"useradd -m %s -s /bin/bash %s" % (groups, name))
sudo(u"passwd -d %s" % name)
if key_file:
sudo(u"mkdir -p /home/%s/.ssh" % name)
put(key_file, u"/home/%s/.ssh/authorized_keys" % name, use_sudo=True)
sudo(u"chown -R %(name)s:%(name)s /home/%(name)s/.ssh" % {'name': name})
@task
@task
def start_service(name):
"""Start an init.d service."""
service_command(name, u"start")
@task
def stop_service(name):
"""Stop an init.d service."""
service_command(name, u"stop")
@task
def restart_service(name):
"""Restart an init.d service."""
service_command(name, u"restart")
|
mlavin/argyle | argyle/postgres.py | create_db_user | python | def create_db_user(username, password=None, flags=None):
flags = flags or u'-D -A -R'
sudo(u'createuser %s %s' % (flags, username), user=u'postgres')
if password:
change_db_user_password(username, password) | Create a databse user. | train | https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/postgres.py#L11-L17 | null | from StringIO import StringIO
import re
from argyle.base import upload_template
from argyle.system import restart_service
from fabric.api import abort, hide, run, sudo, task
from fabric.contrib.console import confirm
@task
@task
def excute_query(query, db=None, flags=None, use_sudo=False, **kwargs):
"""Execute remote psql query."""
flags = flags or u''
if db:
flags = u"%s -d %s" % (flags, db)
command = u'psql %s -c "%s"' % (flags, query)
if use_sudo:
sudo(command, user='postgres', **kwargs)
else:
run(command, **kwargs)
def db_user_exists(username):
"""Return True if the DB user already exists.
"""
qry = u"""SELECT COUNT(*) FROM pg_roles where rolname = \'{username}\';"""
output = StringIO()
excute_query(
qry.format(username=username),
flags="-Aqt",
use_sudo=True,
stdout=output
)
# FIXME: is there a way to get fabric to not clutter the output
# with "[127.0.0.1] out:" on each line?
lines = output.getvalue().splitlines()
return lines and lines[0].endswith('out: 1')
def db_exists(dbname):
# SELECT datname FROM pg_database;
qry = u"""SELECT COUNT(*) FROM pg_database where datname = \'{dbname}\';"""
output = StringIO()
excute_query(
qry.format(dbname=dbname),
flags="-Aqt",
use_sudo=True,
stdout=output
)
lines = output.getvalue().splitlines()
return lines and lines[0].endswith('out: 1')
@task
def change_db_user_password(username, password):
"""Change a db user's password."""
sql = "ALTER USER %s WITH PASSWORD '%s'" % (username, password)
excute_query(sql, use_sudo=True)
@task
def create_db(name, owner=None, encoding=u'UTF-8', template='template1',
**kwargs):
"""Create a Postgres database."""
flags = u''
if encoding:
flags = u'-E %s' % encoding
if owner:
flags = u'%s -O %s' % (flags, owner)
if template and template != 'template1':
flags = u'%s --template=%s' % (flags, template)
sudo('createdb %s %s' % (flags, name), user='postgres', **kwargs)
@task
def upload_pg_hba_conf(template_name=None, pg_version=None, pg_cluster='main', restart=True):
"""
Upload configuration for pg_hba.conf
If the version is not given it will be guessed.
"""
template_name = template_name or u'postgres/pg_hba.conf'
version = pg_version or detect_version()
config = {'version': version, 'cluster': pg_cluster}
destination = u'/etc/postgresql/%(version)s/%(cluster)s/pg_hba.conf' % config
upload_template(template_name, destination, use_sudo=True)
if restart:
restart_service(u'postgresql')
def detect_version():
"""Parse the output of psql to detect Postgres version."""
version_regex = re.compile(r'\(PostgreSQL\) (?P<major>\d)\.(?P<minor>\d)\.(?P<bugfix>\d)')
pg_version = None
with hide('running', 'stdout', 'stderr'):
output = run('psql --version')
match = version_regex.search(output)
if match:
result = match.groupdict()
if 'major' in result and 'minor' in result:
pg_version = u'%(major)s.%(minor)s' % result
if not pg_version:
abort(u"Error: Could not determine Postgres version of the server.")
return pg_version
@task
def reset_cluster(pg_cluster='main', pg_version=None, encoding=u'UTF-8',
locale=u'en_US.UTF-8'):
"""Drop and restore a given cluster."""
warning = u'You are about to drop the %s cluster. This cannot be undone.' \
u' Are you sure you want to continue?' % pg_cluster
if confirm(warning, default=False):
version = pg_version or detect_version()
config = {'version': version, 'cluster': pg_cluster,
'encoding': encoding, 'locale': locale}
sudo(u'pg_dropcluster --stop %(version)s %(cluster)s' % config,
user='postgres', warn_only=True)
sudo(u'pg_createcluster --start -e %(encoding)s --locale %(locale)s'
u' %(version)s %(cluster)s' % config, user='postgres')
else:
abort(u"Dropping %s cluster aborted by user input." % pg_cluster)
|
mlavin/argyle | argyle/postgres.py | excute_query | python | def excute_query(query, db=None, flags=None, use_sudo=False, **kwargs):
flags = flags or u''
if db:
flags = u"%s -d %s" % (flags, db)
command = u'psql %s -c "%s"' % (flags, query)
if use_sudo:
sudo(command, user='postgres', **kwargs)
else:
run(command, **kwargs) | Execute remote psql query. | train | https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/postgres.py#L21-L31 | null | from StringIO import StringIO
import re
from argyle.base import upload_template
from argyle.system import restart_service
from fabric.api import abort, hide, run, sudo, task
from fabric.contrib.console import confirm
@task
def create_db_user(username, password=None, flags=None):
"""Create a databse user."""
flags = flags or u'-D -A -R'
sudo(u'createuser %s %s' % (flags, username), user=u'postgres')
if password:
change_db_user_password(username, password)
@task
def db_user_exists(username):
"""Return True if the DB user already exists.
"""
qry = u"""SELECT COUNT(*) FROM pg_roles where rolname = \'{username}\';"""
output = StringIO()
excute_query(
qry.format(username=username),
flags="-Aqt",
use_sudo=True,
stdout=output
)
# FIXME: is there a way to get fabric to not clutter the output
# with "[127.0.0.1] out:" on each line?
lines = output.getvalue().splitlines()
return lines and lines[0].endswith('out: 1')
def db_exists(dbname):
# SELECT datname FROM pg_database;
qry = u"""SELECT COUNT(*) FROM pg_database where datname = \'{dbname}\';"""
output = StringIO()
excute_query(
qry.format(dbname=dbname),
flags="-Aqt",
use_sudo=True,
stdout=output
)
lines = output.getvalue().splitlines()
return lines and lines[0].endswith('out: 1')
@task
def change_db_user_password(username, password):
"""Change a db user's password."""
sql = "ALTER USER %s WITH PASSWORD '%s'" % (username, password)
excute_query(sql, use_sudo=True)
@task
def create_db(name, owner=None, encoding=u'UTF-8', template='template1',
**kwargs):
"""Create a Postgres database."""
flags = u''
if encoding:
flags = u'-E %s' % encoding
if owner:
flags = u'%s -O %s' % (flags, owner)
if template and template != 'template1':
flags = u'%s --template=%s' % (flags, template)
sudo('createdb %s %s' % (flags, name), user='postgres', **kwargs)
@task
def upload_pg_hba_conf(template_name=None, pg_version=None, pg_cluster='main', restart=True):
"""
Upload configuration for pg_hba.conf
If the version is not given it will be guessed.
"""
template_name = template_name or u'postgres/pg_hba.conf'
version = pg_version or detect_version()
config = {'version': version, 'cluster': pg_cluster}
destination = u'/etc/postgresql/%(version)s/%(cluster)s/pg_hba.conf' % config
upload_template(template_name, destination, use_sudo=True)
if restart:
restart_service(u'postgresql')
def detect_version():
"""Parse the output of psql to detect Postgres version."""
version_regex = re.compile(r'\(PostgreSQL\) (?P<major>\d)\.(?P<minor>\d)\.(?P<bugfix>\d)')
pg_version = None
with hide('running', 'stdout', 'stderr'):
output = run('psql --version')
match = version_regex.search(output)
if match:
result = match.groupdict()
if 'major' in result and 'minor' in result:
pg_version = u'%(major)s.%(minor)s' % result
if not pg_version:
abort(u"Error: Could not determine Postgres version of the server.")
return pg_version
@task
def reset_cluster(pg_cluster='main', pg_version=None, encoding=u'UTF-8',
locale=u'en_US.UTF-8'):
"""Drop and restore a given cluster."""
warning = u'You are about to drop the %s cluster. This cannot be undone.' \
u' Are you sure you want to continue?' % pg_cluster
if confirm(warning, default=False):
version = pg_version or detect_version()
config = {'version': version, 'cluster': pg_cluster,
'encoding': encoding, 'locale': locale}
sudo(u'pg_dropcluster --stop %(version)s %(cluster)s' % config,
user='postgres', warn_only=True)
sudo(u'pg_createcluster --start -e %(encoding)s --locale %(locale)s'
u' %(version)s %(cluster)s' % config, user='postgres')
else:
abort(u"Dropping %s cluster aborted by user input." % pg_cluster)
|
mlavin/argyle | argyle/postgres.py | db_user_exists | python | def db_user_exists(username):
qry = u"""SELECT COUNT(*) FROM pg_roles where rolname = \'{username}\';"""
output = StringIO()
excute_query(
qry.format(username=username),
flags="-Aqt",
use_sudo=True,
stdout=output
)
# FIXME: is there a way to get fabric to not clutter the output
# with "[127.0.0.1] out:" on each line?
lines = output.getvalue().splitlines()
return lines and lines[0].endswith('out: 1') | Return True if the DB user already exists. | train | https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/postgres.py#L34-L48 | null | from StringIO import StringIO
import re
from argyle.base import upload_template
from argyle.system import restart_service
from fabric.api import abort, hide, run, sudo, task
from fabric.contrib.console import confirm
@task
def create_db_user(username, password=None, flags=None):
"""Create a databse user."""
flags = flags or u'-D -A -R'
sudo(u'createuser %s %s' % (flags, username), user=u'postgres')
if password:
change_db_user_password(username, password)
@task
def excute_query(query, db=None, flags=None, use_sudo=False, **kwargs):
"""Execute remote psql query."""
flags = flags or u''
if db:
flags = u"%s -d %s" % (flags, db)
command = u'psql %s -c "%s"' % (flags, query)
if use_sudo:
sudo(command, user='postgres', **kwargs)
else:
run(command, **kwargs)
def db_exists(dbname):
# SELECT datname FROM pg_database;
qry = u"""SELECT COUNT(*) FROM pg_database where datname = \'{dbname}\';"""
output = StringIO()
excute_query(
qry.format(dbname=dbname),
flags="-Aqt",
use_sudo=True,
stdout=output
)
lines = output.getvalue().splitlines()
return lines and lines[0].endswith('out: 1')
@task
def change_db_user_password(username, password):
"""Change a db user's password."""
sql = "ALTER USER %s WITH PASSWORD '%s'" % (username, password)
excute_query(sql, use_sudo=True)
@task
def create_db(name, owner=None, encoding=u'UTF-8', template='template1',
**kwargs):
"""Create a Postgres database."""
flags = u''
if encoding:
flags = u'-E %s' % encoding
if owner:
flags = u'%s -O %s' % (flags, owner)
if template and template != 'template1':
flags = u'%s --template=%s' % (flags, template)
sudo('createdb %s %s' % (flags, name), user='postgres', **kwargs)
@task
def upload_pg_hba_conf(template_name=None, pg_version=None, pg_cluster='main', restart=True):
"""
Upload configuration for pg_hba.conf
If the version is not given it will be guessed.
"""
template_name = template_name or u'postgres/pg_hba.conf'
version = pg_version or detect_version()
config = {'version': version, 'cluster': pg_cluster}
destination = u'/etc/postgresql/%(version)s/%(cluster)s/pg_hba.conf' % config
upload_template(template_name, destination, use_sudo=True)
if restart:
restart_service(u'postgresql')
def detect_version():
"""Parse the output of psql to detect Postgres version."""
version_regex = re.compile(r'\(PostgreSQL\) (?P<major>\d)\.(?P<minor>\d)\.(?P<bugfix>\d)')
pg_version = None
with hide('running', 'stdout', 'stderr'):
output = run('psql --version')
match = version_regex.search(output)
if match:
result = match.groupdict()
if 'major' in result and 'minor' in result:
pg_version = u'%(major)s.%(minor)s' % result
if not pg_version:
abort(u"Error: Could not determine Postgres version of the server.")
return pg_version
@task
def reset_cluster(pg_cluster='main', pg_version=None, encoding=u'UTF-8',
locale=u'en_US.UTF-8'):
"""Drop and restore a given cluster."""
warning = u'You are about to drop the %s cluster. This cannot be undone.' \
u' Are you sure you want to continue?' % pg_cluster
if confirm(warning, default=False):
version = pg_version or detect_version()
config = {'version': version, 'cluster': pg_cluster,
'encoding': encoding, 'locale': locale}
sudo(u'pg_dropcluster --stop %(version)s %(cluster)s' % config,
user='postgres', warn_only=True)
sudo(u'pg_createcluster --start -e %(encoding)s --locale %(locale)s'
u' %(version)s %(cluster)s' % config, user='postgres')
else:
abort(u"Dropping %s cluster aborted by user input." % pg_cluster)
|
mlavin/argyle | argyle/postgres.py | change_db_user_password | python | def change_db_user_password(username, password):
sql = "ALTER USER %s WITH PASSWORD '%s'" % (username, password)
excute_query(sql, use_sudo=True) | Change a db user's password. | train | https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/postgres.py#L66-L70 | null | from StringIO import StringIO
import re
from argyle.base import upload_template
from argyle.system import restart_service
from fabric.api import abort, hide, run, sudo, task
from fabric.contrib.console import confirm
@task
def create_db_user(username, password=None, flags=None):
"""Create a databse user."""
flags = flags or u'-D -A -R'
sudo(u'createuser %s %s' % (flags, username), user=u'postgres')
if password:
change_db_user_password(username, password)
@task
def excute_query(query, db=None, flags=None, use_sudo=False, **kwargs):
"""Execute remote psql query."""
flags = flags or u''
if db:
flags = u"%s -d %s" % (flags, db)
command = u'psql %s -c "%s"' % (flags, query)
if use_sudo:
sudo(command, user='postgres', **kwargs)
else:
run(command, **kwargs)
def db_user_exists(username):
"""Return True if the DB user already exists.
"""
qry = u"""SELECT COUNT(*) FROM pg_roles where rolname = \'{username}\';"""
output = StringIO()
excute_query(
qry.format(username=username),
flags="-Aqt",
use_sudo=True,
stdout=output
)
# FIXME: is there a way to get fabric to not clutter the output
# with "[127.0.0.1] out:" on each line?
lines = output.getvalue().splitlines()
return lines and lines[0].endswith('out: 1')
def db_exists(dbname):
# SELECT datname FROM pg_database;
qry = u"""SELECT COUNT(*) FROM pg_database where datname = \'{dbname}\';"""
output = StringIO()
excute_query(
qry.format(dbname=dbname),
flags="-Aqt",
use_sudo=True,
stdout=output
)
lines = output.getvalue().splitlines()
return lines and lines[0].endswith('out: 1')
@task
@task
def create_db(name, owner=None, encoding=u'UTF-8', template='template1',
**kwargs):
"""Create a Postgres database."""
flags = u''
if encoding:
flags = u'-E %s' % encoding
if owner:
flags = u'%s -O %s' % (flags, owner)
if template and template != 'template1':
flags = u'%s --template=%s' % (flags, template)
sudo('createdb %s %s' % (flags, name), user='postgres', **kwargs)
@task
def upload_pg_hba_conf(template_name=None, pg_version=None, pg_cluster='main', restart=True):
"""
Upload configuration for pg_hba.conf
If the version is not given it will be guessed.
"""
template_name = template_name or u'postgres/pg_hba.conf'
version = pg_version or detect_version()
config = {'version': version, 'cluster': pg_cluster}
destination = u'/etc/postgresql/%(version)s/%(cluster)s/pg_hba.conf' % config
upload_template(template_name, destination, use_sudo=True)
if restart:
restart_service(u'postgresql')
def detect_version():
"""Parse the output of psql to detect Postgres version."""
version_regex = re.compile(r'\(PostgreSQL\) (?P<major>\d)\.(?P<minor>\d)\.(?P<bugfix>\d)')
pg_version = None
with hide('running', 'stdout', 'stderr'):
output = run('psql --version')
match = version_regex.search(output)
if match:
result = match.groupdict()
if 'major' in result and 'minor' in result:
pg_version = u'%(major)s.%(minor)s' % result
if not pg_version:
abort(u"Error: Could not determine Postgres version of the server.")
return pg_version
@task
def reset_cluster(pg_cluster='main', pg_version=None, encoding=u'UTF-8',
locale=u'en_US.UTF-8'):
"""Drop and restore a given cluster."""
warning = u'You are about to drop the %s cluster. This cannot be undone.' \
u' Are you sure you want to continue?' % pg_cluster
if confirm(warning, default=False):
version = pg_version or detect_version()
config = {'version': version, 'cluster': pg_cluster,
'encoding': encoding, 'locale': locale}
sudo(u'pg_dropcluster --stop %(version)s %(cluster)s' % config,
user='postgres', warn_only=True)
sudo(u'pg_createcluster --start -e %(encoding)s --locale %(locale)s'
u' %(version)s %(cluster)s' % config, user='postgres')
else:
abort(u"Dropping %s cluster aborted by user input." % pg_cluster)
|
mlavin/argyle | argyle/postgres.py | create_db | python | def create_db(name, owner=None, encoding=u'UTF-8', template='template1',
**kwargs):
flags = u''
if encoding:
flags = u'-E %s' % encoding
if owner:
flags = u'%s -O %s' % (flags, owner)
if template and template != 'template1':
flags = u'%s --template=%s' % (flags, template)
sudo('createdb %s %s' % (flags, name), user='postgres', **kwargs) | Create a Postgres database. | train | https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/postgres.py#L74-L85 | null | from StringIO import StringIO
import re
from argyle.base import upload_template
from argyle.system import restart_service
from fabric.api import abort, hide, run, sudo, task
from fabric.contrib.console import confirm
@task
def create_db_user(username, password=None, flags=None):
"""Create a databse user."""
flags = flags or u'-D -A -R'
sudo(u'createuser %s %s' % (flags, username), user=u'postgres')
if password:
change_db_user_password(username, password)
@task
def excute_query(query, db=None, flags=None, use_sudo=False, **kwargs):
"""Execute remote psql query."""
flags = flags or u''
if db:
flags = u"%s -d %s" % (flags, db)
command = u'psql %s -c "%s"' % (flags, query)
if use_sudo:
sudo(command, user='postgres', **kwargs)
else:
run(command, **kwargs)
def db_user_exists(username):
"""Return True if the DB user already exists.
"""
qry = u"""SELECT COUNT(*) FROM pg_roles where rolname = \'{username}\';"""
output = StringIO()
excute_query(
qry.format(username=username),
flags="-Aqt",
use_sudo=True,
stdout=output
)
# FIXME: is there a way to get fabric to not clutter the output
# with "[127.0.0.1] out:" on each line?
lines = output.getvalue().splitlines()
return lines and lines[0].endswith('out: 1')
def db_exists(dbname):
# SELECT datname FROM pg_database;
qry = u"""SELECT COUNT(*) FROM pg_database where datname = \'{dbname}\';"""
output = StringIO()
excute_query(
qry.format(dbname=dbname),
flags="-Aqt",
use_sudo=True,
stdout=output
)
lines = output.getvalue().splitlines()
return lines and lines[0].endswith('out: 1')
@task
def change_db_user_password(username, password):
"""Change a db user's password."""
sql = "ALTER USER %s WITH PASSWORD '%s'" % (username, password)
excute_query(sql, use_sudo=True)
@task
@task
def upload_pg_hba_conf(template_name=None, pg_version=None, pg_cluster='main', restart=True):
"""
Upload configuration for pg_hba.conf
If the version is not given it will be guessed.
"""
template_name = template_name or u'postgres/pg_hba.conf'
version = pg_version or detect_version()
config = {'version': version, 'cluster': pg_cluster}
destination = u'/etc/postgresql/%(version)s/%(cluster)s/pg_hba.conf' % config
upload_template(template_name, destination, use_sudo=True)
if restart:
restart_service(u'postgresql')
def detect_version():
"""Parse the output of psql to detect Postgres version."""
version_regex = re.compile(r'\(PostgreSQL\) (?P<major>\d)\.(?P<minor>\d)\.(?P<bugfix>\d)')
pg_version = None
with hide('running', 'stdout', 'stderr'):
output = run('psql --version')
match = version_regex.search(output)
if match:
result = match.groupdict()
if 'major' in result and 'minor' in result:
pg_version = u'%(major)s.%(minor)s' % result
if not pg_version:
abort(u"Error: Could not determine Postgres version of the server.")
return pg_version
@task
def reset_cluster(pg_cluster='main', pg_version=None, encoding=u'UTF-8',
locale=u'en_US.UTF-8'):
"""Drop and restore a given cluster."""
warning = u'You are about to drop the %s cluster. This cannot be undone.' \
u' Are you sure you want to continue?' % pg_cluster
if confirm(warning, default=False):
version = pg_version or detect_version()
config = {'version': version, 'cluster': pg_cluster,
'encoding': encoding, 'locale': locale}
sudo(u'pg_dropcluster --stop %(version)s %(cluster)s' % config,
user='postgres', warn_only=True)
sudo(u'pg_createcluster --start -e %(encoding)s --locale %(locale)s'
u' %(version)s %(cluster)s' % config, user='postgres')
else:
abort(u"Dropping %s cluster aborted by user input." % pg_cluster)
|
mlavin/argyle | argyle/postgres.py | upload_pg_hba_conf | python | def upload_pg_hba_conf(template_name=None, pg_version=None, pg_cluster='main', restart=True):
template_name = template_name or u'postgres/pg_hba.conf'
version = pg_version or detect_version()
config = {'version': version, 'cluster': pg_cluster}
destination = u'/etc/postgresql/%(version)s/%(cluster)s/pg_hba.conf' % config
upload_template(template_name, destination, use_sudo=True)
if restart:
restart_service(u'postgresql') | Upload configuration for pg_hba.conf
If the version is not given it will be guessed. | train | https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/postgres.py#L89-L101 | [
"def upload_template(filename, destination, context=None,\n use_sudo=False, backup=True, mode=None):\n func = use_sudo and sudo or run\n # Process template\n loaders = []\n template_dirs = getattr(env, 'ARGYLE_TEMPLATE_DIRS', ())\n if template_dirs:\n loaders.append(FileSystemLoader(templat... | from StringIO import StringIO
import re
from argyle.base import upload_template
from argyle.system import restart_service
from fabric.api import abort, hide, run, sudo, task
from fabric.contrib.console import confirm
@task
def create_db_user(username, password=None, flags=None):
"""Create a databse user."""
flags = flags or u'-D -A -R'
sudo(u'createuser %s %s' % (flags, username), user=u'postgres')
if password:
change_db_user_password(username, password)
@task
def excute_query(query, db=None, flags=None, use_sudo=False, **kwargs):
"""Execute remote psql query."""
flags = flags or u''
if db:
flags = u"%s -d %s" % (flags, db)
command = u'psql %s -c "%s"' % (flags, query)
if use_sudo:
sudo(command, user='postgres', **kwargs)
else:
run(command, **kwargs)
def db_user_exists(username):
"""Return True if the DB user already exists.
"""
qry = u"""SELECT COUNT(*) FROM pg_roles where rolname = \'{username}\';"""
output = StringIO()
excute_query(
qry.format(username=username),
flags="-Aqt",
use_sudo=True,
stdout=output
)
# FIXME: is there a way to get fabric to not clutter the output
# with "[127.0.0.1] out:" on each line?
lines = output.getvalue().splitlines()
return lines and lines[0].endswith('out: 1')
def db_exists(dbname):
# SELECT datname FROM pg_database;
qry = u"""SELECT COUNT(*) FROM pg_database where datname = \'{dbname}\';"""
output = StringIO()
excute_query(
qry.format(dbname=dbname),
flags="-Aqt",
use_sudo=True,
stdout=output
)
lines = output.getvalue().splitlines()
return lines and lines[0].endswith('out: 1')
@task
def change_db_user_password(username, password):
"""Change a db user's password."""
sql = "ALTER USER %s WITH PASSWORD '%s'" % (username, password)
excute_query(sql, use_sudo=True)
@task
def create_db(name, owner=None, encoding=u'UTF-8', template='template1',
**kwargs):
"""Create a Postgres database."""
flags = u''
if encoding:
flags = u'-E %s' % encoding
if owner:
flags = u'%s -O %s' % (flags, owner)
if template and template != 'template1':
flags = u'%s --template=%s' % (flags, template)
sudo('createdb %s %s' % (flags, name), user='postgres', **kwargs)
@task
def detect_version():
"""Parse the output of psql to detect Postgres version."""
version_regex = re.compile(r'\(PostgreSQL\) (?P<major>\d)\.(?P<minor>\d)\.(?P<bugfix>\d)')
pg_version = None
with hide('running', 'stdout', 'stderr'):
output = run('psql --version')
match = version_regex.search(output)
if match:
result = match.groupdict()
if 'major' in result and 'minor' in result:
pg_version = u'%(major)s.%(minor)s' % result
if not pg_version:
abort(u"Error: Could not determine Postgres version of the server.")
return pg_version
@task
def reset_cluster(pg_cluster='main', pg_version=None, encoding=u'UTF-8',
locale=u'en_US.UTF-8'):
"""Drop and restore a given cluster."""
warning = u'You are about to drop the %s cluster. This cannot be undone.' \
u' Are you sure you want to continue?' % pg_cluster
if confirm(warning, default=False):
version = pg_version or detect_version()
config = {'version': version, 'cluster': pg_cluster,
'encoding': encoding, 'locale': locale}
sudo(u'pg_dropcluster --stop %(version)s %(cluster)s' % config,
user='postgres', warn_only=True)
sudo(u'pg_createcluster --start -e %(encoding)s --locale %(locale)s'
u' %(version)s %(cluster)s' % config, user='postgres')
else:
abort(u"Dropping %s cluster aborted by user input." % pg_cluster)
|
mlavin/argyle | argyle/postgres.py | detect_version | python | def detect_version():
version_regex = re.compile(r'\(PostgreSQL\) (?P<major>\d)\.(?P<minor>\d)\.(?P<bugfix>\d)')
pg_version = None
with hide('running', 'stdout', 'stderr'):
output = run('psql --version')
match = version_regex.search(output)
if match:
result = match.groupdict()
if 'major' in result and 'minor' in result:
pg_version = u'%(major)s.%(minor)s' % result
if not pg_version:
abort(u"Error: Could not determine Postgres version of the server.")
return pg_version | Parse the output of psql to detect Postgres version. | train | https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/postgres.py#L104-L117 | null | from StringIO import StringIO
import re
from argyle.base import upload_template
from argyle.system import restart_service
from fabric.api import abort, hide, run, sudo, task
from fabric.contrib.console import confirm
@task
def create_db_user(username, password=None, flags=None):
"""Create a databse user."""
flags = flags or u'-D -A -R'
sudo(u'createuser %s %s' % (flags, username), user=u'postgres')
if password:
change_db_user_password(username, password)
@task
def excute_query(query, db=None, flags=None, use_sudo=False, **kwargs):
"""Execute remote psql query."""
flags = flags or u''
if db:
flags = u"%s -d %s" % (flags, db)
command = u'psql %s -c "%s"' % (flags, query)
if use_sudo:
sudo(command, user='postgres', **kwargs)
else:
run(command, **kwargs)
def db_user_exists(username):
"""Return True if the DB user already exists.
"""
qry = u"""SELECT COUNT(*) FROM pg_roles where rolname = \'{username}\';"""
output = StringIO()
excute_query(
qry.format(username=username),
flags="-Aqt",
use_sudo=True,
stdout=output
)
# FIXME: is there a way to get fabric to not clutter the output
# with "[127.0.0.1] out:" on each line?
lines = output.getvalue().splitlines()
return lines and lines[0].endswith('out: 1')
def db_exists(dbname):
# SELECT datname FROM pg_database;
qry = u"""SELECT COUNT(*) FROM pg_database where datname = \'{dbname}\';"""
output = StringIO()
excute_query(
qry.format(dbname=dbname),
flags="-Aqt",
use_sudo=True,
stdout=output
)
lines = output.getvalue().splitlines()
return lines and lines[0].endswith('out: 1')
@task
def change_db_user_password(username, password):
"""Change a db user's password."""
sql = "ALTER USER %s WITH PASSWORD '%s'" % (username, password)
excute_query(sql, use_sudo=True)
@task
def create_db(name, owner=None, encoding=u'UTF-8', template='template1',
**kwargs):
"""Create a Postgres database."""
flags = u''
if encoding:
flags = u'-E %s' % encoding
if owner:
flags = u'%s -O %s' % (flags, owner)
if template and template != 'template1':
flags = u'%s --template=%s' % (flags, template)
sudo('createdb %s %s' % (flags, name), user='postgres', **kwargs)
@task
def upload_pg_hba_conf(template_name=None, pg_version=None, pg_cluster='main', restart=True):
"""
Upload configuration for pg_hba.conf
If the version is not given it will be guessed.
"""
template_name = template_name or u'postgres/pg_hba.conf'
version = pg_version or detect_version()
config = {'version': version, 'cluster': pg_cluster}
destination = u'/etc/postgresql/%(version)s/%(cluster)s/pg_hba.conf' % config
upload_template(template_name, destination, use_sudo=True)
if restart:
restart_service(u'postgresql')
@task
def reset_cluster(pg_cluster='main', pg_version=None, encoding=u'UTF-8',
locale=u'en_US.UTF-8'):
"""Drop and restore a given cluster."""
warning = u'You are about to drop the %s cluster. This cannot be undone.' \
u' Are you sure you want to continue?' % pg_cluster
if confirm(warning, default=False):
version = pg_version or detect_version()
config = {'version': version, 'cluster': pg_cluster,
'encoding': encoding, 'locale': locale}
sudo(u'pg_dropcluster --stop %(version)s %(cluster)s' % config,
user='postgres', warn_only=True)
sudo(u'pg_createcluster --start -e %(encoding)s --locale %(locale)s'
u' %(version)s %(cluster)s' % config, user='postgres')
else:
abort(u"Dropping %s cluster aborted by user input." % pg_cluster)
|
mlavin/argyle | argyle/postgres.py | reset_cluster | python | def reset_cluster(pg_cluster='main', pg_version=None, encoding=u'UTF-8',
locale=u'en_US.UTF-8'):
warning = u'You are about to drop the %s cluster. This cannot be undone.' \
u' Are you sure you want to continue?' % pg_cluster
if confirm(warning, default=False):
version = pg_version or detect_version()
config = {'version': version, 'cluster': pg_cluster,
'encoding': encoding, 'locale': locale}
sudo(u'pg_dropcluster --stop %(version)s %(cluster)s' % config,
user='postgres', warn_only=True)
sudo(u'pg_createcluster --start -e %(encoding)s --locale %(locale)s'
u' %(version)s %(cluster)s' % config, user='postgres')
else:
abort(u"Dropping %s cluster aborted by user input." % pg_cluster) | Drop and restore a given cluster. | train | https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/postgres.py#L121-L135 | [
"def detect_version():\n \"\"\"Parse the output of psql to detect Postgres version.\"\"\"\n version_regex = re.compile(r'\\(PostgreSQL\\) (?P<major>\\d)\\.(?P<minor>\\d)\\.(?P<bugfix>\\d)')\n pg_version = None\n with hide('running', 'stdout', 'stderr'):\n output = run('psql --version')\n match... | from StringIO import StringIO
import re
from argyle.base import upload_template
from argyle.system import restart_service
from fabric.api import abort, hide, run, sudo, task
from fabric.contrib.console import confirm
@task
def create_db_user(username, password=None, flags=None):
"""Create a databse user."""
flags = flags or u'-D -A -R'
sudo(u'createuser %s %s' % (flags, username), user=u'postgres')
if password:
change_db_user_password(username, password)
@task
def excute_query(query, db=None, flags=None, use_sudo=False, **kwargs):
"""Execute remote psql query."""
flags = flags or u''
if db:
flags = u"%s -d %s" % (flags, db)
command = u'psql %s -c "%s"' % (flags, query)
if use_sudo:
sudo(command, user='postgres', **kwargs)
else:
run(command, **kwargs)
def db_user_exists(username):
"""Return True if the DB user already exists.
"""
qry = u"""SELECT COUNT(*) FROM pg_roles where rolname = \'{username}\';"""
output = StringIO()
excute_query(
qry.format(username=username),
flags="-Aqt",
use_sudo=True,
stdout=output
)
# FIXME: is there a way to get fabric to not clutter the output
# with "[127.0.0.1] out:" on each line?
lines = output.getvalue().splitlines()
return lines and lines[0].endswith('out: 1')
def db_exists(dbname):
# SELECT datname FROM pg_database;
qry = u"""SELECT COUNT(*) FROM pg_database where datname = \'{dbname}\';"""
output = StringIO()
excute_query(
qry.format(dbname=dbname),
flags="-Aqt",
use_sudo=True,
stdout=output
)
lines = output.getvalue().splitlines()
return lines and lines[0].endswith('out: 1')
@task
def change_db_user_password(username, password):
"""Change a db user's password."""
sql = "ALTER USER %s WITH PASSWORD '%s'" % (username, password)
excute_query(sql, use_sudo=True)
@task
def create_db(name, owner=None, encoding=u'UTF-8', template='template1',
**kwargs):
"""Create a Postgres database."""
flags = u''
if encoding:
flags = u'-E %s' % encoding
if owner:
flags = u'%s -O %s' % (flags, owner)
if template and template != 'template1':
flags = u'%s --template=%s' % (flags, template)
sudo('createdb %s %s' % (flags, name), user='postgres', **kwargs)
@task
def upload_pg_hba_conf(template_name=None, pg_version=None, pg_cluster='main', restart=True):
"""
Upload configuration for pg_hba.conf
If the version is not given it will be guessed.
"""
template_name = template_name or u'postgres/pg_hba.conf'
version = pg_version or detect_version()
config = {'version': version, 'cluster': pg_cluster}
destination = u'/etc/postgresql/%(version)s/%(cluster)s/pg_hba.conf' % config
upload_template(template_name, destination, use_sudo=True)
if restart:
restart_service(u'postgresql')
def detect_version():
"""Parse the output of psql to detect Postgres version."""
version_regex = re.compile(r'\(PostgreSQL\) (?P<major>\d)\.(?P<minor>\d)\.(?P<bugfix>\d)')
pg_version = None
with hide('running', 'stdout', 'stderr'):
output = run('psql --version')
match = version_regex.search(output)
if match:
result = match.groupdict()
if 'major' in result and 'minor' in result:
pg_version = u'%(major)s.%(minor)s' % result
if not pg_version:
abort(u"Error: Could not determine Postgres version of the server.")
return pg_version
@task
|
mlavin/argyle | argyle/npm.py | npm_install | python | def npm_install(package, flags=None):
command = u'install %s %s' % (package, flags or u'')
npm_command(command.strip()) | Install a package from NPM. | train | https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/npm.py#L12-L16 | null | from fabric.api import sudo, task
@task
def npm_command(command):
"""Run a NPM command."""
sudo(u'npm %s' % command)
@task
@task
def npm_uninstall(package):
"""Uninstall a package from NPM."""
command = u'uninstall %s' % package
npm_command(command)
@task
def npm_update(package):
"""Update a package from NPM."""
command = u'update %s' % package
npm_command(command)
|
mlavin/argyle | argyle/base.py | sshagent_run | python | def sshagent_run(cmd):
# Handle context manager modifications
wrapped_cmd = _prefix_commands(_prefix_env_vars(cmd), 'remote')
try:
host, port = env.host_string.split(':')
return local(
u"ssh -p %s -A -o StrictHostKeyChecking=no %s@%s '%s'" % (
port, env.user, host, wrapped_cmd
)
)
except ValueError:
return local(
u"ssh -A -o StrictHostKeyChecking=no %s@%s '%s'" % (
env.user, env.host_string, wrapped_cmd
)
) | Helper function.
Runs a command with SSH agent forwarding enabled.
Note:: Fabric (and paramiko) can't forward your SSH agent.
This helper uses your system's ssh to do so. | train | https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/base.py#L10-L32 | null | import os
from StringIO import StringIO
from fabric.api import env, hide, local, put, run, settings, sudo
from fabric.contrib import files
from fabric.operations import _prefix_commands, _prefix_env_vars
from jinja2 import ChoiceLoader, Environment, FileSystemLoader, PackageLoader
def upload_template(filename, destination, context=None,
use_sudo=False, backup=True, mode=None):
func = use_sudo and sudo or run
# Process template
loaders = []
template_dirs = getattr(env, 'ARGYLE_TEMPLATE_DIRS', ())
if template_dirs:
loaders.append(FileSystemLoader(template_dirs))
loaders.append(PackageLoader('argyle'))
jenv = Environment(loader=ChoiceLoader(loaders))
context = context or {}
env_context = env.copy()
env_context.update(context)
template = jenv.get_or_select_template(filename)
text = template.render(env_context)
# Normalize destination to be an actual filename, due to using StringIO
with settings(hide('everything'), warn_only=True):
if func('test -d %s' % destination).succeeded:
sep = "" if destination.endswith('/') else "/"
if hasattr(filename, '__iter__'):
# Use selected filename for destination
final = template.filename
else:
final = filename
destination += sep + os.path.basename(final)
# Back up original file
if backup and files.exists(destination):
func("cp %s{,.bak}" % destination)
# Upload the file.
put(
local_path=StringIO(text),
remote_path=destination,
use_sudo=use_sudo,
mode=mode
)
|
mlavin/argyle | argyle/supervisor.py | upload_supervisor_app_conf | python | def upload_supervisor_app_conf(app_name, template_name=None, context=None):
default = {'app_name': app_name}
context = context or {}
default.update(context)
template_name = template_name or [u'supervisor/%s.conf' % app_name, u'supervisor/base.conf']
destination = u'/etc/supervisor/conf.d/%s.conf' % app_name
upload_template(template_name, destination, context=default, use_sudo=True)
supervisor_command(u'update') | Upload Supervisor app configuration from a template. | train | https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/supervisor.py#L14-L23 | [
"def upload_template(filename, destination, context=None,\n use_sudo=False, backup=True, mode=None):\n func = use_sudo and sudo or run\n # Process template\n loaders = []\n template_dirs = getattr(env, 'ARGYLE_TEMPLATE_DIRS', ())\n if template_dirs:\n loaders.append(FileSystemLoader(templat... | from argyle.base import upload_template
from fabric.api import sudo, task
from fabric.contrib import files
@task
def supervisor_command(command):
"""Run a supervisorctl command."""
sudo(u'supervisorctl %s' % command)
@task
@task
def remove_supervisor_app(app_name):
"""Remove Supervisor app configuration."""
app = u'/etc/supervisor/conf.d/%s.conf' % app_name
if files.exists(app):
sudo(u'rm %s' % app)
supervisor_command(u'update')
@task
def upload_celery_conf(command='celeryd', app_name=None, template_name=None, context=None):
"""Upload Supervisor configuration for a celery command."""
app_name = app_name or command
default = {'app_name': app_name, 'command': command}
context = context or {}
default.update(context)
template_name = template_name or [u'supervisor/%s.conf' % command, u'supervisor/celery.conf']
upload_supervisor_app_conf(app_name=app_name, template_name=template_name, context=default)
@task
def upload_gunicorn_conf(command='gunicorn', app_name=None, template_name=None, context=None):
"""Upload Supervisor configuration for a gunicorn server."""
app_name = app_name or command
default = {'app_name': app_name, 'command': command}
context = context or {}
default.update(context)
template_name = template_name or [u'supervisor/%s.conf' % command, u'supervisor/gunicorn.conf']
upload_supervisor_app_conf(app_name=app_name, template_name=template_name, context=default)
|
mlavin/argyle | argyle/supervisor.py | remove_supervisor_app | python | def remove_supervisor_app(app_name):
app = u'/etc/supervisor/conf.d/%s.conf' % app_name
if files.exists(app):
sudo(u'rm %s' % app)
supervisor_command(u'update') | Remove Supervisor app configuration. | train | https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/supervisor.py#L27-L33 | null | from argyle.base import upload_template
from fabric.api import sudo, task
from fabric.contrib import files
@task
def supervisor_command(command):
"""Run a supervisorctl command."""
sudo(u'supervisorctl %s' % command)
@task
def upload_supervisor_app_conf(app_name, template_name=None, context=None):
"""Upload Supervisor app configuration from a template."""
default = {'app_name': app_name}
context = context or {}
default.update(context)
template_name = template_name or [u'supervisor/%s.conf' % app_name, u'supervisor/base.conf']
destination = u'/etc/supervisor/conf.d/%s.conf' % app_name
upload_template(template_name, destination, context=default, use_sudo=True)
supervisor_command(u'update')
@task
@task
def upload_celery_conf(command='celeryd', app_name=None, template_name=None, context=None):
"""Upload Supervisor configuration for a celery command."""
app_name = app_name or command
default = {'app_name': app_name, 'command': command}
context = context or {}
default.update(context)
template_name = template_name or [u'supervisor/%s.conf' % command, u'supervisor/celery.conf']
upload_supervisor_app_conf(app_name=app_name, template_name=template_name, context=default)
@task
def upload_gunicorn_conf(command='gunicorn', app_name=None, template_name=None, context=None):
"""Upload Supervisor configuration for a gunicorn server."""
app_name = app_name or command
default = {'app_name': app_name, 'command': command}
context = context or {}
default.update(context)
template_name = template_name or [u'supervisor/%s.conf' % command, u'supervisor/gunicorn.conf']
upload_supervisor_app_conf(app_name=app_name, template_name=template_name, context=default)
|
mlavin/argyle | argyle/supervisor.py | upload_celery_conf | python | def upload_celery_conf(command='celeryd', app_name=None, template_name=None, context=None):
app_name = app_name or command
default = {'app_name': app_name, 'command': command}
context = context or {}
default.update(context)
template_name = template_name or [u'supervisor/%s.conf' % command, u'supervisor/celery.conf']
upload_supervisor_app_conf(app_name=app_name, template_name=template_name, context=default) | Upload Supervisor configuration for a celery command. | train | https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/supervisor.py#L37-L45 | null | from argyle.base import upload_template
from fabric.api import sudo, task
from fabric.contrib import files
@task
def supervisor_command(command):
"""Run a supervisorctl command."""
sudo(u'supervisorctl %s' % command)
@task
def upload_supervisor_app_conf(app_name, template_name=None, context=None):
"""Upload Supervisor app configuration from a template."""
default = {'app_name': app_name}
context = context or {}
default.update(context)
template_name = template_name or [u'supervisor/%s.conf' % app_name, u'supervisor/base.conf']
destination = u'/etc/supervisor/conf.d/%s.conf' % app_name
upload_template(template_name, destination, context=default, use_sudo=True)
supervisor_command(u'update')
@task
def remove_supervisor_app(app_name):
"""Remove Supervisor app configuration."""
app = u'/etc/supervisor/conf.d/%s.conf' % app_name
if files.exists(app):
sudo(u'rm %s' % app)
supervisor_command(u'update')
@task
@task
def upload_gunicorn_conf(command='gunicorn', app_name=None, template_name=None, context=None):
"""Upload Supervisor configuration for a gunicorn server."""
app_name = app_name or command
default = {'app_name': app_name, 'command': command}
context = context or {}
default.update(context)
template_name = template_name or [u'supervisor/%s.conf' % command, u'supervisor/gunicorn.conf']
upload_supervisor_app_conf(app_name=app_name, template_name=template_name, context=default)
|
mlavin/argyle | argyle/supervisor.py | upload_gunicorn_conf | python | def upload_gunicorn_conf(command='gunicorn', app_name=None, template_name=None, context=None):
app_name = app_name or command
default = {'app_name': app_name, 'command': command}
context = context or {}
default.update(context)
template_name = template_name or [u'supervisor/%s.conf' % command, u'supervisor/gunicorn.conf']
upload_supervisor_app_conf(app_name=app_name, template_name=template_name, context=default) | Upload Supervisor configuration for a gunicorn server. | train | https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/supervisor.py#L49-L57 | null | from argyle.base import upload_template
from fabric.api import sudo, task
from fabric.contrib import files
@task
def supervisor_command(command):
"""Run a supervisorctl command."""
sudo(u'supervisorctl %s' % command)
@task
def upload_supervisor_app_conf(app_name, template_name=None, context=None):
"""Upload Supervisor app configuration from a template."""
default = {'app_name': app_name}
context = context or {}
default.update(context)
template_name = template_name or [u'supervisor/%s.conf' % app_name, u'supervisor/base.conf']
destination = u'/etc/supervisor/conf.d/%s.conf' % app_name
upload_template(template_name, destination, context=default, use_sudo=True)
supervisor_command(u'update')
@task
def remove_supervisor_app(app_name):
"""Remove Supervisor app configuration."""
app = u'/etc/supervisor/conf.d/%s.conf' % app_name
if files.exists(app):
sudo(u'rm %s' % app)
supervisor_command(u'update')
@task
def upload_celery_conf(command='celeryd', app_name=None, template_name=None, context=None):
"""Upload Supervisor configuration for a celery command."""
app_name = app_name or command
default = {'app_name': app_name, 'command': command}
context = context or {}
default.update(context)
template_name = template_name or [u'supervisor/%s.conf' % command, u'supervisor/celery.conf']
upload_supervisor_app_conf(app_name=app_name, template_name=template_name, context=default)
@task
def upload_gunicorn_conf(command='gunicorn', app_name=None, template_name=None, context=None):
"""Upload Supervisor configuration for a gunicorn server."""
app_name = app_name or command
default = {'app_name': app_name, 'command': command}
context = context or {}
default.update(context)
template_name = template_name or [u'supervisor/%s.conf' % command, u'supervisor/gunicorn.conf']
upload_supervisor_app_conf(app_name=app_name, template_name=template_name, context=default)
|
mlavin/argyle | argyle/rabbitmq.py | upload_rabbitmq_environment_conf | python | def upload_rabbitmq_environment_conf(template_name=None, context=None, restart=True):
template_name = template_name or u'rabbitmq/rabbitmq-env.conf'
destination = u'/etc/rabbitmq/rabbitmq-env.conf'
upload_template(template_name, destination, context=context, use_sudo=True)
if restart:
restart_service(u'rabbitmq') | Upload RabbitMQ environment configuration from a template. | train | https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/rabbitmq.py#L35-L42 | [
"def upload_template(filename, destination, context=None,\n use_sudo=False, backup=True, mode=None):\n func = use_sudo and sudo or run\n # Process template\n loaders = []\n template_dirs = getattr(env, 'ARGYLE_TEMPLATE_DIRS', ())\n if template_dirs:\n loaders.append(FileSystemLoader(templat... | from argyle.base import upload_template
from argyle.system import restart_service
from fabric.api import sudo, task
@task
def rabbitmq_command(command):
"""Run a rabbitmqctl command."""
sudo(u'rabbitmqctl %s' % command)
@task
def create_user(username, password):
"""Create a rabbitmq user."""
rabbitmq_command(u'add_user %s %s' % (username, password))
@task
def create_vhost(name):
"""Create a rabbitmq vhost."""
rabbitmq_command(u'add_vhost %s' % name)
@task
def set_vhost_permissions(vhost, username, permissions='".*" ".*" ".*"'):
"""Set permssions for a user on a given vhost."""
rabbitmq_command(u'set_permissions -p %s %s %s' % (vhost, username, permissions))
@task
def upload_rabbitmq_environment_conf(template_name=None, context=None, restart=True):
"""Upload RabbitMQ environment configuration from a template."""
template_name = template_name or u'rabbitmq/rabbitmq-env.conf'
destination = u'/etc/rabbitmq/rabbitmq-env.conf'
upload_template(template_name, destination, context=context, use_sudo=True)
if restart:
restart_service(u'rabbitmq')
@task
def upload_rabbitmq_conf(template_name=None, context=None, restart=True):
"""Upload RabbitMQ configuration from a template."""
template_name = template_name or u'rabbitmq/rabbitmq.config'
destination = u'/etc/rabbitmq/rabbitmq.config'
upload_template(template_name, destination, context=context, use_sudo=True)
if restart:
restart_service(u'rabbitmq')
|
mlavin/argyle | argyle/rabbitmq.py | upload_rabbitmq_conf | python | def upload_rabbitmq_conf(template_name=None, context=None, restart=True):
template_name = template_name or u'rabbitmq/rabbitmq.config'
destination = u'/etc/rabbitmq/rabbitmq.config'
upload_template(template_name, destination, context=context, use_sudo=True)
if restart:
restart_service(u'rabbitmq') | Upload RabbitMQ configuration from a template. | train | https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/rabbitmq.py#L46-L53 | [
"def upload_template(filename, destination, context=None,\n use_sudo=False, backup=True, mode=None):\n func = use_sudo and sudo or run\n # Process template\n loaders = []\n template_dirs = getattr(env, 'ARGYLE_TEMPLATE_DIRS', ())\n if template_dirs:\n loaders.append(FileSystemLoader(templat... | from argyle.base import upload_template
from argyle.system import restart_service
from fabric.api import sudo, task
@task
def rabbitmq_command(command):
"""Run a rabbitmqctl command."""
sudo(u'rabbitmqctl %s' % command)
@task
def create_user(username, password):
"""Create a rabbitmq user."""
rabbitmq_command(u'add_user %s %s' % (username, password))
@task
def create_vhost(name):
"""Create a rabbitmq vhost."""
rabbitmq_command(u'add_vhost %s' % name)
@task
def set_vhost_permissions(vhost, username, permissions='".*" ".*" ".*"'):
"""Set permssions for a user on a given vhost."""
rabbitmq_command(u'set_permissions -p %s %s %s' % (vhost, username, permissions))
@task
def upload_rabbitmq_environment_conf(template_name=None, context=None, restart=True):
"""Upload RabbitMQ environment configuration from a template."""
template_name = template_name or u'rabbitmq/rabbitmq-env.conf'
destination = u'/etc/rabbitmq/rabbitmq-env.conf'
upload_template(template_name, destination, context=context, use_sudo=True)
if restart:
restart_service(u'rabbitmq')
@task
def upload_rabbitmq_conf(template_name=None, context=None, restart=True):
"""Upload RabbitMQ configuration from a template."""
template_name = template_name or u'rabbitmq/rabbitmq.config'
destination = u'/etc/rabbitmq/rabbitmq.config'
upload_template(template_name, destination, context=context, use_sudo=True)
if restart:
restart_service(u'rabbitmq')
|
webadmin87/midnight | midnight_main/templatetags/midnight_main.py | show_menu | python | def show_menu(context, slug, level=2, **kwargs):
try:
menu = Menu.objects.published().get(slug=slug)
menus = menu.get_descendants().all()
mark_current_menus(menus, context['request'].path_info)
max_level = menu.level + level
return {'menus': menus, 'max_level': max_level, 'data': kwargs}
except Menu.DoesNotExist:
return None | Тег для подключения меню
Пример использования::
{% show_menu 'menu_slug' 2 class='menu-class' %}
:param context: контекст шаблона
:param slug: символьный код родительского пункта меню
:param level: максимальный уровень вложенности
:param kwargs: html атрибуты оборачивающего тега
:return: | train | https://github.com/webadmin87/midnight/blob/b60b3b257b4d633550b82a692f3ea3756c62a0a9/midnight_main/templatetags/midnight_main.py#L12-L33 | [
"def mark_current_menus(menus, path_info):\n \"\"\"\n Отмечает активные модели меню (У которых ссылка соответствует текущему path info)\n :param menus: список моделей меню\n :param path_info: path info\n :return:\n \"\"\"\n for menu in menus:\n if menu.get_absolute_url() == \"/\":\n ... | from django import template
from midnight_main.models import Menu, PhotoAlbum, IncludeArea
from django.core.urlresolvers import reverse
import uuid
from django.utils.module_loading import import_string
from midnight_main.services import mark_current_menus
register = template.Library()
register.inclusion_tag(file_name='midnight_main/tags/show_menu.html', takes_context=True, name='show_menu')(show_menu)
def show_gallery(slug, size="100x100", crop="center", **kwargs):
"""
Тег отображения фотогалереи
Пример использования::
{% show_gallery "gallery-slug" "150x110" "center" class='gallery-class' %}
:param slug: символьный код фотогалереи
:param size: размер
:param crop: параметры кропа
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
try:
album = PhotoAlbum.objects.published().get(slug=slug)
photos = album.photo_set.published().order_by('sort').all()
return {'album': album, 'photos': photos, 'size': size, 'crop': crop, 'data': kwargs}
except PhotoAlbum.DoesNotExist:
return None
register.inclusion_tag(file_name='midnight_main/tags/show_gallery.html', name='show_gallery')(show_gallery)
@register.simple_tag()
def show_area(slug):
"""
Подключение включаемой области
Пример использования::
{% show_area "area_slug" %}
:param slug: символьный код области
:return:
"""
try:
area = IncludeArea.objects.published().get(slug=slug)
return area.text
except IncludeArea.DoesNotExist:
return ""
def ajax_form(cls_name, view_name, modal=False, tag_id=None):
"""
Подключение Ajax формы
Пример использования (обратная связь)::
{% ajax_form 'midnight_main.forms.Feedback' 'midnight_main:page_feedback' tag_id='feedback_form' modal=True %}
:param cls_name: имя класса формы
:param view_name: имя представления для обработки формы
:param modal: форма предназначена для отображаения в fancybox и изначально скрыта
:param tag_id: идентификатор оборачивающего тега
:return:
"""
if tag_id is None:
tag_id = uuid.uuid4().hex[:6].lower()
form = import_string(cls_name)()
url = reverse(view_name)
return {'form': form, 'url': url, 'modal': modal, 'id': tag_id}
register.inclusion_tag(file_name='midnight_main/tags/ajax_form.html', name='ajax_form')(ajax_form)
def user_info(context, **kwargs):
"""
Отображает информацию о текущем авторизованом пользователе, либо ссылки на авторизацию и регистрацию
Пример использования::
{% user_info %}
:param context: контекст
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
request = context['request']
return {'user': request.user, 'data': kwargs}
register.inclusion_tag(file_name='midnight_main/tags/user_info.html', takes_context=True, name='user_info')(user_info)
def breadcrumbs(crumbs, **kwargs):
"""
Тег для отображения хлебхых крошек
Пример использования::
{% breadcrumbs crumbs class='breadcrumb' %}
:param crumbs: список хлебных крошек. [{'label': 'Crumb label', 'url': '/crumb-url/'}, ... ]
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
return {'crumbs': crumbs, 'data': kwargs}
register.inclusion_tag(file_name='midnight_main/tags/breadcrumbs.html', name='breadcrumbs')(breadcrumbs)
def comments_block(context, comments, form, obj, url, **kwargs):
"""
Тег для отображения блока комментариев
Пример использования (комментарии к текстовой странице)::
{% comments comments comments_form page "midnight_main:page_comments" %}
:param context: контекст
:param comments: список комментариев
:param form: форма добавления комментария
:param obj: объект к которому привязаны комментарии
:param url: имя представления обрабатывающего создание и получение комментариев
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
data = kwargs
if 'class' in data:
data['class'] += ' comments-block'
else:
data['class'] = 'comments-block'
if context['request'].user.is_authenticated():
del form.fields['captcha']
return {'comments': comments, 'form': form, 'url': url, 'obj': obj, 'data': data}
register.inclusion_tag(file_name='midnight_main/tags/comments.html', name='comments', takes_context=True)(comments_block)
def search_simple_form(context, **kwargs):
"""
Форма поиска
Пример использования::
{% search_simple_form %}
:param context: контекст
:param kwargs: html атрибуты формы
:return:
"""
data = kwargs
if 'action' in data:
del data['action']
if 'method' in data:
del data['method']
return {'query': context['request'].GET.get('q', ''), 'data': data}
register.inclusion_tag(file_name='midnight_main/tags/search.html', takes_context=True, name='search_simple_form')(search_simple_form)
|
webadmin87/midnight | midnight_main/templatetags/midnight_main.py | show_gallery | python | def show_gallery(slug, size="100x100", crop="center", **kwargs):
try:
album = PhotoAlbum.objects.published().get(slug=slug)
photos = album.photo_set.published().order_by('sort').all()
return {'album': album, 'photos': photos, 'size': size, 'crop': crop, 'data': kwargs}
except PhotoAlbum.DoesNotExist:
return None | Тег отображения фотогалереи
Пример использования::
{% show_gallery "gallery-slug" "150x110" "center" class='gallery-class' %}
:param slug: символьный код фотогалереи
:param size: размер
:param crop: параметры кропа
:param kwargs: html атрибуты оборачивающего тега
:return: | train | https://github.com/webadmin87/midnight/blob/b60b3b257b4d633550b82a692f3ea3756c62a0a9/midnight_main/templatetags/midnight_main.py#L38-L58 | null | from django import template
from midnight_main.models import Menu, PhotoAlbum, IncludeArea
from django.core.urlresolvers import reverse
import uuid
from django.utils.module_loading import import_string
from midnight_main.services import mark_current_menus
register = template.Library()
def show_menu(context, slug, level=2, **kwargs):
"""
Тег для подключения меню
Пример использования::
{% show_menu 'menu_slug' 2 class='menu-class' %}
:param context: контекст шаблона
:param slug: символьный код родительского пункта меню
:param level: максимальный уровень вложенности
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
try:
menu = Menu.objects.published().get(slug=slug)
menus = menu.get_descendants().all()
mark_current_menus(menus, context['request'].path_info)
max_level = menu.level + level
return {'menus': menus, 'max_level': max_level, 'data': kwargs}
except Menu.DoesNotExist:
return None
register.inclusion_tag(file_name='midnight_main/tags/show_menu.html', takes_context=True, name='show_menu')(show_menu)
register.inclusion_tag(file_name='midnight_main/tags/show_gallery.html', name='show_gallery')(show_gallery)
@register.simple_tag()
def show_area(slug):
"""
Подключение включаемой области
Пример использования::
{% show_area "area_slug" %}
:param slug: символьный код области
:return:
"""
try:
area = IncludeArea.objects.published().get(slug=slug)
return area.text
except IncludeArea.DoesNotExist:
return ""
def ajax_form(cls_name, view_name, modal=False, tag_id=None):
"""
Подключение Ajax формы
Пример использования (обратная связь)::
{% ajax_form 'midnight_main.forms.Feedback' 'midnight_main:page_feedback' tag_id='feedback_form' modal=True %}
:param cls_name: имя класса формы
:param view_name: имя представления для обработки формы
:param modal: форма предназначена для отображаения в fancybox и изначально скрыта
:param tag_id: идентификатор оборачивающего тега
:return:
"""
if tag_id is None:
tag_id = uuid.uuid4().hex[:6].lower()
form = import_string(cls_name)()
url = reverse(view_name)
return {'form': form, 'url': url, 'modal': modal, 'id': tag_id}
register.inclusion_tag(file_name='midnight_main/tags/ajax_form.html', name='ajax_form')(ajax_form)
def user_info(context, **kwargs):
"""
Отображает информацию о текущем авторизованом пользователе, либо ссылки на авторизацию и регистрацию
Пример использования::
{% user_info %}
:param context: контекст
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
request = context['request']
return {'user': request.user, 'data': kwargs}
register.inclusion_tag(file_name='midnight_main/tags/user_info.html', takes_context=True, name='user_info')(user_info)
def breadcrumbs(crumbs, **kwargs):
"""
Тег для отображения хлебхых крошек
Пример использования::
{% breadcrumbs crumbs class='breadcrumb' %}
:param crumbs: список хлебных крошек. [{'label': 'Crumb label', 'url': '/crumb-url/'}, ... ]
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
return {'crumbs': crumbs, 'data': kwargs}
register.inclusion_tag(file_name='midnight_main/tags/breadcrumbs.html', name='breadcrumbs')(breadcrumbs)
def comments_block(context, comments, form, obj, url, **kwargs):
"""
Тег для отображения блока комментариев
Пример использования (комментарии к текстовой странице)::
{% comments comments comments_form page "midnight_main:page_comments" %}
:param context: контекст
:param comments: список комментариев
:param form: форма добавления комментария
:param obj: объект к которому привязаны комментарии
:param url: имя представления обрабатывающего создание и получение комментариев
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
data = kwargs
if 'class' in data:
data['class'] += ' comments-block'
else:
data['class'] = 'comments-block'
if context['request'].user.is_authenticated():
del form.fields['captcha']
return {'comments': comments, 'form': form, 'url': url, 'obj': obj, 'data': data}
register.inclusion_tag(file_name='midnight_main/tags/comments.html', name='comments', takes_context=True)(comments_block)
def search_simple_form(context, **kwargs):
"""
Форма поиска
Пример использования::
{% search_simple_form %}
:param context: контекст
:param kwargs: html атрибуты формы
:return:
"""
data = kwargs
if 'action' in data:
del data['action']
if 'method' in data:
del data['method']
return {'query': context['request'].GET.get('q', ''), 'data': data}
register.inclusion_tag(file_name='midnight_main/tags/search.html', takes_context=True, name='search_simple_form')(search_simple_form)
|
webadmin87/midnight | midnight_main/templatetags/midnight_main.py | show_area | python | def show_area(slug):
try:
area = IncludeArea.objects.published().get(slug=slug)
return area.text
except IncludeArea.DoesNotExist:
return "" | Подключение включаемой области
Пример использования::
{% show_area "area_slug" %}
:param slug: символьный код области
:return: | train | https://github.com/webadmin87/midnight/blob/b60b3b257b4d633550b82a692f3ea3756c62a0a9/midnight_main/templatetags/midnight_main.py#L64-L79 | null | from django import template
from midnight_main.models import Menu, PhotoAlbum, IncludeArea
from django.core.urlresolvers import reverse
import uuid
from django.utils.module_loading import import_string
from midnight_main.services import mark_current_menus
register = template.Library()
def show_menu(context, slug, level=2, **kwargs):
"""
Тег для подключения меню
Пример использования::
{% show_menu 'menu_slug' 2 class='menu-class' %}
:param context: контекст шаблона
:param slug: символьный код родительского пункта меню
:param level: максимальный уровень вложенности
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
try:
menu = Menu.objects.published().get(slug=slug)
menus = menu.get_descendants().all()
mark_current_menus(menus, context['request'].path_info)
max_level = menu.level + level
return {'menus': menus, 'max_level': max_level, 'data': kwargs}
except Menu.DoesNotExist:
return None
register.inclusion_tag(file_name='midnight_main/tags/show_menu.html', takes_context=True, name='show_menu')(show_menu)
def show_gallery(slug, size="100x100", crop="center", **kwargs):
"""
Тег отображения фотогалереи
Пример использования::
{% show_gallery "gallery-slug" "150x110" "center" class='gallery-class' %}
:param slug: символьный код фотогалереи
:param size: размер
:param crop: параметры кропа
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
try:
album = PhotoAlbum.objects.published().get(slug=slug)
photos = album.photo_set.published().order_by('sort').all()
return {'album': album, 'photos': photos, 'size': size, 'crop': crop, 'data': kwargs}
except PhotoAlbum.DoesNotExist:
return None
register.inclusion_tag(file_name='midnight_main/tags/show_gallery.html', name='show_gallery')(show_gallery)
@register.simple_tag()
def ajax_form(cls_name, view_name, modal=False, tag_id=None):
"""
Подключение Ajax формы
Пример использования (обратная связь)::
{% ajax_form 'midnight_main.forms.Feedback' 'midnight_main:page_feedback' tag_id='feedback_form' modal=True %}
:param cls_name: имя класса формы
:param view_name: имя представления для обработки формы
:param modal: форма предназначена для отображаения в fancybox и изначально скрыта
:param tag_id: идентификатор оборачивающего тега
:return:
"""
if tag_id is None:
tag_id = uuid.uuid4().hex[:6].lower()
form = import_string(cls_name)()
url = reverse(view_name)
return {'form': form, 'url': url, 'modal': modal, 'id': tag_id}
register.inclusion_tag(file_name='midnight_main/tags/ajax_form.html', name='ajax_form')(ajax_form)
def user_info(context, **kwargs):
"""
Отображает информацию о текущем авторизованом пользователе, либо ссылки на авторизацию и регистрацию
Пример использования::
{% user_info %}
:param context: контекст
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
request = context['request']
return {'user': request.user, 'data': kwargs}
register.inclusion_tag(file_name='midnight_main/tags/user_info.html', takes_context=True, name='user_info')(user_info)
def breadcrumbs(crumbs, **kwargs):
"""
Тег для отображения хлебхых крошек
Пример использования::
{% breadcrumbs crumbs class='breadcrumb' %}
:param crumbs: список хлебных крошек. [{'label': 'Crumb label', 'url': '/crumb-url/'}, ... ]
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
return {'crumbs': crumbs, 'data': kwargs}
register.inclusion_tag(file_name='midnight_main/tags/breadcrumbs.html', name='breadcrumbs')(breadcrumbs)
def comments_block(context, comments, form, obj, url, **kwargs):
"""
Тег для отображения блока комментариев
Пример использования (комментарии к текстовой странице)::
{% comments comments comments_form page "midnight_main:page_comments" %}
:param context: контекст
:param comments: список комментариев
:param form: форма добавления комментария
:param obj: объект к которому привязаны комментарии
:param url: имя представления обрабатывающего создание и получение комментариев
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
data = kwargs
if 'class' in data:
data['class'] += ' comments-block'
else:
data['class'] = 'comments-block'
if context['request'].user.is_authenticated():
del form.fields['captcha']
return {'comments': comments, 'form': form, 'url': url, 'obj': obj, 'data': data}
register.inclusion_tag(file_name='midnight_main/tags/comments.html', name='comments', takes_context=True)(comments_block)
def search_simple_form(context, **kwargs):
"""
Форма поиска
Пример использования::
{% search_simple_form %}
:param context: контекст
:param kwargs: html атрибуты формы
:return:
"""
data = kwargs
if 'action' in data:
del data['action']
if 'method' in data:
del data['method']
return {'query': context['request'].GET.get('q', ''), 'data': data}
register.inclusion_tag(file_name='midnight_main/tags/search.html', takes_context=True, name='search_simple_form')(search_simple_form)
|
webadmin87/midnight | midnight_main/templatetags/midnight_main.py | ajax_form | python | def ajax_form(cls_name, view_name, modal=False, tag_id=None):
if tag_id is None:
tag_id = uuid.uuid4().hex[:6].lower()
form = import_string(cls_name)()
url = reverse(view_name)
return {'form': form, 'url': url, 'modal': modal, 'id': tag_id} | Подключение Ajax формы
Пример использования (обратная связь)::
{% ajax_form 'midnight_main.forms.Feedback' 'midnight_main:page_feedback' tag_id='feedback_form' modal=True %}
:param cls_name: имя класса формы
:param view_name: имя представления для обработки формы
:param modal: форма предназначена для отображаения в fancybox и изначально скрыта
:param tag_id: идентификатор оборачивающего тега
:return: | train | https://github.com/webadmin87/midnight/blob/b60b3b257b4d633550b82a692f3ea3756c62a0a9/midnight_main/templatetags/midnight_main.py#L82-L100 | null | from django import template
from midnight_main.models import Menu, PhotoAlbum, IncludeArea
from django.core.urlresolvers import reverse
import uuid
from django.utils.module_loading import import_string
from midnight_main.services import mark_current_menus
register = template.Library()
def show_menu(context, slug, level=2, **kwargs):
"""
Тег для подключения меню
Пример использования::
{% show_menu 'menu_slug' 2 class='menu-class' %}
:param context: контекст шаблона
:param slug: символьный код родительского пункта меню
:param level: максимальный уровень вложенности
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
try:
menu = Menu.objects.published().get(slug=slug)
menus = menu.get_descendants().all()
mark_current_menus(menus, context['request'].path_info)
max_level = menu.level + level
return {'menus': menus, 'max_level': max_level, 'data': kwargs}
except Menu.DoesNotExist:
return None
register.inclusion_tag(file_name='midnight_main/tags/show_menu.html', takes_context=True, name='show_menu')(show_menu)
def show_gallery(slug, size="100x100", crop="center", **kwargs):
"""
Тег отображения фотогалереи
Пример использования::
{% show_gallery "gallery-slug" "150x110" "center" class='gallery-class' %}
:param slug: символьный код фотогалереи
:param size: размер
:param crop: параметры кропа
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
try:
album = PhotoAlbum.objects.published().get(slug=slug)
photos = album.photo_set.published().order_by('sort').all()
return {'album': album, 'photos': photos, 'size': size, 'crop': crop, 'data': kwargs}
except PhotoAlbum.DoesNotExist:
return None
register.inclusion_tag(file_name='midnight_main/tags/show_gallery.html', name='show_gallery')(show_gallery)
@register.simple_tag()
def show_area(slug):
"""
Подключение включаемой области
Пример использования::
{% show_area "area_slug" %}
:param slug: символьный код области
:return:
"""
try:
area = IncludeArea.objects.published().get(slug=slug)
return area.text
except IncludeArea.DoesNotExist:
return ""
register.inclusion_tag(file_name='midnight_main/tags/ajax_form.html', name='ajax_form')(ajax_form)
def user_info(context, **kwargs):
"""
Отображает информацию о текущем авторизованом пользователе, либо ссылки на авторизацию и регистрацию
Пример использования::
{% user_info %}
:param context: контекст
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
request = context['request']
return {'user': request.user, 'data': kwargs}
register.inclusion_tag(file_name='midnight_main/tags/user_info.html', takes_context=True, name='user_info')(user_info)
def breadcrumbs(crumbs, **kwargs):
"""
Тег для отображения хлебхых крошек
Пример использования::
{% breadcrumbs crumbs class='breadcrumb' %}
:param crumbs: список хлебных крошек. [{'label': 'Crumb label', 'url': '/crumb-url/'}, ... ]
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
return {'crumbs': crumbs, 'data': kwargs}
register.inclusion_tag(file_name='midnight_main/tags/breadcrumbs.html', name='breadcrumbs')(breadcrumbs)
def comments_block(context, comments, form, obj, url, **kwargs):
"""
Тег для отображения блока комментариев
Пример использования (комментарии к текстовой странице)::
{% comments comments comments_form page "midnight_main:page_comments" %}
:param context: контекст
:param comments: список комментариев
:param form: форма добавления комментария
:param obj: объект к которому привязаны комментарии
:param url: имя представления обрабатывающего создание и получение комментариев
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
data = kwargs
if 'class' in data:
data['class'] += ' comments-block'
else:
data['class'] = 'comments-block'
if context['request'].user.is_authenticated():
del form.fields['captcha']
return {'comments': comments, 'form': form, 'url': url, 'obj': obj, 'data': data}
register.inclusion_tag(file_name='midnight_main/tags/comments.html', name='comments', takes_context=True)(comments_block)
def search_simple_form(context, **kwargs):
"""
Форма поиска
Пример использования::
{% search_simple_form %}
:param context: контекст
:param kwargs: html атрибуты формы
:return:
"""
data = kwargs
if 'action' in data:
del data['action']
if 'method' in data:
del data['method']
return {'query': context['request'].GET.get('q', ''), 'data': data}
register.inclusion_tag(file_name='midnight_main/tags/search.html', takes_context=True, name='search_simple_form')(search_simple_form)
|
webadmin87/midnight | midnight_main/templatetags/midnight_main.py | comments_block | python | def comments_block(context, comments, form, obj, url, **kwargs):
data = kwargs
if 'class' in data:
data['class'] += ' comments-block'
else:
data['class'] = 'comments-block'
if context['request'].user.is_authenticated():
del form.fields['captcha']
return {'comments': comments, 'form': form, 'url': url, 'obj': obj, 'data': data} | Тег для отображения блока комментариев
Пример использования (комментарии к текстовой странице)::
{% comments comments comments_form page "midnight_main:page_comments" %}
:param context: контекст
:param comments: список комментариев
:param form: форма добавления комментария
:param obj: объект к которому привязаны комментарии
:param url: имя представления обрабатывающего создание и получение комментариев
:param kwargs: html атрибуты оборачивающего тега
:return: | train | https://github.com/webadmin87/midnight/blob/b60b3b257b4d633550b82a692f3ea3756c62a0a9/midnight_main/templatetags/midnight_main.py#L140-L167 | null | from django import template
from midnight_main.models import Menu, PhotoAlbum, IncludeArea
from django.core.urlresolvers import reverse
import uuid
from django.utils.module_loading import import_string
from midnight_main.services import mark_current_menus
register = template.Library()
def show_menu(context, slug, level=2, **kwargs):
"""
Тег для подключения меню
Пример использования::
{% show_menu 'menu_slug' 2 class='menu-class' %}
:param context: контекст шаблона
:param slug: символьный код родительского пункта меню
:param level: максимальный уровень вложенности
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
try:
menu = Menu.objects.published().get(slug=slug)
menus = menu.get_descendants().all()
mark_current_menus(menus, context['request'].path_info)
max_level = menu.level + level
return {'menus': menus, 'max_level': max_level, 'data': kwargs}
except Menu.DoesNotExist:
return None
register.inclusion_tag(file_name='midnight_main/tags/show_menu.html', takes_context=True, name='show_menu')(show_menu)
def show_gallery(slug, size="100x100", crop="center", **kwargs):
"""
Тег отображения фотогалереи
Пример использования::
{% show_gallery "gallery-slug" "150x110" "center" class='gallery-class' %}
:param slug: символьный код фотогалереи
:param size: размер
:param crop: параметры кропа
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
try:
album = PhotoAlbum.objects.published().get(slug=slug)
photos = album.photo_set.published().order_by('sort').all()
return {'album': album, 'photos': photos, 'size': size, 'crop': crop, 'data': kwargs}
except PhotoAlbum.DoesNotExist:
return None
register.inclusion_tag(file_name='midnight_main/tags/show_gallery.html', name='show_gallery')(show_gallery)
@register.simple_tag()
def show_area(slug):
"""
Подключение включаемой области
Пример использования::
{% show_area "area_slug" %}
:param slug: символьный код области
:return:
"""
try:
area = IncludeArea.objects.published().get(slug=slug)
return area.text
except IncludeArea.DoesNotExist:
return ""
def ajax_form(cls_name, view_name, modal=False, tag_id=None):
"""
Подключение Ajax формы
Пример использования (обратная связь)::
{% ajax_form 'midnight_main.forms.Feedback' 'midnight_main:page_feedback' tag_id='feedback_form' modal=True %}
:param cls_name: имя класса формы
:param view_name: имя представления для обработки формы
:param modal: форма предназначена для отображаения в fancybox и изначально скрыта
:param tag_id: идентификатор оборачивающего тега
:return:
"""
if tag_id is None:
tag_id = uuid.uuid4().hex[:6].lower()
form = import_string(cls_name)()
url = reverse(view_name)
return {'form': form, 'url': url, 'modal': modal, 'id': tag_id}
register.inclusion_tag(file_name='midnight_main/tags/ajax_form.html', name='ajax_form')(ajax_form)
def user_info(context, **kwargs):
"""
Отображает информацию о текущем авторизованом пользователе, либо ссылки на авторизацию и регистрацию
Пример использования::
{% user_info %}
:param context: контекст
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
request = context['request']
return {'user': request.user, 'data': kwargs}
register.inclusion_tag(file_name='midnight_main/tags/user_info.html', takes_context=True, name='user_info')(user_info)
def breadcrumbs(crumbs, **kwargs):
"""
Тег для отображения хлебхых крошек
Пример использования::
{% breadcrumbs crumbs class='breadcrumb' %}
:param crumbs: список хлебных крошек. [{'label': 'Crumb label', 'url': '/crumb-url/'}, ... ]
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
return {'crumbs': crumbs, 'data': kwargs}
register.inclusion_tag(file_name='midnight_main/tags/breadcrumbs.html', name='breadcrumbs')(breadcrumbs)
register.inclusion_tag(file_name='midnight_main/tags/comments.html', name='comments', takes_context=True)(comments_block)
def search_simple_form(context, **kwargs):
"""
Форма поиска
Пример использования::
{% search_simple_form %}
:param context: контекст
:param kwargs: html атрибуты формы
:return:
"""
data = kwargs
if 'action' in data:
del data['action']
if 'method' in data:
del data['method']
return {'query': context['request'].GET.get('q', ''), 'data': data}
register.inclusion_tag(file_name='midnight_main/tags/search.html', takes_context=True, name='search_simple_form')(search_simple_form)
|
webadmin87/midnight | midnight_main/templatetags/midnight_main.py | search_simple_form | python | def search_simple_form(context, **kwargs):
data = kwargs
if 'action' in data:
del data['action']
if 'method' in data:
del data['method']
return {'query': context['request'].GET.get('q', ''), 'data': data} | Форма поиска
Пример использования::
{% search_simple_form %}
:param context: контекст
:param kwargs: html атрибуты формы
:return: | train | https://github.com/webadmin87/midnight/blob/b60b3b257b4d633550b82a692f3ea3756c62a0a9/midnight_main/templatetags/midnight_main.py#L172-L193 | null | from django import template
from midnight_main.models import Menu, PhotoAlbum, IncludeArea
from django.core.urlresolvers import reverse
import uuid
from django.utils.module_loading import import_string
from midnight_main.services import mark_current_menus
register = template.Library()
def show_menu(context, slug, level=2, **kwargs):
"""
Тег для подключения меню
Пример использования::
{% show_menu 'menu_slug' 2 class='menu-class' %}
:param context: контекст шаблона
:param slug: символьный код родительского пункта меню
:param level: максимальный уровень вложенности
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
try:
menu = Menu.objects.published().get(slug=slug)
menus = menu.get_descendants().all()
mark_current_menus(menus, context['request'].path_info)
max_level = menu.level + level
return {'menus': menus, 'max_level': max_level, 'data': kwargs}
except Menu.DoesNotExist:
return None
register.inclusion_tag(file_name='midnight_main/tags/show_menu.html', takes_context=True, name='show_menu')(show_menu)
def show_gallery(slug, size="100x100", crop="center", **kwargs):
"""
Тег отображения фотогалереи
Пример использования::
{% show_gallery "gallery-slug" "150x110" "center" class='gallery-class' %}
:param slug: символьный код фотогалереи
:param size: размер
:param crop: параметры кропа
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
try:
album = PhotoAlbum.objects.published().get(slug=slug)
photos = album.photo_set.published().order_by('sort').all()
return {'album': album, 'photos': photos, 'size': size, 'crop': crop, 'data': kwargs}
except PhotoAlbum.DoesNotExist:
return None
register.inclusion_tag(file_name='midnight_main/tags/show_gallery.html', name='show_gallery')(show_gallery)
@register.simple_tag()
def show_area(slug):
"""
Подключение включаемой области
Пример использования::
{% show_area "area_slug" %}
:param slug: символьный код области
:return:
"""
try:
area = IncludeArea.objects.published().get(slug=slug)
return area.text
except IncludeArea.DoesNotExist:
return ""
def ajax_form(cls_name, view_name, modal=False, tag_id=None):
"""
Подключение Ajax формы
Пример использования (обратная связь)::
{% ajax_form 'midnight_main.forms.Feedback' 'midnight_main:page_feedback' tag_id='feedback_form' modal=True %}
:param cls_name: имя класса формы
:param view_name: имя представления для обработки формы
:param modal: форма предназначена для отображаения в fancybox и изначально скрыта
:param tag_id: идентификатор оборачивающего тега
:return:
"""
if tag_id is None:
tag_id = uuid.uuid4().hex[:6].lower()
form = import_string(cls_name)()
url = reverse(view_name)
return {'form': form, 'url': url, 'modal': modal, 'id': tag_id}
register.inclusion_tag(file_name='midnight_main/tags/ajax_form.html', name='ajax_form')(ajax_form)
def user_info(context, **kwargs):
"""
Отображает информацию о текущем авторизованом пользователе, либо ссылки на авторизацию и регистрацию
Пример использования::
{% user_info %}
:param context: контекст
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
request = context['request']
return {'user': request.user, 'data': kwargs}
register.inclusion_tag(file_name='midnight_main/tags/user_info.html', takes_context=True, name='user_info')(user_info)
def breadcrumbs(crumbs, **kwargs):
"""
Тег для отображения хлебхых крошек
Пример использования::
{% breadcrumbs crumbs class='breadcrumb' %}
:param crumbs: список хлебных крошек. [{'label': 'Crumb label', 'url': '/crumb-url/'}, ... ]
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
return {'crumbs': crumbs, 'data': kwargs}
register.inclusion_tag(file_name='midnight_main/tags/breadcrumbs.html', name='breadcrumbs')(breadcrumbs)
def comments_block(context, comments, form, obj, url, **kwargs):
"""
Тег для отображения блока комментариев
Пример использования (комментарии к текстовой странице)::
{% comments comments comments_form page "midnight_main:page_comments" %}
:param context: контекст
:param comments: список комментариев
:param form: форма добавления комментария
:param obj: объект к которому привязаны комментарии
:param url: имя представления обрабатывающего создание и получение комментариев
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
data = kwargs
if 'class' in data:
data['class'] += ' comments-block'
else:
data['class'] = 'comments-block'
if context['request'].user.is_authenticated():
del form.fields['captcha']
return {'comments': comments, 'form': form, 'url': url, 'obj': obj, 'data': data}
register.inclusion_tag(file_name='midnight_main/tags/comments.html', name='comments', takes_context=True)(comments_block)
register.inclusion_tag(file_name='midnight_main/tags/search.html', takes_context=True, name='search_simple_form')(search_simple_form)
|
webadmin87/midnight | midnight_catalog/templatetags/midnight_catalog.py | param_value | python | def param_value(param_values, slug):
for val in param_values:
if val.param.slug == slug:
return val.value
return None | Отображает значение параметра товара
Пример использования::
{% param_value item.paramvalue_set.all "producer" %}
:param param_values: список значений параметров
:param slug: символьный код параметра
:return: | train | https://github.com/webadmin87/midnight/blob/b60b3b257b4d633550b82a692f3ea3756c62a0a9/midnight_catalog/templatetags/midnight_catalog.py#L13-L28 | null | from django import template
from django.contrib.humanize.templatetags.humanize import intcomma
from django.conf import settings
from django.db.models import Count, Case, When, Value, IntegerField
from midnight_catalog.models import Section
from midnight_main.services import mark_current_menus
register = template.Library()
@register.simple_tag()
@register.simple_tag()
def param_title(param_values, slug):
"""
Отображает наименование параметра товара
Пример использования::
{% param_title item.paramvalue_set.all "producer" %}
:param param_values: список значений параметров
:param slug: символьный код параметра
:return:
"""
for val in param_values:
if val.param.slug == slug:
return val.param.title
return None
@register.filter()
def currency(money):
"""
Фильтр валюты. Форматирует цену в соответствии с установленным количеством знаков после запятой,
а также добавлеят символ валюты.
:param money:
:return:
"""
decimals = getattr(settings, 'MIDNIGHT_CATALOG_DECIMALS', 2)
money = round(float(money), decimals)
symbol = getattr(settings, 'MIDNIGHT_CATALOG_CURRENCY', 'руб')
if decimals > 0:
formatted = (str("%0."+str(decimals)+"f") % money)[-decimals-1:]
else:
formatted = ""
return "%s%s %s" % (intcomma(int(money)), formatted, symbol)
def catalog_sections(context, slug=None, level=3, **kwargs):
"""
Отображает иерерхический список категорий каталога.
Для каждой категории отображается количество содержащегося в ней товара.
Пример использования::
{% catalog_sections 'section_slug' 2 class='catalog-class' %}
:param context: контекст
:param slug: символьный код родительской категории, если не задан, отображается вся иерархия
:param level: отображаемый уровень вложенности
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
count_products = Count(Case(When(product__active=True, then=Value(1)), output_field=IntegerField()))
if slug is None:
sections = Section.objects.annotate(product__count=count_products).all()
max_level = level - 1
else:
section = Section.objects.get(slug=slug)
sections = section.get_descendants().annotate(product__count=count_products).all()
max_level = section.level + level
mark_current_menus(sections, context['request'].path_info)
return {'sections': sections, 'max_level': max_level, 'data': kwargs}
register.inclusion_tag(file_name='midnight_catalog/tags/catalog_sections.html', takes_context=True, name='catalog_sections')(catalog_sections)
|
webadmin87/midnight | midnight_catalog/templatetags/midnight_catalog.py | param_title | python | def param_title(param_values, slug):
for val in param_values:
if val.param.slug == slug:
return val.param.title
return None | Отображает наименование параметра товара
Пример использования::
{% param_title item.paramvalue_set.all "producer" %}
:param param_values: список значений параметров
:param slug: символьный код параметра
:return: | train | https://github.com/webadmin87/midnight/blob/b60b3b257b4d633550b82a692f3ea3756c62a0a9/midnight_catalog/templatetags/midnight_catalog.py#L32-L47 | null | from django import template
from django.contrib.humanize.templatetags.humanize import intcomma
from django.conf import settings
from django.db.models import Count, Case, When, Value, IntegerField
from midnight_catalog.models import Section
from midnight_main.services import mark_current_menus
register = template.Library()
@register.simple_tag()
def param_value(param_values, slug):
"""
Отображает значение параметра товара
Пример использования::
{% param_value item.paramvalue_set.all "producer" %}
:param param_values: список значений параметров
:param slug: символьный код параметра
:return:
"""
for val in param_values:
if val.param.slug == slug:
return val.value
return None
@register.simple_tag()
@register.filter()
def currency(money):
"""
Фильтр валюты. Форматирует цену в соответствии с установленным количеством знаков после запятой,
а также добавлеят символ валюты.
:param money:
:return:
"""
decimals = getattr(settings, 'MIDNIGHT_CATALOG_DECIMALS', 2)
money = round(float(money), decimals)
symbol = getattr(settings, 'MIDNIGHT_CATALOG_CURRENCY', 'руб')
if decimals > 0:
formatted = (str("%0."+str(decimals)+"f") % money)[-decimals-1:]
else:
formatted = ""
return "%s%s %s" % (intcomma(int(money)), formatted, symbol)
def catalog_sections(context, slug=None, level=3, **kwargs):
"""
Отображает иерерхический список категорий каталога.
Для каждой категории отображается количество содержащегося в ней товара.
Пример использования::
{% catalog_sections 'section_slug' 2 class='catalog-class' %}
:param context: контекст
:param slug: символьный код родительской категории, если не задан, отображается вся иерархия
:param level: отображаемый уровень вложенности
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
count_products = Count(Case(When(product__active=True, then=Value(1)), output_field=IntegerField()))
if slug is None:
sections = Section.objects.annotate(product__count=count_products).all()
max_level = level - 1
else:
section = Section.objects.get(slug=slug)
sections = section.get_descendants().annotate(product__count=count_products).all()
max_level = section.level + level
mark_current_menus(sections, context['request'].path_info)
return {'sections': sections, 'max_level': max_level, 'data': kwargs}
register.inclusion_tag(file_name='midnight_catalog/tags/catalog_sections.html', takes_context=True, name='catalog_sections')(catalog_sections)
|
webadmin87/midnight | midnight_catalog/templatetags/midnight_catalog.py | currency | python | def currency(money):
decimals = getattr(settings, 'MIDNIGHT_CATALOG_DECIMALS', 2)
money = round(float(money), decimals)
symbol = getattr(settings, 'MIDNIGHT_CATALOG_CURRENCY', 'руб')
if decimals > 0:
formatted = (str("%0."+str(decimals)+"f") % money)[-decimals-1:]
else:
formatted = ""
return "%s%s %s" % (intcomma(int(money)), formatted, symbol) | Фильтр валюты. Форматирует цену в соответствии с установленным количеством знаков после запятой,
а также добавлеят символ валюты.
:param money:
:return: | train | https://github.com/webadmin87/midnight/blob/b60b3b257b4d633550b82a692f3ea3756c62a0a9/midnight_catalog/templatetags/midnight_catalog.py#L51-L65 | null | from django import template
from django.contrib.humanize.templatetags.humanize import intcomma
from django.conf import settings
from django.db.models import Count, Case, When, Value, IntegerField
from midnight_catalog.models import Section
from midnight_main.services import mark_current_menus
register = template.Library()
@register.simple_tag()
def param_value(param_values, slug):
"""
Отображает значение параметра товара
Пример использования::
{% param_value item.paramvalue_set.all "producer" %}
:param param_values: список значений параметров
:param slug: символьный код параметра
:return:
"""
for val in param_values:
if val.param.slug == slug:
return val.value
return None
@register.simple_tag()
def param_title(param_values, slug):
"""
Отображает наименование параметра товара
Пример использования::
{% param_title item.paramvalue_set.all "producer" %}
:param param_values: список значений параметров
:param slug: символьный код параметра
:return:
"""
for val in param_values:
if val.param.slug == slug:
return val.param.title
return None
@register.filter()
def catalog_sections(context, slug=None, level=3, **kwargs):
"""
Отображает иерерхический список категорий каталога.
Для каждой категории отображается количество содержащегося в ней товара.
Пример использования::
{% catalog_sections 'section_slug' 2 class='catalog-class' %}
:param context: контекст
:param slug: символьный код родительской категории, если не задан, отображается вся иерархия
:param level: отображаемый уровень вложенности
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
count_products = Count(Case(When(product__active=True, then=Value(1)), output_field=IntegerField()))
if slug is None:
sections = Section.objects.annotate(product__count=count_products).all()
max_level = level - 1
else:
section = Section.objects.get(slug=slug)
sections = section.get_descendants().annotate(product__count=count_products).all()
max_level = section.level + level
mark_current_menus(sections, context['request'].path_info)
return {'sections': sections, 'max_level': max_level, 'data': kwargs}
register.inclusion_tag(file_name='midnight_catalog/tags/catalog_sections.html', takes_context=True, name='catalog_sections')(catalog_sections)
|
webadmin87/midnight | midnight_catalog/templatetags/midnight_catalog.py | catalog_sections | python | def catalog_sections(context, slug=None, level=3, **kwargs):
count_products = Count(Case(When(product__active=True, then=Value(1)), output_field=IntegerField()))
if slug is None:
sections = Section.objects.annotate(product__count=count_products).all()
max_level = level - 1
else:
section = Section.objects.get(slug=slug)
sections = section.get_descendants().annotate(product__count=count_products).all()
max_level = section.level + level
mark_current_menus(sections, context['request'].path_info)
return {'sections': sections, 'max_level': max_level, 'data': kwargs} | Отображает иерерхический список категорий каталога.
Для каждой категории отображается количество содержащегося в ней товара.
Пример использования::
{% catalog_sections 'section_slug' 2 class='catalog-class' %}
:param context: контекст
:param slug: символьный код родительской категории, если не задан, отображается вся иерархия
:param level: отображаемый уровень вложенности
:param kwargs: html атрибуты оборачивающего тега
:return: | train | https://github.com/webadmin87/midnight/blob/b60b3b257b4d633550b82a692f3ea3756c62a0a9/midnight_catalog/templatetags/midnight_catalog.py#L68-L92 | [
"def mark_current_menus(menus, path_info):\n \"\"\"\n Отмечает активные модели меню (У которых ссылка соответствует текущему path info)\n :param menus: список моделей меню\n :param path_info: path info\n :return:\n \"\"\"\n for menu in menus:\n if menu.get_absolute_url() == \"/\":\n ... | from django import template
from django.contrib.humanize.templatetags.humanize import intcomma
from django.conf import settings
from django.db.models import Count, Case, When, Value, IntegerField
from midnight_catalog.models import Section
from midnight_main.services import mark_current_menus
register = template.Library()
@register.simple_tag()
def param_value(param_values, slug):
"""
Отображает значение параметра товара
Пример использования::
{% param_value item.paramvalue_set.all "producer" %}
:param param_values: список значений параметров
:param slug: символьный код параметра
:return:
"""
for val in param_values:
if val.param.slug == slug:
return val.value
return None
@register.simple_tag()
def param_title(param_values, slug):
"""
Отображает наименование параметра товара
Пример использования::
{% param_title item.paramvalue_set.all "producer" %}
:param param_values: список значений параметров
:param slug: символьный код параметра
:return:
"""
for val in param_values:
if val.param.slug == slug:
return val.param.title
return None
@register.filter()
def currency(money):
"""
Фильтр валюты. Форматирует цену в соответствии с установленным количеством знаков после запятой,
а также добавлеят символ валюты.
:param money:
:return:
"""
decimals = getattr(settings, 'MIDNIGHT_CATALOG_DECIMALS', 2)
money = round(float(money), decimals)
symbol = getattr(settings, 'MIDNIGHT_CATALOG_CURRENCY', 'руб')
if decimals > 0:
formatted = (str("%0."+str(decimals)+"f") % money)[-decimals-1:]
else:
formatted = ""
return "%s%s %s" % (intcomma(int(money)), formatted, symbol)
register.inclusion_tag(file_name='midnight_catalog/tags/catalog_sections.html', takes_context=True, name='catalog_sections')(catalog_sections)
|
webadmin87/midnight | midnight_main/views.py | pages | python | def pages(request, path=None, instance=None):
if instance and instance.active:
p = instance
else:
raise Http404()
return render(request, p.tpl, get_page_tpl_ctx(p, request)) | Представление для отображения текстовых страниц
:param request: запрос
:param path: адрес
:param instance: страница
:return: | train | https://github.com/webadmin87/midnight/blob/b60b3b257b4d633550b82a692f3ea3756c62a0a9/midnight_main/views.py#L39-L53 | [
"def get_page_tpl_ctx(page, request):\n \"\"\"\n Возвращает контекст для рендеринга представления текстовой страницы\n :param page: модель страницы\n :param request: запрос\n :return:\n \"\"\"\n text = Template(page.text).render(Context())\n meta = MetaSeo(page)\n comments = get_object_co... | from django.core.urlresolvers import reverse
from django.http import Http404
from django.shortcuts import render, get_object_or_404
from midnight_main.forms import Feedback, Profile
from midnight_main.mailer import send_templated_mail
from midnight_main.services import get_page_tpl_ctx, get_object_comments, post_comment
from .models import Page, AppUser
from django.utils.translation import ugettext_lazy as _
from django.views.generic.edit import UpdateView
from django.contrib.auth import update_session_auth_hash
from django.views.generic import View
class UpdateProfile(UpdateView):
"""
Изменение профиля пользователя
"""
model = AppUser
form_class = Profile
template_name = 'midnight_main/users/appuser_update_form.html'
def get_success_url(self):
return reverse('midnight_main:user_profile')
def form_valid(self, form):
if form.data['password_change']:
user = form.save(commit=False)
user.set_password(form.data['password_change'])
update_session_auth_hash(self.request, user)
return super().form_valid(form)
def get_object(self, queryset=None):
return self.request.user
def main_page(request):
"""
Представление для отображения главной страницы
:param request: запрос
:return:
"""
p = get_object_or_404(Page, slug='main', active=True)
return render(request, p.tpl, get_page_tpl_ctx(p, request))
class FeedbackView(View):
"""
Представление для обратной связи
"""
form_cls = None
subject = 'Feedback message'
mail_tpl = 'midnight_main/mails/feedback.html'
form_tpl = 'midnight_main/tags/ajax_form_body.html'
def post(self, request):
form = self.form_cls(request.POST)
if form.is_valid():
send_templated_mail(self.mail_tpl, _(self.subject), form)
status = 200
else:
status = 422
return render(request, self.form_tpl, {'form': form}, status=status)
class CommentView(View):
"""
Представление для добавления и отображения комментариев
"""
model_cls = None
"""Класс модели комментариев"""
form_cls = None
"""Класс формы добавления комментариев"""
list_tpl = 'midnight_main/tags/comments_list.html'
form_tpl = 'midnight_main/tags/comments_form_body.html'
def get(self, request):
comments = get_object_comments(self.model_cls, request.GET.get('id', 0))
return render(request, self.list_tpl, {'comments': comments})
def post(self, request):
form = self.form_cls(request.POST)
if request.user.is_authenticated():
del form.fields['captcha']
res = post_comment(form, request.user)
status = 200 if res else 422
return render(request, self.form_tpl, {'form': form}, status=status)
|
webadmin87/midnight | midnight_main/views.py | main_page | python | def main_page(request):
p = get_object_or_404(Page, slug='main', active=True)
return render(request, p.tpl, get_page_tpl_ctx(p, request)) | Представление для отображения главной страницы
:param request: запрос
:return: | train | https://github.com/webadmin87/midnight/blob/b60b3b257b4d633550b82a692f3ea3756c62a0a9/midnight_main/views.py#L56-L63 | [
"def get_page_tpl_ctx(page, request):\n \"\"\"\n Возвращает контекст для рендеринга представления текстовой страницы\n :param page: модель страницы\n :param request: запрос\n :return:\n \"\"\"\n text = Template(page.text).render(Context())\n meta = MetaSeo(page)\n comments = get_object_co... | from django.core.urlresolvers import reverse
from django.http import Http404
from django.shortcuts import render, get_object_or_404
from midnight_main.forms import Feedback, Profile
from midnight_main.mailer import send_templated_mail
from midnight_main.services import get_page_tpl_ctx, get_object_comments, post_comment
from .models import Page, AppUser
from django.utils.translation import ugettext_lazy as _
from django.views.generic.edit import UpdateView
from django.contrib.auth import update_session_auth_hash
from django.views.generic import View
class UpdateProfile(UpdateView):
"""
Изменение профиля пользователя
"""
model = AppUser
form_class = Profile
template_name = 'midnight_main/users/appuser_update_form.html'
def get_success_url(self):
return reverse('midnight_main:user_profile')
def form_valid(self, form):
if form.data['password_change']:
user = form.save(commit=False)
user.set_password(form.data['password_change'])
update_session_auth_hash(self.request, user)
return super().form_valid(form)
def get_object(self, queryset=None):
return self.request.user
def pages(request, path=None, instance=None):
"""
Представление для отображения текстовых страниц
:param request: запрос
:param path: адрес
:param instance: страница
:return:
"""
if instance and instance.active:
p = instance
else:
raise Http404()
return render(request, p.tpl, get_page_tpl_ctx(p, request))
class FeedbackView(View):
"""
Представление для обратной связи
"""
form_cls = None
subject = 'Feedback message'
mail_tpl = 'midnight_main/mails/feedback.html'
form_tpl = 'midnight_main/tags/ajax_form_body.html'
def post(self, request):
form = self.form_cls(request.POST)
if form.is_valid():
send_templated_mail(self.mail_tpl, _(self.subject), form)
status = 200
else:
status = 422
return render(request, self.form_tpl, {'form': form}, status=status)
class CommentView(View):
"""
Представление для добавления и отображения комментариев
"""
model_cls = None
"""Класс модели комментариев"""
form_cls = None
"""Класс формы добавления комментариев"""
list_tpl = 'midnight_main/tags/comments_list.html'
form_tpl = 'midnight_main/tags/comments_form_body.html'
def get(self, request):
comments = get_object_comments(self.model_cls, request.GET.get('id', 0))
return render(request, self.list_tpl, {'comments': comments})
def post(self, request):
form = self.form_cls(request.POST)
if request.user.is_authenticated():
del form.fields['captcha']
res = post_comment(form, request.user)
status = 200 if res else 422
return render(request, self.form_tpl, {'form': form}, status=status)
|
webadmin87/midnight | midnight_catalog/services.py | get_all | python | def get_all(slug=None):
if slug is None:
q = Product.objects.published()
else:
q = Product.objects.published().filter(sections__slug=slug)
if getattr(settings, 'MIDNIGHT_CATALOG_PREFETCH_PARAMS', False):
q = q.prefetch_related(Prefetch("paramvalue_set", queryset=ParamValue.objects.published().order_by('sort').prefetch_related("param")))
q = q.prefetch_related('sections').order_by('sort', '-id')
return q.all() | Возвращает QuerySet содержащий выборку товаров
:param slug: символьный код категории каталога, если не задан выбираются товары в не зависимости от категории
:return: | train | https://github.com/webadmin87/midnight/blob/b60b3b257b4d633550b82a692f3ea3756c62a0a9/midnight_catalog/services.py#L6-L22 | null | from django.conf import settings
from django.db.models import Prefetch
from midnight_catalog.models import Product, ParamValue
def get_one(slug):
"""
Возвращает один товар
:param slug: символьный код товара
:return:
"""
item = Product.objects.published()\
.prefetch_related(Prefetch("paramvalue_set", queryset=ParamValue.objects.published().order_by('sort').prefetch_related("param")))\
.filter(slug=slug).first()
return item
|
webadmin87/midnight | midnight_catalog/services.py | get_one | python | def get_one(slug):
item = Product.objects.published()\
.prefetch_related(Prefetch("paramvalue_set", queryset=ParamValue.objects.published().order_by('sort').prefetch_related("param")))\
.filter(slug=slug).first()
return item | Возвращает один товар
:param slug: символьный код товара
:return: | train | https://github.com/webadmin87/midnight/blob/b60b3b257b4d633550b82a692f3ea3756c62a0a9/midnight_catalog/services.py#L25-L34 | null | from django.conf import settings
from django.db.models import Prefetch
from midnight_catalog.models import Product, ParamValue
def get_all(slug=None):
"""
Возвращает QuerySet содержащий выборку товаров
:param slug: символьный код категории каталога, если не задан выбираются товары в не зависимости от категории
:return:
"""
if slug is None:
q = Product.objects.published()
else:
q = Product.objects.published().filter(sections__slug=slug)
if getattr(settings, 'MIDNIGHT_CATALOG_PREFETCH_PARAMS', False):
q = q.prefetch_related(Prefetch("paramvalue_set", queryset=ParamValue.objects.published().order_by('sort').prefetch_related("param")))
q = q.prefetch_related('sections').order_by('sort', '-id')
return q.all()
|
webadmin87/midnight | midnight_main/services.py | get_page_tpl_ctx | python | def get_page_tpl_ctx(page, request):
text = Template(page.text).render(Context())
meta = MetaSeo(page)
comments = get_object_comments(PageComment, page.id)
comments_form = PageCommentForm(initial=get_comment_init(request, page))
if page.slug == Page.MAIN_SLUG:
crumbs = None
else:
crumbs = page.get_breadcrumbs()
return {'page': page, 'comments': comments, 'comments_form': comments_form, 'text': text, 'meta': meta, 'crumbs': crumbs} | Возвращает контекст для рендеринга представления текстовой страницы
:param page: модель страницы
:param request: запрос
:return: | train | https://github.com/webadmin87/midnight/blob/b60b3b257b4d633550b82a692f3ea3756c62a0a9/midnight_main/services.py#L9-L24 | [
"def get_object_comments(model_cls, obj_id):\n \"\"\"\n Возвращает все комментарии для объекта\n :param model_cls: класс модели комментария\n :param obj_id: идентификатор объекта\n :return:\n \"\"\"\n return model_cls.objects.filter(obj__id=obj_id).all()\n",
"def get_comment_init(request, obj... | from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.template import Template, Context
from midnight_main.forms import PageCommentForm
from midnight_main.models import PageComment, Page
from midnight_main.components import MetaSeo
def get_comment_init(request, obj):
"""
Возвращает словарь для инициализации начальных значений модели комментария
:param request: запрос
:param obj: объект к которому добавляется комментарий
:return:
"""
if request.user.is_authenticated():
init = {'obj': obj, 'username': request.user.username, 'email': request.user.email}
else:
init = {'obj': obj}
return init
def get_object_comments(model_cls, obj_id):
"""
Возвращает все комментарии для объекта
:param model_cls: класс модели комментария
:param obj_id: идентификатор объекта
:return:
"""
return model_cls.objects.filter(obj__id=obj_id).all()
def post_comment(form, user):
"""
Постинг комментария
:param form: форма комментария
:param user: пользователь
:return:
"""
if form.is_valid():
model = form.save(commit=False)
if user.is_authenticated():
model.author = user
model.save()
return True
else:
return False
def save_formset_with_author(formset, user):
"""
Проставляет моделям из набора форм автора
:param formset: набор форм
:param user: автор
:return:
"""
instances = formset.save(commit=False)
for obj in formset.deleted_objects:
obj.delete()
for instance in instances:
if user.is_authenticated() and hasattr(instance, 'author') and not instance.author:
instance.author = user
instance.save()
formset.save_m2m()
def get_by_page(query, page, page_size):
"""
Осуществляет пагинацию
:param query: запрос
:param page: номер страницы
:param page_size: количество объектов на странице
:return:
"""
pager = Paginator(query, page_size)
try:
models = pager.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
models = pager.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
models = pager.page(pager.num_pages)
return models
def mark_current_menus(menus, path_info):
"""
Отмечает активные модели меню (У которых ссылка соответствует текущему path info)
:param menus: список моделей меню
:param path_info: path info
:return:
"""
for menu in menus:
if menu.get_absolute_url() == "/":
menu.is_current = menu.get_absolute_url() == path_info
else:
menu.is_current = path_info.find(menu.get_absolute_url()) == 0
|
webadmin87/midnight | midnight_main/services.py | get_comment_init | python | def get_comment_init(request, obj):
if request.user.is_authenticated():
init = {'obj': obj, 'username': request.user.username, 'email': request.user.email}
else:
init = {'obj': obj}
return init | Возвращает словарь для инициализации начальных значений модели комментария
:param request: запрос
:param obj: объект к которому добавляется комментарий
:return: | train | https://github.com/webadmin87/midnight/blob/b60b3b257b4d633550b82a692f3ea3756c62a0a9/midnight_main/services.py#L27-L38 | null | from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.template import Template, Context
from midnight_main.forms import PageCommentForm
from midnight_main.models import PageComment, Page
from midnight_main.components import MetaSeo
def get_page_tpl_ctx(page, request):
"""
Возвращает контекст для рендеринга представления текстовой страницы
:param page: модель страницы
:param request: запрос
:return:
"""
text = Template(page.text).render(Context())
meta = MetaSeo(page)
comments = get_object_comments(PageComment, page.id)
comments_form = PageCommentForm(initial=get_comment_init(request, page))
if page.slug == Page.MAIN_SLUG:
crumbs = None
else:
crumbs = page.get_breadcrumbs()
return {'page': page, 'comments': comments, 'comments_form': comments_form, 'text': text, 'meta': meta, 'crumbs': crumbs}
def get_object_comments(model_cls, obj_id):
"""
Возвращает все комментарии для объекта
:param model_cls: класс модели комментария
:param obj_id: идентификатор объекта
:return:
"""
return model_cls.objects.filter(obj__id=obj_id).all()
def post_comment(form, user):
"""
Постинг комментария
:param form: форма комментария
:param user: пользователь
:return:
"""
if form.is_valid():
model = form.save(commit=False)
if user.is_authenticated():
model.author = user
model.save()
return True
else:
return False
def save_formset_with_author(formset, user):
"""
Проставляет моделям из набора форм автора
:param formset: набор форм
:param user: автор
:return:
"""
instances = formset.save(commit=False)
for obj in formset.deleted_objects:
obj.delete()
for instance in instances:
if user.is_authenticated() and hasattr(instance, 'author') and not instance.author:
instance.author = user
instance.save()
formset.save_m2m()
def get_by_page(query, page, page_size):
"""
Осуществляет пагинацию
:param query: запрос
:param page: номер страницы
:param page_size: количество объектов на странице
:return:
"""
pager = Paginator(query, page_size)
try:
models = pager.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
models = pager.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
models = pager.page(pager.num_pages)
return models
def mark_current_menus(menus, path_info):
"""
Отмечает активные модели меню (У которых ссылка соответствует текущему path info)
:param menus: список моделей меню
:param path_info: path info
:return:
"""
for menu in menus:
if menu.get_absolute_url() == "/":
menu.is_current = menu.get_absolute_url() == path_info
else:
menu.is_current = path_info.find(menu.get_absolute_url()) == 0
|
webadmin87/midnight | midnight_main/services.py | post_comment | python | def post_comment(form, user):
if form.is_valid():
model = form.save(commit=False)
if user.is_authenticated():
model.author = user
model.save()
return True
else:
return False | Постинг комментария
:param form: форма комментария
:param user: пользователь
:return: | train | https://github.com/webadmin87/midnight/blob/b60b3b257b4d633550b82a692f3ea3756c62a0a9/midnight_main/services.py#L51-L65 | null | from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.template import Template, Context
from midnight_main.forms import PageCommentForm
from midnight_main.models import PageComment, Page
from midnight_main.components import MetaSeo
def get_page_tpl_ctx(page, request):
"""
Возвращает контекст для рендеринга представления текстовой страницы
:param page: модель страницы
:param request: запрос
:return:
"""
text = Template(page.text).render(Context())
meta = MetaSeo(page)
comments = get_object_comments(PageComment, page.id)
comments_form = PageCommentForm(initial=get_comment_init(request, page))
if page.slug == Page.MAIN_SLUG:
crumbs = None
else:
crumbs = page.get_breadcrumbs()
return {'page': page, 'comments': comments, 'comments_form': comments_form, 'text': text, 'meta': meta, 'crumbs': crumbs}
def get_comment_init(request, obj):
"""
Возвращает словарь для инициализации начальных значений модели комментария
:param request: запрос
:param obj: объект к которому добавляется комментарий
:return:
"""
if request.user.is_authenticated():
init = {'obj': obj, 'username': request.user.username, 'email': request.user.email}
else:
init = {'obj': obj}
return init
def get_object_comments(model_cls, obj_id):
"""
Возвращает все комментарии для объекта
:param model_cls: класс модели комментария
:param obj_id: идентификатор объекта
:return:
"""
return model_cls.objects.filter(obj__id=obj_id).all()
def save_formset_with_author(formset, user):
"""
Проставляет моделям из набора форм автора
:param formset: набор форм
:param user: автор
:return:
"""
instances = formset.save(commit=False)
for obj in formset.deleted_objects:
obj.delete()
for instance in instances:
if user.is_authenticated() and hasattr(instance, 'author') and not instance.author:
instance.author = user
instance.save()
formset.save_m2m()
def get_by_page(query, page, page_size):
"""
Осуществляет пагинацию
:param query: запрос
:param page: номер страницы
:param page_size: количество объектов на странице
:return:
"""
pager = Paginator(query, page_size)
try:
models = pager.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
models = pager.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
models = pager.page(pager.num_pages)
return models
def mark_current_menus(menus, path_info):
"""
Отмечает активные модели меню (У которых ссылка соответствует текущему path info)
:param menus: список моделей меню
:param path_info: path info
:return:
"""
for menu in menus:
if menu.get_absolute_url() == "/":
menu.is_current = menu.get_absolute_url() == path_info
else:
menu.is_current = path_info.find(menu.get_absolute_url()) == 0
|
webadmin87/midnight | midnight_main/services.py | save_formset_with_author | python | def save_formset_with_author(formset, user):
instances = formset.save(commit=False)
for obj in formset.deleted_objects:
obj.delete()
for instance in instances:
if user.is_authenticated() and hasattr(instance, 'author') and not instance.author:
instance.author = user
instance.save()
formset.save_m2m() | Проставляет моделям из набора форм автора
:param formset: набор форм
:param user: автор
:return: | train | https://github.com/webadmin87/midnight/blob/b60b3b257b4d633550b82a692f3ea3756c62a0a9/midnight_main/services.py#L68-L82 | null | from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.template import Template, Context
from midnight_main.forms import PageCommentForm
from midnight_main.models import PageComment, Page
from midnight_main.components import MetaSeo
def get_page_tpl_ctx(page, request):
"""
Возвращает контекст для рендеринга представления текстовой страницы
:param page: модель страницы
:param request: запрос
:return:
"""
text = Template(page.text).render(Context())
meta = MetaSeo(page)
comments = get_object_comments(PageComment, page.id)
comments_form = PageCommentForm(initial=get_comment_init(request, page))
if page.slug == Page.MAIN_SLUG:
crumbs = None
else:
crumbs = page.get_breadcrumbs()
return {'page': page, 'comments': comments, 'comments_form': comments_form, 'text': text, 'meta': meta, 'crumbs': crumbs}
def get_comment_init(request, obj):
"""
Возвращает словарь для инициализации начальных значений модели комментария
:param request: запрос
:param obj: объект к которому добавляется комментарий
:return:
"""
if request.user.is_authenticated():
init = {'obj': obj, 'username': request.user.username, 'email': request.user.email}
else:
init = {'obj': obj}
return init
def get_object_comments(model_cls, obj_id):
"""
Возвращает все комментарии для объекта
:param model_cls: класс модели комментария
:param obj_id: идентификатор объекта
:return:
"""
return model_cls.objects.filter(obj__id=obj_id).all()
def post_comment(form, user):
"""
Постинг комментария
:param form: форма комментария
:param user: пользователь
:return:
"""
if form.is_valid():
model = form.save(commit=False)
if user.is_authenticated():
model.author = user
model.save()
return True
else:
return False
def get_by_page(query, page, page_size):
"""
Осуществляет пагинацию
:param query: запрос
:param page: номер страницы
:param page_size: количество объектов на странице
:return:
"""
pager = Paginator(query, page_size)
try:
models = pager.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
models = pager.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
models = pager.page(pager.num_pages)
return models
def mark_current_menus(menus, path_info):
"""
Отмечает активные модели меню (У которых ссылка соответствует текущему path info)
:param menus: список моделей меню
:param path_info: path info
:return:
"""
for menu in menus:
if menu.get_absolute_url() == "/":
menu.is_current = menu.get_absolute_url() == path_info
else:
menu.is_current = path_info.find(menu.get_absolute_url()) == 0
|
webadmin87/midnight | midnight_main/services.py | get_by_page | python | def get_by_page(query, page, page_size):
pager = Paginator(query, page_size)
try:
models = pager.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
models = pager.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
models = pager.page(pager.num_pages)
return models | Осуществляет пагинацию
:param query: запрос
:param page: номер страницы
:param page_size: количество объектов на странице
:return: | train | https://github.com/webadmin87/midnight/blob/b60b3b257b4d633550b82a692f3ea3756c62a0a9/midnight_main/services.py#L85-L104 | null | from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.template import Template, Context
from midnight_main.forms import PageCommentForm
from midnight_main.models import PageComment, Page
from midnight_main.components import MetaSeo
def get_page_tpl_ctx(page, request):
"""
Возвращает контекст для рендеринга представления текстовой страницы
:param page: модель страницы
:param request: запрос
:return:
"""
text = Template(page.text).render(Context())
meta = MetaSeo(page)
comments = get_object_comments(PageComment, page.id)
comments_form = PageCommentForm(initial=get_comment_init(request, page))
if page.slug == Page.MAIN_SLUG:
crumbs = None
else:
crumbs = page.get_breadcrumbs()
return {'page': page, 'comments': comments, 'comments_form': comments_form, 'text': text, 'meta': meta, 'crumbs': crumbs}
def get_comment_init(request, obj):
"""
Возвращает словарь для инициализации начальных значений модели комментария
:param request: запрос
:param obj: объект к которому добавляется комментарий
:return:
"""
if request.user.is_authenticated():
init = {'obj': obj, 'username': request.user.username, 'email': request.user.email}
else:
init = {'obj': obj}
return init
def get_object_comments(model_cls, obj_id):
"""
Возвращает все комментарии для объекта
:param model_cls: класс модели комментария
:param obj_id: идентификатор объекта
:return:
"""
return model_cls.objects.filter(obj__id=obj_id).all()
def post_comment(form, user):
"""
Постинг комментария
:param form: форма комментария
:param user: пользователь
:return:
"""
if form.is_valid():
model = form.save(commit=False)
if user.is_authenticated():
model.author = user
model.save()
return True
else:
return False
def save_formset_with_author(formset, user):
"""
Проставляет моделям из набора форм автора
:param formset: набор форм
:param user: автор
:return:
"""
instances = formset.save(commit=False)
for obj in formset.deleted_objects:
obj.delete()
for instance in instances:
if user.is_authenticated() and hasattr(instance, 'author') and not instance.author:
instance.author = user
instance.save()
formset.save_m2m()
def mark_current_menus(menus, path_info):
"""
Отмечает активные модели меню (У которых ссылка соответствует текущему path info)
:param menus: список моделей меню
:param path_info: path info
:return:
"""
for menu in menus:
if menu.get_absolute_url() == "/":
menu.is_current = menu.get_absolute_url() == path_info
else:
menu.is_current = path_info.find(menu.get_absolute_url()) == 0
|
webadmin87/midnight | midnight_main/services.py | mark_current_menus | python | def mark_current_menus(menus, path_info):
for menu in menus:
if menu.get_absolute_url() == "/":
menu.is_current = menu.get_absolute_url() == path_info
else:
menu.is_current = path_info.find(menu.get_absolute_url()) == 0 | Отмечает активные модели меню (У которых ссылка соответствует текущему path info)
:param menus: список моделей меню
:param path_info: path info
:return: | train | https://github.com/webadmin87/midnight/blob/b60b3b257b4d633550b82a692f3ea3756c62a0a9/midnight_main/services.py#L107-L118 | null | from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.template import Template, Context
from midnight_main.forms import PageCommentForm
from midnight_main.models import PageComment, Page
from midnight_main.components import MetaSeo
def get_page_tpl_ctx(page, request):
"""
Возвращает контекст для рендеринга представления текстовой страницы
:param page: модель страницы
:param request: запрос
:return:
"""
text = Template(page.text).render(Context())
meta = MetaSeo(page)
comments = get_object_comments(PageComment, page.id)
comments_form = PageCommentForm(initial=get_comment_init(request, page))
if page.slug == Page.MAIN_SLUG:
crumbs = None
else:
crumbs = page.get_breadcrumbs()
return {'page': page, 'comments': comments, 'comments_form': comments_form, 'text': text, 'meta': meta, 'crumbs': crumbs}
def get_comment_init(request, obj):
"""
Возвращает словарь для инициализации начальных значений модели комментария
:param request: запрос
:param obj: объект к которому добавляется комментарий
:return:
"""
if request.user.is_authenticated():
init = {'obj': obj, 'username': request.user.username, 'email': request.user.email}
else:
init = {'obj': obj}
return init
def get_object_comments(model_cls, obj_id):
"""
Возвращает все комментарии для объекта
:param model_cls: класс модели комментария
:param obj_id: идентификатор объекта
:return:
"""
return model_cls.objects.filter(obj__id=obj_id).all()
def post_comment(form, user):
"""
Постинг комментария
:param form: форма комментария
:param user: пользователь
:return:
"""
if form.is_valid():
model = form.save(commit=False)
if user.is_authenticated():
model.author = user
model.save()
return True
else:
return False
def save_formset_with_author(formset, user):
"""
Проставляет моделям из набора форм автора
:param formset: набор форм
:param user: автор
:return:
"""
instances = formset.save(commit=False)
for obj in formset.deleted_objects:
obj.delete()
for instance in instances:
if user.is_authenticated() and hasattr(instance, 'author') and not instance.author:
instance.author = user
instance.save()
formset.save_m2m()
def get_by_page(query, page, page_size):
"""
Осуществляет пагинацию
:param query: запрос
:param page: номер страницы
:param page_size: количество объектов на странице
:return:
"""
pager = Paginator(query, page_size)
try:
models = pager.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
models = pager.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
models = pager.page(pager.num_pages)
return models
|
webadmin87/midnight | midnight_banners/templatetags/midnight_banners.py | one_banner | python | def one_banner(slug):
place = BannerPlace.objects.published().get(slug=slug)
banner = place.banner_set.published().order_by('?').first()
renderer = get_renderer(banner)
return renderer(banner) | Служит для отображения одного случайного баннера из указанного баннерного места
Пример использования::
{% one_banner 'place_slug' %}
:param slug: символьный код баннерного места
:return: | train | https://github.com/webadmin87/midnight/blob/b60b3b257b4d633550b82a692f3ea3756c62a0a9/midnight_banners/templatetags/midnight_banners.py#L9-L23 | [
"def get_renderer(banner):\n filename, file_extension = os.path.splitext(banner.file.name)\n if file_extension.lower() == '.swf':\n return swf_renderer\n else:\n return img_renderer\n",
"def swf_renderer(banner, cls=\"banners-swf\"):\n tag_id = uuid.uuid4().hex[:6].lower()\n return '<... | from midnight_banners.models import BannerPlace
from midnight_banners.renderers import get_renderer
from django import template
register = template.Library()
@register.simple_tag()
def list_banners(slug, **kwargs):
"""
Отображает все баннеры из указанного баннерного места
Пример использования::
{% list_banners 'place_slug' class='banners-class' %}
:param slug: символьный код баннерного места
:param kwargs: html атрибуты оборачивающего тега
:return:
"""
place = BannerPlace.objects.published().get(slug=slug)
banners = place.banner_set.published().order_by('sort').all()
rendered = []
for banner in banners:
renderer = get_renderer(banner)
rendered.append(renderer(banner))
return {'rendered': rendered, 'banners': banners, 'data': kwargs}
register.inclusion_tag(file_name='midnight_banners/tags/list_banners.html', name='list_banners')(list_banners) |
webadmin87/midnight | midnight_banners/templatetags/midnight_banners.py | list_banners | python | def list_banners(slug, **kwargs):
place = BannerPlace.objects.published().get(slug=slug)
banners = place.banner_set.published().order_by('sort').all()
rendered = []
for banner in banners:
renderer = get_renderer(banner)
rendered.append(renderer(banner))
return {'rendered': rendered, 'banners': banners, 'data': kwargs} | Отображает все баннеры из указанного баннерного места
Пример использования::
{% list_banners 'place_slug' class='banners-class' %}
:param slug: символьный код баннерного места
:param kwargs: html атрибуты оборачивающего тега
:return: | train | https://github.com/webadmin87/midnight/blob/b60b3b257b4d633550b82a692f3ea3756c62a0a9/midnight_banners/templatetags/midnight_banners.py#L26-L44 | [
"def get_renderer(banner):\n filename, file_extension = os.path.splitext(banner.file.name)\n if file_extension.lower() == '.swf':\n return swf_renderer\n else:\n return img_renderer\n",
"def swf_renderer(banner, cls=\"banners-swf\"):\n tag_id = uuid.uuid4().hex[:6].lower()\n return '<... | from midnight_banners.models import BannerPlace
from midnight_banners.renderers import get_renderer
from django import template
register = template.Library()
@register.simple_tag()
def one_banner(slug):
    """
    Render a single random published banner from the given banner place.

    Usage::

        {% one_banner 'place_slug' %}

    :param slug: slug of the banner place
    :return: rendered banner markup
    """
    place = BannerPlace.objects.published().get(slug=slug)
    # '?' ordering delegates random selection to the database.
    chosen = place.banner_set.published().order_by('?').first()
    render = get_renderer(chosen)
    return render(chosen)
register.inclusion_tag(file_name='midnight_banners/tags/list_banners.html', name='list_banners')(list_banners) |
webadmin87/midnight | midnight_main/mailer.py | send_templated_mail | python | def send_templated_mail(tpl, subject, context, to=getattr(settings, 'MIDNIGHT_MAIN_ADMIN_EMAIL', 'admin@example.com')):
msg_html = render_to_string(tpl, {'context': context})
send_mail(subject, '', getattr(settings, 'MIDNIGHT_MAIN_MAIL_FROM', 'admin@example.com'), [to], html_message=msg_html,) | Отправляет письмо на основе шаблона
:param tpl: шаблон
:param subject: тема письма
:param context: контекст для рендеринга шаблона
:param to: кому слать письмо
:return: | train | https://github.com/webadmin87/midnight/blob/b60b3b257b4d633550b82a692f3ea3756c62a0a9/midnight_main/mailer.py#L6-L17 | null | from django.conf import settings
from django.core.mail import send_mail
from django.template.loader import render_to_string
|
webadmin87/midnight | midnight_news/templatetags/midnight_news.py | show_news_line | python | def show_news_line(slug=None, limit=3, **kwargs):
if slug is None:
section = None
q = News.objects.published()
else:
section = Section.objects.get(slug=slug)
q = News.objects.published().filter(sections__slug=slug)
models = q.prefetch_related('sections').order_by('-date', '-id').all()[:limit]
return {'models': models, 'section': section, 'data': kwargs} | Отображает список последних новостей
Пример использования::
{% show_news_line 'news_section_slug' 3 class='news-class' %}
:param slug: символьный код категории новостей, если не задан фильтрация по категории не происходит
:param limit: количество выводимых новостей
:param kwargs: html атрибуты оборачивающего тега
:return: | train | https://github.com/webadmin87/midnight/blob/b60b3b257b4d633550b82a692f3ea3756c62a0a9/midnight_news/templatetags/midnight_news.py#L7-L30 | null | from django import template
from midnight_news.models import News, Section
register = template.Library()
register.inclusion_tag(file_name='midnight_news/tags/show_news_line.html', name='show_news_line')(show_news_line)
|
rdo-management/python-rdomanager-oscplugin | rdomanager_oscplugin/v1/baremetal.py | _csv_to_nodes_dict | python | def _csv_to_nodes_dict(nodes_csv):
data = []
for row in csv.reader(nodes_csv):
node = {
"pm_user": row[2],
"pm_addr": row[1],
"pm_password": row[3],
"pm_type": row[0],
"mac": [
row[4]
]
}
data.append(node)
return data | Convert CSV to a list of dicts formatted for os_cloud_config
Given a CSV file in the format below, convert it into the
structure expected by os_could_config JSON files.
pm_type, pm_addr, pm_user, pm_password, mac | train | https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/v1/baremetal.py#L35-L58 | null | # Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
import argparse
import csv
import json
import logging
import sys
import time
from cliff import command
from cliff import lister
from ironic_discoverd import client as discoverd_client
from openstackclient.common import utils as osc_utils
from os_cloud_config import nodes
from rdomanager_oscplugin import exceptions
from rdomanager_oscplugin import utils
class ValidateInstackEnv(command.Command):
    """Validate `instackenv.json` which is used in `baremetal import`."""

    # Purely local file validation -- no keystone authentication required.
    auth_required = False
    log = logging.getLogger(__name__ + ".ValidateInstackEnv")

    def get_parser(self, prog_name):
        """Add the -f/--file option pointing at the instackenv.json file."""
        parser = super(ValidateInstackEnv, self).get_parser(prog_name)
        parser.add_argument(
            '-f', '--file', dest='instackenv',
            help="Path to the instackenv.json file.",
            default='instackenv.json')
        return parser

    def take_action(self, parsed_args):
        """Check every node entry and print a SUCCESS/FAILURE summary.

        Problems are accumulated in ``self.error_count`` instead of being
        raised, so a single run reports all issues at once.
        """
        self.log.debug("take_action(%s)" % parsed_args)

        self.error_count = 0

        with open(parsed_args.instackenv, 'r') as net_file:
            env_data = json.load(net_file)

        maclist = []        # all MACs across nodes, for the uniqueness check
        baremetal_ips = []  # IPMI addresses of baremetal nodes, likewise
        for node in env_data['nodes']:
            self.log.info("Checking node %s" % node['pm_addr'])

            # Each field check is wrapped in try/except so a missing key
            # is reported as an error instead of aborting the whole run.
            try:
                if len(node['pm_password']) == 0:
                    self.log.error('ERROR: Password 0 length.')
                    self.error_count += 1
            except Exception as e:
                self.log.error('ERROR: Password does not exist: %s', e)
                self.error_count += 1
            try:
                if len(node['pm_user']) == 0:
                    self.log.error('ERROR: User 0 length.')
                    self.error_count += 1
            except Exception as e:
                self.log.error('ERROR: User does not exist: %s', e)
                self.error_count += 1
            try:
                if len(node['mac']) == 0:
                    self.log.error('ERROR: MAC address 0 length.')
                    self.error_count += 1
                maclist.extend(node['mac'])
            except Exception as e:
                self.log.error('ERROR: MAC address does not exist: %s', e)
                self.error_count += 1

            if node['pm_type'] == "pxe_ssh":
                self.log.debug("Identified virtual node")

            if node['pm_type'] == "pxe_ipmitool":
                self.log.debug("Identified baremetal node")

                # Probe the BMC now so credential/connectivity problems
                # surface during validation rather than during deployment.
                cmd = ('ipmitool -R 1 -I lanplus -H %s -U %s -P %s chassis '
                       'status' % (node['pm_addr'], node['pm_user'],
                                   node['pm_password']))
                self.log.debug("Executing: %s", cmd)
                status = utils.run_shell(cmd)
                if status != 0:
                    # non-zero exit status from ipmitool counts as an error
                    self.log.error('ERROR: ipmitool failed')
                    self.error_count += 1
                baremetal_ips.append(node['pm_addr'])

        if not utils.all_unique(baremetal_ips):
            self.log.error('ERROR: Baremetals IPs are not all unique.')
            self.error_count += 1
        else:
            self.log.debug('Baremetal IPs are all unique.')

        if not utils.all_unique(maclist):
            self.log.error('ERROR: MAC addresses are not all unique.')
            self.error_count += 1
        else:
            self.log.debug('MAC addresses are all unique.')

        if self.error_count == 0:
            print('SUCCESS: found 0 errors')
        else:
            print('FAILURE: found %d errors' % self.error_count)
class ImportBaremetal(command.Command):
    """Import baremetal nodes from a JSON or CSV file"""

    log = logging.getLogger(__name__ + ".ImportBaremetal")

    def get_parser(self, prog_name):
        """Add service-host, input-format flags and the input file argument."""
        parser = super(ImportBaremetal, self).get_parser(prog_name)
        parser.add_argument('-s', '--service-host', dest='service_host',
                            help='Nova compute service host to register nodes '
                            'with')
        # --json and --csv are mutually exclusive; exactly one must be given
        # (enforced in take_action below).
        parser.add_argument('--json', dest='json', action='store_true')
        parser.add_argument('--csv', dest='csv', action='store_true')
        parser.add_argument('file_in', type=argparse.FileType('r'))
        return parser

    def take_action(self, parsed_args):
        """Parse the input file and register all nodes via os-cloud-config."""
        self.log.debug("take_action(%s)" % parsed_args)

        # We need JSON or CSV to be specified, not both.
        if parsed_args.json == parsed_args.csv:
            print("ERROR: Either --json or --csv needs to be specified.",
                  file=sys.stderr)
            return

        if parsed_args.json is True:
            nodes_json = json.load(parsed_args.file_in)
            # Accept both a bare list and a {"nodes": [...]} wrapper.
            if 'nodes' in nodes_json:
                nodes_json = nodes_json['nodes']
        else:
            nodes_json = _csv_to_nodes_dict(parsed_args.file_in)

        nodes.register_all_nodes(
            parsed_args.service_host,
            nodes_json,
            client=self.app.client_manager.rdomanager_oscplugin.baremetal(),
            keystone_client=self.app.client_manager.identity)
class IntrospectionParser(object):
    """Mixin that adds the --discoverd-url option to a command's parser.

    Intended to be listed before the cliff command base class so the
    super() chain reaches the real get_parser implementation.
    """

    def get_parser(self, prog_name):
        parser = super(IntrospectionParser, self).get_parser(prog_name)
        parser.add_argument(
            '--discoverd-url',
            # Falls back to the DISCOVERD_URL environment variable, else None.
            default=osc_utils.env('DISCOVERD_URL', default=None),
            help='discoverd URL, defaults to localhost (env: DISCOVERD_URL).')
        return parser
class StartBaremetalIntrospectionBulk(IntrospectionParser, command.Command):
    """Start bulk introspection on all baremetal nodes"""

    log = logging.getLogger(__name__ + ".StartBaremetalIntrospectionBulk")

    def get_parser(self, prog_name):
        # Only the --discoverd-url option from IntrospectionParser is needed.
        parser = super(
            StartBaremetalIntrospectionBulk, self).get_parser(prog_name)
        return parser

    def take_action(self, parsed_args):
        """Introspect every node, then return them to 'available'.

        Workflow: move available nodes to manageable, start discoverd
        introspection on each manageable node, wait for all of them to
        finish, then provide the nodes back to the available state.
        """
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.rdomanager_oscplugin.baremetal()
        auth_token = self.app.client_manager.auth_ref.auth_token

        node_uuids = []

        print("Setting available nodes to manageable...")
        self.log.debug("Moving available nodes to manageable state.")
        available_nodes = [node for node in client.node.list()
                           if node.provision_state == "available"]
        for uuid in utils.set_nodes_state(client, available_nodes, 'manage',
                                          'manageable'):
            self.log.debug("Node {0} has been set to manageable.".format(uuid))

        for node in client.node.list():
            if node.provision_state != "manageable":
                continue

            node_uuids.append(node.uuid)

            print("Starting introspection of node: {0}".format(node.uuid))
            discoverd_client.introspect(
                node.uuid,
                base_url=parsed_args.discoverd_url,
                auth_token=auth_token)

            # NOTE(dtantsur): PXE firmware on virtual machines misbehaves when
            # a lot of nodes start DHCPing simultaneously: it ignores NACK from
            # DHCP server, tries to get the same address, then times out. Work
            # around it by using sleep, anyway introspection takes much longer.
            time.sleep(5)

        print("Waiting for discovery to finish...")
        has_errors = False
        for uuid, status in utils.wait_for_node_discovery(
                discoverd_client, auth_token, parsed_args.discoverd_url,
                node_uuids):
            if status['error'] is None:
                print("Discovery for UUID {0} finished successfully."
                      .format(uuid))
            else:
                print("Discovery for UUID {0} finished with error: {1}"
                      .format(uuid, status['error']))
                has_errors = True

        clients = self.app.client_manager
        baremetal_client = clients.rdomanager_oscplugin.baremetal()
        print("Setting manageable nodes to available...")

        self.log.debug("Moving manageable nodes to available state.")
        # NOTE(review): available_nodes is recomputed here but the provide
        # call below iterates the full node list; skipped_states keeps
        # already-available/active nodes untouched -- confirm the recompute
        # is intentionally unused.
        available_nodes = [node for node in client.node.list()
                           if node.provision_state == "manageable"]
        for uuid in utils.set_nodes_state(
                baremetal_client, baremetal_client.node.list(), 'provide',
                'available', skipped_states=("available", "active")):
            print("Node {0} has been set to available.".format(uuid))

        if has_errors:
            print("Discovery completed with errors.")
        else:
            print("Discovery completed.")
class StatusBaremetalIntrospectionBulk(IntrospectionParser, lister.Lister):
    """Get the status of all baremetal nodes"""

    log = logging.getLogger(__name__ + ".StatusBaremetalIntrospectionBulk")

    def take_action(self, parsed_args):
        """Return (column names, rows) with each node's discoverd status.

        :param parsed_args: parsed CLI arguments (provides discoverd_url)
        :return: tuple of column headers and (uuid, finished, error) rows
        """
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.rdomanager_oscplugin.baremetal()
        # The auth token is loop-invariant; fetch it once instead of once
        # per node.
        auth_token = self.app.client_manager.auth_ref.auth_token

        statuses = []
        for node in client.node.list():
            self.log.debug("Getting introspection status of Ironic node {0}"
                           .format(node.uuid))
            statuses.append((node.uuid, discoverd_client.get_status(
                node.uuid,
                base_url=parsed_args.discoverd_url,
                auth_token=auth_token)))

        return (
            ("Node UUID", "Finished", "Error"),
            [(node_uuid, status['finished'], status['error'])
             for (node_uuid, status) in statuses]
        )
class ConfigureReadyState(IntrospectionParser, command.Command):
    """Configure all baremetal nodes for enrollment"""

    log = logging.getLogger(__name__ + ".ConfigureReadyState")
    # Seconds to wait after queueing a DRAC job, and between poll attempts.
    sleep_time = 15
    # Max poll attempts while waiting for DRAC config jobs
    # (loops * sleep_time seconds in total).
    loops = 120

    def _configure_bios(self, nodes):
        # Queue a BIOS-settings job on each DRAC node.
        for node in nodes:
            print("Configuring BIOS for node {0}".format(node.uuid))
            self.bm_client.node.vendor_passthru(
                node.uuid, 'configure_bios_settings', http_method='POST')

        # NOTE(ifarkas): give the DRAC card some time to process the job
        time.sleep(self.sleep_time)

    def _configure_root_raid_volumes(self, nodes):
        # Queue creation of the root RAID volume only.
        for node in nodes:
            print("Configuring root RAID volume for node {0}"
                  .format(node.uuid))
            self.bm_client.node.vendor_passthru(
                node.uuid, 'create_raid_configuration',
                {'create_root_volume': True, 'create_nonroot_volumes': False},
                'POST')

        # NOTE(ifarkas): give the DRAC card some time to process the job
        time.sleep(self.sleep_time)

    def _configure_nonroot_raid_volumes(self, nodes):
        # Queue creation of the non-root RAID volumes only.
        for node in nodes:
            print("Configuring non-root RAID volume for node {0}"
                  .format(node.uuid))
            self.bm_client.node.vendor_passthru(
                node.uuid, 'create_raid_configuration',
                {'create_root_volume': False, 'create_nonroot_volumes': True},
                'POST')

        # NOTE(ifarkas): give the DRAC card some time to process the job
        time.sleep(self.sleep_time)

    def _wait_for_drac_config_jobs(self, nodes):
        # Poll each node until it reports no unfinished jobs; raise Timeout
        # if a node never drains within loops * sleep_time seconds.
        for node in nodes:
            print("Waiting for DRAC config jobs to finish on node {0}"
                  .format(node.uuid))

            for _ in range(self.loops):
                resp = self.bm_client.node.vendor_passthru(
                    node.uuid, 'list_unfinished_jobs', http_method='GET')
                if not resp.unfinished_jobs:
                    break

                time.sleep(self.sleep_time)
            else:
                # for/else: only reached when the loop exhausted all attempts.
                msg = ("Timed out waiting for DRAC config jobs on node {0}"
                       .format(node.uuid))
                raise exceptions.Timeout(msg)

    def _delete_raid_volumes(self, nodes):
        """Delete existing virtual disks; return nodes that need a reboot."""
        nodes_with_reboot_request = set()

        for node in nodes:
            print("Deleting RAID volumes on node {0}".format(node.uuid))

            resp = self.bm_client.node.vendor_passthru(
                node.uuid, 'list_virtual_disks', http_method='GET')
            virtual_disks = resp.virtual_disks

            changed_raid_controllers = set()
            for disk in virtual_disks:
                self.bm_client.node.vendor_passthru(
                    node.uuid, 'delete_virtual_disk',
                    {'virtual_disk': disk['id']}, 'POST')
                changed_raid_controllers.add(disk['controller'])

            # Only nodes whose controllers changed need the follow-up reboot.
            if changed_raid_controllers:
                nodes_with_reboot_request.add(node)

            for controller in changed_raid_controllers:
                self.bm_client.node.vendor_passthru(
                    node.uuid, 'apply_pending_raid_config',
                    {'raid_controller': controller}, 'POST')

        # NOTE(ifarkas): give the DRAC card some time to process the job
        time.sleep(self.sleep_time)

        return nodes_with_reboot_request

    def _change_power_state(self, nodes, target_power_state):
        # target_power_state is an ironic power-state value, e.g. 'reboot'
        # or 'off'.
        for node in nodes:
            print("Changing power state on "
                  "node {0} to {1}".format(node.uuid, target_power_state))
            self.bm_client.node.set_power_state(node.uuid, target_power_state)

    def _run_introspection(self, nodes):
        # Kick off discoverd introspection for each node, then block until
        # every one reports finished (success or error is printed per node).
        auth_token = self.app.client_manager.auth_ref.auth_token
        node_uuids = []

        for node in nodes:
            print("Starting introspection on node {0}".format(node.uuid))
            discoverd_client.introspect(
                node.uuid,
                base_url=self.discoverd_url,
                auth_token=auth_token)
            node_uuids.append(node.uuid)

        print("Waiting for discovery to finish")
        for uuid, status in utils.wait_for_node_discovery(
                discoverd_client, auth_token, self.discoverd_url,
                node_uuids):
            if status['error'] is None:
                print("Discovery for node {0} finished successfully."
                      .format(uuid))
            else:
                print("Discovery for node {0} finished with error: {1}"
                      .format(uuid, status['error']))

    def get_parser(self, prog_name):
        """Add the --delete-existing-raid-volumes flag."""
        parser = super(ConfigureReadyState, self).get_parser(prog_name)
        parser.add_argument('--delete-existing-raid-volumes',
                            dest='delete_raid_volumes', action='store_true')
        return parser

    def take_action(self, parsed_args):
        """Run the full ready-state sequence on every DRAC-driven node.

        Sequence: optionally wipe existing RAID volumes, create the root
        volume, set BIOS settings, reboot and wait for jobs, introspect,
        create non-root volumes, reboot again, then power the nodes off.
        """
        self.log.debug("take_action(%s)" % parsed_args)
        self.bm_client = (
            self.app.client_manager.rdomanager_oscplugin.baremetal())
        self.discoverd_url = parsed_args.discoverd_url

        # Only nodes using a DRAC-based driver are handled by this command.
        drac_nodes = [node for node in self.bm_client.node.list(detail=True)
                      if 'drac' in node.driver]

        if parsed_args.delete_raid_volumes:
            changed_nodes = self._delete_raid_volumes(drac_nodes)
            self._change_power_state(changed_nodes, 'reboot')
            self._wait_for_drac_config_jobs(changed_nodes)

        self._configure_root_raid_volumes(drac_nodes)
        self._configure_bios(drac_nodes)
        self._change_power_state(drac_nodes, 'reboot')
        self._wait_for_drac_config_jobs(drac_nodes)

        self._run_introspection(drac_nodes)

        self._configure_nonroot_raid_volumes(drac_nodes)
        self._change_power_state(drac_nodes, 'reboot')
        self._wait_for_drac_config_jobs(drac_nodes)

        self._change_power_state(drac_nodes, 'off')
class ConfigureBaremetalBoot(command.Command):
    """Configure baremetal boot for all nodes"""

    log = logging.getLogger(__name__ + ".ConfigureBaremetalBoot")
    # Power-state wait: up to loops * sleep_time (= 120) seconds per node.
    loops = 12
    sleep_time = 10

    def take_action(self, parsed_args):
        """Point every non-maintenance node at the deploy kernel/ramdisk.

        Looks up the 'bm-deploy-kernel' and 'bm-deploy-ramdisk' glance
        images, then patches each node's capabilities (boot_option:local)
        and driver_info with the two image IDs.
        """
        self.log.debug("take_action(%s)" % parsed_args)
        bm_client = self.app.client_manager.rdomanager_oscplugin.baremetal()

        image_client = self.app.client_manager.image
        # find_resource raises AttributeError when the name is ambiguous
        # (more than one matching image) -- report and bail out.
        try:
            kernel_id = osc_utils.find_resource(
                image_client.images, 'bm-deploy-kernel').id
        except AttributeError:
            print("ERROR: Please make sure there is only one image named "
                  "'bm-deploy-kernel' in glance.",
                  file=sys.stderr)
            return

        try:
            ramdisk_id = osc_utils.find_resource(
                image_client.images, 'bm-deploy-ramdisk').id
        except AttributeError:
            print("ERROR: Please make sure there is only one image named "
                  "'bm-deploy-ramdisk' in glance.",
                  file=sys.stderr)
            return

        self.log.debug("Using kernel ID: {0} and ramdisk ID: {1}".format(
            kernel_id, ramdisk_id))

        for node in bm_client.node.list(maintenance=False):
            # NOTE(bnemec): Ironic won't let us update the node while the
            # power_state is transitioning.
            if node.power_state is None:
                self.log.warning('Node %s power state is in transition. '
                                 'Waiting up to %d seconds for it to '
                                 'complete.',
                                 node.uuid,
                                 self.loops * self.sleep_time)
                for _ in range(self.loops):
                    time.sleep(self.sleep_time)
                    node = bm_client.node.get(node.uuid)
                    if node.power_state is not None:
                        break
                else:
                    # for/else: all attempts exhausted without a power state.
                    msg = ('Timed out waiting for node %s power state.' %
                           node.uuid)
                    raise exceptions.Timeout(msg)

            # Get the full node info
            node_detail = bm_client.node.get(node.uuid)
            capabilities = node_detail.properties.get('capabilities', None)

            # Only update capabilities to add boot_option if it doesn't exist.
            if capabilities:
                if 'boot_option' not in capabilities:
                    capabilities = "boot_option:local,%s" % capabilities
            else:
                capabilities = "boot_option:local"

            self.log.debug("Configuring boot for Node {0}".format(
                node.uuid))

            bm_client.node.update(node.uuid, [
                {
                    'op': 'add',
                    'path': '/properties/capabilities',
                    'value': capabilities,
                },
                {
                    'op': 'add',
                    'path': '/driver_info/deploy_ramdisk',
                    'value': ramdisk_id,
                },
                {
                    'op': 'add',
                    'path': '/driver_info/deploy_kernel',
                    'value': kernel_id,
                },
            ])
class ShowNodeCapabilities(lister.Lister):
    """List the capabilities for all Nodes"""

    # NOTE: logger name intentionally kept as in the original source.
    log = logging.getLogger(__name__ + ".ShowNodeProfile")

    def take_action(self, parsed_args):
        """Build the (headers, rows) pair expected by cliff's Lister."""
        bm_client = self.app.client_manager.rdomanager_oscplugin.baremetal()
        # One detail fetch per node to read its 'capabilities' property.
        rows = [
            (node.uuid,
             bm_client.node.get(node.uuid).properties.get('capabilities'))
            for node in bm_client.node.list()
        ]
        return (("Node UUID", "Node Capabilities"), rows, )
|
rdo-management/python-rdomanager-oscplugin | rdomanager_oscplugin/plugin.py | build_option_parser | python | def build_option_parser(parser):
parser.add_argument(
'--os-rdomanager-oscplugin-api-version',
metavar='<rdomanager-oscplugin-api-version>',
default=utils.env(
'OS_RDOMANAGER_OSCPLUGIN_API_VERSION',
default=DEFAULT_RDOMANAGER_OSCPLUGIN_API_VERSION),
help='RDO Manager OSC Plugin API version, default=' +
DEFAULT_RDOMANAGER_OSCPLUGIN_API_VERSION +
' (Env: OS_RDOMANAGER_OSCPLUGIN_API_VERSION)')
return parser | Hook to add global options
Called from openstackclient.shell.OpenStackShell.__init__()
after the builtin parser has been initialized. This is
where a plugin can add global options such as an API version setting.
:param argparse.ArgumentParser parser: The parser object that has been
initialized by OpenStackShell. | train | https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/plugin.py#L42-L61 | null | # Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""OpenStackClient Plugin interface"""
import logging
from ironicclient import client as ironic_client
from openstackclient.common import utils
from tuskarclient import client as tuskar_client
LOG = logging.getLogger(__name__)
DEFAULT_RDOMANAGER_OSCPLUGIN_API_VERSION = '1'
# Required by the OSC plugin interface
API_NAME = 'rdomanager_oscplugin'
API_VERSION_OPTION = 'os_rdomanager_oscplugin_api_version'
API_VERSIONS = {
'1': 'rdomanager_oscplugin.plugin'
}
def make_client(instance):
    """Return the plugin's client wrapper for this OSC client manager.

    Required by the OSC plugin interface; *instance* is the shell's
    ClientManager.
    """
    return ClientWrapper(instance)
# Required by the OSC plugin interface
class ClientWrapper(object):
    """Lazily constructed service clients for the rdomanager OSC plugin."""

    def __init__(self, instance):
        # instance: the OpenStackShell ClientManager this plugin hangs off.
        self._instance = instance
        # Per-service client caches; populated on first use.
        self._baremetal = None
        self._orchestration = None
        self._management = None

    def baremetal(self):
        """Returns an baremetal service client"""

        # TODO(d0ugal): When the ironicclient has it's own OSC plugin, the
        # following client handling code should be removed in favor of the
        # upstream version.

        # Return the cached client after the first call.
        if self._baremetal is not None:
            return self._baremetal

        endpoint = self._instance.get_endpoint_for_service_type(
            "baremetal",
            region_name=self._instance._region_name,
        )

        token = self._instance.auth.get_token(self._instance.session)

        self._baremetal = ironic_client.get_client(
            1, os_auth_token=token, ironic_url=endpoint,
            ca_file=self._instance._cli_options.os_cacert)

        return self._baremetal

    def orchestration(self):
        """Returns an orchestration service client"""

        # TODO(d0ugal): This code is based on the upstream WIP implementation
        # and should be removed when it lands:
        # https://review.openstack.org/#/c/111786

        # Return the cached client after the first call.
        if self._orchestration is not None:
            return self._orchestration

        API_VERSIONS = {
            '1': 'heatclient.v1.client.Client',
        }

        heat_client = utils.get_client_class(
            API_NAME,
            self._instance._api_version[API_NAME],
            API_VERSIONS)
        LOG.debug('Instantiating orchestration client: %s', heat_client)

        endpoint = self._instance.get_endpoint_for_service_type(
            'orchestration')
        token = self._instance.auth.get_token(self._instance.session)

        client = heat_client(
            endpoint=endpoint,
            auth_url=self._instance._auth_url,
            token=token,
            username=self._instance._username,
            password=self._instance._password,
            region_name=self._instance._region_name,
            insecure=self._instance._insecure,
            ca_file=self._instance._cli_options.os_cacert,
        )

        self._orchestration = client
        return self._orchestration

    def management(self):
        """Returns an management service client"""

        # NOTE(review): unlike baremetal/orchestration, this rebuilds the
        # tuskar client on every call (no cache check) -- confirm intended.
        endpoint = self._instance.get_endpoint_for_service_type(
            "management",
            region_name=self._instance._region_name,
        )

        token = self._instance.auth.get_token(self._instance.session)

        self._management = tuskar_client.get_client(
            2, os_auth_token=token, tuskar_url=endpoint)

        return self._management
|
rdo-management/python-rdomanager-oscplugin | rdomanager_oscplugin/plugin.py | ClientWrapper.baremetal | python | def baremetal(self):
# TODO(d0ugal): When the ironicclient has it's own OSC plugin, the
# following client handling code should be removed in favor of the
# upstream version.
if self._baremetal is not None:
return self._baremetal
endpoint = self._instance.get_endpoint_for_service_type(
"baremetal",
region_name=self._instance._region_name,
)
token = self._instance.auth.get_token(self._instance.session)
self._baremetal = ironic_client.get_client(
1, os_auth_token=token, ironic_url=endpoint,
ca_file=self._instance._cli_options.os_cacert)
return self._baremetal | Returns an baremetal service client | train | https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/plugin.py#L72-L93 | null | class ClientWrapper(object):
def __init__(self, instance):
self._instance = instance
self._baremetal = None
self._orchestration = None
self._management = None
def orchestration(self):
"""Returns an orchestration service client"""
# TODO(d0ugal): This code is based on the upstream WIP implementation
# and should be removed when it lands:
# https://review.openstack.org/#/c/111786
if self._orchestration is not None:
return self._orchestration
API_VERSIONS = {
'1': 'heatclient.v1.client.Client',
}
heat_client = utils.get_client_class(
API_NAME,
self._instance._api_version[API_NAME],
API_VERSIONS)
LOG.debug('Instantiating orchestration client: %s', heat_client)
endpoint = self._instance.get_endpoint_for_service_type(
'orchestration')
token = self._instance.auth.get_token(self._instance.session)
client = heat_client(
endpoint=endpoint,
auth_url=self._instance._auth_url,
token=token,
username=self._instance._username,
password=self._instance._password,
region_name=self._instance._region_name,
insecure=self._instance._insecure,
ca_file=self._instance._cli_options.os_cacert,
)
self._orchestration = client
return self._orchestration
def management(self):
"""Returns an management service client"""
endpoint = self._instance.get_endpoint_for_service_type(
"management",
region_name=self._instance._region_name,
)
token = self._instance.auth.get_token(self._instance.session)
self._management = tuskar_client.get_client(
2, os_auth_token=token, tuskar_url=endpoint)
return self._management
|
rdo-management/python-rdomanager-oscplugin | rdomanager_oscplugin/plugin.py | ClientWrapper.orchestration | python | def orchestration(self):
# TODO(d0ugal): This code is based on the upstream WIP implementation
# and should be removed when it lands:
# https://review.openstack.org/#/c/111786
if self._orchestration is not None:
return self._orchestration
API_VERSIONS = {
'1': 'heatclient.v1.client.Client',
}
heat_client = utils.get_client_class(
API_NAME,
self._instance._api_version[API_NAME],
API_VERSIONS)
LOG.debug('Instantiating orchestration client: %s', heat_client)
endpoint = self._instance.get_endpoint_for_service_type(
'orchestration')
token = self._instance.auth.get_token(self._instance.session)
client = heat_client(
endpoint=endpoint,
auth_url=self._instance._auth_url,
token=token,
username=self._instance._username,
password=self._instance._password,
region_name=self._instance._region_name,
insecure=self._instance._insecure,
ca_file=self._instance._cli_options.os_cacert,
)
self._orchestration = client
return self._orchestration | Returns an orchestration service client | train | https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/plugin.py#L95-L131 | null | class ClientWrapper(object):
def __init__(self, instance):
self._instance = instance
self._baremetal = None
self._orchestration = None
self._management = None
def baremetal(self):
"""Returns an baremetal service client"""
# TODO(d0ugal): When the ironicclient has it's own OSC plugin, the
# following client handling code should be removed in favor of the
# upstream version.
if self._baremetal is not None:
return self._baremetal
endpoint = self._instance.get_endpoint_for_service_type(
"baremetal",
region_name=self._instance._region_name,
)
token = self._instance.auth.get_token(self._instance.session)
self._baremetal = ironic_client.get_client(
1, os_auth_token=token, ironic_url=endpoint,
ca_file=self._instance._cli_options.os_cacert)
return self._baremetal
def management(self):
"""Returns an management service client"""
endpoint = self._instance.get_endpoint_for_service_type(
"management",
region_name=self._instance._region_name,
)
token = self._instance.auth.get_token(self._instance.session)
self._management = tuskar_client.get_client(
2, os_auth_token=token, tuskar_url=endpoint)
return self._management
|
rdo-management/python-rdomanager-oscplugin | rdomanager_oscplugin/plugin.py | ClientWrapper.management | python | def management(self):
endpoint = self._instance.get_endpoint_for_service_type(
"management",
region_name=self._instance._region_name,
)
token = self._instance.auth.get_token(self._instance.session)
self._management = tuskar_client.get_client(
2, os_auth_token=token, tuskar_url=endpoint)
return self._management | Returns an management service client | train | https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/plugin.py#L133-L146 | null | class ClientWrapper(object):
def __init__(self, instance):
self._instance = instance
self._baremetal = None
self._orchestration = None
self._management = None
def baremetal(self):
"""Returns an baremetal service client"""
# TODO(d0ugal): When the ironicclient has it's own OSC plugin, the
# following client handling code should be removed in favor of the
# upstream version.
if self._baremetal is not None:
return self._baremetal
endpoint = self._instance.get_endpoint_for_service_type(
"baremetal",
region_name=self._instance._region_name,
)
token = self._instance.auth.get_token(self._instance.session)
self._baremetal = ironic_client.get_client(
1, os_auth_token=token, ironic_url=endpoint,
ca_file=self._instance._cli_options.os_cacert)
return self._baremetal
def orchestration(self):
"""Returns an orchestration service client"""
# TODO(d0ugal): This code is based on the upstream WIP implementation
# and should be removed when it lands:
# https://review.openstack.org/#/c/111786
if self._orchestration is not None:
return self._orchestration
API_VERSIONS = {
'1': 'heatclient.v1.client.Client',
}
heat_client = utils.get_client_class(
API_NAME,
self._instance._api_version[API_NAME],
API_VERSIONS)
LOG.debug('Instantiating orchestration client: %s', heat_client)
endpoint = self._instance.get_endpoint_for_service_type(
'orchestration')
token = self._instance.auth.get_token(self._instance.session)
client = heat_client(
endpoint=endpoint,
auth_url=self._instance._auth_url,
token=token,
username=self._instance._username,
password=self._instance._password,
region_name=self._instance._region_name,
insecure=self._instance._insecure,
ca_file=self._instance._cli_options.os_cacert,
)
self._orchestration = client
return self._orchestration
|
rdo-management/python-rdomanager-oscplugin | rdomanager_oscplugin/utils.py | _generate_password | python | def _generate_password():
uuid_str = six.text_type(uuid.uuid4()).encode("UTF-8")
return hashlib.sha1(uuid_str).hexdigest() | Create a random password
The password is made by taking a uuid and passing it though sha1sum.
We may change this in future to gain more entropy.
This is based on the tripleo command os-make-password | train | https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/utils.py#L51-L60 | null | # Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import base64
import hashlib
import json
import logging
import os
import re
import six
import struct
import subprocess
import sys
import time
import uuid
from rdomanager_oscplugin import exceptions
WEBROOT = '/dashboard/'
SERVICE_LIST = {
'ceilometer': {'password_field': 'OVERCLOUD_CEILOMETER_PASSWORD'},
'cinder': {'password_field': 'OVERCLOUD_CINDER_PASSWORD'},
'cinderv2': {'password_field': 'OVERCLOUD_CINDER_PASSWORD'},
'glance': {'password_field': 'OVERCLOUD_GLANCE_PASSWORD'},
'heat': {'password_field': 'OVERCLOUD_HEAT_PASSWORD'},
'neutron': {'password_field': 'OVERCLOUD_NEUTRON_PASSWORD'},
'nova': {'password_field': 'OVERCLOUD_NOVA_PASSWORD'},
'novav3': {'password_field': 'OVERCLOUD_NOVA_PASSWORD'},
'swift': {'password_field': 'OVERCLOUD_SWIFT_PASSWORD'},
'horizon': {
'port': '80',
'path': WEBROOT,
'admin_path': '%sadmin' % WEBROOT},
}
def generate_overcloud_passwords(output_file="tripleo-overcloud-passwords"):
"""Create the passwords needed for the overcloud
This will create the set of passwords required by the overcloud, store
them in the output file path and return a dictionary of passwords. If the
file already exists the existing passwords will be returned instead,
"""
if os.path.isfile(output_file):
with open(output_file) as f:
return dict(line.split('=') for line in f.read().splitlines())
password_names = (
"OVERCLOUD_ADMIN_PASSWORD",
"OVERCLOUD_ADMIN_TOKEN",
"OVERCLOUD_CEILOMETER_PASSWORD",
"OVERCLOUD_CEILOMETER_SECRET",
"OVERCLOUD_CINDER_PASSWORD",
"OVERCLOUD_DEMO_PASSWORD",
"OVERCLOUD_GLANCE_PASSWORD",
"OVERCLOUD_HEAT_PASSWORD",
"OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD",
"OVERCLOUD_NEUTRON_PASSWORD",
"OVERCLOUD_NOVA_PASSWORD",
"OVERCLOUD_SWIFT_HASH",
"OVERCLOUD_SWIFT_PASSWORD",
)
passwords = dict((p, _generate_password()) for p in password_names)
with open(output_file, 'w') as f:
for name, password in passwords.items():
f.write("{0}={1}\n".format(name, password))
return passwords
def check_hypervisor_stats(compute_client, nodes=1, memory=0, vcpu=0):
"""Check the Hypervisor stats meet a minimum value
Check the hypervisor stats match the required counts. This is an
implementation of a command in TripleO with the same name.
:param compute_client: Instance of Nova client
:type compute_client: novaclient.client.v2.Client
:param nodes: The number of nodes to wait for, defaults to 1.
:type nodes: int
:param memory: The amount of memory to wait for in MB, defaults to 0.
:type memory: int
:param vcpu: The number of vcpus to wait for, defaults to 0.
:type vcpu: int
"""
statistics = compute_client.hypervisors.statistics().to_dict()
if all([statistics['count'] >= nodes,
statistics['memory_mb'] >= memory,
statistics['vcpus'] >= vcpu]):
return statistics
else:
return None
def wait_for_stack_ready(orchestration_client, stack_name):
"""Check the status of an orchestration stack
Get the status of an orchestration stack and check whether it is complete
or failed.
:param orchestration_client: Instance of Orchestration client
:type orchestration_client: heatclient.v1.client.Client
:param stack_name: Name or UUID of stack to retrieve
:type stack_name: string
"""
SUCCESSFUL_MATCH_OUTPUT = "(CREATE|UPDATE)_COMPLETE"
FAIL_MATCH_OUTPUT = "(CREATE|UPDATE)_FAILED"
while True:
stack = orchestration_client.stacks.get(stack_name)
if not stack:
return False
status = stack.stack_status
if re.match(SUCCESSFUL_MATCH_OUTPUT, status):
return True
if re.match(FAIL_MATCH_OUTPUT, status):
print("Stack failed with status: {}".format(
stack.stack_status_reason, file=sys.stderr))
return False
time.sleep(10)
def wait_for_provision_state(baremetal_client, node_uuid, provision_state,
loops=10, sleep=1):
"""Wait for a given Provisioning state in Ironic Discoverd
Updating the provisioning state is an async operation, we
need to wait for it to be completed.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param node_uuid: The Ironic node UUID
:type node_uuid: str
:param provision_state: The provisioning state name to wait for
:type provision_state: str
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
"""
for _ in range(0, loops):
node = baremetal_client.node.get(node_uuid)
if node is None:
# The node can't be found in ironic, so we don't need to wait for
# the provision state
return True
if node.provision_state == provision_state:
return True
time.sleep(sleep)
return False
def wait_for_node_discovery(discoverd_client, auth_token, discoverd_url,
node_uuids, loops=220, sleep=10):
"""Check the status of Node discovery in Ironic discoverd
Gets the status and waits for them to complete.
:param discoverd_client: Ironic Discoverd client
:type discoverd_client: ironic_discoverd.client
:param auth_token: Authorisation token used by discoverd client
:type auth_token: string
:param discoverd_url: URL used by the discoverd client
:type discoverd_url: string
:param node_uuids: List of Node UUID's to wait for discovery
:type node_uuids: [string, ]
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
"""
log = logging.getLogger(__name__ + ".wait_for_node_discovery")
node_uuids = node_uuids[:]
for _ in range(0, loops):
for node_uuid in node_uuids:
status = discoverd_client.get_status(
node_uuid,
base_url=discoverd_url,
auth_token=auth_token)
if status['finished']:
log.debug("Discover finished for node {0} (Error: {1})".format(
node_uuid, status['error']))
node_uuids.remove(node_uuid)
yield node_uuid, status
if not len(node_uuids):
raise StopIteration
time.sleep(sleep)
if len(node_uuids):
log.error("Discovery didn't finish for nodes {0}".format(
','.join(node_uuids)))
def create_environment_file(path="~/overcloud-env.json",
control_scale=1, compute_scale=1,
ceph_storage_scale=0, block_storage_scale=0,
swift_storage_scale=0):
"""Create a heat environment file
Create the heat environment file with the scale parameters.
:param control_scale: Scale value for control roles.
:type control_scale: int
:param compute_scale: Scale value for compute roles.
:type compute_scale: int
:param ceph_storage_scale: Scale value for ceph storage roles.
:type ceph_storage_scale: int
:param block_storage_scale: Scale value for block storage roles.
:type block_storage_scale: int
:param swift_storage_scale: Scale value for swift storage roles.
:type swift_storage_scale: int
"""
env_path = os.path.expanduser(path)
with open(env_path, 'w+') as f:
f.write(json.dumps({
"parameters": {
"ControllerCount": control_scale,
"ComputeCount": compute_scale,
"CephStorageCount": ceph_storage_scale,
"BlockStorageCount": block_storage_scale,
"ObjectStorageCount": swift_storage_scale}
}))
return env_path
def set_nodes_state(baremetal_client, nodes, transition, target_state,
skipped_states=()):
"""Make all nodes available in the baremetal service for a deployment
For each node, make it available unless it is already available or active.
Available nodes can be used for a deployment and an active node is already
in use.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param nodes: List of Baremetal Nodes
:type nodes: [ironicclient.v1.node.Node]
:param transition: The state to set for a node. The full list of states
can be found in ironic.common.states.
:type transition: string
:param target_state: The expected result state for a node. For example when
transitioning to 'manage' the result is 'manageable'
:type target_state: string
:param skipped_states: A set of states to skip, for example 'active' nodes
are already deployed and the state can't always be
changed.
:type skipped_states: iterable of strings
"""
log = logging.getLogger(__name__ + ".set_nodes_state")
for node in nodes:
if node.provision_state in skipped_states:
continue
log.debug(
"Setting provision state from {0} to '{1} for Node {2}"
.format(node.provision_state, transition, node.uuid))
baremetal_client.node.set_provision_state(node.uuid, transition)
if not wait_for_provision_state(baremetal_client, node.uuid,
target_state):
print("FAIL: State not updated for Node {0}".format(
node.uuid, file=sys.stderr))
else:
yield node.uuid
def get_hiera_key(key_name):
"""Retrieve a key from the hiera store
:param password_name: Name of the key to retrieve
:type password_name: type
"""
command = ["hiera", key_name]
p = subprocess.Popen(command, stdout=subprocess.PIPE)
out, err = p.communicate()
return out
def get_config_value(section, option):
p = six.moves.configparser.ConfigParser()
p.read(os.path.expanduser("~/undercloud-passwords.conf"))
return p.get(section, option)
def remove_known_hosts(overcloud_ip):
"""For a given IP address remove SSH keys from the known_hosts file"""
known_hosts = os.path.expanduser("~/.ssh/known_hosts")
if os.path.exists(known_hosts):
command = ['ssh-keygen', '-R', overcloud_ip, '-f', known_hosts]
subprocess.check_call(command)
def create_cephx_key():
# NOTE(gfidente): Taken from
# https://github.com/ceph/ceph-deploy/blob/master/ceph_deploy/new.py#L21
key = os.urandom(16)
header = struct.pack("<hiih", 1, int(time.time()), 0, len(key))
return base64.b64encode(header + key)
def run_shell(cmd):
return subprocess.call([cmd], shell=True)
def all_unique(x):
"""Return True if the collection has no duplications."""
return len(set(x)) == len(x)
def file_checksum(filepath):
"""Calculate md5 checksum on file
:param filepath: Full path to file (e.g. /home/stack/image.qcow2)
:type filepath: string
"""
checksum = hashlib.md5()
with open(filepath, 'rb') as f:
for fragment in iter(lambda: f.read(65536), ''):
checksum.update(fragment)
return checksum.hexdigest()
def check_nodes_count(baremetal_client, stack, parameters, defaults):
"""Check if there are enough available nodes for creating/scaling stack"""
count = 0
if stack:
for param in defaults:
try:
current = int(stack.parameters[param])
except KeyError:
raise ValueError(
"Parameter '%s' was not found in existing stack" % param)
count += parameters.get(param, current)
else:
for param, default in defaults.items():
count += parameters.get(param, default)
available = len(baremetal_client.node.list(associated=False,
maintenance=False))
if count > available:
raise exceptions.DeploymentError(
"Not enough nodes - available: {0}, requested: {1}".format(
available, count))
else:
return True
|
rdo-management/python-rdomanager-oscplugin | rdomanager_oscplugin/utils.py | generate_overcloud_passwords | python | def generate_overcloud_passwords(output_file="tripleo-overcloud-passwords"):
if os.path.isfile(output_file):
with open(output_file) as f:
return dict(line.split('=') for line in f.read().splitlines())
password_names = (
"OVERCLOUD_ADMIN_PASSWORD",
"OVERCLOUD_ADMIN_TOKEN",
"OVERCLOUD_CEILOMETER_PASSWORD",
"OVERCLOUD_CEILOMETER_SECRET",
"OVERCLOUD_CINDER_PASSWORD",
"OVERCLOUD_DEMO_PASSWORD",
"OVERCLOUD_GLANCE_PASSWORD",
"OVERCLOUD_HEAT_PASSWORD",
"OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD",
"OVERCLOUD_NEUTRON_PASSWORD",
"OVERCLOUD_NOVA_PASSWORD",
"OVERCLOUD_SWIFT_HASH",
"OVERCLOUD_SWIFT_PASSWORD",
)
passwords = dict((p, _generate_password()) for p in password_names)
with open(output_file, 'w') as f:
for name, password in passwords.items():
f.write("{0}={1}\n".format(name, password))
return passwords | Create the passwords needed for the overcloud
This will create the set of passwords required by the overcloud, store
them in the output file path and return a dictionary of passwords. If the
file already exists the existing passwords will be returned instead, | train | https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/utils.py#L63-L97 | null | # Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import base64
import hashlib
import json
import logging
import os
import re
import six
import struct
import subprocess
import sys
import time
import uuid
from rdomanager_oscplugin import exceptions
WEBROOT = '/dashboard/'
SERVICE_LIST = {
'ceilometer': {'password_field': 'OVERCLOUD_CEILOMETER_PASSWORD'},
'cinder': {'password_field': 'OVERCLOUD_CINDER_PASSWORD'},
'cinderv2': {'password_field': 'OVERCLOUD_CINDER_PASSWORD'},
'glance': {'password_field': 'OVERCLOUD_GLANCE_PASSWORD'},
'heat': {'password_field': 'OVERCLOUD_HEAT_PASSWORD'},
'neutron': {'password_field': 'OVERCLOUD_NEUTRON_PASSWORD'},
'nova': {'password_field': 'OVERCLOUD_NOVA_PASSWORD'},
'novav3': {'password_field': 'OVERCLOUD_NOVA_PASSWORD'},
'swift': {'password_field': 'OVERCLOUD_SWIFT_PASSWORD'},
'horizon': {
'port': '80',
'path': WEBROOT,
'admin_path': '%sadmin' % WEBROOT},
}
def _generate_password():
"""Create a random password
The password is made by taking a uuid and passing it though sha1sum.
We may change this in future to gain more entropy.
This is based on the tripleo command os-make-password
"""
uuid_str = six.text_type(uuid.uuid4()).encode("UTF-8")
return hashlib.sha1(uuid_str).hexdigest()
def check_hypervisor_stats(compute_client, nodes=1, memory=0, vcpu=0):
"""Check the Hypervisor stats meet a minimum value
Check the hypervisor stats match the required counts. This is an
implementation of a command in TripleO with the same name.
:param compute_client: Instance of Nova client
:type compute_client: novaclient.client.v2.Client
:param nodes: The number of nodes to wait for, defaults to 1.
:type nodes: int
:param memory: The amount of memory to wait for in MB, defaults to 0.
:type memory: int
:param vcpu: The number of vcpus to wait for, defaults to 0.
:type vcpu: int
"""
statistics = compute_client.hypervisors.statistics().to_dict()
if all([statistics['count'] >= nodes,
statistics['memory_mb'] >= memory,
statistics['vcpus'] >= vcpu]):
return statistics
else:
return None
def wait_for_stack_ready(orchestration_client, stack_name):
"""Check the status of an orchestration stack
Get the status of an orchestration stack and check whether it is complete
or failed.
:param orchestration_client: Instance of Orchestration client
:type orchestration_client: heatclient.v1.client.Client
:param stack_name: Name or UUID of stack to retrieve
:type stack_name: string
"""
SUCCESSFUL_MATCH_OUTPUT = "(CREATE|UPDATE)_COMPLETE"
FAIL_MATCH_OUTPUT = "(CREATE|UPDATE)_FAILED"
while True:
stack = orchestration_client.stacks.get(stack_name)
if not stack:
return False
status = stack.stack_status
if re.match(SUCCESSFUL_MATCH_OUTPUT, status):
return True
if re.match(FAIL_MATCH_OUTPUT, status):
print("Stack failed with status: {}".format(
stack.stack_status_reason, file=sys.stderr))
return False
time.sleep(10)
def wait_for_provision_state(baremetal_client, node_uuid, provision_state,
loops=10, sleep=1):
"""Wait for a given Provisioning state in Ironic Discoverd
Updating the provisioning state is an async operation, we
need to wait for it to be completed.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param node_uuid: The Ironic node UUID
:type node_uuid: str
:param provision_state: The provisioning state name to wait for
:type provision_state: str
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
"""
for _ in range(0, loops):
node = baremetal_client.node.get(node_uuid)
if node is None:
# The node can't be found in ironic, so we don't need to wait for
# the provision state
return True
if node.provision_state == provision_state:
return True
time.sleep(sleep)
return False
def wait_for_node_discovery(discoverd_client, auth_token, discoverd_url,
node_uuids, loops=220, sleep=10):
"""Check the status of Node discovery in Ironic discoverd
Gets the status and waits for them to complete.
:param discoverd_client: Ironic Discoverd client
:type discoverd_client: ironic_discoverd.client
:param auth_token: Authorisation token used by discoverd client
:type auth_token: string
:param discoverd_url: URL used by the discoverd client
:type discoverd_url: string
:param node_uuids: List of Node UUID's to wait for discovery
:type node_uuids: [string, ]
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
"""
log = logging.getLogger(__name__ + ".wait_for_node_discovery")
node_uuids = node_uuids[:]
for _ in range(0, loops):
for node_uuid in node_uuids:
status = discoverd_client.get_status(
node_uuid,
base_url=discoverd_url,
auth_token=auth_token)
if status['finished']:
log.debug("Discover finished for node {0} (Error: {1})".format(
node_uuid, status['error']))
node_uuids.remove(node_uuid)
yield node_uuid, status
if not len(node_uuids):
raise StopIteration
time.sleep(sleep)
if len(node_uuids):
log.error("Discovery didn't finish for nodes {0}".format(
','.join(node_uuids)))
def create_environment_file(path="~/overcloud-env.json",
control_scale=1, compute_scale=1,
ceph_storage_scale=0, block_storage_scale=0,
swift_storage_scale=0):
"""Create a heat environment file
Create the heat environment file with the scale parameters.
:param control_scale: Scale value for control roles.
:type control_scale: int
:param compute_scale: Scale value for compute roles.
:type compute_scale: int
:param ceph_storage_scale: Scale value for ceph storage roles.
:type ceph_storage_scale: int
:param block_storage_scale: Scale value for block storage roles.
:type block_storage_scale: int
:param swift_storage_scale: Scale value for swift storage roles.
:type swift_storage_scale: int
"""
env_path = os.path.expanduser(path)
with open(env_path, 'w+') as f:
f.write(json.dumps({
"parameters": {
"ControllerCount": control_scale,
"ComputeCount": compute_scale,
"CephStorageCount": ceph_storage_scale,
"BlockStorageCount": block_storage_scale,
"ObjectStorageCount": swift_storage_scale}
}))
return env_path
def set_nodes_state(baremetal_client, nodes, transition, target_state,
skipped_states=()):
"""Make all nodes available in the baremetal service for a deployment
For each node, make it available unless it is already available or active.
Available nodes can be used for a deployment and an active node is already
in use.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param nodes: List of Baremetal Nodes
:type nodes: [ironicclient.v1.node.Node]
:param transition: The state to set for a node. The full list of states
can be found in ironic.common.states.
:type transition: string
:param target_state: The expected result state for a node. For example when
transitioning to 'manage' the result is 'manageable'
:type target_state: string
:param skipped_states: A set of states to skip, for example 'active' nodes
are already deployed and the state can't always be
changed.
:type skipped_states: iterable of strings
"""
log = logging.getLogger(__name__ + ".set_nodes_state")
for node in nodes:
if node.provision_state in skipped_states:
continue
log.debug(
"Setting provision state from {0} to '{1} for Node {2}"
.format(node.provision_state, transition, node.uuid))
baremetal_client.node.set_provision_state(node.uuid, transition)
if not wait_for_provision_state(baremetal_client, node.uuid,
target_state):
print("FAIL: State not updated for Node {0}".format(
node.uuid, file=sys.stderr))
else:
yield node.uuid
def get_hiera_key(key_name):
"""Retrieve a key from the hiera store
:param password_name: Name of the key to retrieve
:type password_name: type
"""
command = ["hiera", key_name]
p = subprocess.Popen(command, stdout=subprocess.PIPE)
out, err = p.communicate()
return out
def get_config_value(section, option):
p = six.moves.configparser.ConfigParser()
p.read(os.path.expanduser("~/undercloud-passwords.conf"))
return p.get(section, option)
def remove_known_hosts(overcloud_ip):
"""For a given IP address remove SSH keys from the known_hosts file"""
known_hosts = os.path.expanduser("~/.ssh/known_hosts")
if os.path.exists(known_hosts):
command = ['ssh-keygen', '-R', overcloud_ip, '-f', known_hosts]
subprocess.check_call(command)
def create_cephx_key():
# NOTE(gfidente): Taken from
# https://github.com/ceph/ceph-deploy/blob/master/ceph_deploy/new.py#L21
key = os.urandom(16)
header = struct.pack("<hiih", 1, int(time.time()), 0, len(key))
return base64.b64encode(header + key)
def run_shell(cmd):
return subprocess.call([cmd], shell=True)
def all_unique(x):
"""Return True if the collection has no duplications."""
return len(set(x)) == len(x)
def file_checksum(filepath):
"""Calculate md5 checksum on file
:param filepath: Full path to file (e.g. /home/stack/image.qcow2)
:type filepath: string
"""
checksum = hashlib.md5()
with open(filepath, 'rb') as f:
for fragment in iter(lambda: f.read(65536), ''):
checksum.update(fragment)
return checksum.hexdigest()
def check_nodes_count(baremetal_client, stack, parameters, defaults):
"""Check if there are enough available nodes for creating/scaling stack"""
count = 0
if stack:
for param in defaults:
try:
current = int(stack.parameters[param])
except KeyError:
raise ValueError(
"Parameter '%s' was not found in existing stack" % param)
count += parameters.get(param, current)
else:
for param, default in defaults.items():
count += parameters.get(param, default)
available = len(baremetal_client.node.list(associated=False,
maintenance=False))
if count > available:
raise exceptions.DeploymentError(
"Not enough nodes - available: {0}, requested: {1}".format(
available, count))
else:
return True
|
rdo-management/python-rdomanager-oscplugin | rdomanager_oscplugin/utils.py | check_hypervisor_stats | python | def check_hypervisor_stats(compute_client, nodes=1, memory=0, vcpu=0):
statistics = compute_client.hypervisors.statistics().to_dict()
if all([statistics['count'] >= nodes,
statistics['memory_mb'] >= memory,
statistics['vcpus'] >= vcpu]):
return statistics
else:
return None | Check the Hypervisor stats meet a minimum value
Check the hypervisor stats match the required counts. This is an
implementation of a command in TripleO with the same name.
:param compute_client: Instance of Nova client
:type compute_client: novaclient.client.v2.Client
:param nodes: The number of nodes to wait for, defaults to 1.
:type nodes: int
:param memory: The amount of memory to wait for in MB, defaults to 0.
:type memory: int
:param vcpu: The number of vcpus to wait for, defaults to 0.
:type vcpu: int | train | https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/utils.py#L100-L126 | null | # Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import base64
import hashlib
import json
import logging
import os
import re
import six
import struct
import subprocess
import sys
import time
import uuid
from rdomanager_oscplugin import exceptions
WEBROOT = '/dashboard/'
SERVICE_LIST = {
'ceilometer': {'password_field': 'OVERCLOUD_CEILOMETER_PASSWORD'},
'cinder': {'password_field': 'OVERCLOUD_CINDER_PASSWORD'},
'cinderv2': {'password_field': 'OVERCLOUD_CINDER_PASSWORD'},
'glance': {'password_field': 'OVERCLOUD_GLANCE_PASSWORD'},
'heat': {'password_field': 'OVERCLOUD_HEAT_PASSWORD'},
'neutron': {'password_field': 'OVERCLOUD_NEUTRON_PASSWORD'},
'nova': {'password_field': 'OVERCLOUD_NOVA_PASSWORD'},
'novav3': {'password_field': 'OVERCLOUD_NOVA_PASSWORD'},
'swift': {'password_field': 'OVERCLOUD_SWIFT_PASSWORD'},
'horizon': {
'port': '80',
'path': WEBROOT,
'admin_path': '%sadmin' % WEBROOT},
}
def _generate_password():
"""Create a random password
The password is made by taking a uuid and passing it though sha1sum.
We may change this in future to gain more entropy.
This is based on the tripleo command os-make-password
"""
uuid_str = six.text_type(uuid.uuid4()).encode("UTF-8")
return hashlib.sha1(uuid_str).hexdigest()
def generate_overcloud_passwords(output_file="tripleo-overcloud-passwords"):
"""Create the passwords needed for the overcloud
This will create the set of passwords required by the overcloud, store
them in the output file path and return a dictionary of passwords. If the
file already exists the existing passwords will be returned instead,
"""
if os.path.isfile(output_file):
with open(output_file) as f:
return dict(line.split('=') for line in f.read().splitlines())
password_names = (
"OVERCLOUD_ADMIN_PASSWORD",
"OVERCLOUD_ADMIN_TOKEN",
"OVERCLOUD_CEILOMETER_PASSWORD",
"OVERCLOUD_CEILOMETER_SECRET",
"OVERCLOUD_CINDER_PASSWORD",
"OVERCLOUD_DEMO_PASSWORD",
"OVERCLOUD_GLANCE_PASSWORD",
"OVERCLOUD_HEAT_PASSWORD",
"OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD",
"OVERCLOUD_NEUTRON_PASSWORD",
"OVERCLOUD_NOVA_PASSWORD",
"OVERCLOUD_SWIFT_HASH",
"OVERCLOUD_SWIFT_PASSWORD",
)
passwords = dict((p, _generate_password()) for p in password_names)
with open(output_file, 'w') as f:
for name, password in passwords.items():
f.write("{0}={1}\n".format(name, password))
return passwords
def wait_for_stack_ready(orchestration_client, stack_name):
"""Check the status of an orchestration stack
Get the status of an orchestration stack and check whether it is complete
or failed.
:param orchestration_client: Instance of Orchestration client
:type orchestration_client: heatclient.v1.client.Client
:param stack_name: Name or UUID of stack to retrieve
:type stack_name: string
"""
SUCCESSFUL_MATCH_OUTPUT = "(CREATE|UPDATE)_COMPLETE"
FAIL_MATCH_OUTPUT = "(CREATE|UPDATE)_FAILED"
while True:
stack = orchestration_client.stacks.get(stack_name)
if not stack:
return False
status = stack.stack_status
if re.match(SUCCESSFUL_MATCH_OUTPUT, status):
return True
if re.match(FAIL_MATCH_OUTPUT, status):
print("Stack failed with status: {}".format(
stack.stack_status_reason, file=sys.stderr))
return False
time.sleep(10)
def wait_for_provision_state(baremetal_client, node_uuid, provision_state,
loops=10, sleep=1):
"""Wait for a given Provisioning state in Ironic Discoverd
Updating the provisioning state is an async operation, we
need to wait for it to be completed.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param node_uuid: The Ironic node UUID
:type node_uuid: str
:param provision_state: The provisioning state name to wait for
:type provision_state: str
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
"""
for _ in range(0, loops):
node = baremetal_client.node.get(node_uuid)
if node is None:
# The node can't be found in ironic, so we don't need to wait for
# the provision state
return True
if node.provision_state == provision_state:
return True
time.sleep(sleep)
return False
def wait_for_node_discovery(discoverd_client, auth_token, discoverd_url,
node_uuids, loops=220, sleep=10):
"""Check the status of Node discovery in Ironic discoverd
Gets the status and waits for them to complete.
:param discoverd_client: Ironic Discoverd client
:type discoverd_client: ironic_discoverd.client
:param auth_token: Authorisation token used by discoverd client
:type auth_token: string
:param discoverd_url: URL used by the discoverd client
:type discoverd_url: string
:param node_uuids: List of Node UUID's to wait for discovery
:type node_uuids: [string, ]
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
"""
log = logging.getLogger(__name__ + ".wait_for_node_discovery")
node_uuids = node_uuids[:]
for _ in range(0, loops):
for node_uuid in node_uuids:
status = discoverd_client.get_status(
node_uuid,
base_url=discoverd_url,
auth_token=auth_token)
if status['finished']:
log.debug("Discover finished for node {0} (Error: {1})".format(
node_uuid, status['error']))
node_uuids.remove(node_uuid)
yield node_uuid, status
if not len(node_uuids):
raise StopIteration
time.sleep(sleep)
if len(node_uuids):
log.error("Discovery didn't finish for nodes {0}".format(
','.join(node_uuids)))
def create_environment_file(path="~/overcloud-env.json",
control_scale=1, compute_scale=1,
ceph_storage_scale=0, block_storage_scale=0,
swift_storage_scale=0):
"""Create a heat environment file
Create the heat environment file with the scale parameters.
:param control_scale: Scale value for control roles.
:type control_scale: int
:param compute_scale: Scale value for compute roles.
:type compute_scale: int
:param ceph_storage_scale: Scale value for ceph storage roles.
:type ceph_storage_scale: int
:param block_storage_scale: Scale value for block storage roles.
:type block_storage_scale: int
:param swift_storage_scale: Scale value for swift storage roles.
:type swift_storage_scale: int
"""
env_path = os.path.expanduser(path)
with open(env_path, 'w+') as f:
f.write(json.dumps({
"parameters": {
"ControllerCount": control_scale,
"ComputeCount": compute_scale,
"CephStorageCount": ceph_storage_scale,
"BlockStorageCount": block_storage_scale,
"ObjectStorageCount": swift_storage_scale}
}))
return env_path
def set_nodes_state(baremetal_client, nodes, transition, target_state,
skipped_states=()):
"""Make all nodes available in the baremetal service for a deployment
For each node, make it available unless it is already available or active.
Available nodes can be used for a deployment and an active node is already
in use.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param nodes: List of Baremetal Nodes
:type nodes: [ironicclient.v1.node.Node]
:param transition: The state to set for a node. The full list of states
can be found in ironic.common.states.
:type transition: string
:param target_state: The expected result state for a node. For example when
transitioning to 'manage' the result is 'manageable'
:type target_state: string
:param skipped_states: A set of states to skip, for example 'active' nodes
are already deployed and the state can't always be
changed.
:type skipped_states: iterable of strings
"""
log = logging.getLogger(__name__ + ".set_nodes_state")
for node in nodes:
if node.provision_state in skipped_states:
continue
log.debug(
"Setting provision state from {0} to '{1} for Node {2}"
.format(node.provision_state, transition, node.uuid))
baremetal_client.node.set_provision_state(node.uuid, transition)
if not wait_for_provision_state(baremetal_client, node.uuid,
target_state):
print("FAIL: State not updated for Node {0}".format(
node.uuid, file=sys.stderr))
else:
yield node.uuid
def get_hiera_key(key_name):
"""Retrieve a key from the hiera store
:param password_name: Name of the key to retrieve
:type password_name: type
"""
command = ["hiera", key_name]
p = subprocess.Popen(command, stdout=subprocess.PIPE)
out, err = p.communicate()
return out
def get_config_value(section, option):
p = six.moves.configparser.ConfigParser()
p.read(os.path.expanduser("~/undercloud-passwords.conf"))
return p.get(section, option)
def remove_known_hosts(overcloud_ip):
"""For a given IP address remove SSH keys from the known_hosts file"""
known_hosts = os.path.expanduser("~/.ssh/known_hosts")
if os.path.exists(known_hosts):
command = ['ssh-keygen', '-R', overcloud_ip, '-f', known_hosts]
subprocess.check_call(command)
def create_cephx_key():
# NOTE(gfidente): Taken from
# https://github.com/ceph/ceph-deploy/blob/master/ceph_deploy/new.py#L21
key = os.urandom(16)
header = struct.pack("<hiih", 1, int(time.time()), 0, len(key))
return base64.b64encode(header + key)
def run_shell(cmd):
return subprocess.call([cmd], shell=True)
def all_unique(x):
"""Return True if the collection has no duplications."""
return len(set(x)) == len(x)
def file_checksum(filepath):
"""Calculate md5 checksum on file
:param filepath: Full path to file (e.g. /home/stack/image.qcow2)
:type filepath: string
"""
checksum = hashlib.md5()
with open(filepath, 'rb') as f:
for fragment in iter(lambda: f.read(65536), ''):
checksum.update(fragment)
return checksum.hexdigest()
def check_nodes_count(baremetal_client, stack, parameters, defaults):
"""Check if there are enough available nodes for creating/scaling stack"""
count = 0
if stack:
for param in defaults:
try:
current = int(stack.parameters[param])
except KeyError:
raise ValueError(
"Parameter '%s' was not found in existing stack" % param)
count += parameters.get(param, current)
else:
for param, default in defaults.items():
count += parameters.get(param, default)
available = len(baremetal_client.node.list(associated=False,
maintenance=False))
if count > available:
raise exceptions.DeploymentError(
"Not enough nodes - available: {0}, requested: {1}".format(
available, count))
else:
return True
|
rdo-management/python-rdomanager-oscplugin | rdomanager_oscplugin/utils.py | wait_for_stack_ready | python | def wait_for_stack_ready(orchestration_client, stack_name):
SUCCESSFUL_MATCH_OUTPUT = "(CREATE|UPDATE)_COMPLETE"
FAIL_MATCH_OUTPUT = "(CREATE|UPDATE)_FAILED"
while True:
stack = orchestration_client.stacks.get(stack_name)
if not stack:
return False
status = stack.stack_status
if re.match(SUCCESSFUL_MATCH_OUTPUT, status):
return True
if re.match(FAIL_MATCH_OUTPUT, status):
print("Stack failed with status: {}".format(
stack.stack_status_reason, file=sys.stderr))
return False
time.sleep(10) | Check the status of an orchestration stack
Get the status of an orchestration stack and check whether it is complete
or failed.
:param orchestration_client: Instance of Orchestration client
:type orchestration_client: heatclient.v1.client.Client
:param stack_name: Name or UUID of stack to retrieve
:type stack_name: string | train | https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/utils.py#L129-L159 | null | # Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import base64
import hashlib
import json
import logging
import os
import re
import six
import struct
import subprocess
import sys
import time
import uuid
from rdomanager_oscplugin import exceptions
WEBROOT = '/dashboard/'
SERVICE_LIST = {
'ceilometer': {'password_field': 'OVERCLOUD_CEILOMETER_PASSWORD'},
'cinder': {'password_field': 'OVERCLOUD_CINDER_PASSWORD'},
'cinderv2': {'password_field': 'OVERCLOUD_CINDER_PASSWORD'},
'glance': {'password_field': 'OVERCLOUD_GLANCE_PASSWORD'},
'heat': {'password_field': 'OVERCLOUD_HEAT_PASSWORD'},
'neutron': {'password_field': 'OVERCLOUD_NEUTRON_PASSWORD'},
'nova': {'password_field': 'OVERCLOUD_NOVA_PASSWORD'},
'novav3': {'password_field': 'OVERCLOUD_NOVA_PASSWORD'},
'swift': {'password_field': 'OVERCLOUD_SWIFT_PASSWORD'},
'horizon': {
'port': '80',
'path': WEBROOT,
'admin_path': '%sadmin' % WEBROOT},
}
def _generate_password():
"""Create a random password
The password is made by taking a uuid and passing it though sha1sum.
We may change this in future to gain more entropy.
This is based on the tripleo command os-make-password
"""
uuid_str = six.text_type(uuid.uuid4()).encode("UTF-8")
return hashlib.sha1(uuid_str).hexdigest()
def generate_overcloud_passwords(output_file="tripleo-overcloud-passwords"):
"""Create the passwords needed for the overcloud
This will create the set of passwords required by the overcloud, store
them in the output file path and return a dictionary of passwords. If the
file already exists the existing passwords will be returned instead,
"""
if os.path.isfile(output_file):
with open(output_file) as f:
return dict(line.split('=') for line in f.read().splitlines())
password_names = (
"OVERCLOUD_ADMIN_PASSWORD",
"OVERCLOUD_ADMIN_TOKEN",
"OVERCLOUD_CEILOMETER_PASSWORD",
"OVERCLOUD_CEILOMETER_SECRET",
"OVERCLOUD_CINDER_PASSWORD",
"OVERCLOUD_DEMO_PASSWORD",
"OVERCLOUD_GLANCE_PASSWORD",
"OVERCLOUD_HEAT_PASSWORD",
"OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD",
"OVERCLOUD_NEUTRON_PASSWORD",
"OVERCLOUD_NOVA_PASSWORD",
"OVERCLOUD_SWIFT_HASH",
"OVERCLOUD_SWIFT_PASSWORD",
)
passwords = dict((p, _generate_password()) for p in password_names)
with open(output_file, 'w') as f:
for name, password in passwords.items():
f.write("{0}={1}\n".format(name, password))
return passwords
def check_hypervisor_stats(compute_client, nodes=1, memory=0, vcpu=0):
"""Check the Hypervisor stats meet a minimum value
Check the hypervisor stats match the required counts. This is an
implementation of a command in TripleO with the same name.
:param compute_client: Instance of Nova client
:type compute_client: novaclient.client.v2.Client
:param nodes: The number of nodes to wait for, defaults to 1.
:type nodes: int
:param memory: The amount of memory to wait for in MB, defaults to 0.
:type memory: int
:param vcpu: The number of vcpus to wait for, defaults to 0.
:type vcpu: int
"""
statistics = compute_client.hypervisors.statistics().to_dict()
if all([statistics['count'] >= nodes,
statistics['memory_mb'] >= memory,
statistics['vcpus'] >= vcpu]):
return statistics
else:
return None
def wait_for_provision_state(baremetal_client, node_uuid, provision_state,
loops=10, sleep=1):
"""Wait for a given Provisioning state in Ironic Discoverd
Updating the provisioning state is an async operation, we
need to wait for it to be completed.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param node_uuid: The Ironic node UUID
:type node_uuid: str
:param provision_state: The provisioning state name to wait for
:type provision_state: str
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
"""
for _ in range(0, loops):
node = baremetal_client.node.get(node_uuid)
if node is None:
# The node can't be found in ironic, so we don't need to wait for
# the provision state
return True
if node.provision_state == provision_state:
return True
time.sleep(sleep)
return False
def wait_for_node_discovery(discoverd_client, auth_token, discoverd_url,
node_uuids, loops=220, sleep=10):
"""Check the status of Node discovery in Ironic discoverd
Gets the status and waits for them to complete.
:param discoverd_client: Ironic Discoverd client
:type discoverd_client: ironic_discoverd.client
:param auth_token: Authorisation token used by discoverd client
:type auth_token: string
:param discoverd_url: URL used by the discoverd client
:type discoverd_url: string
:param node_uuids: List of Node UUID's to wait for discovery
:type node_uuids: [string, ]
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
"""
log = logging.getLogger(__name__ + ".wait_for_node_discovery")
node_uuids = node_uuids[:]
for _ in range(0, loops):
for node_uuid in node_uuids:
status = discoverd_client.get_status(
node_uuid,
base_url=discoverd_url,
auth_token=auth_token)
if status['finished']:
log.debug("Discover finished for node {0} (Error: {1})".format(
node_uuid, status['error']))
node_uuids.remove(node_uuid)
yield node_uuid, status
if not len(node_uuids):
raise StopIteration
time.sleep(sleep)
if len(node_uuids):
log.error("Discovery didn't finish for nodes {0}".format(
','.join(node_uuids)))
def create_environment_file(path="~/overcloud-env.json",
control_scale=1, compute_scale=1,
ceph_storage_scale=0, block_storage_scale=0,
swift_storage_scale=0):
"""Create a heat environment file
Create the heat environment file with the scale parameters.
:param control_scale: Scale value for control roles.
:type control_scale: int
:param compute_scale: Scale value for compute roles.
:type compute_scale: int
:param ceph_storage_scale: Scale value for ceph storage roles.
:type ceph_storage_scale: int
:param block_storage_scale: Scale value for block storage roles.
:type block_storage_scale: int
:param swift_storage_scale: Scale value for swift storage roles.
:type swift_storage_scale: int
"""
env_path = os.path.expanduser(path)
with open(env_path, 'w+') as f:
f.write(json.dumps({
"parameters": {
"ControllerCount": control_scale,
"ComputeCount": compute_scale,
"CephStorageCount": ceph_storage_scale,
"BlockStorageCount": block_storage_scale,
"ObjectStorageCount": swift_storage_scale}
}))
return env_path
def set_nodes_state(baremetal_client, nodes, transition, target_state,
skipped_states=()):
"""Make all nodes available in the baremetal service for a deployment
For each node, make it available unless it is already available or active.
Available nodes can be used for a deployment and an active node is already
in use.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param nodes: List of Baremetal Nodes
:type nodes: [ironicclient.v1.node.Node]
:param transition: The state to set for a node. The full list of states
can be found in ironic.common.states.
:type transition: string
:param target_state: The expected result state for a node. For example when
transitioning to 'manage' the result is 'manageable'
:type target_state: string
:param skipped_states: A set of states to skip, for example 'active' nodes
are already deployed and the state can't always be
changed.
:type skipped_states: iterable of strings
"""
log = logging.getLogger(__name__ + ".set_nodes_state")
for node in nodes:
if node.provision_state in skipped_states:
continue
log.debug(
"Setting provision state from {0} to '{1} for Node {2}"
.format(node.provision_state, transition, node.uuid))
baremetal_client.node.set_provision_state(node.uuid, transition)
if not wait_for_provision_state(baremetal_client, node.uuid,
target_state):
print("FAIL: State not updated for Node {0}".format(
node.uuid, file=sys.stderr))
else:
yield node.uuid
def get_hiera_key(key_name):
"""Retrieve a key from the hiera store
:param password_name: Name of the key to retrieve
:type password_name: type
"""
command = ["hiera", key_name]
p = subprocess.Popen(command, stdout=subprocess.PIPE)
out, err = p.communicate()
return out
def get_config_value(section, option):
p = six.moves.configparser.ConfigParser()
p.read(os.path.expanduser("~/undercloud-passwords.conf"))
return p.get(section, option)
def remove_known_hosts(overcloud_ip):
"""For a given IP address remove SSH keys from the known_hosts file"""
known_hosts = os.path.expanduser("~/.ssh/known_hosts")
if os.path.exists(known_hosts):
command = ['ssh-keygen', '-R', overcloud_ip, '-f', known_hosts]
subprocess.check_call(command)
def create_cephx_key():
# NOTE(gfidente): Taken from
# https://github.com/ceph/ceph-deploy/blob/master/ceph_deploy/new.py#L21
key = os.urandom(16)
header = struct.pack("<hiih", 1, int(time.time()), 0, len(key))
return base64.b64encode(header + key)
def run_shell(cmd):
return subprocess.call([cmd], shell=True)
def all_unique(x):
"""Return True if the collection has no duplications."""
return len(set(x)) == len(x)
def file_checksum(filepath):
"""Calculate md5 checksum on file
:param filepath: Full path to file (e.g. /home/stack/image.qcow2)
:type filepath: string
"""
checksum = hashlib.md5()
with open(filepath, 'rb') as f:
for fragment in iter(lambda: f.read(65536), ''):
checksum.update(fragment)
return checksum.hexdigest()
def check_nodes_count(baremetal_client, stack, parameters, defaults):
"""Check if there are enough available nodes for creating/scaling stack"""
count = 0
if stack:
for param in defaults:
try:
current = int(stack.parameters[param])
except KeyError:
raise ValueError(
"Parameter '%s' was not found in existing stack" % param)
count += parameters.get(param, current)
else:
for param, default in defaults.items():
count += parameters.get(param, default)
available = len(baremetal_client.node.list(associated=False,
maintenance=False))
if count > available:
raise exceptions.DeploymentError(
"Not enough nodes - available: {0}, requested: {1}".format(
available, count))
else:
return True
|
rdo-management/python-rdomanager-oscplugin | rdomanager_oscplugin/utils.py | wait_for_provision_state | python | def wait_for_provision_state(baremetal_client, node_uuid, provision_state,
loops=10, sleep=1):
for _ in range(0, loops):
node = baremetal_client.node.get(node_uuid)
if node is None:
# The node can't be found in ironic, so we don't need to wait for
# the provision state
return True
if node.provision_state == provision_state:
return True
time.sleep(sleep)
return False | Wait for a given Provisioning state in Ironic Discoverd
Updating the provisioning state is an async operation, we
need to wait for it to be completed.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param node_uuid: The Ironic node UUID
:type node_uuid: str
:param provision_state: The provisioning state name to wait for
:type provision_state: str
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int | train | https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/utils.py#L162-L199 | null | # Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import base64
import hashlib
import json
import logging
import os
import re
import six
import struct
import subprocess
import sys
import time
import uuid
from rdomanager_oscplugin import exceptions
WEBROOT = '/dashboard/'
SERVICE_LIST = {
'ceilometer': {'password_field': 'OVERCLOUD_CEILOMETER_PASSWORD'},
'cinder': {'password_field': 'OVERCLOUD_CINDER_PASSWORD'},
'cinderv2': {'password_field': 'OVERCLOUD_CINDER_PASSWORD'},
'glance': {'password_field': 'OVERCLOUD_GLANCE_PASSWORD'},
'heat': {'password_field': 'OVERCLOUD_HEAT_PASSWORD'},
'neutron': {'password_field': 'OVERCLOUD_NEUTRON_PASSWORD'},
'nova': {'password_field': 'OVERCLOUD_NOVA_PASSWORD'},
'novav3': {'password_field': 'OVERCLOUD_NOVA_PASSWORD'},
'swift': {'password_field': 'OVERCLOUD_SWIFT_PASSWORD'},
'horizon': {
'port': '80',
'path': WEBROOT,
'admin_path': '%sadmin' % WEBROOT},
}
def _generate_password():
"""Create a random password
The password is made by taking a uuid and passing it though sha1sum.
We may change this in future to gain more entropy.
This is based on the tripleo command os-make-password
"""
uuid_str = six.text_type(uuid.uuid4()).encode("UTF-8")
return hashlib.sha1(uuid_str).hexdigest()
def generate_overcloud_passwords(output_file="tripleo-overcloud-passwords"):
"""Create the passwords needed for the overcloud
This will create the set of passwords required by the overcloud, store
them in the output file path and return a dictionary of passwords. If the
file already exists the existing passwords will be returned instead,
"""
if os.path.isfile(output_file):
with open(output_file) as f:
return dict(line.split('=') for line in f.read().splitlines())
password_names = (
"OVERCLOUD_ADMIN_PASSWORD",
"OVERCLOUD_ADMIN_TOKEN",
"OVERCLOUD_CEILOMETER_PASSWORD",
"OVERCLOUD_CEILOMETER_SECRET",
"OVERCLOUD_CINDER_PASSWORD",
"OVERCLOUD_DEMO_PASSWORD",
"OVERCLOUD_GLANCE_PASSWORD",
"OVERCLOUD_HEAT_PASSWORD",
"OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD",
"OVERCLOUD_NEUTRON_PASSWORD",
"OVERCLOUD_NOVA_PASSWORD",
"OVERCLOUD_SWIFT_HASH",
"OVERCLOUD_SWIFT_PASSWORD",
)
passwords = dict((p, _generate_password()) for p in password_names)
with open(output_file, 'w') as f:
for name, password in passwords.items():
f.write("{0}={1}\n".format(name, password))
return passwords
def check_hypervisor_stats(compute_client, nodes=1, memory=0, vcpu=0):
"""Check the Hypervisor stats meet a minimum value
Check the hypervisor stats match the required counts. This is an
implementation of a command in TripleO with the same name.
:param compute_client: Instance of Nova client
:type compute_client: novaclient.client.v2.Client
:param nodes: The number of nodes to wait for, defaults to 1.
:type nodes: int
:param memory: The amount of memory to wait for in MB, defaults to 0.
:type memory: int
:param vcpu: The number of vcpus to wait for, defaults to 0.
:type vcpu: int
"""
statistics = compute_client.hypervisors.statistics().to_dict()
if all([statistics['count'] >= nodes,
statistics['memory_mb'] >= memory,
statistics['vcpus'] >= vcpu]):
return statistics
else:
return None
def wait_for_stack_ready(orchestration_client, stack_name):
"""Check the status of an orchestration stack
Get the status of an orchestration stack and check whether it is complete
or failed.
:param orchestration_client: Instance of Orchestration client
:type orchestration_client: heatclient.v1.client.Client
:param stack_name: Name or UUID of stack to retrieve
:type stack_name: string
"""
SUCCESSFUL_MATCH_OUTPUT = "(CREATE|UPDATE)_COMPLETE"
FAIL_MATCH_OUTPUT = "(CREATE|UPDATE)_FAILED"
while True:
stack = orchestration_client.stacks.get(stack_name)
if not stack:
return False
status = stack.stack_status
if re.match(SUCCESSFUL_MATCH_OUTPUT, status):
return True
if re.match(FAIL_MATCH_OUTPUT, status):
print("Stack failed with status: {}".format(
stack.stack_status_reason, file=sys.stderr))
return False
time.sleep(10)
def wait_for_node_discovery(discoverd_client, auth_token, discoverd_url,
node_uuids, loops=220, sleep=10):
"""Check the status of Node discovery in Ironic discoverd
Gets the status and waits for them to complete.
:param discoverd_client: Ironic Discoverd client
:type discoverd_client: ironic_discoverd.client
:param auth_token: Authorisation token used by discoverd client
:type auth_token: string
:param discoverd_url: URL used by the discoverd client
:type discoverd_url: string
:param node_uuids: List of Node UUID's to wait for discovery
:type node_uuids: [string, ]
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
"""
log = logging.getLogger(__name__ + ".wait_for_node_discovery")
node_uuids = node_uuids[:]
for _ in range(0, loops):
for node_uuid in node_uuids:
status = discoverd_client.get_status(
node_uuid,
base_url=discoverd_url,
auth_token=auth_token)
if status['finished']:
log.debug("Discover finished for node {0} (Error: {1})".format(
node_uuid, status['error']))
node_uuids.remove(node_uuid)
yield node_uuid, status
if not len(node_uuids):
raise StopIteration
time.sleep(sleep)
if len(node_uuids):
log.error("Discovery didn't finish for nodes {0}".format(
','.join(node_uuids)))
def create_environment_file(path="~/overcloud-env.json",
control_scale=1, compute_scale=1,
ceph_storage_scale=0, block_storage_scale=0,
swift_storage_scale=0):
"""Create a heat environment file
Create the heat environment file with the scale parameters.
:param control_scale: Scale value for control roles.
:type control_scale: int
:param compute_scale: Scale value for compute roles.
:type compute_scale: int
:param ceph_storage_scale: Scale value for ceph storage roles.
:type ceph_storage_scale: int
:param block_storage_scale: Scale value for block storage roles.
:type block_storage_scale: int
:param swift_storage_scale: Scale value for swift storage roles.
:type swift_storage_scale: int
"""
env_path = os.path.expanduser(path)
with open(env_path, 'w+') as f:
f.write(json.dumps({
"parameters": {
"ControllerCount": control_scale,
"ComputeCount": compute_scale,
"CephStorageCount": ceph_storage_scale,
"BlockStorageCount": block_storage_scale,
"ObjectStorageCount": swift_storage_scale}
}))
return env_path
def set_nodes_state(baremetal_client, nodes, transition, target_state,
skipped_states=()):
"""Make all nodes available in the baremetal service for a deployment
For each node, make it available unless it is already available or active.
Available nodes can be used for a deployment and an active node is already
in use.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param nodes: List of Baremetal Nodes
:type nodes: [ironicclient.v1.node.Node]
:param transition: The state to set for a node. The full list of states
can be found in ironic.common.states.
:type transition: string
:param target_state: The expected result state for a node. For example when
transitioning to 'manage' the result is 'manageable'
:type target_state: string
:param skipped_states: A set of states to skip, for example 'active' nodes
are already deployed and the state can't always be
changed.
:type skipped_states: iterable of strings
"""
log = logging.getLogger(__name__ + ".set_nodes_state")
for node in nodes:
if node.provision_state in skipped_states:
continue
log.debug(
"Setting provision state from {0} to '{1} for Node {2}"
.format(node.provision_state, transition, node.uuid))
baremetal_client.node.set_provision_state(node.uuid, transition)
if not wait_for_provision_state(baremetal_client, node.uuid,
target_state):
print("FAIL: State not updated for Node {0}".format(
node.uuid, file=sys.stderr))
else:
yield node.uuid
def get_hiera_key(key_name):
"""Retrieve a key from the hiera store
:param password_name: Name of the key to retrieve
:type password_name: type
"""
command = ["hiera", key_name]
p = subprocess.Popen(command, stdout=subprocess.PIPE)
out, err = p.communicate()
return out
def get_config_value(section, option):
p = six.moves.configparser.ConfigParser()
p.read(os.path.expanduser("~/undercloud-passwords.conf"))
return p.get(section, option)
def remove_known_hosts(overcloud_ip):
"""For a given IP address remove SSH keys from the known_hosts file"""
known_hosts = os.path.expanduser("~/.ssh/known_hosts")
if os.path.exists(known_hosts):
command = ['ssh-keygen', '-R', overcloud_ip, '-f', known_hosts]
subprocess.check_call(command)
def create_cephx_key():
# NOTE(gfidente): Taken from
# https://github.com/ceph/ceph-deploy/blob/master/ceph_deploy/new.py#L21
key = os.urandom(16)
header = struct.pack("<hiih", 1, int(time.time()), 0, len(key))
return base64.b64encode(header + key)
def run_shell(cmd):
return subprocess.call([cmd], shell=True)
def all_unique(x):
"""Return True if the collection has no duplications."""
return len(set(x)) == len(x)
def file_checksum(filepath):
"""Calculate md5 checksum on file
:param filepath: Full path to file (e.g. /home/stack/image.qcow2)
:type filepath: string
"""
checksum = hashlib.md5()
with open(filepath, 'rb') as f:
for fragment in iter(lambda: f.read(65536), ''):
checksum.update(fragment)
return checksum.hexdigest()
def check_nodes_count(baremetal_client, stack, parameters, defaults):
"""Check if there are enough available nodes for creating/scaling stack"""
count = 0
if stack:
for param in defaults:
try:
current = int(stack.parameters[param])
except KeyError:
raise ValueError(
"Parameter '%s' was not found in existing stack" % param)
count += parameters.get(param, current)
else:
for param, default in defaults.items():
count += parameters.get(param, default)
available = len(baremetal_client.node.list(associated=False,
maintenance=False))
if count > available:
raise exceptions.DeploymentError(
"Not enough nodes - available: {0}, requested: {1}".format(
available, count))
else:
return True
|
rdo-management/python-rdomanager-oscplugin | rdomanager_oscplugin/utils.py | wait_for_node_discovery | python | def wait_for_node_discovery(discoverd_client, auth_token, discoverd_url,
node_uuids, loops=220, sleep=10):
log = logging.getLogger(__name__ + ".wait_for_node_discovery")
node_uuids = node_uuids[:]
for _ in range(0, loops):
for node_uuid in node_uuids:
status = discoverd_client.get_status(
node_uuid,
base_url=discoverd_url,
auth_token=auth_token)
if status['finished']:
log.debug("Discover finished for node {0} (Error: {1})".format(
node_uuid, status['error']))
node_uuids.remove(node_uuid)
yield node_uuid, status
if not len(node_uuids):
raise StopIteration
time.sleep(sleep)
if len(node_uuids):
log.error("Discovery didn't finish for nodes {0}".format(
','.join(node_uuids))) | Check the status of Node discovery in Ironic discoverd
Gets the status and waits for them to complete.
:param discoverd_client: Ironic Discoverd client
:type discoverd_client: ironic_discoverd.client
:param auth_token: Authorisation token used by discoverd client
:type auth_token: string
:param discoverd_url: URL used by the discoverd client
:type discoverd_url: string
:param node_uuids: List of Node UUID's to wait for discovery
:type node_uuids: [string, ]
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int | train | https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/utils.py#L202-L251 | null | # Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import base64
import hashlib
import json
import logging
import os
import re
import six
import struct
import subprocess
import sys
import time
import uuid
from rdomanager_oscplugin import exceptions
WEBROOT = '/dashboard/'
SERVICE_LIST = {
'ceilometer': {'password_field': 'OVERCLOUD_CEILOMETER_PASSWORD'},
'cinder': {'password_field': 'OVERCLOUD_CINDER_PASSWORD'},
'cinderv2': {'password_field': 'OVERCLOUD_CINDER_PASSWORD'},
'glance': {'password_field': 'OVERCLOUD_GLANCE_PASSWORD'},
'heat': {'password_field': 'OVERCLOUD_HEAT_PASSWORD'},
'neutron': {'password_field': 'OVERCLOUD_NEUTRON_PASSWORD'},
'nova': {'password_field': 'OVERCLOUD_NOVA_PASSWORD'},
'novav3': {'password_field': 'OVERCLOUD_NOVA_PASSWORD'},
'swift': {'password_field': 'OVERCLOUD_SWIFT_PASSWORD'},
'horizon': {
'port': '80',
'path': WEBROOT,
'admin_path': '%sadmin' % WEBROOT},
}
def _generate_password():
"""Create a random password
The password is made by taking a uuid and passing it though sha1sum.
We may change this in future to gain more entropy.
This is based on the tripleo command os-make-password
"""
uuid_str = six.text_type(uuid.uuid4()).encode("UTF-8")
return hashlib.sha1(uuid_str).hexdigest()
def generate_overcloud_passwords(output_file="tripleo-overcloud-passwords"):
"""Create the passwords needed for the overcloud
This will create the set of passwords required by the overcloud, store
them in the output file path and return a dictionary of passwords. If the
file already exists the existing passwords will be returned instead,
"""
if os.path.isfile(output_file):
with open(output_file) as f:
return dict(line.split('=') for line in f.read().splitlines())
password_names = (
"OVERCLOUD_ADMIN_PASSWORD",
"OVERCLOUD_ADMIN_TOKEN",
"OVERCLOUD_CEILOMETER_PASSWORD",
"OVERCLOUD_CEILOMETER_SECRET",
"OVERCLOUD_CINDER_PASSWORD",
"OVERCLOUD_DEMO_PASSWORD",
"OVERCLOUD_GLANCE_PASSWORD",
"OVERCLOUD_HEAT_PASSWORD",
"OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD",
"OVERCLOUD_NEUTRON_PASSWORD",
"OVERCLOUD_NOVA_PASSWORD",
"OVERCLOUD_SWIFT_HASH",
"OVERCLOUD_SWIFT_PASSWORD",
)
passwords = dict((p, _generate_password()) for p in password_names)
with open(output_file, 'w') as f:
for name, password in passwords.items():
f.write("{0}={1}\n".format(name, password))
return passwords
def check_hypervisor_stats(compute_client, nodes=1, memory=0, vcpu=0):
"""Check the Hypervisor stats meet a minimum value
Check the hypervisor stats match the required counts. This is an
implementation of a command in TripleO with the same name.
:param compute_client: Instance of Nova client
:type compute_client: novaclient.client.v2.Client
:param nodes: The number of nodes to wait for, defaults to 1.
:type nodes: int
:param memory: The amount of memory to wait for in MB, defaults to 0.
:type memory: int
:param vcpu: The number of vcpus to wait for, defaults to 0.
:type vcpu: int
"""
statistics = compute_client.hypervisors.statistics().to_dict()
if all([statistics['count'] >= nodes,
statistics['memory_mb'] >= memory,
statistics['vcpus'] >= vcpu]):
return statistics
else:
return None
def wait_for_stack_ready(orchestration_client, stack_name):
"""Check the status of an orchestration stack
Get the status of an orchestration stack and check whether it is complete
or failed.
:param orchestration_client: Instance of Orchestration client
:type orchestration_client: heatclient.v1.client.Client
:param stack_name: Name or UUID of stack to retrieve
:type stack_name: string
"""
SUCCESSFUL_MATCH_OUTPUT = "(CREATE|UPDATE)_COMPLETE"
FAIL_MATCH_OUTPUT = "(CREATE|UPDATE)_FAILED"
while True:
stack = orchestration_client.stacks.get(stack_name)
if not stack:
return False
status = stack.stack_status
if re.match(SUCCESSFUL_MATCH_OUTPUT, status):
return True
if re.match(FAIL_MATCH_OUTPUT, status):
print("Stack failed with status: {}".format(
stack.stack_status_reason, file=sys.stderr))
return False
time.sleep(10)
def wait_for_provision_state(baremetal_client, node_uuid, provision_state,
loops=10, sleep=1):
"""Wait for a given Provisioning state in Ironic Discoverd
Updating the provisioning state is an async operation, we
need to wait for it to be completed.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param node_uuid: The Ironic node UUID
:type node_uuid: str
:param provision_state: The provisioning state name to wait for
:type provision_state: str
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
"""
for _ in range(0, loops):
node = baremetal_client.node.get(node_uuid)
if node is None:
# The node can't be found in ironic, so we don't need to wait for
# the provision state
return True
if node.provision_state == provision_state:
return True
time.sleep(sleep)
return False
def create_environment_file(path="~/overcloud-env.json",
control_scale=1, compute_scale=1,
ceph_storage_scale=0, block_storage_scale=0,
swift_storage_scale=0):
"""Create a heat environment file
Create the heat environment file with the scale parameters.
:param control_scale: Scale value for control roles.
:type control_scale: int
:param compute_scale: Scale value for compute roles.
:type compute_scale: int
:param ceph_storage_scale: Scale value for ceph storage roles.
:type ceph_storage_scale: int
:param block_storage_scale: Scale value for block storage roles.
:type block_storage_scale: int
:param swift_storage_scale: Scale value for swift storage roles.
:type swift_storage_scale: int
"""
env_path = os.path.expanduser(path)
with open(env_path, 'w+') as f:
f.write(json.dumps({
"parameters": {
"ControllerCount": control_scale,
"ComputeCount": compute_scale,
"CephStorageCount": ceph_storage_scale,
"BlockStorageCount": block_storage_scale,
"ObjectStorageCount": swift_storage_scale}
}))
return env_path
def set_nodes_state(baremetal_client, nodes, transition, target_state,
skipped_states=()):
"""Make all nodes available in the baremetal service for a deployment
For each node, make it available unless it is already available or active.
Available nodes can be used for a deployment and an active node is already
in use.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param nodes: List of Baremetal Nodes
:type nodes: [ironicclient.v1.node.Node]
:param transition: The state to set for a node. The full list of states
can be found in ironic.common.states.
:type transition: string
:param target_state: The expected result state for a node. For example when
transitioning to 'manage' the result is 'manageable'
:type target_state: string
:param skipped_states: A set of states to skip, for example 'active' nodes
are already deployed and the state can't always be
changed.
:type skipped_states: iterable of strings
"""
log = logging.getLogger(__name__ + ".set_nodes_state")
for node in nodes:
if node.provision_state in skipped_states:
continue
log.debug(
"Setting provision state from {0} to '{1} for Node {2}"
.format(node.provision_state, transition, node.uuid))
baremetal_client.node.set_provision_state(node.uuid, transition)
if not wait_for_provision_state(baremetal_client, node.uuid,
target_state):
print("FAIL: State not updated for Node {0}".format(
node.uuid, file=sys.stderr))
else:
yield node.uuid
def get_hiera_key(key_name):
"""Retrieve a key from the hiera store
:param password_name: Name of the key to retrieve
:type password_name: type
"""
command = ["hiera", key_name]
p = subprocess.Popen(command, stdout=subprocess.PIPE)
out, err = p.communicate()
return out
def get_config_value(section, option):
p = six.moves.configparser.ConfigParser()
p.read(os.path.expanduser("~/undercloud-passwords.conf"))
return p.get(section, option)
def remove_known_hosts(overcloud_ip):
"""For a given IP address remove SSH keys from the known_hosts file"""
known_hosts = os.path.expanduser("~/.ssh/known_hosts")
if os.path.exists(known_hosts):
command = ['ssh-keygen', '-R', overcloud_ip, '-f', known_hosts]
subprocess.check_call(command)
def create_cephx_key():
# NOTE(gfidente): Taken from
# https://github.com/ceph/ceph-deploy/blob/master/ceph_deploy/new.py#L21
key = os.urandom(16)
header = struct.pack("<hiih", 1, int(time.time()), 0, len(key))
return base64.b64encode(header + key)
def run_shell(cmd):
return subprocess.call([cmd], shell=True)
def all_unique(x):
"""Return True if the collection has no duplications."""
return len(set(x)) == len(x)
def file_checksum(filepath):
"""Calculate md5 checksum on file
:param filepath: Full path to file (e.g. /home/stack/image.qcow2)
:type filepath: string
"""
checksum = hashlib.md5()
with open(filepath, 'rb') as f:
for fragment in iter(lambda: f.read(65536), ''):
checksum.update(fragment)
return checksum.hexdigest()
def check_nodes_count(baremetal_client, stack, parameters, defaults):
"""Check if there are enough available nodes for creating/scaling stack"""
count = 0
if stack:
for param in defaults:
try:
current = int(stack.parameters[param])
except KeyError:
raise ValueError(
"Parameter '%s' was not found in existing stack" % param)
count += parameters.get(param, current)
else:
for param, default in defaults.items():
count += parameters.get(param, default)
available = len(baremetal_client.node.list(associated=False,
maintenance=False))
if count > available:
raise exceptions.DeploymentError(
"Not enough nodes - available: {0}, requested: {1}".format(
available, count))
else:
return True
|
rdo-management/python-rdomanager-oscplugin | rdomanager_oscplugin/utils.py | create_environment_file | python | def create_environment_file(path="~/overcloud-env.json",
control_scale=1, compute_scale=1,
ceph_storage_scale=0, block_storage_scale=0,
swift_storage_scale=0):
env_path = os.path.expanduser(path)
with open(env_path, 'w+') as f:
f.write(json.dumps({
"parameters": {
"ControllerCount": control_scale,
"ComputeCount": compute_scale,
"CephStorageCount": ceph_storage_scale,
"BlockStorageCount": block_storage_scale,
"ObjectStorageCount": swift_storage_scale}
}))
return env_path | Create a heat environment file
Create the heat environment file with the scale parameters.
:param control_scale: Scale value for control roles.
:type control_scale: int
:param compute_scale: Scale value for compute roles.
:type compute_scale: int
:param ceph_storage_scale: Scale value for ceph storage roles.
:type ceph_storage_scale: int
:param block_storage_scale: Scale value for block storage roles.
:type block_storage_scale: int
:param swift_storage_scale: Scale value for swift storage roles.
:type swift_storage_scale: int | train | https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/utils.py#L254-L289 | null | # Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import base64
import hashlib
import json
import logging
import os
import re
import six
import struct
import subprocess
import sys
import time
import uuid
from rdomanager_oscplugin import exceptions
WEBROOT = '/dashboard/'
SERVICE_LIST = {
'ceilometer': {'password_field': 'OVERCLOUD_CEILOMETER_PASSWORD'},
'cinder': {'password_field': 'OVERCLOUD_CINDER_PASSWORD'},
'cinderv2': {'password_field': 'OVERCLOUD_CINDER_PASSWORD'},
'glance': {'password_field': 'OVERCLOUD_GLANCE_PASSWORD'},
'heat': {'password_field': 'OVERCLOUD_HEAT_PASSWORD'},
'neutron': {'password_field': 'OVERCLOUD_NEUTRON_PASSWORD'},
'nova': {'password_field': 'OVERCLOUD_NOVA_PASSWORD'},
'novav3': {'password_field': 'OVERCLOUD_NOVA_PASSWORD'},
'swift': {'password_field': 'OVERCLOUD_SWIFT_PASSWORD'},
'horizon': {
'port': '80',
'path': WEBROOT,
'admin_path': '%sadmin' % WEBROOT},
}
def _generate_password():
"""Create a random password
The password is made by taking a uuid and passing it though sha1sum.
We may change this in future to gain more entropy.
This is based on the tripleo command os-make-password
"""
uuid_str = six.text_type(uuid.uuid4()).encode("UTF-8")
return hashlib.sha1(uuid_str).hexdigest()
def generate_overcloud_passwords(output_file="tripleo-overcloud-passwords"):
"""Create the passwords needed for the overcloud
This will create the set of passwords required by the overcloud, store
them in the output file path and return a dictionary of passwords. If the
file already exists the existing passwords will be returned instead,
"""
if os.path.isfile(output_file):
with open(output_file) as f:
return dict(line.split('=') for line in f.read().splitlines())
password_names = (
"OVERCLOUD_ADMIN_PASSWORD",
"OVERCLOUD_ADMIN_TOKEN",
"OVERCLOUD_CEILOMETER_PASSWORD",
"OVERCLOUD_CEILOMETER_SECRET",
"OVERCLOUD_CINDER_PASSWORD",
"OVERCLOUD_DEMO_PASSWORD",
"OVERCLOUD_GLANCE_PASSWORD",
"OVERCLOUD_HEAT_PASSWORD",
"OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD",
"OVERCLOUD_NEUTRON_PASSWORD",
"OVERCLOUD_NOVA_PASSWORD",
"OVERCLOUD_SWIFT_HASH",
"OVERCLOUD_SWIFT_PASSWORD",
)
passwords = dict((p, _generate_password()) for p in password_names)
with open(output_file, 'w') as f:
for name, password in passwords.items():
f.write("{0}={1}\n".format(name, password))
return passwords
def check_hypervisor_stats(compute_client, nodes=1, memory=0, vcpu=0):
"""Check the Hypervisor stats meet a minimum value
Check the hypervisor stats match the required counts. This is an
implementation of a command in TripleO with the same name.
:param compute_client: Instance of Nova client
:type compute_client: novaclient.client.v2.Client
:param nodes: The number of nodes to wait for, defaults to 1.
:type nodes: int
:param memory: The amount of memory to wait for in MB, defaults to 0.
:type memory: int
:param vcpu: The number of vcpus to wait for, defaults to 0.
:type vcpu: int
"""
statistics = compute_client.hypervisors.statistics().to_dict()
if all([statistics['count'] >= nodes,
statistics['memory_mb'] >= memory,
statistics['vcpus'] >= vcpu]):
return statistics
else:
return None
def wait_for_stack_ready(orchestration_client, stack_name):
"""Check the status of an orchestration stack
Get the status of an orchestration stack and check whether it is complete
or failed.
:param orchestration_client: Instance of Orchestration client
:type orchestration_client: heatclient.v1.client.Client
:param stack_name: Name or UUID of stack to retrieve
:type stack_name: string
"""
SUCCESSFUL_MATCH_OUTPUT = "(CREATE|UPDATE)_COMPLETE"
FAIL_MATCH_OUTPUT = "(CREATE|UPDATE)_FAILED"
while True:
stack = orchestration_client.stacks.get(stack_name)
if not stack:
return False
status = stack.stack_status
if re.match(SUCCESSFUL_MATCH_OUTPUT, status):
return True
if re.match(FAIL_MATCH_OUTPUT, status):
print("Stack failed with status: {}".format(
stack.stack_status_reason, file=sys.stderr))
return False
time.sleep(10)
def wait_for_provision_state(baremetal_client, node_uuid, provision_state,
loops=10, sleep=1):
"""Wait for a given Provisioning state in Ironic Discoverd
Updating the provisioning state is an async operation, we
need to wait for it to be completed.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param node_uuid: The Ironic node UUID
:type node_uuid: str
:param provision_state: The provisioning state name to wait for
:type provision_state: str
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
"""
for _ in range(0, loops):
node = baremetal_client.node.get(node_uuid)
if node is None:
# The node can't be found in ironic, so we don't need to wait for
# the provision state
return True
if node.provision_state == provision_state:
return True
time.sleep(sleep)
return False
def wait_for_node_discovery(discoverd_client, auth_token, discoverd_url,
node_uuids, loops=220, sleep=10):
"""Check the status of Node discovery in Ironic discoverd
Gets the status and waits for them to complete.
:param discoverd_client: Ironic Discoverd client
:type discoverd_client: ironic_discoverd.client
:param auth_token: Authorisation token used by discoverd client
:type auth_token: string
:param discoverd_url: URL used by the discoverd client
:type discoverd_url: string
:param node_uuids: List of Node UUID's to wait for discovery
:type node_uuids: [string, ]
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
"""
log = logging.getLogger(__name__ + ".wait_for_node_discovery")
node_uuids = node_uuids[:]
for _ in range(0, loops):
for node_uuid in node_uuids:
status = discoverd_client.get_status(
node_uuid,
base_url=discoverd_url,
auth_token=auth_token)
if status['finished']:
log.debug("Discover finished for node {0} (Error: {1})".format(
node_uuid, status['error']))
node_uuids.remove(node_uuid)
yield node_uuid, status
if not len(node_uuids):
raise StopIteration
time.sleep(sleep)
if len(node_uuids):
log.error("Discovery didn't finish for nodes {0}".format(
','.join(node_uuids)))
def set_nodes_state(baremetal_client, nodes, transition, target_state,
skipped_states=()):
"""Make all nodes available in the baremetal service for a deployment
For each node, make it available unless it is already available or active.
Available nodes can be used for a deployment and an active node is already
in use.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param nodes: List of Baremetal Nodes
:type nodes: [ironicclient.v1.node.Node]
:param transition: The state to set for a node. The full list of states
can be found in ironic.common.states.
:type transition: string
:param target_state: The expected result state for a node. For example when
transitioning to 'manage' the result is 'manageable'
:type target_state: string
:param skipped_states: A set of states to skip, for example 'active' nodes
are already deployed and the state can't always be
changed.
:type skipped_states: iterable of strings
"""
log = logging.getLogger(__name__ + ".set_nodes_state")
for node in nodes:
if node.provision_state in skipped_states:
continue
log.debug(
"Setting provision state from {0} to '{1} for Node {2}"
.format(node.provision_state, transition, node.uuid))
baremetal_client.node.set_provision_state(node.uuid, transition)
if not wait_for_provision_state(baremetal_client, node.uuid,
target_state):
print("FAIL: State not updated for Node {0}".format(
node.uuid, file=sys.stderr))
else:
yield node.uuid
def get_hiera_key(key_name):
"""Retrieve a key from the hiera store
:param password_name: Name of the key to retrieve
:type password_name: type
"""
command = ["hiera", key_name]
p = subprocess.Popen(command, stdout=subprocess.PIPE)
out, err = p.communicate()
return out
def get_config_value(section, option):
p = six.moves.configparser.ConfigParser()
p.read(os.path.expanduser("~/undercloud-passwords.conf"))
return p.get(section, option)
def remove_known_hosts(overcloud_ip):
"""For a given IP address remove SSH keys from the known_hosts file"""
known_hosts = os.path.expanduser("~/.ssh/known_hosts")
if os.path.exists(known_hosts):
command = ['ssh-keygen', '-R', overcloud_ip, '-f', known_hosts]
subprocess.check_call(command)
def create_cephx_key():
# NOTE(gfidente): Taken from
# https://github.com/ceph/ceph-deploy/blob/master/ceph_deploy/new.py#L21
key = os.urandom(16)
header = struct.pack("<hiih", 1, int(time.time()), 0, len(key))
return base64.b64encode(header + key)
def run_shell(cmd):
return subprocess.call([cmd], shell=True)
def all_unique(x):
"""Return True if the collection has no duplications."""
return len(set(x)) == len(x)
def file_checksum(filepath):
"""Calculate md5 checksum on file
:param filepath: Full path to file (e.g. /home/stack/image.qcow2)
:type filepath: string
"""
checksum = hashlib.md5()
with open(filepath, 'rb') as f:
for fragment in iter(lambda: f.read(65536), ''):
checksum.update(fragment)
return checksum.hexdigest()
def check_nodes_count(baremetal_client, stack, parameters, defaults):
"""Check if there are enough available nodes for creating/scaling stack"""
count = 0
if stack:
for param in defaults:
try:
current = int(stack.parameters[param])
except KeyError:
raise ValueError(
"Parameter '%s' was not found in existing stack" % param)
count += parameters.get(param, current)
else:
for param, default in defaults.items():
count += parameters.get(param, default)
available = len(baremetal_client.node.list(associated=False,
maintenance=False))
if count > available:
raise exceptions.DeploymentError(
"Not enough nodes - available: {0}, requested: {1}".format(
available, count))
else:
return True
|
rdo-management/python-rdomanager-oscplugin | rdomanager_oscplugin/utils.py | set_nodes_state | python | def set_nodes_state(baremetal_client, nodes, transition, target_state,
skipped_states=()):
log = logging.getLogger(__name__ + ".set_nodes_state")
for node in nodes:
if node.provision_state in skipped_states:
continue
log.debug(
"Setting provision state from {0} to '{1} for Node {2}"
.format(node.provision_state, transition, node.uuid))
baremetal_client.node.set_provision_state(node.uuid, transition)
if not wait_for_provision_state(baremetal_client, node.uuid,
target_state):
print("FAIL: State not updated for Node {0}".format(
node.uuid, file=sys.stderr))
else:
yield node.uuid | Make all nodes available in the baremetal service for a deployment
For each node, make it available unless it is already available or active.
Available nodes can be used for a deployment and an active node is already
in use.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param nodes: List of Baremetal Nodes
:type nodes: [ironicclient.v1.node.Node]
:param transition: The state to set for a node. The full list of states
can be found in ironic.common.states.
:type transition: string
:param target_state: The expected result state for a node. For example when
transitioning to 'manage' the result is 'manageable'
:type target_state: string
:param skipped_states: A set of states to skip, for example 'active' nodes
are already deployed and the state can't always be
changed.
:type skipped_states: iterable of strings | train | https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/utils.py#L292-L338 | [
"def wait_for_provision_state(baremetal_client, node_uuid, provision_state,\n loops=10, sleep=1):\n \"\"\"Wait for a given Provisioning state in Ironic Discoverd\n\n Updating the provisioning state is an async operation, we\n need to wait for it to be completed.\n\n :param ba... | # Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import base64
import hashlib
import json
import logging
import os
import re
import six
import struct
import subprocess
import sys
import time
import uuid
from rdomanager_oscplugin import exceptions
WEBROOT = '/dashboard/'
SERVICE_LIST = {
'ceilometer': {'password_field': 'OVERCLOUD_CEILOMETER_PASSWORD'},
'cinder': {'password_field': 'OVERCLOUD_CINDER_PASSWORD'},
'cinderv2': {'password_field': 'OVERCLOUD_CINDER_PASSWORD'},
'glance': {'password_field': 'OVERCLOUD_GLANCE_PASSWORD'},
'heat': {'password_field': 'OVERCLOUD_HEAT_PASSWORD'},
'neutron': {'password_field': 'OVERCLOUD_NEUTRON_PASSWORD'},
'nova': {'password_field': 'OVERCLOUD_NOVA_PASSWORD'},
'novav3': {'password_field': 'OVERCLOUD_NOVA_PASSWORD'},
'swift': {'password_field': 'OVERCLOUD_SWIFT_PASSWORD'},
'horizon': {
'port': '80',
'path': WEBROOT,
'admin_path': '%sadmin' % WEBROOT},
}
def _generate_password():
"""Create a random password
The password is made by taking a uuid and passing it though sha1sum.
We may change this in future to gain more entropy.
This is based on the tripleo command os-make-password
"""
uuid_str = six.text_type(uuid.uuid4()).encode("UTF-8")
return hashlib.sha1(uuid_str).hexdigest()
def generate_overcloud_passwords(output_file="tripleo-overcloud-passwords"):
"""Create the passwords needed for the overcloud
This will create the set of passwords required by the overcloud, store
them in the output file path and return a dictionary of passwords. If the
file already exists the existing passwords will be returned instead,
"""
if os.path.isfile(output_file):
with open(output_file) as f:
return dict(line.split('=') for line in f.read().splitlines())
password_names = (
"OVERCLOUD_ADMIN_PASSWORD",
"OVERCLOUD_ADMIN_TOKEN",
"OVERCLOUD_CEILOMETER_PASSWORD",
"OVERCLOUD_CEILOMETER_SECRET",
"OVERCLOUD_CINDER_PASSWORD",
"OVERCLOUD_DEMO_PASSWORD",
"OVERCLOUD_GLANCE_PASSWORD",
"OVERCLOUD_HEAT_PASSWORD",
"OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD",
"OVERCLOUD_NEUTRON_PASSWORD",
"OVERCLOUD_NOVA_PASSWORD",
"OVERCLOUD_SWIFT_HASH",
"OVERCLOUD_SWIFT_PASSWORD",
)
passwords = dict((p, _generate_password()) for p in password_names)
with open(output_file, 'w') as f:
for name, password in passwords.items():
f.write("{0}={1}\n".format(name, password))
return passwords
def check_hypervisor_stats(compute_client, nodes=1, memory=0, vcpu=0):
"""Check the Hypervisor stats meet a minimum value
Check the hypervisor stats match the required counts. This is an
implementation of a command in TripleO with the same name.
:param compute_client: Instance of Nova client
:type compute_client: novaclient.client.v2.Client
:param nodes: The number of nodes to wait for, defaults to 1.
:type nodes: int
:param memory: The amount of memory to wait for in MB, defaults to 0.
:type memory: int
:param vcpu: The number of vcpus to wait for, defaults to 0.
:type vcpu: int
"""
statistics = compute_client.hypervisors.statistics().to_dict()
if all([statistics['count'] >= nodes,
statistics['memory_mb'] >= memory,
statistics['vcpus'] >= vcpu]):
return statistics
else:
return None
def wait_for_stack_ready(orchestration_client, stack_name):
"""Check the status of an orchestration stack
Get the status of an orchestration stack and check whether it is complete
or failed.
:param orchestration_client: Instance of Orchestration client
:type orchestration_client: heatclient.v1.client.Client
:param stack_name: Name or UUID of stack to retrieve
:type stack_name: string
"""
SUCCESSFUL_MATCH_OUTPUT = "(CREATE|UPDATE)_COMPLETE"
FAIL_MATCH_OUTPUT = "(CREATE|UPDATE)_FAILED"
while True:
stack = orchestration_client.stacks.get(stack_name)
if not stack:
return False
status = stack.stack_status
if re.match(SUCCESSFUL_MATCH_OUTPUT, status):
return True
if re.match(FAIL_MATCH_OUTPUT, status):
print("Stack failed with status: {}".format(
stack.stack_status_reason, file=sys.stderr))
return False
time.sleep(10)
def wait_for_provision_state(baremetal_client, node_uuid, provision_state,
loops=10, sleep=1):
"""Wait for a given Provisioning state in Ironic Discoverd
Updating the provisioning state is an async operation, we
need to wait for it to be completed.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param node_uuid: The Ironic node UUID
:type node_uuid: str
:param provision_state: The provisioning state name to wait for
:type provision_state: str
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
"""
for _ in range(0, loops):
node = baremetal_client.node.get(node_uuid)
if node is None:
# The node can't be found in ironic, so we don't need to wait for
# the provision state
return True
if node.provision_state == provision_state:
return True
time.sleep(sleep)
return False
def wait_for_node_discovery(discoverd_client, auth_token, discoverd_url,
node_uuids, loops=220, sleep=10):
"""Check the status of Node discovery in Ironic discoverd
Gets the status and waits for them to complete.
:param discoverd_client: Ironic Discoverd client
:type discoverd_client: ironic_discoverd.client
:param auth_token: Authorisation token used by discoverd client
:type auth_token: string
:param discoverd_url: URL used by the discoverd client
:type discoverd_url: string
:param node_uuids: List of Node UUID's to wait for discovery
:type node_uuids: [string, ]
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
"""
log = logging.getLogger(__name__ + ".wait_for_node_discovery")
node_uuids = node_uuids[:]
for _ in range(0, loops):
for node_uuid in node_uuids:
status = discoverd_client.get_status(
node_uuid,
base_url=discoverd_url,
auth_token=auth_token)
if status['finished']:
log.debug("Discover finished for node {0} (Error: {1})".format(
node_uuid, status['error']))
node_uuids.remove(node_uuid)
yield node_uuid, status
if not len(node_uuids):
raise StopIteration
time.sleep(sleep)
if len(node_uuids):
log.error("Discovery didn't finish for nodes {0}".format(
','.join(node_uuids)))
def create_environment_file(path="~/overcloud-env.json",
control_scale=1, compute_scale=1,
ceph_storage_scale=0, block_storage_scale=0,
swift_storage_scale=0):
"""Create a heat environment file
Create the heat environment file with the scale parameters.
:param control_scale: Scale value for control roles.
:type control_scale: int
:param compute_scale: Scale value for compute roles.
:type compute_scale: int
:param ceph_storage_scale: Scale value for ceph storage roles.
:type ceph_storage_scale: int
:param block_storage_scale: Scale value for block storage roles.
:type block_storage_scale: int
:param swift_storage_scale: Scale value for swift storage roles.
:type swift_storage_scale: int
"""
env_path = os.path.expanduser(path)
with open(env_path, 'w+') as f:
f.write(json.dumps({
"parameters": {
"ControllerCount": control_scale,
"ComputeCount": compute_scale,
"CephStorageCount": ceph_storage_scale,
"BlockStorageCount": block_storage_scale,
"ObjectStorageCount": swift_storage_scale}
}))
return env_path
def get_hiera_key(key_name):
"""Retrieve a key from the hiera store
:param password_name: Name of the key to retrieve
:type password_name: type
"""
command = ["hiera", key_name]
p = subprocess.Popen(command, stdout=subprocess.PIPE)
out, err = p.communicate()
return out
def get_config_value(section, option):
p = six.moves.configparser.ConfigParser()
p.read(os.path.expanduser("~/undercloud-passwords.conf"))
return p.get(section, option)
def remove_known_hosts(overcloud_ip):
"""For a given IP address remove SSH keys from the known_hosts file"""
known_hosts = os.path.expanduser("~/.ssh/known_hosts")
if os.path.exists(known_hosts):
command = ['ssh-keygen', '-R', overcloud_ip, '-f', known_hosts]
subprocess.check_call(command)
def create_cephx_key():
# NOTE(gfidente): Taken from
# https://github.com/ceph/ceph-deploy/blob/master/ceph_deploy/new.py#L21
key = os.urandom(16)
header = struct.pack("<hiih", 1, int(time.time()), 0, len(key))
return base64.b64encode(header + key)
def run_shell(cmd):
return subprocess.call([cmd], shell=True)
def all_unique(x):
"""Return True if the collection has no duplications."""
return len(set(x)) == len(x)
def file_checksum(filepath):
"""Calculate md5 checksum on file
:param filepath: Full path to file (e.g. /home/stack/image.qcow2)
:type filepath: string
"""
checksum = hashlib.md5()
with open(filepath, 'rb') as f:
for fragment in iter(lambda: f.read(65536), ''):
checksum.update(fragment)
return checksum.hexdigest()
def check_nodes_count(baremetal_client, stack, parameters, defaults):
"""Check if there are enough available nodes for creating/scaling stack"""
count = 0
if stack:
for param in defaults:
try:
current = int(stack.parameters[param])
except KeyError:
raise ValueError(
"Parameter '%s' was not found in existing stack" % param)
count += parameters.get(param, current)
else:
for param, default in defaults.items():
count += parameters.get(param, default)
available = len(baremetal_client.node.list(associated=False,
maintenance=False))
if count > available:
raise exceptions.DeploymentError(
"Not enough nodes - available: {0}, requested: {1}".format(
available, count))
else:
return True
|
rdo-management/python-rdomanager-oscplugin | rdomanager_oscplugin/utils.py | get_hiera_key | python | def get_hiera_key(key_name):
command = ["hiera", key_name]
p = subprocess.Popen(command, stdout=subprocess.PIPE)
out, err = p.communicate()
return out | Retrieve a key from the hiera store
:param password_name: Name of the key to retrieve
:type password_name: type | train | https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/utils.py#L341-L351 | null | # Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import base64
import hashlib
import json
import logging
import os
import re
import six
import struct
import subprocess
import sys
import time
import uuid
from rdomanager_oscplugin import exceptions
WEBROOT = '/dashboard/'
SERVICE_LIST = {
'ceilometer': {'password_field': 'OVERCLOUD_CEILOMETER_PASSWORD'},
'cinder': {'password_field': 'OVERCLOUD_CINDER_PASSWORD'},
'cinderv2': {'password_field': 'OVERCLOUD_CINDER_PASSWORD'},
'glance': {'password_field': 'OVERCLOUD_GLANCE_PASSWORD'},
'heat': {'password_field': 'OVERCLOUD_HEAT_PASSWORD'},
'neutron': {'password_field': 'OVERCLOUD_NEUTRON_PASSWORD'},
'nova': {'password_field': 'OVERCLOUD_NOVA_PASSWORD'},
'novav3': {'password_field': 'OVERCLOUD_NOVA_PASSWORD'},
'swift': {'password_field': 'OVERCLOUD_SWIFT_PASSWORD'},
'horizon': {
'port': '80',
'path': WEBROOT,
'admin_path': '%sadmin' % WEBROOT},
}
def _generate_password():
"""Create a random password
The password is made by taking a uuid and passing it though sha1sum.
We may change this in future to gain more entropy.
This is based on the tripleo command os-make-password
"""
uuid_str = six.text_type(uuid.uuid4()).encode("UTF-8")
return hashlib.sha1(uuid_str).hexdigest()
def generate_overcloud_passwords(output_file="tripleo-overcloud-passwords"):
"""Create the passwords needed for the overcloud
This will create the set of passwords required by the overcloud, store
them in the output file path and return a dictionary of passwords. If the
file already exists the existing passwords will be returned instead,
"""
if os.path.isfile(output_file):
with open(output_file) as f:
return dict(line.split('=') for line in f.read().splitlines())
password_names = (
"OVERCLOUD_ADMIN_PASSWORD",
"OVERCLOUD_ADMIN_TOKEN",
"OVERCLOUD_CEILOMETER_PASSWORD",
"OVERCLOUD_CEILOMETER_SECRET",
"OVERCLOUD_CINDER_PASSWORD",
"OVERCLOUD_DEMO_PASSWORD",
"OVERCLOUD_GLANCE_PASSWORD",
"OVERCLOUD_HEAT_PASSWORD",
"OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD",
"OVERCLOUD_NEUTRON_PASSWORD",
"OVERCLOUD_NOVA_PASSWORD",
"OVERCLOUD_SWIFT_HASH",
"OVERCLOUD_SWIFT_PASSWORD",
)
passwords = dict((p, _generate_password()) for p in password_names)
with open(output_file, 'w') as f:
for name, password in passwords.items():
f.write("{0}={1}\n".format(name, password))
return passwords
def check_hypervisor_stats(compute_client, nodes=1, memory=0, vcpu=0):
"""Check the Hypervisor stats meet a minimum value
Check the hypervisor stats match the required counts. This is an
implementation of a command in TripleO with the same name.
:param compute_client: Instance of Nova client
:type compute_client: novaclient.client.v2.Client
:param nodes: The number of nodes to wait for, defaults to 1.
:type nodes: int
:param memory: The amount of memory to wait for in MB, defaults to 0.
:type memory: int
:param vcpu: The number of vcpus to wait for, defaults to 0.
:type vcpu: int
"""
statistics = compute_client.hypervisors.statistics().to_dict()
if all([statistics['count'] >= nodes,
statistics['memory_mb'] >= memory,
statistics['vcpus'] >= vcpu]):
return statistics
else:
return None
def wait_for_stack_ready(orchestration_client, stack_name):
"""Check the status of an orchestration stack
Get the status of an orchestration stack and check whether it is complete
or failed.
:param orchestration_client: Instance of Orchestration client
:type orchestration_client: heatclient.v1.client.Client
:param stack_name: Name or UUID of stack to retrieve
:type stack_name: string
"""
SUCCESSFUL_MATCH_OUTPUT = "(CREATE|UPDATE)_COMPLETE"
FAIL_MATCH_OUTPUT = "(CREATE|UPDATE)_FAILED"
while True:
stack = orchestration_client.stacks.get(stack_name)
if not stack:
return False
status = stack.stack_status
if re.match(SUCCESSFUL_MATCH_OUTPUT, status):
return True
if re.match(FAIL_MATCH_OUTPUT, status):
print("Stack failed with status: {}".format(
stack.stack_status_reason, file=sys.stderr))
return False
time.sleep(10)
def wait_for_provision_state(baremetal_client, node_uuid, provision_state,
loops=10, sleep=1):
"""Wait for a given Provisioning state in Ironic Discoverd
Updating the provisioning state is an async operation, we
need to wait for it to be completed.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param node_uuid: The Ironic node UUID
:type node_uuid: str
:param provision_state: The provisioning state name to wait for
:type provision_state: str
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
"""
for _ in range(0, loops):
node = baremetal_client.node.get(node_uuid)
if node is None:
# The node can't be found in ironic, so we don't need to wait for
# the provision state
return True
if node.provision_state == provision_state:
return True
time.sleep(sleep)
return False
def wait_for_node_discovery(discoverd_client, auth_token, discoverd_url,
node_uuids, loops=220, sleep=10):
"""Check the status of Node discovery in Ironic discoverd
Gets the status and waits for them to complete.
:param discoverd_client: Ironic Discoverd client
:type discoverd_client: ironic_discoverd.client
:param auth_token: Authorisation token used by discoverd client
:type auth_token: string
:param discoverd_url: URL used by the discoverd client
:type discoverd_url: string
:param node_uuids: List of Node UUID's to wait for discovery
:type node_uuids: [string, ]
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
"""
log = logging.getLogger(__name__ + ".wait_for_node_discovery")
node_uuids = node_uuids[:]
for _ in range(0, loops):
for node_uuid in node_uuids:
status = discoverd_client.get_status(
node_uuid,
base_url=discoverd_url,
auth_token=auth_token)
if status['finished']:
log.debug("Discover finished for node {0} (Error: {1})".format(
node_uuid, status['error']))
node_uuids.remove(node_uuid)
yield node_uuid, status
if not len(node_uuids):
raise StopIteration
time.sleep(sleep)
if len(node_uuids):
log.error("Discovery didn't finish for nodes {0}".format(
','.join(node_uuids)))
def create_environment_file(path="~/overcloud-env.json",
control_scale=1, compute_scale=1,
ceph_storage_scale=0, block_storage_scale=0,
swift_storage_scale=0):
"""Create a heat environment file
Create the heat environment file with the scale parameters.
:param control_scale: Scale value for control roles.
:type control_scale: int
:param compute_scale: Scale value for compute roles.
:type compute_scale: int
:param ceph_storage_scale: Scale value for ceph storage roles.
:type ceph_storage_scale: int
:param block_storage_scale: Scale value for block storage roles.
:type block_storage_scale: int
:param swift_storage_scale: Scale value for swift storage roles.
:type swift_storage_scale: int
"""
env_path = os.path.expanduser(path)
with open(env_path, 'w+') as f:
f.write(json.dumps({
"parameters": {
"ControllerCount": control_scale,
"ComputeCount": compute_scale,
"CephStorageCount": ceph_storage_scale,
"BlockStorageCount": block_storage_scale,
"ObjectStorageCount": swift_storage_scale}
}))
return env_path
def set_nodes_state(baremetal_client, nodes, transition, target_state,
skipped_states=()):
"""Make all nodes available in the baremetal service for a deployment
For each node, make it available unless it is already available or active.
Available nodes can be used for a deployment and an active node is already
in use.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param nodes: List of Baremetal Nodes
:type nodes: [ironicclient.v1.node.Node]
:param transition: The state to set for a node. The full list of states
can be found in ironic.common.states.
:type transition: string
:param target_state: The expected result state for a node. For example when
transitioning to 'manage' the result is 'manageable'
:type target_state: string
:param skipped_states: A set of states to skip, for example 'active' nodes
are already deployed and the state can't always be
changed.
:type skipped_states: iterable of strings
"""
log = logging.getLogger(__name__ + ".set_nodes_state")
for node in nodes:
if node.provision_state in skipped_states:
continue
log.debug(
"Setting provision state from {0} to '{1} for Node {2}"
.format(node.provision_state, transition, node.uuid))
baremetal_client.node.set_provision_state(node.uuid, transition)
if not wait_for_provision_state(baremetal_client, node.uuid,
target_state):
print("FAIL: State not updated for Node {0}".format(
node.uuid, file=sys.stderr))
else:
yield node.uuid
def get_config_value(section, option):
p = six.moves.configparser.ConfigParser()
p.read(os.path.expanduser("~/undercloud-passwords.conf"))
return p.get(section, option)
def remove_known_hosts(overcloud_ip):
"""For a given IP address remove SSH keys from the known_hosts file"""
known_hosts = os.path.expanduser("~/.ssh/known_hosts")
if os.path.exists(known_hosts):
command = ['ssh-keygen', '-R', overcloud_ip, '-f', known_hosts]
subprocess.check_call(command)
def create_cephx_key():
# NOTE(gfidente): Taken from
# https://github.com/ceph/ceph-deploy/blob/master/ceph_deploy/new.py#L21
key = os.urandom(16)
header = struct.pack("<hiih", 1, int(time.time()), 0, len(key))
return base64.b64encode(header + key)
def run_shell(cmd):
return subprocess.call([cmd], shell=True)
def all_unique(x):
"""Return True if the collection has no duplications."""
return len(set(x)) == len(x)
def file_checksum(filepath):
"""Calculate md5 checksum on file
:param filepath: Full path to file (e.g. /home/stack/image.qcow2)
:type filepath: string
"""
checksum = hashlib.md5()
with open(filepath, 'rb') as f:
for fragment in iter(lambda: f.read(65536), ''):
checksum.update(fragment)
return checksum.hexdigest()
def check_nodes_count(baremetal_client, stack, parameters, defaults):
"""Check if there are enough available nodes for creating/scaling stack"""
count = 0
if stack:
for param in defaults:
try:
current = int(stack.parameters[param])
except KeyError:
raise ValueError(
"Parameter '%s' was not found in existing stack" % param)
count += parameters.get(param, current)
else:
for param, default in defaults.items():
count += parameters.get(param, default)
available = len(baremetal_client.node.list(associated=False,
maintenance=False))
if count > available:
raise exceptions.DeploymentError(
"Not enough nodes - available: {0}, requested: {1}".format(
available, count))
else:
return True
|
rdo-management/python-rdomanager-oscplugin | rdomanager_oscplugin/utils.py | remove_known_hosts | python | def remove_known_hosts(overcloud_ip):
known_hosts = os.path.expanduser("~/.ssh/known_hosts")
if os.path.exists(known_hosts):
command = ['ssh-keygen', '-R', overcloud_ip, '-f', known_hosts]
subprocess.check_call(command) | For a given IP address remove SSH keys from the known_hosts file | train | https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/utils.py#L361-L368 | null | # Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import base64
import hashlib
import json
import logging
import os
import re
import six
import struct
import subprocess
import sys
import time
import uuid
from rdomanager_oscplugin import exceptions
WEBROOT = '/dashboard/'
SERVICE_LIST = {
'ceilometer': {'password_field': 'OVERCLOUD_CEILOMETER_PASSWORD'},
'cinder': {'password_field': 'OVERCLOUD_CINDER_PASSWORD'},
'cinderv2': {'password_field': 'OVERCLOUD_CINDER_PASSWORD'},
'glance': {'password_field': 'OVERCLOUD_GLANCE_PASSWORD'},
'heat': {'password_field': 'OVERCLOUD_HEAT_PASSWORD'},
'neutron': {'password_field': 'OVERCLOUD_NEUTRON_PASSWORD'},
'nova': {'password_field': 'OVERCLOUD_NOVA_PASSWORD'},
'novav3': {'password_field': 'OVERCLOUD_NOVA_PASSWORD'},
'swift': {'password_field': 'OVERCLOUD_SWIFT_PASSWORD'},
'horizon': {
'port': '80',
'path': WEBROOT,
'admin_path': '%sadmin' % WEBROOT},
}
def _generate_password():
"""Create a random password
The password is made by taking a uuid and passing it though sha1sum.
We may change this in future to gain more entropy.
This is based on the tripleo command os-make-password
"""
uuid_str = six.text_type(uuid.uuid4()).encode("UTF-8")
return hashlib.sha1(uuid_str).hexdigest()
def generate_overcloud_passwords(output_file="tripleo-overcloud-passwords"):
"""Create the passwords needed for the overcloud
This will create the set of passwords required by the overcloud, store
them in the output file path and return a dictionary of passwords. If the
file already exists the existing passwords will be returned instead,
"""
if os.path.isfile(output_file):
with open(output_file) as f:
return dict(line.split('=') for line in f.read().splitlines())
password_names = (
"OVERCLOUD_ADMIN_PASSWORD",
"OVERCLOUD_ADMIN_TOKEN",
"OVERCLOUD_CEILOMETER_PASSWORD",
"OVERCLOUD_CEILOMETER_SECRET",
"OVERCLOUD_CINDER_PASSWORD",
"OVERCLOUD_DEMO_PASSWORD",
"OVERCLOUD_GLANCE_PASSWORD",
"OVERCLOUD_HEAT_PASSWORD",
"OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD",
"OVERCLOUD_NEUTRON_PASSWORD",
"OVERCLOUD_NOVA_PASSWORD",
"OVERCLOUD_SWIFT_HASH",
"OVERCLOUD_SWIFT_PASSWORD",
)
passwords = dict((p, _generate_password()) for p in password_names)
with open(output_file, 'w') as f:
for name, password in passwords.items():
f.write("{0}={1}\n".format(name, password))
return passwords
def check_hypervisor_stats(compute_client, nodes=1, memory=0, vcpu=0):
"""Check the Hypervisor stats meet a minimum value
Check the hypervisor stats match the required counts. This is an
implementation of a command in TripleO with the same name.
:param compute_client: Instance of Nova client
:type compute_client: novaclient.client.v2.Client
:param nodes: The number of nodes to wait for, defaults to 1.
:type nodes: int
:param memory: The amount of memory to wait for in MB, defaults to 0.
:type memory: int
:param vcpu: The number of vcpus to wait for, defaults to 0.
:type vcpu: int
"""
statistics = compute_client.hypervisors.statistics().to_dict()
if all([statistics['count'] >= nodes,
statistics['memory_mb'] >= memory,
statistics['vcpus'] >= vcpu]):
return statistics
else:
return None
def wait_for_stack_ready(orchestration_client, stack_name):
"""Check the status of an orchestration stack
Get the status of an orchestration stack and check whether it is complete
or failed.
:param orchestration_client: Instance of Orchestration client
:type orchestration_client: heatclient.v1.client.Client
:param stack_name: Name or UUID of stack to retrieve
:type stack_name: string
"""
SUCCESSFUL_MATCH_OUTPUT = "(CREATE|UPDATE)_COMPLETE"
FAIL_MATCH_OUTPUT = "(CREATE|UPDATE)_FAILED"
while True:
stack = orchestration_client.stacks.get(stack_name)
if not stack:
return False
status = stack.stack_status
if re.match(SUCCESSFUL_MATCH_OUTPUT, status):
return True
if re.match(FAIL_MATCH_OUTPUT, status):
print("Stack failed with status: {}".format(
stack.stack_status_reason, file=sys.stderr))
return False
time.sleep(10)
def wait_for_provision_state(baremetal_client, node_uuid, provision_state,
loops=10, sleep=1):
"""Wait for a given Provisioning state in Ironic Discoverd
Updating the provisioning state is an async operation, we
need to wait for it to be completed.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param node_uuid: The Ironic node UUID
:type node_uuid: str
:param provision_state: The provisioning state name to wait for
:type provision_state: str
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
"""
for _ in range(0, loops):
node = baremetal_client.node.get(node_uuid)
if node is None:
# The node can't be found in ironic, so we don't need to wait for
# the provision state
return True
if node.provision_state == provision_state:
return True
time.sleep(sleep)
return False
def wait_for_node_discovery(discoverd_client, auth_token, discoverd_url,
node_uuids, loops=220, sleep=10):
"""Check the status of Node discovery in Ironic discoverd
Gets the status and waits for them to complete.
:param discoverd_client: Ironic Discoverd client
:type discoverd_client: ironic_discoverd.client
:param auth_token: Authorisation token used by discoverd client
:type auth_token: string
:param discoverd_url: URL used by the discoverd client
:type discoverd_url: string
:param node_uuids: List of Node UUID's to wait for discovery
:type node_uuids: [string, ]
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
"""
log = logging.getLogger(__name__ + ".wait_for_node_discovery")
node_uuids = node_uuids[:]
for _ in range(0, loops):
for node_uuid in node_uuids:
status = discoverd_client.get_status(
node_uuid,
base_url=discoverd_url,
auth_token=auth_token)
if status['finished']:
log.debug("Discover finished for node {0} (Error: {1})".format(
node_uuid, status['error']))
node_uuids.remove(node_uuid)
yield node_uuid, status
if not len(node_uuids):
raise StopIteration
time.sleep(sleep)
if len(node_uuids):
log.error("Discovery didn't finish for nodes {0}".format(
','.join(node_uuids)))
def create_environment_file(path="~/overcloud-env.json",
control_scale=1, compute_scale=1,
ceph_storage_scale=0, block_storage_scale=0,
swift_storage_scale=0):
"""Create a heat environment file
Create the heat environment file with the scale parameters.
:param control_scale: Scale value for control roles.
:type control_scale: int
:param compute_scale: Scale value for compute roles.
:type compute_scale: int
:param ceph_storage_scale: Scale value for ceph storage roles.
:type ceph_storage_scale: int
:param block_storage_scale: Scale value for block storage roles.
:type block_storage_scale: int
:param swift_storage_scale: Scale value for swift storage roles.
:type swift_storage_scale: int
"""
env_path = os.path.expanduser(path)
with open(env_path, 'w+') as f:
f.write(json.dumps({
"parameters": {
"ControllerCount": control_scale,
"ComputeCount": compute_scale,
"CephStorageCount": ceph_storage_scale,
"BlockStorageCount": block_storage_scale,
"ObjectStorageCount": swift_storage_scale}
}))
return env_path
def set_nodes_state(baremetal_client, nodes, transition, target_state,
skipped_states=()):
"""Make all nodes available in the baremetal service for a deployment
For each node, make it available unless it is already available or active.
Available nodes can be used for a deployment and an active node is already
in use.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param nodes: List of Baremetal Nodes
:type nodes: [ironicclient.v1.node.Node]
:param transition: The state to set for a node. The full list of states
can be found in ironic.common.states.
:type transition: string
:param target_state: The expected result state for a node. For example when
transitioning to 'manage' the result is 'manageable'
:type target_state: string
:param skipped_states: A set of states to skip, for example 'active' nodes
are already deployed and the state can't always be
changed.
:type skipped_states: iterable of strings
"""
log = logging.getLogger(__name__ + ".set_nodes_state")
for node in nodes:
if node.provision_state in skipped_states:
continue
log.debug(
"Setting provision state from {0} to '{1} for Node {2}"
.format(node.provision_state, transition, node.uuid))
baremetal_client.node.set_provision_state(node.uuid, transition)
if not wait_for_provision_state(baremetal_client, node.uuid,
target_state):
print("FAIL: State not updated for Node {0}".format(
node.uuid, file=sys.stderr))
else:
yield node.uuid
def get_hiera_key(key_name):
"""Retrieve a key from the hiera store
:param password_name: Name of the key to retrieve
:type password_name: type
"""
command = ["hiera", key_name]
p = subprocess.Popen(command, stdout=subprocess.PIPE)
out, err = p.communicate()
return out
def get_config_value(section, option):
p = six.moves.configparser.ConfigParser()
p.read(os.path.expanduser("~/undercloud-passwords.conf"))
return p.get(section, option)
def create_cephx_key():
# NOTE(gfidente): Taken from
# https://github.com/ceph/ceph-deploy/blob/master/ceph_deploy/new.py#L21
key = os.urandom(16)
header = struct.pack("<hiih", 1, int(time.time()), 0, len(key))
return base64.b64encode(header + key)
def run_shell(cmd):
return subprocess.call([cmd], shell=True)
def all_unique(x):
"""Return True if the collection has no duplications."""
return len(set(x)) == len(x)
def file_checksum(filepath):
"""Calculate md5 checksum on file
:param filepath: Full path to file (e.g. /home/stack/image.qcow2)
:type filepath: string
"""
checksum = hashlib.md5()
with open(filepath, 'rb') as f:
for fragment in iter(lambda: f.read(65536), ''):
checksum.update(fragment)
return checksum.hexdigest()
def check_nodes_count(baremetal_client, stack, parameters, defaults):
"""Check if there are enough available nodes for creating/scaling stack"""
count = 0
if stack:
for param in defaults:
try:
current = int(stack.parameters[param])
except KeyError:
raise ValueError(
"Parameter '%s' was not found in existing stack" % param)
count += parameters.get(param, current)
else:
for param, default in defaults.items():
count += parameters.get(param, default)
available = len(baremetal_client.node.list(associated=False,
maintenance=False))
if count > available:
raise exceptions.DeploymentError(
"Not enough nodes - available: {0}, requested: {1}".format(
available, count))
else:
return True
|
rdo-management/python-rdomanager-oscplugin | rdomanager_oscplugin/utils.py | file_checksum | python | def file_checksum(filepath):
checksum = hashlib.md5()
with open(filepath, 'rb') as f:
for fragment in iter(lambda: f.read(65536), ''):
checksum.update(fragment)
return checksum.hexdigest() | Calculate md5 checksum on file
:param filepath: Full path to file (e.g. /home/stack/image.qcow2)
:type filepath: string | train | https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/utils.py#L388-L399 | null | # Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import base64
import hashlib
import json
import logging
import os
import re
import six
import struct
import subprocess
import sys
import time
import uuid
from rdomanager_oscplugin import exceptions
WEBROOT = '/dashboard/'
SERVICE_LIST = {
'ceilometer': {'password_field': 'OVERCLOUD_CEILOMETER_PASSWORD'},
'cinder': {'password_field': 'OVERCLOUD_CINDER_PASSWORD'},
'cinderv2': {'password_field': 'OVERCLOUD_CINDER_PASSWORD'},
'glance': {'password_field': 'OVERCLOUD_GLANCE_PASSWORD'},
'heat': {'password_field': 'OVERCLOUD_HEAT_PASSWORD'},
'neutron': {'password_field': 'OVERCLOUD_NEUTRON_PASSWORD'},
'nova': {'password_field': 'OVERCLOUD_NOVA_PASSWORD'},
'novav3': {'password_field': 'OVERCLOUD_NOVA_PASSWORD'},
'swift': {'password_field': 'OVERCLOUD_SWIFT_PASSWORD'},
'horizon': {
'port': '80',
'path': WEBROOT,
'admin_path': '%sadmin' % WEBROOT},
}
def _generate_password():
"""Create a random password
The password is made by taking a uuid and passing it though sha1sum.
We may change this in future to gain more entropy.
This is based on the tripleo command os-make-password
"""
uuid_str = six.text_type(uuid.uuid4()).encode("UTF-8")
return hashlib.sha1(uuid_str).hexdigest()
def generate_overcloud_passwords(output_file="tripleo-overcloud-passwords"):
"""Create the passwords needed for the overcloud
This will create the set of passwords required by the overcloud, store
them in the output file path and return a dictionary of passwords. If the
file already exists the existing passwords will be returned instead,
"""
if os.path.isfile(output_file):
with open(output_file) as f:
return dict(line.split('=') for line in f.read().splitlines())
password_names = (
"OVERCLOUD_ADMIN_PASSWORD",
"OVERCLOUD_ADMIN_TOKEN",
"OVERCLOUD_CEILOMETER_PASSWORD",
"OVERCLOUD_CEILOMETER_SECRET",
"OVERCLOUD_CINDER_PASSWORD",
"OVERCLOUD_DEMO_PASSWORD",
"OVERCLOUD_GLANCE_PASSWORD",
"OVERCLOUD_HEAT_PASSWORD",
"OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD",
"OVERCLOUD_NEUTRON_PASSWORD",
"OVERCLOUD_NOVA_PASSWORD",
"OVERCLOUD_SWIFT_HASH",
"OVERCLOUD_SWIFT_PASSWORD",
)
passwords = dict((p, _generate_password()) for p in password_names)
with open(output_file, 'w') as f:
for name, password in passwords.items():
f.write("{0}={1}\n".format(name, password))
return passwords
def check_hypervisor_stats(compute_client, nodes=1, memory=0, vcpu=0):
"""Check the Hypervisor stats meet a minimum value
Check the hypervisor stats match the required counts. This is an
implementation of a command in TripleO with the same name.
:param compute_client: Instance of Nova client
:type compute_client: novaclient.client.v2.Client
:param nodes: The number of nodes to wait for, defaults to 1.
:type nodes: int
:param memory: The amount of memory to wait for in MB, defaults to 0.
:type memory: int
:param vcpu: The number of vcpus to wait for, defaults to 0.
:type vcpu: int
"""
statistics = compute_client.hypervisors.statistics().to_dict()
if all([statistics['count'] >= nodes,
statistics['memory_mb'] >= memory,
statistics['vcpus'] >= vcpu]):
return statistics
else:
return None
def wait_for_stack_ready(orchestration_client, stack_name):
"""Check the status of an orchestration stack
Get the status of an orchestration stack and check whether it is complete
or failed.
:param orchestration_client: Instance of Orchestration client
:type orchestration_client: heatclient.v1.client.Client
:param stack_name: Name or UUID of stack to retrieve
:type stack_name: string
"""
SUCCESSFUL_MATCH_OUTPUT = "(CREATE|UPDATE)_COMPLETE"
FAIL_MATCH_OUTPUT = "(CREATE|UPDATE)_FAILED"
while True:
stack = orchestration_client.stacks.get(stack_name)
if not stack:
return False
status = stack.stack_status
if re.match(SUCCESSFUL_MATCH_OUTPUT, status):
return True
if re.match(FAIL_MATCH_OUTPUT, status):
print("Stack failed with status: {}".format(
stack.stack_status_reason, file=sys.stderr))
return False
time.sleep(10)
def wait_for_provision_state(baremetal_client, node_uuid, provision_state,
loops=10, sleep=1):
"""Wait for a given Provisioning state in Ironic Discoverd
Updating the provisioning state is an async operation, we
need to wait for it to be completed.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param node_uuid: The Ironic node UUID
:type node_uuid: str
:param provision_state: The provisioning state name to wait for
:type provision_state: str
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
"""
for _ in range(0, loops):
node = baremetal_client.node.get(node_uuid)
if node is None:
# The node can't be found in ironic, so we don't need to wait for
# the provision state
return True
if node.provision_state == provision_state:
return True
time.sleep(sleep)
return False
def wait_for_node_discovery(discoverd_client, auth_token, discoverd_url,
node_uuids, loops=220, sleep=10):
"""Check the status of Node discovery in Ironic discoverd
Gets the status and waits for them to complete.
:param discoverd_client: Ironic Discoverd client
:type discoverd_client: ironic_discoverd.client
:param auth_token: Authorisation token used by discoverd client
:type auth_token: string
:param discoverd_url: URL used by the discoverd client
:type discoverd_url: string
:param node_uuids: List of Node UUID's to wait for discovery
:type node_uuids: [string, ]
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
"""
log = logging.getLogger(__name__ + ".wait_for_node_discovery")
node_uuids = node_uuids[:]
for _ in range(0, loops):
for node_uuid in node_uuids:
status = discoverd_client.get_status(
node_uuid,
base_url=discoverd_url,
auth_token=auth_token)
if status['finished']:
log.debug("Discover finished for node {0} (Error: {1})".format(
node_uuid, status['error']))
node_uuids.remove(node_uuid)
yield node_uuid, status
if not len(node_uuids):
raise StopIteration
time.sleep(sleep)
if len(node_uuids):
log.error("Discovery didn't finish for nodes {0}".format(
','.join(node_uuids)))
def create_environment_file(path="~/overcloud-env.json",
control_scale=1, compute_scale=1,
ceph_storage_scale=0, block_storage_scale=0,
swift_storage_scale=0):
"""Create a heat environment file
Create the heat environment file with the scale parameters.
:param control_scale: Scale value for control roles.
:type control_scale: int
:param compute_scale: Scale value for compute roles.
:type compute_scale: int
:param ceph_storage_scale: Scale value for ceph storage roles.
:type ceph_storage_scale: int
:param block_storage_scale: Scale value for block storage roles.
:type block_storage_scale: int
:param swift_storage_scale: Scale value for swift storage roles.
:type swift_storage_scale: int
"""
env_path = os.path.expanduser(path)
with open(env_path, 'w+') as f:
f.write(json.dumps({
"parameters": {
"ControllerCount": control_scale,
"ComputeCount": compute_scale,
"CephStorageCount": ceph_storage_scale,
"BlockStorageCount": block_storage_scale,
"ObjectStorageCount": swift_storage_scale}
}))
return env_path
def set_nodes_state(baremetal_client, nodes, transition, target_state,
skipped_states=()):
"""Make all nodes available in the baremetal service for a deployment
For each node, make it available unless it is already available or active.
Available nodes can be used for a deployment and an active node is already
in use.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param nodes: List of Baremetal Nodes
:type nodes: [ironicclient.v1.node.Node]
:param transition: The state to set for a node. The full list of states
can be found in ironic.common.states.
:type transition: string
:param target_state: The expected result state for a node. For example when
transitioning to 'manage' the result is 'manageable'
:type target_state: string
:param skipped_states: A set of states to skip, for example 'active' nodes
are already deployed and the state can't always be
changed.
:type skipped_states: iterable of strings
"""
log = logging.getLogger(__name__ + ".set_nodes_state")
for node in nodes:
if node.provision_state in skipped_states:
continue
log.debug(
"Setting provision state from {0} to '{1} for Node {2}"
.format(node.provision_state, transition, node.uuid))
baremetal_client.node.set_provision_state(node.uuid, transition)
if not wait_for_provision_state(baremetal_client, node.uuid,
target_state):
print("FAIL: State not updated for Node {0}".format(
node.uuid, file=sys.stderr))
else:
yield node.uuid
def get_hiera_key(key_name):
"""Retrieve a key from the hiera store
:param password_name: Name of the key to retrieve
:type password_name: type
"""
command = ["hiera", key_name]
p = subprocess.Popen(command, stdout=subprocess.PIPE)
out, err = p.communicate()
return out
def get_config_value(section, option):
p = six.moves.configparser.ConfigParser()
p.read(os.path.expanduser("~/undercloud-passwords.conf"))
return p.get(section, option)
def remove_known_hosts(overcloud_ip):
"""For a given IP address remove SSH keys from the known_hosts file"""
known_hosts = os.path.expanduser("~/.ssh/known_hosts")
if os.path.exists(known_hosts):
command = ['ssh-keygen', '-R', overcloud_ip, '-f', known_hosts]
subprocess.check_call(command)
def create_cephx_key():
# NOTE(gfidente): Taken from
# https://github.com/ceph/ceph-deploy/blob/master/ceph_deploy/new.py#L21
key = os.urandom(16)
header = struct.pack("<hiih", 1, int(time.time()), 0, len(key))
return base64.b64encode(header + key)
def run_shell(cmd):
return subprocess.call([cmd], shell=True)
def all_unique(x):
"""Return True if the collection has no duplications."""
return len(set(x)) == len(x)
def check_nodes_count(baremetal_client, stack, parameters, defaults):
"""Check if there are enough available nodes for creating/scaling stack"""
count = 0
if stack:
for param in defaults:
try:
current = int(stack.parameters[param])
except KeyError:
raise ValueError(
"Parameter '%s' was not found in existing stack" % param)
count += parameters.get(param, current)
else:
for param, default in defaults.items():
count += parameters.get(param, default)
available = len(baremetal_client.node.list(associated=False,
maintenance=False))
if count > available:
raise exceptions.DeploymentError(
"Not enough nodes - available: {0}, requested: {1}".format(
available, count))
else:
return True
|
rdo-management/python-rdomanager-oscplugin | rdomanager_oscplugin/utils.py | check_nodes_count | python | def check_nodes_count(baremetal_client, stack, parameters, defaults):
count = 0
if stack:
for param in defaults:
try:
current = int(stack.parameters[param])
except KeyError:
raise ValueError(
"Parameter '%s' was not found in existing stack" % param)
count += parameters.get(param, current)
else:
for param, default in defaults.items():
count += parameters.get(param, default)
available = len(baremetal_client.node.list(associated=False,
maintenance=False))
if count > available:
raise exceptions.DeploymentError(
"Not enough nodes - available: {0}, requested: {1}".format(
available, count))
else:
return True | Check if there are enough available nodes for creating/scaling stack | train | https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/utils.py#L402-L424 | null | # Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import base64
import hashlib
import json
import logging
import os
import re
import six
import struct
import subprocess
import sys
import time
import uuid
from rdomanager_oscplugin import exceptions
WEBROOT = '/dashboard/'
SERVICE_LIST = {
'ceilometer': {'password_field': 'OVERCLOUD_CEILOMETER_PASSWORD'},
'cinder': {'password_field': 'OVERCLOUD_CINDER_PASSWORD'},
'cinderv2': {'password_field': 'OVERCLOUD_CINDER_PASSWORD'},
'glance': {'password_field': 'OVERCLOUD_GLANCE_PASSWORD'},
'heat': {'password_field': 'OVERCLOUD_HEAT_PASSWORD'},
'neutron': {'password_field': 'OVERCLOUD_NEUTRON_PASSWORD'},
'nova': {'password_field': 'OVERCLOUD_NOVA_PASSWORD'},
'novav3': {'password_field': 'OVERCLOUD_NOVA_PASSWORD'},
'swift': {'password_field': 'OVERCLOUD_SWIFT_PASSWORD'},
'horizon': {
'port': '80',
'path': WEBROOT,
'admin_path': '%sadmin' % WEBROOT},
}
def _generate_password():
    """Return a pseudo-random 40-character hex password.

    The password is the SHA1 hex digest of a fresh UUID4, mirroring the
    tripleo ``os-make-password`` helper.  We may change this in future to
    gain more entropy.
    """
    seed = six.text_type(uuid.uuid4()).encode("UTF-8")
    return hashlib.sha1(seed).hexdigest()
def generate_overcloud_passwords(output_file="tripleo-overcloud-passwords"):
    """Create (or reload) the passwords needed for the overcloud.

    Generates the full set of passwords the overcloud requires, persists
    them to ``output_file`` as ``NAME=value`` lines, and returns them as a
    dict.  If the file already exists, the stored passwords are parsed and
    returned instead, so repeated deploys reuse the same credentials.
    """
    if os.path.isfile(output_file):
        # Reuse previously generated credentials rather than rotating them.
        with open(output_file) as existing:
            return dict(line.split('=')
                        for line in existing.read().splitlines())
    password_names = (
        "OVERCLOUD_ADMIN_PASSWORD",
        "OVERCLOUD_ADMIN_TOKEN",
        "OVERCLOUD_CEILOMETER_PASSWORD",
        "OVERCLOUD_CEILOMETER_SECRET",
        "OVERCLOUD_CINDER_PASSWORD",
        "OVERCLOUD_DEMO_PASSWORD",
        "OVERCLOUD_GLANCE_PASSWORD",
        "OVERCLOUD_HEAT_PASSWORD",
        "OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD",
        "OVERCLOUD_NEUTRON_PASSWORD",
        "OVERCLOUD_NOVA_PASSWORD",
        "OVERCLOUD_SWIFT_HASH",
        "OVERCLOUD_SWIFT_PASSWORD",
    )
    passwords = dict((name, _generate_password()) for name in password_names)
    with open(output_file, 'w') as out:
        for name, password in passwords.items():
            out.write("{0}={1}\n".format(name, password))
    return passwords
def check_hypervisor_stats(compute_client, nodes=1, memory=0, vcpu=0):
    """Check that the hypervisor stats meet minimum values.

    This is an implementation of a command in TripleO with the same name.

    :param compute_client: Instance of Nova client
    :type compute_client: novaclient.client.v2.Client
    :param nodes: The number of nodes to wait for, defaults to 1.
    :type nodes: int
    :param memory: The amount of memory to wait for in MB, defaults to 0.
    :type memory: int
    :param vcpu: The number of vcpus to wait for, defaults to 0.
    :type vcpu: int
    :returns: the statistics dict when every minimum is met, else None
    """
    stats = compute_client.hypervisors.statistics().to_dict()
    meets_minimums = (stats['count'] >= nodes
                      and stats['memory_mb'] >= memory
                      and stats['vcpus'] >= vcpu)
    return stats if meets_minimums else None
def wait_for_stack_ready(orchestration_client, stack_name):
    """Poll an orchestration stack until it completes or fails.

    Blocks, re-reading the stack every 10 seconds, until its status
    matches a terminal CREATE/UPDATE state.

    :param orchestration_client: Instance of Orchestration client
    :type orchestration_client: heatclient.v1.client.Client
    :param stack_name: Name or UUID of stack to retrieve
    :type stack_name: string
    :returns: True on (CREATE|UPDATE)_COMPLETE; False when the stack is
              missing or reaches (CREATE|UPDATE)_FAILED
    """
    SUCCESSFUL_MATCH_OUTPUT = "(CREATE|UPDATE)_COMPLETE"
    FAIL_MATCH_OUTPUT = "(CREATE|UPDATE)_FAILED"
    while True:
        stack = orchestration_client.stacks.get(stack_name)
        if not stack:
            return False
        status = stack.stack_status
        if re.match(SUCCESSFUL_MATCH_OUTPUT, status):
            return True
        if re.match(FAIL_MATCH_OUTPUT, status):
            # BUG FIX: 'file=sys.stderr' used to be passed to str.format()
            # (which silently ignores unknown keyword arguments), so the
            # failure message went to stdout.  It now goes to stderr.
            print("Stack failed with status: {}".format(
                stack.stack_status_reason), file=sys.stderr)
            return False
        time.sleep(10)
def wait_for_provision_state(baremetal_client, node_uuid, provision_state,
                             loops=10, sleep=1):
    """Wait for a node to reach a given Ironic provisioning state.

    Updating the provisioning state is an async operation; poll until it
    lands or the retry budget runs out.

    :param baremetal_client: Instance of Ironic client
    :type baremetal_client: ironicclient.v1.client.Client
    :param node_uuid: The Ironic node UUID
    :type node_uuid: str
    :param provision_state: The provisioning state name to wait for
    :type provision_state: str
    :param loops: How many times to loop
    :type loops: int
    :param sleep: How long to sleep between loops
    :type sleep: int
    :returns: True when the state is reached (or the node no longer
              exists), False on timeout
    """
    remaining = loops
    while remaining > 0:
        remaining -= 1
        node = baremetal_client.node.get(node_uuid)
        # A missing node has nothing left to transition, so treat it as done.
        if node is None or node.provision_state == provision_state:
            return True
        time.sleep(sleep)
    return False
def wait_for_node_discovery(discoverd_client, auth_token, discoverd_url,
                            node_uuids, loops=220, sleep=10):
    """Poll Ironic discoverd until discovery finishes for the given nodes.

    Yields ``(node_uuid, status)`` as each node finishes (successfully or
    with an error recorded in ``status['error']``).  Nodes still pending
    after the retry budget are logged as an error.

    :param discoverd_client: Ironic Discoverd client
    :type discoverd_client: ironic_discoverd.client
    :param auth_token: Authorisation token used by discoverd client
    :type auth_token: string
    :param discoverd_url: URL used by the discoverd client
    :type discoverd_url: string
    :param node_uuids: List of Node UUID's to wait for discovery
    :type node_uuids: [string, ]
    :param loops: How many times to loop
    :type loops: int
    :param sleep: How long to sleep between loops
    :type sleep: int
    """
    log = logging.getLogger(__name__ + ".wait_for_node_discovery")
    # Work on a copy so the caller's list is never mutated.
    node_uuids = node_uuids[:]
    for _ in range(0, loops):
        # BUG FIX: iterate over a snapshot -- the body removes items from
        # node_uuids, and removing from the list being iterated skips the
        # element that shifts into the freed slot.
        for node_uuid in list(node_uuids):
            status = discoverd_client.get_status(
                node_uuid,
                base_url=discoverd_url,
                auth_token=auth_token)
            if status['finished']:
                log.debug("Discover finished for node {0} (Error: {1})".format(
                    node_uuid, status['error']))
                node_uuids.remove(node_uuid)
                yield node_uuid, status
        if not node_uuids:
            # BUG FIX: 'raise StopIteration' inside a generator becomes a
            # RuntimeError under PEP 479 (Python 3.7+); a plain return ends
            # the generator cleanly.
            return
        time.sleep(sleep)
    if node_uuids:
        log.error("Discovery didn't finish for nodes {0}".format(
            ','.join(node_uuids)))
def create_environment_file(path="~/overcloud-env.json",
                            control_scale=1, compute_scale=1,
                            ceph_storage_scale=0, block_storage_scale=0,
                            swift_storage_scale=0):
    """Write a Heat environment file holding the role scale parameters.

    :param path: destination file; '~' is expanded
    :type path: string
    :param control_scale: Scale value for control roles.
    :type control_scale: int
    :param compute_scale: Scale value for compute roles.
    :type compute_scale: int
    :param ceph_storage_scale: Scale value for ceph storage roles.
    :type ceph_storage_scale: int
    :param block_storage_scale: Scale value for block storage roles.
    :type block_storage_scale: int
    :param swift_storage_scale: Scale value for swift storage roles.
    :type swift_storage_scale: int
    :returns: the expanded path that was written
    """
    scale_parameters = {
        "ControllerCount": control_scale,
        "ComputeCount": compute_scale,
        "CephStorageCount": ceph_storage_scale,
        "BlockStorageCount": block_storage_scale,
        "ObjectStorageCount": swift_storage_scale,
    }
    env_path = os.path.expanduser(path)
    with open(env_path, 'w+') as env_file:
        env_file.write(json.dumps({"parameters": scale_parameters}))
    return env_path
def set_nodes_state(baremetal_client, nodes, transition, target_state,
                    skipped_states=()):
    """Transition baremetal nodes and yield the UUIDs that succeed.

    For each node (unless its current provision state is listed in
    ``skipped_states``) request ``transition`` and wait for the node to
    reach ``target_state``.  Nodes that fail to reach it are reported on
    stderr and not yielded.

    :param baremetal_client: Instance of Ironic client
    :type baremetal_client: ironicclient.v1.client.Client
    :param nodes: List of Baremetal Nodes
    :type nodes: [ironicclient.v1.node.Node]
    :param transition: The state to set for a node. The full list of states
                       can be found in ironic.common.states.
    :type transition: string
    :param target_state: The expected result state for a node. For example
                         when transitioning to 'manage' the result is
                         'manageable'
    :type target_state: string
    :param skipped_states: A set of states to skip, for example 'active'
                           nodes are already deployed and the state can't
                           always be changed.
    :type skipped_states: iterable of strings
    """
    log = logging.getLogger(__name__ + ".set_nodes_state")
    for node in nodes:
        if node.provision_state in skipped_states:
            continue
        # Message fix: the '{1}' placeholder previously had an unbalanced
        # opening quote.
        log.debug(
            "Setting provision state from {0} to '{1}' for Node {2}"
            .format(node.provision_state, transition, node.uuid))
        baremetal_client.node.set_provision_state(node.uuid, transition)
        if not wait_for_provision_state(baremetal_client, node.uuid,
                                        target_state):
            # BUG FIX: 'file=sys.stderr' used to be passed to str.format()
            # (which silently ignores unknown keyword arguments), so the
            # failure message went to stdout.  It now goes to stderr.
            print("FAIL: State not updated for Node {0}".format(node.uuid),
                  file=sys.stderr)
        else:
            yield node.uuid
def get_hiera_key(key_name):
    """Retrieve a key from the hiera store.

    Shells out to the ``hiera`` command line tool and returns its raw
    stdout (bytes, including any trailing newline).

    :param key_name: Name of the key to retrieve
    :type key_name: str
    """
    process = subprocess.Popen(["hiera", key_name], stdout=subprocess.PIPE)
    stdout, _ = process.communicate()
    return stdout
def get_config_value(section, option):
    """Read one option from the undercloud passwords file.

    Parses ``~/undercloud-passwords.conf`` on every call and returns the
    requested value; raises configparser errors if section/option are
    missing.
    """
    parser = six.moves.configparser.ConfigParser()
    parser.read(os.path.expanduser("~/undercloud-passwords.conf"))
    return parser.get(section, option)
def remove_known_hosts(overcloud_ip):
    """For a given IP address remove SSH keys from the known_hosts file."""
    known_hosts = os.path.expanduser("~/.ssh/known_hosts")
    if not os.path.exists(known_hosts):
        # Nothing to clean up.
        return
    subprocess.check_call(
        ['ssh-keygen', '-R', overcloud_ip, '-f', known_hosts])
def create_cephx_key():
    """Generate a base64-encoded cephx secret.

    Layout mirrors ceph-deploy's key generation
    (https://github.com/ceph/ceph-deploy/blob/master/ceph_deploy/new.py#L21):
    a little-endian 12-byte header <int16 type, int32 secs, int32 nsecs,
    int16 key-length> followed by 16 random bytes.
    """
    secret = os.urandom(16)
    header = struct.pack("<hiih", 1, int(time.time()), 0, len(secret))
    return base64.b64encode(header + secret)
def run_shell(cmd):
    """Run *cmd* through the shell and return its integer exit status.

    NOTE(review): the command string is interpreted by the shell, so this
    must never be called with untrusted input.
    """
    return subprocess.call([cmd], shell=True)
def all_unique(x):
    """Return True if the collection has no duplications."""
    distinct = set(x)
    return len(distinct) == len(x)
def file_checksum(filepath):
    """Calculate the MD5 checksum of a file, reading it in 64 KiB chunks.

    :param filepath: Full path to file (e.g. /home/stack/image.qcow2)
    :type filepath: string
    :returns: hex digest string of the file's MD5 checksum
    """
    checksum = hashlib.md5()
    with open(filepath, 'rb') as f:
        # BUG FIX: the sentinel must be b'' for a binary-mode file.  The
        # previous '' (str) sentinel never equals the b'' returned at EOF
        # on Python 3, so this loop never terminated.
        for fragment in iter(lambda: f.read(65536), b''):
            checksum.update(fragment)
    return checksum.hexdigest()
|
rdo-management/python-rdomanager-oscplugin | rdomanager_oscplugin/v1/overcloud_deploy.py | DeployOvercloud.set_overcloud_passwords | python | def set_overcloud_passwords(self, parameters, parsed_args):
undercloud_ceilometer_snmpd_password = utils.get_config_value(
"auth", "undercloud_ceilometer_snmpd_password")
self.passwords = passwords = utils.generate_overcloud_passwords()
ceilometer_pass = passwords['OVERCLOUD_CEILOMETER_PASSWORD']
ceilometer_secret = passwords['OVERCLOUD_CEILOMETER_SECRET']
if parsed_args.templates:
parameters['AdminPassword'] = passwords['OVERCLOUD_ADMIN_PASSWORD']
parameters['AdminToken'] = passwords['OVERCLOUD_ADMIN_TOKEN']
parameters['CeilometerPassword'] = ceilometer_pass
parameters['CeilometerMeteringSecret'] = ceilometer_secret
parameters['CinderPassword'] = passwords[
'OVERCLOUD_CINDER_PASSWORD']
parameters['GlancePassword'] = passwords[
'OVERCLOUD_GLANCE_PASSWORD']
parameters['HeatPassword'] = passwords['OVERCLOUD_HEAT_PASSWORD']
parameters['HeatStackDomainAdminPassword'] = passwords[
'OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD']
parameters['NeutronPassword'] = passwords[
'OVERCLOUD_NEUTRON_PASSWORD']
parameters['NovaPassword'] = passwords['OVERCLOUD_NOVA_PASSWORD']
parameters['SwiftHashSuffix'] = passwords['OVERCLOUD_SWIFT_HASH']
parameters['SwiftPassword'] = passwords['OVERCLOUD_SWIFT_PASSWORD']
parameters['SnmpdReadonlyUserPassword'] = (
undercloud_ceilometer_snmpd_password)
else:
parameters['Controller-1::AdminPassword'] = passwords[
'OVERCLOUD_ADMIN_PASSWORD']
parameters['Controller-1::AdminToken'] = passwords[
'OVERCLOUD_ADMIN_TOKEN']
parameters['Compute-1::AdminPassword'] = passwords[
'OVERCLOUD_ADMIN_PASSWORD']
parameters['Controller-1::SnmpdReadonlyUserPassword'] = (
undercloud_ceilometer_snmpd_password)
parameters['Cinder-Storage-1::SnmpdReadonlyUserPassword'] = (
undercloud_ceilometer_snmpd_password)
parameters['Swift-Storage-1::SnmpdReadonlyUserPassword'] = (
undercloud_ceilometer_snmpd_password)
parameters['Compute-1::SnmpdReadonlyUserPassword'] = (
undercloud_ceilometer_snmpd_password)
parameters['Controller-1::CeilometerPassword'] = ceilometer_pass
parameters[
'Controller-1::CeilometerMeteringSecret'] = ceilometer_secret
parameters['Compute-1::CeilometerPassword'] = ceilometer_pass
parameters[
'Compute-1::CeilometerMeteringSecret'] = ceilometer_secret
parameters['Controller-1::CinderPassword'] = (
passwords['OVERCLOUD_CINDER_PASSWORD'])
parameters['Controller-1::GlancePassword'] = (
passwords['OVERCLOUD_GLANCE_PASSWORD'])
parameters['Controller-1::HeatPassword'] = (
passwords['OVERCLOUD_HEAT_PASSWORD'])
parameters['Controller-1::HeatStackDomainAdminPassword'] = (
passwords['OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD'])
parameters['Controller-1::NeutronPassword'] = (
passwords['OVERCLOUD_NEUTRON_PASSWORD'])
parameters['Compute-1::NeutronPassword'] = (
passwords['OVERCLOUD_NEUTRON_PASSWORD'])
parameters['Controller-1::NovaPassword'] = (
passwords['OVERCLOUD_NOVA_PASSWORD'])
parameters['Compute-1::NovaPassword'] = (
passwords['OVERCLOUD_NOVA_PASSWORD'])
parameters['Controller-1::SwiftHashSuffix'] = (
passwords['OVERCLOUD_SWIFT_HASH'])
parameters['Controller-1::SwiftPassword'] = (
passwords['OVERCLOUD_SWIFT_PASSWORD']) | Add passwords to the parameters dictionary
:param parameters: A dictionary for the passwords to be added to
:type parameters: dict | train | https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/v1/overcloud_deploy.py#L104-L176 | [
"def get_config_value(section, option):\n\n p = six.moves.configparser.ConfigParser()\n p.read(os.path.expanduser(\"~/undercloud-passwords.conf\"))\n return p.get(section, option)\n",
"def generate_overcloud_passwords(output_file=\"tripleo-overcloud-passwords\"):\n \"\"\"Create the passwords needed fo... | class DeployOvercloud(command.Command):
"""Deploy Overcloud"""
log = logging.getLogger(__name__ + ".DeployOvercloud")
predeploy_errors = 0
predeploy_warnings = 0
    def _get_stack(self, orchestration_client, stack_name):
        """Return the deployed overcloud stack if it exists, else None.

        A found stack means the subsequent deploy will be a stack update;
        an HTTPNotFound means a fresh stack create.
        """
        try:
            stack = orchestration_client.stacks.get(stack_name)
            self.log.info("Stack found, will be doing a stack update")
            return stack
        except HTTPNotFound:
            self.log.info("No stack found, will be doing a stack create")
            # Implicitly returns None, which callers treat as "create".
def _update_paramaters(self, args, network_client, stack):
if args.templates:
parameters = PARAMETERS.copy()
if stack is None:
parameters.update(NEW_STACK_PARAMETERS)
else:
parameters = {}
self.log.debug("Generating overcloud passwords")
self.set_overcloud_passwords(parameters, args)
self.log.debug("Getting ctlplane from Neutron")
net = network_client.api.find_attr('networks', 'ctlplane')
parameters['NeutronControlPlaneID'] = net['id']
if args.templates:
param_args = (
('NeutronPublicInterface', 'neutron_public_interface'),
('NeutronBridgeMappings', 'neutron_bridge_mappings'),
('NeutronFlatNetworks', 'neutron_flat_networks'),
('HypervisorNeutronPhysicalBridge', 'neutron_physical_bridge'),
('NtpServer', 'ntp_server'),
('ControllerCount', 'control_scale'),
('ComputeCount', 'compute_scale'),
('ObjectStorageCount', 'swift_storage_scale'),
('BlockStorageCount', 'block_storage_scale'),
('CephStorageCount', 'ceph_storage_scale'),
('OvercloudControlFlavor', 'control_flavor'),
('OvercloudComputeFlavor', 'compute_flavor'),
('OvercloudBlockStorageFlavor', 'block_storage_flavor'),
('OvercloudSwiftStorageFlavor', 'swift_storage_flavor'),
('OvercloudCephStorageFlavor', 'ceph_storage_flavor'),
('NeutronNetworkVLANRanges', 'neutron_network_vlan_ranges'),
('NeutronMechanismDrivers', 'neutron_mechanism_drivers')
)
if stack is None:
new_stack_args = (
('NeutronNetworkType', 'neutron_network_type'),
('NeutronTunnelIdRanges', 'neutron_tunnel_id_ranges'),
('NeutronTunnelTypes', 'neutron_tunnel_types'),
('NeutronVniRanges', 'neutron_vni_ranges'),
('NovaComputeLibvirtType', 'libvirt_type'),
)
param_args = param_args + new_stack_args
if args.neutron_disable_tunneling is not None:
neutron_enable_tunneling = (
not args.neutron_disable_tunneling)
parameters.update({
'NeutronEnableTunnelling': neutron_enable_tunneling,
})
else:
param_args = (
('Controller-1::NeutronPublicInterface',
'neutron_public_interface'),
('Compute-1::NeutronPublicInterface',
'neutron_public_interface'),
('Controller-1::NeutronBridgeMappings',
'neutron_bridge_mappings'),
('Compute-1::NeutronBridgeMappings',
'neutron_bridge_mappings'),
('Controller-1::NeutronFlatNetworks', 'neutron_flat_networks'),
('Compute-1::NeutronFlatNetworks', 'neutron_flat_networks'),
('Compute-1::NeutronPhysicalBridge',
'neutron_physical_bridge'),
('Controller-1::NtpServer', 'ntp_server'),
('Compute-1::NtpServer', 'ntp_server'),
('Controller-1::NeutronNetworkVLANRanges',
'neutron_network_vlan_ranges'),
('Compute-1::NeutronNetworkVLANRanges',
'neutron_network_vlan_ranges'),
('Controller-1::NeutronMechanismDrivers',
'neutron_mechanism_drivers'),
('Compute-1::NeutronMechanismDrivers',
'neutron_mechanism_drivers'),
('Controller-1::count', 'control_scale'),
('Compute-1::count', 'compute_scale'),
('Swift-Storage-1::count', 'swift_storage_scale'),
('Cinder-Storage-1::count', 'block_storage_scale'),
('Ceph-Storage-1::count', 'ceph_storage_scale'),
('Cinder-Storage-1::Flavor', 'block_storage_flavor'),
('Compute-1::Flavor', 'compute_flavor'),
('Controller-1::Flavor', 'control_flavor'),
('Swift-Storage-1::Flavor', 'swift_storage_flavor'),
('Ceph-Storage-1::Flavor', 'ceph_storage_flavor'),
)
if stack is None:
new_stack_args = (
('Controller-1::NeutronNetworkType',
'neutron_network_type'),
('Compute-1::NeutronNetworkType', 'neutron_network_type'),
('Controller-1::NeutronTunnelTypes',
'neutron_tunnel_types'),
('Compute-1::NeutronTunnelTypes', 'neutron_tunnel_types'),
('Compute-1::NovaComputeLibvirtType', 'libvirt_type'),
('Controller-1::NeutronTunnelIdRanges',
'neutron_tunnel_id_ranges'),
('Controller-1::NeutronVniRanges', 'neutron_vni_ranges'),
('Compute-1::NeutronTunnelIdRanges',
'neutron_tunnel_id_ranges'),
('Compute-1::NeutronVniRanges', 'neutron_vni_ranges'),
)
param_args = param_args + new_stack_args
if args.neutron_disable_tunneling is not None:
neutron_enable_tunneling = (
not args.neutron_disable_tunneling)
parameters.update({
'Controller-1::NeutronEnableTunnelling':
neutron_enable_tunneling,
'Compute-1::NeutronEnableTunnelling':
neutron_enable_tunneling,
})
# Update parameters from commandline
for param, arg in param_args:
if getattr(args, arg, None) is not None:
parameters[param] = getattr(args, arg)
# Scaling needs extra parameters
number_controllers = max((
int(parameters.get('ControllerCount', 0)),
int(parameters.get('Controller-1::count', 0))
))
if number_controllers > 1:
if not args.ntp_server:
raise Exception('Specify --ntp-server when using multiple'
' controllers (with HA).')
if args.templates:
parameters.update({
'NeutronL3HA': True,
'NeutronAllowL3AgentFailover': False,
})
else:
parameters.update({
'Controller-1::NeutronL3HA': True,
'Controller-1::NeutronAllowL3AgentFailover': False,
'Compute-1::NeutronL3HA': True,
'Compute-1::NeutronAllowL3AgentFailover': False,
})
else:
if args.templates:
parameters.update({
'NeutronL3HA': False,
'NeutronAllowL3AgentFailover': False,
})
else:
parameters.update({
'Controller-1::NeutronL3HA': False,
'Controller-1::NeutronAllowL3AgentFailover': False,
'Compute-1::NeutronL3HA': False,
'Compute-1::NeutronAllowL3AgentFailover': False,
})
# set at least 3 dhcp_agents_per_network
dhcp_agents_per_network = (number_controllers if number_controllers and
number_controllers > 3 else 3)
if args.templates:
parameters.update({
'NeutronDhcpAgentsPerNetwork': dhcp_agents_per_network,
})
else:
parameters.update({
'Controller-1::NeutronDhcpAgentsPerNetwork':
dhcp_agents_per_network,
})
if max((int(parameters.get('CephStorageCount', 0)),
int(parameters.get('Ceph-Storage-1::count', 0)))) > 0:
if stack is None:
parameters.update({
'CephClusterFSID': six.text_type(uuid.uuid1()),
'CephMonKey': utils.create_cephx_key(),
'CephAdminKey': utils.create_cephx_key()
})
return parameters
def _create_registration_env(self, args):
if args.templates:
tht_root = args.templates
else:
tht_root = TRIPLEO_HEAT_TEMPLATES
environment = os.path.join(tht_root,
RHEL_REGISTRATION_EXTRACONFIG_NAME,
'environment-rhel-registration.yaml')
registry = os.path.join(tht_root, RHEL_REGISTRATION_EXTRACONFIG_NAME,
'rhel-registration-resource-registry.yaml')
user_env = ("parameter_defaults:\n"
" rhel_reg_method: \"%(method)s\"\n"
" rhel_reg_org: \"%(org)s\"\n"
" rhel_reg_force: \"%(force)s\"\n"
" rhel_reg_sat_url: \"%(sat_url)s\"\n"
" rhel_reg_activation_key: \"%(activation_key)s\"\n"
% {'method': args.reg_method,
'org': args.reg_org,
'force': args.reg_force,
'sat_url': args.reg_sat_url,
'activation_key': args.reg_activation_key})
handle, user_env_file = tempfile.mkstemp()
with open(user_env_file, 'w') as temp_file:
temp_file.write(user_env)
return [registry, environment, user_env_file]
def _heat_deploy(self, stack, stack_name, template_path, parameters,
environments, timeout):
"""Verify the Baremetal nodes are available and do a stack update"""
self.log.debug("Processing environment files")
env_files, env = (
template_utils.process_multiple_environments_and_files(
environments))
self.log.debug("Getting template contents")
template_files, template = template_utils.get_template_contents(
template_path)
files = dict(list(template_files.items()) + list(env_files.items()))
clients = self.app.client_manager
orchestration_client = clients.rdomanager_oscplugin.orchestration()
self.log.debug("Deploying stack: %s", stack_name)
self.log.debug("Deploying template: %s", template)
self.log.debug("Deploying parameters: %s", parameters)
self.log.debug("Deploying environment: %s", env)
self.log.debug("Deploying files: %s", files)
stack_args = {
'stack_name': stack_name,
'template': template,
'parameters': parameters,
'environment': env,
'files': files
}
if timeout:
stack_args['timeout_mins'] = timeout
if stack is None:
self.log.info("Performing Heat stack create")
orchestration_client.stacks.create(**stack_args)
else:
self.log.info("Performing Heat stack update")
# Make sure existing parameters for stack are reused
stack_args['existing'] = 'true'
orchestration_client.stacks.update(stack.id, **stack_args)
create_result = utils.wait_for_stack_ready(
orchestration_client, stack_name)
if not create_result:
if stack is None:
raise Exception("Heat Stack create failed.")
else:
raise Exception("Heat Stack update failed.")
    def _get_overcloud_endpoint(self, stack):
        """Return the overcloud Keystone endpoint URL.

        Scans the Heat stack outputs for the 'KeystoneURL' key; returns
        None implicitly when no such output exists.
        """
        for output in stack.to_dict().get('outputs', {}):
            if output['output_key'] == 'KeystoneURL':
                return output['output_value']
    def _get_service_ips(self, stack):
        """Return a dict of every Heat stack output key -> value.

        Despite the name this copies all outputs (e.g. the '*Vip'
        entries); callers pick out the service VIPs they need.
        """
        service_ips = {}
        for output in stack.to_dict().get('outputs', {}):
            service_ips[output['output_key']] = output['output_value']
        return service_ips
    def _pre_heat_deploy(self):
        """Setup before the Heat stack create or update has been done.

        :returns: True when the undercloud hypervisor stats meet the
            default minimums
        :raises exceptions.DeploymentError: when they do not (i.e. no
            usable baremetal capacity has been registered yet)
        """
        clients = self.app.client_manager
        compute_client = clients.compute
        self.log.debug("Checking hypervisor stats")
        # check_hypervisor_stats returns None until the minimums are met.
        if utils.check_hypervisor_stats(compute_client) is None:
            raise exceptions.DeploymentError(
                "Expected hypervisor stats not met")
        return True
def _deploy_tripleo_heat_templates(self, stack, parsed_args):
"""Deploy the fixed templates in TripleO Heat Templates"""
clients = self.app.client_manager
network_client = clients.network
parameters = self._update_paramaters(
parsed_args, network_client, stack)
utils.check_nodes_count(
self.app.client_manager.rdomanager_oscplugin.baremetal(),
stack,
parameters,
{
'ControllerCount': 1,
'ComputeCount': 1,
'ObjectStorageCount': 0,
'BlockStorageCount': 0,
'CephStorageCount': 0,
}
)
tht_root = parsed_args.templates
print("Deploying templates in the directory {0}".format(
os.path.abspath(tht_root)))
self.log.debug("Creating Environment file")
env_path = utils.create_environment_file()
if stack is None:
self.log.debug("Creating Keystone certificates")
keystone_pki.generate_certs_into_json(env_path, False)
resource_registry_path = os.path.join(tht_root, RESOURCE_REGISTRY_NAME)
environments = [resource_registry_path, env_path]
if parsed_args.rhel_reg:
reg_env = self._create_registration_env(parsed_args)
environments.extend(reg_env)
if parsed_args.environment_files:
environments.extend(parsed_args.environment_files)
overcloud_yaml = os.path.join(tht_root, OVERCLOUD_YAML_NAME)
self._heat_deploy(stack, parsed_args.stack, overcloud_yaml, parameters,
environments, parsed_args.timeout)
def _deploy_tuskar(self, stack, parsed_args):
clients = self.app.client_manager
management = clients.rdomanager_oscplugin.management()
network_client = clients.network
# TODO(dmatthews): The Tuskar client has very similar code to this for
# downloading templates. It should be refactored upstream so we can use
# it.
if parsed_args.output_dir:
output_dir = parsed_args.output_dir
else:
output_dir = tempfile.mkdtemp()
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
management_plan = tuskarutils.find_resource(
management.plans, parsed_args.plan)
# retrieve templates
templates = management.plans.templates(management_plan.uuid)
parameters = self._update_paramaters(
parsed_args, network_client, stack)
utils.check_nodes_count(
self.app.client_manager.rdomanager_oscplugin.baremetal(),
stack,
parameters,
{
'Controller-1::count': 1,
'Compute-1::count': 1,
'Swift-Storage-1::count': 0,
'Cinder-Storage-1::count': 0,
'Ceph-Storage-1::count': 0,
}
)
if stack is None:
ca_key_pem, ca_cert_pem = keystone_pki.create_ca_pair()
signing_key_pem, signing_cert_pem = (
keystone_pki.create_signing_pair(ca_key_pem, ca_cert_pem))
parameters['Controller-1::KeystoneCACertificate'] = ca_cert_pem
parameters['Controller-1::KeystoneSigningCertificate'] = (
signing_cert_pem)
parameters['Controller-1::KeystoneSigningKey'] = signing_key_pem
# Save the parameters to Tuskar so they can be used when redeploying.
# Tuskar expects to get all values as strings. So we convert them all
# below.
management.plans.patch(
management_plan.uuid,
[{'name': x[0], 'value': six.text_type(x[1])}
for x in parameters.items()]
)
# write file for each key-value in templates
print("The following templates will be written:")
for template_name, template_content in templates.items():
# It's possible to organize the role templates and their dependent
# files into directories, in which case the template_name will
# carry the directory information. If that's the case, first
# create the directory structure (if it hasn't already been
# created by another file in the templates list).
template_dir = os.path.dirname(template_name)
output_template_dir = os.path.join(output_dir, template_dir)
if template_dir and not os.path.exists(output_template_dir):
os.makedirs(output_template_dir)
filename = os.path.join(output_dir, template_name)
with open(filename, 'w+') as template_file:
template_file.write(template_content)
print(filename)
overcloud_yaml = os.path.join(output_dir, 'plan.yaml')
environment_yaml = os.path.join(output_dir, 'environment.yaml')
environments = [environment_yaml, ]
if parsed_args.rhel_reg:
reg_env = self._create_registration_env(parsed_args)
environments.extend(reg_env)
if parsed_args.environment_files:
environments.extend(parsed_args.environment_files)
self._heat_deploy(stack, parsed_args.stack, overcloud_yaml, parameters,
environments, parsed_args.timeout)
def _create_overcloudrc(self, stack, parsed_args):
overcloud_endpoint = self._get_overcloud_endpoint(stack)
overcloud_ip = six.moves.urllib.parse.urlparse(
overcloud_endpoint).hostname
rc_params = {
'NOVA_VERSION': '1.1',
'COMPUTE_API_VERSION': '1.1',
'OS_USERNAME': 'admin',
'OS_TENANT_NAME': 'admin',
'OS_NO_CACHE': 'True',
'OS_CLOUDNAME': stack.stack_name,
'no_proxy': "%(no_proxy)s,%(overcloud_ip)s" % {
'no_proxy': parsed_args.no_proxy,
'overcloud_ip': overcloud_ip,
}
}
rc_params.update({
'OS_PASSWORD': self.passwords['OVERCLOUD_ADMIN_PASSWORD'],
'OS_AUTH_URL': self._get_overcloud_endpoint(stack),
})
with open('%src' % stack.stack_name, 'w') as f:
for key, value in rc_params.items():
f.write("export %(key)s=%(value)s\n" %
{'key': key, 'value': value})
def _create_tempest_deployer_input(self):
config = configparser.ConfigParser()
config.add_section('compute-feature-enabled')
# Does the test environment support obtaining instance serial console
# output? (default: true)
# set in [nova.serial_console]->enabled
config.set('compute-feature-enabled', 'console_output', 'false')
config.add_section('object-storage')
# Role to add to users created for swift tests to enable creating
# containers (default: 'Member')
# keystone role-list returns this role
config.set('object-storage', 'operator_role', 'swiftoperator')
config.add_section('orchestration')
# Role required for users to be able to manage stacks
# (default: 'heat_stack_owner')
# keystone role-list returns this role
config.set('orchestration', 'stack_owner_role', 'heat_stack_user')
config.add_section('volume')
# Name of the backend1 (must be declared in cinder.conf)
# (default: 'BACKEND_1')
# set in [cinder]->enabled_backends
config.set('volume', 'backend1_name', 'tripleo_iscsi')
config.add_section('volume-feature-enabled')
# Update bootable status of a volume Not implemented on icehouse
# (default: false)
# python-cinderclient supports set-bootable
config.set('volume-feature-enabled', 'bootable', 'true')
with open('tempest-deployer-input.conf', 'w+') as config_file:
config.write(config_file)
def _deploy_postconfig(self, stack, parsed_args):
self.log.debug("_deploy_postconfig(%s)" % parsed_args)
passwords = self.passwords
overcloud_endpoint = self._get_overcloud_endpoint(stack)
overcloud_ip = six.moves.urllib.parse.urlparse(
overcloud_endpoint).hostname
no_proxy = [os.environ.get('no_proxy'), overcloud_ip]
os.environ['no_proxy'] = ','.join(
[x for x in no_proxy if x is not None])
service_ips = self._get_service_ips(stack)
utils.remove_known_hosts(overcloud_ip)
keystone_ip = service_ips.get('KeystoneAdminVip')
if not keystone_ip:
keystone_ip = overcloud_ip
keystone.initialize(
keystone_ip,
passwords['OVERCLOUD_ADMIN_TOKEN'],
'admin@example.com',
passwords['OVERCLOUD_ADMIN_PASSWORD'],
public=overcloud_ip,
user='heat-admin')
# NOTE(bcrochet): Bad hack. Remove the ssl_port info from the
# os_cloud_config.SERVICES dictionary
for service_name, data in keystone.SERVICES.iteritems():
data.pop('ssl_port', None)
services = {}
for service, data in six.iteritems(utils.SERVICE_LIST):
service_data = data.copy()
service_data.pop('password_field', None)
password_field = data.get('password_field')
if password_field:
service_data['password'] = passwords[password_field]
service_name = re.sub('v[0-9]+', '',
service.capitalize() + 'InternalVip')
internal_vip = service_ips.get(service_name)
if internal_vip:
service_data['internal_host'] = internal_vip
services.update({service: service_data})
keystone_client = clients.get_keystone_client(
'admin',
passwords['OVERCLOUD_ADMIN_PASSWORD'],
'admin',
overcloud_endpoint)
keystone.setup_endpoints(
services,
client=keystone_client,
os_auth_url=overcloud_endpoint,
public_host=overcloud_ip)
compute_client = clients.get_nova_bm_client(
'admin',
passwords['OVERCLOUD_ADMIN_PASSWORD'],
'admin',
overcloud_endpoint)
compute_client.flavors.create('m1.demo', 512, 1, 10, 'auto')
    def _validate_args(self, parsed_args):
        """Cross-validate the Neutron network-type/tunnel-type arguments.

        :raises oscexc.CommandError: if a network type is given without
            tunnel types, or is not contained in the tunnel types.
        """
        network_type = parsed_args.neutron_network_type
        tunnel_types = parsed_args.neutron_tunnel_types
        if network_type and tunnel_types:
            # Validate that neutron_network_type is in neutron_tunnel_types
            # NOTE(review): if tunnel_types is a comma-separated string,
            # 'in' is a substring test rather than exact membership --
            # confirm against how the CLI parses this argument.
            if network_type not in tunnel_types:
                raise oscexc.CommandError("Neutron network type must be in "
                                          "Neutron tunnel types "
                                          "(%s) " % tunnel_types)
        elif network_type and not tunnel_types:
            raise oscexc.CommandError("Neutron tunnel types must be specified "
                                      "when Neutron network type is specified")
def _predeploy_verify_capabilities(self, parsed_args):
self.predeploy_errors = 0
self.predeploy_warnings = 0
self.log.debug("Starting _pre_verify_capabilities")
bm_client = self.app.client_manager.rdomanager_oscplugin.baremetal()
self._check_boot_images()
self._check_flavors_exist(parsed_args)
for node in bm_client.node.list():
node = bm_client.node.get(node.uuid)
self.log.debug("Checking config for Node {0}".format(node.uuid))
self._check_ironic_boot_configuration(node)
flavor_profile_map = self._collect_flavor_profiles([
parsed_args.control_flavor,
parsed_args.compute_flavor,
parsed_args.ceph_storage_flavor,
parsed_args.block_storage_flavor,
parsed_args.swift_storage_flavor,
])
node_profile_map = self._collect_node_profiles()
for target, flavor, scale in [
('control', parsed_args.control_flavor,
parsed_args.control_scale),
('compute', parsed_args.compute_flavor,
parsed_args.compute_scale),
('ceph-storage', parsed_args.ceph_storage_flavor,
parsed_args.ceph_storage_scale),
('block-storage', parsed_args.block_storage_flavor,
parsed_args.block_storage_scale),
('swift-storage', parsed_args.swift_storage_flavor,
parsed_args.swift_storage_scale),
]:
if scale == 0 or flavor is None:
self.log.debug("Skipping verification of %s profiles because "
"none will be deployed", flavor)
continue
self._check_profiles(
target, flavor, scale,
flavor_profile_map,
node_profile_map)
if len(node_profile_map.get(None, [])) > 0:
self.predeploy_warnings += 1
self.log.warning(
"There are %d ironic nodes with no profile that will "
"not be used: %s",
len(node_profile_map[None]),
', '.join(node_profile_map[None])
)
return self.predeploy_errors, self.predeploy_warnings
__kernel_id = None
__ramdisk_id = None
def _image_ids(self):
if self.__kernel_id is not None and self.__ramdisk_id is not None:
return self.__kernel_id, self.__ramdisk_id
image_client = self.app.client_manager.image
kernel_id, ramdisk_id = None, None
try:
kernel_id = osc_utils.find_resource(
image_client.images, 'bm-deploy-kernel').id
except AttributeError as e:
self.log.error("Please make sure there is only one image named "
"'bm-deploy-kernel' in glance.")
self.log.exception(e)
try:
ramdisk_id = osc_utils.find_resource(
image_client.images, 'bm-deploy-ramdisk').id
except AttributeError as e:
self.log.error("Please make sure there is only one image "
"named 'bm-deploy-ramdisk' in glance.")
self.log.exception(e)
self.log.debug("Using kernel ID: {0} and ramdisk ID: {1}".format(
kernel_id, ramdisk_id))
self.__kernel_id = kernel_id
self.__ramdisk_id = ramdisk_id
return kernel_id, ramdisk_id
def _collect_node_profiles(self):
"""Gather a map of profile -> [node_uuid] for ironic boot profiles"""
bm_client = self.app.client_manager.rdomanager_oscplugin.baremetal()
# map of profile capability -> [node_uuid, ...]
profile_map = collections.defaultdict(list)
for node in bm_client.node.list(maintenance=False):
node = bm_client.node.get(node.uuid)
profiles = re.findall(r'profile:(.*?)(?:,|$)',
node.properties.get('capabilities', ''))
if not profiles:
profile_map[None].append(node.uuid)
for p in profiles:
profile_map[p].append(node.uuid)
return dict(profile_map)
def _collect_flavor_profiles(self, flavors):
compute_client = self.app.client_manager.compute
flavor_profiles = {}
for flavor in compute_client.flavors.list():
if flavor.name not in flavors:
self.log.debug("Flavor {} isn't used in this deployment, "
"skipping it".format(flavor.name))
continue
profile = flavor.get_keys().get('capabilities:profile')
if profile == '':
flavor_profiles[flavor.name] = None
else:
flavor_profiles[flavor.name] = profile
if flavor.get_keys().get('capabilities:boot_option', '') \
!= 'local':
self.predeploy_warnings += 1
self.log.error(
'Flavor %s "capabilities:boot_option" is not set to '
'"local". Nodes must have ability to PXE boot from '
'deploy image.', flavor.name)
self.log.error(
'Recommended solution: openstack flavor set --property '
'"cpu_arch"="x86_64" --property '
'"capabilities:boot_option"="local" ' + flavor.name)
return flavor_profiles
def _check_profiles(self, target, flavor, scale,
flavor_profile_map,
node_profile_map):
if flavor_profile_map.get(flavor) is None:
self.predeploy_errors += 1
self.log.error(
'Warning: The flavor selected for --%s-flavor "%s" has no '
'profile associated', target, flavor)
self.log.error(
'Recommendation: assign a profile with openstack flavor set '
'--property "capabilities:profile"="PROFILE_NAME" %s',
flavor)
return
if len(node_profile_map.get(flavor_profile_map[flavor], [])) < scale:
self.predeploy_errors += 1
self.log.error(
"Error: %s of %s requested ironic nodes tagged to profile %s "
"(for flavor %s)",
len(node_profile_map.get(flavor_profile_map[flavor], [])),
scale, flavor_profile_map[flavor], flavor
)
self.log.error(
"Recommendation: tag more nodes using ironic node-update "
"<NODE ID> replace properties/capabilities=profile:%s,"
"boot_option:local", flavor_profile_map[flavor])
def _check_boot_images(self):
kernel_id, ramdisk_id = self._image_ids()
message = ("No image with the name '{}' found - make "
"sure you've uploaded boot images")
if kernel_id is None:
self.predeploy_errors += 1
self.log.error(message.format('bm-deploy-kernel'))
if ramdisk_id is None:
self.predeploy_errors += 1
self.log.error(message.format('bm-deploy-ramdisk'))
def _check_flavors_exist(self, parsed_args):
"""Ensure that selected flavors (--ROLE-flavor) exist in nova."""
compute_client = self.app.client_manager.compute
flavors = {f.name: f for f in compute_client.flavors.list()}
message = "Provided --{}-flavor, '{}', does not exist"
for target, flavor, scale in (
('control', parsed_args.control_flavor,
parsed_args.control_scale),
('compute', parsed_args.compute_flavor,
parsed_args.compute_scale),
('ceph-storage', parsed_args.ceph_storage_flavor,
parsed_args.ceph_storage_scale),
('block-storage', parsed_args.block_storage_flavor,
parsed_args.block_storage_scale),
('swift-storage', parsed_args.swift_storage_flavor,
parsed_args.swift_storage_scale),
):
if flavor is None or scale == 0:
self.log.debug("--{}-flavor not used".format(target))
elif flavor not in flavors:
self.predeploy_errors += 1
self.log.error(message.format(target, flavor))
def _check_ironic_boot_configuration(self, node):
kernel_id, ramdisk_id = self._image_ids()
self.log.debug("Doing boot checks for {}".format(node.uuid))
message = ("Node uuid={uuid} has an incorrectly configured "
"{property}. Expected \"{expected}\" but got "
"\"{actual}\".")
if node.driver_info.get('deploy_ramdisk') != ramdisk_id:
self.predeploy_errors += 1
self.log.error(message.format(
uuid=node.uuid,
property='driver_info/deploy_ramdisk',
expected=ramdisk_id,
actual=node.driver_info.get('deploy_ramdisk')
))
if node.driver_info.get('deploy_kernel') != kernel_id:
self.predeploy_errors += 1
self.log.error(message.format(
uuid=node.uuid,
property='driver_info/deploy_kernel',
expected=ramdisk_id,
actual=node.driver_info.get('deploy_kernel')
))
if 'boot_option:local' not in node.properties.get('capabilities', ''):
self.predeploy_warnings += 1
self.log.warning(message.format(
uuid=node.uuid,
property='properties/capabilities',
expected='boot_option:local',
actual=node.properties.get('capabilities')
))
    def get_parser(self, prog_name):
        """Build the argparse parser for the overcloud deploy command.

        Either --plan (Tuskar) or --templates (TripleO Heat Templates)
        must be given; all other options tune scaling, flavors, Neutron
        networking, and RHEL registration.
        """
        # add_help doesn't work properly, set it to False:
        parser = argparse.ArgumentParser(
            description=self.get_description(),
            prog=prog_name,
            add_help=False
        )
        # Exactly one deployment source: a Tuskar plan or a template dir.
        main_group = parser.add_mutually_exclusive_group(required=True)
        main_group.add_argument(
            '--plan',
            help=_("The Name or UUID of the Tuskar plan to deploy.")
        )
        main_group.add_argument(
            '--templates', nargs='?', const=TRIPLEO_HEAT_TEMPLATES,
            help=_("The directory containing the Heat templates to deploy"))
        parser.add_argument('--stack',
                            help=_("Stack name to create or update"),
                            default='overcloud')
        parser.add_argument('-t', '--timeout', metavar='<TIMEOUT>',
                            type=int, default=240,
                            help=_('Deployment timeout in minutes.'))
        # Per-role node counts.
        parser.add_argument('--control-scale', type=int,
                            help=_('New number of control nodes.'))
        parser.add_argument('--compute-scale', type=int,
                            help=_('New number of compute nodes.'))
        parser.add_argument('--ceph-storage-scale', type=int,
                            help=_('New number of ceph storage nodes.'))
        parser.add_argument('--block-storage-scale', type=int,
                            help=_('New number of cinder storage nodes.'))
        parser.add_argument('--swift-storage-scale', type=int,
                            help=_('New number of swift storage nodes.'))
        # Per-role nova flavors.
        parser.add_argument('--control-flavor',
                            help=_("Nova flavor to use for control nodes."))
        parser.add_argument('--compute-flavor',
                            help=_("Nova flavor to use for compute nodes."))
        parser.add_argument('--ceph-storage-flavor',
                            help=_("Nova flavor to use for ceph storage "
                                   "nodes."))
        parser.add_argument('--block-storage-flavor',
                            help=_("Nova flavor to use for cinder storage "
                                   "nodes."))
        parser.add_argument('--swift-storage-flavor',
                            help=_("Nova flavor to use for swift storage "
                                   "nodes."))
        # Neutron networking configuration.
        parser.add_argument('--neutron-flat-networks',
                            help=_('Comma separated list of physical_network '
                                   'names with which flat networks can be '
                                   'created. Use * to allow flat networks '
                                   'with arbitrary physical_network names.'))
        parser.add_argument('--neutron-physical-bridge',
                            help=_('Deprecated.'))
        parser.add_argument('--neutron-bridge-mappings',
                            help=_('Comma separated list of bridge mappings. '
                                   '(default: datacentre:br-ex)'))
        parser.add_argument('--neutron-public-interface',
                            help=_('Deprecated.'))
        parser.add_argument('--hypervisor-neutron-public-interface',
                            default='nic1', help=_('Deprecated.'))
        parser.add_argument('--neutron-network-type',
                            help=_('The network type for tenant networks.'))
        parser.add_argument('--neutron-tunnel-types',
                            help=_('Network types supported by the agent '
                                   '(gre and/or vxlan).'))
        parser.add_argument('--neutron-tunnel-id-ranges',
                            default="1:1000",
                            help=_("Ranges of GRE tunnel IDs to make "
                                   "available for tenant network allocation"),)
        parser.add_argument('--neutron-vni-ranges',
                            default="1:1000",
                            help=_("Ranges of VXLAN VNI IDs to make "
                                   "available for tenant network allocation"),)
        parser.add_argument('--neutron-disable-tunneling',
                            dest='neutron_disable_tunneling',
                            action="store_const", const=True,
                            help=_('Disables tunneling.')),
        parser.add_argument('--neutron-network-vlan-ranges',
                            help=_('Comma separated list of '
                                   '<physical_network>:<vlan_min>:<vlan_max> '
                                   'or <physical_network> specifying '
                                   'physical_network names usable for VLAN '
                                   'provider and tenant networks, as well as '
                                   'ranges of VLAN tags on each available for '
                                   'allocation to tenant networks. '
                                   '(ex: datacentre:1:1000)'))
        parser.add_argument('--neutron-mechanism-drivers',
                            help=_('An ordered list of extension driver '
                                   'entrypoints to be loaded from the '
                                   'neutron.ml2.extension_drivers namespace.'))
        parser.add_argument('--libvirt-type',
                            default='kvm',
                            choices=['kvm', 'qemu'],
                            help=_('Libvirt domain type. (default: kvm)'))
        parser.add_argument('--ntp-server',
                            help=_('The NTP for overcloud nodes.'))
        parser.add_argument(
            '--tripleo-root',
            default=os.environ.get('TRIPLEO_ROOT', '/etc/tripleo'),
            help=_('The root directory for TripleO templates.')
        )
        parser.add_argument(
            '--no-proxy',
            default=os.environ.get('no_proxy', ''),
            help=_('A comma separated list of hosts that should not be '
                   'proxied.')
        )
        parser.add_argument(
            '-O', '--output-dir', metavar='<OUTPUT DIR>',
            help=_('Directory to write Tuskar template files into. It will be '
                   'created if it does not exist. If not provided a temporary '
                   'directory will be used.')
        )
        parser.add_argument(
            '-e', '--environment-file', metavar='<HEAT ENVIRONMENT FILE>',
            action='append', dest='environment_files',
            help=_('Environment files to be passed to the heat stack-create '
                   'or heat stack-update command. (Can be specified more than '
                   'once.)')
        )
        parser.add_argument(
            '--validation-errors-fatal',
            action='store_true',
            default=False,
            help=_('Exit if there are errors from the configuration '
                   'pre-checks. Ignoring these errors will likely cause your '
                   'deploy to fail.')
        )
        parser.add_argument(
            '--validation-warnings-fatal',
            action='store_true',
            default=False,
            help=_('Exit if there are warnings from the configuration '
                   'pre-checks.')
        )
        # RHEL registration options (consumed by _create_registration_env).
        reg_group = parser.add_argument_group('Registration Parameters')
        reg_group.add_argument(
            '--rhel-reg',
            action='store_true',
            help=_('Register overcloud nodes to the customer portal or a '
                   'satellite.')
        )
        reg_group.add_argument(
            '--reg-method',
            choices=['satellite', 'portal'],
            default='satellite',
            help=_('RHEL registration method to use for the overcloud nodes.')
        )
        reg_group.add_argument(
            '--reg-org',
            default='',
            help=_('Organization key to use for registration.')
        )
        reg_group.add_argument(
            '--reg-force',
            action='store_true',
            help=_('Register the system even if it is already registered.')
        )
        reg_group.add_argument(
            '--reg-sat-url',
            default='',
            help=_('Satellite server to register overcloud nodes.')
        )
        reg_group.add_argument(
            '--reg-activation-key',
            default='',
            help=_('Activation key to use for registration.')
        )
        return parser
    def take_action(self, parsed_args):
        """Entry point: validate, pre-check, then create/update the stack.

        Returns True on a successful deploy, False (or early None) when
        validation or the deployment itself fails.
        """
        self.log.debug("take_action(%s)" % parsed_args)
        self._validate_args(parsed_args)
        # Pre-deployment sanity checks; fatal flags decide whether errors
        # or warnings abort the command.
        errors, warnings = self._predeploy_verify_capabilities(parsed_args)
        if errors > 0:
            self.log.error(
                "Configuration has %d errors, fix them before proceeding. "
                "Ignoring these errors is likely to lead to a failed deploy.",
                errors)
            if parsed_args.validation_warnings_fatal or \
                    parsed_args.validation_errors_fatal:
                return
        if warnings > 0:
            self.log.error(
                "Configuration has %d warnings, fix them before proceeding. ",
                warnings)
            if parsed_args.validation_warnings_fatal:
                return
        else:
            self.log.info("SUCCESS: No warnings or errors in deploy "
                          "configuration, proceeding.")
        clients = self.app.client_manager
        orchestration_client = clients.rdomanager_oscplugin.orchestration()
        # stack is None when no stack of this name exists -> stack create.
        stack = self._get_stack(orchestration_client, parsed_args.stack)
        stack_create = stack is None
        try:
            self._pre_heat_deploy()
            if parsed_args.rhel_reg:
                # Both registration methods need an org + activation key;
                # satellite additionally needs the satellite URL.
                if parsed_args.reg_method == 'satellite':
                    sat_required_args = (parsed_args.reg_org and
                                         parsed_args.reg_sat_url and
                                         parsed_args.reg_activation_key)
                    if not sat_required_args:
                        raise exceptions.DeploymentError(
                            "ERROR: In order to use satellite registration, "
                            "you must specify --reg-org, --reg-sat-url, and "
                            "--reg-activation-key.")
                else:
                    portal_required_args = (parsed_args.reg_org and
                                            parsed_args.reg_activation_key)
                    if not portal_required_args:
                        raise exceptions.DeploymentError(
                            "ERROR: In order to use portal registration, you "
                            "must specify --reg-org, and "
                            "--reg-activation-key.")
            # Deploy from templates or from a Tuskar plan (mutually
            # exclusive options enforced by the parser).
            if parsed_args.templates:
                self._deploy_tripleo_heat_templates(stack, parsed_args)
            else:
                self._deploy_tuskar(stack, parsed_args)
            # Get a new copy of the stack after stack update/create. If it was
            # a create then the previous stack object would be None.
            stack = self._get_stack(orchestration_client, parsed_args.stack)
            self._create_overcloudrc(stack, parsed_args)
            self._create_tempest_deployer_input()
            if stack_create:
                self._deploy_postconfig(stack, parsed_args)
            overcloud_endpoint = self._get_overcloud_endpoint(stack)
            print("Overcloud Endpoint: {0}".format(overcloud_endpoint))
            print("Overcloud Deployed")
            return True
        except exceptions.DeploymentError as err:
            print("Deployment failed: ", err, file=sys.stderr)
            return False
|
rdo-management/python-rdomanager-oscplugin | rdomanager_oscplugin/v1/overcloud_deploy.py | DeployOvercloud._get_stack | python | def _get_stack(self, orchestration_client, stack_name):
try:
stack = orchestration_client.stacks.get(stack_name)
self.log.info("Stack found, will be doing a stack update")
return stack
except HTTPNotFound:
self.log.info("No stack found, will be doing a stack create") | Get the ID for the current deployed overcloud stack if it exists. | train | https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/v1/overcloud_deploy.py#L178-L186 | null | class DeployOvercloud(command.Command):
"""Deploy Overcloud"""
log = logging.getLogger(__name__ + ".DeployOvercloud")
predeploy_errors = 0
predeploy_warnings = 0
    def set_overcloud_passwords(self, parameters, parsed_args):
        """Add passwords to the parameters dictionary

        :param parameters: A dictionary for the passwords to be added to
        :type parameters: dict
        :param parsed_args: parsed CLI arguments; ``parsed_args.templates``
            selects THT-style parameter names vs Tuskar role-scoped names
        """
        undercloud_ceilometer_snmpd_password = utils.get_config_value(
            "auth", "undercloud_ceilometer_snmpd_password")
        # Keep the generated passwords on the instance; later steps
        # (overcloudrc creation, postconfig) read self.passwords.
        self.passwords = passwords = utils.generate_overcloud_passwords()
        ceilometer_pass = passwords['OVERCLOUD_CEILOMETER_PASSWORD']
        ceilometer_secret = passwords['OVERCLOUD_CEILOMETER_SECRET']
        if parsed_args.templates:
            # TripleO Heat Templates: flat, global parameter names.
            parameters['AdminPassword'] = passwords['OVERCLOUD_ADMIN_PASSWORD']
            parameters['AdminToken'] = passwords['OVERCLOUD_ADMIN_TOKEN']
            parameters['CeilometerPassword'] = ceilometer_pass
            parameters['CeilometerMeteringSecret'] = ceilometer_secret
            parameters['CinderPassword'] = passwords[
                'OVERCLOUD_CINDER_PASSWORD']
            parameters['GlancePassword'] = passwords[
                'OVERCLOUD_GLANCE_PASSWORD']
            parameters['HeatPassword'] = passwords['OVERCLOUD_HEAT_PASSWORD']
            parameters['HeatStackDomainAdminPassword'] = passwords[
                'OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD']
            parameters['NeutronPassword'] = passwords[
                'OVERCLOUD_NEUTRON_PASSWORD']
            parameters['NovaPassword'] = passwords['OVERCLOUD_NOVA_PASSWORD']
            parameters['SwiftHashSuffix'] = passwords['OVERCLOUD_SWIFT_HASH']
            parameters['SwiftPassword'] = passwords['OVERCLOUD_SWIFT_PASSWORD']
            parameters['SnmpdReadonlyUserPassword'] = (
                undercloud_ceilometer_snmpd_password)
        else:
            # Tuskar plan: parameters are scoped per role ("Role-1::Name").
            parameters['Controller-1::AdminPassword'] = passwords[
                'OVERCLOUD_ADMIN_PASSWORD']
            parameters['Controller-1::AdminToken'] = passwords[
                'OVERCLOUD_ADMIN_TOKEN']
            parameters['Compute-1::AdminPassword'] = passwords[
                'OVERCLOUD_ADMIN_PASSWORD']
            parameters['Controller-1::SnmpdReadonlyUserPassword'] = (
                undercloud_ceilometer_snmpd_password)
            parameters['Cinder-Storage-1::SnmpdReadonlyUserPassword'] = (
                undercloud_ceilometer_snmpd_password)
            parameters['Swift-Storage-1::SnmpdReadonlyUserPassword'] = (
                undercloud_ceilometer_snmpd_password)
            parameters['Compute-1::SnmpdReadonlyUserPassword'] = (
                undercloud_ceilometer_snmpd_password)
            parameters['Controller-1::CeilometerPassword'] = ceilometer_pass
            parameters[
                'Controller-1::CeilometerMeteringSecret'] = ceilometer_secret
            parameters['Compute-1::CeilometerPassword'] = ceilometer_pass
            parameters[
                'Compute-1::CeilometerMeteringSecret'] = ceilometer_secret
            parameters['Controller-1::CinderPassword'] = (
                passwords['OVERCLOUD_CINDER_PASSWORD'])
            parameters['Controller-1::GlancePassword'] = (
                passwords['OVERCLOUD_GLANCE_PASSWORD'])
            parameters['Controller-1::HeatPassword'] = (
                passwords['OVERCLOUD_HEAT_PASSWORD'])
            parameters['Controller-1::HeatStackDomainAdminPassword'] = (
                passwords['OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD'])
            parameters['Controller-1::NeutronPassword'] = (
                passwords['OVERCLOUD_NEUTRON_PASSWORD'])
            parameters['Compute-1::NeutronPassword'] = (
                passwords['OVERCLOUD_NEUTRON_PASSWORD'])
            parameters['Controller-1::NovaPassword'] = (
                passwords['OVERCLOUD_NOVA_PASSWORD'])
            parameters['Compute-1::NovaPassword'] = (
                passwords['OVERCLOUD_NOVA_PASSWORD'])
            parameters['Controller-1::SwiftHashSuffix'] = (
                passwords['OVERCLOUD_SWIFT_HASH'])
            parameters['Controller-1::SwiftPassword'] = (
                passwords['OVERCLOUD_SWIFT_PASSWORD'])
    def _update_paramaters(self, args, network_client, stack):
        """Assemble the Heat parameter dict for a deploy or update.

        NOTE: the misspelled name ("paramaters") is kept — callers in this
        file use it.

        :param args: parsed CLI arguments
        :param network_client: neutron client, used to look up 'ctlplane'
        :param stack: existing stack object, or None for a fresh create
        :returns: dict of Heat parameters (THT names when args.templates
            is set, Tuskar role-scoped names otherwise)
        """
        if args.templates:
            parameters = PARAMETERS.copy()
            # Some parameters may only be set on create, never on update.
            if stack is None:
                parameters.update(NEW_STACK_PARAMETERS)
        else:
            parameters = {}
        self.log.debug("Generating overcloud passwords")
        self.set_overcloud_passwords(parameters, args)
        self.log.debug("Getting ctlplane from Neutron")
        net = network_client.api.find_attr('networks', 'ctlplane')
        parameters['NeutronControlPlaneID'] = net['id']
        # Build (heat parameter, CLI attribute) pairs; the two branches
        # differ only in the parameter naming scheme (THT vs Tuskar).
        if args.templates:
            param_args = (
                ('NeutronPublicInterface', 'neutron_public_interface'),
                ('NeutronBridgeMappings', 'neutron_bridge_mappings'),
                ('NeutronFlatNetworks', 'neutron_flat_networks'),
                ('HypervisorNeutronPhysicalBridge', 'neutron_physical_bridge'),
                ('NtpServer', 'ntp_server'),
                ('ControllerCount', 'control_scale'),
                ('ComputeCount', 'compute_scale'),
                ('ObjectStorageCount', 'swift_storage_scale'),
                ('BlockStorageCount', 'block_storage_scale'),
                ('CephStorageCount', 'ceph_storage_scale'),
                ('OvercloudControlFlavor', 'control_flavor'),
                ('OvercloudComputeFlavor', 'compute_flavor'),
                ('OvercloudBlockStorageFlavor', 'block_storage_flavor'),
                ('OvercloudSwiftStorageFlavor', 'swift_storage_flavor'),
                ('OvercloudCephStorageFlavor', 'ceph_storage_flavor'),
                ('NeutronNetworkVLANRanges', 'neutron_network_vlan_ranges'),
                ('NeutronMechanismDrivers', 'neutron_mechanism_drivers')
            )
            # Network-type/tunnel settings cannot be changed on update.
            if stack is None:
                new_stack_args = (
                    ('NeutronNetworkType', 'neutron_network_type'),
                    ('NeutronTunnelIdRanges', 'neutron_tunnel_id_ranges'),
                    ('NeutronTunnelTypes', 'neutron_tunnel_types'),
                    ('NeutronVniRanges', 'neutron_vni_ranges'),
                    ('NovaComputeLibvirtType', 'libvirt_type'),
                )
                param_args = param_args + new_stack_args
            if args.neutron_disable_tunneling is not None:
                neutron_enable_tunneling = (
                    not args.neutron_disable_tunneling)
                parameters.update({
                    'NeutronEnableTunnelling': neutron_enable_tunneling,
                })
        else:
            param_args = (
                ('Controller-1::NeutronPublicInterface',
                 'neutron_public_interface'),
                ('Compute-1::NeutronPublicInterface',
                 'neutron_public_interface'),
                ('Controller-1::NeutronBridgeMappings',
                 'neutron_bridge_mappings'),
                ('Compute-1::NeutronBridgeMappings',
                 'neutron_bridge_mappings'),
                ('Controller-1::NeutronFlatNetworks', 'neutron_flat_networks'),
                ('Compute-1::NeutronFlatNetworks', 'neutron_flat_networks'),
                ('Compute-1::NeutronPhysicalBridge',
                 'neutron_physical_bridge'),
                ('Controller-1::NtpServer', 'ntp_server'),
                ('Compute-1::NtpServer', 'ntp_server'),
                ('Controller-1::NeutronNetworkVLANRanges',
                 'neutron_network_vlan_ranges'),
                ('Compute-1::NeutronNetworkVLANRanges',
                 'neutron_network_vlan_ranges'),
                ('Controller-1::NeutronMechanismDrivers',
                 'neutron_mechanism_drivers'),
                ('Compute-1::NeutronMechanismDrivers',
                 'neutron_mechanism_drivers'),
                ('Controller-1::count', 'control_scale'),
                ('Compute-1::count', 'compute_scale'),
                ('Swift-Storage-1::count', 'swift_storage_scale'),
                ('Cinder-Storage-1::count', 'block_storage_scale'),
                ('Ceph-Storage-1::count', 'ceph_storage_scale'),
                ('Cinder-Storage-1::Flavor', 'block_storage_flavor'),
                ('Compute-1::Flavor', 'compute_flavor'),
                ('Controller-1::Flavor', 'control_flavor'),
                ('Swift-Storage-1::Flavor', 'swift_storage_flavor'),
                ('Ceph-Storage-1::Flavor', 'ceph_storage_flavor'),
            )
            if stack is None:
                new_stack_args = (
                    ('Controller-1::NeutronNetworkType',
                     'neutron_network_type'),
                    ('Compute-1::NeutronNetworkType', 'neutron_network_type'),
                    ('Controller-1::NeutronTunnelTypes',
                     'neutron_tunnel_types'),
                    ('Compute-1::NeutronTunnelTypes', 'neutron_tunnel_types'),
                    ('Compute-1::NovaComputeLibvirtType', 'libvirt_type'),
                    ('Controller-1::NeutronTunnelIdRanges',
                     'neutron_tunnel_id_ranges'),
                    ('Controller-1::NeutronVniRanges', 'neutron_vni_ranges'),
                    ('Compute-1::NeutronTunnelIdRanges',
                     'neutron_tunnel_id_ranges'),
                    ('Compute-1::NeutronVniRanges', 'neutron_vni_ranges'),
                )
                param_args = param_args + new_stack_args
            if args.neutron_disable_tunneling is not None:
                neutron_enable_tunneling = (
                    not args.neutron_disable_tunneling)
                parameters.update({
                    'Controller-1::NeutronEnableTunnelling':
                        neutron_enable_tunneling,
                    'Compute-1::NeutronEnableTunnelling':
                        neutron_enable_tunneling,
                })
        # Update parameters from commandline
        for param, arg in param_args:
            if getattr(args, arg, None) is not None:
                parameters[param] = getattr(args, arg)
        # Scaling needs extra parameters
        number_controllers = max((
            int(parameters.get('ControllerCount', 0)),
            int(parameters.get('Controller-1::count', 0))
        ))
        if number_controllers > 1:
            # HA deployment: L3 HA on, agent failover off; requires NTP.
            if not args.ntp_server:
                raise Exception('Specify --ntp-server when using multiple'
                                ' controllers (with HA).')
            if args.templates:
                parameters.update({
                    'NeutronL3HA': True,
                    'NeutronAllowL3AgentFailover': False,
                })
            else:
                parameters.update({
                    'Controller-1::NeutronL3HA': True,
                    'Controller-1::NeutronAllowL3AgentFailover': False,
                    'Compute-1::NeutronL3HA': True,
                    'Compute-1::NeutronAllowL3AgentFailover': False,
                })
        else:
            if args.templates:
                parameters.update({
                    'NeutronL3HA': False,
                    'NeutronAllowL3AgentFailover': False,
                })
            else:
                parameters.update({
                    'Controller-1::NeutronL3HA': False,
                    'Controller-1::NeutronAllowL3AgentFailover': False,
                    'Compute-1::NeutronL3HA': False,
                    'Compute-1::NeutronAllowL3AgentFailover': False,
                })
        # set at least 3 dhcp_agents_per_network
        dhcp_agents_per_network = (number_controllers if number_controllers and
                                   number_controllers > 3 else 3)
        if args.templates:
            parameters.update({
                'NeutronDhcpAgentsPerNetwork': dhcp_agents_per_network,
            })
        else:
            parameters.update({
                'Controller-1::NeutronDhcpAgentsPerNetwork':
                    dhcp_agents_per_network,
            })
        # Ceph cluster identity/keys are generated only on first create.
        if max((int(parameters.get('CephStorageCount', 0)),
                int(parameters.get('Ceph-Storage-1::count', 0)))) > 0:
            if stack is None:
                parameters.update({
                    'CephClusterFSID': six.text_type(uuid.uuid1()),
                    'CephMonKey': utils.create_cephx_key(),
                    'CephAdminKey': utils.create_cephx_key()
                })
        return parameters
def _create_registration_env(self, args):
if args.templates:
tht_root = args.templates
else:
tht_root = TRIPLEO_HEAT_TEMPLATES
environment = os.path.join(tht_root,
RHEL_REGISTRATION_EXTRACONFIG_NAME,
'environment-rhel-registration.yaml')
registry = os.path.join(tht_root, RHEL_REGISTRATION_EXTRACONFIG_NAME,
'rhel-registration-resource-registry.yaml')
user_env = ("parameter_defaults:\n"
" rhel_reg_method: \"%(method)s\"\n"
" rhel_reg_org: \"%(org)s\"\n"
" rhel_reg_force: \"%(force)s\"\n"
" rhel_reg_sat_url: \"%(sat_url)s\"\n"
" rhel_reg_activation_key: \"%(activation_key)s\"\n"
% {'method': args.reg_method,
'org': args.reg_org,
'force': args.reg_force,
'sat_url': args.reg_sat_url,
'activation_key': args.reg_activation_key})
handle, user_env_file = tempfile.mkstemp()
with open(user_env_file, 'w') as temp_file:
temp_file.write(user_env)
return [registry, environment, user_env_file]
    def _heat_deploy(self, stack, stack_name, template_path, parameters,
                     environments, timeout):
        """Verify the Baremetal nodes are available and do a stack update

        :param stack: existing stack object, or None to create a new stack
        :param stack_name: name of the stack to create/update
        :param template_path: path to the top-level Heat template
        :param parameters: Heat parameter dict
        :param environments: list of environment file paths
        :param timeout: Heat timeout in minutes, or falsy to use default
        :raises Exception: when the stack ends in a failed state
        """
        self.log.debug("Processing environment files")
        env_files, env = (
            template_utils.process_multiple_environments_and_files(
                environments))
        self.log.debug("Getting template contents")
        template_files, template = template_utils.get_template_contents(
            template_path)
        # Heat needs every referenced file (templates + environments).
        files = dict(list(template_files.items()) + list(env_files.items()))
        clients = self.app.client_manager
        orchestration_client = clients.rdomanager_oscplugin.orchestration()
        self.log.debug("Deploying stack: %s", stack_name)
        self.log.debug("Deploying template: %s", template)
        self.log.debug("Deploying parameters: %s", parameters)
        self.log.debug("Deploying environment: %s", env)
        self.log.debug("Deploying files: %s", files)
        stack_args = {
            'stack_name': stack_name,
            'template': template,
            'parameters': parameters,
            'environment': env,
            'files': files
        }
        if timeout:
            stack_args['timeout_mins'] = timeout
        if stack is None:
            self.log.info("Performing Heat stack create")
            orchestration_client.stacks.create(**stack_args)
        else:
            self.log.info("Performing Heat stack update")
            # Make sure existing parameters for stack are reused
            stack_args['existing'] = 'true'
            orchestration_client.stacks.update(stack.id, **stack_args)
        # Block until the stack reaches a terminal state.
        create_result = utils.wait_for_stack_ready(
            orchestration_client, stack_name)
        if not create_result:
            if stack is None:
                raise Exception("Heat Stack create failed.")
            else:
                raise Exception("Heat Stack update failed.")
def _get_overcloud_endpoint(self, stack):
for output in stack.to_dict().get('outputs', {}):
if output['output_key'] == 'KeystoneURL':
return output['output_value']
def _get_service_ips(self, stack):
service_ips = {}
for output in stack.to_dict().get('outputs', {}):
service_ips[output['output_key']] = output['output_value']
return service_ips
def _pre_heat_deploy(self):
"""Setup before the Heat stack create or update has been done."""
clients = self.app.client_manager
compute_client = clients.compute
self.log.debug("Checking hypervisor stats")
if utils.check_hypervisor_stats(compute_client) is None:
raise exceptions.DeploymentError(
"Expected hypervisor stats not met")
return True
    def _deploy_tripleo_heat_templates(self, stack, parsed_args):
        """Deploy the fixed templates in TripleO Heat Templates

        Builds the parameter dict and environment list, then delegates to
        _heat_deploy().  *stack* is None for a fresh create.
        """
        clients = self.app.client_manager
        network_client = clients.network
        parameters = self._update_paramaters(
            parsed_args, network_client, stack)
        # Make sure enough registered ironic nodes exist for the requested
        # role counts before invoking Heat.
        utils.check_nodes_count(
            self.app.client_manager.rdomanager_oscplugin.baremetal(),
            stack,
            parameters,
            {
                'ControllerCount': 1,
                'ComputeCount': 1,
                'ObjectStorageCount': 0,
                'BlockStorageCount': 0,
                'CephStorageCount': 0,
            }
        )
        tht_root = parsed_args.templates
        print("Deploying templates in the directory {0}".format(
            os.path.abspath(tht_root)))
        self.log.debug("Creating Environment file")
        env_path = utils.create_environment_file()
        if stack is None:
            # Keystone PKI material is only generated on initial create.
            self.log.debug("Creating Keystone certificates")
            keystone_pki.generate_certs_into_json(env_path, False)
        resource_registry_path = os.path.join(tht_root, RESOURCE_REGISTRY_NAME)
        environments = [resource_registry_path, env_path]
        if parsed_args.rhel_reg:
            reg_env = self._create_registration_env(parsed_args)
            environments.extend(reg_env)
        if parsed_args.environment_files:
            environments.extend(parsed_args.environment_files)
        overcloud_yaml = os.path.join(tht_root, OVERCLOUD_YAML_NAME)
        self._heat_deploy(stack, parsed_args.stack, overcloud_yaml, parameters,
                          environments, parsed_args.timeout)
    def _deploy_tuskar(self, stack, parsed_args):
        """Deploy the overcloud from a Tuskar plan.

        Downloads the plan's templates to an output directory, saves the
        computed parameters back to Tuskar, then delegates to _heat_deploy().
        """
        clients = self.app.client_manager
        management = clients.rdomanager_oscplugin.management()
        network_client = clients.network
        # TODO(dmatthews): The Tuskar client has very similar code to this for
        # downloading templates. It should be refactored upstream so we can use
        # it.
        if parsed_args.output_dir:
            output_dir = parsed_args.output_dir
        else:
            output_dir = tempfile.mkdtemp()
        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)
        management_plan = tuskarutils.find_resource(
            management.plans, parsed_args.plan)
        # retrieve templates
        templates = management.plans.templates(management_plan.uuid)
        parameters = self._update_paramaters(
            parsed_args, network_client, stack)
        utils.check_nodes_count(
            self.app.client_manager.rdomanager_oscplugin.baremetal(),
            stack,
            parameters,
            {
                'Controller-1::count': 1,
                'Compute-1::count': 1,
                'Swift-Storage-1::count': 0,
                'Cinder-Storage-1::count': 0,
                'Ceph-Storage-1::count': 0,
            }
        )
        if stack is None:
            # Keystone PKI material is only generated on initial create.
            ca_key_pem, ca_cert_pem = keystone_pki.create_ca_pair()
            signing_key_pem, signing_cert_pem = (
                keystone_pki.create_signing_pair(ca_key_pem, ca_cert_pem))
            parameters['Controller-1::KeystoneCACertificate'] = ca_cert_pem
            parameters['Controller-1::KeystoneSigningCertificate'] = (
                signing_cert_pem)
            parameters['Controller-1::KeystoneSigningKey'] = signing_key_pem
        # Save the parameters to Tuskar so they can be used when redeploying.
        # Tuskar expects to get all values as strings. So we convert them all
        # below.
        management.plans.patch(
            management_plan.uuid,
            [{'name': x[0], 'value': six.text_type(x[1])}
             for x in parameters.items()]
        )
        # write file for each key-value in templates
        print("The following templates will be written:")
        for template_name, template_content in templates.items():
            # It's possible to organize the role templates and their dependent
            # files into directories, in which case the template_name will
            # carry the directory information. If that's the case, first
            # create the directory structure (if it hasn't already been
            # created by another file in the templates list).
            template_dir = os.path.dirname(template_name)
            output_template_dir = os.path.join(output_dir, template_dir)
            if template_dir and not os.path.exists(output_template_dir):
                os.makedirs(output_template_dir)
            filename = os.path.join(output_dir, template_name)
            with open(filename, 'w+') as template_file:
                template_file.write(template_content)
            print(filename)
        overcloud_yaml = os.path.join(output_dir, 'plan.yaml')
        environment_yaml = os.path.join(output_dir, 'environment.yaml')
        environments = [environment_yaml, ]
        if parsed_args.rhel_reg:
            reg_env = self._create_registration_env(parsed_args)
            environments.extend(reg_env)
        if parsed_args.environment_files:
            environments.extend(parsed_args.environment_files)
        self._heat_deploy(stack, parsed_args.stack, overcloud_yaml, parameters,
                          environments, parsed_args.timeout)
def _create_overcloudrc(self, stack, parsed_args):
overcloud_endpoint = self._get_overcloud_endpoint(stack)
overcloud_ip = six.moves.urllib.parse.urlparse(
overcloud_endpoint).hostname
rc_params = {
'NOVA_VERSION': '1.1',
'COMPUTE_API_VERSION': '1.1',
'OS_USERNAME': 'admin',
'OS_TENANT_NAME': 'admin',
'OS_NO_CACHE': 'True',
'OS_CLOUDNAME': stack.stack_name,
'no_proxy': "%(no_proxy)s,%(overcloud_ip)s" % {
'no_proxy': parsed_args.no_proxy,
'overcloud_ip': overcloud_ip,
}
}
rc_params.update({
'OS_PASSWORD': self.passwords['OVERCLOUD_ADMIN_PASSWORD'],
'OS_AUTH_URL': self._get_overcloud_endpoint(stack),
})
with open('%src' % stack.stack_name, 'w') as f:
for key, value in rc_params.items():
f.write("export %(key)s=%(value)s\n" %
{'key': key, 'value': value})
def _create_tempest_deployer_input(self):
config = configparser.ConfigParser()
config.add_section('compute-feature-enabled')
# Does the test environment support obtaining instance serial console
# output? (default: true)
# set in [nova.serial_console]->enabled
config.set('compute-feature-enabled', 'console_output', 'false')
config.add_section('object-storage')
# Role to add to users created for swift tests to enable creating
# containers (default: 'Member')
# keystone role-list returns this role
config.set('object-storage', 'operator_role', 'swiftoperator')
config.add_section('orchestration')
# Role required for users to be able to manage stacks
# (default: 'heat_stack_owner')
# keystone role-list returns this role
config.set('orchestration', 'stack_owner_role', 'heat_stack_user')
config.add_section('volume')
# Name of the backend1 (must be declared in cinder.conf)
# (default: 'BACKEND_1')
# set in [cinder]->enabled_backends
config.set('volume', 'backend1_name', 'tripleo_iscsi')
config.add_section('volume-feature-enabled')
# Update bootable status of a volume Not implemented on icehouse
# (default: false)
# python-cinderclient supports set-bootable
config.set('volume-feature-enabled', 'bootable', 'true')
with open('tempest-deployer-input.conf', 'w+') as config_file:
config.write(config_file)
def _deploy_postconfig(self, stack, parsed_args):
    """Run the post-create configuration of a new overcloud.

    Initializes keystone on the overcloud, registers the service
    endpoints (preferring internal VIPs published as stack outputs)
    and creates a demo nova flavor.

    :param stack: heat stack object of the deployed overcloud
    :param parsed_args: parsed CLI arguments (used for logging only)
    """
    self.log.debug("_deploy_postconfig(%s)" % parsed_args)

    passwords = self.passwords

    overcloud_endpoint = self._get_overcloud_endpoint(stack)
    overcloud_ip = six.moves.urllib.parse.urlparse(
        overcloud_endpoint).hostname

    # Make sure direct access to the overcloud bypasses any proxy.
    no_proxy = [os.environ.get('no_proxy'), overcloud_ip]
    os.environ['no_proxy'] = ','.join(
        [x for x in no_proxy if x is not None])

    service_ips = self._get_service_ips(stack)

    utils.remove_known_hosts(overcloud_ip)

    keystone_ip = service_ips.get('KeystoneAdminVip')
    if not keystone_ip:
        keystone_ip = overcloud_ip

    keystone.initialize(
        keystone_ip,
        passwords['OVERCLOUD_ADMIN_TOKEN'],
        'admin@example.com',
        passwords['OVERCLOUD_ADMIN_PASSWORD'],
        public=overcloud_ip,
        user='heat-admin')

    # NOTE(bcrochet): Bad hack. Remove the ssl_port info from the
    # os_cloud_config.SERVICES dictionary.
    # FIX: was SERVICES.iteritems(), which exists only on Python 2;
    # use six.iteritems() for py2/py3 compatibility, matching the
    # loop over utils.SERVICE_LIST below. Also renamed the loop
    # variable so it no longer shadows service_name used further down.
    for svc, data in six.iteritems(keystone.SERVICES):
        data.pop('ssl_port', None)

    services = {}
    for service, data in six.iteritems(utils.SERVICE_LIST):
        service_data = data.copy()
        service_data.pop('password_field', None)
        password_field = data.get('password_field')
        if password_field:
            service_data['password'] = passwords[password_field]

        # Stack output names drop the API version, e.g. 'novav3' ->
        # 'NovaInternalVip'.
        service_name = re.sub('v[0-9]+', '',
                              service.capitalize() + 'InternalVip')
        internal_vip = service_ips.get(service_name)
        if internal_vip:
            service_data['internal_host'] = internal_vip
        services.update({service: service_data})

    keystone_client = clients.get_keystone_client(
        'admin',
        passwords['OVERCLOUD_ADMIN_PASSWORD'],
        'admin',
        overcloud_endpoint)
    keystone.setup_endpoints(
        services,
        client=keystone_client,
        os_auth_url=overcloud_endpoint,
        public_host=overcloud_ip)

    compute_client = clients.get_nova_bm_client(
        'admin',
        passwords['OVERCLOUD_ADMIN_PASSWORD'],
        'admin',
        overcloud_endpoint)
    compute_client.flavors.create('m1.demo', 512, 1, 10, 'auto')
def _validate_args(self, parsed_args):
    """Sanity-check the neutron network-type/tunnel-type options.

    :raises oscexc.CommandError: when a network type is given without
        tunnel types, or is not one of the given tunnel types
    """
    network_type = parsed_args.neutron_network_type
    tunnel_types = parsed_args.neutron_tunnel_types

    # Nothing to validate unless a network type was requested.
    if not network_type:
        return

    if not tunnel_types:
        raise oscexc.CommandError("Neutron tunnel types must be specified "
                                  "when Neutron network type is specified")

    # Validate that neutron_network_type is in neutron_tunnel_types
    if network_type not in tunnel_types:
        raise oscexc.CommandError("Neutron network type must be in "
                                  "Neutron tunnel types "
                                  "(%s) " % tunnel_types)
def _predeploy_verify_capabilities(self, parsed_args):
    """Run the pre-deployment sanity checks.

    Verifies the boot images, the existence of the selected flavors,
    each ironic node's boot configuration, and that every role to be
    deployed has enough nodes tagged with its flavor's profile.

    :param parsed_args: parsed CLI arguments carrying the per-role
        --ROLE-flavor and --ROLE-scale options
    :returns: tuple of (error count, warning count)
    """
    # Counters are reset here and mutated by the _check_* helpers.
    self.predeploy_errors = 0
    self.predeploy_warnings = 0
    self.log.debug("Starting _pre_verify_capabilities")

    bm_client = self.app.client_manager.rdomanager_oscplugin.baremetal()

    self._check_boot_images()

    self._check_flavors_exist(parsed_args)

    for node in bm_client.node.list():
        # node.list() returns partial records; fetch the full node
        # before inspecting driver_info/properties.
        node = bm_client.node.get(node.uuid)
        self.log.debug("Checking config for Node {0}".format(node.uuid))
        self._check_ironic_boot_configuration(node)

    flavor_profile_map = self._collect_flavor_profiles([
        parsed_args.control_flavor,
        parsed_args.compute_flavor,
        parsed_args.ceph_storage_flavor,
        parsed_args.block_storage_flavor,
        parsed_args.swift_storage_flavor,
    ])
    node_profile_map = self._collect_node_profiles()

    # Match each deployed role's flavor profile against tagged nodes.
    for target, flavor, scale in [
        ('control', parsed_args.control_flavor,
         parsed_args.control_scale),
        ('compute', parsed_args.compute_flavor,
         parsed_args.compute_scale),
        ('ceph-storage', parsed_args.ceph_storage_flavor,
         parsed_args.ceph_storage_scale),
        ('block-storage', parsed_args.block_storage_flavor,
         parsed_args.block_storage_scale),
        ('swift-storage', parsed_args.swift_storage_flavor,
         parsed_args.swift_storage_scale),
    ]:
        if scale == 0 or flavor is None:
            self.log.debug("Skipping verification of %s profiles because "
                           "none will be deployed", flavor)
            continue
        self._check_profiles(
            target, flavor, scale,
            flavor_profile_map,
            node_profile_map)

    # Unprofiled nodes are collected under the None key; warn so the
    # operator knows they will sit idle.
    if len(node_profile_map.get(None, [])) > 0:
        self.predeploy_warnings += 1
        self.log.warning(
            "There are %d ironic nodes with no profile that will "
            "not be used: %s",
            len(node_profile_map[None]),
            ', '.join(node_profile_map[None])
        )

    return self.predeploy_errors, self.predeploy_warnings
# Cached glance image IDs for the bm-deploy kernel/ramdisk, populated
# lazily by _image_ids(). Double-underscore names are class-private
# (name-mangled), so only this class touches the cache.
__kernel_id = None
__ramdisk_id = None
def _image_ids(self):
    """Return (kernel_id, ramdisk_id) of the bm-deploy glance images.

    Results are cached on the instance after the first lookup. An
    image that cannot be resolved leaves its ID as None.
    """
    if self.__kernel_id is not None and self.__ramdisk_id is not None:
        return self.__kernel_id, self.__ramdisk_id

    image_client = self.app.client_manager.image
    resolved = {'bm-deploy-kernel': None, 'bm-deploy-ramdisk': None}
    for image_name in ('bm-deploy-kernel', 'bm-deploy-ramdisk'):
        try:
            resolved[image_name] = osc_utils.find_resource(
                image_client.images, image_name).id
        except AttributeError as exc:
            # find_resource returned something without an .id -
            # typically because the image name was ambiguous.
            self.log.error("Please make sure there is only one image "
                           "named '%s' in glance." % image_name)
            self.log.exception(exc)

    kernel_id = resolved['bm-deploy-kernel']
    ramdisk_id = resolved['bm-deploy-ramdisk']
    self.log.debug("Using kernel ID: {0} and ramdisk ID: {1}".format(
        kernel_id, ramdisk_id))

    self.__kernel_id = kernel_id
    self.__ramdisk_id = ramdisk_id
    return kernel_id, ramdisk_id
def _collect_node_profiles(self):
    """Gather a map of profile -> [node_uuid] for ironic boot profiles"""
    bm_client = self.app.client_manager.rdomanager_oscplugin.baremetal()

    # map of profile capability -> [node_uuid, ...]
    profile_map = collections.defaultdict(list)

    for listed in bm_client.node.list(maintenance=False):
        # list() returns partial records; re-fetch the full node.
        node = bm_client.node.get(listed.uuid)
        capabilities = node.properties.get('capabilities', '')
        profiles = re.findall(r'profile:(.*?)(?:,|$)', capabilities)
        if profiles:
            for profile in profiles:
                profile_map[profile].append(node.uuid)
        else:
            # Nodes with no profile are tracked under the None key.
            profile_map[None].append(node.uuid)

    return dict(profile_map)
def _collect_flavor_profiles(self, flavors):
    """Map each deployment flavor name to its capabilities profile.

    Only flavors listed in *flavors* are considered; a flavor whose
    'capabilities:profile' key is empty maps to None. Flavors not set
    to boot locally raise a predeploy warning with remediation hints.
    """
    compute_client = self.app.client_manager.compute
    flavor_profiles = {}
    for flavor in compute_client.flavors.list():
        if flavor.name not in flavors:
            self.log.debug("Flavor {} isn't used in this deployment, "
                           "skipping it".format(flavor.name))
            continue

        profile = flavor.get_keys().get('capabilities:profile')
        flavor_profiles[flavor.name] = None if profile == '' else profile

        if flavor.get_keys().get('capabilities:boot_option', '') \
                != 'local':
            self.predeploy_warnings += 1
            self.log.error(
                'Flavor %s "capabilities:boot_option" is not set to '
                '"local". Nodes must have ability to PXE boot from '
                'deploy image.', flavor.name)
            self.log.error(
                'Recommended solution: openstack flavor set --property '
                '"cpu_arch"="x86_64" --property '
                '"capabilities:boot_option"="local" ' + flavor.name)

    return flavor_profiles
def _check_profiles(self, target, flavor, scale,
                    flavor_profile_map,
                    node_profile_map):
    """Verify enough ironic nodes carry the profile tied to *flavor*.

    Increments predeploy_errors and logs remediation hints when the
    flavor has no associated profile or too few nodes are tagged
    with it for the requested scale.
    """
    profile = flavor_profile_map.get(flavor)
    if profile is None:
        self.predeploy_errors += 1
        self.log.error(
            'Warning: The flavor selected for --%s-flavor "%s" has no '
            'profile associated', target, flavor)
        self.log.error(
            'Recommendation: assign a profile with openstack flavor set '
            '--property "capabilities:profile"="PROFILE_NAME" %s',
            flavor)
        return

    tagged_nodes = node_profile_map.get(profile, [])
    if len(tagged_nodes) < scale:
        self.predeploy_errors += 1
        self.log.error(
            "Error: %s of %s requested ironic nodes tagged to profile %s "
            "(for flavor %s)",
            len(tagged_nodes), scale, profile, flavor
        )
        self.log.error(
            "Recommendation: tag more nodes using ironic node-update "
            "<NODE ID> replace properties/capabilities=profile:%s,"
            "boot_option:local", profile)
def _check_boot_images(self):
    """Record one predeploy error per missing bm-deploy boot image."""
    kernel_id, ramdisk_id = self._image_ids()
    message = ("No image with the name '{}' found - make "
               "sure you've uploaded boot images")
    for image_name, image_id in (('bm-deploy-kernel', kernel_id),
                                 ('bm-deploy-ramdisk', ramdisk_id)):
        if image_id is None:
            self.predeploy_errors += 1
            self.log.error(message.format(image_name))
def _check_flavors_exist(self, parsed_args):
    """Ensure that selected flavors (--ROLE-flavor) exist in nova."""
    compute_client = self.app.client_manager.compute
    flavors = {f.name: f for f in compute_client.flavors.list()}
    message = "Provided --{}-flavor, '{}', does not exist"

    role_flavors = (
        ('control', parsed_args.control_flavor,
         parsed_args.control_scale),
        ('compute', parsed_args.compute_flavor,
         parsed_args.compute_scale),
        ('ceph-storage', parsed_args.ceph_storage_flavor,
         parsed_args.ceph_storage_scale),
        ('block-storage', parsed_args.block_storage_flavor,
         parsed_args.block_storage_scale),
        ('swift-storage', parsed_args.swift_storage_flavor,
         parsed_args.swift_storage_scale),
    )
    for target, flavor, scale in role_flavors:
        if flavor is None or scale == 0:
            # Role not deployed, so its flavor option is irrelevant.
            self.log.debug("--{}-flavor not used".format(target))
        elif flavor not in flavors:
            self.predeploy_errors += 1
            self.log.error(message.format(target, flavor))
def _check_ironic_boot_configuration(self, node):
    """Validate a single ironic node's deploy-boot configuration.

    Checks that the node's deploy kernel/ramdisk match the uploaded
    bm-deploy images (errors) and that it is configured to boot
    locally (warning). Results accumulate on predeploy_errors and
    predeploy_warnings.

    :param node: a fully populated ironic node record
    """
    kernel_id, ramdisk_id = self._image_ids()
    self.log.debug("Doing boot checks for {}".format(node.uuid))
    message = ("Node uuid={uuid} has an incorrectly configured "
               "{property}. Expected \"{expected}\" but got "
               "\"{actual}\".")
    if node.driver_info.get('deploy_ramdisk') != ramdisk_id:
        self.predeploy_errors += 1
        self.log.error(message.format(
            uuid=node.uuid,
            property='driver_info/deploy_ramdisk',
            expected=ramdisk_id,
            actual=node.driver_info.get('deploy_ramdisk')
        ))
    if node.driver_info.get('deploy_kernel') != kernel_id:
        self.predeploy_errors += 1
        self.log.error(message.format(
            uuid=node.uuid,
            property='driver_info/deploy_kernel',
            # FIX: previously reported expected=ramdisk_id here, which
            # produced a misleading message for kernel mismatches.
            expected=kernel_id,
            actual=node.driver_info.get('deploy_kernel')
        ))
    if 'boot_option:local' not in node.properties.get('capabilities', ''):
        # Only a warning: deployment may still succeed, but nodes
        # should be set to boot the deploy image locally.
        self.predeploy_warnings += 1
        self.log.warning(message.format(
            uuid=node.uuid,
            property='properties/capabilities',
            expected='boot_option:local',
            actual=node.properties.get('capabilities')
        ))
def get_parser(self, prog_name):
    """Build the argument parser for the overcloud deploy command.

    :param prog_name: program name shown in usage/help output
    :returns: a configured argparse.ArgumentParser
    """
    # add_help doesn't work properly, set it to False:
    parser = argparse.ArgumentParser(
        description=self.get_description(),
        prog=prog_name,
        add_help=False
    )
    # Exactly one of --plan (Tuskar) or --templates (THT) is required.
    main_group = parser.add_mutually_exclusive_group(required=True)
    main_group.add_argument(
        '--plan',
        help=_("The Name or UUID of the Tuskar plan to deploy.")
    )
    main_group.add_argument(
        '--templates', nargs='?', const=TRIPLEO_HEAT_TEMPLATES,
        help=_("The directory containing the Heat templates to deploy"))
    parser.add_argument('--stack',
                        help=_("Stack name to create or update"),
                        default='overcloud')
    parser.add_argument('-t', '--timeout', metavar='<TIMEOUT>',
                        type=int, default=240,
                        help=_('Deployment timeout in minutes.'))
    # Per-role node counts.
    parser.add_argument('--control-scale', type=int,
                        help=_('New number of control nodes.'))
    parser.add_argument('--compute-scale', type=int,
                        help=_('New number of compute nodes.'))
    parser.add_argument('--ceph-storage-scale', type=int,
                        help=_('New number of ceph storage nodes.'))
    parser.add_argument('--block-storage-scale', type=int,
                        help=_('New number of cinder storage nodes.'))
    parser.add_argument('--swift-storage-scale', type=int,
                        help=_('New number of swift storage nodes.'))
    # Per-role nova flavors (validated in _check_flavors_exist).
    parser.add_argument('--control-flavor',
                        help=_("Nova flavor to use for control nodes."))
    parser.add_argument('--compute-flavor',
                        help=_("Nova flavor to use for compute nodes."))
    parser.add_argument('--ceph-storage-flavor',
                        help=_("Nova flavor to use for ceph storage "
                               "nodes."))
    parser.add_argument('--block-storage-flavor',
                        help=_("Nova flavor to use for cinder storage "
                               "nodes."))
    parser.add_argument('--swift-storage-flavor',
                        help=_("Nova flavor to use for swift storage "
                               "nodes."))
    # Neutron networking configuration.
    parser.add_argument('--neutron-flat-networks',
                        help=_('Comma separated list of physical_network '
                               'names with which flat networks can be '
                               'created. Use * to allow flat networks '
                               'with arbitrary physical_network names.'))
    parser.add_argument('--neutron-physical-bridge',
                        help=_('Deprecated.'))
    parser.add_argument('--neutron-bridge-mappings',
                        help=_('Comma separated list of bridge mappings. '
                               '(default: datacentre:br-ex)'))
    parser.add_argument('--neutron-public-interface',
                        help=_('Deprecated.'))
    parser.add_argument('--hypervisor-neutron-public-interface',
                        default='nic1', help=_('Deprecated.'))
    parser.add_argument('--neutron-network-type',
                        help=_('The network type for tenant networks.'))
    parser.add_argument('--neutron-tunnel-types',
                        help=_('Network types supported by the agent '
                               '(gre and/or vxlan).'))
    parser.add_argument('--neutron-tunnel-id-ranges',
                        default="1:1000",
                        help=_("Ranges of GRE tunnel IDs to make "
                               "available for tenant network allocation"),)
    parser.add_argument('--neutron-vni-ranges',
                        default="1:1000",
                        help=_("Ranges of VXLAN VNI IDs to make "
                               "available for tenant network allocation"),)
    # NOTE(review): the trailing comma after this call makes the
    # statement a one-element tuple expression; harmless, but looks
    # accidental.
    parser.add_argument('--neutron-disable-tunneling',
                        dest='neutron_disable_tunneling',
                        action="store_const", const=True,
                        help=_('Disables tunneling.')),
    parser.add_argument('--neutron-network-vlan-ranges',
                        help=_('Comma separated list of '
                               '<physical_network>:<vlan_min>:<vlan_max> '
                               'or <physical_network> specifying '
                               'physical_network names usable for VLAN '
                               'provider and tenant networks, as well as '
                               'ranges of VLAN tags on each available for '
                               'allocation to tenant networks. '
                               '(ex: datacentre:1:1000)'))
    parser.add_argument('--neutron-mechanism-drivers',
                        help=_('An ordered list of extension driver '
                               'entrypoints to be loaded from the '
                               'neutron.ml2.extension_drivers namespace.'))
    parser.add_argument('--libvirt-type',
                        default='kvm',
                        choices=['kvm', 'qemu'],
                        help=_('Libvirt domain type. (default: kvm)'))
    parser.add_argument('--ntp-server',
                        help=_('The NTP for overcloud nodes.'))
    parser.add_argument(
        '--tripleo-root',
        default=os.environ.get('TRIPLEO_ROOT', '/etc/tripleo'),
        help=_('The root directory for TripleO templates.')
    )
    parser.add_argument(
        '--no-proxy',
        default=os.environ.get('no_proxy', ''),
        help=_('A comma separated list of hosts that should not be '
               'proxied.')
    )
    parser.add_argument(
        '-O', '--output-dir', metavar='<OUTPUT DIR>',
        help=_('Directory to write Tuskar template files into. It will be '
               'created if it does not exist. If not provided a temporary '
               'directory will be used.')
    )
    parser.add_argument(
        '-e', '--environment-file', metavar='<HEAT ENVIRONMENT FILE>',
        action='append', dest='environment_files',
        help=_('Environment files to be passed to the heat stack-create '
               'or heat stack-update command. (Can be specified more than '
               'once.)')
    )
    parser.add_argument(
        '--validation-errors-fatal',
        action='store_true',
        default=False,
        help=_('Exit if there are errors from the configuration '
               'pre-checks. Ignoring these errors will likely cause your '
               'deploy to fail.')
    )
    parser.add_argument(
        '--validation-warnings-fatal',
        action='store_true',
        default=False,
        help=_('Exit if there are warnings from the configuration '
               'pre-checks.')
    )
    # RHEL registration options; only consulted when --rhel-reg is set
    # (see take_action for the per-method requirement checks).
    reg_group = parser.add_argument_group('Registration Parameters')
    reg_group.add_argument(
        '--rhel-reg',
        action='store_true',
        help=_('Register overcloud nodes to the customer portal or a '
               'satellite.')
    )
    reg_group.add_argument(
        '--reg-method',
        choices=['satellite', 'portal'],
        default='satellite',
        help=_('RHEL registration method to use for the overcloud nodes.')
    )
    reg_group.add_argument(
        '--reg-org',
        default='',
        help=_('Organization key to use for registration.')
    )
    reg_group.add_argument(
        '--reg-force',
        action='store_true',
        help=_('Register the system even if it is already registered.')
    )
    reg_group.add_argument(
        '--reg-sat-url',
        default='',
        help=_('Satellite server to register overcloud nodes.')
    )
    reg_group.add_argument(
        '--reg-activation-key',
        default='',
        help=_('Activation key to use for registration.')
    )
    return parser
def take_action(self, parsed_args):
    """Entry point: validate arguments, run pre-checks and deploy.

    :param parsed_args: parsed CLI arguments from get_parser()
    :returns: True on success, False on a DeploymentError; returns
        None (early exit) when a fatal pre-check aborts the deploy
    """
    self.log.debug("take_action(%s)" % parsed_args)

    self._validate_args(parsed_args)
    errors, warnings = self._predeploy_verify_capabilities(parsed_args)
    if errors > 0:
        self.log.error(
            "Configuration has %d errors, fix them before proceeding. "
            "Ignoring these errors is likely to lead to a failed deploy.",
            errors)
        # Errors abort only when either fatal flag is set; otherwise
        # the deploy proceeds best-effort.
        if parsed_args.validation_warnings_fatal or \
                parsed_args.validation_errors_fatal:
            return
    if warnings > 0:
        self.log.error(
            "Configuration has %d warnings, fix them before proceeding. ",
            warnings)
        if parsed_args.validation_warnings_fatal:
            return
    else:
        self.log.info("SUCCESS: No warnings or errors in deploy "
                      "configuration, proceeding.")

    clients = self.app.client_manager
    orchestration_client = clients.rdomanager_oscplugin.orchestration()

    # None means no existing stack: this will be a create, not update.
    stack = self._get_stack(orchestration_client, parsed_args.stack)
    stack_create = stack is None

    try:
        self._pre_heat_deploy()

        if parsed_args.rhel_reg:
            # Check that the options required by the chosen
            # registration method were supplied before deploying.
            if parsed_args.reg_method == 'satellite':
                sat_required_args = (parsed_args.reg_org and
                                     parsed_args.reg_sat_url and
                                     parsed_args.reg_activation_key)
                if not sat_required_args:
                    raise exceptions.DeploymentError(
                        "ERROR: In order to use satellite registration, "
                        "you must specify --reg-org, --reg-sat-url, and "
                        "--reg-activation-key.")
            else:
                portal_required_args = (parsed_args.reg_org and
                                        parsed_args.reg_activation_key)
                if not portal_required_args:
                    raise exceptions.DeploymentError(
                        "ERROR: In order to use portal registration, you "
                        "must specify --reg-org, and "
                        "--reg-activation-key.")

        if parsed_args.templates:
            self._deploy_tripleo_heat_templates(stack, parsed_args)
        else:
            self._deploy_tuskar(stack, parsed_args)

        # Get a new copy of the stack after stack update/create. If it was
        # a create then the previous stack object would be None.
        stack = self._get_stack(orchestration_client, parsed_args.stack)
        self._create_overcloudrc(stack, parsed_args)
        self._create_tempest_deployer_input()

        # Endpoint/flavor post-configuration only applies to a fresh
        # overcloud, not to updates.
        if stack_create:
            self._deploy_postconfig(stack, parsed_args)

        overcloud_endpoint = self._get_overcloud_endpoint(stack)
        print("Overcloud Endpoint: {0}".format(overcloud_endpoint))
        print("Overcloud Deployed")
        return True
    except exceptions.DeploymentError as err:
        print("Deployment failed: ", err, file=sys.stderr)
        return False
|
rdo-management/python-rdomanager-oscplugin | rdomanager_oscplugin/v1/overcloud_deploy.py | DeployOvercloud._heat_deploy | python | def _heat_deploy(self, stack, stack_name, template_path, parameters,
environments, timeout):
self.log.debug("Processing environment files")
env_files, env = (
template_utils.process_multiple_environments_and_files(
environments))
self.log.debug("Getting template contents")
template_files, template = template_utils.get_template_contents(
template_path)
files = dict(list(template_files.items()) + list(env_files.items()))
clients = self.app.client_manager
orchestration_client = clients.rdomanager_oscplugin.orchestration()
self.log.debug("Deploying stack: %s", stack_name)
self.log.debug("Deploying template: %s", template)
self.log.debug("Deploying parameters: %s", parameters)
self.log.debug("Deploying environment: %s", env)
self.log.debug("Deploying files: %s", files)
stack_args = {
'stack_name': stack_name,
'template': template,
'parameters': parameters,
'environment': env,
'files': files
}
if timeout:
stack_args['timeout_mins'] = timeout
if stack is None:
self.log.info("Performing Heat stack create")
orchestration_client.stacks.create(**stack_args)
else:
self.log.info("Performing Heat stack update")
# Make sure existing parameters for stack are reused
stack_args['existing'] = 'true'
orchestration_client.stacks.update(stack.id, **stack_args)
create_result = utils.wait_for_stack_ready(
orchestration_client, stack_name)
if not create_result:
if stack is None:
raise Exception("Heat Stack create failed.")
else:
raise Exception("Heat Stack update failed.") | Verify the Baremetal nodes are available and do a stack update | train | https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/v1/overcloud_deploy.py#L401-L451 | null | class DeployOvercloud(command.Command):
"""Deploy Overcloud"""
log = logging.getLogger(__name__ + ".DeployOvercloud")
predeploy_errors = 0
predeploy_warnings = 0
def set_overcloud_passwords(self, parameters, parsed_args):
    """Add passwords to the parameters dictionary

    Generates (or reuses) the overcloud service passwords, stores them
    on self.passwords, and writes them into *parameters* using either
    the flat THT key names (--templates) or the Tuskar role-scoped
    key names (--plan).

    :param parameters: A dictionary for the passwords to be added to
    :type parameters: dict
    :param parsed_args: parsed CLI arguments; .templates selects the
        key naming scheme
    """
    undercloud_ceilometer_snmpd_password = utils.get_config_value(
        "auth", "undercloud_ceilometer_snmpd_password")

    # Keep the full password map for later use (_create_overcloudrc,
    # _deploy_postconfig).
    self.passwords = passwords = utils.generate_overcloud_passwords()
    ceilometer_pass = passwords['OVERCLOUD_CEILOMETER_PASSWORD']
    ceilometer_secret = passwords['OVERCLOUD_CEILOMETER_SECRET']
    if parsed_args.templates:
        # THT deploy: flat parameter names.
        parameters['AdminPassword'] = passwords['OVERCLOUD_ADMIN_PASSWORD']
        parameters['AdminToken'] = passwords['OVERCLOUD_ADMIN_TOKEN']
        parameters['CeilometerPassword'] = ceilometer_pass
        parameters['CeilometerMeteringSecret'] = ceilometer_secret
        parameters['CinderPassword'] = passwords[
            'OVERCLOUD_CINDER_PASSWORD']
        parameters['GlancePassword'] = passwords[
            'OVERCLOUD_GLANCE_PASSWORD']
        parameters['HeatPassword'] = passwords['OVERCLOUD_HEAT_PASSWORD']
        parameters['HeatStackDomainAdminPassword'] = passwords[
            'OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD']
        parameters['NeutronPassword'] = passwords[
            'OVERCLOUD_NEUTRON_PASSWORD']
        parameters['NovaPassword'] = passwords['OVERCLOUD_NOVA_PASSWORD']
        parameters['SwiftHashSuffix'] = passwords['OVERCLOUD_SWIFT_HASH']
        parameters['SwiftPassword'] = passwords['OVERCLOUD_SWIFT_PASSWORD']
        parameters['SnmpdReadonlyUserPassword'] = (
            undercloud_ceilometer_snmpd_password)
    else:
        # Tuskar deploy: role-scoped "<Role>::<Param>" names; the same
        # secret may need to be set on several roles.
        parameters['Controller-1::AdminPassword'] = passwords[
            'OVERCLOUD_ADMIN_PASSWORD']
        parameters['Controller-1::AdminToken'] = passwords[
            'OVERCLOUD_ADMIN_TOKEN']
        parameters['Compute-1::AdminPassword'] = passwords[
            'OVERCLOUD_ADMIN_PASSWORD']
        parameters['Controller-1::SnmpdReadonlyUserPassword'] = (
            undercloud_ceilometer_snmpd_password)
        parameters['Cinder-Storage-1::SnmpdReadonlyUserPassword'] = (
            undercloud_ceilometer_snmpd_password)
        parameters['Swift-Storage-1::SnmpdReadonlyUserPassword'] = (
            undercloud_ceilometer_snmpd_password)
        parameters['Compute-1::SnmpdReadonlyUserPassword'] = (
            undercloud_ceilometer_snmpd_password)
        parameters['Controller-1::CeilometerPassword'] = ceilometer_pass
        parameters[
            'Controller-1::CeilometerMeteringSecret'] = ceilometer_secret
        parameters['Compute-1::CeilometerPassword'] = ceilometer_pass
        parameters[
            'Compute-1::CeilometerMeteringSecret'] = ceilometer_secret
        parameters['Controller-1::CinderPassword'] = (
            passwords['OVERCLOUD_CINDER_PASSWORD'])
        parameters['Controller-1::GlancePassword'] = (
            passwords['OVERCLOUD_GLANCE_PASSWORD'])
        parameters['Controller-1::HeatPassword'] = (
            passwords['OVERCLOUD_HEAT_PASSWORD'])
        parameters['Controller-1::HeatStackDomainAdminPassword'] = (
            passwords['OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD'])
        parameters['Controller-1::NeutronPassword'] = (
            passwords['OVERCLOUD_NEUTRON_PASSWORD'])
        parameters['Compute-1::NeutronPassword'] = (
            passwords['OVERCLOUD_NEUTRON_PASSWORD'])
        parameters['Controller-1::NovaPassword'] = (
            passwords['OVERCLOUD_NOVA_PASSWORD'])
        parameters['Compute-1::NovaPassword'] = (
            passwords['OVERCLOUD_NOVA_PASSWORD'])
        parameters['Controller-1::SwiftHashSuffix'] = (
            passwords['OVERCLOUD_SWIFT_HASH'])
        parameters['Controller-1::SwiftPassword'] = (
            passwords['OVERCLOUD_SWIFT_PASSWORD'])
def _get_stack(self, orchestration_client, stack_name):
    """Get the ID for the current deployed overcloud stack if it exists."""
    try:
        stack = orchestration_client.stacks.get(stack_name)
    except HTTPNotFound:
        # No existing stack: the caller will perform a create.
        self.log.info("No stack found, will be doing a stack create")
        return None
    self.log.info("Stack found, will be doing a stack update")
    return stack
def _update_paramaters(self, args, network_client, stack):
    """Assemble the heat/tuskar parameter dict for this deployment.

    (Method name keeps the historical "paramaters" typo because it is
    part of the established internal interface.)

    :param args: parsed CLI arguments
    :param network_client: neutron client, used to find the ctlplane
        network
    :param stack: the existing overcloud stack, or None on first
        deploy (some parameters are only set at create time)
    :returns: dict of parameter name -> value; flat THT names when
        --templates is used, role-scoped "<Role>::<Param>" names for
        a Tuskar (--plan) deploy
    """
    if args.templates:
        parameters = PARAMETERS.copy()
        if stack is None:
            # Create-only defaults must not override an existing
            # stack's values on update.
            parameters.update(NEW_STACK_PARAMETERS)
    else:
        parameters = {}

    self.log.debug("Generating overcloud passwords")
    self.set_overcloud_passwords(parameters, args)

    self.log.debug("Getting ctlplane from Neutron")
    net = network_client.api.find_attr('networks', 'ctlplane')
    parameters['NeutronControlPlaneID'] = net['id']

    # Build the (parameter name, CLI argument attribute) mapping for
    # whichever deploy flavor is in use.
    if args.templates:
        param_args = (
            ('NeutronPublicInterface', 'neutron_public_interface'),
            ('NeutronBridgeMappings', 'neutron_bridge_mappings'),
            ('NeutronFlatNetworks', 'neutron_flat_networks'),
            ('HypervisorNeutronPhysicalBridge', 'neutron_physical_bridge'),
            ('NtpServer', 'ntp_server'),
            ('ControllerCount', 'control_scale'),
            ('ComputeCount', 'compute_scale'),
            ('ObjectStorageCount', 'swift_storage_scale'),
            ('BlockStorageCount', 'block_storage_scale'),
            ('CephStorageCount', 'ceph_storage_scale'),
            ('OvercloudControlFlavor', 'control_flavor'),
            ('OvercloudComputeFlavor', 'compute_flavor'),
            ('OvercloudBlockStorageFlavor', 'block_storage_flavor'),
            ('OvercloudSwiftStorageFlavor', 'swift_storage_flavor'),
            ('OvercloudCephStorageFlavor', 'ceph_storage_flavor'),
            ('NeutronNetworkVLANRanges', 'neutron_network_vlan_ranges'),
            ('NeutronMechanismDrivers', 'neutron_mechanism_drivers')
        )

        if stack is None:
            # Network/tunnel type choices are fixed at create time.
            new_stack_args = (
                ('NeutronNetworkType', 'neutron_network_type'),
                ('NeutronTunnelIdRanges', 'neutron_tunnel_id_ranges'),
                ('NeutronTunnelTypes', 'neutron_tunnel_types'),
                ('NeutronVniRanges', 'neutron_vni_ranges'),
                ('NovaComputeLibvirtType', 'libvirt_type'),
            )
            param_args = param_args + new_stack_args

            if args.neutron_disable_tunneling is not None:
                neutron_enable_tunneling = (
                    not args.neutron_disable_tunneling)
                parameters.update({
                    'NeutronEnableTunnelling': neutron_enable_tunneling,
                })
    else:
        # Tuskar deploy: most settings must be applied to both the
        # Controller and Compute roles.
        param_args = (
            ('Controller-1::NeutronPublicInterface',
             'neutron_public_interface'),
            ('Compute-1::NeutronPublicInterface',
             'neutron_public_interface'),
            ('Controller-1::NeutronBridgeMappings',
             'neutron_bridge_mappings'),
            ('Compute-1::NeutronBridgeMappings',
             'neutron_bridge_mappings'),
            ('Controller-1::NeutronFlatNetworks', 'neutron_flat_networks'),
            ('Compute-1::NeutronFlatNetworks', 'neutron_flat_networks'),
            ('Compute-1::NeutronPhysicalBridge',
             'neutron_physical_bridge'),
            ('Controller-1::NtpServer', 'ntp_server'),
            ('Compute-1::NtpServer', 'ntp_server'),
            ('Controller-1::NeutronNetworkVLANRanges',
             'neutron_network_vlan_ranges'),
            ('Compute-1::NeutronNetworkVLANRanges',
             'neutron_network_vlan_ranges'),
            ('Controller-1::NeutronMechanismDrivers',
             'neutron_mechanism_drivers'),
            ('Compute-1::NeutronMechanismDrivers',
             'neutron_mechanism_drivers'),
            ('Controller-1::count', 'control_scale'),
            ('Compute-1::count', 'compute_scale'),
            ('Swift-Storage-1::count', 'swift_storage_scale'),
            ('Cinder-Storage-1::count', 'block_storage_scale'),
            ('Ceph-Storage-1::count', 'ceph_storage_scale'),
            ('Cinder-Storage-1::Flavor', 'block_storage_flavor'),
            ('Compute-1::Flavor', 'compute_flavor'),
            ('Controller-1::Flavor', 'control_flavor'),
            ('Swift-Storage-1::Flavor', 'swift_storage_flavor'),
            ('Ceph-Storage-1::Flavor', 'ceph_storage_flavor'),
        )

        if stack is None:
            new_stack_args = (
                ('Controller-1::NeutronNetworkType',
                 'neutron_network_type'),
                ('Compute-1::NeutronNetworkType', 'neutron_network_type'),
                ('Controller-1::NeutronTunnelTypes',
                 'neutron_tunnel_types'),
                ('Compute-1::NeutronTunnelTypes', 'neutron_tunnel_types'),
                ('Compute-1::NovaComputeLibvirtType', 'libvirt_type'),
                ('Controller-1::NeutronTunnelIdRanges',
                 'neutron_tunnel_id_ranges'),
                ('Controller-1::NeutronVniRanges', 'neutron_vni_ranges'),
                ('Compute-1::NeutronTunnelIdRanges',
                 'neutron_tunnel_id_ranges'),
                ('Compute-1::NeutronVniRanges', 'neutron_vni_ranges'),
            )
            param_args = param_args + new_stack_args

            if args.neutron_disable_tunneling is not None:
                neutron_enable_tunneling = (
                    not args.neutron_disable_tunneling)
                parameters.update({
                    'Controller-1::NeutronEnableTunnelling':
                    neutron_enable_tunneling,
                    'Compute-1::NeutronEnableTunnelling':
                    neutron_enable_tunneling,
                })

    # Update parameters from commandline
    for param, arg in param_args:
        if getattr(args, arg, None) is not None:
            parameters[param] = getattr(args, arg)

    # Scaling needs extra parameters
    number_controllers = max((
        int(parameters.get('ControllerCount', 0)),
        int(parameters.get('Controller-1::count', 0))
    ))
    if number_controllers > 1:
        # HA layout: L3 HA on, agent failover off; needs an NTP
        # server so the controllers stay in sync.
        if not args.ntp_server:
            raise Exception('Specify --ntp-server when using multiple'
                            ' controllers (with HA).')

        if args.templates:
            parameters.update({
                'NeutronL3HA': True,
                'NeutronAllowL3AgentFailover': False,
            })
        else:
            parameters.update({
                'Controller-1::NeutronL3HA': True,
                'Controller-1::NeutronAllowL3AgentFailover': False,
                'Compute-1::NeutronL3HA': True,
                'Compute-1::NeutronAllowL3AgentFailover': False,
            })
    else:
        if args.templates:
            parameters.update({
                'NeutronL3HA': False,
                'NeutronAllowL3AgentFailover': False,
            })
        else:
            parameters.update({
                'Controller-1::NeutronL3HA': False,
                'Controller-1::NeutronAllowL3AgentFailover': False,
                'Compute-1::NeutronL3HA': False,
                'Compute-1::NeutronAllowL3AgentFailover': False,
            })

    # set at least 3 dhcp_agents_per_network
    dhcp_agents_per_network = (number_controllers if number_controllers and
                               number_controllers > 3 else 3)

    if args.templates:
        parameters.update({
            'NeutronDhcpAgentsPerNetwork': dhcp_agents_per_network,
        })
    else:
        parameters.update({
            'Controller-1::NeutronDhcpAgentsPerNetwork':
            dhcp_agents_per_network,
        })

    if max((int(parameters.get('CephStorageCount', 0)),
            int(parameters.get('Ceph-Storage-1::count', 0)))) > 0:

        if stack is None:
            # Ceph cluster identity/keys are generated once at create
            # time and must not be regenerated on update.
            parameters.update({
                'CephClusterFSID': six.text_type(uuid.uuid1()),
                'CephMonKey': utils.create_cephx_key(),
                'CephAdminKey': utils.create_cephx_key()
            })

    return parameters
def _create_registration_env(self, args):
    """Build the heat environment files for RHEL registration.

    :param args: parsed CLI arguments carrying the --reg-* options
    :returns: list of three environment file paths: the registration
        resource registry, the rhel-registration environment and a
        temporary file with the user-supplied registration values
    """
    if args.templates:
        tht_root = args.templates
    else:
        tht_root = TRIPLEO_HEAT_TEMPLATES

    environment = os.path.join(tht_root,
                               RHEL_REGISTRATION_EXTRACONFIG_NAME,
                               'environment-rhel-registration.yaml')
    registry = os.path.join(tht_root, RHEL_REGISTRATION_EXTRACONFIG_NAME,
                            'rhel-registration-resource-registry.yaml')
    # YAML parameter_defaults snippet built from the CLI options.
    user_env = ("parameter_defaults:\n"
                " rhel_reg_method: \"%(method)s\"\n"
                " rhel_reg_org: \"%(org)s\"\n"
                " rhel_reg_force: \"%(force)s\"\n"
                " rhel_reg_sat_url: \"%(sat_url)s\"\n"
                " rhel_reg_activation_key: \"%(activation_key)s\"\n"
                % {'method': args.reg_method,
                   'org': args.reg_org,
                   'force': args.reg_force,
                   'sat_url': args.reg_sat_url,
                   'activation_key': args.reg_activation_key})
    handle, user_env_file = tempfile.mkstemp()
    # FIX: write through the descriptor returned by mkstemp instead of
    # reopening the file by name, which leaked the original fd.
    with os.fdopen(handle, 'w') as temp_file:
        temp_file.write(user_env)
    return [registry, environment, user_env_file]
def _get_overcloud_endpoint(self, stack):
    """Return the overcloud Keystone URL from the stack outputs.

    Returns None when no 'KeystoneURL' output is present.
    """
    outputs = stack.to_dict().get('outputs', {})
    keystone_urls = (output['output_value'] for output in outputs
                     if output['output_key'] == 'KeystoneURL')
    return next(keystone_urls, None)
def _get_service_ips(self, stack):
    """Map every stack output key to its value (service VIPs etc.)."""
    return {output['output_key']: output['output_value']
            for output in stack.to_dict().get('outputs', {})}
def _pre_heat_deploy(self):
    """Setup before the Heat stack create or update has been done."""
    compute_client = self.app.client_manager.compute
    self.log.debug("Checking hypervisor stats")
    # Bare-metal capacity is surfaced through the hypervisor stats;
    # None means the expected capacity is not yet available.
    if utils.check_hypervisor_stats(compute_client) is None:
        raise exceptions.DeploymentError(
            "Expected hypervisor stats not met")
    return True
def _deploy_tripleo_heat_templates(self, stack, parsed_args):
    """Deploy the fixed templates in TripleO Heat Templates"""
    clients = self.app.client_manager
    network_client = clients.network

    parameters = self._update_paramaters(
        parsed_args, network_client, stack)

    # Abort early if the requested scales exceed available nodes.
    utils.check_nodes_count(
        self.app.client_manager.rdomanager_oscplugin.baremetal(),
        stack,
        parameters,
        {
            'ControllerCount': 1,
            'ComputeCount': 1,
            'ObjectStorageCount': 0,
            'BlockStorageCount': 0,
            'CephStorageCount': 0,
        }
    )

    tht_root = parsed_args.templates

    print("Deploying templates in the directory {0}".format(
        os.path.abspath(tht_root)))

    self.log.debug("Creating Environment file")
    env_path = utils.create_environment_file()

    if stack is None:
        # Keystone PKI material is generated only at create time.
        self.log.debug("Creating Keystone certificates")
        keystone_pki.generate_certs_into_json(env_path, False)

    resource_registry_path = os.path.join(tht_root, RESOURCE_REGISTRY_NAME)
    environments = [resource_registry_path, env_path]
    if parsed_args.rhel_reg:
        reg_env = self._create_registration_env(parsed_args)
        environments.extend(reg_env)
    if parsed_args.environment_files:
        # User-supplied -e files come last so they take precedence.
        environments.extend(parsed_args.environment_files)

    overcloud_yaml = os.path.join(tht_root, OVERCLOUD_YAML_NAME)

    self._heat_deploy(stack, parsed_args.stack, overcloud_yaml, parameters,
                      environments, parsed_args.timeout)
def _deploy_tuskar(self, stack, parsed_args):
    """Deploy the overcloud from a Tuskar management plan.

    Downloads the plan's templates into ``--output-dir`` (or a temp
    dir), injects passwords/keystone certs into the plan parameters,
    persists them back to Tuskar, then hands off to _heat_deploy().

    :param stack: existing overcloud heat stack, or None on create
    :param parsed_args: parsed CLI args; ``plan`` names the Tuskar plan
    """
    clients = self.app.client_manager
    management = clients.rdomanager_oscplugin.management()
    network_client = clients.network
    # TODO(dmatthews): The Tuskar client has very similar code to this for
    # downloading templates. It should be refactored upstream so we can use
    # it.
    if parsed_args.output_dir:
        output_dir = parsed_args.output_dir
    else:
        output_dir = tempfile.mkdtemp()
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    management_plan = tuskarutils.find_resource(
        management.plans, parsed_args.plan)
    # retrieve templates
    templates = management.plans.templates(management_plan.uuid)
    parameters = self._update_paramaters(
        parsed_args, network_client, stack)
    # Fail early if the requested node counts exceed what ironic has
    # available (minimums: 1 controller, 1 compute).
    utils.check_nodes_count(
        self.app.client_manager.rdomanager_oscplugin.baremetal(),
        stack,
        parameters,
        {
            'Controller-1::count': 1,
            'Compute-1::count': 1,
            'Swift-Storage-1::count': 0,
            'Cinder-Storage-1::count': 0,
            'Ceph-Storage-1::count': 0,
        }
    )
    # Keystone PKI material is only generated on initial create; an
    # update keeps the certificates already stored in the plan.
    if stack is None:
        ca_key_pem, ca_cert_pem = keystone_pki.create_ca_pair()
        signing_key_pem, signing_cert_pem = (
            keystone_pki.create_signing_pair(ca_key_pem, ca_cert_pem))
        parameters['Controller-1::KeystoneCACertificate'] = ca_cert_pem
        parameters['Controller-1::KeystoneSigningCertificate'] = (
            signing_cert_pem)
        parameters['Controller-1::KeystoneSigningKey'] = signing_key_pem
    # Save the parameters to Tuskar so they can be used when redeploying.
    # Tuskar expects to get all values as strings. So we convert them all
    # below.
    management.plans.patch(
        management_plan.uuid,
        [{'name': x[0], 'value': six.text_type(x[1])}
         for x in parameters.items()]
    )
    # write file for each key-value in templates
    print("The following templates will be written:")
    for template_name, template_content in templates.items():
        # It's possible to organize the role templates and their dependent
        # files into directories, in which case the template_name will
        # carry the directory information. If that's the case, first
        # create the directory structure (if it hasn't already been
        # created by another file in the templates list).
        template_dir = os.path.dirname(template_name)
        output_template_dir = os.path.join(output_dir, template_dir)
        if template_dir and not os.path.exists(output_template_dir):
            os.makedirs(output_template_dir)
        filename = os.path.join(output_dir, template_name)
        with open(filename, 'w+') as template_file:
            template_file.write(template_content)
        print(filename)
    overcloud_yaml = os.path.join(output_dir, 'plan.yaml')
    environment_yaml = os.path.join(output_dir, 'environment.yaml')
    environments = [environment_yaml, ]
    if parsed_args.rhel_reg:
        reg_env = self._create_registration_env(parsed_args)
        environments.extend(reg_env)
    if parsed_args.environment_files:
        environments.extend(parsed_args.environment_files)
    self._heat_deploy(stack, parsed_args.stack, overcloud_yaml, parameters,
                      environments, parsed_args.timeout)
def _create_overcloudrc(self, stack, parsed_args):
    """Write a "<stack-name>rc" credentials file for the new overcloud.

    The file exports the environment variables needed to talk to the
    deployed overcloud as the admin user.

    :param stack: the deployed overcloud heat stack
    :param parsed_args: parsed CLI args (``no_proxy`` is folded into
        the exported no_proxy variable)
    """
    overcloud_endpoint = self._get_overcloud_endpoint(stack)
    overcloud_ip = six.moves.urllib.parse.urlparse(
        overcloud_endpoint).hostname
    rc_params = {
        'NOVA_VERSION': '1.1',
        'COMPUTE_API_VERSION': '1.1',
        'OS_USERNAME': 'admin',
        'OS_TENANT_NAME': 'admin',
        'OS_NO_CACHE': 'True',
        'OS_CLOUDNAME': stack.stack_name,
        # Keep direct traffic to the overcloud off any configured proxy.
        'no_proxy': "%(no_proxy)s,%(overcloud_ip)s" % {
            'no_proxy': parsed_args.no_proxy,
            'overcloud_ip': overcloud_ip,
        }
    }
    rc_params.update({
        'OS_PASSWORD': self.passwords['OVERCLOUD_ADMIN_PASSWORD'],
        # Reuse the endpoint fetched above instead of issuing a second,
        # identical _get_overcloud_endpoint() lookup.
        'OS_AUTH_URL': overcloud_endpoint,
    })
    with open('%src' % stack.stack_name, 'w') as f:
        for key, value in rc_params.items():
            f.write("export %(key)s=%(value)s\n" %
                    {'key': key, 'value': value})
def _create_tempest_deployer_input(self):
    """Write tempest-deployer-input.conf into the current directory.

    The file carries the tempest option overrides that differ from the
    tempest defaults for this deployment.
    """
    # (section, option, value) triples, written in this order.
    overrides = (
        # Does the test environment support obtaining instance serial
        # console output? (default: true)
        # set in [nova.serial_console]->enabled
        ('compute-feature-enabled', 'console_output', 'false'),
        # Role to add to users created for swift tests to enable
        # creating containers (default: 'Member')
        # keystone role-list returns this role
        ('object-storage', 'operator_role', 'swiftoperator'),
        # Role required for users to be able to manage stacks
        # (default: 'heat_stack_owner')
        # keystone role-list returns this role
        ('orchestration', 'stack_owner_role', 'heat_stack_user'),
        # Name of the backend1 (must be declared in cinder.conf)
        # (default: 'BACKEND_1')
        # set in [cinder]->enabled_backends
        ('volume', 'backend1_name', 'tripleo_iscsi'),
        # Update bootable status of a volume Not implemented on icehouse
        # (default: false)
        # python-cinderclient supports set-bootable
        ('volume-feature-enabled', 'bootable', 'true'),
    )
    config = configparser.ConfigParser()
    for section, option, value in overrides:
        if not config.has_section(section):
            config.add_section(section)
        config.set(section, option, value)
    with open('tempest-deployer-input.conf', 'w+') as config_file:
        config.write(config_file)
def _deploy_postconfig(self, stack, parsed_args):
    """Run post-deploy configuration against a freshly created stack.

    Initializes keystone on the overcloud, registers all service
    endpoints and creates a demo flavor. Only invoked after a stack
    *create* (take_action skips it on updates).

    :param stack: the newly created overcloud heat stack
    :param parsed_args: parsed CLI args
    """
    self.log.debug("_deploy_postconfig(%s)" % parsed_args)
    passwords = self.passwords
    overcloud_endpoint = self._get_overcloud_endpoint(stack)
    overcloud_ip = six.moves.urllib.parse.urlparse(
        overcloud_endpoint).hostname
    # Make sure direct requests to the overcloud bypass any proxy.
    no_proxy = [os.environ.get('no_proxy'), overcloud_ip]
    os.environ['no_proxy'] = ','.join(
        [x for x in no_proxy if x is not None])
    service_ips = self._get_service_ips(stack)
    utils.remove_known_hosts(overcloud_ip)
    keystone_ip = service_ips.get('KeystoneAdminVip')
    if not keystone_ip:
        keystone_ip = overcloud_ip
    keystone.initialize(
        keystone_ip,
        passwords['OVERCLOUD_ADMIN_TOKEN'],
        'admin@example.com',
        passwords['OVERCLOUD_ADMIN_PASSWORD'],
        public=overcloud_ip,
        user='heat-admin')
    # NOTE(bcrochet): Bad hack. Remove the ssl_port info from the
    # os_cloud_config.SERVICES dictionary
    # (six.iteritems works on both py2 and py3 and matches the style of
    # the loop below; dict.iteritems() is Python 2 only.)
    for service_name, data in six.iteritems(keystone.SERVICES):
        data.pop('ssl_port', None)
    services = {}
    for service, data in six.iteritems(utils.SERVICE_LIST):
        service_data = data.copy()
        service_data.pop('password_field', None)
        password_field = data.get('password_field')
        if password_field:
            service_data['password'] = passwords[password_field]
        # e.g. 'glancev2' -> 'GlanceInternalVip' (version suffix dropped)
        service_name = re.sub('v[0-9]+', '',
                              service.capitalize() + 'InternalVip')
        internal_vip = service_ips.get(service_name)
        if internal_vip:
            service_data['internal_host'] = internal_vip
        services.update({service: service_data})
    keystone_client = clients.get_keystone_client(
        'admin',
        passwords['OVERCLOUD_ADMIN_PASSWORD'],
        'admin',
        overcloud_endpoint)
    keystone.setup_endpoints(
        services,
        client=keystone_client,
        os_auth_url=overcloud_endpoint,
        public_host=overcloud_ip)
    compute_client = clients.get_nova_bm_client(
        'admin',
        passwords['OVERCLOUD_ADMIN_PASSWORD'],
        'admin',
        overcloud_endpoint)
    compute_client.flavors.create('m1.demo', 512, 1, 10, 'auto')
def _validate_args(self, parsed_args):
    """Check that the neutron network-type / tunnel-type options agree.

    :raises oscexc.CommandError: when a network type is given without
        tunnel types, or is not one of the given tunnel types
    """
    net_type = parsed_args.neutron_network_type
    tunnels = parsed_args.neutron_tunnel_types
    if not net_type:
        # Nothing to cross-check without a network type.
        return
    if not tunnels:
        raise oscexc.CommandError("Neutron tunnel types must be specified "
                                  "when Neutron network type is specified")
    if net_type not in tunnels:
        raise oscexc.CommandError("Neutron network type must be in "
                                  "Neutron tunnel types "
                                  "(%s) " % tunnels)
def _predeploy_verify_capabilities(self, parsed_args):
    """Run all pre-deployment sanity checks.

    Resets the predeploy_errors / predeploy_warnings counters and lets
    the _check_* helpers accumulate into them, then returns both so
    take_action() can decide whether to abort.

    :returns: tuple (error_count, warning_count)
    """
    self.predeploy_errors = 0
    self.predeploy_warnings = 0
    self.log.debug("Starting _pre_verify_capabilities")
    bm_client = self.app.client_manager.rdomanager_oscplugin.baremetal()
    self._check_boot_images()
    self._check_flavors_exist(parsed_args)
    # node.list() returns summaries only; re-fetch each node for the
    # full driver_info needed by the boot-configuration check.
    for node in bm_client.node.list():
        node = bm_client.node.get(node.uuid)
        self.log.debug("Checking config for Node {0}".format(node.uuid))
        self._check_ironic_boot_configuration(node)
    flavor_profile_map = self._collect_flavor_profiles([
        parsed_args.control_flavor,
        parsed_args.compute_flavor,
        parsed_args.ceph_storage_flavor,
        parsed_args.block_storage_flavor,
        parsed_args.swift_storage_flavor,
    ])
    node_profile_map = self._collect_node_profiles()
    for target, flavor, scale in [
        ('control', parsed_args.control_flavor,
         parsed_args.control_scale),
        ('compute', parsed_args.compute_flavor,
         parsed_args.compute_scale),
        ('ceph-storage', parsed_args.ceph_storage_flavor,
         parsed_args.ceph_storage_scale),
        ('block-storage', parsed_args.block_storage_flavor,
         parsed_args.block_storage_scale),
        ('swift-storage', parsed_args.swift_storage_flavor,
         parsed_args.swift_storage_scale),
    ]:
        # Roles that won't be deployed need no profile verification.
        if scale == 0 or flavor is None:
            self.log.debug("Skipping verification of %s profiles because "
                           "none will be deployed", flavor)
            continue
        self._check_profiles(
            target, flavor, scale,
            flavor_profile_map,
            node_profile_map)
    # Nodes carrying no profile tag will never be picked by any role.
    if len(node_profile_map.get(None, [])) > 0:
        self.predeploy_warnings += 1
        self.log.warning(
            "There are %d ironic nodes with no profile that will "
            "not be used: %s",
            len(node_profile_map[None]),
            ', '.join(node_profile_map[None])
        )
    return self.predeploy_errors, self.predeploy_warnings
# Cached glance IDs of the bm-deploy kernel/ramdisk images; populated
# lazily by _image_ids() on first use.
__kernel_id = None
__ramdisk_id = None
def _image_ids(self):
    """Return (kernel_id, ramdisk_id) for the bm-deploy glance images.

    Results are cached on the instance; either ID may be None when the
    lookup fails (e.g. the image name is ambiguous in glance).
    """
    if self.__kernel_id is not None and self.__ramdisk_id is not None:
        return self.__kernel_id, self.__ramdisk_id
    image_client = self.app.client_manager.image

    def lookup(image_name):
        # find_resource raises AttributeError when the name does not
        # resolve to exactly one image.
        try:
            return osc_utils.find_resource(
                image_client.images, image_name).id
        except AttributeError as e:
            self.log.error("Please make sure there is only one image "
                           "named '%s' in glance." % image_name)
            self.log.exception(e)
            return None

    kernel_id = lookup('bm-deploy-kernel')
    ramdisk_id = lookup('bm-deploy-ramdisk')
    self.log.debug("Using kernel ID: {0} and ramdisk ID: {1}".format(
        kernel_id, ramdisk_id))
    self.__kernel_id = kernel_id
    self.__ramdisk_id = ramdisk_id
    return kernel_id, ramdisk_id
def _collect_node_profiles(self):
    """Gather a map of profile -> [node_uuid] for ironic boot profiles.

    Nodes with no ``profile:`` capability are grouped under the None
    key. Maintenance-mode nodes are excluded.
    """
    baremetal = self.app.client_manager.rdomanager_oscplugin.baremetal()
    nodes_by_profile = collections.defaultdict(list)
    for summary in baremetal.node.list(maintenance=False):
        # The list call returns summaries; fetch full node details for
        # the capabilities property.
        detail = baremetal.node.get(summary.uuid)
        capabilities = detail.properties.get('capabilities', '')
        tagged = re.findall(r'profile:(.*?)(?:,|$)', capabilities)
        for profile in (tagged or [None]):
            nodes_by_profile[profile].append(detail.uuid)
    return dict(nodes_by_profile)
def _collect_flavor_profiles(self, flavors):
    """Map each deployment flavor name to its capabilities profile.

    Flavors not listed in *flavors* are skipped. A flavor whose
    'capabilities:profile' key is empty maps to None. Also warns
    (incrementing predeploy_warnings) about flavors that cannot PXE
    boot because 'capabilities:boot_option' is not 'local'.
    """
    compute_client = self.app.client_manager.compute
    profiles_by_flavor = {}
    for flavor in compute_client.flavors.list():
        if flavor.name not in flavors:
            self.log.debug("Flavor {} isn't used in this deployment, "
                           "skipping it".format(flavor.name))
            continue
        profile = flavor.get_keys().get('capabilities:profile')
        profiles_by_flavor[flavor.name] = (
            None if profile == '' else profile)
        boot_option = flavor.get_keys().get('capabilities:boot_option',
                                            '')
        if boot_option != 'local':
            self.predeploy_warnings += 1
            self.log.error(
                'Flavor %s "capabilities:boot_option" is not set to '
                '"local". Nodes must have ability to PXE boot from '
                'deploy image.', flavor.name)
            self.log.error(
                'Recommended solution: openstack flavor set --property '
                '"cpu_arch"="x86_64" --property '
                '"capabilities:boot_option"="local" ' + flavor.name)
    return profiles_by_flavor
def _check_profiles(self, target, flavor, scale,
                    flavor_profile_map,
                    node_profile_map):
    """Verify enough ironic nodes are tagged for one role's flavor.

    Increments predeploy_errors when the flavor has no profile or when
    fewer nodes carry the profile than the requested scale.
    """
    profile = flavor_profile_map.get(flavor)
    if profile is None:
        self.predeploy_errors += 1
        self.log.error(
            'Warning: The flavor selected for --%s-flavor "%s" has no '
            'profile associated', target, flavor)
        self.log.error(
            'Recommendation: assign a profile with openstack flavor set '
            '--property "capabilities:profile"="PROFILE_NAME" %s',
            flavor)
        return
    tagged_nodes = node_profile_map.get(profile, [])
    if len(tagged_nodes) < scale:
        self.predeploy_errors += 1
        self.log.error(
            "Error: %s of %s requested ironic nodes tagged to profile %s "
            "(for flavor %s)",
            len(tagged_nodes), scale, profile, flavor
        )
        self.log.error(
            "Recommendation: tag more nodes using ironic node-update "
            "<NODE ID> replace properties/capabilities=profile:%s,"
            "boot_option:local", profile)
def _check_boot_images(self):
    """Verify both bm-deploy boot images resolve in glance.

    Increments predeploy_errors once per missing image.
    """
    kernel_id, ramdisk_id = self._image_ids()
    message = ("No image with the name '{}' found - make "
               "sure you've uploaded boot images")
    for image_name, image_id in (('bm-deploy-kernel', kernel_id),
                                 ('bm-deploy-ramdisk', ramdisk_id)):
        if image_id is None:
            self.predeploy_errors += 1
            self.log.error(message.format(image_name))
def _check_flavors_exist(self, parsed_args):
    """Ensure that selected flavors (--ROLE-flavor) exist in nova.

    Roles with no flavor or zero scale are skipped; otherwise a
    missing flavor increments predeploy_errors.
    """
    compute_client = self.app.client_manager.compute
    known_flavors = {f.name: f for f in compute_client.flavors.list()}
    message = "Provided --{}-flavor, '{}', does not exist"
    role_settings = (
        ('control', parsed_args.control_flavor,
         parsed_args.control_scale),
        ('compute', parsed_args.compute_flavor,
         parsed_args.compute_scale),
        ('ceph-storage', parsed_args.ceph_storage_flavor,
         parsed_args.ceph_storage_scale),
        ('block-storage', parsed_args.block_storage_flavor,
         parsed_args.block_storage_scale),
        ('swift-storage', parsed_args.swift_storage_flavor,
         parsed_args.swift_storage_scale),
    )
    for role, flavor_name, scale in role_settings:
        if flavor_name is None or scale == 0:
            self.log.debug("--{}-flavor not used".format(role))
        elif flavor_name not in known_flavors:
            self.predeploy_errors += 1
            self.log.error(message.format(role, flavor_name))
def _check_ironic_boot_configuration(self, node):
    """Validate one ironic node's deploy-image configuration.

    Increments predeploy_errors when the node's deploy_kernel /
    deploy_ramdisk do not match the bm-deploy images in glance, and
    predeploy_warnings when the boot_option:local capability is
    missing.

    :param node: a detailed ironic node (from bm_client.node.get)
    """
    kernel_id, ramdisk_id = self._image_ids()
    self.log.debug("Doing boot checks for {}".format(node.uuid))
    message = ("Node uuid={uuid} has an incorrectly configured "
               "{property}. Expected \"{expected}\" but got "
               "\"{actual}\".")
    if node.driver_info.get('deploy_ramdisk') != ramdisk_id:
        self.predeploy_errors += 1
        self.log.error(message.format(
            uuid=node.uuid,
            property='driver_info/deploy_ramdisk',
            expected=ramdisk_id,
            actual=node.driver_info.get('deploy_ramdisk')
        ))
    if node.driver_info.get('deploy_kernel') != kernel_id:
        self.predeploy_errors += 1
        self.log.error(message.format(
            uuid=node.uuid,
            property='driver_info/deploy_kernel',
            # BUGFIX: this previously reported ramdisk_id as the
            # expected value for the *kernel* check (copy-paste error).
            expected=kernel_id,
            actual=node.driver_info.get('deploy_kernel')
        ))
    if 'boot_option:local' not in node.properties.get('capabilities', ''):
        self.predeploy_warnings += 1
        self.log.warning(message.format(
            uuid=node.uuid,
            property='properties/capabilities',
            expected='boot_option:local',
            actual=node.properties.get('capabilities')
        ))
def get_parser(self, prog_name):
    """Build the argparse parser for the overcloud deploy command.

    Either --plan (Tuskar) or --templates (tripleo-heat-templates) is
    required; the two deployment paths are mutually exclusive.
    """
    # add_help doesn't work properly, set it to False:
    parser = argparse.ArgumentParser(
        description=self.get_description(),
        prog=prog_name,
        add_help=False
    )
    # Deployment source: exactly one of --plan / --templates.
    main_group = parser.add_mutually_exclusive_group(required=True)
    main_group.add_argument(
        '--plan',
        help=_("The Name or UUID of the Tuskar plan to deploy.")
    )
    main_group.add_argument(
        '--templates', nargs='?', const=TRIPLEO_HEAT_TEMPLATES,
        help=_("The directory containing the Heat templates to deploy"))
    parser.add_argument('--stack',
                        help=_("Stack name to create or update"),
                        default='overcloud')
    parser.add_argument('-t', '--timeout', metavar='<TIMEOUT>',
                        type=int, default=240,
                        help=_('Deployment timeout in minutes.'))
    # Per-role node counts.
    parser.add_argument('--control-scale', type=int,
                        help=_('New number of control nodes.'))
    parser.add_argument('--compute-scale', type=int,
                        help=_('New number of compute nodes.'))
    parser.add_argument('--ceph-storage-scale', type=int,
                        help=_('New number of ceph storage nodes.'))
    parser.add_argument('--block-storage-scale', type=int,
                        help=_('New number of cinder storage nodes.'))
    parser.add_argument('--swift-storage-scale', type=int,
                        help=_('New number of swift storage nodes.'))
    # Per-role nova flavors.
    parser.add_argument('--control-flavor',
                        help=_("Nova flavor to use for control nodes."))
    parser.add_argument('--compute-flavor',
                        help=_("Nova flavor to use for compute nodes."))
    parser.add_argument('--ceph-storage-flavor',
                        help=_("Nova flavor to use for ceph storage "
                               "nodes."))
    parser.add_argument('--block-storage-flavor',
                        help=_("Nova flavor to use for cinder storage "
                               "nodes."))
    parser.add_argument('--swift-storage-flavor',
                        help=_("Nova flavor to use for swift storage "
                               "nodes."))
    # Neutron networking configuration.
    parser.add_argument('--neutron-flat-networks',
                        help=_('Comma separated list of physical_network '
                               'names with which flat networks can be '
                               'created. Use * to allow flat networks '
                               'with arbitrary physical_network names.'))
    parser.add_argument('--neutron-physical-bridge',
                        help=_('Deprecated.'))
    parser.add_argument('--neutron-bridge-mappings',
                        help=_('Comma separated list of bridge mappings. '
                               '(default: datacentre:br-ex)'))
    parser.add_argument('--neutron-public-interface',
                        help=_('Deprecated.'))
    parser.add_argument('--hypervisor-neutron-public-interface',
                        default='nic1', help=_('Deprecated.'))
    parser.add_argument('--neutron-network-type',
                        help=_('The network type for tenant networks.'))
    parser.add_argument('--neutron-tunnel-types',
                        help=_('Network types supported by the agent '
                               '(gre and/or vxlan).'))
    parser.add_argument('--neutron-tunnel-id-ranges',
                        default="1:1000",
                        help=_("Ranges of GRE tunnel IDs to make "
                               "available for tenant network allocation"),)
    parser.add_argument('--neutron-vni-ranges',
                        default="1:1000",
                        help=_("Ranges of VXLAN VNI IDs to make "
                               "available for tenant network allocation"),)
    parser.add_argument('--neutron-disable-tunneling',
                        dest='neutron_disable_tunneling',
                        action="store_const", const=True,
                        help=_('Disables tunneling.')),
    parser.add_argument('--neutron-network-vlan-ranges',
                        help=_('Comma separated list of '
                               '<physical_network>:<vlan_min>:<vlan_max> '
                               'or <physical_network> specifying '
                               'physical_network names usable for VLAN '
                               'provider and tenant networks, as well as '
                               'ranges of VLAN tags on each available for '
                               'allocation to tenant networks. '
                               '(ex: datacentre:1:1000)'))
    parser.add_argument('--neutron-mechanism-drivers',
                        help=_('An ordered list of extension driver '
                               'entrypoints to be loaded from the '
                               'neutron.ml2.extension_drivers namespace.'))
    parser.add_argument('--libvirt-type',
                        default='kvm',
                        choices=['kvm', 'qemu'],
                        help=_('Libvirt domain type. (default: kvm)'))
    parser.add_argument('--ntp-server',
                        help=_('The NTP for overcloud nodes.'))
    parser.add_argument(
        '--tripleo-root',
        default=os.environ.get('TRIPLEO_ROOT', '/etc/tripleo'),
        help=_('The root directory for TripleO templates.')
    )
    parser.add_argument(
        '--no-proxy',
        default=os.environ.get('no_proxy', ''),
        help=_('A comma separated list of hosts that should not be '
               'proxied.')
    )
    parser.add_argument(
        '-O', '--output-dir', metavar='<OUTPUT DIR>',
        help=_('Directory to write Tuskar template files into. It will be '
               'created if it does not exist. If not provided a temporary '
               'directory will be used.')
    )
    parser.add_argument(
        '-e', '--environment-file', metavar='<HEAT ENVIRONMENT FILE>',
        action='append', dest='environment_files',
        help=_('Environment files to be passed to the heat stack-create '
               'or heat stack-update command. (Can be specified more than '
               'once.)')
    )
    parser.add_argument(
        '--validation-errors-fatal',
        action='store_true',
        default=False,
        help=_('Exit if there are errors from the configuration '
               'pre-checks. Ignoring these errors will likely cause your '
               'deploy to fail.')
    )
    parser.add_argument(
        '--validation-warnings-fatal',
        action='store_true',
        default=False,
        help=_('Exit if there are warnings from the configuration '
               'pre-checks.')
    )
    # RHEL registration options (used when --rhel-reg is given).
    reg_group = parser.add_argument_group('Registration Parameters')
    reg_group.add_argument(
        '--rhel-reg',
        action='store_true',
        help=_('Register overcloud nodes to the customer portal or a '
               'satellite.')
    )
    reg_group.add_argument(
        '--reg-method',
        choices=['satellite', 'portal'],
        default='satellite',
        help=_('RHEL registration method to use for the overcloud nodes.')
    )
    reg_group.add_argument(
        '--reg-org',
        default='',
        help=_('Organization key to use for registration.')
    )
    reg_group.add_argument(
        '--reg-force',
        action='store_true',
        help=_('Register the system even if it is already registered.')
    )
    reg_group.add_argument(
        '--reg-sat-url',
        default='',
        help=_('Satellite server to register overcloud nodes.')
    )
    reg_group.add_argument(
        '--reg-activation-key',
        default='',
        help=_('Activation key to use for registration.')
    )
    return parser
def take_action(self, parsed_args):
    """Entry point for the overcloud deploy command.

    Validates arguments and pre-deploy capabilities, then deploys via
    either tripleo-heat-templates or a Tuskar plan, and finally writes
    the rc/tempest files and (on create only) runs post-configuration.

    :returns: True on success, False on DeploymentError, None when
        validation aborts the run
    """
    self.log.debug("take_action(%s)" % parsed_args)
    self._validate_args(parsed_args)
    errors, warnings = self._predeploy_verify_capabilities(parsed_args)
    if errors > 0:
        self.log.error(
            "Configuration has %d errors, fix them before proceeding. "
            "Ignoring these errors is likely to lead to a failed deploy.",
            errors)
        if parsed_args.validation_warnings_fatal or \
                parsed_args.validation_errors_fatal:
            return
    if warnings > 0:
        self.log.error(
            "Configuration has %d warnings, fix them before proceeding. ",
            warnings)
        if parsed_args.validation_warnings_fatal:
            return
    else:
        self.log.info("SUCCESS: No warnings or errors in deploy "
                      "configuration, proceeding.")
    clients = self.app.client_manager
    orchestration_client = clients.rdomanager_oscplugin.orchestration()
    stack = self._get_stack(orchestration_client, parsed_args.stack)
    # Remember whether this run creates the stack: post-configuration
    # below only happens on create, never on update.
    stack_create = stack is None
    try:
        self._pre_heat_deploy()
        # Registration argument cross-checks before any deploy work.
        if parsed_args.rhel_reg:
            if parsed_args.reg_method == 'satellite':
                sat_required_args = (parsed_args.reg_org and
                                     parsed_args.reg_sat_url and
                                     parsed_args.reg_activation_key)
                if not sat_required_args:
                    raise exceptions.DeploymentError(
                        "ERROR: In order to use satellite registration, "
                        "you must specify --reg-org, --reg-sat-url, and "
                        "--reg-activation-key.")
            else:
                portal_required_args = (parsed_args.reg_org and
                                        parsed_args.reg_activation_key)
                if not portal_required_args:
                    raise exceptions.DeploymentError(
                        "ERROR: In order to use portal registration, you "
                        "must specify --reg-org, and "
                        "--reg-activation-key.")
        if parsed_args.templates:
            self._deploy_tripleo_heat_templates(stack, parsed_args)
        else:
            self._deploy_tuskar(stack, parsed_args)
        # Get a new copy of the stack after stack update/create. If it was
        # a create then the previous stack object would be None.
        stack = self._get_stack(orchestration_client, parsed_args.stack)
        self._create_overcloudrc(stack, parsed_args)
        self._create_tempest_deployer_input()
        if stack_create:
            self._deploy_postconfig(stack, parsed_args)
        overcloud_endpoint = self._get_overcloud_endpoint(stack)
        print("Overcloud Endpoint: {0}".format(overcloud_endpoint))
        print("Overcloud Deployed")
        return True
    except exceptions.DeploymentError as err:
        print("Deployment failed: ", err, file=sys.stderr)
        return False
|
rdo-management/python-rdomanager-oscplugin | rdomanager_oscplugin/v1/overcloud_deploy.py | DeployOvercloud._pre_heat_deploy | python | def _pre_heat_deploy(self):
clients = self.app.client_manager
compute_client = clients.compute
self.log.debug("Checking hypervisor stats")
if utils.check_hypervisor_stats(compute_client) is None:
raise exceptions.DeploymentError(
"Expected hypervisor stats not met")
return True | Setup before the Heat stack create or update has been done. | train | https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/v1/overcloud_deploy.py#L464-L473 | null | class DeployOvercloud(command.Command):
"""Deploy Overcloud"""
log = logging.getLogger(__name__ + ".DeployOvercloud")
predeploy_errors = 0
predeploy_warnings = 0
def set_overcloud_passwords(self, parameters, parsed_args):
    """Add service passwords to the heat parameters dictionary.

    Generates (or re-reads) the overcloud passwords, stores them on
    ``self.passwords`` for later use (rc file, post-config), and maps
    them into *parameters* using the key scheme of the chosen deploy
    path: plain THT names with --templates, role-scoped
    ``Role-1::...`` names for Tuskar.

    :param parameters: A dictionary for the passwords to be added to
    :type parameters: dict
    :param parsed_args: parsed CLI args; ``templates`` selects the
        key naming scheme
    """
    undercloud_ceilometer_snmpd_password = utils.get_config_value(
        "auth", "undercloud_ceilometer_snmpd_password")
    # Keep a reference for _create_overcloudrc/_deploy_postconfig.
    self.passwords = passwords = utils.generate_overcloud_passwords()
    ceilometer_pass = passwords['OVERCLOUD_CEILOMETER_PASSWORD']
    ceilometer_secret = passwords['OVERCLOUD_CEILOMETER_SECRET']
    if parsed_args.templates:
        # tripleo-heat-templates parameter names.
        parameters['AdminPassword'] = passwords['OVERCLOUD_ADMIN_PASSWORD']
        parameters['AdminToken'] = passwords['OVERCLOUD_ADMIN_TOKEN']
        parameters['CeilometerPassword'] = ceilometer_pass
        parameters['CeilometerMeteringSecret'] = ceilometer_secret
        parameters['CinderPassword'] = passwords[
            'OVERCLOUD_CINDER_PASSWORD']
        parameters['GlancePassword'] = passwords[
            'OVERCLOUD_GLANCE_PASSWORD']
        parameters['HeatPassword'] = passwords['OVERCLOUD_HEAT_PASSWORD']
        parameters['HeatStackDomainAdminPassword'] = passwords[
            'OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD']
        parameters['NeutronPassword'] = passwords[
            'OVERCLOUD_NEUTRON_PASSWORD']
        parameters['NovaPassword'] = passwords['OVERCLOUD_NOVA_PASSWORD']
        parameters['SwiftHashSuffix'] = passwords['OVERCLOUD_SWIFT_HASH']
        parameters['SwiftPassword'] = passwords['OVERCLOUD_SWIFT_PASSWORD']
        parameters['SnmpdReadonlyUserPassword'] = (
            undercloud_ceilometer_snmpd_password)
    else:
        # Tuskar role-scoped parameter names.
        parameters['Controller-1::AdminPassword'] = passwords[
            'OVERCLOUD_ADMIN_PASSWORD']
        parameters['Controller-1::AdminToken'] = passwords[
            'OVERCLOUD_ADMIN_TOKEN']
        parameters['Compute-1::AdminPassword'] = passwords[
            'OVERCLOUD_ADMIN_PASSWORD']
        parameters['Controller-1::SnmpdReadonlyUserPassword'] = (
            undercloud_ceilometer_snmpd_password)
        parameters['Cinder-Storage-1::SnmpdReadonlyUserPassword'] = (
            undercloud_ceilometer_snmpd_password)
        parameters['Swift-Storage-1::SnmpdReadonlyUserPassword'] = (
            undercloud_ceilometer_snmpd_password)
        parameters['Compute-1::SnmpdReadonlyUserPassword'] = (
            undercloud_ceilometer_snmpd_password)
        parameters['Controller-1::CeilometerPassword'] = ceilometer_pass
        parameters[
            'Controller-1::CeilometerMeteringSecret'] = ceilometer_secret
        parameters['Compute-1::CeilometerPassword'] = ceilometer_pass
        parameters[
            'Compute-1::CeilometerMeteringSecret'] = ceilometer_secret
        parameters['Controller-1::CinderPassword'] = (
            passwords['OVERCLOUD_CINDER_PASSWORD'])
        parameters['Controller-1::GlancePassword'] = (
            passwords['OVERCLOUD_GLANCE_PASSWORD'])
        parameters['Controller-1::HeatPassword'] = (
            passwords['OVERCLOUD_HEAT_PASSWORD'])
        parameters['Controller-1::HeatStackDomainAdminPassword'] = (
            passwords['OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD'])
        parameters['Controller-1::NeutronPassword'] = (
            passwords['OVERCLOUD_NEUTRON_PASSWORD'])
        parameters['Compute-1::NeutronPassword'] = (
            passwords['OVERCLOUD_NEUTRON_PASSWORD'])
        parameters['Controller-1::NovaPassword'] = (
            passwords['OVERCLOUD_NOVA_PASSWORD'])
        parameters['Compute-1::NovaPassword'] = (
            passwords['OVERCLOUD_NOVA_PASSWORD'])
        parameters['Controller-1::SwiftHashSuffix'] = (
            passwords['OVERCLOUD_SWIFT_HASH'])
        parameters['Controller-1::SwiftPassword'] = (
            passwords['OVERCLOUD_SWIFT_PASSWORD'])
def _get_stack(self, orchestration_client, stack_name):
    """Return the deployed overcloud stack, or None if it doesn't exist.

    The result tells the caller whether to perform a stack create
    (None) or a stack update.
    """
    try:
        found = orchestration_client.stacks.get(stack_name)
    except HTTPNotFound:
        self.log.info("No stack found, will be doing a stack create")
        return None
    self.log.info("Stack found, will be doing a stack update")
    return found
def _update_paramaters(self, args, network_client, stack):
    """Assemble the full heat parameter dict for a deploy/update.

    (NOTE(review): the method name keeps its historical misspelling of
    "parameters" — callers in this file reference it as-is.)

    Combines defaults, generated passwords, the ctlplane network ID,
    CLI overrides, HA/L3-agent settings derived from the controller
    count, and (on create) Ceph cluster secrets. Key naming follows
    the deploy path: plain THT names with --templates, role-scoped
    ``Role-1::...`` names for Tuskar.

    :param args: parsed CLI arguments
    :param network_client: neutron client (for the ctlplane net ID)
    :param stack: existing stack or None; None enables create-only
        parameters (network type, Ceph secrets, ...)
    :returns: dict of heat parameters
    """
    if args.templates:
        parameters = PARAMETERS.copy()
        if stack is None:
            parameters.update(NEW_STACK_PARAMETERS)
    else:
        parameters = {}
    self.log.debug("Generating overcloud passwords")
    self.set_overcloud_passwords(parameters, args)
    self.log.debug("Getting ctlplane from Neutron")
    net = network_client.api.find_attr('networks', 'ctlplane')
    parameters['NeutronControlPlaneID'] = net['id']
    # Map CLI argument names onto heat parameter names for the chosen
    # deploy path; create-only parameters are appended when stack is
    # None so updates can't change them.
    if args.templates:
        param_args = (
            ('NeutronPublicInterface', 'neutron_public_interface'),
            ('NeutronBridgeMappings', 'neutron_bridge_mappings'),
            ('NeutronFlatNetworks', 'neutron_flat_networks'),
            ('HypervisorNeutronPhysicalBridge', 'neutron_physical_bridge'),
            ('NtpServer', 'ntp_server'),
            ('ControllerCount', 'control_scale'),
            ('ComputeCount', 'compute_scale'),
            ('ObjectStorageCount', 'swift_storage_scale'),
            ('BlockStorageCount', 'block_storage_scale'),
            ('CephStorageCount', 'ceph_storage_scale'),
            ('OvercloudControlFlavor', 'control_flavor'),
            ('OvercloudComputeFlavor', 'compute_flavor'),
            ('OvercloudBlockStorageFlavor', 'block_storage_flavor'),
            ('OvercloudSwiftStorageFlavor', 'swift_storage_flavor'),
            ('OvercloudCephStorageFlavor', 'ceph_storage_flavor'),
            ('NeutronNetworkVLANRanges', 'neutron_network_vlan_ranges'),
            ('NeutronMechanismDrivers', 'neutron_mechanism_drivers')
        )
        if stack is None:
            new_stack_args = (
                ('NeutronNetworkType', 'neutron_network_type'),
                ('NeutronTunnelIdRanges', 'neutron_tunnel_id_ranges'),
                ('NeutronTunnelTypes', 'neutron_tunnel_types'),
                ('NeutronVniRanges', 'neutron_vni_ranges'),
                ('NovaComputeLibvirtType', 'libvirt_type'),
            )
            param_args = param_args + new_stack_args
            if args.neutron_disable_tunneling is not None:
                neutron_enable_tunneling = (
                    not args.neutron_disable_tunneling)
                parameters.update({
                    'NeutronEnableTunnelling': neutron_enable_tunneling,
                })
    else:
        param_args = (
            ('Controller-1::NeutronPublicInterface',
             'neutron_public_interface'),
            ('Compute-1::NeutronPublicInterface',
             'neutron_public_interface'),
            ('Controller-1::NeutronBridgeMappings',
             'neutron_bridge_mappings'),
            ('Compute-1::NeutronBridgeMappings',
             'neutron_bridge_mappings'),
            ('Controller-1::NeutronFlatNetworks', 'neutron_flat_networks'),
            ('Compute-1::NeutronFlatNetworks', 'neutron_flat_networks'),
            ('Compute-1::NeutronPhysicalBridge',
             'neutron_physical_bridge'),
            ('Controller-1::NtpServer', 'ntp_server'),
            ('Compute-1::NtpServer', 'ntp_server'),
            ('Controller-1::NeutronNetworkVLANRanges',
             'neutron_network_vlan_ranges'),
            ('Compute-1::NeutronNetworkVLANRanges',
             'neutron_network_vlan_ranges'),
            ('Controller-1::NeutronMechanismDrivers',
             'neutron_mechanism_drivers'),
            ('Compute-1::NeutronMechanismDrivers',
             'neutron_mechanism_drivers'),
            ('Controller-1::count', 'control_scale'),
            ('Compute-1::count', 'compute_scale'),
            ('Swift-Storage-1::count', 'swift_storage_scale'),
            ('Cinder-Storage-1::count', 'block_storage_scale'),
            ('Ceph-Storage-1::count', 'ceph_storage_scale'),
            ('Cinder-Storage-1::Flavor', 'block_storage_flavor'),
            ('Compute-1::Flavor', 'compute_flavor'),
            ('Controller-1::Flavor', 'control_flavor'),
            ('Swift-Storage-1::Flavor', 'swift_storage_flavor'),
            ('Ceph-Storage-1::Flavor', 'ceph_storage_flavor'),
        )
        if stack is None:
            new_stack_args = (
                ('Controller-1::NeutronNetworkType',
                 'neutron_network_type'),
                ('Compute-1::NeutronNetworkType', 'neutron_network_type'),
                ('Controller-1::NeutronTunnelTypes',
                 'neutron_tunnel_types'),
                ('Compute-1::NeutronTunnelTypes', 'neutron_tunnel_types'),
                ('Compute-1::NovaComputeLibvirtType', 'libvirt_type'),
                ('Controller-1::NeutronTunnelIdRanges',
                 'neutron_tunnel_id_ranges'),
                ('Controller-1::NeutronVniRanges', 'neutron_vni_ranges'),
                ('Compute-1::NeutronTunnelIdRanges',
                 'neutron_tunnel_id_ranges'),
                ('Compute-1::NeutronVniRanges', 'neutron_vni_ranges'),
            )
            param_args = param_args + new_stack_args
            if args.neutron_disable_tunneling is not None:
                neutron_enable_tunneling = (
                    not args.neutron_disable_tunneling)
                parameters.update({
                    'Controller-1::NeutronEnableTunnelling':
                    neutron_enable_tunneling,
                    'Compute-1::NeutronEnableTunnelling':
                    neutron_enable_tunneling,
                })
    # Update parameters from commandline
    for param, arg in param_args:
        if getattr(args, arg, None) is not None:
            parameters[param] = getattr(args, arg)
    # Scaling needs extra parameters
    number_controllers = max((
        int(parameters.get('ControllerCount', 0)),
        int(parameters.get('Controller-1::count', 0))
    ))
    if number_controllers > 1:
        # Multi-controller (HA): requires an NTP server and neutron L3
        # HA without agent failover.
        if not args.ntp_server:
            raise Exception('Specify --ntp-server when using multiple'
                            ' controllers (with HA).')
        if args.templates:
            parameters.update({
                'NeutronL3HA': True,
                'NeutronAllowL3AgentFailover': False,
            })
        else:
            parameters.update({
                'Controller-1::NeutronL3HA': True,
                'Controller-1::NeutronAllowL3AgentFailover': False,
                'Compute-1::NeutronL3HA': True,
                'Compute-1::NeutronAllowL3AgentFailover': False,
            })
    else:
        if args.templates:
            parameters.update({
                'NeutronL3HA': False,
                'NeutronAllowL3AgentFailover': False,
            })
        else:
            parameters.update({
                'Controller-1::NeutronL3HA': False,
                'Controller-1::NeutronAllowL3AgentFailover': False,
                'Compute-1::NeutronL3HA': False,
                'Compute-1::NeutronAllowL3AgentFailover': False,
            })
    # set at least 3 dhcp_agents_per_network
    dhcp_agents_per_network = (number_controllers if number_controllers and
                               number_controllers > 3 else 3)
    if args.templates:
        parameters.update({
            'NeutronDhcpAgentsPerNetwork': dhcp_agents_per_network,
        })
    else:
        parameters.update({
            'Controller-1::NeutronDhcpAgentsPerNetwork':
            dhcp_agents_per_network,
        })
    # Ceph secrets are generated only on create so updates keep the
    # existing cluster identity.
    if max((int(parameters.get('CephStorageCount', 0)),
            int(parameters.get('Ceph-Storage-1::count', 0)))) > 0:
        if stack is None:
            parameters.update({
                'CephClusterFSID': six.text_type(uuid.uuid1()),
                'CephMonKey': utils.create_cephx_key(),
                'CephAdminKey': utils.create_cephx_key()
            })
    return parameters
def _create_registration_env(self, args):
if args.templates:
tht_root = args.templates
else:
tht_root = TRIPLEO_HEAT_TEMPLATES
environment = os.path.join(tht_root,
RHEL_REGISTRATION_EXTRACONFIG_NAME,
'environment-rhel-registration.yaml')
registry = os.path.join(tht_root, RHEL_REGISTRATION_EXTRACONFIG_NAME,
'rhel-registration-resource-registry.yaml')
user_env = ("parameter_defaults:\n"
" rhel_reg_method: \"%(method)s\"\n"
" rhel_reg_org: \"%(org)s\"\n"
" rhel_reg_force: \"%(force)s\"\n"
" rhel_reg_sat_url: \"%(sat_url)s\"\n"
" rhel_reg_activation_key: \"%(activation_key)s\"\n"
% {'method': args.reg_method,
'org': args.reg_org,
'force': args.reg_force,
'sat_url': args.reg_sat_url,
'activation_key': args.reg_activation_key})
handle, user_env_file = tempfile.mkstemp()
with open(user_env_file, 'w') as temp_file:
temp_file.write(user_env)
return [registry, environment, user_env_file]
    def _heat_deploy(self, stack, stack_name, template_path, parameters,
                     environments, timeout):
        """Create or update the overcloud Heat stack and wait for it.

        Assembles the template and environment contents, issues a Heat
        stack create (when *stack* is None) or update, then blocks until
        the stack reaches a terminal state.

        :param stack: existing stack object, or None to trigger a create
        :param stack_name: name of the Heat stack to create/update
        :param template_path: path to the top-level overcloud template
        :param parameters: dict of Heat parameters
        :param environments: list of environment file paths
        :param timeout: stack timeout in minutes; falsy keeps Heat default
        :raises Exception: when the stack ends in a failed state
        """
        self.log.debug("Processing environment files")
        env_files, env = (
            template_utils.process_multiple_environments_and_files(
                environments))
        self.log.debug("Getting template contents")
        template_files, template = template_utils.get_template_contents(
            template_path)
        # Heat needs every referenced file (templates + environments) in
        # one combined mapping.
        files = dict(list(template_files.items()) + list(env_files.items()))
        clients = self.app.client_manager
        orchestration_client = clients.rdomanager_oscplugin.orchestration()
        self.log.debug("Deploying stack: %s", stack_name)
        self.log.debug("Deploying template: %s", template)
        self.log.debug("Deploying parameters: %s", parameters)
        self.log.debug("Deploying environment: %s", env)
        self.log.debug("Deploying files: %s", files)
        stack_args = {
            'stack_name': stack_name,
            'template': template,
            'parameters': parameters,
            'environment': env,
            'files': files
        }
        if timeout:
            stack_args['timeout_mins'] = timeout
        if stack is None:
            self.log.info("Performing Heat stack create")
            orchestration_client.stacks.create(**stack_args)
        else:
            self.log.info("Performing Heat stack update")
            # Make sure existing parameters for stack are reused
            stack_args['existing'] = 'true'
            orchestration_client.stacks.update(stack.id, **stack_args)
        create_result = utils.wait_for_stack_ready(
            orchestration_client, stack_name)
        if not create_result:
            if stack is None:
                raise Exception("Heat Stack create failed.")
            else:
                raise Exception("Heat Stack update failed.")
def _get_overcloud_endpoint(self, stack):
for output in stack.to_dict().get('outputs', {}):
if output['output_key'] == 'KeystoneURL':
return output['output_value']
def _get_service_ips(self, stack):
service_ips = {}
for output in stack.to_dict().get('outputs', {}):
service_ips[output['output_key']] = output['output_value']
return service_ips
    def _deploy_tripleo_heat_templates(self, stack, parsed_args):
        """Deploy the fixed templates in TripleO Heat Templates.

        Builds the Heat parameters, validates requested node counts
        against available baremetal nodes, assembles the environment list
        (resource registry, generated Keystone certs, optional RHEL
        registration and user-supplied files) and hands off to
        :meth:`_heat_deploy`.

        :param stack: existing overcloud stack, or None for a fresh deploy
        :param parsed_args: parsed CLI arguments
        """
        clients = self.app.client_manager
        network_client = clients.network
        parameters = self._update_paramaters(
            parsed_args, network_client, stack)
        utils.check_nodes_count(
            self.app.client_manager.rdomanager_oscplugin.baremetal(),
            stack,
            parameters,
            {
                'ControllerCount': 1,
                'ComputeCount': 1,
                'ObjectStorageCount': 0,
                'BlockStorageCount': 0,
                'CephStorageCount': 0,
            }
        )
        tht_root = parsed_args.templates
        print("Deploying templates in the directory {0}".format(
            os.path.abspath(tht_root)))
        self.log.debug("Creating Environment file")
        env_path = utils.create_environment_file()
        if stack is None:
            # First deployment only: generate the Keystone PKI material
            # into the environment file.
            self.log.debug("Creating Keystone certificates")
            keystone_pki.generate_certs_into_json(env_path, False)
        resource_registry_path = os.path.join(tht_root, RESOURCE_REGISTRY_NAME)
        environments = [resource_registry_path, env_path]
        if parsed_args.rhel_reg:
            reg_env = self._create_registration_env(parsed_args)
            environments.extend(reg_env)
        if parsed_args.environment_files:
            environments.extend(parsed_args.environment_files)
        overcloud_yaml = os.path.join(tht_root, OVERCLOUD_YAML_NAME)
        self._heat_deploy(stack, parsed_args.stack, overcloud_yaml, parameters,
                          environments, parsed_args.timeout)
    def _deploy_tuskar(self, stack, parsed_args):
        """Deploy the overcloud from a Tuskar plan.

        Downloads the plan's templates into a working directory, builds
        and validates the Heat parameters, persists them back to Tuskar
        (as strings) for redeploys, and hands off to :meth:`_heat_deploy`.

        :param stack: existing overcloud stack, or None for a fresh deploy
        :param parsed_args: parsed CLI arguments (``plan``, ``output_dir``,
            registration and environment options)
        """
        clients = self.app.client_manager
        management = clients.rdomanager_oscplugin.management()
        network_client = clients.network

        # TODO(dmatthews): The Tuskar client has very similar code to this for
        # downloading templates. It should be refactored upstream so we can use
        # it.

        if parsed_args.output_dir:
            output_dir = parsed_args.output_dir
        else:
            output_dir = tempfile.mkdtemp()

        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)

        management_plan = tuskarutils.find_resource(
            management.plans, parsed_args.plan)

        # retrieve templates
        templates = management.plans.templates(management_plan.uuid)

        parameters = self._update_paramaters(
            parsed_args, network_client, stack)

        utils.check_nodes_count(
            self.app.client_manager.rdomanager_oscplugin.baremetal(),
            stack,
            parameters,
            {
                'Controller-1::count': 1,
                'Compute-1::count': 1,
                'Swift-Storage-1::count': 0,
                'Cinder-Storage-1::count': 0,
                'Ceph-Storage-1::count': 0,
            }
        )

        if stack is None:
            # First deployment only: create the Keystone CA and signing
            # key/cert pair for the new overcloud.
            ca_key_pem, ca_cert_pem = keystone_pki.create_ca_pair()
            signing_key_pem, signing_cert_pem = (
                keystone_pki.create_signing_pair(ca_key_pem, ca_cert_pem))
            parameters['Controller-1::KeystoneCACertificate'] = ca_cert_pem
            parameters['Controller-1::KeystoneSigningCertificate'] = (
                signing_cert_pem)
            parameters['Controller-1::KeystoneSigningKey'] = signing_key_pem

        # Save the parameters to Tuskar so they can be used when redeploying.
        # Tuskar expects to get all values as strings. So we convert them all
        # below.
        management.plans.patch(
            management_plan.uuid,
            [{'name': x[0], 'value': six.text_type(x[1])}
             for x in parameters.items()]
        )

        # write file for each key-value in templates
        print("The following templates will be written:")
        for template_name, template_content in templates.items():

            # It's possible to organize the role templates and their dependent
            # files into directories, in which case the template_name will
            # carry the directory information. If that's the case, first
            # create the directory structure (if it hasn't already been
            # created by another file in the templates list).
            template_dir = os.path.dirname(template_name)
            output_template_dir = os.path.join(output_dir, template_dir)
            if template_dir and not os.path.exists(output_template_dir):
                os.makedirs(output_template_dir)

            filename = os.path.join(output_dir, template_name)
            with open(filename, 'w+') as template_file:
                template_file.write(template_content)
            print(filename)

        overcloud_yaml = os.path.join(output_dir, 'plan.yaml')
        environment_yaml = os.path.join(output_dir, 'environment.yaml')
        environments = [environment_yaml, ]

        if parsed_args.rhel_reg:
            reg_env = self._create_registration_env(parsed_args)
            environments.extend(reg_env)
        if parsed_args.environment_files:
            environments.extend(parsed_args.environment_files)

        self._heat_deploy(stack, parsed_args.stack, overcloud_yaml, parameters,
                          environments, parsed_args.timeout)
def _create_overcloudrc(self, stack, parsed_args):
overcloud_endpoint = self._get_overcloud_endpoint(stack)
overcloud_ip = six.moves.urllib.parse.urlparse(
overcloud_endpoint).hostname
rc_params = {
'NOVA_VERSION': '1.1',
'COMPUTE_API_VERSION': '1.1',
'OS_USERNAME': 'admin',
'OS_TENANT_NAME': 'admin',
'OS_NO_CACHE': 'True',
'OS_CLOUDNAME': stack.stack_name,
'no_proxy': "%(no_proxy)s,%(overcloud_ip)s" % {
'no_proxy': parsed_args.no_proxy,
'overcloud_ip': overcloud_ip,
}
}
rc_params.update({
'OS_PASSWORD': self.passwords['OVERCLOUD_ADMIN_PASSWORD'],
'OS_AUTH_URL': self._get_overcloud_endpoint(stack),
})
with open('%src' % stack.stack_name, 'w') as f:
for key, value in rc_params.items():
f.write("export %(key)s=%(value)s\n" %
{'key': key, 'value': value})
def _create_tempest_deployer_input(self):
config = configparser.ConfigParser()
config.add_section('compute-feature-enabled')
# Does the test environment support obtaining instance serial console
# output? (default: true)
# set in [nova.serial_console]->enabled
config.set('compute-feature-enabled', 'console_output', 'false')
config.add_section('object-storage')
# Role to add to users created for swift tests to enable creating
# containers (default: 'Member')
# keystone role-list returns this role
config.set('object-storage', 'operator_role', 'swiftoperator')
config.add_section('orchestration')
# Role required for users to be able to manage stacks
# (default: 'heat_stack_owner')
# keystone role-list returns this role
config.set('orchestration', 'stack_owner_role', 'heat_stack_user')
config.add_section('volume')
# Name of the backend1 (must be declared in cinder.conf)
# (default: 'BACKEND_1')
# set in [cinder]->enabled_backends
config.set('volume', 'backend1_name', 'tripleo_iscsi')
config.add_section('volume-feature-enabled')
# Update bootable status of a volume Not implemented on icehouse
# (default: false)
# python-cinderclient supports set-bootable
config.set('volume-feature-enabled', 'bootable', 'true')
with open('tempest-deployer-input.conf', 'w+') as config_file:
config.write(config_file)
def _deploy_postconfig(self, stack, parsed_args):
self.log.debug("_deploy_postconfig(%s)" % parsed_args)
passwords = self.passwords
overcloud_endpoint = self._get_overcloud_endpoint(stack)
overcloud_ip = six.moves.urllib.parse.urlparse(
overcloud_endpoint).hostname
no_proxy = [os.environ.get('no_proxy'), overcloud_ip]
os.environ['no_proxy'] = ','.join(
[x for x in no_proxy if x is not None])
service_ips = self._get_service_ips(stack)
utils.remove_known_hosts(overcloud_ip)
keystone_ip = service_ips.get('KeystoneAdminVip')
if not keystone_ip:
keystone_ip = overcloud_ip
keystone.initialize(
keystone_ip,
passwords['OVERCLOUD_ADMIN_TOKEN'],
'admin@example.com',
passwords['OVERCLOUD_ADMIN_PASSWORD'],
public=overcloud_ip,
user='heat-admin')
# NOTE(bcrochet): Bad hack. Remove the ssl_port info from the
# os_cloud_config.SERVICES dictionary
for service_name, data in keystone.SERVICES.iteritems():
data.pop('ssl_port', None)
services = {}
for service, data in six.iteritems(utils.SERVICE_LIST):
service_data = data.copy()
service_data.pop('password_field', None)
password_field = data.get('password_field')
if password_field:
service_data['password'] = passwords[password_field]
service_name = re.sub('v[0-9]+', '',
service.capitalize() + 'InternalVip')
internal_vip = service_ips.get(service_name)
if internal_vip:
service_data['internal_host'] = internal_vip
services.update({service: service_data})
keystone_client = clients.get_keystone_client(
'admin',
passwords['OVERCLOUD_ADMIN_PASSWORD'],
'admin',
overcloud_endpoint)
keystone.setup_endpoints(
services,
client=keystone_client,
os_auth_url=overcloud_endpoint,
public_host=overcloud_ip)
compute_client = clients.get_nova_bm_client(
'admin',
passwords['OVERCLOUD_ADMIN_PASSWORD'],
'admin',
overcloud_endpoint)
compute_client.flavors.create('m1.demo', 512, 1, 10, 'auto')
def _validate_args(self, parsed_args):
network_type = parsed_args.neutron_network_type
tunnel_types = parsed_args.neutron_tunnel_types
if network_type and tunnel_types:
# Validate that neutron_network_type is in neutron_tunnel_types
if network_type not in tunnel_types:
raise oscexc.CommandError("Neutron network type must be in "
"Neutron tunnel types "
"(%s) " % tunnel_types)
elif network_type and not tunnel_types:
raise oscexc.CommandError("Neutron tunnel types must be specified "
"when Neutron network type is specified")
    def _predeploy_verify_capabilities(self, parsed_args):
        """Run pre-deployment sanity checks on images, flavors and nodes.

        Resets the ``predeploy_errors`` / ``predeploy_warnings`` counters
        and lets the ``_check_*`` helpers accumulate into them.

        :param parsed_args: parsed CLI arguments (role flavors and scales)
        :returns: tuple ``(error_count, warning_count)``
        """
        self.predeploy_errors = 0
        self.predeploy_warnings = 0
        self.log.debug("Starting _pre_verify_capabilities")

        bm_client = self.app.client_manager.rdomanager_oscplugin.baremetal()

        self._check_boot_images()

        self._check_flavors_exist(parsed_args)

        for node in bm_client.node.list():
            # node.list() returns summaries; fetch full details per node.
            node = bm_client.node.get(node.uuid)
            self.log.debug("Checking config for Node {0}".format(node.uuid))
            self._check_ironic_boot_configuration(node)

        flavor_profile_map = self._collect_flavor_profiles([
            parsed_args.control_flavor,
            parsed_args.compute_flavor,
            parsed_args.ceph_storage_flavor,
            parsed_args.block_storage_flavor,
            parsed_args.swift_storage_flavor,
        ])
        node_profile_map = self._collect_node_profiles()

        for target, flavor, scale in [
            ('control', parsed_args.control_flavor,
                parsed_args.control_scale),
            ('compute', parsed_args.compute_flavor,
                parsed_args.compute_scale),
            ('ceph-storage', parsed_args.ceph_storage_flavor,
                parsed_args.ceph_storage_scale),
            ('block-storage', parsed_args.block_storage_flavor,
                parsed_args.block_storage_scale),
            ('swift-storage', parsed_args.swift_storage_flavor,
                parsed_args.swift_storage_scale),
        ]:
            # Roles that deploy zero nodes need no profile verification.
            if scale == 0 or flavor is None:
                self.log.debug("Skipping verification of %s profiles because "
                               "none will be deployed", flavor)
                continue
            self._check_profiles(
                target, flavor, scale,
                flavor_profile_map,
                node_profile_map)

        # Unprofiled nodes will never be scheduled; surface them once.
        if len(node_profile_map.get(None, [])) > 0:
            self.predeploy_warnings += 1
            self.log.warning(
                "There are %d ironic nodes with no profile that will "
                "not be used: %s",
                len(node_profile_map[None]),
                ', '.join(node_profile_map[None])
            )

        return self.predeploy_errors, self.predeploy_warnings
    # Class-level cache for the glance deploy-image IDs (name-mangled to
    # _DeployOvercloud__kernel_id / __ramdisk_id); populated lazily by
    # _image_ids().
    __kernel_id = None
    __ramdisk_id = None

    def _image_ids(self):
        """Return the ``(kernel_id, ramdisk_id)`` glance image IDs.

        Looks up 'bm-deploy-kernel' and 'bm-deploy-ramdisk' on first use
        and caches the result. A missing or ambiguous image leaves the
        corresponding ID as None (callers treat that as an error).
        """
        if self.__kernel_id is not None and self.__ramdisk_id is not None:
            return self.__kernel_id, self.__ramdisk_id
        image_client = self.app.client_manager.image
        kernel_id, ramdisk_id = None, None
        try:
            kernel_id = osc_utils.find_resource(
                image_client.images, 'bm-deploy-kernel').id
        except AttributeError as e:
            self.log.error("Please make sure there is only one image named "
                           "'bm-deploy-kernel' in glance.")
            self.log.exception(e)
        try:
            ramdisk_id = osc_utils.find_resource(
                image_client.images, 'bm-deploy-ramdisk').id
        except AttributeError as e:
            self.log.error("Please make sure there is only one image "
                           "named 'bm-deploy-ramdisk' in glance.")
            self.log.exception(e)
        self.log.debug("Using kernel ID: {0} and ramdisk ID: {1}".format(
            kernel_id, ramdisk_id))
        self.__kernel_id = kernel_id
        self.__ramdisk_id = ramdisk_id
        return kernel_id, ramdisk_id
def _collect_node_profiles(self):
"""Gather a map of profile -> [node_uuid] for ironic boot profiles"""
bm_client = self.app.client_manager.rdomanager_oscplugin.baremetal()
# map of profile capability -> [node_uuid, ...]
profile_map = collections.defaultdict(list)
for node in bm_client.node.list(maintenance=False):
node = bm_client.node.get(node.uuid)
profiles = re.findall(r'profile:(.*?)(?:,|$)',
node.properties.get('capabilities', ''))
if not profiles:
profile_map[None].append(node.uuid)
for p in profiles:
profile_map[p].append(node.uuid)
return dict(profile_map)
    def _collect_flavor_profiles(self, flavors):
        """Map each selected flavor name to its 'capabilities:profile'.

        Only flavors listed in *flavors* are inspected; a flavor whose
        profile key is the empty string maps to None. Also flags flavors
        lacking 'capabilities:boot_option'='local'.

        :param flavors: flavor names selected on the command line
        :returns: dict of flavor name -> profile (or None)
        """
        compute_client = self.app.client_manager.compute

        flavor_profiles = {}

        for flavor in compute_client.flavors.list():
            if flavor.name not in flavors:
                self.log.debug("Flavor {} isn't used in this deployment, "
                               "skipping it".format(flavor.name))
                continue

            profile = flavor.get_keys().get('capabilities:profile')
            if profile == '':
                flavor_profiles[flavor.name] = None
            else:
                flavor_profiles[flavor.name] = profile

            if flavor.get_keys().get('capabilities:boot_option', '') \
                    != 'local':
                # NOTE(review): counted as a warning but logged at error
                # level — confirm which severity is intended.
                self.predeploy_warnings += 1
                self.log.error(
                    'Flavor %s "capabilities:boot_option" is not set to '
                    '"local". Nodes must have ability to PXE boot from '
                    'deploy image.', flavor.name)
                self.log.error(
                    'Recommended solution: openstack flavor set --property '
                    '"cpu_arch"="x86_64" --property '
                    '"capabilities:boot_option"="local" ' + flavor.name)

        return flavor_profiles
    def _check_profiles(self, target, flavor, scale,
                        flavor_profile_map,
                        node_profile_map):
        """Verify enough ironic nodes are tagged for a role's flavor.

        Increments ``predeploy_errors`` when the flavor has no associated
        profile, or when fewer than *scale* nodes carry that profile.

        :param target: role name used in messages (e.g. 'control')
        :param flavor: flavor name selected for the role
        :param scale: requested node count for the role
        :param flavor_profile_map: flavor name -> profile (or None)
        :param node_profile_map: profile -> [node uuids]
        """
        if flavor_profile_map.get(flavor) is None:
            # NOTE(review): the message says 'Warning:' but this is
            # counted as an error — confirm the intended severity.
            self.predeploy_errors += 1
            self.log.error(
                'Warning: The flavor selected for --%s-flavor "%s" has no '
                'profile associated', target, flavor)
            self.log.error(
                'Recommendation: assign a profile with openstack flavor set '
                '--property "capabilities:profile"="PROFILE_NAME" %s',
                flavor)
            return

        if len(node_profile_map.get(flavor_profile_map[flavor], [])) < scale:
            self.predeploy_errors += 1
            self.log.error(
                "Error: %s of %s requested ironic nodes tagged to profile %s "
                "(for flavor %s)",
                len(node_profile_map.get(flavor_profile_map[flavor], [])),
                scale, flavor_profile_map[flavor], flavor
            )
            self.log.error(
                "Recommendation: tag more nodes using ironic node-update "
                "<NODE ID> replace properties/capabilities=profile:%s,"
                "boot_option:local", flavor_profile_map[flavor])
def _check_boot_images(self):
kernel_id, ramdisk_id = self._image_ids()
message = ("No image with the name '{}' found - make "
"sure you've uploaded boot images")
if kernel_id is None:
self.predeploy_errors += 1
self.log.error(message.format('bm-deploy-kernel'))
if ramdisk_id is None:
self.predeploy_errors += 1
self.log.error(message.format('bm-deploy-ramdisk'))
    def _check_flavors_exist(self, parsed_args):
        """Ensure that selected flavors (--ROLE-flavor) exist in nova.

        Increments ``predeploy_errors`` for each selected flavor missing
        from nova; roles with no flavor or a scale of 0 are skipped.

        :param parsed_args: parsed CLI arguments (role flavors and scales)
        """
        compute_client = self.app.client_manager.compute

        flavors = {f.name: f for f in compute_client.flavors.list()}

        message = "Provided --{}-flavor, '{}', does not exist"

        for target, flavor, scale in (
            ('control', parsed_args.control_flavor,
                parsed_args.control_scale),
            ('compute', parsed_args.compute_flavor,
                parsed_args.compute_scale),
            ('ceph-storage', parsed_args.ceph_storage_flavor,
                parsed_args.ceph_storage_scale),
            ('block-storage', parsed_args.block_storage_flavor,
                parsed_args.block_storage_scale),
            ('swift-storage', parsed_args.swift_storage_flavor,
                parsed_args.swift_storage_scale),
        ):
            if flavor is None or scale == 0:
                self.log.debug("--{}-flavor not used".format(target))
            elif flavor not in flavors:
                self.predeploy_errors += 1
                self.log.error(message.format(target, flavor))
def _check_ironic_boot_configuration(self, node):
kernel_id, ramdisk_id = self._image_ids()
self.log.debug("Doing boot checks for {}".format(node.uuid))
message = ("Node uuid={uuid} has an incorrectly configured "
"{property}. Expected \"{expected}\" but got "
"\"{actual}\".")
if node.driver_info.get('deploy_ramdisk') != ramdisk_id:
self.predeploy_errors += 1
self.log.error(message.format(
uuid=node.uuid,
property='driver_info/deploy_ramdisk',
expected=ramdisk_id,
actual=node.driver_info.get('deploy_ramdisk')
))
if node.driver_info.get('deploy_kernel') != kernel_id:
self.predeploy_errors += 1
self.log.error(message.format(
uuid=node.uuid,
property='driver_info/deploy_kernel',
expected=ramdisk_id,
actual=node.driver_info.get('deploy_kernel')
))
if 'boot_option:local' not in node.properties.get('capabilities', ''):
self.predeploy_warnings += 1
self.log.warning(message.format(
uuid=node.uuid,
property='properties/capabilities',
expected='boot_option:local',
actual=node.properties.get('capabilities')
))
def get_parser(self, prog_name):
# add_help doesn't work properly, set it to False:
parser = argparse.ArgumentParser(
description=self.get_description(),
prog=prog_name,
add_help=False
)
main_group = parser.add_mutually_exclusive_group(required=True)
main_group.add_argument(
'--plan',
help=_("The Name or UUID of the Tuskar plan to deploy.")
)
main_group.add_argument(
'--templates', nargs='?', const=TRIPLEO_HEAT_TEMPLATES,
help=_("The directory containing the Heat templates to deploy"))
parser.add_argument('--stack',
help=_("Stack name to create or update"),
default='overcloud')
parser.add_argument('-t', '--timeout', metavar='<TIMEOUT>',
type=int, default=240,
help=_('Deployment timeout in minutes.'))
parser.add_argument('--control-scale', type=int,
help=_('New number of control nodes.'))
parser.add_argument('--compute-scale', type=int,
help=_('New number of compute nodes.'))
parser.add_argument('--ceph-storage-scale', type=int,
help=_('New number of ceph storage nodes.'))
parser.add_argument('--block-storage-scale', type=int,
help=_('New number of cinder storage nodes.'))
parser.add_argument('--swift-storage-scale', type=int,
help=_('New number of swift storage nodes.'))
parser.add_argument('--control-flavor',
help=_("Nova flavor to use for control nodes."))
parser.add_argument('--compute-flavor',
help=_("Nova flavor to use for compute nodes."))
parser.add_argument('--ceph-storage-flavor',
help=_("Nova flavor to use for ceph storage "
"nodes."))
parser.add_argument('--block-storage-flavor',
help=_("Nova flavor to use for cinder storage "
"nodes."))
parser.add_argument('--swift-storage-flavor',
help=_("Nova flavor to use for swift storage "
"nodes."))
parser.add_argument('--neutron-flat-networks',
help=_('Comma separated list of physical_network '
'names with which flat networks can be '
'created. Use * to allow flat networks '
'with arbitrary physical_network names.'))
parser.add_argument('--neutron-physical-bridge',
help=_('Deprecated.'))
parser.add_argument('--neutron-bridge-mappings',
help=_('Comma separated list of bridge mappings. '
'(default: datacentre:br-ex)'))
parser.add_argument('--neutron-public-interface',
help=_('Deprecated.'))
parser.add_argument('--hypervisor-neutron-public-interface',
default='nic1', help=_('Deprecated.'))
parser.add_argument('--neutron-network-type',
help=_('The network type for tenant networks.'))
parser.add_argument('--neutron-tunnel-types',
help=_('Network types supported by the agent '
'(gre and/or vxlan).'))
parser.add_argument('--neutron-tunnel-id-ranges',
default="1:1000",
help=_("Ranges of GRE tunnel IDs to make "
"available for tenant network allocation"),)
parser.add_argument('--neutron-vni-ranges',
default="1:1000",
help=_("Ranges of VXLAN VNI IDs to make "
"available for tenant network allocation"),)
parser.add_argument('--neutron-disable-tunneling',
dest='neutron_disable_tunneling',
action="store_const", const=True,
help=_('Disables tunneling.')),
parser.add_argument('--neutron-network-vlan-ranges',
help=_('Comma separated list of '
'<physical_network>:<vlan_min>:<vlan_max> '
'or <physical_network> specifying '
'physical_network names usable for VLAN '
'provider and tenant networks, as well as '
'ranges of VLAN tags on each available for '
'allocation to tenant networks. '
'(ex: datacentre:1:1000)'))
parser.add_argument('--neutron-mechanism-drivers',
help=_('An ordered list of extension driver '
'entrypoints to be loaded from the '
'neutron.ml2.extension_drivers namespace.'))
parser.add_argument('--libvirt-type',
default='kvm',
choices=['kvm', 'qemu'],
help=_('Libvirt domain type. (default: kvm)'))
parser.add_argument('--ntp-server',
help=_('The NTP for overcloud nodes.'))
parser.add_argument(
'--tripleo-root',
default=os.environ.get('TRIPLEO_ROOT', '/etc/tripleo'),
help=_('The root directory for TripleO templates.')
)
parser.add_argument(
'--no-proxy',
default=os.environ.get('no_proxy', ''),
help=_('A comma separated list of hosts that should not be '
'proxied.')
)
parser.add_argument(
'-O', '--output-dir', metavar='<OUTPUT DIR>',
help=_('Directory to write Tuskar template files into. It will be '
'created if it does not exist. If not provided a temporary '
'directory will be used.')
)
parser.add_argument(
'-e', '--environment-file', metavar='<HEAT ENVIRONMENT FILE>',
action='append', dest='environment_files',
help=_('Environment files to be passed to the heat stack-create '
'or heat stack-update command. (Can be specified more than '
'once.)')
)
parser.add_argument(
'--validation-errors-fatal',
action='store_true',
default=False,
help=_('Exit if there are errors from the configuration '
'pre-checks. Ignoring these errors will likely cause your '
'deploy to fail.')
)
parser.add_argument(
'--validation-warnings-fatal',
action='store_true',
default=False,
help=_('Exit if there are warnings from the configuration '
'pre-checks.')
)
reg_group = parser.add_argument_group('Registration Parameters')
reg_group.add_argument(
'--rhel-reg',
action='store_true',
help=_('Register overcloud nodes to the customer portal or a '
'satellite.')
)
reg_group.add_argument(
'--reg-method',
choices=['satellite', 'portal'],
default='satellite',
help=_('RHEL registration method to use for the overcloud nodes.')
)
reg_group.add_argument(
'--reg-org',
default='',
help=_('Organization key to use for registration.')
)
reg_group.add_argument(
'--reg-force',
action='store_true',
help=_('Register the system even if it is already registered.')
)
reg_group.add_argument(
'--reg-sat-url',
default='',
help=_('Satellite server to register overcloud nodes.')
)
reg_group.add_argument(
'--reg-activation-key',
default='',
help=_('Activation key to use for registration.')
)
return parser
    def take_action(self, parsed_args):
        """Entry point for the `overcloud deploy` command.

        Validates arguments, runs pre-deploy checks, performs the
        Heat/Tuskar deployment, writes the overcloudrc and tempest input
        files and (on first deploy) runs post-configuration.

        :param parsed_args: parsed CLI arguments
        :returns: True on success, False when the deployment failed,
            None when a fatal validation check aborts early
        """
        self.log.debug("take_action(%s)" % parsed_args)
        self._validate_args(parsed_args)

        errors, warnings = self._predeploy_verify_capabilities(parsed_args)
        if errors > 0:
            self.log.error(
                "Configuration has %d errors, fix them before proceeding. "
                "Ignoring these errors is likely to lead to a failed deploy.",
                errors)
            if parsed_args.validation_warnings_fatal or \
                    parsed_args.validation_errors_fatal:
                return
        if warnings > 0:
            self.log.error(
                "Configuration has %d warnings, fix them before proceeding. ",
                warnings)
            if parsed_args.validation_warnings_fatal:
                return
        else:
            self.log.info("SUCCESS: No warnings or errors in deploy "
                          "configuration, proceeding.")

        clients = self.app.client_manager
        orchestration_client = clients.rdomanager_oscplugin.orchestration()

        stack = self._get_stack(orchestration_client, parsed_args.stack)
        # Remember whether this is an initial create; postconfig only runs
        # on first deployment.
        stack_create = stack is None

        try:
            self._pre_heat_deploy()

            if parsed_args.rhel_reg:
                # Registration needs its mandatory options up front.
                if parsed_args.reg_method == 'satellite':
                    sat_required_args = (parsed_args.reg_org and
                                         parsed_args.reg_sat_url and
                                         parsed_args.reg_activation_key)
                    if not sat_required_args:
                        raise exceptions.DeploymentError(
                            "ERROR: In order to use satellite registration, "
                            "you must specify --reg-org, --reg-sat-url, and "
                            "--reg-activation-key.")
                else:
                    portal_required_args = (parsed_args.reg_org and
                                            parsed_args.reg_activation_key)
                    if not portal_required_args:
                        raise exceptions.DeploymentError(
                            "ERROR: In order to use portal registration, you "
                            "must specify --reg-org, and "
                            "--reg-activation-key.")

            if parsed_args.templates:
                self._deploy_tripleo_heat_templates(stack, parsed_args)
            else:
                self._deploy_tuskar(stack, parsed_args)

            # Get a new copy of the stack after stack update/create. If it was
            # a create then the previous stack object would be None.
            stack = self._get_stack(orchestration_client, parsed_args.stack)
            self._create_overcloudrc(stack, parsed_args)
            self._create_tempest_deployer_input()

            if stack_create:
                self._deploy_postconfig(stack, parsed_args)

            overcloud_endpoint = self._get_overcloud_endpoint(stack)
            print("Overcloud Endpoint: {0}".format(overcloud_endpoint))
            print("Overcloud Deployed")
            return True
        except exceptions.DeploymentError as err:
            print("Deployment failed: ", err, file=sys.stderr)
            return False
|
rdo-management/python-rdomanager-oscplugin | rdomanager_oscplugin/v1/overcloud_deploy.py | DeployOvercloud._deploy_tripleo_heat_templates | python | def _deploy_tripleo_heat_templates(self, stack, parsed_args):
clients = self.app.client_manager
network_client = clients.network
parameters = self._update_paramaters(
parsed_args, network_client, stack)
utils.check_nodes_count(
self.app.client_manager.rdomanager_oscplugin.baremetal(),
stack,
parameters,
{
'ControllerCount': 1,
'ComputeCount': 1,
'ObjectStorageCount': 0,
'BlockStorageCount': 0,
'CephStorageCount': 0,
}
)
tht_root = parsed_args.templates
print("Deploying templates in the directory {0}".format(
os.path.abspath(tht_root)))
self.log.debug("Creating Environment file")
env_path = utils.create_environment_file()
if stack is None:
self.log.debug("Creating Keystone certificates")
keystone_pki.generate_certs_into_json(env_path, False)
resource_registry_path = os.path.join(tht_root, RESOURCE_REGISTRY_NAME)
environments = [resource_registry_path, env_path]
if parsed_args.rhel_reg:
reg_env = self._create_registration_env(parsed_args)
environments.extend(reg_env)
if parsed_args.environment_files:
environments.extend(parsed_args.environment_files)
overcloud_yaml = os.path.join(tht_root, OVERCLOUD_YAML_NAME)
self._heat_deploy(stack, parsed_args.stack, overcloud_yaml, parameters,
environments, parsed_args.timeout) | Deploy the fixed templates in TripleO Heat Templates | train | https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/v1/overcloud_deploy.py#L475-L520 | null | class DeployOvercloud(command.Command):
"""Deploy Overcloud"""
log = logging.getLogger(__name__ + ".DeployOvercloud")
predeploy_errors = 0
predeploy_warnings = 0
    def set_overcloud_passwords(self, parameters, parsed_args):
        """Add the generated overcloud service passwords to *parameters*.

        Generates the overcloud password set, stores it on
        ``self.passwords`` and writes each password into the Heat
        parameters — plain names for tripleo-heat-templates deployments,
        role-scoped ('Controller-1::...') names for Tuskar plans.

        :param parameters: A dictionary for the passwords to be added to
        :type parameters: dict
        :param parsed_args: parsed CLI arguments; ``templates`` selects
            the parameter naming scheme
        """
        undercloud_ceilometer_snmpd_password = utils.get_config_value(
            "auth", "undercloud_ceilometer_snmpd_password")

        self.passwords = passwords = utils.generate_overcloud_passwords()
        ceilometer_pass = passwords['OVERCLOUD_CEILOMETER_PASSWORD']
        ceilometer_secret = passwords['OVERCLOUD_CEILOMETER_SECRET']
        if parsed_args.templates:
            parameters['AdminPassword'] = passwords['OVERCLOUD_ADMIN_PASSWORD']
            parameters['AdminToken'] = passwords['OVERCLOUD_ADMIN_TOKEN']
            parameters['CeilometerPassword'] = ceilometer_pass
            parameters['CeilometerMeteringSecret'] = ceilometer_secret
            parameters['CinderPassword'] = passwords[
                'OVERCLOUD_CINDER_PASSWORD']
            parameters['GlancePassword'] = passwords[
                'OVERCLOUD_GLANCE_PASSWORD']
            parameters['HeatPassword'] = passwords['OVERCLOUD_HEAT_PASSWORD']
            parameters['HeatStackDomainAdminPassword'] = passwords[
                'OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD']
            parameters['NeutronPassword'] = passwords[
                'OVERCLOUD_NEUTRON_PASSWORD']
            parameters['NovaPassword'] = passwords['OVERCLOUD_NOVA_PASSWORD']
            parameters['SwiftHashSuffix'] = passwords['OVERCLOUD_SWIFT_HASH']
            parameters['SwiftPassword'] = passwords['OVERCLOUD_SWIFT_PASSWORD']
            parameters['SnmpdReadonlyUserPassword'] = (
                undercloud_ceilometer_snmpd_password)
        else:
            # Tuskar plans scope every parameter to a role; the same
            # password may be repeated for each role that needs it.
            parameters['Controller-1::AdminPassword'] = passwords[
                'OVERCLOUD_ADMIN_PASSWORD']
            parameters['Controller-1::AdminToken'] = passwords[
                'OVERCLOUD_ADMIN_TOKEN']
            parameters['Compute-1::AdminPassword'] = passwords[
                'OVERCLOUD_ADMIN_PASSWORD']
            parameters['Controller-1::SnmpdReadonlyUserPassword'] = (
                undercloud_ceilometer_snmpd_password)
            parameters['Cinder-Storage-1::SnmpdReadonlyUserPassword'] = (
                undercloud_ceilometer_snmpd_password)
            parameters['Swift-Storage-1::SnmpdReadonlyUserPassword'] = (
                undercloud_ceilometer_snmpd_password)
            parameters['Compute-1::SnmpdReadonlyUserPassword'] = (
                undercloud_ceilometer_snmpd_password)
            parameters['Controller-1::CeilometerPassword'] = ceilometer_pass
            parameters[
                'Controller-1::CeilometerMeteringSecret'] = ceilometer_secret
            parameters['Compute-1::CeilometerPassword'] = ceilometer_pass
            parameters[
                'Compute-1::CeilometerMeteringSecret'] = ceilometer_secret
            parameters['Controller-1::CinderPassword'] = (
                passwords['OVERCLOUD_CINDER_PASSWORD'])
            parameters['Controller-1::GlancePassword'] = (
                passwords['OVERCLOUD_GLANCE_PASSWORD'])
            parameters['Controller-1::HeatPassword'] = (
                passwords['OVERCLOUD_HEAT_PASSWORD'])
            parameters['Controller-1::HeatStackDomainAdminPassword'] = (
                passwords['OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD'])
            parameters['Controller-1::NeutronPassword'] = (
                passwords['OVERCLOUD_NEUTRON_PASSWORD'])
            parameters['Compute-1::NeutronPassword'] = (
                passwords['OVERCLOUD_NEUTRON_PASSWORD'])
            parameters['Controller-1::NovaPassword'] = (
                passwords['OVERCLOUD_NOVA_PASSWORD'])
            parameters['Compute-1::NovaPassword'] = (
                passwords['OVERCLOUD_NOVA_PASSWORD'])
            parameters['Controller-1::SwiftHashSuffix'] = (
                passwords['OVERCLOUD_SWIFT_HASH'])
            parameters['Controller-1::SwiftPassword'] = (
                passwords['OVERCLOUD_SWIFT_PASSWORD'])
def _get_stack(self, orchestration_client, stack_name):
"""Get the ID for the current deployed overcloud stack if it exists."""
try:
stack = orchestration_client.stacks.get(stack_name)
self.log.info("Stack found, will be doing a stack update")
return stack
except HTTPNotFound:
self.log.info("No stack found, will be doing a stack create")
def _update_paramaters(self, args, network_client, stack):
    """Build the parameter dict for a heat or tuskar deployment.

    Merges static defaults, generated overcloud passwords, the neutron
    ctlplane network ID, CLI overrides, HA/scale derived values, and
    (on initial create only) ceph cluster secrets.  Parameter names
    differ between the tripleo-heat-templates path (plain names) and
    the tuskar path ("Role-1::"-prefixed names).

    NOTE: the name "paramaters" is a historical typo; it is kept
    because other methods in this class call it by this name.

    :param args: parsed CLI namespace
    :param network_client: neutron client (used for the ctlplane net)
    :param stack: existing heat stack, or None for an initial create
    :returns: dict mapping parameter name -> value
    """
    if args.templates:
        # tripleo-heat-templates path: start from the static defaults,
        # plus create-only defaults when no stack exists yet.
        parameters = PARAMETERS.copy()
        if stack is None:
            parameters.update(NEW_STACK_PARAMETERS)
    else:
        # tuskar path: tuskar supplies its own defaults.
        parameters = {}

    self.log.debug("Generating overcloud passwords")
    self.set_overcloud_passwords(parameters, args)

    self.log.debug("Getting ctlplane from Neutron")
    net = network_client.api.find_attr('networks', 'ctlplane')
    parameters['NeutronControlPlaneID'] = net['id']

    if args.templates:
        # (heat parameter name, CLI attribute name) pairs that map
        # straight through when the user supplied a value.
        param_args = (
            ('NeutronPublicInterface', 'neutron_public_interface'),
            ('NeutronBridgeMappings', 'neutron_bridge_mappings'),
            ('NeutronFlatNetworks', 'neutron_flat_networks'),
            ('HypervisorNeutronPhysicalBridge', 'neutron_physical_bridge'),
            ('NtpServer', 'ntp_server'),
            ('ControllerCount', 'control_scale'),
            ('ComputeCount', 'compute_scale'),
            ('ObjectStorageCount', 'swift_storage_scale'),
            ('BlockStorageCount', 'block_storage_scale'),
            ('CephStorageCount', 'ceph_storage_scale'),
            ('OvercloudControlFlavor', 'control_flavor'),
            ('OvercloudComputeFlavor', 'compute_flavor'),
            ('OvercloudBlockStorageFlavor', 'block_storage_flavor'),
            ('OvercloudSwiftStorageFlavor', 'swift_storage_flavor'),
            ('OvercloudCephStorageFlavor', 'ceph_storage_flavor'),
            ('NeutronNetworkVLANRanges', 'neutron_network_vlan_ranges'),
            ('NeutronMechanismDrivers', 'neutron_mechanism_drivers')
        )

        # These may only be set at create time; changing them on an
        # existing stack is not supported.
        if stack is None:
            new_stack_args = (
                ('NeutronNetworkType', 'neutron_network_type'),
                ('NeutronTunnelIdRanges', 'neutron_tunnel_id_ranges'),
                ('NeutronTunnelTypes', 'neutron_tunnel_types'),
                ('NeutronVniRanges', 'neutron_vni_ranges'),
                ('NovaComputeLibvirtType', 'libvirt_type'),
            )
            param_args = param_args + new_stack_args

            if args.neutron_disable_tunneling is not None:
                neutron_enable_tunneling = (
                    not args.neutron_disable_tunneling)
                parameters.update({
                    'NeutronEnableTunnelling': neutron_enable_tunneling,
                })
    else:
        # tuskar path: the same settings, but addressed per role.
        param_args = (
            ('Controller-1::NeutronPublicInterface',
             'neutron_public_interface'),
            ('Compute-1::NeutronPublicInterface',
             'neutron_public_interface'),
            ('Controller-1::NeutronBridgeMappings',
             'neutron_bridge_mappings'),
            ('Compute-1::NeutronBridgeMappings',
             'neutron_bridge_mappings'),
            ('Controller-1::NeutronFlatNetworks', 'neutron_flat_networks'),
            ('Compute-1::NeutronFlatNetworks', 'neutron_flat_networks'),
            ('Compute-1::NeutronPhysicalBridge',
             'neutron_physical_bridge'),
            ('Controller-1::NtpServer', 'ntp_server'),
            ('Compute-1::NtpServer', 'ntp_server'),
            ('Controller-1::NeutronNetworkVLANRanges',
             'neutron_network_vlan_ranges'),
            ('Compute-1::NeutronNetworkVLANRanges',
             'neutron_network_vlan_ranges'),
            ('Controller-1::NeutronMechanismDrivers',
             'neutron_mechanism_drivers'),
            ('Compute-1::NeutronMechanismDrivers',
             'neutron_mechanism_drivers'),
            ('Controller-1::count', 'control_scale'),
            ('Compute-1::count', 'compute_scale'),
            ('Swift-Storage-1::count', 'swift_storage_scale'),
            ('Cinder-Storage-1::count', 'block_storage_scale'),
            ('Ceph-Storage-1::count', 'ceph_storage_scale'),
            ('Cinder-Storage-1::Flavor', 'block_storage_flavor'),
            ('Compute-1::Flavor', 'compute_flavor'),
            ('Controller-1::Flavor', 'control_flavor'),
            ('Swift-Storage-1::Flavor', 'swift_storage_flavor'),
            ('Ceph-Storage-1::Flavor', 'ceph_storage_flavor'),
        )
        if stack is None:
            new_stack_args = (
                ('Controller-1::NeutronNetworkType',
                 'neutron_network_type'),
                ('Compute-1::NeutronNetworkType', 'neutron_network_type'),
                ('Controller-1::NeutronTunnelTypes',
                 'neutron_tunnel_types'),
                ('Compute-1::NeutronTunnelTypes', 'neutron_tunnel_types'),
                ('Compute-1::NovaComputeLibvirtType', 'libvirt_type'),
                ('Controller-1::NeutronTunnelIdRanges',
                 'neutron_tunnel_id_ranges'),
                ('Controller-1::NeutronVniRanges', 'neutron_vni_ranges'),
                ('Compute-1::NeutronTunnelIdRanges',
                 'neutron_tunnel_id_ranges'),
                ('Compute-1::NeutronVniRanges', 'neutron_vni_ranges'),
            )
            param_args = param_args + new_stack_args

            if args.neutron_disable_tunneling is not None:
                neutron_enable_tunneling = (
                    not args.neutron_disable_tunneling)
                parameters.update({
                    'Controller-1::NeutronEnableTunnelling':
                    neutron_enable_tunneling,
                    'Compute-1::NeutronEnableTunnelling':
                    neutron_enable_tunneling,
                })

    # Update parameters from commandline
    for param, arg in param_args:
        if getattr(args, arg, None) is not None:
            parameters[param] = getattr(args, arg)

    # Scaling needs extra parameters
    # (only one of the two keys is ever populated, so max() picks the
    # requested controller count regardless of the deploy path)
    number_controllers = max((
        int(parameters.get('ControllerCount', 0)),
        int(parameters.get('Controller-1::count', 0))
    ))
    if number_controllers > 1:
        # HA deployment: NTP is mandatory so the controllers agree on
        # time, and L3 HA replaces agent failover.
        if not args.ntp_server:
            raise Exception('Specify --ntp-server when using multiple'
                            ' controllers (with HA).')

        if args.templates:
            parameters.update({
                'NeutronL3HA': True,
                'NeutronAllowL3AgentFailover': False,
            })
        else:
            parameters.update({
                'Controller-1::NeutronL3HA': True,
                'Controller-1::NeutronAllowL3AgentFailover': False,
                'Compute-1::NeutronL3HA': True,
                'Compute-1::NeutronAllowL3AgentFailover': False,
            })
    else:
        if args.templates:
            parameters.update({
                'NeutronL3HA': False,
                'NeutronAllowL3AgentFailover': False,
            })
        else:
            parameters.update({
                'Controller-1::NeutronL3HA': False,
                'Controller-1::NeutronAllowL3AgentFailover': False,
                'Compute-1::NeutronL3HA': False,
                'Compute-1::NeutronAllowL3AgentFailover': False,
            })

    # set at least 3 dhcp_agents_per_network
    dhcp_agents_per_network = (number_controllers if number_controllers and
                               number_controllers > 3 else 3)

    if args.templates:
        parameters.update({
            'NeutronDhcpAgentsPerNetwork': dhcp_agents_per_network,
        })
    else:
        parameters.update({
            'Controller-1::NeutronDhcpAgentsPerNetwork':
            dhcp_agents_per_network,
        })

    if max((int(parameters.get('CephStorageCount', 0)),
            int(parameters.get('Ceph-Storage-1::count', 0)))) > 0:
        # Ceph secrets are generated once at create time only; an
        # update must keep the existing cluster identity.
        # NOTE(review): uuid1() embeds host MAC/time; presumably fine
        # for a cluster FSID, but uuid4() would be fully random —
        # kept as-is for compatibility.
        if stack is None:
            parameters.update({
                'CephClusterFSID': six.text_type(uuid.uuid1()),
                'CephMonKey': utils.create_cephx_key(),
                'CephAdminKey': utils.create_cephx_key()
            })

    return parameters
def _create_registration_env(self, args):
    """Build the heat environment files for RHEL registration.

    Returns the resource registry, the static registration environment
    and a freshly generated temp file carrying the user-supplied
    registration parameters, in the order _heat_deploy() expects.

    :param args: parsed CLI namespace (reg_* options, templates)
    :returns: list of three environment file paths
    """
    if args.templates:
        tht_root = args.templates
    else:
        tht_root = TRIPLEO_HEAT_TEMPLATES
    environment = os.path.join(tht_root,
                               RHEL_REGISTRATION_EXTRACONFIG_NAME,
                               'environment-rhel-registration.yaml')
    registry = os.path.join(tht_root, RHEL_REGISTRATION_EXTRACONFIG_NAME,
                            'rhel-registration-resource-registry.yaml')
    user_env = ("parameter_defaults:\n"
                " rhel_reg_method: \"%(method)s\"\n"
                " rhel_reg_org: \"%(org)s\"\n"
                " rhel_reg_force: \"%(force)s\"\n"
                " rhel_reg_sat_url: \"%(sat_url)s\"\n"
                " rhel_reg_activation_key: \"%(activation_key)s\"\n"
                % {'method': args.reg_method,
                   'org': args.reg_org,
                   'force': args.reg_force,
                   'sat_url': args.reg_sat_url,
                   'activation_key': args.reg_activation_key})
    handle, user_env_file = tempfile.mkstemp()
    # BUG FIX: mkstemp() returns an already-open OS-level file
    # descriptor; the previous code reopened the path with open() and
    # leaked that descriptor.  Wrap the descriptor directly instead.
    with os.fdopen(handle, 'w') as temp_file:
        temp_file.write(user_env)
    # The temp file is intentionally left on disk — heat reads it
    # later when the environments are processed.
    return [registry, environment, user_env_file]
def _heat_deploy(self, stack, stack_name, template_path, parameters,
                 environments, timeout):
    """Create or update the overcloud heat stack and wait for it.

    :param stack: existing stack object, or None for an initial create
    :param stack_name: name of the stack to create/update
    :param template_path: path to the top-level heat template
    :param parameters: dict of heat parameters
    :param environments: list of environment file paths
    :param timeout: deployment timeout in minutes (falsy = heat default)
    :raises Exception: when the stack ends in a failed state
    """
    self.log.debug("Processing environment files")
    env_files, env = (
        template_utils.process_multiple_environments_and_files(
            environments))

    self.log.debug("Getting template contents")
    template_files, template = template_utils.get_template_contents(
        template_path)

    # heat needs every referenced file (templates + environments) in
    # one flat mapping.
    files = dict(list(template_files.items()) + list(env_files.items()))

    clients = self.app.client_manager
    orchestration_client = clients.rdomanager_oscplugin.orchestration()

    self.log.debug("Deploying stack: %s", stack_name)
    self.log.debug("Deploying template: %s", template)
    self.log.debug("Deploying parameters: %s", parameters)
    self.log.debug("Deploying environment: %s", env)
    self.log.debug("Deploying files: %s", files)

    stack_args = {
        'stack_name': stack_name,
        'template': template,
        'parameters': parameters,
        'environment': env,
        'files': files
    }

    if timeout:
        stack_args['timeout_mins'] = timeout

    if stack is None:
        self.log.info("Performing Heat stack create")
        orchestration_client.stacks.create(**stack_args)
    else:
        self.log.info("Performing Heat stack update")
        # Make sure existing parameters for stack are reused
        stack_args['existing'] = 'true'
        orchestration_client.stacks.update(stack.id, **stack_args)

    # Block until the stack reaches a terminal state.
    create_result = utils.wait_for_stack_ready(
        orchestration_client, stack_name)
    if not create_result:
        if stack is None:
            raise Exception("Heat Stack create failed.")
        else:
            raise Exception("Heat Stack update failed.")
def _get_overcloud_endpoint(self, stack):
for output in stack.to_dict().get('outputs', {}):
if output['output_key'] == 'KeystoneURL':
return output['output_value']
def _get_service_ips(self, stack):
service_ips = {}
for output in stack.to_dict().get('outputs', {}):
service_ips[output['output_key']] = output['output_value']
return service_ips
def _pre_heat_deploy(self):
    """Setup before the Heat stack create or update has been done.

    Verifies that nova reports usable hypervisor (ironic node)
    statistics before any stack operation is attempted.

    :returns: True when the check passes
    :raises exceptions.DeploymentError: when hypervisor stats are absent
    """
    clients = self.app.client_manager
    compute_client = clients.compute

    self.log.debug("Checking hypervisor stats")
    if utils.check_hypervisor_stats(compute_client) is None:
        raise exceptions.DeploymentError(
            "Expected hypervisor stats not met")
    return True
def _deploy_tuskar(self, stack, parsed_args):
    """Deploy (or update) the overcloud from a Tuskar plan.

    Downloads the plan's templates to *output_dir*, assembles the
    parameter dict, persists the parameters back to Tuskar, writes the
    templates to disk, then hands off to _heat_deploy().

    :param stack: existing heat stack, or None for an initial create
    :param parsed_args: parsed CLI namespace
    """
    clients = self.app.client_manager
    management = clients.rdomanager_oscplugin.management()
    network_client = clients.network

    # TODO(dmatthews): The Tuskar client has very similar code to this for
    # downloading templates. It should be refactored upstream so we can use
    # it.

    if parsed_args.output_dir:
        output_dir = parsed_args.output_dir
    else:
        output_dir = tempfile.mkdtemp()

    # NOTE(review): os.mkdir fails when the parent of a user-supplied
    # --output-dir does not exist; os.makedirs would be more forgiving
    # — TODO confirm intended behavior before changing.
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)

    management_plan = tuskarutils.find_resource(
        management.plans, parsed_args.plan)

    # retrieve templates
    templates = management.plans.templates(management_plan.uuid)

    parameters = self._update_paramaters(
        parsed_args, network_client, stack)

    utils.check_nodes_count(
        self.app.client_manager.rdomanager_oscplugin.baremetal(),
        stack,
        parameters,
        {
            'Controller-1::count': 1,
            'Compute-1::count': 1,
            'Swift-Storage-1::count': 0,
            'Cinder-Storage-1::count': 0,
            'Ceph-Storage-1::count': 0,
        }
    )

    if stack is None:
        # Initial create only: generate the keystone PKI material that
        # the controllers will use for token signing.
        ca_key_pem, ca_cert_pem = keystone_pki.create_ca_pair()
        signing_key_pem, signing_cert_pem = (
            keystone_pki.create_signing_pair(ca_key_pem, ca_cert_pem))
        parameters['Controller-1::KeystoneCACertificate'] = ca_cert_pem
        parameters['Controller-1::KeystoneSigningCertificate'] = (
            signing_cert_pem)
        parameters['Controller-1::KeystoneSigningKey'] = signing_key_pem

    # Save the parameters to Tuskar so they can be used when redeploying.
    # Tuskar expects to get all values as strings. So we convert them all
    # below.
    management.plans.patch(
        management_plan.uuid,
        [{'name': x[0], 'value': six.text_type(x[1])}
         for x in parameters.items()]
    )

    # write file for each key-value in templates
    print("The following templates will be written:")
    for template_name, template_content in templates.items():

        # It's possible to organize the role templates and their dependent
        # files into directories, in which case the template_name will
        # carry the directory information. If that's the case, first
        # create the directory structure (if it hasn't already been
        # created by another file in the templates list).
        template_dir = os.path.dirname(template_name)
        output_template_dir = os.path.join(output_dir, template_dir)
        if template_dir and not os.path.exists(output_template_dir):
            os.makedirs(output_template_dir)

        filename = os.path.join(output_dir, template_name)
        with open(filename, 'w+') as template_file:
            template_file.write(template_content)
        print(filename)

    overcloud_yaml = os.path.join(output_dir, 'plan.yaml')
    environment_yaml = os.path.join(output_dir, 'environment.yaml')
    environments = [environment_yaml, ]
    if parsed_args.rhel_reg:
        reg_env = self._create_registration_env(parsed_args)
        environments.extend(reg_env)
    if parsed_args.environment_files:
        environments.extend(parsed_args.environment_files)

    self._heat_deploy(stack, parsed_args.stack, overcloud_yaml, parameters,
                      environments, parsed_args.timeout)
def _create_overcloudrc(self, stack, parsed_args):
    """Write a ``<stack_name>rc`` shell file with overcloud credentials.

    The file exports the admin credentials and endpoint needed to talk
    to the freshly deployed overcloud, plus a no_proxy entry for its IP.

    :param stack: deployed heat stack (provides KeystoneURL, name)
    :param parsed_args: parsed CLI namespace (no_proxy)
    """
    overcloud_endpoint = self._get_overcloud_endpoint(stack)
    overcloud_ip = six.moves.urllib.parse.urlparse(
        overcloud_endpoint).hostname
    rc_params = {
        'NOVA_VERSION': '1.1',
        'COMPUTE_API_VERSION': '1.1',
        'OS_USERNAME': 'admin',
        'OS_TENANT_NAME': 'admin',
        'OS_NO_CACHE': 'True',
        'OS_CLOUDNAME': stack.stack_name,
        'no_proxy': "%(no_proxy)s,%(overcloud_ip)s" % {
            'no_proxy': parsed_args.no_proxy,
            'overcloud_ip': overcloud_ip,
        }
    }
    rc_params.update({
        'OS_PASSWORD': self.passwords['OVERCLOUD_ADMIN_PASSWORD'],
        'OS_AUTH_URL': self._get_overcloud_endpoint(stack),
    })
    # NOTE(review): exported values are not shell-quoted; a generated
    # password containing shell metacharacters would break sourcing
    # this file — TODO confirm generated passwords are shell-safe.
    with open('%src' % stack.stack_name, 'w') as f:
        for key, value in rc_params.items():
            f.write("export %(key)s=%(value)s\n" %
                    {'key': key, 'value': value})
def _create_tempest_deployer_input(self):
config = configparser.ConfigParser()
config.add_section('compute-feature-enabled')
# Does the test environment support obtaining instance serial console
# output? (default: true)
# set in [nova.serial_console]->enabled
config.set('compute-feature-enabled', 'console_output', 'false')
config.add_section('object-storage')
# Role to add to users created for swift tests to enable creating
# containers (default: 'Member')
# keystone role-list returns this role
config.set('object-storage', 'operator_role', 'swiftoperator')
config.add_section('orchestration')
# Role required for users to be able to manage stacks
# (default: 'heat_stack_owner')
# keystone role-list returns this role
config.set('orchestration', 'stack_owner_role', 'heat_stack_user')
config.add_section('volume')
# Name of the backend1 (must be declared in cinder.conf)
# (default: 'BACKEND_1')
# set in [cinder]->enabled_backends
config.set('volume', 'backend1_name', 'tripleo_iscsi')
config.add_section('volume-feature-enabled')
# Update bootable status of a volume Not implemented on icehouse
# (default: false)
# python-cinderclient supports set-bootable
config.set('volume-feature-enabled', 'bootable', 'true')
with open('tempest-deployer-input.conf', 'w+') as config_file:
config.write(config_file)
def _deploy_postconfig(self, stack, parsed_args):
    """Run post-create configuration against a new overcloud.

    Initializes keystone on the overcloud, registers the service
    endpoints (using per-service VIPs where the stack exposes them)
    and creates a demo nova flavor.  Only called after an initial
    stack create, not after updates.

    :param stack: deployed heat stack
    :param parsed_args: parsed CLI namespace
    """
    self.log.debug("_deploy_postconfig(%s)" % parsed_args)

    passwords = self.passwords

    overcloud_endpoint = self._get_overcloud_endpoint(stack)
    overcloud_ip = six.moves.urllib.parse.urlparse(
        overcloud_endpoint).hostname

    # Make sure direct requests to the overcloud bypass any proxy.
    no_proxy = [os.environ.get('no_proxy'), overcloud_ip]
    os.environ['no_proxy'] = ','.join(
        [x for x in no_proxy if x is not None])

    service_ips = self._get_service_ips(stack)

    utils.remove_known_hosts(overcloud_ip)

    keystone_ip = service_ips.get('KeystoneAdminVip')
    if not keystone_ip:
        keystone_ip = overcloud_ip

    keystone.initialize(
        keystone_ip,
        passwords['OVERCLOUD_ADMIN_TOKEN'],
        'admin@example.com',
        passwords['OVERCLOUD_ADMIN_PASSWORD'],
        public=overcloud_ip,
        user='heat-admin')

    # NOTE(bcrochet): Bad hack. Remove the ssl_port info from the
    # os_cloud_config.SERVICES dictionary
    # FIX: use six.iteritems() instead of dict.iteritems() — the latter
    # is Python-2-only and inconsistent with the loop below.
    for service_name, data in six.iteritems(keystone.SERVICES):
        data.pop('ssl_port', None)

    services = {}
    for service, data in six.iteritems(utils.SERVICE_LIST):
        service_data = data.copy()
        service_data.pop('password_field', None)
        password_field = data.get('password_field')
        if password_field:
            service_data['password'] = passwords[password_field]

        # e.g. "novav3" -> "NovaInternalVip": strip API version suffix
        # before looking up the per-service VIP output.
        service_name = re.sub('v[0-9]+', '',
                              service.capitalize() + 'InternalVip')
        internal_vip = service_ips.get(service_name)
        if internal_vip:
            service_data['internal_host'] = internal_vip
        services.update({service: service_data})

    keystone_client = clients.get_keystone_client(
        'admin',
        passwords['OVERCLOUD_ADMIN_PASSWORD'],
        'admin',
        overcloud_endpoint)
    keystone.setup_endpoints(
        services,
        client=keystone_client,
        os_auth_url=overcloud_endpoint,
        public_host=overcloud_ip)

    compute_client = clients.get_nova_bm_client(
        'admin',
        passwords['OVERCLOUD_ADMIN_PASSWORD'],
        'admin',
        overcloud_endpoint)
    compute_client.flavors.create('m1.demo', 512, 1, 10, 'auto')
def _validate_args(self, parsed_args):
network_type = parsed_args.neutron_network_type
tunnel_types = parsed_args.neutron_tunnel_types
if network_type and tunnel_types:
# Validate that neutron_network_type is in neutron_tunnel_types
if network_type not in tunnel_types:
raise oscexc.CommandError("Neutron network type must be in "
"Neutron tunnel types "
"(%s) " % tunnel_types)
elif network_type and not tunnel_types:
raise oscexc.CommandError("Neutron tunnel types must be specified "
"when Neutron network type is specified")
def _predeploy_verify_capabilities(self, parsed_args):
    """Run all pre-deployment sanity checks.

    Resets and then accumulates self.predeploy_errors /
    self.predeploy_warnings while checking boot images, flavor
    existence, per-node ironic boot configuration, and
    profile-to-flavor node counts.

    :param parsed_args: parsed CLI namespace
    :returns: (error_count, warning_count) tuple
    """
    self.predeploy_errors = 0
    self.predeploy_warnings = 0
    self.log.debug("Starting _pre_verify_capabilities")

    bm_client = self.app.client_manager.rdomanager_oscplugin.baremetal()

    self._check_boot_images()

    self._check_flavors_exist(parsed_args)

    for node in bm_client.node.list():
        # Re-fetch: list() returns sparse records without driver_info.
        node = bm_client.node.get(node.uuid)
        self.log.debug("Checking config for Node {0}".format(node.uuid))
        self._check_ironic_boot_configuration(node)

    flavor_profile_map = self._collect_flavor_profiles([
        parsed_args.control_flavor,
        parsed_args.compute_flavor,
        parsed_args.ceph_storage_flavor,
        parsed_args.block_storage_flavor,
        parsed_args.swift_storage_flavor,
    ])
    node_profile_map = self._collect_node_profiles()

    for target, flavor, scale in [
        ('control', parsed_args.control_flavor,
         parsed_args.control_scale),
        ('compute', parsed_args.compute_flavor,
         parsed_args.compute_scale),
        ('ceph-storage', parsed_args.ceph_storage_flavor,
         parsed_args.ceph_storage_scale),
        ('block-storage', parsed_args.block_storage_flavor,
         parsed_args.block_storage_scale),
        ('swift-storage', parsed_args.swift_storage_flavor,
         parsed_args.swift_storage_scale),
    ]:
        # Roles that will not be deployed need no profile checks.
        if scale == 0 or flavor is None:
            self.log.debug("Skipping verification of %s profiles because "
                           "none will be deployed", flavor)
            continue
        self._check_profiles(
            target, flavor, scale,
            flavor_profile_map,
            node_profile_map)

    if len(node_profile_map.get(None, [])) > 0:
        self.predeploy_warnings += 1
        self.log.warning(
            "There are %d ironic nodes with no profile that will "
            "not be used: %s",
            len(node_profile_map[None]),
            ', '.join(node_profile_map[None])
        )

    return self.predeploy_errors, self.predeploy_warnings
# Cached glance image IDs for the bm-deploy kernel/ramdisk, resolved
# lazily by _image_ids().  Double underscore => name-mangled per class.
__kernel_id = None
__ramdisk_id = None
def _image_ids(self):
    """Return (kernel_id, ramdisk_id) glance image IDs, cached.

    Looks up the 'bm-deploy-kernel' and 'bm-deploy-ramdisk' images.
    On a failed lookup the corresponding ID stays None (an error is
    logged); the result — including Nones — is cached for later calls.
    """
    if self.__kernel_id is not None and self.__ramdisk_id is not None:
        return self.__kernel_id, self.__ramdisk_id

    image_client = self.app.client_manager.image
    kernel_id, ramdisk_id = None, None
    try:
        kernel_id = osc_utils.find_resource(
            image_client.images, 'bm-deploy-kernel').id
    except AttributeError as e:
        self.log.error("Please make sure there is only one image named "
                       "'bm-deploy-kernel' in glance.")
        self.log.exception(e)

    try:
        ramdisk_id = osc_utils.find_resource(
            image_client.images, 'bm-deploy-ramdisk').id
    except AttributeError as e:
        self.log.error("Please make sure there is only one image "
                       "named 'bm-deploy-ramdisk' in glance.")
        self.log.exception(e)

    self.log.debug("Using kernel ID: {0} and ramdisk ID: {1}".format(
        kernel_id, ramdisk_id))

    self.__kernel_id = kernel_id
    self.__ramdisk_id = ramdisk_id
    return kernel_id, ramdisk_id
def _collect_node_profiles(self):
"""Gather a map of profile -> [node_uuid] for ironic boot profiles"""
bm_client = self.app.client_manager.rdomanager_oscplugin.baremetal()
# map of profile capability -> [node_uuid, ...]
profile_map = collections.defaultdict(list)
for node in bm_client.node.list(maintenance=False):
node = bm_client.node.get(node.uuid)
profiles = re.findall(r'profile:(.*?)(?:,|$)',
node.properties.get('capabilities', ''))
if not profiles:
profile_map[None].append(node.uuid)
for p in profiles:
profile_map[p].append(node.uuid)
return dict(profile_map)
def _collect_flavor_profiles(self, flavors):
    """Map flavor name -> 'capabilities:profile' key value.

    Only flavors whose names appear in *flavors* are inspected.  A
    flavor with an empty-string profile maps to None.  Also warns
    (counter) when a used flavor lacks 'capabilities:boot_option=local'.

    :param flavors: iterable of flavor names used by this deployment
    :returns: dict of flavor name -> profile string or None
    """
    compute_client = self.app.client_manager.compute

    flavor_profiles = {}

    for flavor in compute_client.flavors.list():
        if flavor.name not in flavors:
            self.log.debug("Flavor {} isn't used in this deployment, "
                           "skipping it".format(flavor.name))
            continue

        profile = flavor.get_keys().get('capabilities:profile')
        # Normalize the empty string to None so "no profile" has one
        # representation. (An unset key is already None via .get().)
        if profile == '':
            flavor_profiles[flavor.name] = None
        else:
            flavor_profiles[flavor.name] = profile

        # NOTE(review): this increments the *warning* counter but logs
        # at error level — inconsistent severity, flagged for cleanup.
        if flavor.get_keys().get('capabilities:boot_option', '') \
                != 'local':
            self.predeploy_warnings += 1
            self.log.error(
                'Flavor %s "capabilities:boot_option" is not set to '
                '"local". Nodes must have ability to PXE boot from '
                'deploy image.', flavor.name)
            self.log.error(
                'Recommended solution: openstack flavor set --property '
                '"cpu_arch"="x86_64" --property '
                '"capabilities:boot_option"="local" ' + flavor.name)

    return flavor_profiles
def _check_profiles(self, target, flavor, scale,
                    flavor_profile_map,
                    node_profile_map):
    """Verify enough ironic nodes are tagged for one role's flavor.

    Increments self.predeploy_errors when the flavor has no profile,
    or when fewer nodes carry the profile than the requested scale.

    :param target: role name used in the CLI flag (e.g. 'control')
    :param flavor: flavor name selected for the role
    :param scale: requested node count for the role
    :param flavor_profile_map: flavor name -> profile (or None)
    :param node_profile_map: profile -> [node uuids]
    """
    if flavor_profile_map.get(flavor) is None:
        self.predeploy_errors += 1
        # NOTE(review): message says "Warning:" but this is counted and
        # logged as an error — flagged for cleanup.
        self.log.error(
            'Warning: The flavor selected for --%s-flavor "%s" has no '
            'profile associated', target, flavor)
        self.log.error(
            'Recommendation: assign a profile with openstack flavor set '
            '--property "capabilities:profile"="PROFILE_NAME" %s',
            flavor)
        return

    if len(node_profile_map.get(flavor_profile_map[flavor], [])) < scale:
        self.predeploy_errors += 1
        self.log.error(
            "Error: %s of %s requested ironic nodes tagged to profile %s "
            "(for flavor %s)",
            len(node_profile_map.get(flavor_profile_map[flavor], [])),
            scale, flavor_profile_map[flavor], flavor
        )
        self.log.error(
            "Recommendation: tag more nodes using ironic node-update "
            "<NODE ID> replace properties/capabilities=profile:%s,"
            "boot_option:local", flavor_profile_map[flavor])
def _check_boot_images(self):
    """Error (counter) when the bm-deploy kernel/ramdisk are missing.

    Relies on _image_ids() returning None for each image that could
    not be found in glance.
    """
    kernel_id, ramdisk_id = self._image_ids()
    message = ("No image with the name '{}' found - make "
               "sure you've uploaded boot images")
    if kernel_id is None:
        self.predeploy_errors += 1
        self.log.error(message.format('bm-deploy-kernel'))
    if ramdisk_id is None:
        self.predeploy_errors += 1
        self.log.error(message.format('bm-deploy-ramdisk'))
def _check_flavors_exist(self, parsed_args):
    """Ensure that selected flavors (--ROLE-flavor) exist in nova.

    Roles with no flavor or a zero scale are skipped; a missing flavor
    increments self.predeploy_errors.
    """
    compute_client = self.app.client_manager.compute

    # Index nova's flavors by name for O(1) membership tests.
    flavors = {f.name: f for f in compute_client.flavors.list()}

    message = "Provided --{}-flavor, '{}', does not exist"

    for target, flavor, scale in (
        ('control', parsed_args.control_flavor,
         parsed_args.control_scale),
        ('compute', parsed_args.compute_flavor,
         parsed_args.compute_scale),
        ('ceph-storage', parsed_args.ceph_storage_flavor,
         parsed_args.ceph_storage_scale),
        ('block-storage', parsed_args.block_storage_flavor,
         parsed_args.block_storage_scale),
        ('swift-storage', parsed_args.swift_storage_flavor,
         parsed_args.swift_storage_scale),
    ):
        if flavor is None or scale == 0:
            self.log.debug("--{}-flavor not used".format(target))
        elif flavor not in flavors:
            self.predeploy_errors += 1
            self.log.error(message.format(target, flavor))
def _check_ironic_boot_configuration(self, node):
kernel_id, ramdisk_id = self._image_ids()
self.log.debug("Doing boot checks for {}".format(node.uuid))
message = ("Node uuid={uuid} has an incorrectly configured "
"{property}. Expected \"{expected}\" but got "
"\"{actual}\".")
if node.driver_info.get('deploy_ramdisk') != ramdisk_id:
self.predeploy_errors += 1
self.log.error(message.format(
uuid=node.uuid,
property='driver_info/deploy_ramdisk',
expected=ramdisk_id,
actual=node.driver_info.get('deploy_ramdisk')
))
if node.driver_info.get('deploy_kernel') != kernel_id:
self.predeploy_errors += 1
self.log.error(message.format(
uuid=node.uuid,
property='driver_info/deploy_kernel',
expected=ramdisk_id,
actual=node.driver_info.get('deploy_kernel')
))
if 'boot_option:local' not in node.properties.get('capabilities', ''):
self.predeploy_warnings += 1
self.log.warning(message.format(
uuid=node.uuid,
property='properties/capabilities',
expected='boot_option:local',
actual=node.properties.get('capabilities')
))
def get_parser(self, prog_name):
    """Build the argparse parser for the ``overcloud deploy`` command.

    Either --plan (tuskar path) or --templates (tripleo-heat-templates
    path) is required; the two are mutually exclusive.  A separate
    group carries the RHEL registration options.
    """
    # add_help doesn't work properly, set it to False:
    parser = argparse.ArgumentParser(
        description=self.get_description(),
        prog=prog_name,
        add_help=False
    )
    main_group = parser.add_mutually_exclusive_group(required=True)
    main_group.add_argument(
        '--plan',
        help=_("The Name or UUID of the Tuskar plan to deploy.")
    )
    main_group.add_argument(
        '--templates', nargs='?', const=TRIPLEO_HEAT_TEMPLATES,
        help=_("The directory containing the Heat templates to deploy"))
    parser.add_argument('--stack',
                        help=_("Stack name to create or update"),
                        default='overcloud')
    parser.add_argument('-t', '--timeout', metavar='<TIMEOUT>',
                        type=int, default=240,
                        help=_('Deployment timeout in minutes.'))
    # Per-role scale and flavor selection.
    parser.add_argument('--control-scale', type=int,
                        help=_('New number of control nodes.'))
    parser.add_argument('--compute-scale', type=int,
                        help=_('New number of compute nodes.'))
    parser.add_argument('--ceph-storage-scale', type=int,
                        help=_('New number of ceph storage nodes.'))
    parser.add_argument('--block-storage-scale', type=int,
                        help=_('New number of cinder storage nodes.'))
    parser.add_argument('--swift-storage-scale', type=int,
                        help=_('New number of swift storage nodes.'))
    parser.add_argument('--control-flavor',
                        help=_("Nova flavor to use for control nodes."))
    parser.add_argument('--compute-flavor',
                        help=_("Nova flavor to use for compute nodes."))
    parser.add_argument('--ceph-storage-flavor',
                        help=_("Nova flavor to use for ceph storage "
                               "nodes."))
    parser.add_argument('--block-storage-flavor',
                        help=_("Nova flavor to use for cinder storage "
                               "nodes."))
    parser.add_argument('--swift-storage-flavor',
                        help=_("Nova flavor to use for swift storage "
                               "nodes."))
    # Neutron configuration passed through to _update_paramaters().
    parser.add_argument('--neutron-flat-networks',
                        help=_('Comma separated list of physical_network '
                               'names with which flat networks can be '
                               'created. Use * to allow flat networks '
                               'with arbitrary physical_network names.'))
    parser.add_argument('--neutron-physical-bridge',
                        help=_('Deprecated.'))
    parser.add_argument('--neutron-bridge-mappings',
                        help=_('Comma separated list of bridge mappings. '
                               '(default: datacentre:br-ex)'))
    parser.add_argument('--neutron-public-interface',
                        help=_('Deprecated.'))
    parser.add_argument('--hypervisor-neutron-public-interface',
                        default='nic1', help=_('Deprecated.'))
    parser.add_argument('--neutron-network-type',
                        help=_('The network type for tenant networks.'))
    parser.add_argument('--neutron-tunnel-types',
                        help=_('Network types supported by the agent '
                               '(gre and/or vxlan).'))
    parser.add_argument('--neutron-tunnel-id-ranges',
                        default="1:1000",
                        help=_("Ranges of GRE tunnel IDs to make "
                               "available for tenant network allocation"),)
    parser.add_argument('--neutron-vni-ranges',
                        default="1:1000",
                        help=_("Ranges of VXLAN VNI IDs to make "
                               "available for tenant network allocation"),)
    # NOTE(review): the trailing comma after this call makes the
    # statement a one-element tuple expression — harmless, but the
    # comma should be removed in a follow-up.
    parser.add_argument('--neutron-disable-tunneling',
                        dest='neutron_disable_tunneling',
                        action="store_const", const=True,
                        help=_('Disables tunneling.')),
    parser.add_argument('--neutron-network-vlan-ranges',
                        help=_('Comma separated list of '
                               '<physical_network>:<vlan_min>:<vlan_max> '
                               'or <physical_network> specifying '
                               'physical_network names usable for VLAN '
                               'provider and tenant networks, as well as '
                               'ranges of VLAN tags on each available for '
                               'allocation to tenant networks. '
                               '(ex: datacentre:1:1000)'))
    parser.add_argument('--neutron-mechanism-drivers',
                        help=_('An ordered list of extension driver '
                               'entrypoints to be loaded from the '
                               'neutron.ml2.extension_drivers namespace.'))
    parser.add_argument('--libvirt-type',
                        default='kvm',
                        choices=['kvm', 'qemu'],
                        help=_('Libvirt domain type. (default: kvm)'))
    parser.add_argument('--ntp-server',
                        help=_('The NTP for overcloud nodes.'))
    parser.add_argument(
        '--tripleo-root',
        default=os.environ.get('TRIPLEO_ROOT', '/etc/tripleo'),
        help=_('The root directory for TripleO templates.')
    )
    parser.add_argument(
        '--no-proxy',
        default=os.environ.get('no_proxy', ''),
        help=_('A comma separated list of hosts that should not be '
               'proxied.')
    )
    parser.add_argument(
        '-O', '--output-dir', metavar='<OUTPUT DIR>',
        help=_('Directory to write Tuskar template files into. It will be '
               'created if it does not exist. If not provided a temporary '
               'directory will be used.')
    )
    parser.add_argument(
        '-e', '--environment-file', metavar='<HEAT ENVIRONMENT FILE>',
        action='append', dest='environment_files',
        help=_('Environment files to be passed to the heat stack-create '
               'or heat stack-update command. (Can be specified more than '
               'once.)')
    )
    parser.add_argument(
        '--validation-errors-fatal',
        action='store_true',
        default=False,
        help=_('Exit if there are errors from the configuration '
               'pre-checks. Ignoring these errors will likely cause your '
               'deploy to fail.')
    )
    parser.add_argument(
        '--validation-warnings-fatal',
        action='store_true',
        default=False,
        help=_('Exit if there are warnings from the configuration '
               'pre-checks.')
    )
    reg_group = parser.add_argument_group('Registration Parameters')
    reg_group.add_argument(
        '--rhel-reg',
        action='store_true',
        help=_('Register overcloud nodes to the customer portal or a '
               'satellite.')
    )
    reg_group.add_argument(
        '--reg-method',
        choices=['satellite', 'portal'],
        default='satellite',
        help=_('RHEL registration method to use for the overcloud nodes.')
    )
    reg_group.add_argument(
        '--reg-org',
        default='',
        help=_('Organization key to use for registration.')
    )
    reg_group.add_argument(
        '--reg-force',
        action='store_true',
        help=_('Register the system even if it is already registered.')
    )
    reg_group.add_argument(
        '--reg-sat-url',
        default='',
        help=_('Satellite server to register overcloud nodes.')
    )
    reg_group.add_argument(
        '--reg-activation-key',
        default='',
        help=_('Activation key to use for registration.')
    )
    return parser
def take_action(self, parsed_args):
    """Entry point: validate, pre-check, then deploy the overcloud.

    Runs argument validation and the pre-deploy capability checks,
    then performs the heat or tuskar deploy, writes the rc and tempest
    files, and (on initial create only) the post-deploy configuration.

    :returns: True on success, False when a DeploymentError occurred
        (None when validation aborted the run)
    """
    self.log.debug("take_action(%s)" % parsed_args)
    self._validate_args(parsed_args)

    errors, warnings = self._predeploy_verify_capabilities(parsed_args)
    if errors > 0:
        self.log.error(
            "Configuration has %d errors, fix them before proceeding. "
            "Ignoring these errors is likely to lead to a failed deploy.",
            errors)
        if parsed_args.validation_warnings_fatal or \
                parsed_args.validation_errors_fatal:
            return
    # NOTE(review): when errors > 0 but warnings == 0 and neither
    # fatal flag is set, the else-branch below still logs "SUCCESS: No
    # warnings or errors" right after the error report — misleading,
    # flagged for cleanup.
    if warnings > 0:
        self.log.error(
            "Configuration has %d warnings, fix them before proceeding. ",
            warnings)
        if parsed_args.validation_warnings_fatal:
            return
    else:
        self.log.info("SUCCESS: No warnings or errors in deploy "
                      "configuration, proceeding.")

    clients = self.app.client_manager
    orchestration_client = clients.rdomanager_oscplugin.orchestration()

    stack = self._get_stack(orchestration_client, parsed_args.stack)
    stack_create = stack is None

    try:
        self._pre_heat_deploy()

        if parsed_args.rhel_reg:
            # Registration needs org + activation key; satellite mode
            # additionally requires the satellite URL.
            if parsed_args.reg_method == 'satellite':
                sat_required_args = (parsed_args.reg_org and
                                     parsed_args.reg_sat_url and
                                     parsed_args.reg_activation_key)
                if not sat_required_args:
                    raise exceptions.DeploymentError(
                        "ERROR: In order to use satellite registration, "
                        "you must specify --reg-org, --reg-sat-url, and "
                        "--reg-activation-key.")
            else:
                portal_required_args = (parsed_args.reg_org and
                                        parsed_args.reg_activation_key)
                if not portal_required_args:
                    raise exceptions.DeploymentError(
                        "ERROR: In order to use portal registration, you "
                        "must specify --reg-org, and "
                        "--reg-activation-key.")

        if parsed_args.templates:
            self._deploy_tripleo_heat_templates(stack, parsed_args)
        else:
            self._deploy_tuskar(stack, parsed_args)

        # Get a new copy of the stack after stack update/create. If it was
        # a create then the previous stack object would be None.
        stack = self._get_stack(orchestration_client, parsed_args.stack)
        self._create_overcloudrc(stack, parsed_args)
        self._create_tempest_deployer_input()

        if stack_create:
            self._deploy_postconfig(stack, parsed_args)

        overcloud_endpoint = self._get_overcloud_endpoint(stack)
        print("Overcloud Endpoint: {0}".format(overcloud_endpoint))
        print("Overcloud Deployed")
        return True
    except exceptions.DeploymentError as err:
        print("Deployment failed: ", err, file=sys.stderr)
        return False
|
rdo-management/python-rdomanager-oscplugin | rdomanager_oscplugin/v1/overcloud_deploy.py | DeployOvercloud._collect_node_profiles | python | def _collect_node_profiles(self):
bm_client = self.app.client_manager.rdomanager_oscplugin.baremetal()
# map of profile capability -> [node_uuid, ...]
profile_map = collections.defaultdict(list)
for node in bm_client.node.list(maintenance=False):
node = bm_client.node.get(node.uuid)
profiles = re.findall(r'profile:(.*?)(?:,|$)',
node.properties.get('capabilities', ''))
if not profiles:
profile_map[None].append(node.uuid)
for p in profiles:
profile_map[p].append(node.uuid)
return dict(profile_map) | Gather a map of profile -> [node_uuid] for ironic boot profiles | train | https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/v1/overcloud_deploy.py#L842-L858 | null | class DeployOvercloud(command.Command):
"""Deploy Overcloud"""
log = logging.getLogger(__name__ + ".DeployOvercloud")
predeploy_errors = 0
predeploy_warnings = 0
def set_overcloud_passwords(self, parameters, parsed_args):
    """Populate *parameters* with generated overcloud service passwords.

    The generated password map is also kept on ``self.passwords`` for
    later use (e.g. when writing the overcloudrc file).

    :param parameters: dict the password entries are written into
    :param parsed_args: parsed CLI args; ``templates`` selects the flat
        tripleo-heat-templates names over the Tuskar role-scoped names
    """
    snmpd_password = utils.get_config_value(
        "auth", "undercloud_ceilometer_snmpd_password")

    self.passwords = passwords = utils.generate_overcloud_passwords()
    ceilometer_pass = passwords['OVERCLOUD_CEILOMETER_PASSWORD']
    ceilometer_secret = passwords['OVERCLOUD_CEILOMETER_SECRET']

    if parsed_args.templates:
        # Flat parameter names used by tripleo-heat-templates.
        parameters.update({
            'AdminPassword': passwords['OVERCLOUD_ADMIN_PASSWORD'],
            'AdminToken': passwords['OVERCLOUD_ADMIN_TOKEN'],
            'CeilometerPassword': ceilometer_pass,
            'CeilometerMeteringSecret': ceilometer_secret,
            'CinderPassword': passwords['OVERCLOUD_CINDER_PASSWORD'],
            'GlancePassword': passwords['OVERCLOUD_GLANCE_PASSWORD'],
            'HeatPassword': passwords['OVERCLOUD_HEAT_PASSWORD'],
            'HeatStackDomainAdminPassword':
                passwords['OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD'],
            'NeutronPassword': passwords['OVERCLOUD_NEUTRON_PASSWORD'],
            'NovaPassword': passwords['OVERCLOUD_NOVA_PASSWORD'],
            'SwiftHashSuffix': passwords['OVERCLOUD_SWIFT_HASH'],
            'SwiftPassword': passwords['OVERCLOUD_SWIFT_PASSWORD'],
            'SnmpdReadonlyUserPassword': snmpd_password,
        })
        return

    # Tuskar role-scoped parameter names.
    admin_password = passwords['OVERCLOUD_ADMIN_PASSWORD']
    parameters['Controller-1::AdminPassword'] = admin_password
    parameters['Controller-1::AdminToken'] = passwords[
        'OVERCLOUD_ADMIN_TOKEN']
    parameters['Compute-1::AdminPassword'] = admin_password

    # Every role gets the undercloud snmpd password.
    for role in ('Controller-1', 'Cinder-Storage-1', 'Swift-Storage-1',
                 'Compute-1'):
        parameters[role + '::SnmpdReadonlyUserPassword'] = snmpd_password

    # Settings shared by the controller and compute roles.
    for role in ('Controller-1', 'Compute-1'):
        parameters[role + '::CeilometerPassword'] = ceilometer_pass
        parameters[role + '::CeilometerMeteringSecret'] = ceilometer_secret
        parameters[role + '::NeutronPassword'] = passwords[
            'OVERCLOUD_NEUTRON_PASSWORD']
        parameters[role + '::NovaPassword'] = passwords[
            'OVERCLOUD_NOVA_PASSWORD']

    # Controller-only services.
    parameters['Controller-1::CinderPassword'] = passwords[
        'OVERCLOUD_CINDER_PASSWORD']
    parameters['Controller-1::GlancePassword'] = passwords[
        'OVERCLOUD_GLANCE_PASSWORD']
    parameters['Controller-1::HeatPassword'] = passwords[
        'OVERCLOUD_HEAT_PASSWORD']
    parameters['Controller-1::HeatStackDomainAdminPassword'] = passwords[
        'OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD']
    parameters['Controller-1::SwiftHashSuffix'] = passwords[
        'OVERCLOUD_SWIFT_HASH']
    parameters['Controller-1::SwiftPassword'] = passwords[
        'OVERCLOUD_SWIFT_PASSWORD']
def _get_stack(self, orchestration_client, stack_name):
"""Get the ID for the current deployed overcloud stack if it exists."""
try:
stack = orchestration_client.stacks.get(stack_name)
self.log.info("Stack found, will be doing a stack update")
return stack
except HTTPNotFound:
self.log.info("No stack found, will be doing a stack create")
def _update_paramaters(self, args, network_client, stack):
    """Assemble the deployment parameter dict for Heat/Tuskar.

    Merges static defaults, generated service passwords, the ctlplane
    network id, CLI overrides, HA/scaling derived settings and (on
    create only) Ceph bootstrap secrets.

    NOTE: the name is misspelled ("paramaters") but kept as-is because
    other methods in this class call it by this name.

    :param args: parsed CLI arguments
    :param network_client: neutron client used to find the ctlplane net
    :param stack: existing stack object, or None for an initial create
    :returns: dict of parameter name -> value
    """
    if args.templates:
        parameters = PARAMETERS.copy()
        if stack is None:
            parameters.update(NEW_STACK_PARAMETERS)
    else:
        # Tuskar deployments start from an empty parameter set.
        parameters = {}

    self.log.debug("Generating overcloud passwords")
    self.set_overcloud_passwords(parameters, args)

    self.log.debug("Getting ctlplane from Neutron")
    net = network_client.api.find_attr('networks', 'ctlplane')
    parameters['NeutronControlPlaneID'] = net['id']

    if args.templates:
        # (template parameter name, CLI argument attribute) pairs.
        param_args = (
            ('NeutronPublicInterface', 'neutron_public_interface'),
            ('NeutronBridgeMappings', 'neutron_bridge_mappings'),
            ('NeutronFlatNetworks', 'neutron_flat_networks'),
            ('HypervisorNeutronPhysicalBridge', 'neutron_physical_bridge'),
            ('NtpServer', 'ntp_server'),
            ('ControllerCount', 'control_scale'),
            ('ComputeCount', 'compute_scale'),
            ('ObjectStorageCount', 'swift_storage_scale'),
            ('BlockStorageCount', 'block_storage_scale'),
            ('CephStorageCount', 'ceph_storage_scale'),
            ('OvercloudControlFlavor', 'control_flavor'),
            ('OvercloudComputeFlavor', 'compute_flavor'),
            ('OvercloudBlockStorageFlavor', 'block_storage_flavor'),
            ('OvercloudSwiftStorageFlavor', 'swift_storage_flavor'),
            ('OvercloudCephStorageFlavor', 'ceph_storage_flavor'),
            ('NeutronNetworkVLANRanges', 'neutron_network_vlan_ranges'),
            ('NeutronMechanismDrivers', 'neutron_mechanism_drivers')
        )

        if stack is None:
            # These parameters may only be set at stack-create time.
            new_stack_args = (
                ('NeutronNetworkType', 'neutron_network_type'),
                ('NeutronTunnelIdRanges', 'neutron_tunnel_id_ranges'),
                ('NeutronTunnelTypes', 'neutron_tunnel_types'),
                ('NeutronVniRanges', 'neutron_vni_ranges'),
                ('NovaComputeLibvirtType', 'libvirt_type'),
            )
            param_args = param_args + new_stack_args

            if args.neutron_disable_tunneling is not None:
                neutron_enable_tunneling = (
                    not args.neutron_disable_tunneling)
                parameters.update({
                    'NeutronEnableTunnelling': neutron_enable_tunneling,
                })
    else:
        # Tuskar scopes every parameter per role.
        param_args = (
            ('Controller-1::NeutronPublicInterface',
             'neutron_public_interface'),
            ('Compute-1::NeutronPublicInterface',
             'neutron_public_interface'),
            ('Controller-1::NeutronBridgeMappings',
             'neutron_bridge_mappings'),
            ('Compute-1::NeutronBridgeMappings',
             'neutron_bridge_mappings'),
            ('Controller-1::NeutronFlatNetworks', 'neutron_flat_networks'),
            ('Compute-1::NeutronFlatNetworks', 'neutron_flat_networks'),
            ('Compute-1::NeutronPhysicalBridge',
             'neutron_physical_bridge'),
            ('Controller-1::NtpServer', 'ntp_server'),
            ('Compute-1::NtpServer', 'ntp_server'),
            ('Controller-1::NeutronNetworkVLANRanges',
             'neutron_network_vlan_ranges'),
            ('Compute-1::NeutronNetworkVLANRanges',
             'neutron_network_vlan_ranges'),
            ('Controller-1::NeutronMechanismDrivers',
             'neutron_mechanism_drivers'),
            ('Compute-1::NeutronMechanismDrivers',
             'neutron_mechanism_drivers'),
            ('Controller-1::count', 'control_scale'),
            ('Compute-1::count', 'compute_scale'),
            ('Swift-Storage-1::count', 'swift_storage_scale'),
            ('Cinder-Storage-1::count', 'block_storage_scale'),
            ('Ceph-Storage-1::count', 'ceph_storage_scale'),
            ('Cinder-Storage-1::Flavor', 'block_storage_flavor'),
            ('Compute-1::Flavor', 'compute_flavor'),
            ('Controller-1::Flavor', 'control_flavor'),
            ('Swift-Storage-1::Flavor', 'swift_storage_flavor'),
            ('Ceph-Storage-1::Flavor', 'ceph_storage_flavor'),
        )

        if stack is None:
            new_stack_args = (
                ('Controller-1::NeutronNetworkType',
                 'neutron_network_type'),
                ('Compute-1::NeutronNetworkType', 'neutron_network_type'),
                ('Controller-1::NeutronTunnelTypes',
                 'neutron_tunnel_types'),
                ('Compute-1::NeutronTunnelTypes', 'neutron_tunnel_types'),
                ('Compute-1::NovaComputeLibvirtType', 'libvirt_type'),
                ('Controller-1::NeutronTunnelIdRanges',
                 'neutron_tunnel_id_ranges'),
                ('Controller-1::NeutronVniRanges', 'neutron_vni_ranges'),
                ('Compute-1::NeutronTunnelIdRanges',
                 'neutron_tunnel_id_ranges'),
                ('Compute-1::NeutronVniRanges', 'neutron_vni_ranges'),
            )
            param_args = param_args + new_stack_args

            if args.neutron_disable_tunneling is not None:
                neutron_enable_tunneling = (
                    not args.neutron_disable_tunneling)
                parameters.update({
                    'Controller-1::NeutronEnableTunnelling':
                    neutron_enable_tunneling,
                    'Compute-1::NeutronEnableTunnelling':
                    neutron_enable_tunneling,
                })

    # Update parameters from commandline
    for param, arg in param_args:
        if getattr(args, arg, None) is not None:
            parameters[param] = getattr(args, arg)

    # Scaling needs extra parameters
    number_controllers = max((
        int(parameters.get('ControllerCount', 0)),
        int(parameters.get('Controller-1::count', 0))
    ))
    if number_controllers > 1:
        # HA layouts require an explicit NTP server.
        if not args.ntp_server:
            raise Exception('Specify --ntp-server when using multiple'
                            ' controllers (with HA).')

        if args.templates:
            parameters.update({
                'NeutronL3HA': True,
                'NeutronAllowL3AgentFailover': False,
            })
        else:
            parameters.update({
                'Controller-1::NeutronL3HA': True,
                'Controller-1::NeutronAllowL3AgentFailover': False,
                'Compute-1::NeutronL3HA': True,
                'Compute-1::NeutronAllowL3AgentFailover': False,
            })
    else:
        if args.templates:
            parameters.update({
                'NeutronL3HA': False,
                'NeutronAllowL3AgentFailover': False,
            })
        else:
            parameters.update({
                'Controller-1::NeutronL3HA': False,
                'Controller-1::NeutronAllowL3AgentFailover': False,
                'Compute-1::NeutronL3HA': False,
                'Compute-1::NeutronAllowL3AgentFailover': False,
            })

    # set at least 3 dhcp_agents_per_network
    dhcp_agents_per_network = (number_controllers if number_controllers and
                               number_controllers > 3 else 3)
    if args.templates:
        parameters.update({
            'NeutronDhcpAgentsPerNetwork': dhcp_agents_per_network,
        })
    else:
        parameters.update({
            'Controller-1::NeutronDhcpAgentsPerNetwork':
            dhcp_agents_per_network,
        })

    if max((int(parameters.get('CephStorageCount', 0)),
            int(parameters.get('Ceph-Storage-1::count', 0)))) > 0:
        if stack is None:
            # Ceph cluster identity/keys are generated once at create.
            parameters.update({
                'CephClusterFSID': six.text_type(uuid.uuid1()),
                'CephMonKey': utils.create_cephx_key(),
                'CephAdminKey': utils.create_cephx_key()
            })

    return parameters
def _create_registration_env(self, args):
    """Build the extra Heat environment files for RHEL registration.

    :param args: parsed CLI arguments carrying the ``reg_*`` options
    :returns: list of three paths: the registration resource registry,
        the registration environment, and a generated temp file holding
        the user-supplied registration parameters
    """
    if args.templates:
        tht_root = args.templates
    else:
        tht_root = TRIPLEO_HEAT_TEMPLATES

    environment = os.path.join(tht_root,
                               RHEL_REGISTRATION_EXTRACONFIG_NAME,
                               'environment-rhel-registration.yaml')
    registry = os.path.join(tht_root, RHEL_REGISTRATION_EXTRACONFIG_NAME,
                            'rhel-registration-resource-registry.yaml')
    user_env = ("parameter_defaults:\n"
                " rhel_reg_method: \"%(method)s\"\n"
                " rhel_reg_org: \"%(org)s\"\n"
                " rhel_reg_force: \"%(force)s\"\n"
                " rhel_reg_sat_url: \"%(sat_url)s\"\n"
                " rhel_reg_activation_key: \"%(activation_key)s\"\n"
                % {'method': args.reg_method,
                   'org': args.reg_org,
                   'force': args.reg_force,
                   'sat_url': args.reg_sat_url,
                   'activation_key': args.reg_activation_key})
    handle, user_env_file = tempfile.mkstemp()
    # BUG FIX: mkstemp() returns an already-open OS-level file
    # descriptor; the original never closed it and re-opened the path
    # with open(), leaking one fd per call. Close it explicitly.
    os.close(handle)
    with open(user_env_file, 'w') as temp_file:
        temp_file.write(user_env)
    return [registry, environment, user_env_file]
def _heat_deploy(self, stack, stack_name, template_path, parameters,
                 environments, timeout):
    """Verify the Baremetal nodes are available and do a stack update

    Creates the stack when *stack* is None, otherwise updates it in
    place (reusing existing stack parameters), then blocks until Heat
    reports the stack ready, raising on failure.
    """
    self.log.debug("Processing environment files")
    env_files, env = (
        template_utils.process_multiple_environments_and_files(
            environments))

    self.log.debug("Getting template contents")
    template_files, template = template_utils.get_template_contents(
        template_path)

    # Heat needs every referenced file (templates + environments) in a
    # single files map.
    files = dict(list(template_files.items()) + list(env_files.items()))

    clients = self.app.client_manager
    orchestration_client = clients.rdomanager_oscplugin.orchestration()

    self.log.debug("Deploying stack: %s", stack_name)
    self.log.debug("Deploying template: %s", template)
    self.log.debug("Deploying parameters: %s", parameters)
    self.log.debug("Deploying environment: %s", env)
    self.log.debug("Deploying files: %s", files)

    stack_args = {
        'stack_name': stack_name,
        'template': template,
        'parameters': parameters,
        'environment': env,
        'files': files
    }

    if timeout:
        stack_args['timeout_mins'] = timeout

    if stack is None:
        self.log.info("Performing Heat stack create")
        orchestration_client.stacks.create(**stack_args)
    else:
        self.log.info("Performing Heat stack update")
        # Make sure existing parameters for stack are reused
        stack_args['existing'] = 'true'
        orchestration_client.stacks.update(stack.id, **stack_args)

    # Despite the name, this wait also covers the update path.
    create_result = utils.wait_for_stack_ready(
        orchestration_client, stack_name)
    if not create_result:
        if stack is None:
            raise Exception("Heat Stack create failed.")
        else:
            raise Exception("Heat Stack update failed.")
def _get_overcloud_endpoint(self, stack):
for output in stack.to_dict().get('outputs', {}):
if output['output_key'] == 'KeystoneURL':
return output['output_value']
def _get_service_ips(self, stack):
service_ips = {}
for output in stack.to_dict().get('outputs', {}):
service_ips[output['output_key']] = output['output_value']
return service_ips
def _pre_heat_deploy(self):
    """Run sanity checks before any Heat stack create/update.

    :raises exceptions.DeploymentError: when hypervisor stats are absent
    :returns: True when the checks pass
    """
    compute_client = self.app.client_manager.compute
    self.log.debug("Checking hypervisor stats")
    stats = utils.check_hypervisor_stats(compute_client)
    if stats is None:
        raise exceptions.DeploymentError(
            "Expected hypervisor stats not met")
    return True
def _deploy_tripleo_heat_templates(self, stack, parsed_args):
    """Deploy the fixed templates in TripleO Heat Templates"""
    clients = self.app.client_manager
    network_client = clients.network

    parameters = self._update_paramaters(
        parsed_args, network_client, stack)

    # Validate requested role counts against available baremetal nodes
    # before touching Heat (presumably raises on mismatch -- see
    # utils.check_nodes_count).
    utils.check_nodes_count(
        self.app.client_manager.rdomanager_oscplugin.baremetal(),
        stack,
        parameters,
        {
            'ControllerCount': 1,
            'ComputeCount': 1,
            'ObjectStorageCount': 0,
            'BlockStorageCount': 0,
            'CephStorageCount': 0,
        }
    )

    tht_root = parsed_args.templates

    print("Deploying templates in the directory {0}".format(
        os.path.abspath(tht_root)))

    self.log.debug("Creating Environment file")
    env_path = utils.create_environment_file()

    if stack is None:
        # Keystone certificates are generated only on initial create.
        self.log.debug("Creating Keystone certificates")
        keystone_pki.generate_certs_into_json(env_path, False)

    resource_registry_path = os.path.join(tht_root, RESOURCE_REGISTRY_NAME)
    environments = [resource_registry_path, env_path]
    if parsed_args.rhel_reg:
        reg_env = self._create_registration_env(parsed_args)
        environments.extend(reg_env)
    if parsed_args.environment_files:
        environments.extend(parsed_args.environment_files)

    overcloud_yaml = os.path.join(tht_root, OVERCLOUD_YAML_NAME)

    self._heat_deploy(stack, parsed_args.stack, overcloud_yaml, parameters,
                      environments, parsed_args.timeout)
def _deploy_tuskar(self, stack, parsed_args):
    """Deploy or update the overcloud from a Tuskar management plan.

    Downloads the plan templates to *output_dir* (or a temp dir),
    patches the computed parameters back into Tuskar, then hands off to
    :meth:`_heat_deploy`.
    """
    clients = self.app.client_manager
    management = clients.rdomanager_oscplugin.management()
    network_client = clients.network

    # TODO(dmatthews): The Tuskar client has very similar code to this for
    # downloading templates. It should be refactored upstream so we can use
    # it.

    if parsed_args.output_dir:
        output_dir = parsed_args.output_dir
    else:
        output_dir = tempfile.mkdtemp()

    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)

    management_plan = tuskarutils.find_resource(
        management.plans, parsed_args.plan)

    # retrieve templates
    templates = management.plans.templates(management_plan.uuid)

    parameters = self._update_paramaters(
        parsed_args, network_client, stack)

    utils.check_nodes_count(
        self.app.client_manager.rdomanager_oscplugin.baremetal(),
        stack,
        parameters,
        {
            'Controller-1::count': 1,
            'Compute-1::count': 1,
            'Swift-Storage-1::count': 0,
            'Cinder-Storage-1::count': 0,
            'Ceph-Storage-1::count': 0,
        }
    )

    if stack is None:
        # Keystone PKI material is generated only at create time.
        ca_key_pem, ca_cert_pem = keystone_pki.create_ca_pair()
        signing_key_pem, signing_cert_pem = (
            keystone_pki.create_signing_pair(ca_key_pem, ca_cert_pem))
        parameters['Controller-1::KeystoneCACertificate'] = ca_cert_pem
        parameters['Controller-1::KeystoneSigningCertificate'] = (
            signing_cert_pem)
        parameters['Controller-1::KeystoneSigningKey'] = signing_key_pem

    # Save the parameters to Tuskar so they can be used when redeploying.
    # Tuskar expects to get all values as strings. So we convert them all
    # below.
    management.plans.patch(
        management_plan.uuid,
        [{'name': x[0], 'value': six.text_type(x[1])}
         for x in parameters.items()]
    )

    # write file for each key-value in templates
    print("The following templates will be written:")
    for template_name, template_content in templates.items():

        # It's possible to organize the role templates and their dependent
        # files into directories, in which case the template_name will
        # carry the directory information. If that's the case, first
        # create the directory structure (if it hasn't already been
        # created by another file in the templates list).
        template_dir = os.path.dirname(template_name)
        output_template_dir = os.path.join(output_dir, template_dir)
        if template_dir and not os.path.exists(output_template_dir):
            os.makedirs(output_template_dir)

        filename = os.path.join(output_dir, template_name)
        with open(filename, 'w+') as template_file:
            template_file.write(template_content)
        print(filename)

    overcloud_yaml = os.path.join(output_dir, 'plan.yaml')
    environment_yaml = os.path.join(output_dir, 'environment.yaml')
    environments = [environment_yaml, ]
    if parsed_args.rhel_reg:
        reg_env = self._create_registration_env(parsed_args)
        environments.extend(reg_env)
    if parsed_args.environment_files:
        environments.extend(parsed_args.environment_files)

    self._heat_deploy(stack, parsed_args.stack, overcloud_yaml, parameters,
                      environments, parsed_args.timeout)
def _create_overcloudrc(self, stack, parsed_args):
    """Write an ``<stack_name>rc`` credentials file for the new overcloud.

    Emits ``export KEY=VALUE`` lines covering admin credentials, the
    Keystone auth URL and a ``no_proxy`` entry extended with the
    overcloud IP.

    :param stack: the deployed (or updated) overcloud stack
    :param parsed_args: parsed CLI arguments (``no_proxy`` is read)
    """
    # IMPROVEMENT: compute the endpoint once; the original called
    # _get_overcloud_endpoint(stack) a second time for OS_AUTH_URL.
    overcloud_endpoint = self._get_overcloud_endpoint(stack)
    overcloud_ip = six.moves.urllib.parse.urlparse(
        overcloud_endpoint).hostname

    rc_params = {
        'NOVA_VERSION': '1.1',
        'COMPUTE_API_VERSION': '1.1',
        'OS_USERNAME': 'admin',
        'OS_TENANT_NAME': 'admin',
        'OS_NO_CACHE': 'True',
        'OS_CLOUDNAME': stack.stack_name,
        'no_proxy': "%(no_proxy)s,%(overcloud_ip)s" % {
            'no_proxy': parsed_args.no_proxy,
            'overcloud_ip': overcloud_ip,
        }
    }
    rc_params.update({
        'OS_PASSWORD': self.passwords['OVERCLOUD_ADMIN_PASSWORD'],
        'OS_AUTH_URL': overcloud_endpoint,
    })
    # e.g. stack "overcloud" -> file "overcloudrc" in the CWD.
    with open('%src' % stack.stack_name, 'w') as f:
        for key, value in rc_params.items():
            f.write("export %(key)s=%(value)s\n" %
                    {'key': key, 'value': value})
def _create_tempest_deployer_input(self):
config = configparser.ConfigParser()
config.add_section('compute-feature-enabled')
# Does the test environment support obtaining instance serial console
# output? (default: true)
# set in [nova.serial_console]->enabled
config.set('compute-feature-enabled', 'console_output', 'false')
config.add_section('object-storage')
# Role to add to users created for swift tests to enable creating
# containers (default: 'Member')
# keystone role-list returns this role
config.set('object-storage', 'operator_role', 'swiftoperator')
config.add_section('orchestration')
# Role required for users to be able to manage stacks
# (default: 'heat_stack_owner')
# keystone role-list returns this role
config.set('orchestration', 'stack_owner_role', 'heat_stack_user')
config.add_section('volume')
# Name of the backend1 (must be declared in cinder.conf)
# (default: 'BACKEND_1')
# set in [cinder]->enabled_backends
config.set('volume', 'backend1_name', 'tripleo_iscsi')
config.add_section('volume-feature-enabled')
# Update bootable status of a volume Not implemented on icehouse
# (default: false)
# python-cinderclient supports set-bootable
config.set('volume-feature-enabled', 'bootable', 'true')
with open('tempest-deployer-input.conf', 'w+') as config_file:
config.write(config_file)
def _deploy_postconfig(self, stack, parsed_args):
    """Initialize keystone and register service endpoints after create.

    Only invoked for fresh stack creates (see the caller): seeds the
    overcloud keystone, registers every service endpoint against the
    internal VIPs reported in the stack outputs, and creates a demo
    flavor.
    """
    self.log.debug("_deploy_postconfig(%s)" % parsed_args)

    passwords = self.passwords

    overcloud_endpoint = self._get_overcloud_endpoint(stack)
    overcloud_ip = six.moves.urllib.parse.urlparse(
        overcloud_endpoint).hostname

    # Make sure direct calls to the overcloud bypass any HTTP proxy.
    no_proxy = [os.environ.get('no_proxy'), overcloud_ip]
    os.environ['no_proxy'] = ','.join(
        [x for x in no_proxy if x is not None])

    service_ips = self._get_service_ips(stack)

    utils.remove_known_hosts(overcloud_ip)

    keystone_ip = service_ips.get('KeystoneAdminVip')
    if not keystone_ip:
        keystone_ip = overcloud_ip

    keystone.initialize(
        keystone_ip,
        passwords['OVERCLOUD_ADMIN_TOKEN'],
        'admin@example.com',
        passwords['OVERCLOUD_ADMIN_PASSWORD'],
        public=overcloud_ip,
        user='heat-admin')

    # NOTE(bcrochet): Bad hack. Remove the ssl_port info from the
    # os_cloud_config.SERVICES dictionary
    # CONSISTENCY FIX: use six.iteritems() like the loop below; the
    # original called the py2-only dict.iteritems() here.
    for service_name, data in six.iteritems(keystone.SERVICES):
        data.pop('ssl_port', None)

    services = {}
    for service, data in six.iteritems(utils.SERVICE_LIST):
        service_data = data.copy()
        service_data.pop('password_field', None)
        password_field = data.get('password_field')
        if password_field:
            service_data['password'] = passwords[password_field]

        # e.g. "glancev2" -> "GlanceInternalVip" (version suffix dropped).
        service_name = re.sub('v[0-9]+', '',
                              service.capitalize() + 'InternalVip')
        internal_vip = service_ips.get(service_name)
        if internal_vip:
            service_data['internal_host'] = internal_vip
        services.update({service: service_data})

    # NOTE(review): ``clients`` here is a module-level import
    # (os_cloud_config), not self.app.client_manager -- confirm.
    keystone_client = clients.get_keystone_client(
        'admin',
        passwords['OVERCLOUD_ADMIN_PASSWORD'],
        'admin',
        overcloud_endpoint)
    keystone.setup_endpoints(
        services,
        client=keystone_client,
        os_auth_url=overcloud_endpoint,
        public_host=overcloud_ip)

    compute_client = clients.get_nova_bm_client(
        'admin',
        passwords['OVERCLOUD_ADMIN_PASSWORD'],
        'admin',
        overcloud_endpoint)
    compute_client.flavors.create('m1.demo', 512, 1, 10, 'auto')
def _validate_args(self, parsed_args):
network_type = parsed_args.neutron_network_type
tunnel_types = parsed_args.neutron_tunnel_types
if network_type and tunnel_types:
# Validate that neutron_network_type is in neutron_tunnel_types
if network_type not in tunnel_types:
raise oscexc.CommandError("Neutron network type must be in "
"Neutron tunnel types "
"(%s) " % tunnel_types)
elif network_type and not tunnel_types:
raise oscexc.CommandError("Neutron tunnel types must be specified "
"when Neutron network type is specified")
def _predeploy_verify_capabilities(self, parsed_args):
    """Run all predeploy configuration checks.

    Resets and then accumulates ``predeploy_errors`` and
    ``predeploy_warnings`` via the ``_check_*`` helpers.

    :returns: tuple of (error count, warning count)
    """
    self.predeploy_errors = 0
    self.predeploy_warnings = 0
    self.log.debug("Starting _pre_verify_capabilities")

    bm_client = self.app.client_manager.rdomanager_oscplugin.baremetal()

    self._check_boot_images()

    self._check_flavors_exist(parsed_args)

    for node in bm_client.node.list():
        # Re-fetch by uuid for full node details before validating.
        node = bm_client.node.get(node.uuid)
        self.log.debug("Checking config for Node {0}".format(node.uuid))
        self._check_ironic_boot_configuration(node)

    flavor_profile_map = self._collect_flavor_profiles([
        parsed_args.control_flavor,
        parsed_args.compute_flavor,
        parsed_args.ceph_storage_flavor,
        parsed_args.block_storage_flavor,
        parsed_args.swift_storage_flavor,
    ])
    node_profile_map = self._collect_node_profiles()

    # Verify each role that will actually be deployed has enough
    # profile-tagged nodes for its flavor.
    for target, flavor, scale in [
        ('control', parsed_args.control_flavor,
         parsed_args.control_scale),
        ('compute', parsed_args.compute_flavor,
         parsed_args.compute_scale),
        ('ceph-storage', parsed_args.ceph_storage_flavor,
         parsed_args.ceph_storage_scale),
        ('block-storage', parsed_args.block_storage_flavor,
         parsed_args.block_storage_scale),
        ('swift-storage', parsed_args.swift_storage_flavor,
         parsed_args.swift_storage_scale),
    ]:
        if scale == 0 or flavor is None:
            self.log.debug("Skipping verification of %s profiles because "
                           "none will be deployed", flavor)
            continue
        self._check_profiles(
            target, flavor, scale,
            flavor_profile_map,
            node_profile_map)

    # Nodes without any profile can never be matched to a flavor.
    if len(node_profile_map.get(None, [])) > 0:
        self.predeploy_warnings += 1
        self.log.warning(
            "There are %d ironic nodes with no profile that will "
            "not be used: %s",
            len(node_profile_map[None]),
            ', '.join(node_profile_map[None])
        )

    return self.predeploy_errors, self.predeploy_warnings
# Per-instance cache for _image_ids(); populated on the first
# successful glance lookup (note: name-mangled to _DeployOvercloud__*).
__kernel_id = None
__ramdisk_id = None
def _image_ids(self):
    """Return (kernel_id, ramdisk_id) of the deploy images in glance.

    Results are cached on the instance after the first lookup where
    both ids were found.  Either element may be None when the image is
    missing or ambiguous.
    """
    if self.__kernel_id is not None and self.__ramdisk_id is not None:
        return self.__kernel_id, self.__ramdisk_id

    image_client = self.app.client_manager.image
    kernel_id, ramdisk_id = None, None
    try:
        kernel_id = osc_utils.find_resource(
            image_client.images, 'bm-deploy-kernel').id
    except AttributeError as e:
        # NOTE(review): find_resource presumably yields something
        # without .id when zero/multiple images match -- confirm.
        self.log.error("Please make sure there is only one image named "
                       "'bm-deploy-kernel' in glance.")
        self.log.exception(e)

    try:
        ramdisk_id = osc_utils.find_resource(
            image_client.images, 'bm-deploy-ramdisk').id
    except AttributeError as e:
        self.log.error("Please make sure there is only one image "
                       "named 'bm-deploy-ramdisk' in glance.")
        self.log.exception(e)

    self.log.debug("Using kernel ID: {0} and ramdisk ID: {1}".format(
        kernel_id, ramdisk_id))

    self.__kernel_id = kernel_id
    self.__ramdisk_id = ramdisk_id
    return kernel_id, ramdisk_id
def _collect_flavor_profiles(self, flavors):
    """Map each deployment flavor name to its profile capability.

    :param flavors: flavor names used by this deployment
    :returns: dict of flavor name -> profile string, or None when the
        flavor has no ``capabilities:profile`` key
    """
    compute_client = self.app.client_manager.compute

    flavor_profiles = {}

    for flavor in compute_client.flavors.list():
        if flavor.name not in flavors:
            self.log.debug("Flavor {} isn't used in this deployment, "
                           "skipping it".format(flavor.name))
            continue

        profile = flavor.get_keys().get('capabilities:profile')
        if profile == '':
            flavor_profiles[flavor.name] = None
        else:
            flavor_profiles[flavor.name] = profile

        if flavor.get_keys().get('capabilities:boot_option', '') \
                != 'local':
            # NOTE(review): bumps the *warnings* counter but logs at
            # error level -- severity looks inconsistent; confirm intent.
            self.predeploy_warnings += 1
            self.log.error(
                'Flavor %s "capabilities:boot_option" is not set to '
                '"local". Nodes must have ability to PXE boot from '
                'deploy image.', flavor.name)
            self.log.error(
                'Recommended solution: openstack flavor set --property '
                '"cpu_arch"="x86_64" --property '
                '"capabilities:boot_option"="local" ' + flavor.name)

    return flavor_profiles
def _check_profiles(self, target, flavor, scale,
                    flavor_profile_map,
                    node_profile_map):
    """Verify enough ironic nodes are tagged with *flavor*'s profile.

    Increments ``predeploy_errors`` when the flavor has no associated
    profile, or when fewer than *scale* nodes carry the profile.
    """
    if flavor_profile_map.get(flavor) is None:
        self.predeploy_errors += 1
        # NOTE(review): message says "Warning:" but this is counted as
        # an error -- wording and counter look inconsistent; confirm.
        self.log.error(
            'Warning: The flavor selected for --%s-flavor "%s" has no '
            'profile associated', target, flavor)
        self.log.error(
            'Recommendation: assign a profile with openstack flavor set '
            '--property "capabilities:profile"="PROFILE_NAME" %s',
            flavor)
        return

    if len(node_profile_map.get(flavor_profile_map[flavor], [])) < scale:
        self.predeploy_errors += 1
        self.log.error(
            "Error: %s of %s requested ironic nodes tagged to profile %s "
            "(for flavor %s)",
            len(node_profile_map.get(flavor_profile_map[flavor], [])),
            scale, flavor_profile_map[flavor], flavor
        )
        self.log.error(
            "Recommendation: tag more nodes using ironic node-update "
            "<NODE ID> replace properties/capabilities=profile:%s,"
            "boot_option:local", flavor_profile_map[flavor])
def _check_boot_images(self):
kernel_id, ramdisk_id = self._image_ids()
message = ("No image with the name '{}' found - make "
"sure you've uploaded boot images")
if kernel_id is None:
self.predeploy_errors += 1
self.log.error(message.format('bm-deploy-kernel'))
if ramdisk_id is None:
self.predeploy_errors += 1
self.log.error(message.format('bm-deploy-ramdisk'))
def _check_flavors_exist(self, parsed_args):
"""Ensure that selected flavors (--ROLE-flavor) exist in nova."""
compute_client = self.app.client_manager.compute
flavors = {f.name: f for f in compute_client.flavors.list()}
message = "Provided --{}-flavor, '{}', does not exist"
for target, flavor, scale in (
('control', parsed_args.control_flavor,
parsed_args.control_scale),
('compute', parsed_args.compute_flavor,
parsed_args.compute_scale),
('ceph-storage', parsed_args.ceph_storage_flavor,
parsed_args.ceph_storage_scale),
('block-storage', parsed_args.block_storage_flavor,
parsed_args.block_storage_scale),
('swift-storage', parsed_args.swift_storage_flavor,
parsed_args.swift_storage_scale),
):
if flavor is None or scale == 0:
self.log.debug("--{}-flavor not used".format(target))
elif flavor not in flavors:
self.predeploy_errors += 1
self.log.error(message.format(target, flavor))
def _check_ironic_boot_configuration(self, node):
kernel_id, ramdisk_id = self._image_ids()
self.log.debug("Doing boot checks for {}".format(node.uuid))
message = ("Node uuid={uuid} has an incorrectly configured "
"{property}. Expected \"{expected}\" but got "
"\"{actual}\".")
if node.driver_info.get('deploy_ramdisk') != ramdisk_id:
self.predeploy_errors += 1
self.log.error(message.format(
uuid=node.uuid,
property='driver_info/deploy_ramdisk',
expected=ramdisk_id,
actual=node.driver_info.get('deploy_ramdisk')
))
if node.driver_info.get('deploy_kernel') != kernel_id:
self.predeploy_errors += 1
self.log.error(message.format(
uuid=node.uuid,
property='driver_info/deploy_kernel',
expected=ramdisk_id,
actual=node.driver_info.get('deploy_kernel')
))
if 'boot_option:local' not in node.properties.get('capabilities', ''):
self.predeploy_warnings += 1
self.log.warning(message.format(
uuid=node.uuid,
property='properties/capabilities',
expected='boot_option:local',
actual=node.properties.get('capabilities')
))
def get_parser(self, prog_name):
    """Build the argument parser for the overcloud deploy command.

    :param prog_name: program name shown in usage output
    :returns: a configured ``argparse.ArgumentParser``
    """
    # add_help doesn't work properly, set it to False:
    parser = argparse.ArgumentParser(
        description=self.get_description(),
        prog=prog_name,
        add_help=False
    )
    # Exactly one deployment source must be given: a Tuskar plan or a
    # tripleo-heat-templates directory.
    main_group = parser.add_mutually_exclusive_group(required=True)
    main_group.add_argument(
        '--plan',
        help=_("The Name or UUID of the Tuskar plan to deploy.")
    )
    main_group.add_argument(
        '--templates', nargs='?', const=TRIPLEO_HEAT_TEMPLATES,
        help=_("The directory containing the Heat templates to deploy"))
    parser.add_argument('--stack',
                        help=_("Stack name to create or update"),
                        default='overcloud')
    parser.add_argument('-t', '--timeout', metavar='<TIMEOUT>',
                        type=int, default=240,
                        help=_('Deployment timeout in minutes.'))
    parser.add_argument('--control-scale', type=int,
                        help=_('New number of control nodes.'))
    parser.add_argument('--compute-scale', type=int,
                        help=_('New number of compute nodes.'))
    parser.add_argument('--ceph-storage-scale', type=int,
                        help=_('New number of ceph storage nodes.'))
    parser.add_argument('--block-storage-scale', type=int,
                        help=_('New number of cinder storage nodes.'))
    parser.add_argument('--swift-storage-scale', type=int,
                        help=_('New number of swift storage nodes.'))
    parser.add_argument('--control-flavor',
                        help=_("Nova flavor to use for control nodes."))
    parser.add_argument('--compute-flavor',
                        help=_("Nova flavor to use for compute nodes."))
    parser.add_argument('--ceph-storage-flavor',
                        help=_("Nova flavor to use for ceph storage "
                               "nodes."))
    parser.add_argument('--block-storage-flavor',
                        help=_("Nova flavor to use for cinder storage "
                               "nodes."))
    parser.add_argument('--swift-storage-flavor',
                        help=_("Nova flavor to use for swift storage "
                               "nodes."))
    parser.add_argument('--neutron-flat-networks',
                        help=_('Comma separated list of physical_network '
                               'names with which flat networks can be '
                               'created. Use * to allow flat networks '
                               'with arbitrary physical_network names.'))
    parser.add_argument('--neutron-physical-bridge',
                        help=_('Deprecated.'))
    parser.add_argument('--neutron-bridge-mappings',
                        help=_('Comma separated list of bridge mappings. '
                               '(default: datacentre:br-ex)'))
    parser.add_argument('--neutron-public-interface',
                        help=_('Deprecated.'))
    parser.add_argument('--hypervisor-neutron-public-interface',
                        default='nic1', help=_('Deprecated.'))
    parser.add_argument('--neutron-network-type',
                        help=_('The network type for tenant networks.'))
    parser.add_argument('--neutron-tunnel-types',
                        help=_('Network types supported by the agent '
                               '(gre and/or vxlan).'))
    parser.add_argument('--neutron-tunnel-id-ranges',
                        default="1:1000",
                        help=_("Ranges of GRE tunnel IDs to make "
                               "available for tenant network allocation"),)
    parser.add_argument('--neutron-vni-ranges',
                        default="1:1000",
                        help=_("Ranges of VXLAN VNI IDs to make "
                               "available for tenant network allocation"),)
    # BUG FIX: the original had a stray trailing comma after this call,
    # creating and discarding a one-element tuple.
    parser.add_argument('--neutron-disable-tunneling',
                        dest='neutron_disable_tunneling',
                        action="store_const", const=True,
                        help=_('Disables tunneling.'))
    parser.add_argument('--neutron-network-vlan-ranges',
                        help=_('Comma separated list of '
                               '<physical_network>:<vlan_min>:<vlan_max> '
                               'or <physical_network> specifying '
                               'physical_network names usable for VLAN '
                               'provider and tenant networks, as well as '
                               'ranges of VLAN tags on each available for '
                               'allocation to tenant networks. '
                               '(ex: datacentre:1:1000)'))
    parser.add_argument('--neutron-mechanism-drivers',
                        help=_('An ordered list of extension driver '
                               'entrypoints to be loaded from the '
                               'neutron.ml2.extension_drivers namespace.'))
    parser.add_argument('--libvirt-type',
                        default='kvm',
                        choices=['kvm', 'qemu'],
                        help=_('Libvirt domain type. (default: kvm)'))
    parser.add_argument('--ntp-server',
                        help=_('The NTP for overcloud nodes.'))
    parser.add_argument(
        '--tripleo-root',
        default=os.environ.get('TRIPLEO_ROOT', '/etc/tripleo'),
        help=_('The root directory for TripleO templates.')
    )
    parser.add_argument(
        '--no-proxy',
        default=os.environ.get('no_proxy', ''),
        help=_('A comma separated list of hosts that should not be '
               'proxied.')
    )
    parser.add_argument(
        '-O', '--output-dir', metavar='<OUTPUT DIR>',
        help=_('Directory to write Tuskar template files into. It will be '
               'created if it does not exist. If not provided a temporary '
               'directory will be used.')
    )
    parser.add_argument(
        '-e', '--environment-file', metavar='<HEAT ENVIRONMENT FILE>',
        action='append', dest='environment_files',
        help=_('Environment files to be passed to the heat stack-create '
               'or heat stack-update command. (Can be specified more than '
               'once.)')
    )
    parser.add_argument(
        '--validation-errors-fatal',
        action='store_true',
        default=False,
        help=_('Exit if there are errors from the configuration '
               'pre-checks. Ignoring these errors will likely cause your '
               'deploy to fail.')
    )
    parser.add_argument(
        '--validation-warnings-fatal',
        action='store_true',
        default=False,
        help=_('Exit if there are warnings from the configuration '
               'pre-checks.')
    )
    reg_group = parser.add_argument_group('Registration Parameters')
    reg_group.add_argument(
        '--rhel-reg',
        action='store_true',
        help=_('Register overcloud nodes to the customer portal or a '
               'satellite.')
    )
    reg_group.add_argument(
        '--reg-method',
        choices=['satellite', 'portal'],
        default='satellite',
        help=_('RHEL registration method to use for the overcloud nodes.')
    )
    reg_group.add_argument(
        '--reg-org',
        default='',
        help=_('Organization key to use for registration.')
    )
    reg_group.add_argument(
        '--reg-force',
        action='store_true',
        help=_('Register the system even if it is already registered.')
    )
    reg_group.add_argument(
        '--reg-sat-url',
        default='',
        help=_('Satellite server to register overcloud nodes.')
    )
    reg_group.add_argument(
        '--reg-activation-key',
        default='',
        help=_('Activation key to use for registration.')
    )
    return parser
def take_action(self, parsed_args):
    """Entry point for the ``overcloud deploy`` command.

    Runs pre-deployment validation, deploys via either the TripleO Heat
    templates (--templates) or a Tuskar plan (--plan), then performs
    post-deploy setup (overcloudrc, tempest input, keystone endpoints).

    :param parsed_args: argparse namespace produced by get_parser()
    :returns: True on success, False when a DeploymentError was raised
    """
    self.log.debug("take_action(%s)" % parsed_args)
    self._validate_args(parsed_args)
    # Pre-checks return counters of problems found in flavors/nodes.
    errors, warnings = self._predeploy_verify_capabilities(parsed_args)
    if errors > 0:
        self.log.error(
            "Configuration has %d errors, fix them before proceeding. "
            "Ignoring these errors is likely to lead to a failed deploy.",
            errors)
        # Errors abort the deploy only when either *-fatal flag is set.
        if parsed_args.validation_warnings_fatal or \
                parsed_args.validation_errors_fatal:
            return
    if warnings > 0:
        self.log.error(
            "Configuration has %d warnings, fix them before proceeding. ",
            warnings)
        if parsed_args.validation_warnings_fatal:
            return
    else:
        # NOTE(review): this success message is keyed off warnings only,
        # so it can also print when non-fatal errors were reported above.
        self.log.info("SUCCESS: No warnings or errors in deploy "
                      "configuration, proceeding.")
    clients = self.app.client_manager
    orchestration_client = clients.rdomanager_oscplugin.orchestration()
    # An existing stack means this run is an update rather than a create.
    stack = self._get_stack(orchestration_client, parsed_args.stack)
    stack_create = stack is None
    try:
        self._pre_heat_deploy()
        if parsed_args.rhel_reg:
            # Each RHEL registration method has its own mandatory options.
            if parsed_args.reg_method == 'satellite':
                sat_required_args = (parsed_args.reg_org and
                                     parsed_args.reg_sat_url and
                                     parsed_args.reg_activation_key)
                if not sat_required_args:
                    raise exceptions.DeploymentError(
                        "ERROR: In order to use satellite registration, "
                        "you must specify --reg-org, --reg-sat-url, and "
                        "--reg-activation-key.")
            else:
                portal_required_args = (parsed_args.reg_org and
                                        parsed_args.reg_activation_key)
                if not portal_required_args:
                    raise exceptions.DeploymentError(
                        "ERROR: In order to use portal registration, you "
                        "must specify --reg-org, and "
                        "--reg-activation-key.")
        if parsed_args.templates:
            self._deploy_tripleo_heat_templates(stack, parsed_args)
        else:
            self._deploy_tuskar(stack, parsed_args)
        # Get a new copy of the stack after stack update/create. If it was
        # a create then the previous stack object would be None.
        stack = self._get_stack(orchestration_client, parsed_args.stack)
        self._create_overcloudrc(stack, parsed_args)
        self._create_tempest_deployer_input()
        if stack_create:
            # Keystone init and endpoint registration only on first deploy.
            self._deploy_postconfig(stack, parsed_args)
        overcloud_endpoint = self._get_overcloud_endpoint(stack)
        print("Overcloud Endpoint: {0}".format(overcloud_endpoint))
        print("Overcloud Deployed")
        return True
    except exceptions.DeploymentError as err:
        print("Deployment failed: ", err, file=sys.stderr)
        return False
|
rdo-management/python-rdomanager-oscplugin | rdomanager_oscplugin/v1/overcloud_deploy.py | DeployOvercloud._check_flavors_exist | python | def _check_flavors_exist(self, parsed_args):
    # Ensure every flavor named via --ROLE-flavor exists in nova; each
    # missing flavor bumps the predeploy error counter.
    compute_client = self.app.client_manager.compute
    # Map flavor name -> flavor object for O(1) membership tests below.
    flavors = {f.name: f for f in compute_client.flavors.list()}
    message = "Provided --{}-flavor, '{}', does not exist"
    for target, flavor, scale in (
        ('control', parsed_args.control_flavor,
         parsed_args.control_scale),
        ('compute', parsed_args.compute_flavor,
         parsed_args.compute_scale),
        ('ceph-storage', parsed_args.ceph_storage_flavor,
         parsed_args.ceph_storage_scale),
        ('block-storage', parsed_args.block_storage_flavor,
         parsed_args.block_storage_scale),
        ('swift-storage', parsed_args.swift_storage_flavor,
         parsed_args.swift_storage_scale),
    ):
        if flavor is None or scale == 0:
            # Role not deployed at all; nothing to validate for it.
            self.log.debug("--{}-flavor not used".format(target))
        elif flavor not in flavors:
            self.predeploy_errors += 1
            self.log.error(message.format(target, flavor))
self.log.error(message.format(target, flavor)) | Ensure that selected flavors (--ROLE-flavor) exist in nova. | train | https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/v1/overcloud_deploy.py#L929-L953 | null | class DeployOvercloud(command.Command):
"""Deploy Overcloud"""

# Module-qualified logger shared by all instances of the command.
log = logging.getLogger(__name__ + ".DeployOvercloud")
# Problem counters accumulated by the _check_* / _predeploy_* helpers;
# reset at the start of _predeploy_verify_capabilities().
predeploy_errors = 0
predeploy_warnings = 0
def set_overcloud_passwords(self, parameters, parsed_args):
    """Add passwords to the parameters dictionary

    :param parameters: A dictionary for the passwords to be added to
    :type parameters: dict
    :param parsed_args: CLI namespace; .templates selects between flat
        TripleO Heat Template parameter names and role-namespaced
        Tuskar names ("Controller-1::...").
    """
    undercloud_ceilometer_snmpd_password = utils.get_config_value(
        "auth", "undercloud_ceilometer_snmpd_password")

    # Keep a reference for later use (_create_overcloudrc etc.).
    self.passwords = passwords = utils.generate_overcloud_passwords()

    ceilometer_pass = passwords['OVERCLOUD_CEILOMETER_PASSWORD']
    ceilometer_secret = passwords['OVERCLOUD_CEILOMETER_SECRET']
    if parsed_args.templates:
        # TripleO Heat Templates use flat, unprefixed parameter names.
        parameters['AdminPassword'] = passwords['OVERCLOUD_ADMIN_PASSWORD']
        parameters['AdminToken'] = passwords['OVERCLOUD_ADMIN_TOKEN']
        parameters['CeilometerPassword'] = ceilometer_pass
        parameters['CeilometerMeteringSecret'] = ceilometer_secret
        parameters['CinderPassword'] = passwords[
            'OVERCLOUD_CINDER_PASSWORD']
        parameters['GlancePassword'] = passwords[
            'OVERCLOUD_GLANCE_PASSWORD']
        parameters['HeatPassword'] = passwords['OVERCLOUD_HEAT_PASSWORD']
        parameters['HeatStackDomainAdminPassword'] = passwords[
            'OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD']
        parameters['NeutronPassword'] = passwords[
            'OVERCLOUD_NEUTRON_PASSWORD']
        parameters['NovaPassword'] = passwords['OVERCLOUD_NOVA_PASSWORD']
        parameters['SwiftHashSuffix'] = passwords['OVERCLOUD_SWIFT_HASH']
        parameters['SwiftPassword'] = passwords['OVERCLOUD_SWIFT_PASSWORD']
        parameters['SnmpdReadonlyUserPassword'] = (
            undercloud_ceilometer_snmpd_password)
    else:
        # Tuskar plans namespace every parameter by role instance.
        parameters['Controller-1::AdminPassword'] = passwords[
            'OVERCLOUD_ADMIN_PASSWORD']
        parameters['Controller-1::AdminToken'] = passwords[
            'OVERCLOUD_ADMIN_TOKEN']
        parameters['Compute-1::AdminPassword'] = passwords[
            'OVERCLOUD_ADMIN_PASSWORD']
        parameters['Controller-1::SnmpdReadonlyUserPassword'] = (
            undercloud_ceilometer_snmpd_password)
        parameters['Cinder-Storage-1::SnmpdReadonlyUserPassword'] = (
            undercloud_ceilometer_snmpd_password)
        parameters['Swift-Storage-1::SnmpdReadonlyUserPassword'] = (
            undercloud_ceilometer_snmpd_password)
        parameters['Compute-1::SnmpdReadonlyUserPassword'] = (
            undercloud_ceilometer_snmpd_password)
        parameters['Controller-1::CeilometerPassword'] = ceilometer_pass
        parameters[
            'Controller-1::CeilometerMeteringSecret'] = ceilometer_secret
        parameters['Compute-1::CeilometerPassword'] = ceilometer_pass
        parameters[
            'Compute-1::CeilometerMeteringSecret'] = ceilometer_secret
        parameters['Controller-1::CinderPassword'] = (
            passwords['OVERCLOUD_CINDER_PASSWORD'])
        parameters['Controller-1::GlancePassword'] = (
            passwords['OVERCLOUD_GLANCE_PASSWORD'])
        parameters['Controller-1::HeatPassword'] = (
            passwords['OVERCLOUD_HEAT_PASSWORD'])
        parameters['Controller-1::HeatStackDomainAdminPassword'] = (
            passwords['OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD'])
        parameters['Controller-1::NeutronPassword'] = (
            passwords['OVERCLOUD_NEUTRON_PASSWORD'])
        parameters['Compute-1::NeutronPassword'] = (
            passwords['OVERCLOUD_NEUTRON_PASSWORD'])
        parameters['Controller-1::NovaPassword'] = (
            passwords['OVERCLOUD_NOVA_PASSWORD'])
        parameters['Compute-1::NovaPassword'] = (
            passwords['OVERCLOUD_NOVA_PASSWORD'])
        parameters['Controller-1::SwiftHashSuffix'] = (
            passwords['OVERCLOUD_SWIFT_HASH'])
        parameters['Controller-1::SwiftPassword'] = (
            passwords['OVERCLOUD_SWIFT_PASSWORD'])
def _get_stack(self, orchestration_client, stack_name):
    """Get the ID for the current deployed overcloud stack if it exists."""
    try:
        existing = orchestration_client.stacks.get(stack_name)
    except HTTPNotFound:
        # No stack yet: the caller will perform a create.
        self.log.info("No stack found, will be doing a stack create")
        return None
    self.log.info("Stack found, will be doing a stack update")
    return existing
def _update_paramaters(self, args, network_client, stack):
    """Build the Heat/Tuskar parameter dictionary for the deploy.

    Merges defaults, generated passwords, the ctlplane network id and
    any CLI overrides; adds HA / DHCP-agent / Ceph parameters derived
    from the requested node counts.

    :param args: parsed CLI arguments
    :param network_client: neutron client (ctlplane lookup)
    :param stack: existing stack object, or None on first create
    :returns: dict of parameter name -> value
    """
    if args.templates:
        parameters = PARAMETERS.copy()
        if stack is None:
            # Some parameters may only be set at stack-create time.
            parameters.update(NEW_STACK_PARAMETERS)
    else:
        parameters = {}
    self.log.debug("Generating overcloud passwords")
    self.set_overcloud_passwords(parameters, args)

    self.log.debug("Getting ctlplane from Neutron")
    net = network_client.api.find_attr('networks', 'ctlplane')
    parameters['NeutronControlPlaneID'] = net['id']

    if args.templates:
        # (heat parameter name, CLI attribute) pairs for THT deploys.
        param_args = (
            ('NeutronPublicInterface', 'neutron_public_interface'),
            ('NeutronBridgeMappings', 'neutron_bridge_mappings'),
            ('NeutronFlatNetworks', 'neutron_flat_networks'),
            ('HypervisorNeutronPhysicalBridge', 'neutron_physical_bridge'),
            ('NtpServer', 'ntp_server'),
            ('ControllerCount', 'control_scale'),
            ('ComputeCount', 'compute_scale'),
            ('ObjectStorageCount', 'swift_storage_scale'),
            ('BlockStorageCount', 'block_storage_scale'),
            ('CephStorageCount', 'ceph_storage_scale'),
            ('OvercloudControlFlavor', 'control_flavor'),
            ('OvercloudComputeFlavor', 'compute_flavor'),
            ('OvercloudBlockStorageFlavor', 'block_storage_flavor'),
            ('OvercloudSwiftStorageFlavor', 'swift_storage_flavor'),
            ('OvercloudCephStorageFlavor', 'ceph_storage_flavor'),
            ('NeutronNetworkVLANRanges', 'neutron_network_vlan_ranges'),
            ('NeutronMechanismDrivers', 'neutron_mechanism_drivers')
        )

        if stack is None:
            # Options that can only be chosen at create time.
            new_stack_args = (
                ('NeutronNetworkType', 'neutron_network_type'),
                ('NeutronTunnelIdRanges', 'neutron_tunnel_id_ranges'),
                ('NeutronTunnelTypes', 'neutron_tunnel_types'),
                ('NeutronVniRanges', 'neutron_vni_ranges'),
                ('NovaComputeLibvirtType', 'libvirt_type'),
            )
            param_args = param_args + new_stack_args

            if args.neutron_disable_tunneling is not None:
                neutron_enable_tunneling = (
                    not args.neutron_disable_tunneling)
                parameters.update({
                    'NeutronEnableTunnelling': neutron_enable_tunneling,
                })
    else:
        # Tuskar deploys namespace the same options per role instance.
        param_args = (
            ('Controller-1::NeutronPublicInterface',
             'neutron_public_interface'),
            ('Compute-1::NeutronPublicInterface',
             'neutron_public_interface'),
            ('Controller-1::NeutronBridgeMappings',
             'neutron_bridge_mappings'),
            ('Compute-1::NeutronBridgeMappings',
             'neutron_bridge_mappings'),
            ('Controller-1::NeutronFlatNetworks', 'neutron_flat_networks'),
            ('Compute-1::NeutronFlatNetworks', 'neutron_flat_networks'),
            ('Compute-1::NeutronPhysicalBridge',
             'neutron_physical_bridge'),
            ('Controller-1::NtpServer', 'ntp_server'),
            ('Compute-1::NtpServer', 'ntp_server'),
            ('Controller-1::NeutronNetworkVLANRanges',
             'neutron_network_vlan_ranges'),
            ('Compute-1::NeutronNetworkVLANRanges',
             'neutron_network_vlan_ranges'),
            ('Controller-1::NeutronMechanismDrivers',
             'neutron_mechanism_drivers'),
            ('Compute-1::NeutronMechanismDrivers',
             'neutron_mechanism_drivers'),
            ('Controller-1::count', 'control_scale'),
            ('Compute-1::count', 'compute_scale'),
            ('Swift-Storage-1::count', 'swift_storage_scale'),
            ('Cinder-Storage-1::count', 'block_storage_scale'),
            ('Ceph-Storage-1::count', 'ceph_storage_scale'),
            ('Cinder-Storage-1::Flavor', 'block_storage_flavor'),
            ('Compute-1::Flavor', 'compute_flavor'),
            ('Controller-1::Flavor', 'control_flavor'),
            ('Swift-Storage-1::Flavor', 'swift_storage_flavor'),
            ('Ceph-Storage-1::Flavor', 'ceph_storage_flavor'),
        )

        if stack is None:
            new_stack_args = (
                ('Controller-1::NeutronNetworkType',
                 'neutron_network_type'),
                ('Compute-1::NeutronNetworkType', 'neutron_network_type'),
                ('Controller-1::NeutronTunnelTypes',
                 'neutron_tunnel_types'),
                ('Compute-1::NeutronTunnelTypes', 'neutron_tunnel_types'),
                ('Compute-1::NovaComputeLibvirtType', 'libvirt_type'),
                ('Controller-1::NeutronTunnelIdRanges',
                 'neutron_tunnel_id_ranges'),
                ('Controller-1::NeutronVniRanges', 'neutron_vni_ranges'),
                ('Compute-1::NeutronTunnelIdRanges',
                 'neutron_tunnel_id_ranges'),
                ('Compute-1::NeutronVniRanges', 'neutron_vni_ranges'),
            )
            param_args = param_args + new_stack_args

            if args.neutron_disable_tunneling is not None:
                neutron_enable_tunneling = (
                    not args.neutron_disable_tunneling)
                parameters.update({
                    'Controller-1::NeutronEnableTunnelling':
                    neutron_enable_tunneling,
                    'Compute-1::NeutronEnableTunnelling':
                    neutron_enable_tunneling,
                })

    # Update parameters from commandline; only explicitly-given options
    # override the defaults collected above.
    for param, arg in param_args:
        if getattr(args, arg, None) is not None:
            parameters[param] = getattr(args, arg)

    # Scaling needs extra parameters. Only one of the two count keys is
    # populated depending on THT vs Tuskar, hence max() over both.
    number_controllers = max((
        int(parameters.get('ControllerCount', 0)),
        int(parameters.get('Controller-1::count', 0))
    ))
    if number_controllers > 1:
        # HA deployment: L3 HA on, agent failover off.
        if not args.ntp_server:
            raise Exception('Specify --ntp-server when using multiple'
                            ' controllers (with HA).')

        if args.templates:
            parameters.update({
                'NeutronL3HA': True,
                'NeutronAllowL3AgentFailover': False,
            })
        else:
            parameters.update({
                'Controller-1::NeutronL3HA': True,
                'Controller-1::NeutronAllowL3AgentFailover': False,
                'Compute-1::NeutronL3HA': True,
                'Compute-1::NeutronAllowL3AgentFailover': False,
            })
    else:
        if args.templates:
            parameters.update({
                'NeutronL3HA': False,
                'NeutronAllowL3AgentFailover': False,
            })
        else:
            parameters.update({
                'Controller-1::NeutronL3HA': False,
                'Controller-1::NeutronAllowL3AgentFailover': False,
                'Compute-1::NeutronL3HA': False,
                'Compute-1::NeutronAllowL3AgentFailover': False,
            })

    # set at least 3 dhcp_agents_per_network
    dhcp_agents_per_network = (number_controllers if number_controllers and
                               number_controllers > 3 else 3)

    if args.templates:
        parameters.update({
            'NeutronDhcpAgentsPerNetwork': dhcp_agents_per_network,
        })
    else:
        parameters.update({
            'Controller-1::NeutronDhcpAgentsPerNetwork':
            dhcp_agents_per_network,
        })

    if max((int(parameters.get('CephStorageCount', 0)),
            int(parameters.get('Ceph-Storage-1::count', 0)))) > 0:

        if stack is None:
            # Ceph cluster identity/keys are generated once at create.
            parameters.update({
                'CephClusterFSID': six.text_type(uuid.uuid1()),
                'CephMonKey': utils.create_cephx_key(),
                'CephAdminKey': utils.create_cephx_key()
            })

    return parameters
def _create_registration_env(self, args):
    """Build the Heat environment list for RHEL registration.

    Writes the user-supplied --reg-* values into a temporary
    parameter_defaults environment file.

    :param args: parsed CLI arguments carrying the --reg-* options
    :returns: list of environment file paths
        [registry, environment, user_env_file]
    """
    if args.templates:
        tht_root = args.templates
    else:
        tht_root = TRIPLEO_HEAT_TEMPLATES

    environment = os.path.join(tht_root,
                               RHEL_REGISTRATION_EXTRACONFIG_NAME,
                               'environment-rhel-registration.yaml')
    registry = os.path.join(tht_root, RHEL_REGISTRATION_EXTRACONFIG_NAME,
                            'rhel-registration-resource-registry.yaml')
    user_env = ("parameter_defaults:\n"
                " rhel_reg_method: \"%(method)s\"\n"
                " rhel_reg_org: \"%(org)s\"\n"
                " rhel_reg_force: \"%(force)s\"\n"
                " rhel_reg_sat_url: \"%(sat_url)s\"\n"
                " rhel_reg_activation_key: \"%(activation_key)s\"\n"
                % {'method': args.reg_method,
                   'org': args.reg_org,
                   'force': args.reg_force,
                   'sat_url': args.reg_sat_url,
                   'activation_key': args.reg_activation_key})
    handle, user_env_file = tempfile.mkstemp()
    # BUG FIX: mkstemp() returns an open OS-level file descriptor that the
    # caller must close; previously the path was re-opened by name and the
    # descriptor leaked. os.fdopen() wraps and closes the original fd.
    with os.fdopen(handle, 'w') as temp_file:
        temp_file.write(user_env)
    return [registry, environment, user_env_file]
def _heat_deploy(self, stack, stack_name, template_path, parameters,
                 environments, timeout):
    """Verify the Baremetal nodes are available and do a stack update

    :param stack: existing stack object, or None to create a new stack
    :param stack_name: heat stack name to create/update
    :param template_path: path to the top-level overcloud template
    :param parameters: dict of heat parameters
    :param environments: list of environment file paths
    :param timeout: stack timeout in minutes (falsy -> heat default)
    :raises Exception: when the stack does not reach a ready state
    """
    self.log.debug("Processing environment files")
    # Resolves each environment file plus any files it references.
    env_files, env = (
        template_utils.process_multiple_environments_and_files(
            environments))

    self.log.debug("Getting template contents")
    template_files, template = template_utils.get_template_contents(
        template_path)

    # Heat needs every referenced file's contents in a single mapping.
    files = dict(list(template_files.items()) + list(env_files.items()))

    clients = self.app.client_manager
    orchestration_client = clients.rdomanager_oscplugin.orchestration()

    self.log.debug("Deploying stack: %s", stack_name)
    self.log.debug("Deploying template: %s", template)
    self.log.debug("Deploying parameters: %s", parameters)
    self.log.debug("Deploying environment: %s", env)
    self.log.debug("Deploying files: %s", files)

    stack_args = {
        'stack_name': stack_name,
        'template': template,
        'parameters': parameters,
        'environment': env,
        'files': files
    }

    if timeout:
        stack_args['timeout_mins'] = timeout

    if stack is None:
        self.log.info("Performing Heat stack create")
        orchestration_client.stacks.create(**stack_args)
    else:
        self.log.info("Performing Heat stack update")
        # Make sure existing parameters for stack are reused
        stack_args['existing'] = 'true'
        orchestration_client.stacks.update(stack.id, **stack_args)

    # Blocks until the stack reaches a terminal state.
    create_result = utils.wait_for_stack_ready(
        orchestration_client, stack_name)
    if not create_result:
        if stack is None:
            raise Exception("Heat Stack create failed.")
        else:
            raise Exception("Heat Stack update failed.")
def _get_overcloud_endpoint(self, stack):
    """Return the stack's KeystoneURL output value, or None if absent."""
    stack_outputs = stack.to_dict().get('outputs', {})
    keystone_urls = (entry['output_value'] for entry in stack_outputs
                     if entry['output_key'] == 'KeystoneURL')
    return next(keystone_urls, None)
def _get_service_ips(self, stack):
    """Map every stack output key to its value (service VIPs etc.)."""
    return {entry['output_key']: entry['output_value']
            for entry in stack.to_dict().get('outputs', {})}
def _pre_heat_deploy(self):
    """Setup before the Heat stack create or update has been done."""
    compute_client = self.app.client_manager.compute
    self.log.debug("Checking hypervisor stats")
    hypervisor_stats = utils.check_hypervisor_stats(compute_client)
    # None means no usable baremetal capacity was registered with nova.
    if hypervisor_stats is None:
        raise exceptions.DeploymentError(
            "Expected hypervisor stats not met")
    return True
def _deploy_tripleo_heat_templates(self, stack, parsed_args):
    """Deploy the fixed templates in TripleO Heat Templates

    :param stack: existing stack object, or None to create
    :param parsed_args: parsed CLI arguments (.templates is the root)
    """
    clients = self.app.client_manager
    network_client = clients.network

    parameters = self._update_paramaters(
        parsed_args, network_client, stack)

    # Abort early if the registered nodes cannot satisfy the counts.
    utils.check_nodes_count(
        self.app.client_manager.rdomanager_oscplugin.baremetal(),
        stack,
        parameters,
        {
            'ControllerCount': 1,
            'ComputeCount': 1,
            'ObjectStorageCount': 0,
            'BlockStorageCount': 0,
            'CephStorageCount': 0,
        }
    )

    tht_root = parsed_args.templates

    print("Deploying templates in the directory {0}".format(
        os.path.abspath(tht_root)))
    self.log.debug("Creating Environment file")
    env_path = utils.create_environment_file()

    if stack is None:
        # Keystone PKI material only gets generated on first create.
        self.log.debug("Creating Keystone certificates")
        keystone_pki.generate_certs_into_json(env_path, False)

    resource_registry_path = os.path.join(tht_root, RESOURCE_REGISTRY_NAME)
    environments = [resource_registry_path, env_path]
    if parsed_args.rhel_reg:
        reg_env = self._create_registration_env(parsed_args)
        environments.extend(reg_env)
    if parsed_args.environment_files:
        # User-supplied environments are appended last so they win.
        environments.extend(parsed_args.environment_files)

    overcloud_yaml = os.path.join(tht_root, OVERCLOUD_YAML_NAME)

    self._heat_deploy(stack, parsed_args.stack, overcloud_yaml, parameters,
                      environments, parsed_args.timeout)
def _deploy_tuskar(self, stack, parsed_args):
    """Deploy the overcloud from a Tuskar management plan.

    Downloads the plan's templates to an output directory, saves the
    computed parameters back to Tuskar, then performs the heat deploy.

    :param stack: existing stack object, or None to create
    :param parsed_args: parsed CLI arguments (.plan names the plan)
    """
    clients = self.app.client_manager
    management = clients.rdomanager_oscplugin.management()
    network_client = clients.network

    # TODO(dmatthews): The Tuskar client has very similar code to this for
    # downloading templates. It should be refactored upstream so we can use
    # it.

    if parsed_args.output_dir:
        output_dir = parsed_args.output_dir
    else:
        output_dir = tempfile.mkdtemp()

    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)

    management_plan = tuskarutils.find_resource(
        management.plans, parsed_args.plan)

    # retrieve templates
    templates = management.plans.templates(management_plan.uuid)

    parameters = self._update_paramaters(
        parsed_args, network_client, stack)

    # Abort early if the registered nodes cannot satisfy the counts.
    utils.check_nodes_count(
        self.app.client_manager.rdomanager_oscplugin.baremetal(),
        stack,
        parameters,
        {
            'Controller-1::count': 1,
            'Compute-1::count': 1,
            'Swift-Storage-1::count': 0,
            'Cinder-Storage-1::count': 0,
            'Ceph-Storage-1::count': 0,
        }
    )

    if stack is None:
        # Generate the Keystone CA/signing material on first create only.
        ca_key_pem, ca_cert_pem = keystone_pki.create_ca_pair()
        signing_key_pem, signing_cert_pem = (
            keystone_pki.create_signing_pair(ca_key_pem, ca_cert_pem))
        parameters['Controller-1::KeystoneCACertificate'] = ca_cert_pem
        parameters['Controller-1::KeystoneSigningCertificate'] = (
            signing_cert_pem)
        parameters['Controller-1::KeystoneSigningKey'] = signing_key_pem

    # Save the parameters to Tuskar so they can be used when redeploying.
    # Tuskar expects to get all values as strings. So we convert them all
    # below.
    management.plans.patch(
        management_plan.uuid,
        [{'name': x[0], 'value': six.text_type(x[1])}
         for x in parameters.items()]
    )

    # write file for each key-value in templates
    print("The following templates will be written:")
    for template_name, template_content in templates.items():

        # It's possible to organize the role templates and their dependent
        # files into directories, in which case the template_name will
        # carry the directory information. If that's the case, first
        # create the directory structure (if it hasn't already been
        # created by another file in the templates list).
        template_dir = os.path.dirname(template_name)
        output_template_dir = os.path.join(output_dir, template_dir)
        if template_dir and not os.path.exists(output_template_dir):
            os.makedirs(output_template_dir)

        filename = os.path.join(output_dir, template_name)
        with open(filename, 'w+') as template_file:
            template_file.write(template_content)
        print(filename)

    overcloud_yaml = os.path.join(output_dir, 'plan.yaml')
    environment_yaml = os.path.join(output_dir, 'environment.yaml')
    environments = [environment_yaml, ]
    if parsed_args.rhel_reg:
        reg_env = self._create_registration_env(parsed_args)
        environments.extend(reg_env)
    if parsed_args.environment_files:
        # User-supplied environments are appended last so they win.
        environments.extend(parsed_args.environment_files)

    self._heat_deploy(stack, parsed_args.stack, overcloud_yaml, parameters,
                      environments, parsed_args.timeout)
def _create_overcloudrc(self, stack, parsed_args):
    """Write a ``<stack-name>rc`` file with overcloud credentials.

    :param stack: the deployed heat stack object
    :param parsed_args: CLI namespace (supplies the no_proxy list)
    """
    overcloud_endpoint = self._get_overcloud_endpoint(stack)
    overcloud_ip = six.moves.urllib.parse.urlparse(
        overcloud_endpoint).hostname

    rc_params = {
        'NOVA_VERSION': '1.1',
        'COMPUTE_API_VERSION': '1.1',
        'OS_USERNAME': 'admin',
        'OS_TENANT_NAME': 'admin',
        'OS_NO_CACHE': 'True',
        'OS_CLOUDNAME': stack.stack_name,
        'no_proxy': "%(no_proxy)s,%(overcloud_ip)s" % {
            'no_proxy': parsed_args.no_proxy,
            'overcloud_ip': overcloud_ip,
        }
    }
    rc_params.update({
        'OS_PASSWORD': self.passwords['OVERCLOUD_ADMIN_PASSWORD'],
        # Reuse the endpoint already fetched above rather than scanning
        # the stack outputs a second time (was a redundant call).
        'OS_AUTH_URL': overcloud_endpoint,
    })
    with open('%src' % stack.stack_name, 'w') as f:
        for key, value in rc_params.items():
            f.write("export %(key)s=%(value)s\n" %
                    {'key': key, 'value': value})
def _create_tempest_deployer_input(self):
    """Write tempest-deployer-input.conf describing this deployment."""
    config = configparser.ConfigParser()
    # (section, option, value) entries mirroring the overcloud setup.
    settings = [
        # The test environment cannot obtain instance serial console
        # output (nova [serial_console]->enabled, default true).
        ('compute-feature-enabled', 'console_output', 'false'),
        # Role added to swift test users so they may create containers
        # (default 'Member'); keystone role-list returns this role.
        ('object-storage', 'operator_role', 'swiftoperator'),
        # Role required for users to manage stacks
        # (default 'heat_stack_owner'); keystone role-list returns it.
        ('orchestration', 'stack_owner_role', 'heat_stack_user'),
        # Name of the backend1 declared in cinder.conf
        # ([cinder]->enabled_backends, default 'BACKEND_1').
        ('volume', 'backend1_name', 'tripleo_iscsi'),
        # Updating bootable status is supported here (default false);
        # python-cinderclient supports set-bootable.
        ('volume-feature-enabled', 'bootable', 'true'),
    ]
    for section, option, value in settings:
        if not config.has_section(section):
            config.add_section(section)
        config.set(section, option, value)
    with open('tempest-deployer-input.conf', 'w+') as config_file:
        config.write(config_file)
def _deploy_postconfig(self, stack, parsed_args):
    """First-deploy post-configuration of the new overcloud.

    Initializes keystone, registers service endpoints using the stack's
    service VIP outputs, and creates a demo nova flavor.

    :param stack: the freshly created heat stack object
    :param parsed_args: parsed CLI arguments (logged only)
    """
    self.log.debug("_deploy_postconfig(%s)" % parsed_args)

    passwords = self.passwords

    overcloud_endpoint = self._get_overcloud_endpoint(stack)
    overcloud_ip = six.moves.urllib.parse.urlparse(
        overcloud_endpoint).hostname

    # Make sure direct calls to the overcloud bypass any proxy.
    no_proxy = [os.environ.get('no_proxy'), overcloud_ip]
    os.environ['no_proxy'] = ','.join(
        [x for x in no_proxy if x is not None])

    service_ips = self._get_service_ips(stack)

    utils.remove_known_hosts(overcloud_ip)

    keystone_ip = service_ips.get('KeystoneAdminVip')
    if not keystone_ip:
        keystone_ip = overcloud_ip

    keystone.initialize(
        keystone_ip,
        passwords['OVERCLOUD_ADMIN_TOKEN'],
        'admin@example.com',
        passwords['OVERCLOUD_ADMIN_PASSWORD'],
        public=overcloud_ip,
        user='heat-admin')

    # NOTE(bcrochet): Bad hack. Remove the ssl_port info from the
    # os_cloud_config.SERVICES dictionary
    # BUG FIX: was dict.iteritems(), which only exists on Python 2;
    # six.iteritems matches the usage a few lines below.
    for service_name, data in six.iteritems(keystone.SERVICES):
        data.pop('ssl_port', None)

    services = {}
    for service, data in six.iteritems(utils.SERVICE_LIST):
        service_data = data.copy()
        service_data.pop('password_field', None)
        password_field = data.get('password_field')
        if password_field:
            service_data['password'] = passwords[password_field]

        # e.g. 'glancev2' -> 'GlanceInternalVip' (version suffix dropped).
        service_name = re.sub('v[0-9]+', '',
                              service.capitalize() + 'InternalVip')
        internal_vip = service_ips.get(service_name)
        if internal_vip:
            service_data['internal_host'] = internal_vip
        services.update({service: service_data})

    keystone_client = clients.get_keystone_client(
        'admin',
        passwords['OVERCLOUD_ADMIN_PASSWORD'],
        'admin',
        overcloud_endpoint)
    keystone.setup_endpoints(
        services,
        client=keystone_client,
        os_auth_url=overcloud_endpoint,
        public_host=overcloud_ip)

    compute_client = clients.get_nova_bm_client(
        'admin',
        passwords['OVERCLOUD_ADMIN_PASSWORD'],
        'admin',
        overcloud_endpoint)
    compute_client.flavors.create('m1.demo', 512, 1, 10, 'auto')
def _validate_args(self, parsed_args):
    """Cross-check the neutron network-type / tunnel-type options."""
    network_type = parsed_args.neutron_network_type
    tunnel_types = parsed_args.neutron_tunnel_types
    if not network_type:
        # No network type requested: nothing to validate.
        return
    if not tunnel_types:
        raise oscexc.CommandError("Neutron tunnel types must be specified "
                                  "when Neutron network type is specified")
    # The chosen network type must be one of the enabled tunnel types.
    if network_type not in tunnel_types:
        raise oscexc.CommandError("Neutron network type must be in "
                                  "Neutron tunnel types "
                                  "(%s) " % tunnel_types)
def _predeploy_verify_capabilities(self, parsed_args):
    """Run all pre-deployment checks and count problems found.

    Checks boot images, flavors, per-node ironic boot configuration and
    profile tagging against the requested role scales.

    :param parsed_args: parsed CLI arguments
    :returns: (predeploy_errors, predeploy_warnings) counters
    """
    self.predeploy_errors = 0
    self.predeploy_warnings = 0
    self.log.debug("Starting _pre_verify_capabilities")

    bm_client = self.app.client_manager.rdomanager_oscplugin.baremetal()

    self._check_boot_images()

    self._check_flavors_exist(parsed_args)

    for node in bm_client.node.list():
        # Fetch the detailed record; list() results lack driver_info.
        node = bm_client.node.get(node.uuid)
        self.log.debug("Checking config for Node {0}".format(node.uuid))
        self._check_ironic_boot_configuration(node)

    flavor_profile_map = self._collect_flavor_profiles([
        parsed_args.control_flavor,
        parsed_args.compute_flavor,
        parsed_args.ceph_storage_flavor,
        parsed_args.block_storage_flavor,
        parsed_args.swift_storage_flavor,
    ])
    node_profile_map = self._collect_node_profiles()

    for target, flavor, scale in [
        ('control', parsed_args.control_flavor,
         parsed_args.control_scale),
        ('compute', parsed_args.compute_flavor,
         parsed_args.compute_scale),
        ('ceph-storage', parsed_args.ceph_storage_flavor,
         parsed_args.ceph_storage_scale),
        ('block-storage', parsed_args.block_storage_flavor,
         parsed_args.block_storage_scale),
        ('swift-storage', parsed_args.swift_storage_flavor,
         parsed_args.swift_storage_scale),
    ]:
        if scale == 0 or flavor is None:
            # Role not deployed: skip its profile verification.
            self.log.debug("Skipping verification of %s profiles because "
                           "none will be deployed", flavor)
            continue
        self._check_profiles(
            target, flavor, scale,
            flavor_profile_map,
            node_profile_map)

    # Nodes with no profile capability can never be scheduled to a role.
    if len(node_profile_map.get(None, [])) > 0:
        self.predeploy_warnings += 1
        self.log.warning(
            "There are %d ironic nodes with no profile that will "
            "not be used: %s",
            len(node_profile_map[None]),
            ', '.join(node_profile_map[None])
        )

    return self.predeploy_errors, self.predeploy_warnings
# Lazily-resolved glance image IDs cached by _image_ids(); the leading
# double underscore name-mangles these to _DeployOvercloud__kernel_id etc.
__kernel_id = None
__ramdisk_id = None
def _image_ids(self):
    """Resolve and cache the deploy kernel/ramdisk glance image IDs.

    :returns: (kernel_id, ramdisk_id); either may be None when the
        corresponding image is missing or ambiguous in glance.
    """
    # Serve from the cache once both IDs were resolved successfully.
    if self.__kernel_id is not None and self.__ramdisk_id is not None:
        return self.__kernel_id, self.__ramdisk_id

    image_client = self.app.client_manager.image
    kernel_id, ramdisk_id = None, None
    try:
        kernel_id = osc_utils.find_resource(
            image_client.images, 'bm-deploy-kernel').id
    except AttributeError as e:
        # find_resource surfaces duplicate/missing images this way.
        self.log.error("Please make sure there is only one image named "
                       "'bm-deploy-kernel' in glance.")
        self.log.exception(e)

    try:
        ramdisk_id = osc_utils.find_resource(
            image_client.images, 'bm-deploy-ramdisk').id
    except AttributeError as e:
        self.log.error("Please make sure there is only one image "
                       "named 'bm-deploy-ramdisk' in glance.")
        self.log.exception(e)

    self.log.debug("Using kernel ID: {0} and ramdisk ID: {1}".format(
        kernel_id, ramdisk_id))

    self.__kernel_id = kernel_id
    self.__ramdisk_id = ramdisk_id
    return kernel_id, ramdisk_id
def _collect_node_profiles(self):
    """Gather a map of profile -> [node_uuid] for ironic boot profiles"""
    bm_client = self.app.client_manager.rdomanager_oscplugin.baremetal()

    # Hoist the capability pattern out of the per-node loop.
    profile_pattern = re.compile(r'profile:(.*?)(?:,|$)')

    # map of profile capability -> [node_uuid, ...]; the None key
    # collects nodes that advertise no profile at all.
    profile_map = collections.defaultdict(list)

    for summary in bm_client.node.list(maintenance=False):
        # Fetch the full record; list() results omit properties.
        detail = bm_client.node.get(summary.uuid)
        capabilities = detail.properties.get('capabilities', '')
        found_profiles = profile_pattern.findall(capabilities)
        if found_profiles:
            for profile in found_profiles:
                profile_map[profile].append(detail.uuid)
        else:
            profile_map[None].append(detail.uuid)

    return dict(profile_map)
def _collect_flavor_profiles(self, flavors):
    """Map each deployment flavor name to its capabilities:profile.

    :param flavors: list of flavor names used by this deployment
        (entries may be None for unused roles)
    :returns: dict of flavor name -> profile string (None when unset)
    """
    compute_client = self.app.client_manager.compute
    flavor_profiles = {}

    for flavor in compute_client.flavors.list():
        if flavor.name not in flavors:
            self.log.debug("Flavor {} isn't used in this deployment, "
                           "skipping it".format(flavor.name))
            continue

        profile = flavor.get_keys().get('capabilities:profile')
        # Normalize the empty string to None (no profile assigned).
        if profile == '':
            flavor_profiles[flavor.name] = None
        else:
            flavor_profiles[flavor.name] = profile

        if flavor.get_keys().get('capabilities:boot_option', '') \
                != 'local':
            # NOTE(review): this increments the *warnings* counter but
            # logs at error level — confirm which severity is intended.
            self.predeploy_warnings += 1
            self.log.error(
                'Flavor %s "capabilities:boot_option" is not set to '
                '"local". Nodes must have ability to PXE boot from '
                'deploy image.', flavor.name)
            self.log.error(
                'Recommended solution: openstack flavor set --property '
                '"cpu_arch"="x86_64" --property '
                '"capabilities:boot_option"="local" ' + flavor.name)

    return flavor_profiles
def _check_profiles(self, target, flavor, scale,
                    flavor_profile_map,
                    node_profile_map):
    """Verify enough ironic nodes are tagged for one role's flavor.

    :param target: role name used in messages (e.g. 'control')
    :param flavor: flavor selected for the role
    :param scale: requested node count for the role
    :param flavor_profile_map: flavor name -> profile (or None)
    :param node_profile_map: profile -> [node_uuid, ...]
    """
    if flavor_profile_map.get(flavor) is None:
        # NOTE(review): message says 'Warning:' but this increments the
        # *errors* counter — confirm the intended severity wording.
        self.predeploy_errors += 1
        self.log.error(
            'Warning: The flavor selected for --%s-flavor "%s" has no '
            'profile associated', target, flavor)
        self.log.error(
            'Recommendation: assign a profile with openstack flavor set '
            '--property "capabilities:profile"="PROFILE_NAME" %s',
            flavor)
        return

    # Fewer tagged nodes than the requested scale cannot be scheduled.
    if len(node_profile_map.get(flavor_profile_map[flavor], [])) < scale:
        self.predeploy_errors += 1
        self.log.error(
            "Error: %s of %s requested ironic nodes tagged to profile %s "
            "(for flavor %s)",
            len(node_profile_map.get(flavor_profile_map[flavor], [])),
            scale, flavor_profile_map[flavor], flavor
        )
        self.log.error(
            "Recommendation: tag more nodes using ironic node-update "
            "<NODE ID> replace properties/capabilities=profile:%s,"
            "boot_option:local", flavor_profile_map[flavor])
def _check_boot_images(self):
    """Verify both deploy boot images are present in glance."""
    kernel_id, ramdisk_id = self._image_ids()
    message = ("No image with the name '{}' found - make "
               "sure you've uploaded boot images")
    # Apply the same check to both images via a small table.
    for image_name, image_id in (('bm-deploy-kernel', kernel_id),
                                 ('bm-deploy-ramdisk', ramdisk_id)):
        if image_id is None:
            self.predeploy_errors += 1
            self.log.error(message.format(image_name))
def _check_ironic_boot_configuration(self, node):
    """Validate one ironic node's deploy images and boot capabilities.

    Increments predeploy_errors when deploy_ramdisk / deploy_kernel do
    not match the glance images, and predeploy_warnings when the
    boot_option:local capability is missing.

    :param node: detailed ironic node (driver_info/properties populated)
    """
    kernel_id, ramdisk_id = self._image_ids()
    self.log.debug("Doing boot checks for {}".format(node.uuid))
    message = ("Node uuid={uuid} has an incorrectly configured "
               "{property}. Expected \"{expected}\" but got "
               "\"{actual}\".")
    if node.driver_info.get('deploy_ramdisk') != ramdisk_id:
        self.predeploy_errors += 1
        self.log.error(message.format(
            uuid=node.uuid,
            property='driver_info/deploy_ramdisk',
            expected=ramdisk_id,
            actual=node.driver_info.get('deploy_ramdisk')
        ))
    if node.driver_info.get('deploy_kernel') != kernel_id:
        self.predeploy_errors += 1
        self.log.error(message.format(
            uuid=node.uuid,
            property='driver_info/deploy_kernel',
            # BUG FIX: previously reported expected=ramdisk_id here
            # (copy-paste), so the message named the wrong image.
            expected=kernel_id,
            actual=node.driver_info.get('deploy_kernel')
        ))
    if 'boot_option:local' not in node.properties.get('capabilities', ''):
        self.predeploy_warnings += 1
        self.log.warning(message.format(
            uuid=node.uuid,
            property='properties/capabilities',
            expected='boot_option:local',
            actual=node.properties.get('capabilities')
        ))
def get_parser(self, prog_name):
    """Build the argument parser for the overcloud deploy command.

    :param prog_name: program name to show in usage output.
    :returns: a fully configured ``argparse.ArgumentParser``.
    """
    # add_help doesn't work properly, set it to False:
    parser = argparse.ArgumentParser(
        description=self.get_description(),
        prog=prog_name,
        add_help=False
    )
    # Exactly one of --plan / --templates must be supplied.
    main_group = parser.add_mutually_exclusive_group(required=True)
    main_group.add_argument(
        '--plan',
        help=_("The Name or UUID of the Tuskar plan to deploy.")
    )
    main_group.add_argument(
        '--templates', nargs='?', const=TRIPLEO_HEAT_TEMPLATES,
        help=_("The directory containing the Heat templates to deploy"))
    parser.add_argument('--stack',
                        help=_("Stack name to create or update"),
                        default='overcloud')
    parser.add_argument('-t', '--timeout', metavar='<TIMEOUT>',
                        type=int, default=240,
                        help=_('Deployment timeout in minutes.'))
    # Per-role scale counts.
    parser.add_argument('--control-scale', type=int,
                        help=_('New number of control nodes.'))
    parser.add_argument('--compute-scale', type=int,
                        help=_('New number of compute nodes.'))
    parser.add_argument('--ceph-storage-scale', type=int,
                        help=_('New number of ceph storage nodes.'))
    parser.add_argument('--block-storage-scale', type=int,
                        help=_('New number of cinder storage nodes.'))
    parser.add_argument('--swift-storage-scale', type=int,
                        help=_('New number of swift storage nodes.'))
    # Per-role Nova flavors.
    parser.add_argument('--control-flavor',
                        help=_("Nova flavor to use for control nodes."))
    parser.add_argument('--compute-flavor',
                        help=_("Nova flavor to use for compute nodes."))
    parser.add_argument('--ceph-storage-flavor',
                        help=_("Nova flavor to use for ceph storage "
                               "nodes."))
    parser.add_argument('--block-storage-flavor',
                        help=_("Nova flavor to use for cinder storage "
                               "nodes."))
    parser.add_argument('--swift-storage-flavor',
                        help=_("Nova flavor to use for swift storage "
                               "nodes."))
    # Neutron networking configuration.
    parser.add_argument('--neutron-flat-networks',
                        help=_('Comma separated list of physical_network '
                               'names with which flat networks can be '
                               'created. Use * to allow flat networks '
                               'with arbitrary physical_network names.'))
    parser.add_argument('--neutron-physical-bridge',
                        help=_('Deprecated.'))
    parser.add_argument('--neutron-bridge-mappings',
                        help=_('Comma separated list of bridge mappings. '
                               '(default: datacentre:br-ex)'))
    parser.add_argument('--neutron-public-interface',
                        help=_('Deprecated.'))
    parser.add_argument('--hypervisor-neutron-public-interface',
                        default='nic1', help=_('Deprecated.'))
    parser.add_argument('--neutron-network-type',
                        help=_('The network type for tenant networks.'))
    parser.add_argument('--neutron-tunnel-types',
                        help=_('Network types supported by the agent '
                               '(gre and/or vxlan).'))
    parser.add_argument('--neutron-tunnel-id-ranges',
                        default="1:1000",
                        help=_("Ranges of GRE tunnel IDs to make "
                               "available for tenant network allocation"),)
    parser.add_argument('--neutron-vni-ranges',
                        default="1:1000",
                        help=_("Ranges of VXLAN VNI IDs to make "
                               "available for tenant network allocation"),)
    # FIX: removed a stray trailing comma after this call, which turned
    # the statement into a pointless one-element tuple expression.
    parser.add_argument('--neutron-disable-tunneling',
                        dest='neutron_disable_tunneling',
                        action="store_const", const=True,
                        help=_('Disables tunneling.'))
    parser.add_argument('--neutron-network-vlan-ranges',
                        help=_('Comma separated list of '
                               '<physical_network>:<vlan_min>:<vlan_max> '
                               'or <physical_network> specifying '
                               'physical_network names usable for VLAN '
                               'provider and tenant networks, as well as '
                               'ranges of VLAN tags on each available for '
                               'allocation to tenant networks. '
                               '(ex: datacentre:1:1000)'))
    parser.add_argument('--neutron-mechanism-drivers',
                        help=_('An ordered list of extension driver '
                               'entrypoints to be loaded from the '
                               'neutron.ml2.extension_drivers namespace.'))
    parser.add_argument('--libvirt-type',
                        default='kvm',
                        choices=['kvm', 'qemu'],
                        help=_('Libvirt domain type. (default: kvm)'))
    parser.add_argument('--ntp-server',
                        help=_('The NTP for overcloud nodes.'))
    parser.add_argument(
        '--tripleo-root',
        default=os.environ.get('TRIPLEO_ROOT', '/etc/tripleo'),
        help=_('The root directory for TripleO templates.')
    )
    parser.add_argument(
        '--no-proxy',
        default=os.environ.get('no_proxy', ''),
        help=_('A comma separated list of hosts that should not be '
               'proxied.')
    )
    parser.add_argument(
        '-O', '--output-dir', metavar='<OUTPUT DIR>',
        help=_('Directory to write Tuskar template files into. It will be '
               'created if it does not exist. If not provided a temporary '
               'directory will be used.')
    )
    parser.add_argument(
        '-e', '--environment-file', metavar='<HEAT ENVIRONMENT FILE>',
        action='append', dest='environment_files',
        help=_('Environment files to be passed to the heat stack-create '
               'or heat stack-update command. (Can be specified more than '
               'once.)')
    )
    parser.add_argument(
        '--validation-errors-fatal',
        action='store_true',
        default=False,
        help=_('Exit if there are errors from the configuration '
               'pre-checks. Ignoring these errors will likely cause your '
               'deploy to fail.')
    )
    parser.add_argument(
        '--validation-warnings-fatal',
        action='store_true',
        default=False,
        help=_('Exit if there are warnings from the configuration '
               'pre-checks.')
    )
    # RHEL registration options live in their own argument group.
    reg_group = parser.add_argument_group('Registration Parameters')
    reg_group.add_argument(
        '--rhel-reg',
        action='store_true',
        help=_('Register overcloud nodes to the customer portal or a '
               'satellite.')
    )
    reg_group.add_argument(
        '--reg-method',
        choices=['satellite', 'portal'],
        default='satellite',
        help=_('RHEL registration method to use for the overcloud nodes.')
    )
    reg_group.add_argument(
        '--reg-org',
        default='',
        help=_('Organization key to use for registration.')
    )
    reg_group.add_argument(
        '--reg-force',
        action='store_true',
        help=_('Register the system even if it is already registered.')
    )
    reg_group.add_argument(
        '--reg-sat-url',
        default='',
        help=_('Satellite server to register overcloud nodes.')
    )
    reg_group.add_argument(
        '--reg-activation-key',
        default='',
        help=_('Activation key to use for registration.')
    )
    return parser
def take_action(self, parsed_args):
    """Deploy the overcloud: run pre-checks, create/update the Heat
    stack, then perform post-deploy configuration.

    Returns True on success, False when the deploy raised a
    DeploymentError, and None when pre-checks abort the run.
    """
    self.log.debug("take_action(%s)" % parsed_args)
    self._validate_args(parsed_args)

    # Pre-deployment capability checks. Errors/warnings only abort the
    # deploy when the corresponding --validation-*-fatal flag is set;
    # otherwise the deploy proceeds (best effort) after logging.
    errors, warnings = self._predeploy_verify_capabilities(parsed_args)
    if errors > 0:
        self.log.error(
            "Configuration has %d errors, fix them before proceeding. "
            "Ignoring these errors is likely to lead to a failed deploy.",
            errors)
        if parsed_args.validation_warnings_fatal or \
                parsed_args.validation_errors_fatal:
            return
    if warnings > 0:
        self.log.error(
            "Configuration has %d warnings, fix them before proceeding. ",
            warnings)
        if parsed_args.validation_warnings_fatal:
            return
    else:
        self.log.info("SUCCESS: No warnings or errors in deploy "
                      "configuration, proceeding.")

    clients = self.app.client_manager
    orchestration_client = clients.rdomanager_oscplugin.orchestration()

    stack = self._get_stack(orchestration_client, parsed_args.stack)
    # Remember whether this run creates a brand-new stack (None means
    # no existing stack was found) or updates an existing one.
    stack_create = stack is None

    try:
        self._pre_heat_deploy()

        if parsed_args.rhel_reg:
            # Each registration method has its own mandatory options;
            # fail fast with a clear message if any are missing.
            if parsed_args.reg_method == 'satellite':
                sat_required_args = (parsed_args.reg_org and
                                     parsed_args.reg_sat_url and
                                     parsed_args.reg_activation_key)
                if not sat_required_args:
                    raise exceptions.DeploymentError(
                        "ERROR: In order to use satellite registration, "
                        "you must specify --reg-org, --reg-sat-url, and "
                        "--reg-activation-key.")
            else:
                portal_required_args = (parsed_args.reg_org and
                                        parsed_args.reg_activation_key)
                if not portal_required_args:
                    raise exceptions.DeploymentError(
                        "ERROR: In order to use portal registration, you "
                        "must specify --reg-org, and "
                        "--reg-activation-key.")

        # Two deployment backends: raw tripleo-heat-templates or Tuskar.
        if parsed_args.templates:
            self._deploy_tripleo_heat_templates(stack, parsed_args)
        else:
            self._deploy_tuskar(stack, parsed_args)

        # Get a new copy of the stack after stack update/create. If it was
        # a create then the previous stack object would be None.
        stack = self._get_stack(orchestration_client, parsed_args.stack)
        self._create_overcloudrc(stack, parsed_args)
        self._create_tempest_deployer_input()

        if stack_create:
            # Post-configuration only runs on a freshly created stack,
            # not on updates.
            self._deploy_postconfig(stack, parsed_args)

        overcloud_endpoint = self._get_overcloud_endpoint(stack)
        print("Overcloud Endpoint: {0}".format(overcloud_endpoint))
        print("Overcloud Deployed")
        return True
    except exceptions.DeploymentError as err:
        print("Deployment failed: ", err, file=sys.stderr)
        return False
|
stephanepechard/projy | projy/collectors/AuthorCollector.py | AuthorCollector.author_from_git | python | def author_from_git(self):
self.author = None
try:
encoding = locale.getdefaultlocale()[1]
# launch git command and get answer
cmd = Popen(["git", "config", "--get", "user.name"], stdout=PIPE)
stdoutdata = cmd.communicate().decode(encoding)
if (stdoutdata[0]):
import ipdb;ipdb.set_trace()
author = stdoutdata[0].rstrip(os.linesep)
self.author = author#.decode('utf8')
except ImportError:
pass
except CalledProcessError:
pass
except OSError:
pass
return self.author | Get the author name from git information. | train | https://github.com/stephanepechard/projy/blob/3146b0e3c207b977e1b51fcb33138746dae83c23/projy/collectors/AuthorCollector.py#L26-L45 | null | class AuthorCollector(Collector):
""" The AuthorCollector class. """
def __init__(self):
    # No author known yet; an author_from_* collector fills this in.
    self.author = None
def author_from_system(self):
    """Collect the author name from the system account.

    This is just the user name, not the real name.
    """
    user_name = getpass.getuser()
    self.author = user_name
    return user_name
|
stephanepechard/projy | projy/TerminalView.py | TerminalView.text_in_color | python | def text_in_color(self, message, color_code):
return self.term.color(color_code) + message + self.term.normal | Print with a beautiful color. See codes at the top of this file. | train | https://github.com/stephanepechard/projy/blob/3146b0e3c207b977e1b51fcb33138746dae83c23/projy/TerminalView.py#L39-L41 | null | class TerminalView:
""" Terminal view to basic CLI information. """
def __init__(self):
    # Terminal handle used for all styled output below.
    # NOTE(review): Terminal is imported elsewhere in this file —
    # presumably the blessings package's Terminal; confirm.
    self.term = Terminal()
def print_error_and_exit(self, message):
    """Terminate the program, emitting *message* as a bold red error."""
    error_text = self.term.bold_red('[ERROR] ' + message)
    sys.exit(error_text)
def print_info(self, message):
    """Write an informational message to standard output."""
    print(message)
def format_question(self, message):
    """Return *message* rendered in the terminal's bold style."""
    styled = self.term.bold(message)
    return styled
|
stephanepechard/projy | projy/collectors/Collector.py | Collector.collect | python | def collect(self):
class_functions = []
for key in self.__class__.__dict__.keys():
func = self.__class__.__dict__[key]
if (inspect.isfunction(func)):
class_functions.append(func)
functions = sorted(class_functions, key=lambda func: func.__name__)
for function in functions:
value = function(self)
if value:
return value | Select the best suited data of all available in the subclasses.
In each subclass, the functions alphabetical order should
correspond to their importance.
Here, the first non null value is returned. | train | https://github.com/stephanepechard/projy/blob/3146b0e3c207b977e1b51fcb33138746dae83c23/projy/collectors/Collector.py#L15-L31 | null | class Collector:
""" The Collector class. """
def __init__(self):
    # Base collector has no state of its own; subclasses hold the
    # values their collect() implementations produce.
    pass
|
stephanepechard/projy | projy/collectors/AuthorMailCollector.py | AuthorMailCollector.author_mail_from_git | python | def author_mail_from_git(self):
try:
# launch git command and get answer
cmd = Popen(["git", "config", "--get", "user.email"], stdout=PIPE)
stdoutdata = cmd.communicate()
if (stdoutdata[0]):
self.author_mail = stdoutdata[0].rstrip(os.linesep)
except ImportError:
pass
except CalledProcessError:
pass
except OSError:
pass
return self.author_mail | Get the author mail from git information. | train | https://github.com/stephanepechard/projy/blob/3146b0e3c207b977e1b51fcb33138746dae83c23/projy/collectors/AuthorMailCollector.py#L24-L39 | null | class AuthorMailCollector(Collector):
""" The AuthorMailCollector class. """
def __init__(self):
    # No mail address known yet; an author_mail_from_* collector
    # fills this in.
    self.author_mail = None
def author_mail_from_system(self):
    """Build an author mail address from system information.

    The result is ``<user>@<hostname>``, which is probably often
    inaccurate.
    """
    user = getpass.getuser()
    host = socket.gethostname()
    self.author_mail = user + '@' + host
    return self.author_mail
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.