text stringlengths 81 112k |
|---|
Plot the fitting given the fitted axis direction, the fitted
center, the fitted radius and the data points.
def show_fit(w_fit, C_fit, r_fit, Xs):
    '''Plot the fitted cylinder together with the data points.

    :param w_fit: unit 3-vector, fitted cylinder axis direction
    :param C_fit: 3-vector, fitted cylinder center
    :param r_fit: float, fitted cylinder radius
    :param Xs: iterable of 3D data points
    '''
    fig = plt.figure()
    # fig.gca(projection='3d') was deprecated in Matplotlib 3.4 and
    # removed in 3.6; add_subplot is the supported way to get 3D axes.
    ax = fig.add_subplot(projection='3d')
    # Plot the data points
    ax.scatter([X[0] for X in Xs], [X[1] for X in Xs], [X[2] for X in Xs])
    # Rotation taking the canonical z-axis onto the fitted axis direction:
    # theta tilts z towards w_fit, phi sets the azimuth.
    theta = np.arccos(np.dot(w_fit, np.array([0, 0, 1])))
    phi = np.arctan2(w_fit[1], w_fit[0])
    M = np.dot(rotation_matrix_from_axis_and_angle(np.array([0, 0, 1]), phi),
               rotation_matrix_from_axis_and_angle(np.array([0, 1, 0]), theta))
    # Build the cylinder surface in canonical coordinates (axis along z)...
    delta = np.linspace(-np.pi, np.pi, 20)
    z = np.linspace(-10, 10, 20)
    Delta, Z = np.meshgrid(delta, z)
    X = r_fit * np.cos(Delta)
    Y = r_fit * np.sin(Delta)
    # ...then rotate and translate every grid point into world coordinates.
    for i in range(len(X)):
        for j in range(len(X[i])):
            p = np.dot(M, np.array([X[i][j], Y[i][j], Z[i][j]])) + C_fit
            X[i][j] = p[0]
            Y[i][j] = p[1]
            Z[i][j] = p[2]
    ax.plot_surface(X, Y, Z, alpha=0.2)
    # Plot the center and axis direction, scaled by the radius for visibility
    ax.quiver(C_fit[0], C_fit[1], C_fit[2],
              r_fit * w_fit[0], r_fit * w_fit[1], r_fit * w_fit[2], color='red')
    plt.show()
Getting the HIGHLIGHT_NUM_CHARS_BEFORE_MATCH setting
to find how many characters before the first word found should
be removed from the window
def find_window(self, highlight_locations):
    """Shift the highlight window left by HIGHLIGHT_NUM_CHARS_BEFORE_MATCH.

    Reads the HIGHLIGHT_NUM_CHARS_BEFORE_MATCH setting to decide how many
    characters of context before the first matched word to keep in view.
    """
    # Short text fits entirely -- no windowing required.
    if len(self.text_block) <= self.max_length:
        return (0, self.max_length)
    chars_before = getattr(settings, 'HIGHLIGHT_NUM_CHARS_BEFORE_MATCH', 0)
    start, end = super(ColabHighlighter, self).find_window(highlight_locations)
    if start <= chars_before:
        # Not enough text to the left: pin the window to the very start.
        end -= start
        start = 0
    else:
        start -= chars_before
        end -= chars_before
    return (start, end)
Private method for setting a cookie's value
def __set(self, key, real_value, coded_value):
    """Private helper: store a cookie value, reusing an existing morsel."""
    morsel = self.get(key, StringMorsel())
    morsel.set(key, real_value, coded_value)
    # Bypass our own __setitem__ to avoid re-entering cookie parsing.
    dict.__setitem__(self, key, morsel)
Try to login and set the internal session id.
Please note:
- Any failed login resets all existing session ids, even of
other users.
- SIDs expire after some time
def login(self):
    """
    Try to login and set the internal session id.

    Please note:
    - Any failed login resets all existing session ids, even of
      other users.
    - SIDs expire after some time
    """
    url = self.base_url + '/login_sid.lua'
    xml = ET.fromstring(self.session.get(url, timeout=10).text)
    if xml.find('SID').text == "0000000000000000":
        # No valid session yet: answer the challenge-response handshake.
        challenge = xml.find('Challenge').text
        params = {
            "username": self.username,
            "response": self.calculate_response(challenge, self.password),
        }
        response = self.session.get(url, params=params, timeout=10)
        xml = ET.fromstring(response.text)
        sid = xml.find('SID').text
        if sid == "0000000000000000":
            # Login rejected -- the box enforces a wait before the next try.
            blocktime = int(xml.find('BlockTime').text)
            exc = Exception("Login failed, please wait {} seconds".format(
                blocktime
            ))
            exc.blocktime = blocktime
            raise exc
        self.sid = sid
        return sid
Calculate response for the challenge-response authentication
def calculate_response(self, challenge, password):
    """Build the MD5 challenge-response token for FritzBox authentication."""
    # The box hashes "<challenge>-<password>" encoded as UTF-16LE.
    digest = hashlib.md5((challenge + "-" + password).encode("UTF-16LE")).hexdigest()
    return "{0}-{1}".format(challenge, digest)
Returns a list of Actor objects for querying SmartHome devices.
This is currently the only working method for getting temperature data.
def get_actors(self):
    """
    Return a list of Actor objects for querying SmartHome devices.

    This is currently the only working method for getting temperature data.
    """
    xml = ET.fromstring(self.homeautoswitch("getdevicelistinfos"))
    return [Actor(fritzbox=self, device=device)
            for device in xml.findall('device')]
Return a actor identified by it's ain or return None
def get_actor_by_ain(self, ain):
    """
    Return the actor identified by its ain, or None when not found.
    """
    matches = (actor for actor in self.get_actors() if actor.actor_id == ain)
    return next(matches, None)
Call a switch method.
Should only be used by internal library functions.
def homeautoswitch(self, cmd, ain=None, param=None):
    """
    Call a home-automation switch method on the box.

    Should only be used by internal library functions.

    :param cmd: str, switchcmd to execute
    :param ain: optional actor identification number
    :param param: optional extra parameter for the command
    :return: stripped response body, UTF-8 encoded
    """
    assert self.sid, "Not logged in"
    query = {'switchcmd': cmd, 'sid': self.sid}
    if param is not None:
        query['param'] = param
    if ain:
        query['ain'] = ain
    response = self.session.get(
        self.base_url + '/webservices/homeautoswitch.lua',
        params=query, timeout=10)
    response.raise_for_status()
    return response.text.strip().encode('utf-8')
Get information about all actors
This needs 1+(5n) requests where n = number of actors registered
Deprecated, use get_actors instead.
Returns a dict:
[ain] = {
'name': Name of actor,
'state': Powerstate (boolean)
'present': Connected to server? (boolean)
'power': Current power consumption in mW
'energy': Used energy in Wh since last energy reset
'temperature': Current environment temperature in celsius
}
def get_switch_actors(self):
    """
    Get information about all actors.

    This needs 1+(5n) requests where n = number of actors registered.

    Deprecated, use get_actors instead.

    Returns a dict:
    [ain] = {
        'name': Name of actor,
        'state': Powerstate (boolean)
        'present': Connected to server? (boolean)
        'power': Current power consumption in mW
        'energy': Used energy in Wh since last energy reset
        'temperature': Current environment temperature in celsius
    }
    """
    actors = {}
    # One request for the ain list, then five more per actor.
    for ain in self.homeautoswitch("getswitchlist").split(','):
        # NOTE(review): homeautoswitch returns the raw response text, so
        # bool(...) for 'state'/'present' is True for any non-empty answer
        # (including "0") -- confirm against the device API.
        actors[ain] = {
            'name': self.homeautoswitch("getswitchname", ain),
            'state': bool(self.homeautoswitch("getswitchstate", ain)),
            'present': bool(self.homeautoswitch("getswitchpresent", ain)),
            'power': self.homeautoswitch("getswitchpower", ain),
            'energy': self.homeautoswitch("getswitchenergy", ain),
            'temperature': self.homeautoswitch("getswitchtemperature", ain),
        }
    return actors
Return a list of devices.
Deprecated, use get_actors instead.
def get_devices(self):
    """
    Return a list of devices.

    Deprecated, use get_actors instead.
    """
    response = self.session.get(
        self.base_url + '/net/home_auto_query.lua',
        params={'sid': self.sid, 'command': 'AllOutletStates', 'xhr': 0},
        timeout=15)
    response.raise_for_status()
    data = response.json()
    # The box reports outlets as numbered keys: DeviceID_1 .. DeviceID_<count>.
    return [
        Device(
            int(data["DeviceID_{0}".format(n)]),
            int(data["DeviceConnectState_{0}".format(n)]),
            int(data["DeviceSwitchState_{0}".format(n)])
        )
        for n in range(1, int(data["Outlet_count"]) + 1)
    ]
Return all available energy consumption data for the device.
You need to divice watt_values by 100 and volt_values by 1000
to get the "real" values.
:return: dict
def get_consumption(self, deviceid, timerange="10"):
    """
    Return all available energy consumption data for the device.

    Divide watt_values by 100 and volt_values by 1000 to get the
    "real" values.

    :param deviceid: device id to query
    :param timerange: one of "10", "24h", "month", "year"
    :return: dict
    """
    tranges = ("10", "24h", "month", "year")
    if timerange not in tranges:
        raise ValueError(
            "Unknown timerange. Possible values are: {0}".format(tranges)
        )
    response = self.session.get(
        self.base_url + "/net/home_auto_query.lua",
        params={
            'sid': self.sid,
            'command': 'EnergyStats_{0}'.format(timerange),
            'id': deviceid,
            'xhr': 0,
        },
        timeout=15)
    response.raise_for_status()
    data = response.json()
    # Map AVM's response keys onto pythonic result keys.
    values_map = {
        'MM_Value_Amp': 'mm_value_amp',
        'MM_Value_Power': 'mm_value_power',
        'MM_Value_Volt': 'mm_value_volt',
        'EnStats_average_value': 'enstats_average_value',
        'EnStats_max_value': 'enstats_max_value',
        'EnStats_min_value': 'enstats_min_value',
        'EnStats_timer_type': 'enstats_timer_type',
        'sum_Day': 'sum_day',
        'sum_Month': 'sum_month',
        'sum_Year': 'sum_year',
    }
    result = {py_key: int(data[avm_key])
              for avm_key, py_key in values_map.items()}
    # Series values come back as numbered keys: EnStats_watt_value_1..N.
    count = int(data["EnStats_count"])
    result['watt_values'] = [
        int(data["EnStats_watt_value_{}".format(n)])
        for n in range(1, count + 1)
    ]
    result['volt_values'] = [
        int(data["EnStats_volt_value_{}".format(n)])
        for n in range(1, count + 1)
    ]
    return result
Return the system logs since the last reboot.
def get_logs(self):
    """
    Return the system logs since the last reboot.

    :return: list of LogEntry(date, time, message, hash) items
    """
    # bs4 is an optional dependency, only needed for this method.
    assert BeautifulSoup, "Please install bs4 to use this method"
    url = self.base_url + "/system/syslog.lua"
    # 'print' style mode yields a plain HTML table that is easy to scrape.
    response = self.session.get(url, params={
        'sid': self.sid,
        'stylemode': 'print',
    }, timeout=15)
    response.raise_for_status()
    entries = []
    tree = BeautifulSoup(response.text)
    rows = tree.find('table').find_all('tr')
    for row in rows:
        columns = row.find_all("td")
        date = columns[0].string
        time = columns[1].string
        message = columns[2].find("a").string
        # NOTE(review): this looks Python-2 specific -- on Python 3,
        # formatting message.encode("UTF-8") embeds a b'...' repr and
        # hashlib.md5() rejects a str argument. Confirm target version.
        merged = "{} {} {}".format(date, time, message.encode("UTF-8"))
        msg_hash = hashlib.md5(merged).hexdigest()
        entries.append(LogEntry(date, time, message, msg_hash))
    return entries
Returns True if the Hawk nonce has been seen already.
def seen_nonce(id, nonce, timestamp):
    """
    Return True if the Hawk nonce has been seen already (replay guard).
    """
    key = '{id}:{n}:{ts}'.format(id=id, n=nonce, ts=timestamp)
    if not cache.get(key):
        log.debug('caching nonce {k}'.format(k=key))
        # We only need the nonce until the message itself expires.
        # This also adds a little bit of padding.
        expiration = getattr(settings, 'HAWK_MESSAGE_EXPIRATION',
                             default_message_expiration) + 5
        cache.set(key, True, timeout=expiration)
        return False
    log.warning('replay attack? already processed nonce {k}'
                .format(k=key))
    return True
FritzBox SmartHome Tool
\b
Provides the following functions:
- A easy to use library for querying SmartHome actors
- This CLI tool for testing
- A carbon client for pipeing data into graphite
def cli(context, host, username, password):
    """
    FritzBox SmartHome Tool

    \b
    Provides the following functions:
    - An easy to use library for querying SmartHome actors
    - This CLI tool for testing
    - A carbon client for piping data into graphite
    """
    # Shared FritzBox connection handle, passed to all subcommands.
    context.obj = FritzBox(host, username, password)
Display a list of actors
def actors(context):
    """Display a list of actors"""
    box = context.obj
    box.login()
    for actor in box.get_actors():
        click.echo("{} ({} {}; AIN {} )".format(
            actor.name,
            actor.manufacturer,
            actor.productname,
            actor.actor_id,
        ))
        if actor.has_temperature:
            click.echo("Temp: act {} target {}; battery (low): {}".format(
                actor.temperature,
                actor.target_temperature,
                actor.battery_low,
            ))
            click.echo("Temp (via get): act {} target {}".format(
                actor.get_temperature(),
                actor.get_target_temperature(),
            ))
Display energy stats of all actors
def energy(context, features):
    """Display energy stats of all actors"""
    box = context.obj
    box.login()
    for actor in box.get_actors():
        # Both branches report the same power/energy figures.
        watts = (actor.get_power() or 0.0) / 1000
        total_wh = (actor.get_energy() or 0.0) / 100
        if actor.temperature is not None:
            click.echo("{} ({}): {:.2f} Watt current, {:.3f} wH total, {:.2f} °C".format(
                actor.name.encode('utf-8'),
                actor.actor_id,
                watts,
                total_wh,
                actor.temperature
            ))
        else:
            click.echo("{} ({}): {:.2f} Watt current, {:.3f} wH total, offline".format(
                actor.name.encode('utf-8'),
                actor.actor_id,
                watts,
                total_wh
            ))
        if features:
            click.echo(" Features: PowerMeter: {}, Temperatur: {}, Switch: {}".format(
                actor.has_powermeter, actor.has_temperature, actor.has_switch
            ))
Display energy stats of all actors
def graphite(context, server, port, interval, prefix):
    """Periodically push energy stats of all actors to a carbon server."""
    fritz = context.obj
    fritz.login()
    # FritzBox SIDs expire; refresh the login every 10 minutes.
    sid_ttl = time.time() + 600
    # Find actors and create carbon keys
    click.echo(" * Requesting actors list")
    # Carbon metric names tolerate a restricted charset only.
    simple_chars = re.compile('[^A-Za-z0-9]+')
    actors = fritz.get_actors()
    keys = {}
    for actor in actors:
        keys[actor.name] = "{}.{}".format(
            prefix,
            simple_chars.sub('_', actor.name)
        )
    # Connect to carbon
    click.echo(" * Trying to connect to carbon")
    timeout = 2
    sock = socket.socket()
    sock.settimeout(timeout)
    try:
        sock.connect((server, port))
    except socket.timeout:
        raise Exception("Took over {} second(s) to connect to {}".format(
            timeout, server
        ))
    except Exception as error:
        raise Exception("unknown exception while connecting to {} - {}".format(
            server, error
        ))

    def send(key, value):
        """Send a key-value-pair to carbon"""
        # NOTE(review): sendall() with a str only works on Python 2;
        # Python 3 sockets require bytes -- confirm target version.
        now = int(time.time())
        payload = "{} {} {}\n".format(key, value, now)
        sock.sendall(payload)

    # Poll forever; this subcommand is meant to run as a daemon.
    while True:
        if time.time() > sid_ttl:
            click.echo(" * Requesting new SID")
            fritz.login()
            sid_ttl = time.time() + 600
        click.echo(" * Requesting statistics")
        for actor in actors:
            power = actor.get_power()
            total = actor.get_energy()
            click.echo(" -> {}: {:.2f} Watt current, {:.3f} wH total".format(
                actor.name, power / 1000, total / 100
            ))
            send(keys[actor.name] + '.current', power)
            send(keys[actor.name] + '.total', total)
        time.sleep(interval)
Switch an actor's power to ON
def switch_on(context, ain):
    """Switch an actor's power to ON"""
    context.obj.login()
    actor = context.obj.get_actor_by_ain(ain)
    if not actor:
        click.echo("Actor not found: {}".format(ain))
        return
    click.echo("Switching {} on".format(actor.name))
    actor.switch_on()
Get an actor's power state
def switch_state(context, ain):
    """Get an actor's power state"""
    context.obj.login()
    actor = context.obj.get_actor_by_ain(ain)
    if not actor:
        click.echo("Actor not found: {}".format(ain))
        return
    state = 'ON' if actor.get_state() else 'OFF'
    click.echo("State for {} is: {}".format(ain, state))
Toggle an actor's power state
def switch_toggle(context, ain):
    """Toggle an actor's power state"""
    context.obj.login()
    actor = context.obj.get_actor_by_ain(ain)
    if not actor:
        click.echo("Actor not found: {}".format(ain))
        return
    if actor.get_state():
        actor.switch_off()
        new_state = "OFF"
    else:
        actor.switch_on()
        new_state = "ON"
    click.echo("State for {} is now {}".format(ain, new_state))
Show system logs since last reboot
def logs(context, format):
    """Show system logs since last reboot"""
    box = context.obj
    box.login()
    messages = box.get_logs()
    if format == "plain":
        for msg in messages:
            click.echo("{} {} {}".format(
                msg.date, msg.time, msg.message.encode("UTF-8")))
    if format == "json":
        click.echo(json.dumps({
            "entries": [msg._asdict() for msg in messages],
        }))
Returns the current power usage in milliWatts.
Attention: Returns None if the value can't be queried or is unknown.
def get_power(self):
    """
    Return the current power usage in milliwatts.

    Returns None if the value can't be queried or is unknown.
    """
    raw = self.box.homeautoswitch("getswitchpower", self.actor_id)
    if not raw.isdigit():
        return None
    return int(raw)
Returns the consumed energy since the start of the statistics in Wh.
Attention: Returns None if the value can't be queried or is unknown.
def get_energy(self):
    """
    Return the energy consumed since the start of the statistics, in Wh.

    Returns None if the value can't be queried or is unknown.
    """
    raw = self.box.homeautoswitch("getswitchenergy", self.actor_id)
    if not raw.isdigit():
        return None
    return int(raw)
Returns the current environment temperature.
Attention: Returns None if the value can't be queried or is unknown.
def get_temperature(self):
    """
    Return (and cache on self.temperature) the current environment
    temperature in degrees Celsius.

    Returns None if the value can't be queried or is unknown.
    """
    # NOTE(review): a negative reading would fail isdigit() and be
    # reported as None -- confirm whether the box can return negatives.
    raw = self.box.homeautoswitch("gettemperature", self.actor_id)
    self.temperature = float(raw) / 10 if raw.isdigit() else None
    return self.temperature
Returns the actual target temperature.
Attention: Returns None if the value can't be queried or is unknown.
def get_target_temperature(self):
    """
    Return (and cache on self.target_temperature) the current target
    temperature.

    Returns None if the value can't be queried or is unknown.
    """
    raw = self.box.homeautoswitch("gethkrtsoll", self.actor_id)
    self.target_temperature = self.__get_temp(raw)
    return self.target_temperature
Sets the temperature in celcius
def set_temperature(self, temp):
    """
    Set the target temperature, in degrees Celsius.

    Values below 8 degrees turn temperature control off; 28 degrees and
    above turn it permanently on.
    """
    # The box encodes temperature as half-degrees with an offset
    # (8 deg C -> 16); 253/254 are the special off/on codes.
    encoded = 16 + ((temp - 8) * 2)
    if encoded < 16:
        logger.info("Actor " + self.name + ": Temperature control set to off")
        encoded = 253
    elif encoded >= 56:
        logger.info("Actor " + self.name + ": Temperature control set to on")
        encoded = 254
    else:
        logger.info("Actor " + self.name + ": Temperature control set to " + str(temp))
    return self.box.homeautoswitch("sethkrtsoll", self.actor_id, encoded)
https://<host>[:<port>]/
:return: str
def get_openshift_base_uri(self):
    """
    Return the OpenShift base URI: https://<host>[:<port>]/

    Honors the deprecated "openshift_uri" key (with a warning) before
    falling back to "openshift_url".

    :return: str
    """
    deprecated_key = "openshift_uri"
    key = "openshift_url"
    legacy = self._get_value(deprecated_key, self.conf_section, deprecated_key)
    if legacy is not None:
        warnings.warn("%r is deprecated, use %r instead" % (deprecated_key, key))
        return legacy
    return self._get_value(key, self.conf_section, key)
url of OpenShift where builder will connect
def get_builder_openshift_url(self):
    """Return the URL of OpenShift where the builder will connect."""
    key = "builder_openshift_url"
    url = self._get_deprecated(key, self.conf_section, key)
    if url is not None:
        return url
    # Builder-specific key absent: fall back to the base URI.
    logger.warning("%r not found, falling back to get_openshift_base_uri()", key)
    return self.get_openshift_base_uri()
helper method for generating nodeselector dict
:param nodeselector_str:
:return: dict
def generate_nodeselector_dict(self, nodeselector_str):
    """
    Parse a "key1=val1, key2=val2" string into a node-selector dict.

    The literal string 'none' (or an empty/None value) yields {}.

    :param nodeselector_str: str or None
    :return: dict
    """
    if not nodeselector_str or nodeselector_str == 'none':
        return {}
    pairs = (item.strip().split('=', 1)
             for item in nodeselector_str.split(','))
    return {key.strip(): value.strip() for key, value in pairs}
search the configuration for entries of the form node_selector.platform
:param platform: str, platform to search for, can be null
:return dict
def get_platform_node_selector(self, platform):
    """
    Look up the node_selector.<platform> configuration entry.

    :param platform: str, platform to search for, may be None
    :return: dict (empty when platform is falsy)
    """
    if not platform:
        return {}
    conf_key = "node_selector." + platform
    raw = self._get_value(conf_key, self.conf_section, conf_key)
    return self.generate_nodeselector_dict(raw)
Extract tabular data as |TableData| instances from a CSV file.
|load_source_desc_file|
:return:
Loaded table data.
|load_table_name_desc|
=================== ========================================
Format specifier Value after the replacement
=================== ========================================
``%(filename)s`` |filename_desc|
``%(format_name)s`` ``"csv"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ========================================
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the CSV data is invalid.
.. seealso::
:py:func:`csv.reader`
def load(self):
    """
    Extract tabular data as |TableData| instances from a CSV file.
    |load_source_desc_file|

    :return:
        Loaded table data.
        |load_table_name_desc|

        ===================  ========================================
        Format specifier     Value after the replacement
        ===================  ========================================
        ``%(filename)s``     |filename_desc|
        ``%(format_name)s``  ``"csv"``
        ``%(format_id)s``    |format_id_desc|
        ``%(global_id)s``    |global_id|
        ===================  ========================================
    :rtype: |TableData| iterator
    :raises pytablereader.DataError:
        If the CSV data is invalid.

    .. seealso::
        :py:func:`csv.reader`
    """
    self._validate()
    self._logger.logging_load()

    # Detect the file's encoding before opening it for parsing.
    self.encoding = get_file_encoding(self.source, self.encoding)

    if six.PY3:
        self._csv_reader = csv.reader(
            io.open(self.source, "r", encoding=self.encoding),
            delimiter=self.delimiter,
            quotechar=self.quotechar,
            strict=True,
            skipinitialspace=True,
        )
    else:
        # Python 2's csv module cannot read unicode directly; feed it
        # UTF-8-encoded bytes instead.
        self._csv_reader = csv.reader(
            _utf_8_encoder(io.open(self.source, "r", encoding=self.encoding)),
            delimiter=self.delimiter,
            quotechar=self.quotechar,
            strict=True,
            skipinitialspace=True,
        )

    formatter = CsvTableFormatter(self._to_data_matrix())
    formatter.accept(self)
    return formatter.to_table_data()
Extract tabular data as |TableData| instances from a CSV text object.
|load_source_desc_text|
:return:
Loaded table data.
|load_table_name_desc|
=================== ========================================
Format specifier Value after the replacement
=================== ========================================
``%(filename)s`` ``""``
``%(format_name)s`` ``"csv"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ========================================
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the CSV data is invalid.
.. seealso::
:py:func:`csv.reader`
def load(self):
    """
    Extract tabular data as |TableData| instances from a CSV text object.
    |load_source_desc_text|

    :return:
        Loaded table data.
        |load_table_name_desc|

        ===================  ========================================
        Format specifier     Value after the replacement
        ===================  ========================================
        ``%(filename)s``     ``""``
        ``%(format_name)s``  ``"csv"``
        ``%(format_id)s``    |format_id_desc|
        ``%(global_id)s``    |global_id|
        ===================  ========================================
    :rtype: |TableData| iterator
    :raises pytablereader.DataError:
        If the CSV data is invalid.

    .. seealso::
        :py:func:`csv.reader`
    """
    self._validate()
    self._logger.logging_load()

    # The source is an in-memory text blob; wrap it in a file-like
    # object for the csv module.
    self._csv_reader = csv.reader(
        six.StringIO(self.source.strip()),
        delimiter=self.delimiter,
        quotechar=self.quotechar,
        strict=True,
        skipinitialspace=True,
    )
    formatter = CsvTableFormatter(self._to_data_matrix())
    formatter.accept(self)
    return formatter.to_table_data()
set parameters according to specification
these parameters are accepted:
:param pulp_secret: str, resource name of pulp secret
:param koji_target: str, koji tag with packages used to build the image
:param kojiroot: str, URL from which koji packages are fetched
:param kojihub: str, URL of the koji hub
:param koji_certs_secret: str, resource name of secret that holds the koji certificates
:param koji_task_id: int, Koji Task that created this build config
:param flatpak: if we should build a Flatpak OCI Image
:param filesystem_koji_task_id: int, Koji Task that created the base filesystem
:param pulp_registry: str, name of pulp registry in dockpulp.conf
:param sources_command: str, command used to fetch dist-git sources
:param architecture: str, architecture we are building for
:param vendor: str, vendor name
:param build_host: str, host the build will run on or None for auto
:param authoritative_registry: str, the docker registry authoritative for this image
:param distribution_scope: str, distribution scope for this image
(private, authoritative-source-only, restricted, public)
:param use_auth: bool, use auth from atomic-reactor?
:param platform_node_selector: dict, a nodeselector for a specific platform
:param platform_descriptors: dict, platforms and their archiectures and enable_v1 settings
:param scratch_build_node_selector: dict, a nodeselector for scratch builds
:param explicit_build_node_selector: dict, a nodeselector for explicit builds
:param auto_build_node_selector: dict, a nodeselector for auto builds
:param isolated_build_node_selector: dict, a nodeselector for isolated builds
:param is_auto: bool, indicates if build is auto build
:param parent_images_digests: dict, mapping image names with tags to platform specific
digests, example:
{'registry.fedorahosted.org/fedora:29': {
x86_64': 'registry.fedorahosted.org/fedora@sha256:....'}
}
def set_params(self, **kwargs):
    """
    set parameters according to specification

    these parameters are accepted:
    :param pulp_secret: str, resource name of pulp secret
    :param koji_target: str, koji tag with packages used to build the image
    :param kojiroot: str, URL from which koji packages are fetched
    :param kojihub: str, URL of the koji hub
    :param koji_certs_secret: str, resource name of secret that holds the koji certificates
    :param koji_task_id: int, Koji Task that created this build config
    :param flatpak: if we should build a Flatpak OCI Image
    :param filesystem_koji_task_id: int, Koji Task that created the base filesystem
    :param pulp_registry: str, name of pulp registry in dockpulp.conf
    :param sources_command: str, command used to fetch dist-git sources
    :param architecture: str, architecture we are building for
    :param vendor: str, vendor name
    :param build_host: str, host the build will run on or None for auto
    :param authoritative_registry: str, the docker registry authoritative for this image
    :param distribution_scope: str, distribution scope for this image
                               (private, authoritative-source-only, restricted, public)
    :param use_auth: bool, use auth from atomic-reactor?
    :param platform_node_selector: dict, a nodeselector for a specific platform
    :param platform_descriptors: dict, platforms and their architectures and enable_v1 settings
    :param scratch_build_node_selector: dict, a nodeselector for scratch builds
    :param explicit_build_node_selector: dict, a nodeselector for explicit builds
    :param auto_build_node_selector: dict, a nodeselector for auto builds
    :param isolated_build_node_selector: dict, a nodeselector for isolated builds
    :param is_auto: bool, indicates if build is auto build
    :param parent_images_digests: dict, mapping image names with tags to platform specific
                                  digests, example:
                                  {'registry.fedorahosted.org/fedora:29': {
                                      x86_64': 'registry.fedorahosted.org/fedora@sha256:....'}
                                  }
    """
    # Here we cater to the koji "scratch" build type, this will disable
    # all plugins that might cause importing of data to koji
    self.scratch = kwargs.pop('scratch', False)
    # When true, it indicates build was automatically started by
    # OpenShift via a trigger, for instance ImageChangeTrigger
    self.is_auto = kwargs.pop('is_auto', False)
    # An isolated build is meant to patch a certain release and not
    # update transient tags in container registry
    self.isolated = kwargs.pop('isolated', False)

    # scratch/is_auto/isolated are mutually constrained; fail early.
    self.validate_build_variation()

    # Note: get() rather than pop() -- these keys are still forwarded
    # to self.spec.set_params() below.
    self.base_image = kwargs.get('base_image')
    self.platform_node_selector = kwargs.get('platform_node_selector', {})
    self.platform_descriptors = kwargs.get('platform_descriptors', {})
    self.scratch_build_node_selector = kwargs.get('scratch_build_node_selector', {})
    self.explicit_build_node_selector = kwargs.get('explicit_build_node_selector', {})
    self.auto_build_node_selector = kwargs.get('auto_build_node_selector', {})
    self.isolated_build_node_selector = kwargs.get('isolated_build_node_selector', {})

    logger.debug("setting params '%s' for %s", kwargs, self.spec)
    self.spec.set_params(**kwargs)
    # NOTE(review): popped only *after* spec.set_params(), so 'osbs_api'
    # is also forwarded to the spec, and a missing key raises KeyError --
    # confirm both behaviors are intended.
    self.osbs_api = kwargs.pop('osbs_api')
Return True if this BuildConfig has ImageStreamTag trigger.
def has_ist_trigger(self):
    """Return True if this BuildConfig has an ImageStreamTag trigger."""
    return any(
        trigger['type'] == 'ImageChange' and
        trigger['imageChange']['from']['kind'] == 'ImageStreamTag'
        for trigger in self.template['spec'].get('triggers', [])
    )
Sets secret for plugin, if no plugin specified
it will also set general secret
:param secret: str, secret name
:param plugin: tuple, (plugin type, plugin name, argument name)
:param mount_path: str, mount path of secret
def set_secret_for_plugin(self, secret, plugin=None, mount_path=None):
    """
    Sets secret for plugin, if no plugin specified
    it will also set general secret

    :param secret: str, secret name
    :param plugin: tuple, (plugin type, plugin name, argument name)
    :param mount_path: str, mount path of secret
    """
    has_plugin_conf = False
    if plugin is not None:
        has_plugin_conf = self.dj.dock_json_has_plugin_conf(plugin[0],
                                                            plugin[1])
    # Silently a no-op when the template has no custom-strategy secrets.
    if 'secrets' in self.template['spec']['strategy']['customStrategy']:
        if not plugin or has_plugin_conf:
            custom = self.template['spec']['strategy']['customStrategy']
            if mount_path:
                secret_path = mount_path
            else:
                secret_path = os.path.join(SECRETS_PATH, secret)

            logger.info("Configuring %s secret at %s", secret, secret_path)
            # Avoid mounting the same secret twice.
            existing = [secret_mount for secret_mount in custom['secrets']
                        if secret_mount['secretSource']['name'] == secret]
            if existing:
                logger.debug("secret %s already set", secret)
            else:
                custom['secrets'].append({
                    'secretSource': {
                        'name': secret,
                    },
                    'mountPath': secret_path,
                })

            # there's no need to set args if no plugin secret specified
            # this is used in tag_and_push plugin, as it sets secret path
            # for each registry separately
            if plugin and plugin[2] is not None:
                self.dj.dock_json_set_arg(*(plugin + (secret_path,)))
        else:
            # Reached only when plugin is not None, so plugin[1] is safe.
            logger.debug("not setting secret for unused plugin %s",
                         plugin[1])
:param secrets: dict, {(plugin type, plugin name, argument name): secret name}
for example {('exit_plugins', 'koji_promote', 'koji_ssl_certs'): 'koji_ssl_certs', ...}
def set_secrets(self, secrets):
    """
    Attach the given secrets to their plugins.

    :param secrets: dict, {(plugin type, plugin name, argument name): secret name}
                    for example {('exit_plugins', 'koji_promote', 'koji_ssl_certs'): 'koji_ssl_certs', ...}
    :raises ValueError: when a key is not a 3-tuple
    """
    any_secret_set = False
    for plugin, secret in secrets.items():
        if not isinstance(plugin, tuple) or len(plugin) != 3:
            raise ValueError('got "%s" as secrets key, need 3-tuple' % plugin)
        if secret is None:
            continue
        secret_items = secret if isinstance(secret, list) else [secret]
        for item in secret_items:
            self.set_secret_for_plugin(item, plugin=plugin)
        any_secret_set = True

    if not any_secret_set:
        # remove references to secret if no secret was set
        custom = self.template['spec']['strategy']['customStrategy']
        if 'secrets' in custom:
            del custom['secrets']
Remove matching entries from tag_and_push_registries (in-place)
:param tag_and_push_registries: dict, uri -> dict
:param version: str, 'version' to match against
def remove_tag_and_push_registries(tag_and_push_registries, version):
    """
    Remove matching entries from tag_and_push_registries (in-place)

    :param tag_and_push_registries: dict, uri -> dict
    :param version: str, 'version' to match against
    """
    # Collect first, then delete -- never mutate a dict while iterating it.
    doomed = [uri for uri, regdict in tag_and_push_registries.items()
              if regdict['version'] == version]
    for uri in doomed:
        logger.info("removing %s registry: %s", version, uri)
        del tag_and_push_registries[uri]
Enable/disable plugins depending on supported registry API versions
def adjust_for_registry_api_versions(self):
    """
    Enable/disable plugins depending on supported registry API versions

    :raises OsbsValidationException: when only the v1 registry API is
        configured (v2 is mandatory)
    """
    versions = self.spec.registry_api_versions.value
    if 'v2' not in versions:
        raise OsbsValidationException('v1-only docker registry API is not supported')

    try:
        push_conf = self.dj.dock_json_get_plugin_conf('postbuild_plugins',
                                                      'tag_and_push')
        tag_and_push_registries = push_conf['args']['registries']
    except (KeyError, IndexError):
        # Plugin missing or not configured -- nothing to trim below.
        tag_and_push_registries = {}

    if 'v1' not in versions:
        # Remove v1-only plugins
        for phase, name in [('postbuild_plugins', 'pulp_push')]:
            logger.info("removing v1-only plugin: %s", name)
            self.dj.remove_plugin(phase, name)

        # remove extra tag_and_push config
        self.remove_tag_and_push_registries(tag_and_push_registries, 'v1')

    # Remove 'version' from tag_and_push plugin config as it's no
    # longer needed
    for regdict in tag_and_push_registries.values():
        if 'version' in regdict:
            del regdict['version']
Remove trigger-related plugins when needed
If there are no triggers defined, it's assumed the
feature is disabled and all trigger-related plugins
are removed.
If there are triggers defined, and this is a custom
base image, some trigger-related plugins do not apply.
Additionally, this method ensures that custom base
images never have triggers since triggering a base
image rebuild is not a valid scenario.
def adjust_for_triggers(self):
    """Remove trigger-related plugins when needed

    If there are no triggers defined, it's assumed the
    feature is disabled and all trigger-related plugins
    are removed.

    If there are triggers defined, and this is a custom
    base image or a FROM scratch image, triggers are deleted
    and the trigger-related plugins are removed as well.
    """
    triggers = self.template['spec'].get('triggers', [])
    remove_plugins = [
        ("prebuild_plugins", "check_and_set_rebuild"),
        ("prebuild_plugins", "stop_autorebuild_if_disabled"),
    ]
    should_remove = False
    # NOTE: 'msg' is assigned on exactly the paths that set
    # should_remove = True, so the logging below never sees it unbound.
    if triggers and (self.is_custom_base_image() or self.is_from_scratch_image()):
        if self.is_custom_base_image():
            msg = "removing %s from request because custom base image"
        elif self.is_from_scratch_image():
            msg = 'removing %s from request because FROM scratch image'
        del self.template['spec']['triggers']
        should_remove = True
    elif not triggers:
        msg = "removing %s from request because there are no triggers"
        should_remove = True
    if should_remove:
        for when, which in remove_plugins:
            logger.info(msg, which)
            self.dj.remove_plugin(when, which)
Remove certain plugins in order to handle the "scratch build"
scenario. Scratch builds must not affect subsequent builds,
and should not be imported into Koji.
def adjust_for_scratch(self):
    """
    Remove certain plugins in order to handle the "scratch build"
    scenario. Scratch builds must not affect subsequent builds,
    and should not be imported into Koji.
    """
    if self.scratch:
        # scratch builds should not carry any triggers
        self.template['spec'].pop('triggers', None)
        remove_plugins = [
            ("prebuild_plugins", "koji_parent"),
            ("postbuild_plugins", "compress"),  # required only to make an archive for Koji
            ("postbuild_plugins", "pulp_pull"),  # required only to make an archive for Koji
            ("postbuild_plugins", "koji_upload"),
            ("postbuild_plugins", "fetch_worker_metadata"),
            ("postbuild_plugins", "compare_components"),
            ("postbuild_plugins", "import_image"),
            ("exit_plugins", "koji_promote"),
            ("exit_plugins", "koji_import"),
            ("exit_plugins", "koji_tag_build"),
            ("exit_plugins", "remove_worker_metadata"),
            ("exit_plugins", "import_image"),
        ]
        if not self.has_tag_suffixes_placeholder():
            remove_plugins.append(("postbuild_plugins", "tag_from_config"))
        for when, which in remove_plugins:
            logger.info("removing %s from scratch build request",
                        which)
            self.dj.remove_plugin(when, which)
        # only apply the unique tag for scratch builds
        if self.dj.dock_json_has_plugin_conf('postbuild_plugins',
                                             'tag_by_labels'):
            self.dj.dock_json_set_arg('postbuild_plugins', 'tag_by_labels',
                                      'unique_tag_only', True)
        self.set_label('scratch', 'true')
Disable plugins to handle builds depending on whether
or not this is a build from a custom base image.
def adjust_for_custom_base_image(self):
    """
    Disable plugins to handle builds depending on whether
    or not this is a build from a custom base image.
    """
    if self.is_custom_base_image():
        # These plugins make no sense when building a base image itself.
        to_remove = [
            ("prebuild_plugins", "pull_base_image"),
            ("prebuild_plugins", "koji_parent"),
            ("prebuild_plugins", "inject_parent_image"),
        ]
        msg = "removing %s from custom image build request"
    else:
        # add_filesystem is only needed when creating a base image.
        to_remove = [("prebuild_plugins", "add_filesystem")]
        msg = "removing %s from non custom image build request"
    for phase, plugin_name in to_remove:
        logger.info(msg, plugin_name)
        self.dj.remove_plugin(phase, plugin_name)
if there is yum repo specified, don't pick stuff from koji
def render_koji(self):
    """Configure or drop the koji pre-build plugin.

    When yum repo URLs are given, or when koji target/root/hub are not
    all configured, the plugin is removed; otherwise its arguments are
    filled in from the spec.
    """
    phase = 'prebuild_plugins'
    plugin = 'koji'
    if not self.dj.dock_json_has_plugin_conf(phase, plugin):
        return
    if self.spec.yum_repourls.value:
        logger.info("removing koji from request "
                    "because there is yum repo specified")
        self.dj.remove_plugin(phase, plugin)
        return
    koji_settings = (self.spec.koji_target.value,
                     self.spec.kojiroot.value,
                     self.spec.kojihub.value)
    if not all(koji_settings):
        logger.info("removing koji from request as not specified")
        self.dj.remove_plugin(phase, plugin)
        return
    target, root, hub = koji_settings
    self.dj.dock_json_set_arg(phase, plugin, "target", target)
    self.dj.dock_json_set_arg(phase, plugin, "root", root)
    self.dj.dock_json_set_arg(phase, plugin, "hub", hub)
    if self.spec.proxy.value:
        self.dj.dock_json_set_arg(phase, plugin, "proxy", self.spec.proxy.value)
If the bump_release plugin is present, configure it
def render_bump_release(self):
    """Configure the bump_release plugin when present.

    The plugin is dropped when a release was already specified or when
    no koji hub is configured; otherwise the hub (and, for flatpak
    builds, the 'append' flag) is filled in.
    """
    phase = 'prebuild_plugins'
    plugin = 'bump_release'
    if not self.dj.dock_json_has_plugin_conf(phase, plugin):
        return
    hub = self.spec.kojihub.value
    removal_reason = None
    if self.spec.release.value:
        removal_reason = 'removing %s from request as release already specified'
    elif not hub:
        removal_reason = 'removing %s from request as koji hub not specified'
    if removal_reason is not None:
        logger.info(removal_reason, plugin)
        self.dj.remove_plugin(phase, plugin)
        return
    self.dj.dock_json_set_arg(phase, plugin, 'hub', hub)
    # For flatpak, we want a name-version-release of
    # <name>-<stream>-<module_build_version>.<n>, where the .<n> makes
    # sure that the build is unique in Koji
    if self.spec.flatpak.value:
        self.dj.dock_json_set_arg(phase, plugin, 'append', True)
if we have smtp_host and smtp_from, configure sendmail plugin,
else remove it
def render_sendmail(self):
    """
    if we have smtp_host and smtp_from, configure sendmail plugin,
    else remove it

    The remaining smtp/koji arguments are optional and are only set
    when the corresponding spec values are present.
    """
    phase = 'exit_plugins'
    plugin = 'sendmail'
    if not self.dj.dock_json_has_plugin_conf(phase, plugin):
        return
    # Both smtp_host and smtp_from are mandatory for the plugin.
    if self.spec.smtp_host.value and self.spec.smtp_from.value:
        self.dj.dock_json_set_arg(phase, plugin, 'url',
                                  self.spec.builder_openshift_url.value)
        self.dj.dock_json_set_arg(phase, plugin, 'smtp_host',
                                  self.spec.smtp_host.value)
        self.dj.dock_json_set_arg(phase, plugin, 'from_address',
                                  self.spec.smtp_from.value)
    else:
        logger.info("removing sendmail from request, "
                    "requires smtp_host and smtp_from")
        self.dj.remove_plugin(phase, plugin)
        return
    # Optional koji integration (both hub and root are required).
    if self.spec.kojihub.value and self.spec.kojiroot.value:
        self.dj.dock_json_set_arg(phase, plugin,
                                  'koji_hub', self.spec.kojihub.value)
        self.dj.dock_json_set_arg(phase, plugin,
                                  "koji_root", self.spec.kojiroot.value)
    # Optional recipient configuration, passed through when present.
    if self.spec.smtp_to_submitter.value:
        self.dj.dock_json_set_arg(phase, plugin, 'to_koji_submitter',
                                  self.spec.smtp_to_submitter.value)
    if self.spec.smtp_to_pkgowner.value:
        self.dj.dock_json_set_arg(phase, plugin, 'to_koji_pkgowner',
                                  self.spec.smtp_to_pkgowner.value)
    if self.spec.smtp_additional_addresses.value:
        self.dj.dock_json_set_arg(phase, plugin, 'additional_addresses',
                                  self.spec.smtp_additional_addresses.value)
    if self.spec.smtp_error_addresses.value:
        self.dj.dock_json_set_arg(phase, plugin,
                                  'error_addresses', self.spec.smtp_error_addresses.value)
    if self.spec.smtp_email_domain.value:
        self.dj.dock_json_set_arg(phase, plugin,
                                  'email_domain', self.spec.smtp_email_domain.value)
Configure fetch_maven_artifacts plugin
def render_fetch_maven_artifacts(self):
    """Set up the fetch_maven_artifacts plugin or drop it.

    The plugin is removed when neither kojihub nor kojiroot is
    configured; otherwise both values (and optionally the allowed
    artifact domains) are passed through as plugin arguments.
    """
    phase = 'prebuild_plugins'
    plugin = 'fetch_maven_artifacts'
    if not self.dj.dock_json_has_plugin_conf(phase, plugin):
        return
    hub = self.spec.kojihub.value
    root = self.spec.kojiroot.value
    # NOTE(review): the plugin is removed only when BOTH values are
    # missing; a single missing value is still passed through --
    # confirm this is intentional.
    if not (hub or root):
        logger.info('Removing %s because kojihub and kojiroot were not specified', plugin)
        self.dj.remove_plugin(phase, plugin)
        return
    self.dj.dock_json_set_arg(phase, plugin, 'koji_hub', hub)
    self.dj.dock_json_set_arg(phase, plugin, 'koji_root', root)
    allowed_domains = self.spec.artifacts_allowed_domains.value
    if allowed_domains:
        self.dj.dock_json_set_arg(phase, plugin, 'allowed_domains', allowed_domains)
Configure tag_from_config plugin
def render_tag_from_config(self):
    """Fill in the tag_suffixes argument of the tag_from_config plugin."""
    if not self.has_tag_suffixes_placeholder():
        return
    # The unique tag is whatever follows the ':' in the image tag.
    unique_tag = self.spec.image_tag.value.split(':')[-1]
    primary = []
    if self.spec.build_type.value == BUILD_TYPE_ORCHESTRATOR:
        if self.scratch:
            # Scratch builds get no primary tags at all.
            pass
        elif self.isolated:
            primary.append('{version}-{release}')
        elif self._repo_info.additional_tags.from_container_yaml:
            primary.append('{version}-{release}')
            primary.extend(self._repo_info.additional_tags.tags)
        else:
            primary.extend(['latest', '{version}', '{version}-{release}'])
            primary.extend(self._repo_info.additional_tags.tags)
    tag_suffixes = {'unique': [unique_tag], 'primary': primary}
    self.dj.dock_json_set_arg('postbuild_plugins', 'tag_from_config',
                              'tag_suffixes', tag_suffixes)
If a pulp registry is specified, use pulp_pull plugin
def render_pulp_pull(self):
    """
    If a pulp registry is specified, use pulp_pull plugin

    The plugin is removed from any phase where the pulp registry or the
    koji hub is not configured; otherwise expect_v2schema2 is derived
    from the prefer_schema1_digest setting.
    """
    # pulp_pull is a multi-phase plugin
    phases = ('postbuild_plugins', 'exit_plugins')
    plugin = 'pulp_pull'
    for phase in phases:
        if not self.dj.dock_json_has_plugin_conf(phase, plugin):
            continue
        pulp_registry = self.spec.pulp_registry.value
        if not pulp_registry:
            # Bug fix: previously this logged the (empty) pulp_registry
            # value instead of the name of the plugin being removed.
            logger.info("removing %s from request, requires pulp_registry", plugin)
            self.dj.remove_plugin(phase, plugin)
            continue
        if not self.spec.kojihub.value:
            logger.info('Removing %s because no kojihub was specified', plugin)
            self.dj.remove_plugin(phase, plugin)
            continue
        if self.spec.prefer_schema1_digest.value is not None:
            # expect_v2schema2 is the inverse of preferring schema1
            self.dj.dock_json_set_arg(phase, plugin,
                                      'expect_v2schema2',
                                      not self.spec.prefer_schema1_digest.value)
If a pulp registry is specified, use the pulp plugin as well as the
delete_from_registry to delete the image after sync
def render_pulp_sync(self):
    """
    If a pulp registry is specified, use the pulp plugin as well as the
    delete_from_registry to delete the image after sync

    :raises OsbsValidationException: when a pulp registry is configured
        but no pulp secret is available for authentication
    """
    if not self.dj.dock_json_has_plugin_conf('postbuild_plugins',
                                             'pulp_sync'):
        return
    pulp_registry = self.spec.pulp_registry.value
    # Find which registry to use
    docker_registry = None
    registry_secret = None
    registries = zip_longest(self.spec.registry_uris.value,
                             self.spec.registry_secrets.value)
    for registry, secret in registries:
        if registry.version == 'v2':
            # First specified v2 registry is the one we'll tell pulp
            # to sync from. Keep the http prefix -- pulp wants it.
            docker_registry = registry.uri
            registry_secret = secret
            logger.info("using docker v2 registry %s for pulp_sync",
                        docker_registry)
            break
    if pulp_registry and docker_registry:
        self.dj.dock_json_set_arg('postbuild_plugins', 'pulp_sync',
                                  'pulp_registry_name', pulp_registry)
        self.dj.dock_json_set_arg('postbuild_plugins', 'pulp_sync',
                                  'docker_registry', docker_registry)
        if registry_secret:
            self.set_secret_for_plugin(registry_secret,
                                       plugin=('postbuild_plugins',
                                               'pulp_sync',
                                               'registry_secret_path'))
        # Verify we have a pulp secret
        if self.spec.pulp_secret.value is None:
            raise OsbsValidationException("Pulp registry specified "
                                          "but no auth config")
        source_registry = self.spec.source_registry_uri.value
        # NOTE: 'registry' here is the v2 registry left bound by the
        # loop above (docker_registry being set implies the loop broke
        # on it). Only delete when the sync target differs from the
        # source registry.
        perform_delete = (source_registry is None or
                          source_registry.docker_uri != registry.docker_uri)
        if perform_delete:
            push_conf = self.dj.dock_json_get_plugin_conf('exit_plugins',
                                                          'delete_from_registry')
            args = push_conf.setdefault('args', {})
            delete_registries = args.setdefault('registries', {})
            placeholder = '{{REGISTRY_URI}}'
            # use passed in params like 'insecure' if available
            if placeholder in delete_registries:
                regdict = delete_registries[placeholder].copy()
                del delete_registries[placeholder]
            else:
                regdict = {}
            if registry_secret:
                regdict['secret'] = \
                    os.path.join(SECRETS_PATH, registry_secret)
            # tag_and_push configured the registry secret, no need to set it again
            delete_registries[docker_registry] = regdict
            self.dj.dock_json_set_arg('exit_plugins', 'delete_from_registry',
                                      'registries', delete_registries)
        else:
            logger.info("removing delete_from_registry from request, "
                        "source and target registry are identical")
            self.dj.remove_plugin("exit_plugins", "delete_from_registry")
    else:
        # If no pulp registry is specified, don't run the pulp plugin
        logger.info("removing pulp_sync+delete_from_registry from request, "
                    "requires pulp_registry and a v2 registry")
        self.dj.remove_plugin("postbuild_plugins", "pulp_sync")
        self.dj.remove_plugin("exit_plugins", "delete_from_registry")
Configure the pulp_tag plugin.
def render_pulp_tag(self):
    """Configure or drop the pulp_tag post-build plugin.

    :raises OsbsValidationException: when a pulp registry is configured
        but neither a pulp secret nor a username argument is available
    """
    phase = 'postbuild_plugins'
    plugin = 'pulp_tag'
    if not self.dj.dock_json_has_plugin_conf(phase, plugin):
        return
    pulp_registry = self.spec.pulp_registry.value
    if not pulp_registry:
        # Without a pulp registry the plugin has nothing to do.
        logger.info("removing pulp_tag from request, "
                    "requires pulp_registry")
        self.dj.remove_plugin(phase, plugin)
        return
    self.dj.dock_json_set_arg(phase, plugin,
                              'pulp_registry_name', pulp_registry)
    # Either a pulp secret or an explicit username must be available.
    if self.spec.pulp_secret.value is None:
        conf = self.dj.dock_json_get_plugin_conf(phase, plugin)
        args = conf.get('args', {})
        if 'username' not in args:
            raise OsbsValidationException("Pulp registry specified "
                                          "but no auth config")
Configure the group_manifests plugin. Group is always set to false for now.
def render_group_manifests(self):
    """Configure the group_manifests plugin (grouping stays disabled for now)."""
    phase = 'postbuild_plugins'
    plugin = 'group_manifests'
    if not self.dj.dock_json_has_plugin_conf(phase, plugin):
        return
    push_conf = self.dj.dock_json_get_plugin_conf(phase, plugin)
    args = push_conf.setdefault('args', {})
    # The registries dict is modified in place.
    registries = args.setdefault('registries', {})
    placeholder = '{{REGISTRY_URI}}'
    if placeholder in registries:
        # Expand the placeholder entry into one entry per configured
        # registry, carrying over any params (e.g. 'insecure').
        pairs = zip_longest(self.spec.registry_uris.value,
                            self.spec.registry_secrets.value)
        for registry, secret in pairs:
            if not registry.uri:
                continue
            regdict = registries[placeholder].copy()
            regdict['version'] = registry.version
            if secret:
                regdict['secret'] = os.path.join(SECRETS_PATH, secret)
            registries[registry.docker_uri] = regdict
        del registries[placeholder]
    self.dj.dock_json_set_arg(phase, plugin,
                              'group', self.spec.group_manifests.value)
    # Map each platform to its Go architecture name.
    goarch = {platform: descriptor['architecture']
              for platform, descriptor in self.platform_descriptors.items()}
    self.dj.dock_json_set_arg(phase, plugin, 'goarch', goarch)
Configure the import_image plugin
def render_import_image(self, use_auth=None):
    """
    Configure the import_image plugin

    :param use_auth: bool or None; when None, fall back to the
        configured spec.use_auth value
    """
    # import_image is a multi-phase plugin
    phases = ('postbuild_plugins', 'exit_plugins')
    plugin = 'import_image'
    for phase in phases:
        if self.spec.imagestream_name.value is None or self.spec.imagestream_url.value is None:
            logger.info("removing %s from request, "
                        "registry or repo url is not defined", plugin)
            self.dj.remove_plugin(phase, plugin)
            continue

        if self.dj.dock_json_has_plugin_conf(phase, plugin):
            self.dj.dock_json_set_arg(phase, plugin, 'imagestream',
                                      self.spec.imagestream_name.value)
            self.dj.dock_json_set_arg(phase, plugin, 'docker_image_repo',
                                      self.spec.imagestream_url.value)
            self.dj.dock_json_set_arg(phase, plugin, 'url',
                                      self.spec.builder_openshift_url.value)
            self.dj.dock_json_set_arg(phase, plugin, 'build_json_dir',
                                      self.spec.builder_build_json_dir.value)
            # Bug fix: the use_auth argument used to be overwritten
            # unconditionally by the spec value, making the parameter
            # dead; it is now honored, with the spec as the fallback.
            effective_use_auth = use_auth
            if effective_use_auth is None:
                effective_use_auth = self.spec.use_auth.value
            if effective_use_auth is not None:
                self.dj.dock_json_set_arg(phase, plugin, 'use_auth',
                                          effective_use_auth)
            if self.spec.imagestream_insecure_registry.value:
                self.dj.dock_json_set_arg(phase, plugin, 'insecure_registry', True)
Customize prod_inner for site specific customizations
def render_customizations(self):
    """
    Customize prod_inner for site specific customizations
    """
    self._render_disabled_plugins()
    self._render_enabled_plugins()

def _render_disabled_plugins(self):
    """Remove every plugin listed under 'disable_plugins' in the config."""
    disable_plugins = self.customize_conf.get('disable_plugins', [])
    if not disable_plugins:
        logger.debug("No site-specific plugins to disable")
        return
    for plugin_dict in disable_plugins:
        # KeyError covers both a malformed config entry and an
        # unknown plugin phase.
        try:
            self.dj.remove_plugin(
                plugin_dict['plugin_type'],
                plugin_dict['plugin_name']
            )
            logger.debug(
                "site-specific plugin disabled -> Type:{} Name:{}".format(
                    plugin_dict['plugin_type'],
                    plugin_dict['plugin_name']
                )
            )
        except KeyError:
            # Malformed config
            logger.debug("Invalid custom configuration found for disable_plugins")

def _render_enabled_plugins(self):
    """Add (or override) every plugin listed under 'enable_plugins'."""
    enable_plugins = self.customize_conf.get('enable_plugins', [])
    if not enable_plugins:
        logger.debug("No site-specific plugins to enable")
        return
    for plugin_dict in enable_plugins:
        try:
            self.dj.add_plugin(
                plugin_dict['plugin_type'],
                plugin_dict['plugin_name'],
                plugin_dict['plugin_args']
            )
            logger.debug(
                "site-specific plugin enabled -> Type:{} Name:{} Args: {}".format(
                    plugin_dict['plugin_type'],
                    plugin_dict['plugin_name'],
                    plugin_dict['plugin_args']
                )
            )
        except KeyError:
            # Malformed config
            logger.debug("Invalid custom configuration found for enable_plugins")
Sets the Build/BuildConfig object name
def render_name(self, name, image_tag, platform):
    """Set the Build/BuildConfig object name in the template metadata."""
    if self.scratch or self.isolated:
        name = image_tag
        # Strip a trailing '-<platform>' suffix: platform names may
        # contain characters not allowed by OpenShift.
        if platform:
            suffix = '-{}'.format(platform)
            if name.endswith(suffix):
                name = name[:-len(suffix)]
        _, salt, timestamp = name.rsplit('-', 2)
        if self.scratch:
            name = 'scratch-{}-{}'.format(salt, timestamp)
        else:
            name = 'isolated-{}-{}'.format(salt, timestamp)
    # !IMPORTANT! can't be too long: https://github.com/openshift/origin/issues/733
    self.template['metadata']['name'] = name
Only used for setting up the testing framework.
def setup_json_capture(osbs, os_conf, capture_dir):
    """
    Only used for setting up the testing framework.

    Wraps the OSBS connection's request method with a ResponseSaver,
    which presumably records responses into capture_dir -- see
    ResponseSaver for the actual capture behavior.

    :param osbs: OSBS instance whose connection request is wrapped
    :param os_conf: configuration providing the OpenShift/k8s API URIs
    :param capture_dir: str, directory the ResponseSaver writes into
    """
    # Best-effort directory creation: OSError (e.g. the directory
    # already exists) is deliberately ignored, and the request wrapper
    # is installed regardless via the finally clause.
    try:
        os.mkdir(capture_dir)
    except OSError:
        pass
    finally:
        osbs.os._con.request = ResponseSaver(capture_dir,
                                             os_conf.get_openshift_api_uri(),
                                             os_conf.get_k8s_api_uri(),
                                             osbs.os._con.request).request
get size of console: rows x columns
:return: tuple, (int, int)
def get_terminal_size():
    """
    get size of console: rows x columns

    :return: tuple, (int, int); (0, 0) when the size cannot be determined
    """
    try:
        rows, columns = subprocess.check_output(['stty', 'size']).split()
    except (subprocess.CalledProcessError, OSError):
        # CalledProcessError: stty failed (not attached to a terminal);
        # OSError: the stty binary is not available at all (previously
        # this case crashed with an uncaught FileNotFoundError).
        logger.info("not attached to terminal")
        return 0, 0
    logger.debug("console size is %s %s", rows, columns)
    return int(rows), int(columns)
get size of longest value in specific column
:param col: str, column name
:return int
def _longest_val_in_column(self, col):
"""
get size of longest value in specific column
:param col: str, column name
:return int
"""
try:
# +2 is for implicit separator
return max([len(x[col]) for x in self.table if x[col]]) + 2
except KeyError:
logger.error("there is no column %r", col)
raise |
initialize all values based on provided input
:return: None
def _init(self):
    """
    initialize all values based on provided input

    :return: None
    """
    self.col_count = len(self.col_list)
    # list of lengths of longest entries in columns
    self.col_longest = self.get_all_longest_col_lengths()
    self.data_length = sum(self.col_longest.values())
    if self.terminal_width > 0:
        # free space is space which should be equally distributed for all columns
        #   self.terminal_width  -- terminal is our canvas
        #   - self.data_length   -- subtract length of content (the actual data)
        #   - self.col_count + 1 -- table lines are not part of free space, their width is
        #                           (number of columns - 1)
        self.total_free_space = (self.terminal_width - self.data_length) - self.col_count + 1
        if self.total_free_space <= 0:
            # no room to pad -- fall back to the compact layout
            self.total_free_space = None
        else:
            # each column gets an equal share; the division remainder
            # is handed out one space at a time by _separate()
            self.default_column_space = self.total_free_space // self.col_count
            self.default_column_space_remainder = self.total_free_space % self.col_count
            logger.debug("total free space: %d, column space: %d, remainder: %d, columns: %d",
                         self.total_free_space, self.default_column_space,
                         self.default_column_space_remainder, self.col_count)
    else:
        # unknown terminal size (e.g. not attached to a terminal)
        self.total_free_space = None
count all values needed to display whole table
<><---terminal-width-----------><>
<> HEADER | HEADER2 | HEADER3 <>
<>---------+----------+---------<>
kudos to PostgreSQL developers
:return: None
def _count_sizes(self):
    """
    count all values needed to display whole table

    <><---terminal-width-----------><>
    <> HEADER  | HEADER2  | HEADER3 <>
    <>---------+----------+---------<>

    kudos to PostgreSQL developers

    :return: None
    """
    format_list = []
    header_sepa_format_list = []
    # actual widths of columns
    self.col_widths = {}
    for col in self.col_list:
        col_length = self.col_longest[col]
        # column width = longest value plus this column's share of the
        # free space handed out by _separate()
        col_width = col_length + self._separate()
        # -2 is for implicit separator -- spaces around
        format_list.append(" {%s:%d} " % (col, col_width - 2))
        header_sepa_format_list.append("{%s:%d}" % (col, col_width))
        self.col_widths[col] = col_width
    logger.debug("column widths %s", self.col_widths)
    # data rows are joined with '|', the header separator line with '+'
    self.format_str = "|".join(format_list)
    self.header_format_str = "+".join(header_sepa_format_list)
    self.header_data = {}
    for k in self.col_widths:
        self.header_data[k] = "-" * self.col_widths[k]
iterate over all columns and get their longest values
:return: dict, {"column_name": 132}
def get_all_longest_col_lengths(self):
    """Map each column name to the width of its longest value.

    :return: dict, {"column_name": 132}
    """
    return {col: self._longest_val_in_column(col) for col in self.col_list}
get a width of separator for current column
:return: int
def _separate(self):
"""
get a width of separator for current column
:return: int
"""
if self.total_free_space is None:
return 0
else:
sepa = self.default_column_space
# we need to distribute remainders
if self.default_column_space_remainder > 0:
sepa += 1
self.default_column_space_remainder -= 1
logger.debug("remainder: %d, separator: %d",
self.default_column_space_remainder, sepa)
return sepa |
print provided table
:return: None
def render(self):
    """
    print provided table

    :return: None
    """
    # Header and separator line go to stderr -- presumably so that
    # piped stdout contains only the data rows.
    print(self.format_str.format(**self.header), file=sys.stderr)
    print(self.header_format_str.format(**self.header_data), file=sys.stderr)
    for entry in self.data:
        print(self.format_str.format(**entry))
:raises ValidationError:
def _validate_source_data(self):
    """
    Validate the buffered source data against the loader's JSON schema.

    :raises ValidationError: when self._buffer does not conform to
        self._schema
    """
    try:
        jsonschema.validate(self._buffer, self._schema)
    except jsonschema.ValidationError as e:
        # Re-raise as this package's own ValidationError type so
        # callers don't have to depend on jsonschema directly.
        raise ValidationError(e)
:raises ValueError:
:raises pytablereader.error.ValidationError:
def to_table_data(self):
    """
    Yield the buffered mapping as a single key/value TableData.

    :raises ValueError:
    :raises pytablereader.error.ValidationError:
    """
    self._validate_source_data()
    self._loader.inc_table_count()
    yield TableData(
        self._make_table_name(),
        ["key", "value"],
        # list() instead of a pass-through comprehension (perf idiom)
        list(self._buffer.items()),
        dp_extractor=self._loader.dp_extractor,
        type_hints=self._extract_type_hints(),
    )
:raises ValueError:
:raises pytablereader.error.ValidationError:
def to_table_data(self):
    """
    Yield one TableData per table key from the buffered JSON records.

    Headers are the sorted union of all attribute names seen across
    the records of a table.

    :raises ValueError:
    :raises pytablereader.error.ValidationError:
    """
    self._validate_source_data()
    for table_key, json_records in six.iteritems(self._buffer):
        # set.update avoids rebuilding the set on every iteration
        # (the previous union() call copied the whole set each time).
        attr_name_set = set()
        for json_record in json_records:
            attr_name_set.update(six.viewkeys(json_record))
        headers = sorted(attr_name_set)
        self._loader.inc_table_count()
        self._table_key = table_key
        yield TableData(
            self._make_table_name(),
            headers,
            json_records,
            dp_extractor=self._loader.dp_extractor,
            type_hints=self._extract_type_hints(headers),
        )
:raises ValueError:
:raises pytablereader.error.ValidationError:
def to_table_data(self):
    """
    Yield one TableData per table key, transposing the buffered
    column-oriented mapping into rows.

    :raises ValueError:
    :raises pytablereader.error.ValidationError:
    """
    self._validate_source_data()
    for table_key, column_map in six.iteritems(self._buffer):
        headers = sorted(six.viewkeys(column_map))
        self._loader.inc_table_count()
        self._table_key = table_key
        # Collect each header's column, then zip them into row tuples.
        columns = [column_map.get(header) for header in headers]
        yield TableData(
            self._make_table_name(),
            headers,
            zip(*columns),
            dp_extractor=self._loader.dp_extractor,
            type_hints=self._extract_type_hints(headers),
        )
:raises ValueError:
:raises pytablereader.error.ValidationError:
def to_table_data(self):
    """
    Yield one key/value TableData per table key in the buffer.

    :raises ValueError:
    :raises pytablereader.error.ValidationError:
    """
    self._validate_source_data()
    for table_key, json_records in six.iteritems(self._buffer):
        self._loader.inc_table_count()
        self._table_key = table_key
        yield TableData(
            self._make_table_name(),
            ["key", "value"],
            # list() instead of a pass-through comprehension (perf idiom)
            list(json_records.items()),
            dp_extractor=self._loader.dp_extractor,
            type_hints=self._extract_type_hints(),
        )
set parameters in the user parameters
these parameters are accepted:
:param git_uri: str, uri of the git repository for the source
:param git_ref: str, commit ID of the branch to be pulled
:param git_branch: str, branch name of the branch to be pulled
:param base_image: str, name of the parent image
:param name_label: str, label of the parent image
:param user: str, name of the user requesting the build
:param component: str, name of the component
:param release: str,
:param build_image: str,
:param build_imagestream: str,
:param build_from: str,
:param build_type: str, orchestrator or worker
:param platforms: list of str, platforms to build on
:param platform: str, platform
:param koji_target: str, koji tag with packages used to build the image
:param koji_task_id: str, koji ID
:param koji_parent_build: str,
:param koji_upload_dir: str, koji directory where the completed image will be uploaded
:param flatpak: if we should build a Flatpak OCI Image
:param flatpak_base_image: str, name of the Flatpack OCI Image
:param reactor_config_map: str, name of the config map containing the reactor environment
:param reactor_config_override: dict, data structure for reactor config to be injected as
an environment variable into a worker build;
when used, reactor_config_map is ignored.
:param yum_repourls: list of str, uris of the yum repos to pull from
:param signing_intent: bool, True to sign the resulting image
:param compose_ids: list of int, ODCS composes to use instead of generating new ones
:param filesystem_koji_task_id: int, Koji Task that created the base filesystem
:param platform_node_selector: dict, a nodeselector for a user_paramsific platform
:param scratch_build_node_selector: dict, a nodeselector for scratch builds
:param explicit_build_node_selector: dict, a nodeselector for explicit builds
:param auto_build_node_selector: dict, a nodeselector for auto builds
:param isolated_build_node_selector: dict, a nodeselector for isolated builds
:param operator_manifests_extract_platform: str, indicates which platform should upload
operator manifests to koji
:param parent_images_digests: dict, mapping image digests to names and platforms
def set_params(self, **kwargs):
    """
    set parameters in the user parameters

    these parameters are accepted:
    :param git_uri: str, uri of the git repository for the source
    :param git_ref: str, commit ID of the branch to be pulled
    :param git_branch: str, branch name of the branch to be pulled
    :param base_image: str, name of the parent image
    :param name_label: str, label of the parent image
    :param user: str, name of the user requesting the build
    :param component: str, name of the component
    :param release: str,
    :param build_image: str,
    :param build_imagestream: str,
    :param build_from: str,
    :param build_type: str, orchestrator or worker
    :param platforms: list of str, platforms to build on
    :param platform: str, platform
    :param koji_target: str, koji tag with packages used to build the image
    :param koji_task_id: str, koji ID
    :param koji_parent_build: str,
    :param koji_upload_dir: str, koji directory where the completed image will be uploaded
    :param flatpak: if we should build a Flatpak OCI Image
    :param flatpak_base_image: str, name of the Flatpak OCI Image
    :param reactor_config_map: str, name of the config map containing the reactor environment
    :param reactor_config_override: dict, data structure for reactor config to be injected as
                                    an environment variable into a worker build;
                                    when used, reactor_config_map is ignored.
    :param yum_repourls: list of str, uris of the yum repos to pull from
    :param signing_intent: bool, True to sign the resulting image
    :param compose_ids: list of int, ODCS composes to use instead of generating new ones
    :param filesystem_koji_task_id: int, Koji Task that created the base filesystem
    :param platform_node_selector: dict, a nodeselector for a specific platform
    :param scratch_build_node_selector: dict, a nodeselector for scratch builds
    :param explicit_build_node_selector: dict, a nodeselector for explicit builds
    :param auto_build_node_selector: dict, a nodeselector for auto builds
    :param isolated_build_node_selector: dict, a nodeselector for isolated builds
    :param operator_manifests_extract_platform: str, indicates which platform should upload
                                                operator manifests to koji
    :param parent_images_digests: dict, mapping image digests to names and platforms
    """
    # Here we cater to the koji "scratch" build type, this will disable
    # all plugins that might cause importing of data to koji
    self.scratch = kwargs.get('scratch')
    # When true, it indicates build was automatically started by
    # OpenShift via a trigger, for instance ImageChangeTrigger
    self.is_auto = kwargs.pop('is_auto', False)
    # An isolated build is meant to patch a certain release and not
    # update transient tags in container registry
    self.isolated = kwargs.get('isolated')
    self.osbs_api = kwargs.pop('osbs_api', None)
    self.validate_build_variation()
    self.base_image = kwargs.get('base_image')
    self.platform_node_selector = kwargs.get('platform_node_selector', {})
    self.scratch_build_node_selector = kwargs.get('scratch_build_node_selector', {})
    self.explicit_build_node_selector = kwargs.get('explicit_build_node_selector', {})
    self.auto_build_node_selector = kwargs.get('auto_build_node_selector', {})
    self.isolated_build_node_selector = kwargs.get('isolated_build_node_selector', {})
    logger.debug("now setting params '%s' for user_params", kwargs)
    # NOTE: 'is_auto' and 'osbs_api' were popped above and are not
    # forwarded; 'scratch'/'isolated' were read with get() and are
    # still part of the forwarded kwargs.
    self.user_params.set_params(**kwargs)
    self.source_registry = None
    self.organization = None
Sets data from reactor config
def set_data_from_reactor_config(self):
    """Apply source registry/organization/flatpak settings from reactor config.

    The config comes either from the override data structure or from
    the named config map; when neither yields data, a flatpak build is
    rejected and everything else is left untouched.

    :raises OsbsValidationException: for a flatpak build without a
        usable base image in the reactor config
    """
    user_params = self.user_params
    data = None
    if user_params.reactor_config_override.value:
        data = user_params.reactor_config_override.value
    elif user_params.reactor_config_map.value:
        config_map = self.osbs_api.get_config_map(user_params.reactor_config_map.value)
        data = config_map.get_data_by_key('config.yaml')
    if not data:
        if user_params.flatpak.value:
            raise OsbsValidationException("flatpak_base_image must be provided")
        return
    if 'source_registry' in data:
        self.source_registry = data['source_registry']
    if 'registries_organization' in data:
        self.organization = data['registries_organization']
    if user_params.flatpak.value:
        flatpak_base_image = data.get('flatpak', {}).get('base_image', None)
        if not flatpak_base_image:
            raise OsbsValidationException("flatpak_base_image must be provided")
        self.base_image = flatpak_base_image
        user_params.base_image.value = flatpak_base_image
    self._set_required_secrets(data.get('required_secrets', []),
                               data.get('worker_token_secrets', []))
Sets required secrets
def _set_required_secrets(self, required_secrets, token_secrets):
    """
    Mount the required secrets in the build's custom strategy config.

    Secrets that are already mounted are left alone.

    :param required_secrets: list of str, secret names to mount
    :param token_secrets: list of str, extra secrets mounted only for
        orchestrator builds
    """
    if self.user_params.build_type.value == BUILD_TYPE_ORCHESTRATOR:
        # Bug fix: use concatenation instead of '+=', which extended
        # the caller's list in place (mutating the reactor config data
        # it was read from).
        required_secrets = required_secrets + token_secrets

    if not required_secrets:
        return

    secrets = self.template['spec']['strategy']['customStrategy'].setdefault('secrets', [])
    existing = set(secret_mount['secretSource']['name'] for secret_mount in secrets)
    required_secrets = set(required_secrets)

    already_set = required_secrets.intersection(existing)
    if already_set:
        logger.debug("secrets %s are already set", already_set)

    for secret in required_secrets - existing:
        secret_path = os.path.join(SECRETS_PATH, secret)
        logger.info("Configuring %s secret at %s", secret, secret_path)

        secrets.append({
            'secretSource': {
                'name': secret,
            },
            'mountPath': secret_path,
        })
if config contains plugin, remove it
def remove_plugin(self, phase, name, reason=None):
    """Remove the named plugin from the given phase, if present.

    Only the first matching entry is removed; absence is a no-op.
    """
    for index, plugin in enumerate(self.template[phase]):
        if plugin.get('name') == name:
            del self.template[phase][index]
            if reason:
                logger.info('Removing {}:{}, {}'.format(phase, name, reason))
            break
if config has plugin, override it, else add it
def add_plugin(self, phase, name, args, reason=None):
    """
    Set the args of an already-configured plugin, or append a new entry.

    :param phase: str, plugin phase key in the template
    :param name: str, plugin name
    :param args: plugin arguments to set
    :param reason: str, optional explanation to log
    """
    overridden = False
    for entry in self.template[phase]:
        if entry['name'] == name:
            entry['args'] = args
            overridden = True
    if not overridden:
        self.template[phase].append({"name": name, "args": args})
    if reason:
        logger.info('{}:{} with args {}, {}'.format(phase, name, args, reason))
Return the configuration for a plugin.
Raises KeyError if there are no plugins of that type.
Raises IndexError if the named plugin is not listed.
def get_plugin_conf(self, phase, name):
    """
    Return the configuration dict for the named plugin.

    :raises KeyError: if there are no plugins of that phase
    :raises IndexError: if the named plugin is not listed
    """
    for candidate in self.template[phase]:
        if candidate.get('name') == name:
            return candidate
    raise IndexError('list index out of range')
Check whether a plugin is configured.
def has_plugin_conf(self, phase, name):
    """Return True when the named plugin is configured in the phase."""
    try:
        self.get_plugin_conf(phase, name)
    except (KeyError, IndexError):
        return False
    return True
Remove certain plugins in order to handle the "scratch build"
scenario. Scratch builds must not affect subsequent builds,
and should not be imported into Koji.
def adjust_for_scratch(self):
    """
    Strip plugins that must not run for a "scratch build". Scratch builds
    must not affect subsequent builds and should not be imported into Koji.
    """
    if not self.user_params.scratch.value:
        return

    doomed = [
        ("prebuild_plugins", "koji_parent"),
        ("postbuild_plugins", "compress"),  # required only to make an archive for Koji
        ("postbuild_plugins", "pulp_pull"),  # required only to make an archive for Koji
        ("postbuild_plugins", "compare_components"),
        ("postbuild_plugins", "import_image"),
        ("exit_plugins", "koji_promote"),
        ("exit_plugins", "koji_tag_build"),
        ("exit_plugins", "import_image"),
        ("prebuild_plugins", "check_and_set_rebuild"),
        ("prebuild_plugins", "stop_autorebuild_if_disabled"),
    ]
    if not self.has_tag_suffixes_placeholder():
        doomed.append(("postbuild_plugins", "tag_from_config"))

    for phase, plugin in doomed:
        self.pt.remove_plugin(phase, plugin, 'removed from scratch build request')
Remove certain plugins in order to handle the "isolated build"
scenario.
def adjust_for_isolated(self):
    """
    Strip plugins that must not run for an "isolated build".
    """
    if not self.user_params.isolated.value:
        return
    for phase, plugin in (("prebuild_plugins", "check_and_set_rebuild"),
                          ("prebuild_plugins", "stop_autorebuild_if_disabled")):
        self.pt.remove_plugin(phase, plugin, 'removed from isolated build request')
Remove plugins that don't work when building Flatpaks
def adjust_for_flatpak(self):
    """
    Strip plugins that don't work when building Flatpaks.
    """
    if not self.user_params.flatpak.value:
        return

    doomed = [
        ("prebuild_plugins", "resolve_composes"),
        # We'll extract the filesystem anyways for a Flatpak instead of exporting
        # the docker image directly, so squash just slows things down.
        ("prepublish_plugins", "squash"),
        # Pulp can't currently handle Flatpaks, which are OCI images
        ("postbuild_plugins", "pulp_push"),
        ("postbuild_plugins", "pulp_tag"),
        ("postbuild_plugins", "pulp_sync"),
        ("exit_plugins", "pulp_publish"),
        ("exit_plugins", "pulp_pull"),
        # delete_from_registry is used for deleting builds from the temporary registry
        # that pulp_sync mirrors from.
        ("exit_plugins", "delete_from_registry"),
    ]
    for phase, plugin in doomed:
        self.pt.remove_plugin(phase, plugin, 'not needed for flatpak build')
Customize template for site user specified customizations
def render_customizations(self):
    """
    Apply site-user specified customizations: disable and enable plugins
    listed in the customize configuration. Malformed entries (missing keys)
    are logged and skipped rather than aborting the render.
    """
    disable_plugins = self.pt.customize_conf.get('disable_plugins', [])
    if not disable_plugins:
        logger.debug('No site-user specified plugins to disable')
    else:
        for plugin in disable_plugins:
            try:
                self.pt.remove_plugin(plugin['plugin_type'], plugin['plugin_name'],
                                      'disabled at user request')
            except KeyError:
                # Malformed config
                logger.info('Invalid custom configuration found for disable_plugins')

    enable_plugins = self.pt.customize_conf.get('enable_plugins', [])
    if not enable_plugins:
        # Fixed: message previously ended with a stray double quote
        logger.debug('No site-user specified plugins to enable')
    else:
        for plugin in enable_plugins:
            try:
                msg = 'enabled at user request'
                self.pt.add_plugin(plugin['plugin_type'], plugin['plugin_name'],
                                   plugin['plugin_args'], msg)
            except KeyError:
                # Malformed config
                logger.info('Invalid custom configuration found for enable_plugins')
if there is yum repo in user params, don't pick stuff from koji
def render_koji(self):
    """
    Configure the koji prebuild plugin: drop it when a yum repo is supplied
    by the user, or when no valid koji target is available.
    """
    phase = 'prebuild_plugins'
    plugin = 'koji'
    if not self.pt.has_plugin_conf(phase, plugin):
        return

    if self.user_params.yum_repourls.value:
        self.pt.remove_plugin(phase, plugin, 'there is a yum repo user parameter')
        return

    target = self.user_params.koji_target.value
    if not self.pt.set_plugin_arg_valid(phase, plugin, "target", target):
        self.pt.remove_plugin(phase, plugin, 'no koji target supplied in user parameters')
If the bump_release plugin is present, configure it
def render_bump_release(self):
    """
    Configure the bump_release prebuild plugin when present: drop it when a
    release was supplied by the user; for flatpak builds enable appending.
    """
    pt = self.pt
    phase, plugin = 'prebuild_plugins', 'bump_release'
    if not pt.has_plugin_conf(phase, plugin):
        return

    if self.user_params.release.value:
        pt.remove_plugin(phase, plugin, 'release value supplied as user parameter')
    elif self.user_params.flatpak.value:
        # For flatpak, we want a name-version-release of
        # <name>-<stream>-<module_build_version>.<n>, where the .<n> makes
        # sure that the build is unique in Koji
        pt.set_plugin_arg(phase, plugin, 'append', True)
If the check_and_set_platforms plugin is present, configure it
def render_check_and_set_platforms(self):
    """
    Configure the check_and_set_platforms prebuild plugin when present,
    passing through the koji target if one was supplied.
    """
    phase = 'prebuild_plugins'
    plugin = 'check_and_set_platforms'
    if not self.pt.has_plugin_conf(phase, plugin):
        return

    target = self.user_params.koji_target.value
    if target:
        self.pt.set_plugin_arg(phase, plugin, "koji_target", target)
Configure the import_image plugin
def render_import_image(self, use_auth=None):
    """
    Configure the import_image plugin (a multi-phase plugin).

    :param use_auth: unused; kept for backward compatibility with callers
    """
    imagestream = self.user_params.imagestream_name.value
    if imagestream is None:
        self.pt.remove_plugin('exit_plugins', 'import_image',
                              'imagestream not in user parameters')
        return
    if self.pt.has_plugin_conf('exit_plugins', 'import_image'):
        self.pt.set_plugin_arg('exit_plugins', 'import_image', 'imagestream',
                               imagestream)
Configure tag_from_config plugin
def render_tag_from_config(self):
    """Configure the tag_from_config postbuild plugin with the tag suffixes
    derived from user parameters (unique tag always; primary tags depending
    on scratch/isolated/tags-from-yaml for orchestrator builds)."""
    phase = 'postbuild_plugins'
    plugin = 'tag_from_config'
    if not self.has_tag_suffixes_placeholder():
        return

    unique_tag = self.user_params.image_tag.value.split(':')[-1]
    tag_suffixes = {'unique': [unique_tag], 'primary': []}

    if self.user_params.build_type.value == BUILD_TYPE_ORCHESTRATOR:
        extra_tags = self.user_params.additional_tags.value or set()
        primary = tag_suffixes['primary']
        if self.user_params.scratch.value:
            pass  # scratch builds get the unique tag only
        elif self.user_params.isolated.value:
            primary.append('{version}-{release}')
        elif self.user_params.tags_from_yaml.value:
            primary.append('{version}-{release}')
            primary.extend(extra_tags)
        else:
            primary.extend(['latest', '{version}', '{version}-{release}'])
            primary.extend(extra_tags)

    self.pt.set_plugin_arg(phase, plugin, 'tag_suffixes', tag_suffixes)
Configure pull_base_image
def render_pull_base_image(self):
    """Pass parent image digests from user params to the pull_base_image
    prebuild plugin, when any were supplied."""
    digests = self.user_params.parent_images_digests.value
    if digests:
        self.pt.set_plugin_arg('prebuild_plugins', 'pull_base_image',
                               'parent_images_digests', digests)
get info about user (if no user specified, use the one initiating request)
:param username: str, name of user to get info about, default="~"
:return: dict
def get_user(self, username="~"):
    """
    Fetch information about a user. With no argument, the user initiating
    the request ("~") is queried.

    :param username: str, name of user to get info about, default="~"
    :return: HttpResponse with the user object
    """
    endpoint = "users/%s/" % username
    response = self._get(self._build_url(endpoint, _prepend_namespace=False))
    check_response(response)
    return response
:return:
def create_build(self, build_json):
    """
    POST a new Build object.

    :param build_json: dict, the Build resource to create
    :return: HttpResponse
    """
    logger.debug(build_json)
    return self._post(self._build_url("builds/"),
                      data=json.dumps(build_json),
                      headers={"Content-Type": "application/json"})
Returns all builds matching a given set of label selectors. It is up to the
calling function to filter the results.
def get_all_build_configs_by_labels(self, label_selectors):
    """
    Return every build config matching the given label selectors. Filtering
    beyond the label match is the caller's responsibility.

    :param label_selectors: iterable of (field, value) pairs
    :return: list of build config dicts
    """
    selector = ','.join('%s=%s' % (field, value)
                        for field, value in label_selectors)
    url = self._build_url("buildconfigs/", labelSelector=selector)
    return self._get(url).json()['items']
Returns a build config matching the given label
selectors. This method will raise OsbsException
if not exactly one build config is found.
def get_build_config_by_labels(self, label_selectors):
    """
    Return the single build config matching the given label selectors.

    :raises OsbsException: when zero or more than one build config matches
    """
    matches = self.get_all_build_configs_by_labels(label_selectors)
    if len(matches) == 1:
        return matches[0]
    if matches:
        raise OsbsException(
            "More than one build config found for labels: %r" %
            (label_selectors, ))
    raise OsbsException(
        "Build config not found for labels: %r" %
        (label_selectors, ))
Returns a build config matching the given label selectors, filtering against
another predetermined value. This method will raise OsbsException
if not exactly one build config is found after filtering.
def get_build_config_by_labels_filtered(self, label_selectors, filter_key, filter_value):
    """
    Return the single build config matching the label selectors, optionally
    narrowed by comparing a dotted-path field against filter_value.

    :param filter_key: str, dot-separated path into the build config
    :param filter_value: value to match; None disables the extra filtering
    :raises OsbsException: when zero or more than one build config remains
    """
    items = self.get_all_build_configs_by_labels(label_selectors)

    if filter_value is not None:
        path = filter_key.split('.')
        items = [bc for bc in items
                 if graceful_chain_get(bc, *path) == filter_value]

    if len(items) == 1:
        return items[0]
    if items:
        raise OsbsException(
            "More than one build config found for labels: %r" %
            (label_selectors, ))
    raise OsbsException(
        "Build config not found for labels: %r" %
        (label_selectors, ))
:return:
def create_build_config(self, build_config_json):
    """
    POST a new BuildConfig object.

    :param build_config_json: serialized BuildConfig payload
    :return: HttpResponse
    """
    return self._post(self._build_url("buildconfigs/"),
                      data=build_config_json,
                      headers={"Content-Type": "application/json"})
stream logs from build
:param build_id: str
:return: iterator
def stream_logs(self, build_id):
    """
    stream logs from build

    :param build_id: str
    :return: iterator over log lines (whatever response.iter_lines() yields)
    """
    # follow=1 makes the API keep the connection open and stream new lines
    kwargs = {'follow': 1}

    # If connection is closed within this many seconds, give up:
    min_idle_timeout = 60

    # Stream logs, but be careful of the connection closing
    # due to idle timeout. In that case, try again until the
    # call returns more quickly than a reasonable timeout
    # would be set to.
    last_activity = time.time()
    while True:
        buildlogs_url = self._build_url("builds/%s/log/" % build_id,
                                        **kwargs)
        try:
            response = self._get(buildlogs_url, stream=1,
                                 headers={'Connection': 'close'})
            check_response(response)
            for line in response.iter_lines():
                # Track when we last received data so we can distinguish an
                # idle-timeout disconnect from a real end-of-stream below.
                last_activity = time.time()
                yield line
        # NOTE1: If self._get causes ChunkedEncodingError, ConnectionError,
        # or IncompleteRead to be raised, they'll be wrapped in
        # OsbsNetworkException or OsbsException
        # NOTE2: If iter_lines causes ChunkedEncodingError
        # or IncompleteRead to be raised, it'll simply be silenced.
        # NOTE3: An exception may be raised from
        # check_response(). In this case, exception will be
        # wrapped in OsbsException or OsbsNetworkException,
        # inspect cause to detect ConnectionError.
        except OsbsException as exc:
            # Only connection drops are retried; anything else propagates.
            if not isinstance(exc.cause, ConnectionError):
                raise
            idle = time.time() - last_activity
            logger.debug("connection closed after %ds", idle)
            if idle < min_idle_timeout:
                # Finish output
                return
            # Reconnect, asking only for lines newer than the idle window so
            # output already yielded is not repeated.
            since = int(idle - 1)
            logger.debug("fetching logs starting from %ds ago", since)
            kwargs['sinceSeconds'] = since
provide logs from build
:param build_id: str
:param follow: bool, fetch logs as they come?
:param build_json: dict, to save one get-build query
:param wait_if_missing: bool, if build doesn't exist, wait
:return: None, str or iterator
def logs(self, build_id, follow=False, build_json=None, wait_if_missing=False):
    """
    provide logs from build

    :param build_id: str
    :param follow: bool, fetch logs as they come?
    :param build_json: dict, to save one get-build query
    :param wait_if_missing: bool, if build doesn't exist, wait
    :return: None, str or iterator
    """
    # does build exist?
    try:
        build_json = build_json or self.get_build(build_id).json()
    except OsbsResponseException as ex:
        if ex.status_code == 404:
            if not wait_if_missing:
                raise OsbsException("Build '%s' doesn't exist." % build_id)
        else:
            raise
    # NOTE(review): on a 404 with wait_if_missing=True, build_json is still
    # None here; the wait below is what fetches it.
    if follow or wait_if_missing:
        build_json = self.wait_for_build_to_get_scheduled(build_id)

    br = BuildResponse(build_json)

    # When build is in new or pending state, openshift responds with 500
    if br.is_pending():
        return

    if follow:
        # Caller gets a live iterator of log lines
        return self.stream_logs(build_id)

    # Non-follow mode: single fetch of the full log body
    buildlogs_url = self._build_url("builds/%s/log/" % build_id)
    response = self._get(buildlogs_url, headers={'Connection': 'close'})
    check_response(response)
    return response.content
List builds matching criteria
:param build_config_id: str, only list builds created from BuildConfig
:param koji_task_id: str, only list builds for Koji Task ID
:param field_selector: str, field selector for query
:return: HttpResponse
def list_builds(self, build_config_id=None, koji_task_id=None,
                field_selector=None, labels=None):
    """
    List builds matching criteria

    :param build_config_id: str, only list builds created from BuildConfig
    :param koji_task_id: str, only list builds for Koji Task ID
    :param field_selector: str, field selector for query
    :param labels: dict, additional label selectors to match
    :return: HttpResponse
    """
    label = dict(labels) if labels is not None else {}
    if build_config_id is not None:
        label['buildconfig'] = build_config_id
    if koji_task_id is not None:
        label['koji-task-id'] = str(koji_task_id)

    query = {}
    if label:
        query['labelSelector'] = ','.join(
            '{key}={value}'.format(key=key, value=value)
            for key, value in label.items())
    if field_selector is not None:
        query['fieldSelector'] = field_selector

    return self._get(self._build_url("builds/", **query))
Prevent builds being scheduled and wait for running builds to finish.
:return:
def create_resource_quota(self, name, quota_json):
    """
    Create (or, on conflict, replace) a ResourceQuota object, e.g. to
    prevent builds being scheduled while running builds finish.

    :param name: str, name of the resource quota
    :param quota_json: dict, the ResourceQuota definition
    :return: HttpResponse
    """
    payload = json.dumps(quota_json)
    headers = {"Content-Type": "application/json"}

    response = self._post(self._build_k8s_url("resourcequotas/"),
                          data=payload, headers=headers)
    if response.status_code == http_client.CONFLICT:
        # Quota already exists: replace it with a PUT
        response = self._put(self._build_k8s_url("resourcequotas/%s" % name),
                             data=payload, headers=headers)

    check_response(response)
    return response
:param build_id: wait for build to finish
:return:
def wait(self, build_id, states):
    """
    Watch the named build until it reaches one of the given states.

    :param build_id: str, name of the build to wait for
    :param states: container of str, desired phases; compared against the
        lower-cased build phase
    :return: dict, the build object once it is in a desired state
    :raises OsbsWatchBuildNotFound: if the watch stream ends before the
        build reaches a desired state
    """
    logger.info("watching build '%s'", build_id)
    for changetype, obj in self.watch_resource("builds", build_id):
        try:
            obj_name = obj["metadata"]["name"]
        except KeyError:
            # Malformed event; keep watching
            logger.error("'object' doesn't have any name")
            continue
        try:
            obj_status = obj["status"]["phase"]
        except KeyError:
            # Status not populated yet; keep watching
            logger.error("'object' doesn't have any status")
            continue
        else:
            obj_status_lower = obj_status.lower()
        logger.info("object has changed: '%s', status: '%s', name: '%s'",
                    changetype, obj_status, obj_name)
        if obj_name == build_id:
            logger.info("matching build found")
            logger.debug("is %s in %s?", repr(obj_status_lower), states)
            if obj_status_lower in states:
                logger.debug("Yes, build is in the state I'm waiting for.")
                return obj
            else:
                logger.debug("No, build is not in the state I'm "
                             "waiting for.")
        else:
            # The watch may deliver events for other builds; ignore them
            logger.info("The build %r isn't me %r", obj_name, build_id)

    # I'm not sure how we can end up here since there are two possible scenarios:
    # 1. our object was found and we are returning in the loop
    # 2. our object was not found and we keep waiting (in the loop)
    # Therefore, let's raise here
    logger.warning("build '%s' was not found during wait", build_id)
    raise OsbsWatchBuildNotFound("build '%s' was not found and response stream ended" %
                                 build_id)
adjust labels or annotations on object
labels have to match RE: (([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])? and
have at most 63 chars
:param collection: str, object collection e.g. 'builds'
:param name: str, name of object
:param things: str, 'labels' or 'annotations'
:param values: dict, values to set
:param how: callable, how to adjust the values e.g.
self._replace_metadata_things
:return:
def adjust_attributes_on_object(self, collection, name, things, values, how):
    """
    Adjust labels or annotations on an object.

    labels have to match RE: (([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])? and
    have at most 63 chars

    :param collection: str, object collection e.g. 'builds'
    :param name: str, name of object
    :param things: str, 'labels' or 'annotations'
    :param values: dict, values to set
    :param how: callable, how to adjust the values e.g.
                self._replace_metadata_things
    :return: HttpResponse from the PUT request
    """
    url = self._build_url("%s/%s" % (collection, name))
    response = self._get(url)
    logger.debug("before modification: %s", response.content)

    obj = response.json()
    # Delegate the actual metadata mutation to the supplied strategy
    how(obj['metadata'], things, values)

    updated = self._put(url, data=json.dumps(obj), use_json=True)
    check_response(updated)
    return updated
set annotations on build object
:param build_id: str, id of build
:param annotations: dict, annotations to set
:return:
def update_annotations_on_build(self, build_id, annotations):
    """
    Merge the given annotations into a build object's metadata.

    :param build_id: str, id of build
    :param annotations: dict, annotations to set
    :return: result of adjust_attributes_on_object
    """
    return self.adjust_attributes_on_object(
        'builds', build_id, 'annotations', annotations,
        self._update_metadata_things)
Import image tags from a Docker registry into an ImageStream
:return: bool, whether tags were imported
def import_image(self, name, stream_import, tags=None):
    """
    Import image tags from a Docker registry into an ImageStream

    :param name: str, name of the ImageStream
    :param stream_import: dict, ImageStreamImport resource to fill in and POST
        (mutated in place: metadata.name and spec.images are set)
    :param tags: iterable of str, restrict import to these tag names;
        falsy imports every tag defined on the stream's spec
    :return: bool, whether tags were imported
    """
    # Get the JSON for the ImageStream
    imagestream_json = self.get_image_stream(name).json()
    logger.debug("imagestream: %r", imagestream_json)

    if 'dockerImageRepository' in imagestream_json.get('spec', {}):
        # Preserve the repository as an annotation before dropping the field
        logger.debug("Removing 'dockerImageRepository' from ImageStream %s", name)
        source_repo = imagestream_json['spec'].pop('dockerImageRepository')
        # NOTE(review): assumes metadata.annotations already exists on the
        # stream — confirm, otherwise this raises KeyError
        imagestream_json['metadata']['annotations'][ANNOTATION_SOURCE_REPO] = source_repo
        imagestream_json = self.update_image_stream(name, imagestream_json).json()

    # Note the tags before import
    oldtags = imagestream_json.get('status', {}).get('tags', [])
    logger.debug("tags before import: %r", oldtags)

    stream_import['metadata']['name'] = name
    stream_import['spec']['images'] = []
    tags_set = set(tags) if tags else set()

    for tag in imagestream_json.get('spec', {}).get('tags', []):
        # When an explicit tag list was given, skip tags outside it
        if tags_set and tag['name'] not in tags_set:
            continue

        image_import = {
            'from': tag['from'],
            'to': {'name': tag['name']},
            'importPolicy': tag.get('importPolicy'),
            'referencePolicy': tag.get('referencePolicy'),
        }
        stream_import['spec']['images'].append(image_import)

    if not stream_import['spec']['images']:
        logger.debug('No tags to import')
        return False

    import_url = self._build_url("imagestreamimports/")
    import_response = self._post(import_url, data=json.dumps(stream_import),
                                 use_json=True)
    self._check_import_image_response(import_response)

    new_tags = [
        image['tag']
        for image in import_response.json().get('status', {}).get('images', [])]
    logger.debug("tags after import: %r", new_tags)
    return True
Import image tags from a Docker registry into an ImageStream
:return: bool, whether tags were imported
def import_image_tags(self, name, stream_import, tags, repository, insecure):
    """
    Import image tags from a Docker registry into an ImageStream

    :param name: str, name of the ImageStream
    :param stream_import: dict, ImageStreamImport resource to fill in and POST
        (mutated in place: metadata.name and spec.images are set)
    :param tags: iterable of str, tag names to import; falsy means no-op
    :param repository: str, source repository the tags are imported from
    :param insecure: bool, importPolicy.insecure value for the source registry
    :return: bool, whether tags were imported
    """
    # Get the JSON for the ImageStream
    imagestream_json = self.get_image_stream(name).json()
    logger.debug("imagestream: %r", imagestream_json)

    changed = False

    # existence of dockerImageRepository is limiting how many tags are updated
    if 'dockerImageRepository' in imagestream_json.get('spec', {}):
        logger.debug("Removing 'dockerImageRepository' from ImageStream %s", name)
        imagestream_json['spec'].pop('dockerImageRepository')
        changed = True

    all_annotations = imagestream_json.get('metadata', {}).get('annotations', {})
    # remove annotations about registry, since method will get them as arguments
    for annotation in ANNOTATION_SOURCE_REPO, ANNOTATION_INSECURE_REPO:
        if annotation in all_annotations:
            imagestream_json['metadata']['annotations'].pop(annotation)
            changed = True

    # Only write the stream back when something was actually removed
    if changed:
        imagestream_json = self.update_image_stream(name, imagestream_json).json()

    # Note the tags before import
    oldtags = imagestream_json.get('status', {}).get('tags', [])
    logger.debug("tags before import: %r", oldtags)

    stream_import['metadata']['name'] = name
    stream_import['spec']['images'] = []
    tags_set = set(tags) if tags else set()

    if not tags_set:
        logger.debug('No tags to import')
        return False

    for tag in tags_set:
        image_import = {
            'from': {"kind": "DockerImage",
                     "name": '{}:{}'.format(repository, tag)},
            'to': {'name': tag},
            'importPolicy': {'insecure': insecure},
            # referencePolicy will default to "type: source"
            # so we don't have to explicitly set it
        }
        stream_import['spec']['images'].append(image_import)

    import_url = self._build_url("imagestreamimports/")
    import_response = self._post(import_url, data=json.dumps(stream_import),
                                 use_json=True)
    self._check_import_image_response(import_response)

    new_tags = [
        image['tag']
        for image in import_response.json().get('status', {}).get('images', [])]
    logger.debug("tags after import: %r", new_tags)
    return True
argparse is way too awesome when doing repr() on choices when printing usage
:param s: str or unicode
:return: str on 2, unicode on 3
def str_on_2_unicode_on_3(s):
    """
    argparse is way too awesome when doing repr() on choices when printing usage

    :param s: str or unicode
    :return: str on 2, unicode on 3
    """
    if not PY3:
        return str(s)
    # Python 3: pass text through unchanged, decode bytes as UTF-8
    if isinstance(s, str):
        return s
    return str(s, encoding="utf-8")
Extract tabular data as |TableData| instances from a Line-delimited JSON file.
|load_source_desc_file|
:return:
Loaded table data iterator.
|load_table_name_desc|
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the data is invalid Line-delimited JSON.
:raises pytablereader.error.ValidationError:
If the data is not acceptable Line-delimited JSON format.
def load(self):
    """
    Extract tabular data as |TableData| instances from a Line-delimited JSON file.
    |load_source_desc_file|

    :return:
        Loaded table data iterator.
        |load_table_name_desc|
    :rtype: |TableData| iterator
    :raises pytablereader.DataError:
        If the data is invalid Line-delimited JSON.
    :raises pytablereader.error.ValidationError:
        If the data is not acceptable Line-delimited JSON format.
    """
    table_formatter = JsonLinesTableFormatter(self.load_dict())
    table_formatter.accept(self)
    return table_formatter.to_table_data()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.