code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
from collections.abc import Iterable
import numpy as np
def cov_from_residuals(residuals, dof=None):
    """
    computes an optimal shrinkage estimate of the covariance matrix from
    the residuals as described by Ledoit and Wolf (2004): "A well-conditioned
    estimator for large-dimensional covariance matrices"

    (Docstring fix: this function returns a covariance matrix, not a
    precision matrix — see prec_from_residuals for the inverse.)

    Args:
        residuals(numpy.ndarray or list of these): n_residuals x n_channels
            matrix of residuals
        dof(int or list of int): degrees of freedom for covariance estimation
            defaults to n_res - 1, should be corrected for the number
            of regressors in a GLM if applicable.

    Returns:
        numpy.ndarray (or list): sigma_p: covariance matrix over channels
    """
    if not isinstance(residuals, np.ndarray) or residuals.ndim > 2:
        # list (or >2d stack) of residual matrices: recurse per entry,
        # distributing dof if it is given per entry
        s_shrink = []
        for i, res in enumerate(residuals):
            if dof is None:
                s_shrink.append(cov_from_residuals(res))
            elif isinstance(dof, Iterable):
                s_shrink.append(cov_from_residuals(res, dof[i]))
            else:
                s_shrink.append(cov_from_residuals(res, dof))
    else:
        n_res = residuals.shape[0]
        if dof is None:
            dof = n_res - 1
        # calculate sample covariance matrix s (normalized by n_res)
        residuals = residuals - np.mean(residuals, axis=0, keepdims=True)
        xt_x = np.einsum('ij, ik-> ijk', residuals, residuals)
        s = np.sum(xt_x, axis=0) / n_res
        # calculate the scalar estimators to find the optimal shrinkage:
        # m, d^2, b^2 as in Ledoit & Wolf paper
        m = np.sum(np.diag(s)) / s.shape[0]
        d2 = np.sum((s - m * np.eye(s.shape[0])) ** 2)
        b2 = np.sum((xt_x - s) ** 2) / n_res / n_res
        # b^2 is capped at d^2 so the shrinkage weight stays in [0, 1]
        b2 = min(d2, b2)
        # shrink covariance matrix towards the scaled identity m*I
        s_shrink = b2 / d2 * m * np.eye(s.shape[0]) \
            + (d2-b2) / d2 * s
        # correction for degrees of freedom
        s_shrink = s_shrink * n_res / dof
    return s_shrink
def prec_from_residuals(residuals, dof=None):
    """
    computes an optimal shrinkage estimate of the precision matrix from
    the residuals as described by Ledoit and Wolf (2004): "A well-conditioned
    estimator for large-dimensional covariance matrices"

    Args:
        residuals(numpy.ndarray or list of these): n_residuals x n_channels
            matrix of residuals
        dof(int or list of int): degrees of freedom for covariance estimation
            defaults to n_res - 1, should be corrected for the number
            of regressors in a GLM if applicable.

    Returns:
        numpy.ndarray (or list): sigma_p: precision matrix over channels
    """
    sigma = cov_from_residuals(residuals=residuals, dof=dof)
    # a single 2d array is inverted directly; a list (or higher-dim stack)
    # is inverted entry by entry, in place
    if isinstance(sigma, np.ndarray) and sigma.ndim <= 2:
        return np.linalg.inv(sigma)
    for idx in range(len(sigma)):
        sigma[idx] = np.linalg.inv(sigma[idx])
    return sigma
import warnings

import numpy as np

from pyrsa.util.data_utils import get_unique_unsorted
from pyrsa.util.descriptor_utils import check_descriptor_length_error
from pyrsa.util.descriptor_utils import subset_descriptor
from pyrsa.util.descriptor_utils import bool_index
from pyrsa.util.descriptor_utils import format_descriptor
from pyrsa.util.descriptor_utils import parse_input_descriptor
from pyrsa.util.descriptor_utils import append_obs_descriptors
from pyrsa.util.file_io import write_dict_hdf5
from pyrsa.util.file_io import write_dict_pkl
from pyrsa.util.file_io import read_dict_hdf5
from pyrsa.util.file_io import read_dict_pkl
class DatasetBase:
    """
    Abstract dataset class.
    Defines members that every class needs to have, but does not
    implement any interesting behavior. Inherit from this class
    to define specific dataset types.

    Args:
        measurements (numpy.ndarray): n_obs x n_channel 2d-array,
        descriptors (dict): descriptors (metadata)
        obs_descriptors (dict): observation descriptors (all
            are array-like with shape = (n_obs,...))
        channel_descriptors (dict): channel descriptors (all are
            array-like with shape = (n_channel,...))

    Returns:
        dataset object
    """
    def __init__(self, measurements, descriptors=None,
                 obs_descriptors=None, channel_descriptors=None):
        if measurements.ndim != 2:
            raise AttributeError(
                "measurements must be in dimension n_obs x n_channel")
        self.measurements = measurements
        self.n_obs, self.n_channel = self.measurements.shape
        # validate descriptor lengths against the matching data dimension
        check_descriptor_length_error(obs_descriptors,
                                      "obs_descriptors",
                                      self.n_obs
                                      )
        check_descriptor_length_error(channel_descriptors,
                                      "channel_descriptors",
                                      self.n_channel
                                      )
        self.descriptors = parse_input_descriptor(descriptors)
        self.obs_descriptors = parse_input_descriptor(obs_descriptors)
        self.channel_descriptors = parse_input_descriptor(channel_descriptors)

    def __repr__(self):
        """
        defines string which is printed for the object
        """
        return (f'pyrsa.data.{self.__class__.__name__}(\n'
                f'measurements = \n{self.measurements}\n'
                f'descriptors = \n{self.descriptors}\n'
                f'obs_descriptors = \n{self.obs_descriptors}\n'
                f'channel_descriptors = \n{self.channel_descriptors}\n'
                )

    def __str__(self):
        """
        defines the output of print
        """
        string_desc = format_descriptor(self.descriptors)
        string_obs_desc = format_descriptor(self.obs_descriptors)
        string_channel_desc = format_descriptor(self.channel_descriptors)
        # only the first five observations are shown to keep output short
        if self.measurements.shape[0] > 5:
            measurements = self.measurements[:5, :]
        else:
            measurements = self.measurements
        return (f'pyrsa.data.{self.__class__.__name__}\n'
                f'measurements = \n{measurements}\n...\n\n'
                f'descriptors: \n{string_desc}\n\n'
                f'obs_descriptors: \n{string_obs_desc}\n\n'
                f'channel_descriptors: \n{string_channel_desc}\n'
                )

    def split_obs(self, by):
        """ Returns a list Datasets split by obs

        Args:
            by(String): the descriptor by which the splitting is made

        Returns:
            list of Datasets, splitted by the selected obs_descriptor
        """
        raise NotImplementedError(
            "split_obs function not implemented in used Dataset class!")

    def split_channel(self, by):
        """ Returns a list Datasets split by channels

        Args:
            by(String): the descriptor by which the splitting is made

        Returns:
            list of Datasets, splitted by the selected channel_descriptor
        """
        raise NotImplementedError(
            "split_channel function not implemented in used Dataset class!")

    def subset_obs(self, by, value):
        """ Returns a subsetted Dataset defined by certain obs value

        Args:
            by(String): the descriptor by which the subset selection is made
                from obs dimension
            value: the value by which the subset selection is made
                from obs dimension

        Returns:
            Dataset, with subset defined by the selected obs_descriptor
        """
        raise NotImplementedError(
            "subset_obs function not implemented in used Dataset class!")

    def subset_channel(self, by, value):
        """ Returns a subsetted Dataset defined by certain channel value

        Args:
            by(String): the descriptor by which the subset selection is made
                from channel dimension
            value: the value by which the subset selection is made
                from channel dimension

        Returns:
            Dataset, with subset defined by the selected channel_descriptor
        """
        raise NotImplementedError(
            "subset_channel function not implemented in used Dataset class!")

    def save(self, filename, file_type='hdf5'):
        """ Saves the dataset object to a file

        Args:
            filename(String): path to the file
                [or opened file]
            file_type(String): Type of file to create:
                hdf5: hdf5 file
                pkl: pickle file

        Raises:
            ValueError: if file_type is neither 'hdf5' nor 'pkl'
        """
        data_dict = self.to_dict()
        if file_type == 'hdf5':
            write_dict_hdf5(filename, data_dict)
        elif file_type == 'pkl':
            write_dict_pkl(filename, data_dict)
        else:
            # bug fix: unknown file types used to be silently ignored,
            # leaving the caller without a file and without an error
            raise ValueError(f'file_type not understood: {file_type}')

    def to_dict(self):
        """ Generates a dictionary which contains the information to
        recreate the dataset object. Used for saving to disc

        Returns:
            data_dict(dict): dictionary with dataset information
        """
        data_dict = {}
        data_dict['measurements'] = self.measurements
        data_dict['descriptors'] = self.descriptors
        data_dict['obs_descriptors'] = self.obs_descriptors
        data_dict['channel_descriptors'] = self.channel_descriptors
        data_dict['type'] = type(self).__name__
        return data_dict
class Dataset(DatasetBase):
    """
    Dataset class is a standard version of DatasetBase.
    It contains one data set - or multiple data sets with the same structure
    """
    def split_obs(self, by):
        """ Returns a list Datasets splited by obs

        Args:
            by(String): the descriptor by which the splitting is made

        Returns:
            list of Datasets, splitted by the selected obs_descriptor
        """
        unique_values = get_unique_unsorted(self.obs_descriptors[by])
        dataset_list = []
        for v in unique_values:
            selection = (self.obs_descriptors[by] == v)
            measurements = self.measurements[selection, :]
            descriptors = self.descriptors
            obs_descriptors = subset_descriptor(
                self.obs_descriptors, selection)
            channel_descriptors = self.channel_descriptors
            dataset = Dataset(measurements=measurements,
                              descriptors=descriptors,
                              obs_descriptors=obs_descriptors,
                              channel_descriptors=channel_descriptors)
            dataset_list.append(dataset)
        return dataset_list

    def split_channel(self, by):
        """ Returns a list Datasets splited by channels

        Args:
            by(String): the descriptor by which the splitting is made

        Returns:
            list of Datasets, splitted by the selected channel_descriptor
        """
        unique_values = get_unique_unsorted(self.channel_descriptors[by])
        dataset_list = []
        for v in unique_values:
            selection = (self.channel_descriptors[by] == v)
            measurements = self.measurements[:, selection]
            # the split value is recorded in the dataset-level descriptors
            descriptors = self.descriptors.copy()
            descriptors[by] = v
            obs_descriptors = self.obs_descriptors
            channel_descriptors = subset_descriptor(
                self.channel_descriptors, selection)
            dataset = Dataset(measurements=measurements,
                              descriptors=descriptors,
                              obs_descriptors=obs_descriptors,
                              channel_descriptors=channel_descriptors)
            dataset_list.append(dataset)
        return dataset_list

    def subset_obs(self, by, value):
        """ Returns a subsetted Dataset defined by certain obs value

        Args:
            by(String): the descriptor by which the subset selection
                is made from obs dimension
            value: the value by which the subset selection is made
                from obs dimension

        Returns:
            Dataset, with subset defined by the selected obs_descriptor
        """
        selection = bool_index(self.obs_descriptors[by], value)
        measurements = self.measurements[selection, :]
        descriptors = self.descriptors
        obs_descriptors = subset_descriptor(
            self.obs_descriptors, selection)
        channel_descriptors = self.channel_descriptors
        dataset = Dataset(measurements=measurements,
                          descriptors=descriptors,
                          obs_descriptors=obs_descriptors,
                          channel_descriptors=channel_descriptors)
        return dataset

    def subset_channel(self, by, value):
        """ Returns a subsetted Dataset defined by certain channel value

        Args:
            by(String): the descriptor by which the subset selection is
                made from channel dimension
            value: the value by which the subset selection is made
                from channel dimension

        Returns:
            Dataset, with subset defined by the selected channel_descriptor
        """
        selection = bool_index(self.channel_descriptors[by], value)
        measurements = self.measurements[:, selection]
        descriptors = self.descriptors
        obs_descriptors = self.obs_descriptors
        channel_descriptors = subset_descriptor(
            self.channel_descriptors, selection)
        dataset = Dataset(measurements=measurements,
                          descriptors=descriptors,
                          obs_descriptors=obs_descriptors,
                          channel_descriptors=channel_descriptors)
        return dataset

    def sort_by(self, by):
        """ sorts the dataset by a given observation descriptor

        Args:
            by(String): the descriptor by which the dataset shall be sorted

        Returns:
            ---
        """
        desc = self.obs_descriptors[by]
        order = np.argsort(desc)
        self.measurements = self.measurements[order]
        self.obs_descriptors = subset_descriptor(self.obs_descriptors, order)

    def odd_even_split(self, obs_desc):
        """
        Perform a simple odd-even split on a PyRSA dataset. It will be
        partitioned into n different datasets, where n is the number of
        distinct values on dataset.obs_descriptors[obs_desc].
        The resulting list will be split into odd and even (index) subset.
        The datasets contained in these subsets will then be merged.

        Args:
            obs_desc (str):
                Observation descriptor, basis for partitioning (must contained
                in keys of dataset.obs_descriptors)

        Returns:
            odd_split (Dataset):
                subset of the Dataset with odd list-indices after partitioning
                according to obs_desc
            even_split (Dataset):
                subset of the Dataset with even list-indices after partitioning
                according to obs_desc
        """
        assert obs_desc in self.obs_descriptors.keys(), \
            "obs_desc must be contained in keys of dataset.obs_descriptors"
        ds_part = self.split_obs(obs_desc)
        odd_list = ds_part[0::2]
        even_list = ds_part[1::2]
        odd_split = merge_subsets(odd_list)
        even_split = merge_subsets(even_list)
        return odd_split, even_split

    def nested_odd_even_split(self, l1_obs_desc, l2_obs_desc):
        """
        Nested version of odd_even_split, where dataset is first partitioned
        according to the l1_obs_desc and each partition is again partitioned
        according to the l2_obs_desc (after which the actual oe-split occurs).
        Useful for balancing, especially if the order of your measurements is
        inconsistent, or if the two descriptors are not orthogonalized. It's
        advised to apply .sort_by(l2_obs_desc) to the output of this function.

        Args:
            l1_obs_desc (str):
                Observation descriptor, basis for level 1 partitioning
                (must contained in keys of dataset.obs_descriptors)
            l2_obs_desc (str):
                Observation descriptor, basis for level 2 partitioning

        Returns:
            odd_split (Dataset):
                subset of the Dataset with odd list-indices after partitioning
                according to obs_desc
            even_split (Dataset):
                subset of the Dataset with even list-indices after partitioning
                according to obs_desc
        """
        # bug fix: the original assertion `l1_obs_desc and l2_obs_desc in keys`
        # only tested l1_obs_desc for truthiness, never for membership
        assert l1_obs_desc in self.obs_descriptors.keys() \
            and l2_obs_desc in self.obs_descriptors.keys(), \
            "observation descriptors must be contained in keys " \
            + "of dataset.obs_descriptors"
        ds_part = self.split_obs(l1_obs_desc)
        odd_list = []
        even_list = []
        for partition in ds_part:
            odd_split, even_split = partition.odd_even_split(l2_obs_desc)
            odd_list.append(odd_split)
            even_list.append(even_split)
        odd_split = merge_subsets(odd_list)
        even_split = merge_subsets(even_list)
        return odd_split, even_split
class TemporalDataset(Dataset):
    """
    TemporalDataset for spatio-temporal datasets

    Args:
        measurements (numpy.ndarray): n_obs x n_channel x time 3d-array,
        descriptors (dict): descriptors (metadata)
        obs_descriptors (dict): observation descriptors (all
            are array-like with shape = (n_obs,...))
        channel_descriptors (dict): channel descriptors (all are
            array-like with shape = (n_channel,...))
        time_descriptors (dict): time descriptors (all are
            array-like with shape = (n_time,...))

            time_descriptors needs to contain one key 'time' that
            specifies the time-coordinate. if None is provided, 'time' is
            set as (0, 1, ..., n_time-1)

    Returns:
        dataset object
    """
    def __init__(self, measurements, descriptors=None,
                 obs_descriptors=None, channel_descriptors=None,
                 time_descriptors=None):
        if measurements.ndim != 3:
            raise AttributeError(
                "measurements must be in dimension n_obs x n_channel x time")
        self.measurements = measurements
        self.n_obs, self.n_channel, self.n_time = self.measurements.shape
        if time_descriptors is None:
            time_descriptors = {'time': np.arange(self.n_time)}
        elif 'time' not in time_descriptors:
            time_descriptors['time'] = np.arange(self.n_time)
            # bug fix: this used to `raise Warning(...)`, which aborted
            # construction entirely and made the assignment above pointless;
            # a non-fatal warning is what the message describes
            warnings.warn(
                "there was no 'time' provided in dictionary time_descriptors"
                "\n'time' will be set to (0, 1, ..., n_time-1)")
        check_descriptor_length_error(obs_descriptors,
                                      "obs_descriptors",
                                      self.n_obs
                                      )
        check_descriptor_length_error(channel_descriptors,
                                      "channel_descriptors",
                                      self.n_channel
                                      )
        check_descriptor_length_error(time_descriptors,
                                      "time_descriptors",
                                      self.n_time
                                      )
        self.descriptors = parse_input_descriptor(descriptors)
        self.obs_descriptors = parse_input_descriptor(obs_descriptors)
        self.channel_descriptors = parse_input_descriptor(channel_descriptors)
        self.time_descriptors = parse_input_descriptor(time_descriptors)

    def __str__(self):
        """
        defines the output of print
        """
        string_desc = format_descriptor(self.descriptors)
        string_obs_desc = format_descriptor(self.obs_descriptors)
        string_channel_desc = format_descriptor(self.channel_descriptors)
        string_time_desc = format_descriptor(self.time_descriptors)
        # only the first five observations are shown to keep output short
        if self.measurements.shape[0] > 5:
            measurements = self.measurements[:5, :, :]
        else:
            measurements = self.measurements
        return (f'pyrsa.data.{self.__class__.__name__}\n'
                f'measurements = \n{measurements}\n...\n\n'
                f'descriptors: \n{string_desc}\n\n'
                f'obs_descriptors: \n{string_obs_desc}\n\n'
                f'channel_descriptors: \n{string_channel_desc}\n'
                f'time_descriptors: \n{string_time_desc}\n'
                )

    def split_obs(self, by):
        """ Returns a list TemporalDataset splited by obs

        Args:
            by(String): the descriptor by which the splitting is made

        Returns:
            list of TemporalDataset, splitted by the selected obs_descriptor
        """
        unique_values = get_unique_unsorted(self.obs_descriptors[by])
        dataset_list = []
        for v in unique_values:
            selection = (self.obs_descriptors[by] == v)
            measurements = self.measurements[selection, :, :]
            descriptors = self.descriptors
            obs_descriptors = subset_descriptor(
                self.obs_descriptors, selection)
            channel_descriptors = self.channel_descriptors
            time_descriptors = self.time_descriptors
            dataset = TemporalDataset(
                measurements=measurements,
                descriptors=descriptors,
                obs_descriptors=obs_descriptors,
                channel_descriptors=channel_descriptors,
                time_descriptors=time_descriptors)
            dataset_list.append(dataset)
        return dataset_list

    def split_channel(self, by):
        """ Returns a list TemporalDataset splited by channels

        Args:
            by(String): the descriptor by which the splitting is made

        Returns:
            list of TemporalDataset, splitted by the selected
            channel_descriptor
        """
        unique_values = get_unique_unsorted(self.channel_descriptors[by])
        dataset_list = []
        for v in unique_values:
            selection = (self.channel_descriptors[by] == v)
            measurements = self.measurements[:, selection, :]
            # the split value is recorded in the dataset-level descriptors
            descriptors = self.descriptors.copy()
            descriptors[by] = v
            obs_descriptors = self.obs_descriptors
            channel_descriptors = subset_descriptor(
                self.channel_descriptors, selection)
            time_descriptors = self.time_descriptors
            dataset = TemporalDataset(
                measurements=measurements,
                descriptors=descriptors,
                obs_descriptors=obs_descriptors,
                channel_descriptors=channel_descriptors,
                time_descriptors=time_descriptors)
            dataset_list.append(dataset)
        return dataset_list

    def split_time(self, by):
        """ Returns a list TemporalDataset splited by time

        Args:
            by(String): the descriptor by which the splitting is made

        Returns:
            list of TemporalDataset, splitted by the selected time_descriptor
        """
        time = get_unique_unsorted(self.time_descriptors[by])
        dataset_list = []
        for v in time:
            selection = (self.time_descriptors[by] == v)
            measurements = self.measurements[:, :, selection]
            descriptors = self.descriptors
            obs_descriptors = self.obs_descriptors
            channel_descriptors = self.channel_descriptors
            time_descriptors = subset_descriptor(
                self.time_descriptors, selection)
            dataset = TemporalDataset(
                measurements=measurements,
                descriptors=descriptors,
                obs_descriptors=obs_descriptors,
                channel_descriptors=channel_descriptors,
                time_descriptors=time_descriptors)
            dataset_list.append(dataset)
        return dataset_list

    def bin_time(self, by, bins):
        """ Returns an object TemporalDataset with time-binned data.

        Args:
            bins(array-like): list of bins, with bins[i] containing the vector
                of time-points for the i-th bin

        Returns:
            a single TemporalDataset object

            Data is averaged within time-bins.
            'time' descriptor is set to the average of the
            binned time-points.
        """
        time = self.time_descriptors[by]
        n_bins = len(bins)
        binned_measurements = np.zeros((self.n_obs, self.n_channel, n_bins))
        binned_time = np.zeros(n_bins)
        for t in range(n_bins):
            t_idx = np.isin(time, bins[t])
            binned_measurements[:, :, t] = np.mean(
                self.measurements[:, :, t_idx], axis=2)
            binned_time[t] = np.mean(time[t_idx])
        time_descriptors = self.time_descriptors.copy()
        time_descriptors[by] = binned_time
        # adding the bins as an additional descriptor currently
        # does not work because of check_descriptor_length which transforms
        # it into a numpy.array.
        # time_descriptors['bins'] = [x for x in bins]
        time_descriptors['bins'] = [
            np.array2string(x, precision=2, separator=',') for x in bins]
        dataset = TemporalDataset(
            measurements=binned_measurements,
            descriptors=self.descriptors,
            obs_descriptors=self.obs_descriptors,
            channel_descriptors=self.channel_descriptors,
            time_descriptors=time_descriptors)
        return dataset

    def subset_obs(self, by, value):
        """ Returns a subsetted TemporalDataset defined by certain obs value

        Args:
            by(String): the descriptor by which the subset selection
                is made from obs dimension
            value: the value by which the subset selection is made
                from obs dimension

        Returns:
            TemporalDataset, with subset defined by the selected
            obs_descriptor
        """
        selection = bool_index(self.obs_descriptors[by], value)
        measurements = self.measurements[selection, :, :]
        descriptors = self.descriptors
        obs_descriptors = subset_descriptor(
            self.obs_descriptors, selection)
        channel_descriptors = self.channel_descriptors
        time_descriptors = self.time_descriptors
        dataset = TemporalDataset(
            measurements=measurements,
            descriptors=descriptors,
            obs_descriptors=obs_descriptors,
            channel_descriptors=channel_descriptors,
            time_descriptors=time_descriptors)
        return dataset

    def subset_channel(self, by, value):
        """ Returns a subsetted TemporalDataset defined by certain channel
        value

        Args:
            by(String): the descriptor by which the subset selection is
                made from channel dimension
            value: the value by which the subset selection is made
                from channel dimension

        Returns:
            TemporalDataset, with subset defined by the selected
            channel_descriptor
        """
        selection = bool_index(self.channel_descriptors[by], value)
        measurements = self.measurements[:, selection]
        descriptors = self.descriptors
        obs_descriptors = self.obs_descriptors
        channel_descriptors = subset_descriptor(
            self.channel_descriptors, selection)
        time_descriptors = self.time_descriptors
        dataset = TemporalDataset(
            measurements=measurements,
            descriptors=descriptors,
            obs_descriptors=obs_descriptors,
            channel_descriptors=channel_descriptors,
            time_descriptors=time_descriptors)
        return dataset

    def subset_time(self, by, t_from, t_to):
        """ Returns a subsetted TemporalDataset with time between t_from and
        t_to (inclusive on both ends)

        Args:
            by(String): the descriptor by which the subset selection is
                made from the time dimension
            t_from: time-point from which onwards data should be subsetted
            t_to: time-point until which data should be subsetted

        Returns:
            TemporalDataset, with subset defined by the selected
            time_descriptor
        """
        time = get_unique_unsorted(self.time_descriptors[by])
        sel_time = [t for t in time if t_from <= t <= t_to]
        selection = bool_index(self.time_descriptors[by], sel_time)
        measurements = self.measurements[:, :, selection]
        descriptors = self.descriptors
        obs_descriptors = self.obs_descriptors
        channel_descriptors = self.channel_descriptors
        time_descriptors = subset_descriptor(
            self.time_descriptors, selection)
        dataset = TemporalDataset(
            measurements=measurements,
            descriptors=descriptors,
            obs_descriptors=obs_descriptors,
            channel_descriptors=channel_descriptors,
            time_descriptors=time_descriptors)
        return dataset

    def sort_by(self, by):
        """ sorts the dataset by a given observation descriptor

        Args:
            by(String): the descriptor by which the dataset shall be sorted

        Returns:
            ---
        """
        desc = self.obs_descriptors[by]
        order = np.argsort(desc)
        self.measurements = self.measurements[order]
        self.obs_descriptors = subset_descriptor(self.obs_descriptors, order)

    def convert_to_dataset(self, by):
        """ converts to Dataset long format.
        time dimension is absorbed into observation dimension

        Args:
            by(String): the descriptor which indicates the time dimension in
                the time_descriptor

        Returns:
            Dataset
        """
        time = get_unique_unsorted(self.time_descriptors[by])
        descriptors = self.descriptors
        channel_descriptors = self.channel_descriptors.copy()
        measurements = np.empty([0, self.n_channel])
        obs_descriptors = dict.fromkeys(self.obs_descriptors, [])
        for key in self.time_descriptors:
            obs_descriptors[key] = np.array([])
        # stack the time slices below each other; each time point repeats
        # the observation descriptors and contributes its time descriptors
        for v in time:
            selection = (self.time_descriptors[by] == v)
            measurements = np.concatenate(
                (measurements,
                 self.measurements[:, :, selection].squeeze()),
                axis=0)
            for key in self.obs_descriptors:
                obs_descriptors[key] = np.concatenate(
                    (obs_descriptors[key],
                     self.obs_descriptors[key].copy()),
                    axis=0)
            for key in self.time_descriptors:
                obs_descriptors[key] = np.concatenate(
                    (obs_descriptors[key],
                     np.repeat(self.time_descriptors[key][selection],
                               self.n_obs)),
                    axis=0)
        dataset = Dataset(measurements=measurements,
                          descriptors=descriptors,
                          obs_descriptors=obs_descriptors,
                          channel_descriptors=channel_descriptors)
        return dataset

    def to_dict(self):
        """ Generates a dictionary which contains the information to
        recreate the TemporalDataset object. Used for saving to disc

        Returns:
            data_dict(dict): dictionary with TemporalDataset information
        """
        data_dict = {}
        data_dict['measurements'] = self.measurements
        data_dict['descriptors'] = self.descriptors
        data_dict['obs_descriptors'] = self.obs_descriptors
        data_dict['channel_descriptors'] = self.channel_descriptors
        # bug fix: this used to store channel_descriptors under the
        # 'time_descriptors' key, so time information was lost on save
        data_dict['time_descriptors'] = self.time_descriptors
        data_dict['type'] = type(self).__name__
        return data_dict
def load_dataset(filename, file_type=None):
    """ loads a Dataset object from disc

    Args:
        filename(String): path to file to load
        file_type(String): 'hdf5' or 'pkl'; inferred from the filename
            extension when omitted

    Raises:
        ValueError: if the file type cannot be determined
    """
    # infer the file type from the extension when not given explicitly
    if file_type is None and isinstance(filename, str):
        if filename.endswith('.pkl'):
            file_type = 'pkl'
        elif filename.endswith('.h5') or filename.endswith('hdf5'):
            file_type = 'hdf5'
    if file_type == 'hdf5':
        data_dict = read_dict_hdf5(filename)
    elif file_type == 'pkl':
        data_dict = read_dict_pkl(filename)
    else:
        raise ValueError('filetype not understood')
    return dataset_from_dict(data_dict)
def dataset_from_dict(data_dict):
    """ regenerates a Dataset object from the dictionary representation

    Currently this function works for Dataset, DatasetBase, and
    TemporalDataset objects

    Args:
        data_dict(dict): the dictionary representation

    Returns:
        data(Dataset): the regenerated Dataset

    Raises:
        ValueError: if data_dict['type'] names an unknown dataset class
    """
    ds_type = data_dict['type']
    if ds_type not in ('Dataset', 'DatasetBase', 'TemporalDataset'):
        raise ValueError('type of Dataset not recognized')
    common = dict(
        descriptors=data_dict['descriptors'],
        obs_descriptors=data_dict['obs_descriptors'],
        channel_descriptors=data_dict['channel_descriptors'])
    if ds_type == 'TemporalDataset':
        return TemporalDataset(
            data_dict['measurements'],
            time_descriptors=data_dict['time_descriptors'],
            **common)
    if ds_type == 'DatasetBase':
        return DatasetBase(data_dict['measurements'], **common)
    return Dataset(data_dict['measurements'], **common)
def merge_subsets(dataset_list):
    """
    Generate a dataset object from a list of smaller dataset objects
    (e.g., as generated by the subset_* methods). Assumes that descriptors,
    channel descriptors and number of channels per observation match.

    Args:
        dataset_list (list):
            List containing PyRSA datasets

    Returns:
        merged_dataset (Dataset):
            PyRSA dataset created from all datasets in dataset_list
    """
    assert isinstance(dataset_list, list), "Provided object is not a list."
    for ds in dataset_list:
        assert "Dataset" in str(type(ds)), \
            "Provided list does not only contain Dataset objects."
    baseline_ds = dataset_list[0]
    descriptors = baseline_ds.descriptors.copy()
    channel_descriptors = baseline_ds.channel_descriptors.copy()
    obs_descriptors = baseline_ds.obs_descriptors.copy()
    for ds in dataset_list[1:]:
        assert descriptors == ds.descriptors, \
            "Dataset descriptors do not match."
        obs_descriptors = append_obs_descriptors(obs_descriptors,
                                                 ds.obs_descriptors.copy())
    # performance fix: a single concatenate instead of repeated np.append,
    # which re-allocated the full array on every iteration (O(n^2))
    measurements = np.concatenate(
        [ds.measurements for ds in dataset_list], axis=0)
    merged_dataset = Dataset(measurements,
                             descriptors=descriptors,
                             obs_descriptors=obs_descriptors,
                             channel_descriptors=channel_descriptors)
    return merged_dataset
from os.path import basename
import numpy
from scipy.io import loadmat
from pyrsa.rdm.rdms import RDMs
def load_rdms(fpath, sort=True):
    """Read a Meadows results file and return any RDMs as a pyrsa object

    Args:
        fpath (str): path to .mat Meadows results file
        sort (bool): whether to sort the RDM based on the stimulus names

    Raises:
        ValueError: raised if the file is missing an expected variable;
            this can happen if the file does not contain MA task data

    Returns:
        RDMs: All rdms found in the data file as an RDMs object
    """
    info = extract_filename_segments(fpath)
    data = loadmat(fpath)
    if info['participant_scope'] == 'single':
        # single-participant file: one utv matrix plus one stimulus list
        for required in ('stimuli', 'rdmutv'):
            if required not in data:
                raise ValueError(f'File missing variable: {required}')
        utvs = data['rdmutv']
        stimuli_fnames = data['stimuli']
        pnames = [info['participant']]
    else:
        # multi-participant file: one stimuli_*/rdmutv_* pair per participant
        stim_vars = [v for v in data if v.startswith('stimuli')]
        stimuli_fnames = data[stim_vars[0]]
        pnames = ['-'.join(v.split('_')[1:]) for v in stim_vars]
        utv_vars = ['rdmutv_' + p.replace('-', '_') for p in pnames]
        utvs = numpy.squeeze(numpy.stack([data[v] for v in utv_vars]))
    # condition names: stimulus file names without their extension
    conds = [fname.split('.')[0] for fname in stimuli_fnames]
    desc_keys = (
        'participant',
        'task_index',
        'task_name',
        'experiment_name'
    )
    rdms = RDMs(
        utvs,
        dissimilarity_measure='euclidean',
        descriptors={key: info[key] for key in desc_keys if key in info},
        rdm_descriptors=dict(participants=pnames),
        pattern_descriptors=dict(conds=conds),
    )
    if sort:
        rdms.sort_by(conds='alpha')
    return rdms
def extract_filename_segments(fpath):
    """Get information from the name of a downloaded results file

    Will determine:
        * participant_scope: 'single' or 'multiple', how many participant
            sessions this file covers.
        * task_scope: 'single' or 'multiple', how many experiment tasks this
            file covers.
        * participant: the Meadows nickname of the participant, if this is a
            single participation file.
        * task_index: the 1-based index of the task in the experiment, if
            this is a single participant file.
        * task_name: the name of the task in the experiment, if
            this is not a single participant file.
        * version: the experiment version as a string.
        * experiment_name: name of the experiment on Meadows.
        * structure: the structure of the data contained, one of 'tree',
            'events', '1D', '2D', etc.
        * filetype: the file extension and file format used to serialize the
            data.

    Args:
        fpath (str): File system path to downloaded file

    Returns:
        dict: Dictionary with the fields described above.
    """
    # robustness fix: rsplit only on the last dot, so that base names
    # containing a dot no longer raise an unpacking ValueError
    fname, ext = basename(fpath).rsplit('.', 1)
    segments = fname.split('_')
    info = dict(
        task_scope='single',
        version=segments[3].replace('v', ''),
        experiment_name=segments[1],
        structure=segments[-1],
        filetype=ext
    )
    if segments[-2].isdigit():
        # single-participant file: ..._<participant>_<task index>_<structure>
        info['participant_scope'] = 'single'
        info['participant'] = segments[-3]
        info['task_index'] = int(segments[-2])
    else:
        # multi-participant file: ..._<task name>_<structure>
        info['participant_scope'] = 'multiple'
        info['task_name'] = segments[-2]
    return info
import numpy as np
from scipy.spatial.distance import squareform
def batch_to_vectors(x):
    """converts a *stack* of RDMs in vector or matrix form into vector form

    Args:
        x: stack of RDMs (1d single vector, 2d stack of vectors,
            or 3d stack of square matrices)

    Returns:
        tuple: **v** (np.ndarray): 2D, vector form of the stack of RDMs

        **n_rdm** (int): number of rdms

        **n_cond** (int): number of conditions

    Raises:
        ValueError: if x has more than 3 dimensions
    """
    if x.ndim == 1:
        # single RDM vector: promote to a stack of one
        v = np.array([x])
        n_rdm = 1
        n_cond = _get_n_from_reduced_vectors(v)
    elif x.ndim == 2:
        v = x
        n_rdm = x.shape[0]
        n_cond = _get_n_from_reduced_vectors(x)
    elif x.ndim == 3:
        n_rdm = x.shape[0]
        n_cond = x.shape[1]
        v = np.ndarray((n_rdm, n_cond * (n_cond - 1) // 2))
        for idx in np.arange(n_rdm):
            v[idx, :] = squareform(x[idx, :, :], checks=False)
    else:
        # robustness fix: previously an unsupported ndim fell through and
        # raised an UnboundLocalError on the return statement
        raise ValueError('x must have at most 3 dimensions')
    return v, n_rdm, n_cond
def batch_to_matrices(x):
    """converts a *stack* of RDMs in vector or matrix form into matrix form

    Args:
        **x**: stack of RDMs (1d single vector, 2d stack of vectors,
            or 3d stack of square matrices)

    Returns:
        tuple: **v** (np.ndarray): 3D, matrix form of the stack of RDMs

        **n_rdm** (int): number of rdms

        **n_cond** (int): number of conditions

    Raises:
        ValueError: if x has more than 3 dimensions
    """
    if x.ndim == 1:
        # generalization: accept a single RDM vector like batch_to_vectors
        x = np.array([x])
    if x.ndim == 2:
        v = x
        n_rdm = x.shape[0]
        n_cond = _get_n_from_reduced_vectors(x)
        m = np.ndarray((n_rdm, n_cond, n_cond))
        for idx in np.arange(n_rdm):
            m[idx, :, :] = squareform(v[idx, :])
    elif x.ndim == 3:
        m = x
        n_rdm = x.shape[0]
        n_cond = x.shape[1]
    else:
        # robustness fix: previously an unsupported ndim fell through and
        # raised an UnboundLocalError on the return statement
        raise ValueError('x must have at most 3 dimensions')
    return m, n_rdm, n_cond
def _get_n_from_reduced_vectors(x):
"""
calculates the size of the RDM from the vector representation
Args:
**x**(np.ndarray): stack of RDM vectors (2D)
Returns:
int: n: size of the RDM
"""
return int(np.ceil(np.sqrt(x.shape[1] * 2)))
def add_pattern_index(rdms, pattern_descriptor):
    """
    adds index if pattern_descriptor is None

    Args:
        **rdms** (pyrsa.rdm.RDMs): rdms object to be parsed

        **pattern_descriptor** (String or None): descriptor name;
            falls back to 'index' if None

    Returns:
        pattern_descriptor
        pattern_select
    """
    # fall back to the default 'index' descriptor, as the docstring
    # promises (previously None was passed through and raised a KeyError)
    if pattern_descriptor is None:
        pattern_descriptor = 'index'
    pattern_select = rdms.pattern_descriptors[pattern_descriptor]
    pattern_select = np.unique(pattern_select)
    return pattern_descriptor, pattern_select
import numpy as np
def bool_index(descriptor, value):
    """
    creates a boolean index vector where a descriptor has a value

    Args:
        descriptor(numpy.ndarray): descriptor vector
        value: value or list of values to mark

    Returns:
        numpy.ndarray:
            bool_index: boolean index vector where descriptor == value
    """
    descriptor = np.array(descriptor)
    if type(value) in (list, tuple, np.ndarray):
        # mark entries that match any of the given values
        matches = np.array([descriptor == v for v in value])
        return np.any(matches, axis=0)
    return np.array(descriptor == value)
def format_descriptor(descriptors):
    """ formats a descriptor dictionary

    Args:
        descriptors(dict): the descriptor dictionary

    Returns:
        String: formated string to show dict
    """
    # one 'key = value' line per entry
    return ''.join(f'{key} = {value}\n'
                   for key, value in descriptors.items())
def parse_input_descriptor(descriptors):
    """ parse input descriptor checks whether an input descriptors dictionary
    is a dictionary. If it is None instead it is replaced by an empty dict.
    Otherwise an error is raised.

    Args:
        descriptors(dict/None): the descriptor dictionary

    Returns:
        dict: descriptor dictionary
    """
    if descriptors is None:
        return {}
    if not isinstance(descriptors, dict):
        raise ValueError('Descriptors must be dictionaries!')
    return descriptors
def check_descriptor_length(descriptor, n_element):
    """
    Checks whether the entries of a descriptor dictionary have the right
    length. Note: entries are normalized to numpy arrays in place.

    Args:
        descriptor(dict): the descriptor dictionary
        n_element: the correct length of the descriptors

    Returns:
        bool
    """
    for key in descriptor:
        value = np.asarray(descriptor[key])
        if value.ndim == 0:
            # 0-d array happens e.g. when casting str to array
            value = value.flatten()
        descriptor[key] = value
        if value.shape[0] != n_element:
            return False
    return True
def subset_descriptor(descriptor, indices):
    """
    retrieves a subset of a descriptor given by indices.

    Args:
        descriptor(dict): the descriptor dictionary
        indices: the indices to be extracted

    Returns:
        extracted_descriptor(dict): the selected subset of the descriptor
    """
    is_sequence = isinstance(indices, (tuple, list))
    extracted = {}
    for key, values in descriptor.items():
        if is_sequence:
            subset = [values[i] for i in indices]
        else:
            subset = np.array(values)[indices]
        if np.array(subset).ndim == 0:
            # a scalar selection is wrapped to keep entries list-like
            subset = [subset]
        extracted[key] = subset
    return extracted
def append_descriptor(descriptor, desc_new):
    """
    appends a descriptor to an existing one

    Args:
        descriptor(dict): the descriptor dictionary
        desc_new(dict): the descriptor dictionary to append

    Returns:
        descriptor(dict): the longer descriptor
    """
    for key in list(descriptor.keys()):
        assert key in desc_new.keys(), f'appended descriptors misses key {key}'
        descriptor[key] = np.concatenate(
            (descriptor[key], desc_new[key]), axis=0)
    # the 'index' entry is regenerated rather than concatenated
    descriptor['index'] = np.arange(len(descriptor['index']))
    return descriptor
def check_descriptor_length_error(descriptor, name, n_element):
    """
    Raises an error if the given descriptor does not have the right length

    Args:
        descriptor(dict/None): the descriptor dictionary
        name(String): Descriptor name used for error message
        n_element: the desired descriptor length

    Returns:
        ---
    """
    if descriptor is None:
        # nothing to check
        return
    if not check_descriptor_length(descriptor, n_element):
        raise AttributeError(
            name + " have mismatched dimension with measurements.")
def append_obs_descriptors(dict_orig, dict_addit):
    """
    Merge two dictionaries of observation descriptors with matching keys and
    numpy arrays as values.
    """
    assert list(dict_orig.keys()) == list(dict_addit.keys()), \
        "Provided observationdescriptors have different keys."
    # concatenate entry-wise; np.append flattens to 1D arrays
    return {key: np.array(np.append(dict_orig[key], dict_addit[key]))
            for key in dict_orig}
import numpy as np
from scipy.spatial.distance import cdist
from tqdm import tqdm
from joblib import Parallel, delayed
from pyrsa.data.dataset import Dataset
from pyrsa.rdm.calc import calc_rdm
from pyrsa.rdm import RDMs
def _get_searchlight_neighbors(mask, center, radius=3):
"""Return indices for searchlight where distance
between a voxel and their center < radius (in voxels)
Args:
center (index): point around which to make searchlight sphere
Returns:
list: the list of volume indices that respect the
searchlight radius for the input center.
"""
center = np.array(center)
mask_shape = mask.shape
cx, cy, cz = np.array(center)
x = np.arange(mask_shape[0])
y = np.arange(mask_shape[1])
z = np.arange(mask_shape[2])
# First mask the obvious points
# - may actually slow down your calculation depending.
x = x[abs(x - cx) < radius]
y = y[abs(y - cy) < radius]
z = z[abs(z - cz) < radius]
# Generate grid of points
X, Y, Z = np.meshgrid(x, y, z)
data = np.vstack((X.ravel(), Y.ravel(), Z.ravel())).T
distance = cdist(data, center.reshape(1, -1), 'euclidean').ravel()
return tuple(data[distance < radius].T.tolist())
def get_volume_searchlight(mask, radius=2, threshold=1.0):
    """Searches through the non-zero voxels of the mask, selects centers where
    proportion of sphere voxels >= self.threshold.

    Args:
        mask ([numpy array]): binary brain mask
        radius (int, optional): the radius of each searchlight,
            defined in voxels. Defaults to 2.
        threshold (float, optional): Threshold of the proportion of voxels
            that need to be inside the brain mask in order for it to be
            considered a good searchlight center. Values go between
            0.0 - 1.0 where 1.0 means that 100% of the voxels need to be
            inside the brain mask. Defaults to 1.0.

    Returns:
        [numpy array]: array of centers of size n_centers x 3
        [list]: list of lists with neighbors - the length of the list will
            correspond to: n_centers x 3 x n_neighbors
    """
    mask = np.array(mask)
    assert mask.ndim == 3, "Mask needs to be a 3-dimensional numpy array"

    candidate_centers = list(zip(*np.nonzero(mask)))
    kept_centers = []
    kept_neighbors = []
    for candidate in tqdm(candidate_centers, desc='Finding searchlights...'):
        sphere = _get_searchlight_neighbors(mask, candidate, radius)
        # keep only centers whose sphere lies sufficiently inside the mask
        if mask[sphere].mean() >= threshold:
            kept_centers.append(candidate)
            kept_neighbors.append(sphere)

    kept_centers = np.array(kept_centers)
    assert kept_centers.shape[0] == len(kept_neighbors),\
        "number of centers and sets of neighbors do not match"
    print(f'Found {len(kept_neighbors)} searchlights')
    # turn the 3-dim coordinates to array coordinates
    centers = np.ravel_multi_index(kept_centers.T, mask.shape)
    neighbors = [np.ravel_multi_index(n, mask.shape) for n in kept_neighbors]
    return centers, neighbors
def get_searchlight_RDMs(data_2d, centers, neighbors, events,
                         method='correlation', verbose=True):
    """Iterates over all the searchlight centers and calculates the RDM

    Args:
        data_2d (2D numpy array): brain data,
            shape n_observations x n_channels (i.e. voxels/vertices)
        centers (1D numpy array): center indices for all searchlights as
            provided by pyrsa.util.searchlight.get_volume_searchlight
        neighbors (list): list of lists with neighbor voxel indices for all
            searchlights as provided by
            pyrsa.util.searchlight.get_volume_searchlight
        events (1D numpy array): 1D array of length n_observations
        method (str, optional): distance metric,
            see pyrsa.rdm.calc for options. Defaults to 'correlation'.
        verbose (bool, optional): Defaults to True.

    Returns:
        RDM [pyrsa.rdm.RDMs]: RDMs object with the RDM for each searchlight
            the RDM.rdm_descriptors['voxel_index']
            describes the center voxel index each RDM is associated with
    """
    data_2d, centers = np.array(data_2d), np.array(centers)
    n_centers = centers.shape[0]

    # For memory reasons, we chunk the data if we have more than 1000 RDMs
    if n_centers > 1000:
        # we can't run all centers at once, that will take too much memory
        # so split the centers into (up to) 100 roughly equal chunks
        chunked_center = np.split(np.arange(n_centers),
                                  np.linspace(0, n_centers,
                                              101, dtype=int)[1:-1])
        n_conds = len(np.unique(events))
        RDM = np.zeros((n_centers, n_conds * (n_conds - 1) // 2))
        # loop over chunks
        # bugfix: the loop variable was named `chunks` while the RDM row
        # assignment below referenced an undefined name `chunk`,
        # so this path always raised a NameError
        for chunk in tqdm(chunked_center, desc='Calculating RDMs...'):
            center_data = []
            for c in chunk:
                # grab this center and neighbors
                center = centers[c]
                center_neighbors = neighbors[c]
                # create a dataset object with this data
                ds = Dataset(data_2d[:, center_neighbors],
                             descriptors={'center': center},
                             obs_descriptors={'events': events},
                             channel_descriptors={'voxels': center_neighbors})
                center_data.append(ds)

            RDM_corr = calc_rdm(center_data, method=method,
                                descriptor='events')
            RDM[chunk, :] = RDM_corr.dissimilarities
    else:
        center_data = []
        for c in range(n_centers):
            # grab this center and neighbors
            center = centers[c]
            nb = neighbors[c]
            # create a dataset object with this data
            ds = Dataset(data_2d[:, nb],
                         descriptors={'center': c},
                         obs_descriptors={'events': events},
                         channel_descriptors={'voxels': nb})
            center_data.append(ds)
        # calculate RDMs for each dataset object
        RDM = calc_rdm(center_data, method=method,
                       descriptor='events').dissimilarities

    SL_rdms = RDMs(RDM,
                   rdm_descriptors={'voxel_index': centers},
                   dissimilarity_measure=method)
    return SL_rdms
def evaluate_models_searchlight(sl_RDM, models, eval_function, method='corr', theta=None, n_jobs=1):
    """evaluates each searchlighth with the given model/models

    Args:
        sl_RDM ([pyrsa.rdm.RDMs]): RDMs object as computed by
            pyrsa.util.searchlight.get_searchlight_RDMs
        models ([pyrsa.model]): models to evaluate - can also be
            a list of models
        eval_function (pyrsa.inference evaluation-function): evaluation
            function applied to each searchlight RDM
        method (str, optional): see pyrsa.rdm.compare for specifics.
            Defaults to 'corr'.
        theta: model parameter(s) passed through to eval_function
        n_jobs (int, optional): how many jobs to run. Defaults to 1.

    Returns:
        [list]: list with the model evaluation for each searchlight center
    """
    searchlights = tqdm(sl_RDM, desc='Evaluating models for each searchlight')
    jobs = (delayed(eval_function)(models, rdm, method=method, theta=theta)
            for rdm in searchlights)
    return Parallel(n_jobs=n_jobs)(jobs)
import numpy as np
from joblib import Parallel, delayed, effective_n_jobs
import warnings
from sklearn.base import BaseEstimator
from sklearn.metrics import euclidean_distances
from sklearn.utils import check_random_state, check_array, check_symmetric
from sklearn.isotonic import IsotonicRegression
from scipy.spatial.distance import squareform
from pyrsa.util.rdm_utils import _get_n_from_reduced_vectors
def weight_to_matrices(x):
    """converts a *stack* of weights in vector or matrix form into matrix form

    Args:
        **x** (np.ndarray): stack of weight matrices or weight vectors

    Returns:
        tuple: **v** (np.ndarray): 3D, matrix form of the stack of
        weight matrices
    """
    if x.ndim == 3:
        # already in matrix form
        m = x
    elif x.ndim == 2:
        n_rdm = x.shape[0]
        # n conditions from n*(n-1)/2 pairs: n == ceil(sqrt(2 * n_pairs))
        n_cond = int(np.ceil(np.sqrt(x.shape[1] * 2)))
        m = np.empty((n_rdm, n_cond, n_cond))
        for idx in range(n_rdm):
            m[idx] = squareform(x[idx])
    return m
def _smacof_single(dissimilarities, metric=True, n_components=2, init=None,
                   max_iter=300, verbose=0, eps=1e-3, random_state=None,
                   weight=None):
    """Computes multidimensional scaling using SMACOF algorithm.

    Parameters
    ----------
    dissimilarities : ndarray of shape (n_samples, n_samples)
        Pairwise dissimilarities between the points. Must be symmetric.
    metric : bool, default=True
        Compute metric or nonmetric SMACOF algorithm.
    n_components : int, default=2
        Number of dimensions in which to immerse the dissimilarities. If an
        ``init`` array is provided, this option is overridden and the shape of
        ``init`` is used to determine the dimensionality of the embedding
        space.
    init : ndarray of shape (n_samples, n_components), default=None
        Starting configuration of the embedding to initialize the algorithm. By
        default, the algorithm is initialized with a randomly chosen array.
    max_iter : int, default=300
        Maximum number of iterations of the SMACOF algorithm for a single run.
    verbose : int, default=0
        Level of verbosity.
    eps : float, default=1e-3
        Relative tolerance with respect to stress at which to declare
        convergence.
    random_state : int, RandomState instance or None, default=None
        Determines the random number generator used to initialize the centers.
        Pass an int for reproducible results across multiple function calls.
        See :term: `Glossary <random_state>`.
    weight : ndarray of shape (n_samples, n_samples), default=None
        symmetric weighting matrix of similarities.
        In default, all weights are 1.

    Returns
    -------
    X : ndarray of shape (n_samples, n_components)
        Coordinates of the points in a ``n_components``-space.
    stress : float
        The final value of the stress (sum of squared distance of the
        disparities and the distances for all constrained points).
    n_iter : int
        The number of iterations corresponding to the best stress.
    """
    dissimilarities = check_symmetric(dissimilarities, raise_exception=True)

    n_samples = dissimilarities.shape[0]
    random_state = check_random_state(random_state)

    # upper-triangular entries as a flat vector; zero entries are treated
    # as missing values throughout (see the nonmetric branch below)
    sim_flat = ((1 - np.tri(n_samples)) * dissimilarities).ravel()
    sim_flat_w = sim_flat[sim_flat != 0]
    if init is None:
        # Randomly choose initial configuration
        X = random_state.rand(n_samples * n_components)
        X = X.reshape((n_samples, n_components))
    else:
        # overrides the parameter p
        n_components = init.shape[1]
        if n_samples != init.shape[0]:
            raise ValueError("init matrix should be of shape (%d, %d)" %
                             (n_samples, n_components))
        X = init

    old_stress = None
    ir = IsotonicRegression()
    for it in range(max_iter):
        # Compute distance and monotonic regression
        dis = euclidean_distances(X)

        if metric:
            disparities = dissimilarities
        else:
            dis_flat = dis.ravel()
            # dissimilarities with 0 are considered as missing values
            dis_flat_w = dis_flat[sim_flat != 0]

            # Compute the disparities using a monotonic regression
            disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w)
            disparities = dis_flat.copy()
            disparities[sim_flat != 0] = disparities_flat
            disparities = disparities.reshape((n_samples, n_samples))
            # rescale disparities to a fixed total sum of squares
            disparities *= np.sqrt((n_samples * (n_samples - 1) / 2) /
                                   (disparities ** 2).sum())

        # Compute stress
        stress = ((dis.ravel() - disparities.ravel()) ** 2).sum() / 2

        # Update X using the Guttman transform
        dis[dis == 0] = 1e-5  # avoid division by zero below
        if weight is None:
            # unweighted case: V^+ reduces to I/n_samples
            ratio = disparities / dis
            B = - ratio
            B[np.arange(len(B)), np.arange(len(B))] += ratio.sum(axis=1)
            X = 1. / n_samples * np.dot(B, X)
        else:
            # weighted Guttman transform: X <- V^+ B X, where V is built
            # from the pairwise weights (Borg & Groenen, ch. 8)
            ratio = weight * disparities / dis
            B = - ratio
            B[np.arange(len(B)), np.arange(len(B))] += ratio.sum(axis=1)
            V = np.zeros((n_samples, n_samples))
            for nn in range(n_samples):
                for mm in range(nn, n_samples):
                    v = np.zeros((n_samples, 1))
                    v[nn], v[mm] = 1, -1
                    V += weight[nn, mm] * np.dot(v, v.T)
            X = np.dot(np.linalg.pinv(V), np.dot(B, X))
        # normalization term for the relative convergence criterion
        dis = np.sqrt((X ** 2).sum(axis=1)).sum()
        if verbose >= 2:
            print('it: %d, stress %s' % (it, stress))
        if old_stress is not None:
            if(old_stress - stress / dis) < eps:
                if verbose:
                    print('breaking at iteration %d with stress %s' % (it,
                                                                       stress))
                break
        old_stress = stress / dis

    return X, stress, it + 1
def smacof(dissimilarities, *, metric=True, n_components=2, init=None,
           n_init=8, n_jobs=None, max_iter=300, verbose=0, eps=1e-3,
           random_state=None, return_n_iter=False, weight=None):
    """Computes multidimensional scaling using the SMACOF algorithm.

    The SMACOF (Scaling by MAjorizing a COmplicated Function) algorithm is a
    multidimensional scaling algorithm which minimizes an objective function
    (the *stress*) using a majorization technique. Stress majorization, also
    known as the Guttman Transform, guarantees a monotone convergence of
    stress, and is more powerful than traditional techniques such as gradient
    descent.

    The SMACOF algorithm for metric MDS can summarized by the following steps:

    1. Set an initial start configuration, randomly or not.
    2. Compute the stress
    3. Compute the Guttman Transform
    4. Iterate 2 and 3 until convergence.

    The nonmetric algorithm adds a monotonic regression step before computing
    the stress.

    Parameters
    ----------
    dissimilarities : ndarray of shape (n_samples, n_samples)
        Pairwise dissimilarities between the points. Must be symmetric.
    metric : bool, default=True
        Compute metric or nonmetric SMACOF algorithm.
    n_components : int, default=2
        Number of dimensions in which to immerse the dissimilarities. If an
        ``init`` array is provided, this option is overridden and the shape of
        ``init`` is used to determine the dimensionality of the embedding
        space.
    init : ndarray of shape (n_samples, n_components), default=None
        Starting configuration of the embedding to initialize the algorithm. By
        default, the algorithm is initialized with a randomly chosen array.
    n_init : int, default=8
        Number of times the SMACOF algorithm will be run with different
        initializations. The final results will be the best output of the runs,
        determined by the run with the smallest final stress. If ``init`` is
        provided, this option is overridden and a single run is performed.
    n_jobs : int, default=None
        The number of jobs to use for the computation. If multiple
        initializations are used (``n_init``), each run of the algorithm is
        computed in parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    max_iter : int, default=300
        Maximum number of iterations of the SMACOF algorithm for a single run.
    verbose : int, default=0
        Level of verbosity.
    eps : float, default=1e-3
        Relative tolerance with respect to stress at which to declare
        convergence.
    random_state : int, RandomState instance or None, default=None
        Determines the random number generator used to initialize the centers.
        Pass an int for reproducible results across multiple function calls.
        See :term: `Glossary <random_state>`.
    return_n_iter : bool, default=False
        Whether or not to return the number of iterations.
    weight : ndarray of shape (n_samples, n_samples), default=None
        symmetric weighting matrix of similarities.
        In default, all weights are 1.

    Returns
    -------
    X : ndarray of shape (n_samples, n_components)
        Coordinates of the points in a ``n_components``-space.
    stress : float
        The final value of the stress (sum of squared distance of the
        disparities and the distances for all constrained points).
    n_iter : int
        The number of iterations corresponding to the best stress. Returned
        only if ``return_n_iter`` is set to ``True``.

    Notes
    -----
    "Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
    Groenen P. Springer Series in Statistics (1997)

    "Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
    Psychometrika, 29 (1964)

    "Multidimensional scaling by optimizing goodness of fit to a nonmetric
    hypothesis" Kruskal, J. Psychometrika, 29, (1964)
    """
    dissimilarities = check_array(dissimilarities)
    random_state = check_random_state(random_state)

    if hasattr(init, '__array__'):
        # an explicit initialization makes multiple restarts pointless
        init = np.asarray(init).copy()
        if not n_init == 1:
            warnings.warn(
                'Explicit initial positions passed: '
                'performing only one init of the MDS instead of %d'
                % n_init)
            n_init = 1

    best_pos, best_stress = None, None
    if effective_n_jobs(n_jobs) == 1:
        # sequential restarts: keep the configuration with minimal stress
        for it in range(n_init):
            pos, stress, n_iter_ = _smacof_single(
                dissimilarities, metric=metric,
                n_components=n_components, init=init,
                max_iter=max_iter, verbose=verbose,
                eps=eps, random_state=random_state,
                weight=weight)
            if best_stress is None or stress < best_stress:
                best_stress = stress
                best_pos = pos.copy()
                best_iter = n_iter_
    else:
        # parallel restarts: one independent seed per run
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=max(verbose - 1, 0))(
            delayed(_smacof_single)(
                dissimilarities, metric=metric, n_components=n_components,
                init=init, max_iter=max_iter, verbose=verbose, eps=eps,
                random_state=seed, weight=weight)
            for seed in seeds)
        positions, stress, n_iters = zip(*results)
        best = np.argmin(stress)
        best_stress = stress[best]
        best_pos = positions[best]
        best_iter = n_iters[best]

    if return_n_iter:
        return best_pos, best_stress, best_iter
    else:
        return best_pos, best_stress
class Weighted_MDS(BaseEstimator):
    """Multidimensional scaling with weighting options.

    Read more in the :ref:`User Guide <multidimensional_scaling>`.

    Parameters
    ----------
    n_components : int, default=2
        Number of dimensions in which to immerse the dissimilarities.
    metric : bool, default=True
        If ``True``, perform metric MDS; otherwise, perform nonmetric MDS.
    n_init : int, default=4
        Number of times the SMACOF algorithm will be run with different
        initializations. The final results will be the best output of the runs,
        determined by the run with the smallest final stress.
    max_iter : int, default=300
        Maximum number of iterations of the SMACOF algorithm for a single run.
    verbose : int, default=0
        Level of verbosity.
    eps : float, default=1e-3
        Relative tolerance with respect to stress at which to declare
        convergence.
    n_jobs : int, default=None
        The number of jobs to use for the computation. If multiple
        initializations are used (``n_init``), each run of the algorithm is
        computed in parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    random_state : int, RandomState instance or None, default=None
        Determines the random number generator used to initialize the centers.
        Pass an int for reproducible results across multiple function calls.
        See :term: `Glossary <random_state>`.
    dissimilarity : {'euclidean', 'precomputed'}, default='euclidean'
        Dissimilarity measure to use:

        - 'euclidean':
            Pairwise Euclidean distances between points in the dataset.

        - 'precomputed':
            Pre-computed dissimilarities are passed directly to ``fit`` and
            ``fit_transform``.

    Attributes
    ----------
    embedding_ : ndarray of shape (n_samples, n_components)
        Stores the position of the dataset in the embedding space.

    stress_ : float
        The final value of the stress (sum of squared distance of the
        disparities and the distances for all constrained points).

    dissimilarity_matrix_ : ndarray of shape (n_samples, n_samples)
        Pairwise dissimilarities between the points. Symmetric matrix that:

        - either uses a custom dissimilarity matrix by setting `dissimilarity`
          to 'precomputed';
        - or constructs a dissimilarity matrix from data using
          Euclidean distances.

    n_iter_ : int
        The number of iterations corresponding to the best stress.

    Examples
    --------
    >>> from sklearn.datasets import load_digits
    >>> from sklearn.manifold import MDS
    >>> X, _ = load_digits(return_X_y=True)
    >>> X.shape
    (1797, 64)
    >>> embedding = MDS(n_components=2)
    >>> X_transformed = embedding.fit_transform(X[:100])
    >>> X_transformed.shape
    (100, 2)

    References
    ----------
    "Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
    Groenen P. Springer Series in Statistics (1997)

    "Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
    Psychometrika, 29 (1964)

    "Multidimensional scaling by optimizing goodness of fit to a nonmetric
    hypothesis" Kruskal, J. Psychometrika, 29, (1964)
    """

    def __init__(self, n_components=2, *, metric=True, n_init=4,
                 max_iter=300, verbose=0, eps=1e-3, n_jobs=None,
                 random_state=None, dissimilarity="euclidean"):
        # sklearn convention: __init__ only stores the parameters unchanged
        self.n_components = n_components
        self.dissimilarity = dissimilarity
        self.metric = metric
        self.n_init = n_init
        self.max_iter = max_iter
        self.eps = eps
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.random_state = random_state

    @property
    def _pairwise(self):
        # sklearn convention: True when fit expects a precomputed
        # (n_samples, n_samples) matrix rather than raw feature vectors
        return self.dissimilarity == "precomputed"

    def fit(self, X, y=None, init=None, weight=None):
        """
        Computes the position of the points in the embedding space.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or \
                (n_samples, n_samples)
            Input data. If ``dissimilarity=='precomputed'``, the input should
            be the dissimilarity matrix.

        y : Ignored

        init : ndarray of shape (n_samples,), default=None
            Starting configuration of the embedding to initialize the SMACOF
            algorithm. By default, the algorithm is initialized with a randomly
            chosen array.

        weight : ndarray of shape (n_samples, n_samples), default=None
            symmetric weighting matrix of similarities.
            In default, all weights are 1.
        """
        self.fit_transform(X, init=init, weight=weight)
        return self

    def fit_transform(self, X, y=None, init=None, weight=None):
        """
        Fit the data from X, and returns the embedded coordinates.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or \
                (n_samples, n_samples)
            Input data. If ``dissimilarity=='precomputed'``, the input should
            be the dissimilarity matrix.

        y : Ignored

        init : ndarray of shape (n_samples,), default=None
            Starting configuration of the embedding to initialize the SMACOF
            algorithm. By default, the algorithm is initialized with a randomly
            chosen array.

        weight : ndarray of shape (n_samples, n_samples), default=None
            symmetric weighting matrix of similarities.
            In default, all weights are 1.
        """
        X = self._validate_data(X)
        # a square feature matrix is suspicious: the user may have meant
        # to pass a precomputed dissimilarity matrix
        if X.shape[0] == X.shape[1] and self.dissimilarity != "precomputed":
            warnings.warn("The MDS API has changed. ``fit`` now constructs an"
                          " dissimilarity matrix from data. To use a custom "
                          "dissimilarity matrix, set "
                          "``dissimilarity='precomputed'``.")

        if self.dissimilarity == "precomputed":
            self.dissimilarity_matrix_ = X
        elif self.dissimilarity == "euclidean":
            self.dissimilarity_matrix_ = euclidean_distances(X)
        else:
            raise ValueError("Proximity must be 'precomputed' or 'euclidean'."
                             " Got %s instead" % str(self.dissimilarity))

        self.embedding_, self.stress_, self.n_iter_ = smacof(
            self.dissimilarity_matrix_, metric=self.metric,
            n_components=self.n_components, init=init, n_init=self.n_init,
            n_jobs=self.n_jobs, max_iter=self.max_iter, verbose=self.verbose,
            eps=self.eps, random_state=self.random_state,
            return_n_iter=True, weight=weight)

        return self.embedding_
import numpy as np
from scipy.sparse import coo_matrix
def indicator(index_vector, positive=False):
    """ Indicator matrix with one
    column per unique element in vector

    Args:
        index_vector (numpy.ndarray): n_row vector to
            code - discrete values (one dimensional)
        positive (bool): should the function ignore zero
            negative entries in the index_vector?
            Default: false

    Returns:
        indicator_matrix (numpy.ndarray): nrow x nconditions
            indicator matrix
    """
    values = np.unique(index_vector)
    if positive:
        # drop conditions coded as zero or negative
        values = values[values > 0]
    n_rows = np.size(index_vector)
    indicator_matrix = np.zeros((n_rows, values.size))
    for col, value in enumerate(values):
        indicator_matrix[index_vector == value, col] = 1
    return indicator_matrix
def pairwise_contrast(index_vector):
    """ Contrast matrix with one row per unqiue pairwise contrast

    Args:
        index_vector (numpy.ndarray): n_row vector to code
            discrete values (one dimensional)

    Returns:
        numpy.ndarray: indicator_matrix: n_values * (n_values-1)/2 x n_row
            contrast matrix
    """
    values = np.unique(index_vector)
    n_values = values.size
    n_rows = np.size(index_vector)
    n_contrasts = n_values * (n_values - 1) // 2
    contrast_matrix = np.zeros((n_contrasts, n_rows))
    row = 0
    # one row per pair (i, j): +1/n_i on condition i, -1/n_j on condition j
    for i in range(n_values):
        for j in range(i + 1, n_values):
            pos = (index_vector == values[i])
            neg = (index_vector == values[j])
            contrast_matrix[row, pos] = 1. / np.sum(pos)
            contrast_matrix[row, neg] = -1. / np.sum(neg)
            row += 1
    return contrast_matrix
def pairwise_contrast_sparse(index_vector):
    """ Contrast matrix with one row per unqiue pairwise contrast

    Args:
        index_vector (numpy.ndarray): n_row vector to code
            discrete values (one dimensional)

    Returns:
        scipy.sparse.csr_matrix: indicator_matrix:
            n_values * (n_values-1)/2 x n_row contrast matrix
    """
    values = np.unique(index_vector)
    n_values = values.size
    n_rows = np.size(index_vector)
    n_contrasts = n_values * (n_values - 1) // 2
    # per value: number of occurrences and their positions
    counts = np.zeros(n_values, dtype=int)
    positions = [None] * n_values
    for i in range(n_values):
        hits = (index_vector == values[i])
        counts[i] = np.sum(hits)
        positions[i] = list(np.where(index_vector == values[i])[0])
    # build coo triplets: one row per pair (i, j) of conditions
    entries = []
    row_idx = []
    col_idx = []
    row = 0
    for i in range(n_values):
        for j in range(i + 1, n_values):
            entries += [1 / counts[i]] * counts[i]
            row_idx += [row] * counts[i]
            col_idx += positions[i]
            entries += [-1 / counts[j]] * counts[j]
            row_idx += [row] * counts[j]
            col_idx += positions[j]
            row += 1
    contrast = coo_matrix((entries, (row_idx, col_idx)),
                          shape=(n_contrasts, n_rows))
    return contrast.asformat("csr")
def centering(size):
    """ generates a centering matrix

    Args:
        size (int): size of the center matrix

    Returns:
        centering_matrix (numpy.ndarray): size * size
    """
    # I - 1/n everywhere: subtracts the mean from whatever it multiplies
    return np.eye(size) - 1.0 / size
def row_col_indicator_rdm(n_cond):
    """ generates a row and column indicator matrix for an RDM vector

    Args:
        n_cond (int): Number of conditions underlying the RDM

    Returns:
        row_indicator (numpy.ndarray): n_cond (n_cond-1)/2 * n_cond
        col_indicator (numpy.ndarray): n_cond (n_cond-1)/2 * n_cond
    """
    n_dist = n_cond * (n_cond - 1) // 2
    row_indicator = np.zeros((n_dist, n_cond))
    col_indicator = np.zeros((n_dist, n_cond))
    # fill in the off-diagonal pair pattern (shared helper)
    _row_col_indicator(row_indicator, col_indicator, n_cond)
    return (row_indicator, col_indicator)
def row_col_indicator_g(n_cond):
    """ generates a row and column indicator matrix for a vectorized
    second moment matrix. The vectorized version has the off-diagonal elements
    first (like in an RDM), and then appends the diagnoal.
    You can vectorize a second momement matrix G by

    np.diag(row_i@G@col_i.T) = np.sum(col_i*(row_i@G)),axis=1)

    Args:
        n_cond (int): Number of conditions underlying the second moment

    Returns:
        row_indicator (numpy.ndarray): n_cond (n_cond-1)/2+n_cond * n_cond
        col_indicator (numpy.ndarray): n_cond (n_cond-1)/2+n_cond * n_cond
    """
    # off-diagonal pairs plus the n_cond diagonal entries of G
    n_elem = n_cond * (n_cond - 1) // 2 + n_cond
    row_indicator = np.zeros((n_elem, n_cond))
    col_indicator = np.zeros((n_elem, n_cond))
    _row_col_indicator(row_indicator, col_indicator, n_cond)
    # the trailing n_cond rows select the diagonal entries
    np.fill_diagonal(row_indicator[-n_cond:, :], 1)
    np.fill_diagonal(col_indicator[-n_cond:, :], 1)
    return (row_indicator, col_indicator)
def _row_col_indicator(row_i, col_i, n_cond):
""" Helper function that writes the correct pattern for the
row / column indicator matrix
Args:
row_indicator: row_i (numpy.ndarray)
col_indicator: row_i (numpy.ndarray)
n_cond (int): Number of conditions underlying the second moment
"""
j = 0
for i in range(n_cond):
row_i[j:j + n_cond - i - 1, i] = 1
np.fill_diagonal(col_i[j:j + n_cond - i - 1, i + 1:], 1)
j = j + (n_cond - i - 1) | /rsa3-3.0.0.post20201106-py3-none-any.whl/pyrsa/util/matrix.py | 0.849269 | 0.656658 | matrix.py | pypi |
import numpy as np
from scipy.stats import rankdata
from pyrsa.model import Model
from pyrsa.rdm import RDMs
from collections.abc import Iterable
def input_check_model(model, theta=None, fitter=None, N=1):
    """ Checks whether model related inputs to evaluations are valid and
    generates an evaluation-matrix of fitting size.

    Args:
        model : [list of] pyrsa.model.Model
            the models to be evaluated
        theta : numpy.ndarray or list , optional
            Parameter(s) for the model(s). The default is None.
        fitter : [list of] function, optional
            fitting function(s) to overwrite the model default.
            A single function is broadcast to all models.
            The default is None, i.e. keep default
        N : int, optional
            number of samples/rows in evaluations matrix. The default is 1.

    Returns:
        evaluations : numpy.ndarray
            empty evaluations-matrix
        theta : list
            the processed and checked model parameters
        fitter : [list of] functions
            checked and processed fitter functions

    Raises:
        ValueError: if model is neither a Model nor an iterable of Models
    """
    if isinstance(model, Model):
        evaluations = np.zeros(N)
    elif isinstance(model, Iterable):
        if N > 1:
            evaluations = np.zeros((N, len(model)))
        else:
            evaluations = np.zeros(len(model))
        if theta is not None:
            assert isinstance(theta, Iterable), 'If a list of models is' \
                + ' passed theta must be a list of parameters'
            assert len(model) == len(theta), 'there should equally many' \
                + ' models as parameters'
        else:
            theta = [None] * len(model)
        if fitter is None:
            fitter = [None] * len(model)
        elif callable(fitter):
            # a single fitting function applies to every model
            fitter = [fitter] * len(model)
        else:
            assert len(fitter) == len(model), 'if fitters are passed ' \
                + 'there should be as many as models'
        # fill missing entries with the model defaults; build a new list
        # instead of mutating the caller's input
        fitter = [model[k].default_fitter if fitter[k] is None
                  else fitter[k]
                  for k in range(len(model))]
    else:
        raise ValueError('model should be a pyrsa.model.Model or a list of'
                         + ' such objects')
    return evaluations, theta, fitter
def pool_rdm(rdms, method='cosine', sigma_k=None):
    """pools multiple RDMs into the one with maximal performance under a given
    evaluation metric
    rdm_descriptors of the generated rdms are empty

    Args:
        rdms (pyrsa.rdm.RDMs):
            RDMs to be pooled
        method : String, optional
            Which comparison method to optimize for. The default is 'cosine'.
        sigma_k : numpy.ndarray, optional
            covariance matrix of the pattern estimates
            (currently unused, accepted for interface compatibility)

    Returns:
        pyrsa.rdm.RDMs: the pooled RDM, i.e. a RDM with maximal performance
            under the chosen method

    Raises:
        ValueError: for an unknown comparison method
    """
    rdm_vec = rdms.get_vectors()
    if method == 'euclid':
        rdm_vec = _nan_mean(rdm_vec)
    elif method in ('cosine', 'cosine_cov'):
        # normalize each RDM to unit (nan-)rms before averaging
        rdm_vec = rdm_vec / np.sqrt(np.nanmean(rdm_vec ** 2, axis=1,
                                               keepdims=True))
        rdm_vec = _nan_mean(rdm_vec)
    elif method in ('corr', 'corr_cov'):
        # z-score each RDM, average, then shift to non-negative values
        rdm_vec = rdm_vec - np.nanmean(rdm_vec, axis=1, keepdims=True)
        rdm_vec = rdm_vec / np.nanstd(rdm_vec, axis=1, keepdims=True)
        rdm_vec = _nan_mean(rdm_vec)
        rdm_vec = rdm_vec - np.nanmin(rdm_vec)
    elif method in ('spearman', 'rho-a'):
        # (the original had a second, unreachable 'rho-a' branch with the
        # identical body; it is merged here)
        rdm_vec = np.array([_nan_rank_data(v) for v in rdm_vec])
        rdm_vec = _nan_mean(rdm_vec)
    elif method in ('kendall', 'tau-b', 'tau-a'):
        # Warning(...) only constructed an exception object without emitting
        # anything; warnings.warn actually notifies the user
        warnings.warn('Noise ceiling for tau based on averaged ranks!')
        rdm_vec = np.array([_nan_rank_data(v) for v in rdm_vec])
        rdm_vec = _nan_mean(rdm_vec)
    else:
        raise ValueError('Unknown RDM comparison method requested!')
    return RDMs(rdm_vec,
                dissimilarity_measure=rdms.dissimilarity_measure,
                descriptors=rdms.descriptors,
                rdm_descriptors=None,
                pattern_descriptors=rdms.pattern_descriptors)
def _nan_mean(rdm_vector):
""" takes the average over a rdm_vector with nans for masked entries
without a warning
Args:
rdm_vector(numpy.ndarray): set of rdm_vectors to be averaged
Returns:
rdm_mean(numpy.ndarray): the mean rdm
"""
nan_idx = ~np.isnan(rdm_vector[0])
mean_values = np.mean(rdm_vector[:, nan_idx], axis=0)
rdm_mean = np.empty((1, rdm_vector.shape[1])) * np.nan
rdm_mean[:, nan_idx] = mean_values
return rdm_mean
def _nan_rank_data(rdm_vector):
""" rank_data for vectors with nan entries
Args:
rdm_vector(numpy.ndarray): the vector to be rank_transformed
Returns:
ranks(numpy.ndarray): the ranks with nans where the original vector
had nans
"""
ranks_no_nan = rankdata(rdm_vector[~np.isnan(rdm_vector)])
ranks = np.ones_like(rdm_vector) * np.nan
ranks[~np.isnan(rdm_vector)] = ranks_no_nan
return ranks
def pair_tests(evaluations):
    """pairwise bootstrap significance tests for differences in model
    performance.

    Two sided tests, computed as 2 * the smaller tail proportion, with
    1/len(evaluations) added to each p-value.

    Args:
        evaluations (numpy.ndarray):
            bootstrap evaluations, (n_bootstrap x n_model [x ...])

    Returns:
        numpy.ndarray: matrix of p-values for the pairwise bootstrap tests
    """
    # collapse any trailing dimensions by averaging
    while evaluations.ndim > 2:
        evaluations = np.mean(evaluations, axis=-1)
    n_boot = evaluations.shape[0]
    n_model = evaluations.shape[1]
    proportions = np.zeros((n_model, n_model))
    for i in range(n_model - 1):
        for j in range(i + 1, n_model):
            n_smaller = np.sum(evaluations[:, i] < evaluations[:, j])
            n_tied = np.sum(evaluations[:, i] == evaluations[:, j])
            # ties are excluded from the denominator
            p = n_smaller / (n_boot - n_tied)
            proportions[i, j] = p
            proportions[j, i] = p
    # two sided: twice the smaller tail
    proportions = np.minimum(proportions, 1 - proportions) * 2
    # add-one smoothing so p-values are never exactly 0
    proportions = (n_boot - 1) / n_boot * proportions + 1 / n_boot
    np.fill_diagonal(proportions, 1)
    return proportions
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.offsetbox import OffsetImage, AnnotationBbox, DrawingArea
import numpy as np
import PIL
import PIL.ImageOps
import PIL.ImageFilter
from PIL import UnidentifiedImageError
import os
from pyrsa.rdm import RDMs
from pyrsa.util.inference_util import pool_rdm
class Icon:
    """ Icon object, i.e. an object which can be plotted into an axis or as
    an axis label.

    Args:
        image (np.ndarray or PIL.Image or RDMs or Icon)
            the image to use as an icon
            arrays and images should give the image directly
            RDMs takes the average RDM from the object
            If an Icon is passed its image property is used
        string (String)
            string to place on the icon
        col (color definition)
            background / border color
            default: None -> no border or background
        marker (matplotlib markertype)
            sets what kind of symbol to plot
        cmap (color map)
            color map applied to the image
        border_type (String)
            None : default, no border or background is drawn
            'alpha' : puts the color as a background
                where the alpha of the image is not 0
            'pad' : pads the image with the border color -> square border
            'conv' : extends the area by convolving with a circle
        border_width (integer)
            width of the border
        make_square (bool)
            if set to true the image is first reshaped into a square
        circ_cut (flag)
            sets how the icon is cut into circular shape
            None : default, no cutting
            'cut' : sets alpha to 0 out of a circular aperture
            'cosine' : sets alpha to a raised cosine window
            a number between 0 and 1 : a tukey window with the flat proportion
                of the aperture given by the number. For 0 this corresponds
                to the cosine window, for 1 it corresponds to 'cut'.
        resolution (1 or two numbers):
            sets a resolution for the icon to which the image is resized
            prior to all processing. If only one number is provided,
            the image is resized to a square with that size
        marker_front (bool):
            switches whether the marker is plotted in front or behind the
            image. If True the marker is plotted unfilled in front
            If False the marker is plotted behind the image filled.
            default = True
    """

    def __init__(self, image=None, string=None, col=None, marker=None,
                 cmap=None, border_type=None, border_width=2,
                 make_square=False, circ_cut=None, resolution=None,
                 marker_front=True, markeredgewidth=2,
                 fontsize=None, fontname=None, fontcolor=None):
        self.fontsize = fontsize
        self.fontname = fontname
        self.string = string
        self.fontcolor = fontcolor
        self.marker = marker
        self.marker_front = marker_front
        self.markeredgewidth = markeredgewidth
        self._make_square = make_square
        self._border_width = border_width
        self._border_type = border_type
        self._cmap = cmap
        self._col = col
        # _circ_cut and _resolution must exist before the image setter runs,
        # because setting the image triggers recompute_final_image
        self._circ_cut = None
        self._resolution = None
        self.image = image
        if resolution is not None:
            self.resolution = resolution
        self.circ_cut = circ_cut

    @property
    def image(self):
        """the raw input image before any processing"""
        return self._image

    @image.setter
    def image(self, image):
        """ interprets image/converts it into an image"""
        if isinstance(image, Icon):
            self._image = image.image
        elif isinstance(image, RDMs):
            # RDMs input: pool into one average RDM, scaled into [0, 1]
            avg_rdm = pool_rdm(image)
            image = avg_rdm.get_matrices()[0]
            self._image = image / np.max(image)
            if self.resolution is None:
                self._resolution = np.array(100)
        elif image is not None:
            self._image = image
        else:
            self._image = None
        self.recompute_final_image()

    @property
    def string(self):
        """text drawn on top of the icon"""
        return self._string

    @string.setter
    def string(self, string):
        if string is None or isinstance(string, str):
            self._string = string
        else:
            raise ValueError('String must be a string')

    @property
    def col(self):
        """background / border color"""
        return self._col

    @col.setter
    def col(self, col):
        self._col = col
        self.recompute_final_image()

    @property
    def cmap(self):
        """colormap applied to array images"""
        return self._cmap

    @cmap.setter
    def cmap(self, cmap):
        self._cmap = cmap
        self.recompute_final_image()

    @property
    def make_square(self):
        return self._make_square

    @make_square.setter
    def make_square(self, make_square):
        self._make_square = make_square
        self.recompute_final_image()

    @property
    def border_width(self):
        return self._border_width

    @border_width.setter
    def border_width(self, border_width):
        self._border_width = border_width
        self.recompute_final_image()

    @property
    def border_type(self):
        return self._border_type

    @border_type.setter
    def border_type(self, border_type):
        self._border_type = border_type
        self.recompute_final_image()

    @property
    def resolution(self):
        return self._resolution

    @resolution.setter
    def resolution(self, resolution):
        if resolution is not None:
            self._resolution = np.array(resolution)
        else:
            self._resolution = None
        self.recompute_final_image()

    @property
    def circ_cut(self):
        return self._circ_cut

    @circ_cut.setter
    def circ_cut(self, circ_cut):
        # stored as the flat proportion of a tukey window:
        # 1 -> hard cut, 0 -> raised cosine
        if circ_cut is None:
            self._circ_cut = None
        elif circ_cut == 'cut':
            self._circ_cut = 1
        elif circ_cut == 'cosine':
            self._circ_cut = 0
        elif circ_cut <= 1 and circ_cut >= 0:
            self._circ_cut = circ_cut
        else:
            raise ValueError('circ_cut must be in [0,1]')
        self.recompute_final_image()

    def recompute_final_image(self):
        """ computes the icon image from the parameters

        This function handles most of the image processing and must be run
        again if any properties are changed. If you use set to change
        properties this is automatically run.
        """
        if self._image is None:
            self.final_image = None
            return
        if isinstance(self._image, np.ndarray):
            # np.float was removed in numpy >= 1.24; the builtin float
            # performs the identical dtype comparison (matches float64)
            if self._image.dtype == float and np.any(self._image > 1):
                im = self._image / 255
            else:
                im = self._image
            if self.cmap is not None:
                im = cm.get_cmap(self.cmap)(im)
            im = PIL.Image.fromarray((im * 255).astype(np.uint8))
        else:  # we hope it is a PIL image or equivalent
            im = self._image
        im = im.convert('RGBA')
        if self.make_square:
            # note: this stretches the shorter side; aspect is not kept
            new_size = max(im.width, im.height)
            im = im.resize((new_size, new_size), PIL.Image.NEAREST)
        if self.resolution is not None:
            if self.resolution.size == 1:
                im = im.resize((self.resolution, self.resolution),
                               PIL.Image.NEAREST)
            else:
                im = im.resize(self.resolution,
                               PIL.Image.NEAREST)
        if self.circ_cut is not None:
            # build a tukey-window alpha mask over the image:
            # 1 inside r <= circ_cut, cosine falloff to 0 at r == 1
            middle = np.array(im.size) / 2
            x = np.arange(im.size[0]) - middle[0] + 0.5
            x = x / np.max(np.abs(x))
            y = np.arange(im.size[1]) - middle[1] + 0.5
            y = y / np.max(np.abs(y))
            yy, xx = np.meshgrid(y, x)
            r = np.sqrt(xx ** 2 + yy ** 2)
            alpha = np.empty(r.shape)
            alpha[r > 1] = 0
            alpha[r <= self.circ_cut] = 1
            val = (r > self.circ_cut) & (r <= 1)
            alpha[val] = (
                0.5 + 0.5 * np.cos(
                    np.pi * (r[val] - self.circ_cut)
                    / (1 - self.circ_cut)))
            # combine with the image's existing alpha channel
            alpha = alpha.T * np.array(im.getchannel('A'))
            alpha = PIL.Image.fromarray(np.uint8(alpha))
            im.putalpha(alpha)
        if self.col is not None:
            if self.border_type is None:
                pass
            elif self.border_type == 'alpha':
                # color as background wherever the image is not transparent
                bg_alpha = np.array(im.getchannel('A'))
                bg_alpha = bg_alpha > 0
                bg_alpha = PIL.Image.fromarray(255 * np.uint8(bg_alpha))
                bg = PIL.Image.new('RGBA', im.size, color=self.col)
                bg.putalpha(bg_alpha)
                im = PIL.Image.alpha_composite(bg, im)
            elif self.border_type == 'pad':
                im = PIL.ImageOps.expand(
                    im,
                    border=self.border_width,
                    fill=self.col)
            elif self.border_type == 'conv':
                # extend the alpha mask by a box blur -> rounded border
                im = PIL.ImageOps.expand(
                    im,
                    border=self.border_width,
                    fill=(0, 0, 0, 0))
                bg_alpha = im.getchannel('A')
                bg_alpha = bg_alpha.filter(PIL.ImageFilter.BoxBlur(
                    self.border_width))
                bg_alpha = np.array(bg_alpha)
                bg_alpha = 255 * np.uint8(bg_alpha > 0)
                bg_alpha = PIL.Image.fromarray(bg_alpha)
                bg = PIL.Image.new('RGBA', im.size, color=self.col)
                bg.putalpha(bg_alpha)
                im = PIL.Image.alpha_composite(bg, im)
        self.final_image = im

    def plot(self, x, y, ax=None, size=None):
        """ plots the icon into an axis

        Args:
            x (float)
                x-position
            y (float)
                y-position
            ax (matplotlib axis)
                the axis to plot in
            size : float
                size of the icon scaling the image
        """
        if ax is None:
            ax = plt.gca()
        if size is None:
            size = 1
        if self.final_image is not None:
            imagebox = OffsetImage(self.final_image, zoom=size)
            ab = AnnotationBbox(
                imagebox, (x, y), frameon=False,
                pad=0)
            ax.add_artist(ab)
            zorder = ab.zorder
        else:
            zorder = 0
        if self.marker:
            if self.final_image is not None:
                markersize = max(self.final_image.size)
            else:
                markersize = 50
            markersize = markersize * size
            # marker in front is unfilled, behind it is filled
            if self.marker_front:
                plt.plot(x, y, marker=self.marker, markeredgecolor=self.col,
                         markerfacecolor=(0, 0, 0, 0), markersize=markersize,
                         zorder=zorder + 0.1,
                         markeredgewidth=self.markeredgewidth)
            else:
                plt.plot(x, y, marker=self.marker, markeredgecolor=self.col,
                         markerfacecolor=self.col, markersize=markersize,
                         zorder=zorder - 0.1,
                         markeredgewidth=self.markeredgewidth)
        if self.string is not None:
            ax.annotate(self.string, (x, y),
                        horizontalalignment='center',
                        verticalalignment='center',
                        zorder=zorder + 0.2,
                        fontsize=self.fontsize, fontname=self.fontname,
                        color=self.fontcolor)

    def x_tick_label(self, x, size, offset=7, ax=None):
        """
        uses the icon as a ticklabel at location x

        Args:
            x (float)
                the position of the tick
            size (float)
                scaling the size of the icon
            offset (integer)
                how far the icon should be from the axis in points
            ax (matplotlib axis)
                the axis to put the label on
        """
        if ax is None:
            ax = plt.gca()
        if self.final_image is not None:
            imagebox = OffsetImage(self.final_image, zoom=size)
            ab = AnnotationBbox(
                imagebox, (x, 0),
                xybox=(0, -offset),
                xycoords=('data', 'axes fraction'),
                box_alignment=(.5, 1),
                boxcoords='offset points',
                bboxprops={'edgecolor': 'none', 'facecolor': 'none'},
                arrowprops={
                    'arrowstyle': '-',
                    'shrinkA': 0,
                    'shrinkB': 1
                    },
                pad=0.1)
            zorder = ab.zorder
            ax.add_artist(ab)
        else:
            zorder = 0
        if self.marker:
            if self.final_image is not None:
                markersize = max(self.final_image.size)
            else:
                markersize = 50
            markersize = markersize * size
            d = DrawingArea(markersize, markersize)
            if self.marker_front:
                zorder_marker = zorder + 0.1
            else:
                zorder_marker = zorder - 0.1
            d.set_zorder(zorder_marker)
            d.set_alpha(0)
            if self.marker_front:
                d.add_artist(plt.Line2D(
                    [markersize / 2], [markersize / 2],
                    marker=self.marker, markeredgecolor=self.col,
                    markerfacecolor=(0, 0, 0, 0), markersize=markersize,
                    markeredgewidth=self.markeredgewidth,
                    transform=d.get_transform(),
                    zorder=zorder_marker))
            else:
                d.add_artist(plt.Line2D(
                    [markersize / 2], [markersize / 2],
                    marker=self.marker, markeredgecolor=self.col,
                    markerfacecolor=self.col, markersize=markersize,
                    markeredgewidth=self.markeredgewidth,
                    transform=d.get_transform(),
                    zorder=zorder_marker))
            ab_marker = AnnotationBbox(
                d, (x, 0),
                xybox=(0, -offset),
                xycoords=('data', 'axes fraction'),
                box_alignment=(.5, 1),
                boxcoords='offset points',
                bboxprops={'edgecolor': 'none', 'facecolor': 'none'},
                arrowprops={
                    'arrowstyle': '-',
                    'shrinkA': 0,
                    'shrinkB': 1
                    },
                pad=0.1)
            ab_marker.set_zorder(zorder_marker)
            ab_marker.set_alpha(0)
            ax.add_artist(ab_marker)
        if self.string is not None:
            ax.annotate(
                self.string, (x, 0),
                xytext=(0, -offset),
                xycoords=('data', 'axes fraction'),
                textcoords='offset points',
                horizontalalignment='center',
                verticalalignment='top',
                arrowprops={
                    'arrowstyle': '-',
                    'shrinkA': 0,
                    'shrinkB': 1
                    },
                zorder=zorder + 0.2,
                fontsize=self.fontsize, fontname=self.fontname,
                color=self.fontcolor)

    def y_tick_label(self, y, size, offset=7, ax=None):
        """
        uses the icon as a ticklabel at location y

        Args:
            y (float)
                the position of the tick
            size (float)
                scaling the size of the icon
            offset (integer)
                how far the icon should be from the axis in points
            ax (matplotlib axis)
                the axis to put the label on
        """
        if ax is None:
            ax = plt.gca()
        if self.final_image is not None:
            imagebox = OffsetImage(self.final_image, zoom=size)
            ab = AnnotationBbox(
                imagebox, (0, y),
                xybox=(-offset, 0),
                xycoords=('axes fraction', 'data'),
                box_alignment=(1, .5),
                boxcoords='offset points',
                bboxprops={'edgecolor': 'none', 'facecolor': 'none'},
                arrowprops={
                    'arrowstyle': '-',
                    'shrinkA': 0,
                    'shrinkB': 1
                    },
                pad=0.1)
            ax.add_artist(ab)
            zorder = ab.zorder
        else:
            zorder = 0
        if self.marker:
            if self.final_image is not None:
                markersize = max(self.final_image.size)
            else:
                markersize = 50
            markersize = markersize * size
            d = DrawingArea(markersize, markersize)
            if self.marker_front:
                zorder_marker = zorder + 0.1
            else:
                zorder_marker = zorder - 0.1
            d.set_zorder(zorder_marker)
            d.set_alpha(0)
            if self.marker_front:
                d.add_artist(plt.Line2D(
                    [markersize / 2], [markersize / 2],
                    marker=self.marker, markeredgecolor=self.col,
                    markerfacecolor=(0, 0, 0, 0), markersize=markersize,
                    markeredgewidth=self.markeredgewidth,
                    transform=d.get_transform(),
                    zorder=zorder_marker))
            else:
                d.add_artist(plt.Line2D(
                    [markersize / 2], [markersize / 2],
                    marker=self.marker, markeredgecolor=self.col,
                    markerfacecolor=self.col, markersize=markersize,
                    markeredgewidth=self.markeredgewidth,
                    transform=d.get_transform(),
                    zorder=zorder_marker))
            ab_marker = AnnotationBbox(
                d, (0, y),
                xybox=(-offset, 0),
                xycoords=('axes fraction', 'data'),
                box_alignment=(1, 0.5),
                boxcoords='offset points',
                bboxprops={'edgecolor': 'none', 'facecolor': 'none'},
                arrowprops={
                    'arrowstyle': '-',
                    'shrinkA': 0,
                    'shrinkB': 1
                    },
                pad=0.1)
            ab_marker.set_zorder(zorder_marker)
            ab_marker.set_alpha(0)
            ax.add_artist(ab_marker)
        if self.string is not None:
            ax.annotate(
                self.string, (0, y),
                xytext=(-offset, 0),
                xycoords=('axes fraction', 'data'),
                textcoords='offset points',
                horizontalalignment='right',
                verticalalignment='center',
                arrowprops={
                    'arrowstyle': '-',
                    'shrinkA': 0,
                    'shrinkB': 1
                    },
                zorder=zorder + 1,
                fontsize=self.fontsize, fontname=self.fontname,
                color=self.fontcolor)
def icons_from_folder(folder, resolution=None, col=None,
                      cmap=None, border_type=None, border_width=2,
                      make_square=False, circ_cut=None):
    """ generates a dictionary of Icons for all images in a folder

    Files that cannot be opened as images are silently skipped.

    Args:
        folder (str): path of the folder to scan for image files
        other arguments are passed through to the Icon constructor

    Returns:
        dict: maps each loadable filename to its Icon
    """
    icons = dict()
    for filename in os.listdir(folder):
        try:
            # open relative to the folder, not the current working directory
            im = PIL.Image.open(os.path.join(folder, filename))
            icons[filename] = Icon(
                image=im, col=col, resolution=resolution,
                cmap=cmap, border_type=border_type,
                border_width=border_width,
                make_square=make_square, circ_cut=circ_cut)
        except (FileNotFoundError, UnidentifiedImageError, IsADirectoryError,
                PermissionError):
            pass
    return icons
import numpy as np
import matplotlib.pyplot as plt
from pyrsa.rdm import rank_transform
from pyrsa.vis.colors import rdm_colormap
def show_rdm(rdm, do_rank_transform=False, pattern_descriptor=None,
             cmap=None, rdm_descriptor=None, dpi=300, filename=None,
             show_colorbar=False):
    """shows an rdm object

    Parameters
    ----------
    rdm : pyrsa.rdm.RDMs
        RDMs object to be plotted
    do_rank_transform : bool
        whether we should do a rank transform before plotting
    pattern_descriptor : String
        name of a pattern descriptor which will be used as an axis label
    cmap : color map
        colormap or identifier for a colormap to be used
        conventions as for matplotlib colormaps
    rdm_descriptor : String
        name of a rdm descriptor which will be used as a title per RDM
    dpi : int
        dots per inch (determines visual resolution of plots)
    filename : str
        relative path to which the plot will be saved
        (if None: do not save plot)
    show_colorbar : bool
        whether to display a colorbar next to each RDM
    """
    plt.figure(dpi=dpi)
    if cmap is None:
        cmap = rdm_colormap()
    if do_rank_transform:
        rdm = rank_transform(rdm)
    rdm_mat = rdm.get_matrices()
    if rdm.n_rdm > 1:
        # near-square grid with one extra panel for the average RDM;
        # subplot indices must be ints (floats raise in newer matplotlib)
        m = int(np.ceil(np.sqrt(rdm.n_rdm + 1)))
        n = int(np.ceil((1 + rdm.n_rdm) / m))
        for idx in range(rdm.n_rdm):
            plt.subplot(n, m, idx + 1)
            image = plt.imshow(rdm_mat[idx], cmap=cmap)
            _add_descriptor_labels(rdm, pattern_descriptor)
            if rdm_descriptor in rdm.rdm_descriptors:
                plt.title(rdm.rdm_descriptors[rdm_descriptor][idx])
            elif isinstance(rdm_descriptor, str):
                plt.title(rdm_descriptor)
            if show_colorbar:
                plt.colorbar(image)
        # the last panel shows the average over all RDMs
        plt.subplot(n, m, n * m)
        image = plt.imshow(np.mean(rdm_mat, axis=0), cmap=cmap)
        _add_descriptor_labels(rdm, pattern_descriptor)
        plt.title('Average')
        if show_colorbar:
            plt.colorbar(image)
    elif rdm.n_rdm == 1:
        image = plt.imshow(rdm_mat[0], cmap=cmap)
        _add_descriptor_labels(rdm, pattern_descriptor)
        if rdm_descriptor in rdm.rdm_descriptors:
            plt.title(rdm.rdm_descriptors[rdm_descriptor][0])
        elif isinstance(rdm_descriptor, str):
            plt.title(rdm_descriptor)
        if show_colorbar:
            plt.colorbar(image)
    if isinstance(filename, str):
        fig1 = plt.gcf()
        fig1.savefig(filename, bbox_inches='tight')
    plt.show()
def _add_descriptor_labels(rdm, descriptor, ax=None):
    """ puts a pattern descriptor on the axes as tick labels;
    clears the ticks when no descriptor is given """
    if ax is None:
        ax = plt.gca()
    if descriptor is None:
        # no descriptor -> no ticks at all
        ax.set_xticks([])
        ax.set_yticks([])
        return
    labels = rdm.pattern_descriptors[descriptor]
    ticks = np.arange(rdm.n_cond)
    font_x = {'fontsize': 'xx-small',
              'fontweight': 'normal',
              'verticalalignment': 'center',
              'horizontalalignment': 'center'}
    font_y = dict(font_x, horizontalalignment='right')
    ax.set_xticks(ticks)
    ax.set_xticklabels(labels, font_x)
    ax.set_yticks(ticks)
    ax.set_yticklabels(labels, font_y)
    plt.ylim(rdm.n_cond - 0.5, -0.5)
    plt.xlim(-0.5, rdm.n_cond - 0.5)
    plt.setp(ax.get_xticklabels(), rotation=90, ha="right",
             rotation_mode="anchor")
import numpy as np
import scipy.optimize as opt
from pyrsa.rdm import compare
def fit_mock(model, data, method='cosine', pattern_idx=None,
             pattern_descriptor=None):
    """ placeholder fitter with the standard fitter interface that simply
    returns an all-zero parameter vector of the model's size

    Args:
        model(pyrsa.model.Model): model to be fit
        data(pyrsa.rdm.RDMs): Data to fit to (ignored)
        method(String): Evaluation method (ignored)
        pattern_idx(numpy.ndarray): Which patterns are sampled (ignored)
        pattern_descriptor(String): Which descriptor is used (ignored)

    Returns:
        theta(numpy.ndarray): all-zero parameter vector
    """
    return np.zeros(model.n_param)
def fit_select(model, data, method='cosine', pattern_idx=None,
               pattern_descriptor=None):
    """ fits a selection model by evaluating every candidate rdm and
    returning the index of the best performing one.
    Works only for ModelSelect.

    Args:
        model(pyrsa.model.Model): model to be fit
        data(pyrsa.rdm.RDMs): Data to fit to
        method(String): Evaluation method
        pattern_idx(numpy.ndarray): Which patterns are sampled
        pattern_descriptor(String): Which descriptor is used

    Returns:
        theta(int): index of the best performing rdm
    """
    scores = []
    for idx in range(model.n_rdm):
        pred = model.predict_rdm(idx)
        if pattern_idx is not None and pattern_descriptor is not None:
            # restrict the prediction to the sampled patterns
            pred = pred.subsample_pattern(pattern_descriptor, pattern_idx)
        scores.append(np.mean(compare(pred, data, method=method)))
    return np.argmax(scores)
def fit_optimize(model, data, method='cosine', pattern_idx=None,
                 pattern_descriptor=None):
    """
    fits theta by general purpose numerical optimization of the
    (negative) evaluation metric.
    currently allowed for ModelWeighted only

    Args:
        model(Model): the model to be fit
        data(pyrsa.rdm.RDMs): data to be fit
        method(String, optional): evaluation metric The default is 'cosine'.
        pattern_idx(numpy.ndarray, optional)
            sampled patterns The default is None.
        pattern_descriptor (String, optional)
            descriptor used for fitting. The default is None.

    Returns:
        numpy.ndarray: theta, parameter vector for the model
    """
    # random initialization, then unconstrained minimization of the loss
    start = np.random.rand(model.n_param)
    result = opt.minimize(
        lambda theta: _loss(theta, model, data, method=method,
                            pattern_idx=pattern_idx,
                            pattern_descriptor=pattern_descriptor),
        start)
    return result.x
def fit_interpolate(model, data, method='cosine', pattern_idx=None,
                    pattern_descriptor=None):
    """
    fits theta by bounded scalar optimization over each pair of
    neighboring rdms and picking the best pair.
    allowed for ModelInterpolate only

    Args:
        model(Model): the model to be fit
        data(pyrsa.rdm.RDMs): data to be fit
        method(String, optional): evaluation metric The default is 'cosine'.
        pattern_idx(numpy.ndarray, optional)
            sampled patterns The default is None.
        pattern_descriptor (String, optional)
            descriptor used for fitting. The default is None.

    Returns:
        numpy.ndarray: theta, parameter vector for the model
    """
    def pair_loss(w, first):
        # interpolation weight w on rdm `first`, (1 - w) on its neighbor
        theta = np.zeros(model.n_param)
        theta[first] = w
        theta[first + 1] = 1 - w
        return _loss(theta, model, data, method=method,
                     pattern_idx=pattern_idx,
                     pattern_descriptor=pattern_descriptor)

    results = [
        opt.minimize_scalar(pair_loss, np.array([.5]), args=(i,),
                            method='bounded', bounds=(0, 1))
        for i in range(model.n_rdm - 1)]
    best = np.argmin([r.fun for r in results])
    theta = np.zeros(model.n_rdm)
    theta[best] = results[best].x
    theta[best + 1] = 1 - results[best].x
    return theta
def _loss(theta, model, data, method='cosine', cov=None,
          pattern_descriptor=None, pattern_idx=None):
    """negative mean model performance for a parameter value, used as the
    objective for the optimization based fitters

    Args:
        theta(numpy.ndarray): evaluated parameter value
        model(Model): the model to be fit
        data(pyrsa.rdm.RDMs): data to be fit
        method(String, optional): evaluation metric The default is 'cosine'.
        pattern_idx(numpy.ndarray, optional)
            sampled patterns The default is None.
        pattern_descriptor (String, optional)
            descriptor used for fitting. The default is None.
        cov(numpy.ndarray, optional):
            Covariance matrix for likelihood based evaluation.
            It is ignored otherwise. The default is None.

    Returns:
        numpy.ndarray: loss
    """
    pred = model.predict_rdm(theta)
    if pattern_idx is not None and pattern_descriptor is not None:
        # restrict the prediction to the sampled patterns
        pred = pred.subsample_pattern(pattern_descriptor, pattern_idx)
    return -np.mean(compare(pred, data, method=method))
import numpy as np
from pyrsa.rdm import RDMs
from pyrsa.rdm import rdms_from_dict
from pyrsa.util.rdm_utils import batch_to_vectors
from .fitter import fit_mock, fit_optimize, fit_select, fit_interpolate
class Model:
    """
    Abstract base class for models.

    Declares the members every model needs but implements no interesting
    behavior itself. Inherit from this class to define concrete model
    types.
    """

    def __init__(self, name):
        self.name = name
        self.n_param = 0
        self.default_fitter = fit_mock
        self.rdm_obj = None

    def predict(self, theta=None):
        """ Returns the predicted rdm vector

        Args:
            theta(numpy.ndarray): the model parameter vector (one dimensional)

        Returns:
            numpy.ndarray: rdm vector
        """
        raise NotImplementedError(
            "Predict function not implemented in used model class!")

    def predict_rdm(self, theta=None):
        """ Returns the predicted rdm as an object

        Args:
            theta(numpy.ndarray): the model parameter vector (one dimensional)

        Returns:
            numpy.ndarray: rdm object
        """
        raise NotImplementedError(
            "Predict rdm function not implemented in used model class!")

    def fit(self, data):
        """ fit the model to a RDM object data

        Args:
            data(RDM object): the RDMs to be fit with the model

        Returns:
            theta(numpy.ndarray): parameter vector (one dimensional)
        """
        return self.default_fitter(self, data, method='cosine',
                                   pattern_idx=None,
                                   pattern_descriptor=None)

    def to_dict(self):
        """ Converts the model into a dictionary, which can be used for saving

        Returns:
            model_dict(dict): A dictionary containting all data needed to
                recreate the object
        """
        return {
            'rdm': self.rdm_obj.to_dict() if self.rdm_obj else None,
            'name': self.name,
            'type': type(self).__name__,
        }
class ModelFixed(Model):
    """
    Fixed model

    A parameter-free model that always predicts the same RDM.
    Accepts an RDMs object, a dissimilarity vector, or a dissimilarity
    matrix to define that RDM.
    """

    def __init__(self, name, rdm):
        """
        Args:
            name(String): Model name
            rdm(pyrsa.rdm.RDMs): rdms in one object
        """
        super().__init__(name)
        if isinstance(rdm, RDMs):
            # average over the contained rdms
            self.rdm_obj = rdm
            self.rdm = np.mean(rdm.get_vectors(), axis=0)
            self.n_cond = rdm.n_cond
        elif rdm.ndim == 1:  # a vector was passed
            self.rdm_obj = RDMs(np.array([rdm]))
            self.n_cond = (1 + np.sqrt(1 + 8 * rdm.size)) / 2
            if self.n_cond % 1 != 0:
                raise NameError(
                    "RDM vector needs to have size of ncond*(ncond-1)/2")
            self.rdm = rdm
        else:  # a matrix was passed
            self.rdm_obj = RDMs(np.array([rdm]))
            self.rdm = batch_to_vectors(np.array([rdm]))[0]
            self.n_cond = self.rdm_obj.n_cond
        self.n_param = 0
        self.default_fitter = fit_mock
        self.rdm_obj.pattern_descriptors['index'] = np.arange(self.n_cond)

    def predict(self, theta=None):
        """ Returns the fixed rdm vector; theta is ignored.

        Args:
            theta(numpy.ndarray): the model parameter vector (one dimensional)

        Returns:
            rdm vector
        """
        return self.rdm

    def predict_rdm(self, theta=None):
        """ Returns the fixed rdm object; theta is ignored.

        Args:
            theta(numpy.ndarray): the model parameter vector (one dimensional)

        Returns:
            pyrsa.rdm.RDMs: rdm object
        """
        return self.rdm_obj
class ModelSelect(Model):
    """
    Selection model

    Holds a set of RDMs and predicts one of them; theta is the integer
    index of the selected RDM.
    """

    def __init__(self, name, rdm):
        super().__init__(name)
        if isinstance(rdm, RDMs):
            self.rdm_obj = rdm
            self.rdm = rdm.get_vectors()
        elif rdm.ndim == 2:  # vectors were supplied
            self.rdm_obj = RDMs(rdm)
            self.n_cond = (1 + np.sqrt(1 + 8 * rdm.shape[1])) / 2
            if self.n_cond % 1 != 0:
                raise NameError(
                    "RDM vector needs to have size of ncond*(ncond-1)/2")
            self.rdm = rdm
        else:  # matrices were supplied
            self.rdm_obj = RDMs(rdm)
            # NOTE(review): ModelFixed indexes batch_to_vectors(...)[0]
            # here; confirm whether this branch should do the same
            self.rdm = batch_to_vectors(rdm)
        self.n_param = 1
        self.n_rdm = self.rdm_obj.n_rdm
        self.default_fitter = fit_select

    def predict(self, theta=0):
        """ Returns the rdm vector selected by index theta.

        Args:
            theta(int): index of the rdm to predict

        Returns:
            rdm vector
        """
        return self.rdm[theta]

    def predict_rdm(self, theta=0):
        """ Returns the rdm object selected by index theta.

        Args:
            theta(int): index of the rdm to predict

        Returns:
            pyrsa.rdm.RDMs: rdm object
        """
        return self.rdm_obj[theta]
class ModelWeighted(Model):
    """
    weighted Model

    predicts the RDM as a weighted sum of its stored RDMs; theta holds
    one weight per stored RDM.
    """

    def __init__(self, name, rdm):
        super().__init__(name)
        if isinstance(rdm, RDMs):
            self.rdm_obj = rdm
            self.rdm = rdm.get_vectors()
        elif rdm.ndim == 2:  # vectors were supplied
            self.rdm_obj = RDMs(rdm)
            self.n_cond = (1 + np.sqrt(1 + 8 * rdm.shape[1])) / 2
            if self.n_cond % 1 != 0:
                raise NameError(
                    "RDM vector needs to have size of ncond*(ncond-1)/2")
            self.rdm = rdm
        else:  # matrices were supplied
            self.rdm_obj = RDMs(rdm)
            # NOTE(review): ModelFixed indexes batch_to_vectors(...)[0]
            # here; confirm whether this branch should do the same
            self.rdm = batch_to_vectors(rdm)
        self.n_param = self.rdm_obj.n_rdm
        self.n_rdm = self.rdm_obj.n_rdm
        self.default_fitter = fit_optimize

    def predict(self, theta=None):
        """ Returns the weighted sum of the rdm vectors.

        Args:
            theta(numpy.ndarray): weights for the rdms
                (defaults to all ones)

        Returns:
            rdm vector
        """
        if theta is None:
            theta = np.ones(self.n_rdm)
        weights = np.array(theta).reshape(-1)
        return np.matmul(self.rdm.T, weights)

    def predict_rdm(self, theta=None):
        """ Returns the weighted sum as an rdm object;
        negative weights are clipped to zero.

        Args:
            theta(numpy.ndarray): weights for the rdms
                (defaults to all ones)

        Returns:
            pyrsa.rdm.RDMs: rdm object
        """
        if theta is None:
            theta = np.ones(self.n_rdm)
        weights = np.array(np.maximum(theta, 0)).reshape(-1)
        dissimilarities = np.matmul(self.rdm.T, weights)
        return RDMs(
            dissimilarities.reshape(1, -1),
            dissimilarity_measure=self.rdm_obj.dissimilarity_measure,
            descriptors=self.rdm_obj.descriptors,
            pattern_descriptors=self.rdm_obj.pattern_descriptors)
class ModelInterpolate(Model):
    """
    inpterpolation Model
    models the RDM as an interpolation between 2 neigboring rdms
    """
    # Model Constructor
    def __init__(self, name, rdm):
        """
        Args:
            name(str): name of the model
            rdm: the component RDMs, either a pyrsa.rdm.RDMs object,
                an n_rdm x n_dissim matrix of vectorized RDMs, or a
                stack of square RDM matrices
        """
        Model.__init__(self, name)
        if isinstance(rdm, RDMs):
            self.rdm_obj = rdm
            self.rdm = rdm.get_vectors()
        elif rdm.ndim == 2:  # User supplied vectors
            self.rdm_obj = RDMs(rdm)
            # invert n_dissim = n_cond*(n_cond-1)/2 to recover n_cond
            self.n_cond = (1 + np.sqrt(1 + 8 * rdm.shape[1])) / 2
            if self.n_cond % 1 != 0:
                raise NameError(
                    "RDM vector needs to have size of ncond*(ncond-1)/2")
            self.rdm = rdm
        else:  # User passed matrixes
            self.rdm_obj = RDMs(rdm)
            # NOTE(review): batch_to_vectors may return a tuple
            # (vectors, n_rdm, n_cond) rather than just the vectors --
            # confirm against pyrsa's rdm utilities
            self.rdm = batch_to_vectors(rdm)
        self.n_param = self.rdm_obj.n_rdm
        self.n_rdm = self.rdm_obj.n_rdm
        self.default_fitter = fit_interpolate
    def predict(self, theta=None):
        """ Returns the predicted rdm vector

        theta are the weights for the different rdms; the default mixes
        the first two components with weight 0.5 each (assumes at least
        two component RDMs).

        Args:
            theta(numpy.ndarray): the model parameter vector (one dimensional)

        Returns:
            rdm vector (weighted sum of the component RDM vectors)
        """
        if theta is None:
            theta = np.zeros(self.n_rdm)
            theta[0] = 0.5
            theta[1] = 0.5
        theta = np.array(theta)
        return np.matmul(self.rdm.T, theta.reshape(-1))
    def predict_rdm(self, theta=None):
        """ Returns the predicted rdm as an RDMs object

        theta are the weights for the different rdms; negative weights
        are clamped to zero.  Note the default here is all-ones, unlike
        predict's 0.5/0.5 default.

        Args:
            theta(numpy.ndarray): the model parameter vector (one dimensional)

        Returns:
            pyrsa.rdm.RDMs: rdm object
        """
        if theta is None:
            theta = np.ones(self.n_rdm)
        theta = np.maximum(theta, 0)
        theta = np.array(theta)
        dissimilarities = np.matmul(self.rdm.T, theta.reshape(-1))
        rdms = RDMs(
            dissimilarities.reshape(1, -1),
            dissimilarity_measure=self.rdm_obj.dissimilarity_measure,
            descriptors=self.rdm_obj.descriptors,
            pattern_descriptors=self.rdm_obj.pattern_descriptors)
        return rdms
def model_from_dict(model_dict):
    """ recreates a model object from a dictionary

    Args:
        model_dict(dict): The dictionary to be turned into a model

    Returns
        model(Model): The recreated model

    Raises:
        ValueError: if model_dict['type'] is not a known model type
    """
    # Previously rdm_obj stayed unbound when model_dict['rdm'] was
    # falsy and model stayed unbound for unknown types, producing
    # confusing UnboundLocalErrors.
    rdm_obj = None
    if model_dict['rdm']:
        rdm_obj = rdms_from_dict(model_dict['rdm'])
    model_type = model_dict['type']
    if model_type == 'Model':
        model = Model(model_dict['name'])
    elif model_type == 'ModelFixed':
        model = ModelFixed(model_dict['name'], rdm_obj)
    elif model_type == 'ModelSelect':
        model = ModelSelect(model_dict['name'], rdm_obj)
    elif model_type == 'ModelWeighted':
        model = ModelWeighted(model_dict['name'], rdm_obj)
    elif model_type == 'ModelInterpolate':
        model = ModelInterpolate(model_dict['name'], rdm_obj)
    else:
        raise ValueError(
            "Unknown model type '{}' in dictionary".format(model_type))
    return model
import base64
import binascii
import hashlib
import json
import re
__all__ = ['PublicKey', 'BadSignatureError', 'verify_jwt']
# Map from ASN.1 DigestInfo prefix (as it appears in a PKCS#1 v1.5
# signature block after padding removal) to the hashlib algorithm name.
ASN1_HASH = {b'0Q0\r\x06\t`\x86H\x01e\x03\x04\x02\x03\x05\x00\x04@': 'sha512',
             b'0A0\r\x06\t`\x86H\x01e\x03\x04\x02\x02\x05\x00\x040': 'sha384',
             b'010\r\x06\t`\x86H\x01e\x03\x04\x02\x01\x05\x00\x04 ': 'sha256'}
# json web algorithms to Python hash functions
ALG_HASH = {'RS256': 'sha256', 'RS384': 'sha384', 'RS512': 'sha512'}
# PKCS#1 v1.5 signature layout: 0x01, a run of 0xff padding, a 0x00
# separator, the ASN.1 DigestInfo prefix, then the raw digest.  The
# leading 0x00 of the padded block disappears in the bytes->int->bytes
# round trip and is therefore not part of the pattern.
pkcs_regex = (b'\x01\xff+\x00(?P<algorithm>' +
              b'|'.join(sorted(re.escape(asn1) for asn1 in ASN1_HASH.keys())) +
              b')(?P<hash>.+)')
pkcs_signature = re.compile(pkcs_regex, re.DOTALL)
class BadSignatureError(ValueError):
    """Raised when a signature does not verify against the public key."""
    pass
class PublicKey(object):
    """RSA public key capable of verifying PKCS#1 v1.5 signatures."""
    KTY = "RSA"
    def __init__(self, n, e, alg='RS256'):
        # n: RSA modulus, e: public exponent, alg: JWA algorithm name
        self.e = e
        self.n = n
        self.alg = alg
    @classmethod
    def from_jwk(cls, jwk):
        """Load RSA PublicKey from a JSON Web Key"""
        if jwk['kty'] != cls.KTY:
            raise ValueError("Not a {0} key".format(cls.KTY))
        n = b64_to_int(as_binary(jwk['n']))
        e = b64_to_int(as_binary(jwk['e']))
        return cls(n, e, jwk['alg'])
    def to_jwk(self):
        """Return a JSON Web Key for this key."""
        jwk = {'kty':self.KTY,
               'n':as_text(int_to_b64(self.n)),
               'e':as_text(int_to_b64(self.e)),
               'alg':self.alg}
        return jwk
    def verify(self, message, signature):
        """Verify a message signed with this key. Return the verified message on success.
        Input, output are bytes.

        Raises BadSignatureError if the signature's hash algorithm does
        not match the key's algorithm, or if the digest differs.
        """
        # Apply the public exponent to recover the padded PKCS#1 v1.5
        # block (minus its leading zero byte).
        signature_integer = bytes_to_int(signature)
        plain_signature = decrypt(signature_integer, self)
        plain_signature_bytes = int_to_bytes(plain_signature)
        match = pkcs_signature.match(plain_signature_bytes)
        if match:
            # group(1) is the 'algorithm' group: the ASN.1 DigestInfo prefix
            asn1_hash = ASN1_HASH[match.group(1)]
            alg_hash = ALG_HASH[self.alg]
            if asn1_hash != alg_hash:
                raise BadSignatureError("Key and signature algorithm do not match", asn1_hash, alg_hash)
            hash = hashlib.new(ASN1_HASH[match.group(1)])
            hash.update(message)
            # NOTE(review): '==' is not a constant-time comparison
            # (consider hmac.compare_digest) -- confirm threat model.
            if hash.digest() == match.group(2):
                return message
        raise BadSignatureError("Bad signature")
    def __repr__(self):
        return "{0}(n={1},e={2})".format(self.__class__.__name__, self.n, self.e)
def decrypt(ciphertext, pub):
    """Raw RSA operation with the *public* key (modular exponentiation).

    Despite the name, this applies the public exponent, i.e. it is the
    signature-verification primitive, not private-key decryption.

    ciphertext -- integer representation of the signature
    pub -- object with public exponent ``e`` and modulus ``n``
    """
    return pow(ciphertext, pub.e, pub.n)
def urlsafe_b64encode(data):
    """URL-safe base64 encoding of *data* with trailing ``=`` padding removed."""
    encoded = base64.urlsafe_b64encode(data)
    return encoded.rstrip(b'=')
def urlsafe_b64decode(data):
    """urlsafe_b64decode accepting unpadded input.

    Restores the ``=`` padding stripped by :func:`urlsafe_b64encode`
    before delegating to the standard decoder.

    data -- URL-safe base64 bytes, possibly without padding
    """
    # -len(data) % 4 is 0 for already-aligned input; the previous
    # formula (4 - (len(data) & 3)) produced a spurious 4-byte pad
    # there (tolerated by the decoder, but incorrect).
    pad = b'=' * (-len(data) % 4)
    return base64.urlsafe_b64decode(data + pad)
def bytes_to_int(data):
    """Convert big-endian bytes to an integer.

    data -- non-empty bytes (empty input raises ValueError, as before)
    """
    # binascii.hexlify always returns an even number of hex digits, so
    # the former zero-padding step was dead code and is removed.
    return int(binascii.hexlify(data), 16)
def b64_to_int(data):
    """Convert urlsafe_b64encode(data) to an integer

    data -- unpadded URL-safe base64 bytes; decoded big-endian.
    """
    return bytes_to_int(urlsafe_b64decode(data))
def int_to_bytes(integer):
    """Convert a non-negative integer to its shortest big-endian bytes."""
    # '%x' may yield an odd number of digits; left-pad with one zero.
    hex_digits = ('%x' % integer).encode('latin1')
    if len(hex_digits) % 2:
        hex_digits = b'0' + hex_digits
    return binascii.unhexlify(hex_digits)
def int_to_b64(integer):
    """Convert an integer to urlsafe_b64encode() data

    The integer is serialized big-endian, then base64url-encoded
    without padding.
    """
    return urlsafe_b64encode(int_to_bytes(integer))
def as_binary(text):
    """Encode *text* to bytes using latin-1 (byte-value preserving)."""
    encoded = text.encode('latin1')
    return encoded
def as_text(data):
    """Decode *data* to text using latin-1 (byte-value preserving)."""
    decoded = data.decode('latin1')
    return decoded
def _verify_jwt(token, jwks):
    """Verify *token* against *jwks* and return the signed part.

    token -- full JWT in ``header.payload.signature`` form
    jwks -- JSON web key set: dict with a ``"keys"`` list of JWK dicts

    Raises BadSignatureError if no key matches the token's ``kid`` or
    the signature does not verify.
    """
    # Split off the signature; the signed input is "header.payload".
    message, _, signature = token.rpartition('.')
    header, payload = message.split('.')
    header = json.loads(urlsafe_b64decode(header))
    # Select the verification key by key id.
    kid = header['kid']
    try:
        jwk = next(key for key in jwks['keys'] if key['kid'] == kid)
    except StopIteration:
        raise BadSignatureError("Key not found in jwks")
    pk = PublicKey.from_jwk(jwk)
    # NOTE(review): message is hashed directly by PublicKey.verify; on
    # Python 3 hashlib requires bytes -- confirm the expected token
    # type (bytes vs str) for the supported Python versions.
    verified = pk.verify(message, urlsafe_b64decode(signature))
    return verified
def verify_jwt(token, jwks):
    """Verify the signature on a RSA-signed JSON web token.
    Does not check any of the claims within the token.

    token: Full signed token
    jwks: JSON web key set (object with {"keys":[...]})

    Return decoded payload.

    Raises BadSignatureError if verification fails.
    """
    verified = _verify_jwt(token, jwks)
    # _verify_jwt returned the signed "header.payload" part; decode the
    # payload segment only.
    header, _, payload = verified.partition('.')
    return json.loads(urlsafe_b64decode(payload))
import sys
import json
import random
import hashlib
from pathlib import Path
from shutil import copyfileobj
from functools import partial
from contextlib import redirect_stdout
from importlib import resources, import_module
from base64 import b64encode, urlsafe_b64encode
from gmpy2 import invert, isqrt, gcd
from . import output
DEFAULT_E = 65537
def carmichael_lcm(p, q):
    """Return lcm(p-1, q-1), the Carmichael function of n = p*q."""
    a = p - 1
    b = q - 1
    return (a * b) // gcd(a, b)
def byte_length(n):
    """Return byte length of the given integer

    Arguments:
    n -- int
    """
    # ceil(bit_length / 8); 0 has bit_length 0 and thus byte length 0
    return (n.bit_length() + 7) // 8
def to_bytes_auto(n):
    """Convert int to shortest possible bytes object, big endian

    Arguments:
    n -- int
    """
    # inline byte-length computation: ceil(bit_length / 8)
    length = (n.bit_length() + 7) // 8
    return n.to_bytes(length, "big")
def int_from_path(path):
    """Read the file at *path* and interpret its bytes as a big-endian int."""
    with open(path, "rb") as stream:
        raw = stream.read()
    return int.from_bytes(raw, "big")
def compute_extra_key_elements(d, p, q):
    """Compute extra key elements

    Returns the CRT exponents dp = d mod (p-1), dq = d mod (q-1) and
    the modular inverses pinv = p^-1 mod q, qinv = q^-1 mod p, or None
    for whichever cannot be derived from the given inputs.

    Arguments:
    d -- RSA private exponent
    p -- RSA first factor
    q -- RSA second factor
    """
    if d is not None:
        dp = d%(p-1)
        dq = d%(q-1)
    else:
        dp, dq = None, None
    if p is not None and q is not None:
        pinv = int(invert(p, q))
        qinv = int(invert(q, p))
    else:
        pinv, qinv = None, None
    return dp, dq, pinv, qinv
def compute_pubkey(n, e, d, p, q, phi=None):
    """Compute public key elements

    Collects every (n, e) candidate derivable from the supplied
    parameter combinations and checks that they all agree.

    Arguments:
    n -- RSA modulus
    e -- RSA public exponent
    d -- RSA private exponent
    p -- RSA first factor
    q -- RSA second factor
    phi -- totient of n, if known

    Raises:
    ValueError -- if the parameters yield conflicting public keys
    """
    tup = (n, e, d, p, q)
    pks = set()
    if n is not None and e is not None:
        pks.add((n, e))
    if n is not None and d is not None:
        if phi is not None:
            # e is the modular inverse of d with respect to the totient
            tmp_e = int(invert(d, phi))
            pks.add((n, tmp_e))
        else:
            # only one factor known: treat it as p and derive the other
            if p is None:
                p = q
            if p is not None:
                q = n//p
                phi = (p-1) * (q-1)
                tmp_e = int(invert(d, phi))
                if e is not None and tmp_e != e:
                    # retry with Carmichael's lambda(n) in case the key
                    # was generated with lcm(p-1, q-1)
                    tmp_e = int(invert(d, phi//gcd(p-1,q-1)))
                pks.add((n, tmp_e))
    if p is not None and q is not None:
        if d is not None:
            phi = (p-1) * (q-1)
            tmp_e = int(invert(d, phi))
            if e is not None and tmp_e != e:
                tmp_e = int(invert(d, phi//gcd(p-1,q-1)))
            pks.add((p*q, tmp_e))
        if e is not None:
            pks.add((p*q, e))
    if len(pks) != 1:
        raise ValueError(f"Inconsistent parameters {tup}")
    return pks.pop()
def recover_pq(n, e, d):
    """Recover the prime factors p, q of n from (n, e, d).

    Standard probabilistic method: write e*d - 1 = 2^s * t with t odd,
    then for random bases g search g^t, g^(2t), ... for a non-trivial
    square root of 1 mod n; gcd(root - 1, n) reveals a factor.

    Raises:
    ValueError -- if e*d - 1 is odd, or no factor was found within
        100 random attempts
    """
    k = d*e - 1
    if k % 2 != 0:
        raise ValueError(f"p and q cannot be recovered from these parameters {(n, e, d)}")
    # split k = 2^s * t ('factor' accumulates the power of two)
    factor = 1
    while k % 2 == 0:
        factor *= 2
        k //= 2
    for i in range(100):
        b = 0  # NOTE(review): unused variable
        g = random.randint(0, n)
        y = pow(g, k, n)
        if y == 1 or y == (n - 1):
            # only trivial square roots on this base -- try another
            continue
        for j in range(1, factor):
            x = pow(y, 2, n)
            if x in (1, n-1):
                break
            y = x
        else:
            x = pow(y, 2, n)
        if x == 1:
            # y is a non-trivial square root of 1 mod n
            break
    else:
        raise ValueError(f"p and q cannot be recovered (not enough iterations?)")
    p = int(gcd(y-1, n))
    q = n // p
    return p, q
def complete_privkey(n, e, d, p, q, phi=None, use_lcm=True):
    """Compute missing private key elements

    Fills in whatever of (n, e, d, p, q) can be derived from the given
    subset and validates their consistency.

    Arguments:
    n -- RSA modulus
    e -- RSA public exponent
    d -- RSA private exponent
    p -- RSA first factor
    q -- RSA second factor
    phi -- totient of n, if known
    use_lcm -- derive exponents with Carmichael's lcm(p-1, q-1)
        instead of Euler's (p-1)*(q-1)

    Raises:
    ValueError -- if the parameters are insufficient or inconsistent
    """
    tup = (n, e, d, p, q, phi)
    if n is None and (p is None or q is None):
        raise ValueError(f"You have to provide n or both p and q in tuple '{tup}'")
    if n is not None and (p is None and q is None and phi is None) and (d is None or e is None):
        raise ValueError(f"If you provide n, you must provide also either one of p, q or phi, or d and e, in tuple '{tup}'")
    if e is None and d is None:
        raise ValueError(f"You have to provide e or d in tuple '{tup}'")
    if n is not None and p is None and q is None and phi is not None:
        # p and q are the roots of x^2 - (n+1-phi)*x + n = 0; take the
        # smaller root of the quadratic.
        p = ((n + 1 - phi) - isqrt((n + 1 - phi)**2 - 4*n)) // 2
    if n is None:
        n = p*q
    elif p is None:
        # NOTE(review): if q is also None this division raises
        # TypeError before recover_pq below can run -- confirm the
        # intended flow for the (n, e, d)-only case.
        p = n//q
    elif q is None:
        q = n//p
    if p is None:
        # only (n, e, d) known: factor n probabilistically
        p, q = recover_pq(n, e, d)
    if n != p * q:
        raise ValueError(f"n is not equal to p * q in tuple '{tup}'")
    if use_lcm:
        phi = carmichael_lcm(p, q)
    else:
        phi = (p-1) * (q-1)
    if e is None:
        e = int(invert(d, phi))
    elif d is None:
        d = int(invert(e, phi))
    return n, e, d, p, q
def compute_d(n, e, d, p, q, phi=None):
    """Compute d from available parameters

    Derives every candidate for the private exponent from the supplied
    parameter combinations and checks that they all agree.

    Arguments:
    n -- RSA modulus
    e -- RSA public exponent
    d -- RSA private exponent
    p -- RSA first factor
    q -- RSA second factor
    phi -- totient of n, if known

    Raises:
    ValueError -- if e is missing or the candidates disagree
    """
    tup = (n, e, d, p, q, phi)
    ds = set()
    if d is not None:
        ds.add(d)
    if e is None and len(ds) == 0:
        raise ValueError("Missing public exponent")
    if phi is not None:
        ds.add(int(invert(e, phi)))
    # only one factor known: treat it as p
    if p is None:
        p = q
    if p is not None:
        if q is not None:
            tmp_d = int(invert(e, (p-1) * (q-1)))
            if d is not None and tmp_d != d:
                # retry with Carmichael's lambda in case the key was
                # generated with lcm(p-1, q-1)
                tmp_d = int(invert(e, carmichael_lcm(p, q)))
            ds.add(tmp_d)
        if n is not None:
            q = n//p
            tmp_d = int(invert(e, (p-1) * (q-1)))
            if d is not None and tmp_d != d:
                tmp_d = int(invert(e, carmichael_lcm(p, q)))
            ds.add(tmp_d)
    if len(ds) != 1:
        raise ValueError(f"Inconsistent parameters {tup}")
    return ds.pop()
def compute_n(n, e, d, p, q, phi=None):
    """Compute n from available parameters

    Derives every candidate for the modulus from the supplied
    parameter combinations and checks that they all agree.

    Arguments:
    n -- RSA modulus
    e -- RSA public exponent (unused here)
    d -- RSA private exponent (unused here)
    p -- RSA first factor
    q -- RSA second factor
    phi -- totient of n, if known

    Raises:
    ValueError -- if the candidates disagree
    """
    tup = (n, e, d, p, q, phi)
    ns = set()
    if n is not None:
        ns.add(n)
    if p is not None and q is not None:
        ns.add(p*q)
    # only one factor known: treat it as p
    if p is None:
        p = q
    if phi is not None and p is not None:
        # p^2 - p + p*phi = p*(p-1) + p*(p-1)*(q-1) = p*q*(p-1),
        # so dividing by (p-1) recovers n = p*q
        ns.add((pow(p, 2) - p + p*phi) // (p-1))
    if len(ns) != 1:
        raise ValueError(f"Inconsistent parameters {tup}")
    return ns.pop()
def output_text(label, text, filename, encoding=None, json_output=False):
    """Print or save an integer-encoded text in several representations.

    Arguments:
    label -- label prefix used when printing
    text -- int, the value to present (serialized big-endian)
    filename -- output file path, or the sentinel value True to print
        the representations instead of writing raw bytes to a file
    encoding -- optional codec used to attempt a text decoding
    json_output -- emit a JSON object on stdout instead of labeled lines
    """
    text_raw = to_bytes_auto(text)
    text_b64 = b64encode(text_raw).decode("ascii")
    text_b64_url = urlsafe_b64encode(text_raw).decode("ascii")
    if encoding is not None:
        try:
            text_str = text_raw.decode(encoding)
        except ValueError:
            text_str = f"Cannot decode ({encoding})"
    else:
        text_str = None
    if filename is True:
        # sentinel: no output file -- show all representations
        text_hex = f"0x{text_raw.hex()}"
        if json_output:
            output_obj = {
                "dec": str(text),
                "hex": text_hex,
                "raw": str(text_raw),
                "b64": str(text_b64),
                "url": str(text_b64_url)
            }
            if text_str is not None:
                output_obj["str"] = text_str
            json.dump(output_obj, sys.stdout, indent=4)
        else:
            # labeled lines go to stderr so stdout stays machine-clean
            with redirect_stdout(sys.stderr):
                output.primary(f"{label} (dec): {text}")
                output.primary(f"{label} (hex): {text_hex}")
                output.primary(f"{label} (raw): {text_raw}")
                output.primary(f"{label} (b64): {text_b64}")
                output.primary(f"{label} (url): {text_b64_url}")
                if text_str is not None:
                    output.primary(f"{label} (str): {text_str}")
    else:
        with open(filename, "wb") as file:
            file.write(text_raw)
# From a tip I saw here: https://stackoverflow.com/questions/3431825/generating-an-md5-checksum-of-a-file
def file_checksum(filename):
    """Return the SHA-256 hex digest of the file at *filename*."""
    digest = hashlib.sha256()
    with open(filename, "rb") as stream:
        for block in iter(lambda: stream.read(4096), b""):
            digest.update(block)
    return digest.hexdigest()
def module_root(m):
    """Return the path of module *m*: its file, or the package
    directory when *m* is a package (``__init__`` file)."""
    location = Path(m.__file__)
    return location.parent if location.stem == "__init__" else location
def copy_resource(package, res, dest):
    """Copy resource *res* of *package* into directory *dest*.

    NOTE(review): importlib.resources.open_binary is deprecated since
    Python 3.11 in favour of resources.files() -- confirm the supported
    Python range before migrating.
    """
    with resources.open_binary(package, res) as src, \
            open(Path(dest)/res, "wb") as dst:
        copyfileobj(src, dst)
def copy_resource_tree(package, dest):
    """Recursively copy all resources of *package* into dest/<pkg name>.

    Subpackages are imported and mirrored as subdirectories; the
    __pycache__ entry is skipped.
    """
    package_name = package.__name__.split(".")[-1]
    dest_subdir = Path(dest)/package_name
    dest_subdir.mkdir(mode=0o755, exist_ok=True)
    for x in resources.contents(package):
        if resources.is_resource(package, x):
            copy_resource(package, x, dest_subdir)
        elif x != "__pycache__":
            # non-resource entries are subpackages -- recurse
            subpackage = import_module(f".{x}", package.__name__)
            copy_resource_tree(subpackage, dest_subdir)
import rsb.util
from threading import Condition
FilterAction = rsb.util.Enum("FilterAction", ["ADD", "REMOVE", "UPDATE"])
class AbstractFilter(object):
    """
    Interface for concrete filters.

    .. codeauthor:: jwienke
    """
    def match(self, event):
        """
        Matches this filter against a given event.

        Args:
            event:
                event to match against

        Returns:
            True if this filter matches the event, else False
        """
        # Abstract placeholder: concrete filters must override this.
        pass
class ScopeFilter(AbstractFilter):
    """
    A filter to restrict the scope for events.

    An event matches if its scope equals the configured scope or is a
    sub-scope of it.

    .. codeauthor:: jwienke
    """

    def __init__(self, scope):
        """
        Constructs a new scope filter with a given scope to restrict to.

        Args:
            scope:
                top-level scope to accept, including all child scopes
        """
        self.__scope = scope

    def getScope(self):
        """
        Returns the top-level scope this filter matches for.

        Returns:
            scope
        """
        return self.__scope

    def match(self, event):
        scope = event.scope
        if scope == self.__scope:
            return True
        return scope.isSubScopeOf(self.__scope)
class OriginFilter(AbstractFilter):
    """
    Matching events have to originate at a particular participant.

    .. codeauthor:: jmoringe
    """

    def __init__(self, origin, invert=False):
        """
        Args:
            origin:
                The id of the :obj:`Participant` from which matching events
                should originate.
            invert (bool):
                Controls whether matching results should be inverted (i.e.
                matching events ``not`` originating from ``origin``).
        """
        self.__origin = origin
        self.__invert = invert

    def getOrigin(self):
        return self.__origin

    origin = property(getOrigin)

    def getInvert(self):
        return self.__invert

    invert = property(getInvert)

    def match(self, event):
        matches = self.origin == event.senderId
        if self.invert:
            return not matches
        return matches

    def __str__(self):
        prefix = 'not ' if self.invert else ''
        return '<%s %sfrom %s at 0x%x>' % (type(self).__name__,
                                           prefix,
                                           self.origin,
                                           id(self))

    def __repr__(self):
        return '%s("%s", invert = %s)' \
            % (type(self).__name__, self.origin, self.invert)
class CauseFilter(AbstractFilter):
    """
    Filter events based on their cause vectors.

    .. codeauthor:: jmoringe
    """
    def __init__(self, cause, invert=False):
        """
        Args:
            cause:
                The id of the :obj:`Event` that should be in the cause
                vector of matching events.
            invert (bool):
                Controls whether matching results should be inverted
                (i.e. matching events that do ``not`` have the
                specified event id in their cause vector).
        """
        self.__cause = cause
        self.__invert = invert
    def getCause(self):
        return self.__cause
    cause = property(getCause)
    def getInvert(self):
        return self.__invert
    invert = property(getInvert)
    def match(self, event):
        # membership test in the event's cause vector
        result = self.cause in event.causes
        if self.invert:
            return not result
        else:
            return result
    def __str__(self):
        inverted = ''
        if self.invert:
            inverted = 'not '
        return '<%s %scaused-by %s at 0x%x>' \
            % (type(self).__name__, inverted, self.cause, id(self))
    def __repr__(self):
        return '%s("%s", invert = %s)' \
            % (type(self).__name__, self.cause, self.invert)
class MethodFilter(AbstractFilter):
    """
    Matching events have to (or must not) have a particular value in
    their method field.

    .. codeauthor:: jmoringe
    """
    def __init__(self, method, invert=False):
        """
        Args:
            method (str):
                The method string that matching events have to have in their
                method field.
            invert (bool):
                Controls whether matching results should be inverted (i.e.
                matching events ``not`` having ``method`` in their method
                field).
        """
        self.__method = method
        self.__invert = invert
    def getMethod(self):
        return self.__method
    method = property(getMethod)
    def getInvert(self):
        return self.__invert
    invert = property(getInvert)
    def match(self, event):
        result = self.method == event.method
        if self.invert:
            return not result
        else:
            return result
    def __str__(self):
        inverted = ''
        if self.invert:
            inverted = 'not '
        return '<%s %sfrom %s at 0x%x>' \
            % (type(self).__name__, inverted, self.method, id(self))
    def __repr__(self):
        return '%s("%s", invert = %s)' \
            % (type(self).__name__, self.method, self.invert)
class RecordingTrueFilter(AbstractFilter):
    """Records every event it sees and accepts all of them.

    The events list and the notified condition let test code wait for
    deliveries.
    """
    def __init__(self):
        self.events = []
        self.condition = Condition()
    def match(self, event):
        with self.condition:
            self.events.append(event)
            self.condition.notifyAll()
            return True
class RecordingFalseFilter(AbstractFilter):
    """Records every event it sees and rejects all of them.

    The events list and the notified condition let test code wait for
    deliveries.
    """
    def __init__(self):
        self.events = []
        self.condition = Condition()
    def match(self, event):
        with self.condition:
            self.events.append(event)
            self.condition.notifyAll()
            return False
class TrueFilter(AbstractFilter):
    """Filter that accepts every event."""

    def match(self, event):
        # unconditionally accept
        return True
class FalseFilter(AbstractFilter):
    """Filter that rejects every event."""

    def match(self, event):
        # unconditionally reject
        return False
import abc
import copy
import threading
import Queue
import rsb.util
import rsb.filter
class ScopeDispatcher(object):
    """
    Maintains a map of :ref:`Scopes <scope>` to sink objects.

    .. codeauthor:: jmoringe
    """
    def __init__(self):
        # maps Scope -> list of sinks registered for exactly that scope
        self.__map = dict()
    def __len__(self):
        return len(self.__map)
    def __bool__(self):
        # NOTE(review): this file imports the Python-2 Queue module,
        # and Python 2 uses __nonzero__, not __bool__, for truth
        # testing -- confirm the targeted Python version.
        return bool(self.__map)
    def addSink(self, scope, sink):
        """
        Associates `sink` to `scope`.

        Args:
            scope (Scope):
                The scope to which `sink` should be associated.
            sink (object):
                The arbitrary object that should be associated to `scope`.
        """
        if scope in self.__map:
            sinks = self.__map[scope]
        else:
            sinks = list()
            self.__map[scope] = sinks
        sinks.append(sink)
    def removeSink(self, scope, sink):
        """
        Disassociates `sink` from `scope`.

        Args:
            scope (Scope):
                The scope from which `sink` should be disassociated.
            sink (object):
                The arbitrary object that should be disassociated from
                `scope`.
        """
        # NOTE(review): assumes sink was previously added for scope;
        # otherwise get() returns None and .remove raises
        # AttributeError -- confirm callers uphold this.
        sinks = self.__map.get(scope)
        sinks.remove(sink)
        # drop empty sink lists to keep the map minimal
        if not sinks:
            del self.__map[scope]
    def getSinks(self):
        """
        Returns a generator yielding all sinks.

        Yields:
            sinks:
                A generator yielding all known sinks in an unspecified
                order.
        """
        for sinks in self.__map.values():
            for sink in sinks:
                yield sink
    sinks = property(getSinks)
    def matchingSinks(self, scope):
        """
        Returns a generator yielding sinks matching `scope`.

        A sink matches `scope` if it was previously associated to
        `scope` or one of its super-scopes.

        Yields:
            sinks:
                A generator yielding all matching sinks in an
                unspecified order.
        """
        # sinks registered for the scope itself ...
        for sink in self.__map.get(scope, []):
            yield sink
        # ... then for each of its super-scopes
        for scope in scope.superScopes():
            for sink in self.__map.get(scope, []):
                yield sink
class BroadcastProcessor(object):
    """
    Event processor that synchronously broadcasts each event to a list
    of handlers, in registration order.

    .. codeauthor:: jmoringe
    """

    def __init__(self, handlers=None):
        self.__logger = rsb.util.getLoggerByClass(self.__class__)
        self.__handlers = list(handlers) if handlers is not None else []

    def getHandlers(self):
        return self.__handlers

    def addHandler(self, handler):
        self.__handlers.append(handler)

    def removeHandler(self, handler):
        self.__handlers.remove(handler)

    handlers = property(getHandlers)

    def __call__(self, event):
        self.handle(event)

    def handle(self, event):
        self.dispatch(event)

    def dispatch(self, event):
        # deliver synchronously to every registered handler
        for sink in self.handlers:
            sink(event)

    def __str__(self):
        return '<%s %d handlers at 0x%x>' \
            % (type(self).__name__,
               len(self.handlers),
               id(self))
class EventReceivingStrategy(object):
    """
    Superclass for event receiving strategies.

    .. codeauthor:: jwienke
    """
    # NOTE(review): the Python-2 style __metaclass__ attribute is
    # ignored on Python 3 -- confirm the targeted Python version.
    __metaclass__ = abc.ABCMeta
class PushEventReceivingStrategy(EventReceivingStrategy):
    """
    Superclass for push-based event receiving strategies.

    .. codeauthor:: jmoringe
    .. codeauthor:: jwienke
    """
    @abc.abstractmethod
    def addHandler(self, handler, wait):
        """Register *handler*; wait presumably blocks until the handler
        is fully installed -- confirm against implementations."""
        pass
    @abc.abstractmethod
    def removeHandler(self, handler, wait):
        """Unregister *handler*; see addHandler for the wait flag."""
        pass
    @abc.abstractmethod
    def addFilter(self, theFilter):
        """Add a filter events must match to be dispatched."""
        pass
    @abc.abstractmethod
    def removeFilter(self, theFilter):
        """Remove a previously added filter."""
        pass
    @abc.abstractmethod
    def handle(self, event):
        """Accept *event* for (filtered) dispatch to the handlers."""
        pass
class PullEventReceivingStrategy(EventReceivingStrategy):
    """
    Superclass for pull-based event receiving.

    .. codeauthor:: jwienke
    """
    @abc.abstractmethod
    def setConnectors(self, connectors):
        """Provide the connectors this strategy receives from."""
        pass
    @abc.abstractmethod
    def raiseEvent(self, block):
        """
        Receives the next event.

        Args:
            block (bool):
                if ``True``, wait for the next event. Else, immediately return,
                potentially a ``None``.
        """
        pass
class FirstConnectorPullEventReceivingStrategy(PullEventReceivingStrategy):
    """
    Directly receives events only from the first provided connector.

    .. codeauthor:: jwienke
    """

    def setConnectors(self, connectors):
        """Provide the connectors to receive from; must be non-empty.

        Raises:
            ValueError: if *connectors* is empty or None
        """
        if not connectors:
            # fixed typo in the error message ("on" -> "one")
            raise ValueError("There must be at least one connector")
        self.__connectors = connectors

    def raiseEvent(self, block):
        """
        Receives the next event from the first connector.

        Args:
            block (bool):
                if ``True``, wait for the next event. Else, immediately
                return, potentially a ``None``.

        Returns:
            the received event with its deliver time set, or ``None``
        """
        assert self.__connectors
        event = self.__connectors[0].raiseEvent(block)
        if event:
            event.metaData.setDeliverTime()
        return event
class ParallelEventReceivingStrategy(PushEventReceivingStrategy):
    """
    An :obj:`PushEventReceivingStrategy` that dispatches events to multiple
    handlers in individual threads in parallel. Each handler is called only
    sequentially but potentially from different threads.

    .. codeauthor:: jwienke
    """
    def __init__(self, numThreads=5):
        # numThreads: size of the worker thread pool
        self.__logger = rsb.util.getLoggerByClass(self.__class__)
        # the pool preserves per-receiver ordering while dispatching in
        # parallel across receivers
        self.__pool = rsb.util.OrderedQueueDispatcherPool(
            threadPoolSize=numThreads, delFunc=self.__deliver,
            filterFunc=self.__filter)
        self.__pool.start()
        self.__filters = []
        self.__filtersMutex = threading.RLock()
    def __del__(self):
        self.__logger.debug("Destructing ParallelEventReceivingStrategy")
        self.deactivate()
    def deactivate(self):
        self.__logger.debug("Deactivating ParallelEventReceivingStrategy")
        if self.__pool:
            self.__pool.stop()
            self.__pool = None
    def __deliver(self, action, event):
        # called by the pool to deliver one event to one receiver
        # pylint: disable=no-self-use
        action(event)
    def __filter(self, action, event):
        # called by the pool to decide whether an event reaches a
        # receiver; copy the filter list under the lock so matching can
        # run without holding it
        with self.__filtersMutex:
            filterCopy = list(self.__filters)
        for flt in filterCopy:
            if not flt.match(event):
                return False
        return True
    def handle(self, event):
        """
        Dispatches the event to all registered listeners.

        Args:
            event:
                event to dispatch
        """
        self.__logger.debug("Processing event %s", event)
        event.metaData.setDeliverTime()
        self.__pool.push(event)
    def addHandler(self, handler, wait):
        # We can ignore wait since the pool implements the desired
        # behavior.
        self.__pool.registerReceiver(handler)
    def removeHandler(self, handler, wait):
        # We can ignore wait since the pool implements the desired
        # behavior.
        self.__pool.unregisterReceiver(handler)
    def addFilter(self, theFilter):
        with self.__filtersMutex:
            self.__filters.append(theFilter)
    def removeFilter(self, theFilter):
        with self.__filtersMutex:
            self.__filters = [f for f in self.__filters if f != theFilter]
class FullyParallelEventReceivingStrategy(PushEventReceivingStrategy):
    """
    An :obj:`PushEventReceivingStrategy` that dispatches events to multiple
    handlers in individual threads in parallel. Each handler can be called
    in parallel for different requests.

    .. codeauthor:: jwienke
    """
    def __init__(self):
        self.__logger = rsb.util.getLoggerByClass(self.__class__)
        self.__filters = []
        self.__mutex = threading.RLock()
        self.__handlers = []
    def deactivate(self):
        # nothing to tear down; worker threads are per-event
        pass
    class Worker(threading.Thread):
        """One thread per (handler, event) pair: filters, then delivers."""
        def __init__(self, handler, event, filters):
            threading.Thread.__init__(self, name='DispatcherThread')
            self.handler = handler
            self.event = event
            self.filters = filters
        def run(self):
            # drop the event silently if any filter rejects it
            for f in self.filters:
                if not f.match(self.event):
                    return
            self.handler(self.event)
    def handle(self, event):
        """
        Dispatches the event to all registered listeners.

        Args:
            event:
                event to dispatch
        """
        self.__logger.debug("Processing event %s", event)
        event.metaData.setDeliverTime()
        workers = []
        # snapshot handlers and filters under the lock, start workers
        # outside critical work
        with self.__mutex:
            for h in self.__handlers:
                workers.append(self.Worker(h, event, list(self.__filters)))
        for w in workers:
            w.start()
    def addHandler(self, handler, wait):
        # We can ignore wait since the pool implements the desired
        # behavior.
        with self.__mutex:
            self.__handlers.append(handler)
    def removeHandler(self, handler, wait):
        # TODO anything required to implement wait functionality?
        with self.__mutex:
            self.__handlers.remove(handler)
    def addFilter(self, f):
        with self.__mutex:
            self.__filters.append(f)
    def removeFilter(self, theFilter):
        with self.__mutex:
            self.__filters = [f for f in self.__filters if f != theFilter]
class NonQueuingParallelEventReceivingStrategy(PushEventReceivingStrategy):
    """
    An :obj:`PushEventReceivingStrategy` that dispatches events to multiple
    handlers using a single thread and without queuing. Only a single buffer
    is used to decouple the transport from the registered handlers. In case
    the handler processing is slower than the transport, the transport will
    block on inserting events into this strategy. Callers must ensure that they
    are in no active call for #handle when deactivating this instance.

    .. codeauthor:: jwienke
    """

    def __init__(self):
        self.__logger = rsb.util.getLoggerByClass(self.__class__)
        self.__filters = []
        self.__mutex = threading.RLock()
        self.__handlers = []
        # size-1 queue: the transport blocks while the previous event
        # is still being processed
        self.__queue = Queue.Queue(1)
        self.__interrupted = False
        self.__thread = threading.Thread(target=self.__work)
        self.__thread.start()

    def deactivate(self):
        # wake the worker with a sentinel item and wait for it to exit
        self.__interrupted = True
        self.__queue.put(None, True)
        self.__thread.join()

    def __work(self):
        """Worker loop: deliver each queued event to all handlers."""
        while True:
            event = self.__queue.get(True)
            # interruption checking is handled here and not in the head of the
            # loop since we need put an artificial item into the queue when
            # deactivating this strategy and this item must never receive at
            # any handler
            if self.__interrupted:
                return
            with self.__mutex:
                # Bug fix: a rejected event previously terminated the
                # worker thread via `return`; only skip the event and
                # keep serving the queue.
                if all(f.match(event) for f in self.__filters):
                    for handler in self.__handlers:
                        handler(event)

    def handle(self, event):
        """Insert *event* for dispatch; blocks while the worker is busy."""
        self.__logger.debug("Processing event %s", event)
        event.metaData.setDeliverTime()
        self.__queue.put(event, True)

    def addHandler(self, handler, wait):
        with self.__mutex:
            self.__handlers.append(handler)

    def removeHandler(self, handler, wait):
        with self.__mutex:
            self.__handlers.remove(handler)

    def addFilter(self, f):
        with self.__mutex:
            self.__filters.append(f)

    def removeFilter(self, theFilter):
        with self.__mutex:
            self.__filters = [f for f in self.__filters if f != theFilter]
class EventSendingStrategy(object):
    """Interface for strategies that send events via connectors."""
    def getConnectors(self):
        raise NotImplementedError
    connectors = property(getConnectors)
    def addConnector(self, connector):
        raise NotImplementedError
    def removeConnector(self, connector):
        raise NotImplementedError
    def handle(self, event):
        # send the given event via the managed connectors
        raise NotImplementedError
class DirectEventSendingStrategy(EventSendingStrategy):
def __init__(self):
self.__connectors = []
def getConnectors(self):
return self.__connectors
def addConnector(self, connector):
self.__connectors.append(connector)
def removeConnector(self, connector):
self.__connectors.remove(connector)
def handle(self, event):
for connector in self.__connectors:
connector.handle(event)
class Configurator(object):
    """
    Superclass for in- and out-direction Configurator classes. Manages
    the basic aspects like the connector list and (de)activation that
    are not direction-specific.

    .. codeauthor:: jwienke
    .. codeauthor:: jmoringe
    """
    def __init__(self, connectors=None):
        self.__logger = rsb.util.getLoggerByClass(self.__class__)
        self.__scope = None
        if connectors is None:
            self.__connectors = []
        else:
            # shallow copy so later mutation of the caller's list does
            # not affect this configurator
            self.__connectors = copy.copy(connectors)
        self.__active = False
    def __del__(self):
        # best-effort cleanup if the user forgot to deactivate
        self.__logger.debug("Destructing Configurator")
        if self.__active:
            self.deactivate()
    def getScope(self):
        return self.__scope
    def setScope(self, scope):
        """
        Defines the scope the in route has to be set up. This will be called
        before calling #activate.

        Args:
            scope (rsb.Scope):
                the scope of the in route
        """
        self.__scope = scope
        self.__logger.debug("Got new scope %s", scope)
        # propagate the scope to every managed connector
        for connector in self.connectors:
            connector.setScope(scope)
    scope = property(getScope, setScope)
    def getConnectors(self):
        return self.__connectors
    connectors = property(getConnectors)
    def getTransportURLs(self):
        """
        Return list of transport URLs describing the connectors
        managed by the configurator.

        Returns:
            list:
                List of transport URLs.
        """
        # NOTE(review): actually returns a set, not a list, although
        # the docstring says list -- confirm which one callers expect.
        return set(x.getTransportURL() for x in self.__connectors)
    transportURLs = property(getTransportURLs)
    def isActive(self):
        return self.__active
    active = property(isActive)
    def activate(self):
        if self.__active:
            raise RuntimeError("Configurator is already active")
        self.__logger.info("Activating configurator")
        for connector in self.connectors:
            connector.activate()
        self.__active = True
    def deactivate(self):
        if not self.__active:
            raise RuntimeError("Configurator is not active")
        self.__logger.info("Deactivating configurator")
        for connector in self.connectors:
            connector.deactivate()
        self.__active = False
    def setQualityOfServiceSpec(self, qos):
        # forward the QoS specification to every connector
        for connector in self.connectors:
            connector.setQualityOfServiceSpec(qos)
class InPushRouteConfigurator(Configurator):
    """
    Instances of this class manage the receiving, filtering and
    dispatching of events via one or more :obj:`rsb.transport.Connector` s
    and an :obj:`PushEventReceivingStrategy`.

    .. codeauthor:: jwienke
    .. codeauthor:: jmoringe
    """
    def __init__(self, connectors=None, receivingStrategy=None):
        """
        Creates a new configurator which manages ``connectors`` and
        ``receivingStrategy``.

        Args:
            connectors:
                Connectors through which events are received.
            receivingStrategy:
                The event receiving strategy according to which the filtering
                and dispatching of incoming events should be performed.
        """
        super(InPushRouteConfigurator, self).__init__(connectors)
        self.__logger = rsb.util.getLoggerByClass(self.__class__)
        if receivingStrategy is None:
            self.__receivingStrategy = ParallelEventReceivingStrategy()
        else:
            self.__receivingStrategy = receivingStrategy
        # wire every connector's incoming events into the strategy
        for connector in self.connectors:
            connector.setObserverAction(self.__receivingStrategy.handle)
    def deactivate(self):
        super(InPushRouteConfigurator, self).deactivate()
        # unhook connectors before stopping the strategy
        for connector in self.connectors:
            connector.setObserverAction(None)
        self.__receivingStrategy.deactivate()
    def handlerAdded(self, handler, wait):
        self.__receivingStrategy.addHandler(handler, wait)
    def handlerRemoved(self, handler, wait):
        self.__receivingStrategy.removeHandler(handler, wait)
    def filterAdded(self, theFilter):
        # filters apply both in the strategy and in the connectors
        self.__receivingStrategy.addFilter(theFilter)
        for connector in self.connectors:
            connector.filterNotify(theFilter, rsb.filter.FilterAction.ADD)
    def filterRemoved(self, theFilter):
        self.__receivingStrategy.removeFilter(theFilter)
        for connector in self.connectors:
            connector.filterNotify(theFilter, rsb.filter.FilterAction.REMOVE)
class InPullRouteConfigurator(Configurator):
    """
    Manages the pull-based receiving of events via one or more
    :obj:`rsb.transport.Connector` s and a
    :obj:`PullEventReceivingStrategy`.
    .. codeauthor:: jwienke
    """
    def __init__(self, connectors=None, receivingStrategy=None):
        """
        Creates a new configurator which manages ``connectors`` and
        ``receivingStrategy``.
        Args:
            connectors:
                Connectors through which events are received.
            receivingStrategy:
                The event receiving strategy according to which the dispatching
                of incoming events should be performed.
        """
        super(InPullRouteConfigurator, self).__init__(connectors)
        self.__logger = rsb.util.getLoggerByClass(self.__class__)
        if receivingStrategy is None:
            receivingStrategy = FirstConnectorPullEventReceivingStrategy()
        self.__receivingStrategy = receivingStrategy
        self.__receivingStrategy.setConnectors(connectors)
    def getReceivingStrategy(self):
        """Return the strategy used to pull events from the connectors."""
        return self.__receivingStrategy
class OutRouteConfigurator(Configurator):
    """
    Instances of this class manage the sending of events via one or
    more :obj:`rsb.transport.Connector` s and an :obj:`EventSendingStrategy`.
    .. codeauthor:: jmoringe
    """
    def __init__(self, connectors=None, sendingStrategy=None):
        """
        Creates a new configurator which manages ``connectors`` and
        ``sendingStrategy``.
        Args:
            connectors:
                Connectors through which events are sent.
            sendingStrategy:
                The strategy that dispatches outgoing events to the
                connectors. Defaults to :obj:`DirectEventSendingStrategy`.
        """
        self.__logger = rsb.util.getLoggerByClass(self.__class__)
        super(OutRouteConfigurator, self).__init__(connectors)
        if sendingStrategy is None:
            sendingStrategy = DirectEventSendingStrategy()
        self.__sendingStrategy = sendingStrategy
        if connectors is not None:
            # Use an explicit loop instead of map(): under Python 3 map()
            # returns a lazy iterator, so the addConnector calls would
            # never actually be executed.
            for connector in connectors:
                self.__sendingStrategy.addConnector(connector)
    def handle(self, event):
        """
        Publish ``event`` through the configured sending strategy.
        Raises:
            RuntimeError:
                if the configurator is not active.
        """
        if not self.active:
            raise RuntimeError("Trying to publish event on Configurator "
                               "which is not active.")
        self.__logger.debug("Publishing event: %s", event)
        self.__sendingStrategy.handle(event)
import sys
import os
import platform
import getpass
import copy
import uuid
import rsb
import rsb.version
from rsb.util import getLoggerByClass
import rsb.converter
from rsb.protocol.introspection.Hello_pb2 import Hello
from rsb.protocol.introspection.Bye_pb2 import Bye
# Process-wide display name reported in introspection Hello messages;
# set via initialize().
_displayName = None
# Model
class ParticipantInfo(object):
    """
    Instances of this class store information about a participant.
    The participant can reside in the current process or in a remote
    process.
    .. codeauthor:: jmoringe
    """
    def __init__(self, kind, id, scope, type, parentId=None,
                 transportURLs=None):
        self.__kind = kind
        self.__id = id
        self.__scope = rsb.Scope.ensureScope(scope)
        self.__type = type
        self.__parentId = parentId
        # Default to an empty list when no transport URLs were supplied.
        self.__transportURLs = transportURLs or []
    def getKind(self):
        """Return the kind of the participant as a lower-case,
        hyphen-separated string, e.g. "listener", "informer" or
        "local-server".
        Returns:
            str:
                kind of the participant
        """
        return self.__kind
    kind = property(getKind)
    def getId(self):
        """Return the unique id of the participant.
        Returns:
            uuid.uuid:
                unique id of the participant
        """
        return self.__id
    id = property(getId)
    def getScope(self):
        """Return the scope of the participant.
        Returns:
            rsb.Scope:
                scope of the participant
        """
        return self.__scope
    scope = property(getScope)
    def getType(self):
        """Return a representation of the type of the participant, if
        available.
        Note that this is a temporary solution and will change in
        future versions.
        Returns:
            type or tuple:
                representation of the type
        """
        return self.__type
    type = property(getType)
    def getParentId(self):
        """Return the unique id of the parent participant, or ``None``
        if the participant does not have a parent.
        Returns:
            uuid.uuid or NoneType:
                id of the parent or ``None``
        """
        return self.__parentId
    parentId = property(getParentId)
    def getTransportURLs(self):
        """Return the list of transport URLs describing the transports
        used by the participant.
        Returns:
            list:
                transport URLs
        """
        return self.__transportURLs
    transportURLs = property(getTransportURLs)
    def __str__(self):
        return '<%s %s %s at 0x%0x>' \
            % (type(self).__name__, self.kind, self.scope.toString(), id(self))
    def __repr__(self):
        return str(self)
# Cached process start time; computed lazily by processStartTime().
__processStartTime = None


def processStartTime():
    """
    Return the start time of the current process (or an approximation)
    in fractional seconds since UNIX epoch.
    The value is computed once and cached for subsequent calls.
    Returns:
        float:
            Start time in fractional seconds since UNIX epoch.
    """
    global __processStartTime
    # Use cached value, if there is one.
    if __processStartTime is not None:
        return __processStartTime
    # Try to determine the start time of the current process in a
    # platform dependent way. Since all of these methods seem kind of
    # error prone, allow failing silently and fall back to the default
    # implementation below.
    if 'linux' in sys.platform:
        try:
            import re
            # Files are opened via "with" so the handles are closed even
            # when parsing fails (the original leaked them).
            with open('/proc/stat') as statFile:
                procStatContent = statFile.read()
            btimeEntry = re.match('(?:.|\n)*btime ([0-9]+)',
                                  procStatContent).group(1)
            bootTimeUNIXSeconds = int(btimeEntry)
            with open('/proc/self/stat') as selfStatFile:
                selfStatContent = selfStatFile.read()
            # Field 21 is the process start time in jiffies after boot;
            # assumes the usual USER_HZ of 100 -- TODO confirm.
            startTimeBootJiffies = int(selfStatContent.split(' ')[21])
            __processStartTime = float(bootTimeUNIXSeconds) \
                + float(startTimeBootJiffies) / 100.0
        except Exception:
            # Narrowed from a bare "except:"; fall through to the
            # time.time() fallback below.
            pass
    # Default/fallback strategy: just use the current time.
    if __processStartTime is None:
        import time
        __processStartTime = time.time()
    return __processStartTime
def programName():
    """Return the path of the main script, or a placeholder when the
    process was not started from a script (e.g. an interactive session)."""
    import __main__
    return getattr(__main__, '__file__', '<no script>')
class ProcessInfo(object):
    """
    Instances of this class store information about operating system
    processes.
    The stored information can describe the current process, a
    different process on the local machine or a remote process.
    .. codeauthor:: jmoringe
    """
    # NOTE(review): the default values below are evaluated once, when
    # this class definition is executed at import time -- not per
    # instantiation.  For snapshotting the importing process this seems
    # intended, but a process created via fork() after import would
    # report the parent's values -- confirm this is acceptable.
    def __init__(self,
                 id=os.getpid(),
                 programName='python%d.%d %s'
                 % (sys.version_info.major,
                    sys.version_info.minor,
                    programName()),
                 arguments=copy.copy(sys.argv),
                 startTime=processStartTime(),
                 executingUser=None,
                 rsbVersion=rsb.version.getVersion()):
        """Create a description of a process; all defaults describe the
        current process (snapshotted at module import time, see above)."""
        self.__id = id
        self.__programName = programName
        self.__arguments = arguments
        self.__startTime = startTime
        self.__executingUser = executingUser
        if not self.__executingUser:
            try:
                self.__executingUser = getpass.getuser()
            except OSError:
                # User name could not be determined; leave it as None.
                pass
        self.__rsbVersion = rsbVersion
    def getId(self):
        """
        Returns the numeric id of the process.
        Returns:
            int:
                The numeric id of the process.
        """
        return self.__id
    id = property(getId)
    def getProgramName(self):
        """
        Returns the name of the program being executed in the process.
        Returns:
            str:
                The name of the program.
        """
        return self.__programName
    programName = property(getProgramName)
    def getArguments(self):
        """
        Returns the list of commandline argument the process has been
        started with.
        Returns:
            list:
                A list of commandline argument strings
        """
        return self.__arguments
    arguments = property(getArguments)
    def getStartTime(self):
        """
        Returns the start time of the process in fractional seconds
        since UNIX epoch.
        Returns:
            float:
                start time in fractional seconds since UNIX epoch.
        """
        return self.__startTime
    startTime = property(getStartTime)
    def getExecutingUser(self):
        """
        Return the login- or account-name of the user executing the
        process.
        Returns:
            str:
                login- or account-name of the user executing the process or
                None if not determinable
        """
        return self.__executingUser
    executingUser = property(getExecutingUser)
    def getRSBVersion(self):
        """
        Return the version of the RSB implementation used in this process.
        Returns:
            str:
                Version string of the form::
                MAJOR.MINOR.REVISION[-COMMIT]
        """
        return self.__rsbVersion
    rsbVersion = property(getRSBVersion)
    def __str__(self):
        return '<%s %s [%d] at 0x%0x>' \
            % (type(self).__name__, self.programName, self.id, id(self))
    def __repr__(self):
        return str(self)
def hostId():
    """
    Returns a unique id string for the current host.
    Returns:
        str or NoneType:
            A platform-dependent, string (hopefully) uniquely identifying the
            current host or ``None`` if such an id cannot be obtained.
    """
    def maybeRead(filename):
        # Return the stripped file content, or None when the file is
        # missing or unreadable.
        try:
            with open(filename, 'r') as f:
                return f.read().strip()
        except (IOError, OSError):
            # Narrowed from a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit.
            return None
    # Prefer the D-Bus machine id, then the systemd one; both are
    # Linux-specific.
    return \
        ('linux' in sys.platform and maybeRead('/var/lib/dbus/machine-id')) \
        or ('linux' in sys.platform and maybeRead('/etc/machine-id')) \
        or None
def machineType():
    """
    Return the type of the local machine, usually the CPU architecture.
    Common 32- and 64-bit x86 spellings are normalized to 'x86' and
    'x86_64'; anything else is returned lower-cased as reported by
    :obj:`platform.machine`.
    Returns:
        str:
            normalized machine type
    """
    result = platform.machine().lower()
    # 'i368' in the original list was a typo for 'i386'; 'i486' added
    # for completeness.
    if result in ['i386', 'i486', 'i586', 'i686']:
        return 'x86'
    elif result in ['x86_64', 'amd64']:
        return 'x86_64'
    else:
        return result
def machineVersion():
    """
    Return the version of the machine within its type, usually the CPU
    identification string, or ``None`` if it cannot be determined.
    Returns:
        str or NoneType:
            CPU model name on Linux, ``None`` elsewhere or on failure.
    """
    if 'linux' in sys.platform:
        import re
        try:
            # "with" closes the handle (the original leaked it).
            with open('/proc/cpuinfo') as cpuInfoFile:
                cpuInfo = cpuInfoFile.read()
            # AttributeError is raised when the pattern does not match
            # (re.match returns None); treated as "unknown".
            return re.match('(?:.|\n)*model name\t: ([^\n]+)',
                            cpuInfo).group(1)
        except (IOError, OSError, AttributeError):
            return None
    # Non-Linux platforms: no strategy available (was an implicit None).
    return None
class HostInfo(object):
    """
    Instances of this class store information about a host.
    The stored information can describe the local host or a remote
    host.
    .. codeauthor:: jmoringe
    """
    # NOTE(review): the defaults below are evaluated once, at module
    # import time, and therefore snapshot the importing host -- confirm
    # this is intended.
    def __init__(self,
                 id=hostId(),
                 hostname=platform.node().split('.')[0],
                 machineType=machineType(),
                 machineVersion=machineVersion(),
                 softwareType=platform.system().lower(),
                 softwareVersion=platform.release()):
        """Create a description of a host; all defaults describe the
        local host (snapshotted at module import time, see above)."""
        self.__id = id
        self.__hostname = hostname
        self.__machineType = machineType
        self.__machineVersion = machineVersion
        self.__softwareType = softwareType
        self.__softwareVersion = softwareVersion
    def getId(self):
        """
        Return the unique id string for the host.
        Returns:
            str or None:
                The platform-dependent, (hopefully) unique id string.
        """
        return self.__id
    id = property(getId)
    def getHostname(self):
        """
        Returns the hostname of the host.
        Returns:
            str:
                The hostname.
        """
        return self.__hostname
    hostname = property(getHostname)
    def getMachineType(self):
        """
        Return the type of the machine, usually CPU architecture.
        Returns:
            str or NoneType:
                The machine type when known.
        """
        return self.__machineType
    machineType = property(getMachineType)
    def getMachineVersion(self):
        """
        Returns the version of the machine within its type, usually
        the CPU identification string.
        Returns:
            str or NoneType:
                The machine version when known.
        """
        return self.__machineVersion
    machineVersion = property(getMachineVersion)
    def getSoftwareType(self):
        """
        Returns the type of the operating system running on the host,
        usually the kernel name.
        Returns:
            str or NoneType:
                The software type when known.
        """
        return self.__softwareType
    softwareType = property(getSoftwareType)
    def getSoftwareVersion(self):
        """
        Returns the version of the operating system within its type,
        usually the kernel version string.
        Returns:
            str or NoneType:
                The software version when known.
        """
        return self.__softwareVersion
    softwareVersion = property(getSoftwareVersion)
    def __str__(self):
        return '<%s %s %s %s at 0x%0x>' \
            % (type(self).__name__,
               self.hostname, self.machineType, self.softwareType,
               id(self))
    def __repr__(self):
        return str(self)
# IntrospectionSender
# Scopes on which introspection information is exchanged.
BASE_SCOPE = rsb.Scope('/__rsb/introspection/')
# Participant Hello/Bye/pong events are published below this scope.
PARTICIPANTS_SCOPE = BASE_SCOPE.concat(rsb.Scope('/participants/'))
# Per-host/per-process introspection servers live below this scope.
HOSTS_SCOPE = BASE_SCOPE.concat(rsb.Scope('/hosts/'))
def participantScope(participantId, baseScope=PARTICIPANTS_SCOPE):
    """Return the sub-scope of ``baseScope`` for ``participantId``."""
    return baseScope.concat(rsb.Scope('/' + str(participantId)))
def processScope(hostId, processId, baseScope=HOSTS_SCOPE):
    """Return the sub-scope of ``baseScope`` for process ``processId``
    running on host ``hostId``."""
    return (baseScope
            .concat(rsb.Scope('/' + hostId))
            .concat(rsb.Scope('/' + processId)))
class IntrospectionSender(object):
    """
    Instances of this class (usually zero or one per process) send
    information about participants in the current process, the current
    process itself and the local host to receivers of introspection
    information.
    Instances need to be notified of created and destroyed
    participants via calls of the :obj:`addParticipant` and
    :obj:`removeParticipant` methods.
    .. codeauthor:: jmoringe
    """
    def __init__(self):
        self.__logger = getLoggerByClass(self.__class__)
        self.__participants = []
        self.__process = ProcessInfo()
        self.__host = HostInfo()
        self.__informer = rsb.createInformer(PARTICIPANTS_SCOPE)
        self.__listener = rsb.createListener(PARTICIPANTS_SCOPE)
        def handle(event):
            # TODO use filter when we get conjunction filter
            if event.method not in ['REQUEST', 'SURVEY']:
                return
            participantId = None
            participant = None
            if len(event.scope.components) > \
                    len(PARTICIPANTS_SCOPE.components):
                try:
                    participantId = uuid.UUID(event.scope.components[-1])
                    if participantId is not None:
                        participant = next((p for p in self.__participants
                                            if p.id == participantId),
                                           None)
                except Exception as e:
                    # Changed from the Python-2-only "except Exception, e"
                    # syntax to the "as" form (works on 2.6+ and 3.x).
                    self.__logger.warn('Query event %s does not '
                                       'properly address a participant: %s',
                                       event, e)
            def process(thunk):
                # Apply thunk to the addressed participant (REQUEST) or
                # to every known participant (SURVEY).
                if participant is not None and event.method == 'REQUEST':
                    thunk(query=event, participant=participant)
                elif participant is None and event.method == 'SURVEY':
                    for p in self.__participants:
                        thunk(query=event, participant=p)
                else:
                    self.__logger.warn('Query event %s not understood', event)
            if event.data is None:
                process(self.sendHello)
            elif event.data == 'ping':
                process(self.sendPong)
            else:
                self.__logger.warn('Query event %s not understood', event)
        self.__listener.addHandler(handle)
        self.__server = rsb.createServer(
            processScope(self.__host.id or self.__host.hostname,
                         str(self.__process.id)))
        def echo(request):
            # Reply with the request payload, attaching the request's
            # send/receive timestamps as user times.
            reply = rsb.Event(scope=request.scope,
                              data=request.data,
                              type=type(request.data))
            reply.metaData.setUserTime('request.send',
                                       request.metaData.sendTime)
            reply.metaData.setUserTime('request.receive',
                                       request.metaData.receiveTime)
            return reply
        self.__server.addMethod('echo', echo,
                                requestType=rsb.Event,
                                replyType=rsb.Event)
    def deactivate(self):
        """Deactivate the listener, informer and server owned by this
        sender."""
        self.__listener.deactivate()
        self.__informer.deactivate()
        self.__server.deactivate()
    def getProcess(self):
        return self.__process
    process = property(getProcess)
    def getHost(self):
        return self.__host
    host = property(getHost)
    def addParticipant(self, participant, parent=None):
        """Record ``participant`` and broadcast a Hello event for it."""
        parentId = None
        if parent:
            parentId = parent.id
        def camelCaseToDashSeperated(name):
            # e.g. 'RemoteServer' -> 'remote-server'
            result = []
            for i, c in enumerate(name):
                if c.isupper() and i > 0 and name[i - 1].islower():
                    result.append('-')
                result.append(c.lower())
            return ''.join(result)
        info = ParticipantInfo(
            kind=camelCaseToDashSeperated(type(participant).__name__),
            id=participant.id,
            parentId=parentId,
            scope=participant.scope,
            type=object,  # TODO
            transportURLs=participant.transportURLs)
        self.__participants.append(info)
        self.sendHello(info)
    def removeParticipant(self, participant):
        """Forget ``participant`` and broadcast a Bye event for it.
        Returns:
            bool:
                ``True`` if participants remain after the removal.
        """
        removed = None
        for p in self.__participants:
            if p.id == participant.id:
                removed = p
                break
        if removed is not None:
            self.__participants.remove(removed)
            self.sendBye(removed)
        return bool(self.__participants)
    def sendHello(self, participant, query=None):
        """Publish a Hello event describing ``participant`` as well as
        this process and host; ``query`` is added as cause when given."""
        hello = Hello()
        hello.kind = participant.kind
        # NOTE(review): assumes the id object offers get_bytes(); stdlib
        # uuid.UUID exposes a .bytes attribute instead -- confirm the
        # actual id type.
        hello.id = participant.id.get_bytes()
        hello.scope = participant.scope.toString()
        if participant.parentId:
            hello.parent = participant.parentId.get_bytes()
        for url in participant.transportURLs:
            hello.transport.append(url)
        host = hello.host
        if self.host.id is None:
            host.id = self.host.hostname
        else:
            host.id = self.host.id
        host.hostname = self.host.hostname
        host.machine_type = self.host.machineType
        if self.host.machineVersion is not None:
            host.machine_version = self.host.machineVersion
        host.software_type = self.host.softwareType
        host.software_version = self.host.softwareVersion
        process = hello.process
        process.id = str(self.process.id)
        process.program_name = self.process.programName
        for argument in self.process.arguments:
            process.commandline_arguments.append(argument)
        # Start time is transmitted in microseconds.
        process.start_time = int(self.process.startTime * 1000000.0)
        if self.process.executingUser:
            process.executing_user = self.process.executingUser
        process.rsb_version = self.process.rsbVersion
        if _displayName:
            process.display_name = _displayName
        scope = participantScope(participant.id, self.__informer.scope)
        helloEvent = rsb.Event(scope=scope,
                               data=hello,
                               type=type(hello))
        if query:
            helloEvent.addCause(query.id)
        self.__informer.publishEvent(helloEvent)
    def sendBye(self, participant):
        """Publish a Bye event for ``participant``."""
        bye = Bye()
        bye.id = participant.id.get_bytes()
        scope = participantScope(participant.id, self.__informer.scope)
        byeEvent = rsb.Event(scope=scope,
                             data=bye,
                             type=type(bye))
        self.__informer.publishEvent(byeEvent)
    def sendPong(self, participant, query=None):
        """Answer a 'ping' query with a 'pong' event on the scope of
        ``participant``."""
        scope = participantScope(participant.id, self.__informer.scope)
        pongEvent = rsb.Event(scope=scope,
                              data='pong',
                              type=str)
        if query:
            pongEvent.addCause(query.id)
        self.__informer.publishEvent(pongEvent)
# Lazily created singleton IntrospectionSender for this process.
__sender = None
def handleParticipantCreation(participant, parent=None):
    """
    This function is intended to be connected to
    :obj:`rsb.participantCreationHook` and calls
    :obj:`IntrospectionSender.addParticipant` when appropriate, first
    creating the :obj:`IntrospectionSender` instance, if necessary.
    """
    global __sender
    # Ignore internal introspection participants and participants that
    # opted out of introspection.
    if participant.scope.isSubScopeOf(BASE_SCOPE) \
            or not participant.config.introspection:
        return
    if __sender is None:
        __sender = IntrospectionSender()
    __sender.addParticipant(participant, parent=parent)
def handleParticipantDestruction(participant):
    """
    This function is intended to be connected to
    :obj:`rsb.participantDestructionHook` and calls
    :obj:`IntrospectionSender.removeParticipant` when appropriate,
    potentially deleting the :obj:`IntrospectionSender` instance
    afterwards.
    """
    global __sender
    # Ignore internal introspection participants and participants that
    # opted out of introspection.
    if participant.scope.isSubScopeOf(BASE_SCOPE) \
            or not participant.config.introspection:
        return
    if __sender and not __sender.removeParticipant(participant):
        # removeParticipant returns a falsy value when no participants
        # remain, so the sender can be torn down.
        __sender.deactivate()
        __sender = None
def initialize(displayName=None):
    """
    Initializes the introspection module. Clients need to ensure that this
    method is called only once.
    Args:
        displayName (str or NoneType if not set, optional):
            a user-defined process name to use in the introspection
    """
    global _displayName
    _displayName = displayName
    # Register converters for introspection messages
    for clazz in [Hello, Bye]:
        converter = rsb.converter.ProtocolBufferConverter(messageClass=clazz)
        rsb.converter.registerGlobalConverter(converter, replaceExisting=True)
    # Hook into participant lifecycle notifications so the sender learns
    # about created and destroyed participants.
    rsb.participantCreationHook.addHandler(handleParticipantCreation)
    rsb.participantDestructionHook.addHandler(handleParticipantDestruction)
import threading
class FutureError(RuntimeError):
    """Base class for all errors related to :obj:`Future` handling."""
    # The explicit *args-forwarding __init__ methods were redundant:
    # RuntimeError.__init__ already accepts arbitrary arguments.
    pass


class FutureTimeout(FutureError):
    """Raised when waiting for the result of a :obj:`Future` times out."""
    pass


class FutureExecutionError(FutureError):
    """Raised when the operation represented by a :obj:`Future` failed."""
    pass
class Future(object):
    """
    Objects of this class represent the results of in-progress
    operations.
    Methods of this class allow checking the state of the represented
    operation, waiting for the operation to finish and retrieving the
    result of the operation.
    .. todo::
        Support Python's native future protocol?
    .. codeauthor:: jmoringe
    See Also:
        <http://docs.python.org/dev/library/concurrent.futures.html>_
    """
    def __init__(self):
        """
        Create a new :obj:`Future` object that represents an in-progress
        operation for which a result is not yet available.
        """
        self.__error = False
        # Explicit completion flag: the previous implementation used
        # "result is None" as "not done", which made a legitimate None
        # result block get() forever.
        self.__done = False
        self.__result = None
        self.__lock = threading.Lock()
        self.__condition = threading.Condition(lock=self.__lock)
    def isDone(self):
        """
        Check whether the represented operation is still in progress.
        Returns:
            bool:
                ``True`` if the represented operation finished successfully
                or failed.
        """
        with self.__lock:
            return self.__done
    done = property(isDone)
    def get(self, timeout=0):
        """
        Try to obtain and then return the result of the represented
        operation.
        If necessary, wait for the operation to complete, and then
        retrieve its result.
        Args:
            timeout (float, optional):
                The amount of time in seconds in which the operation has to
                complete; 0 (the default) waits indefinitely.
        Returns:
            The result of the operation if it did complete successfully.
        Raises:
            FutureExecutionError:
                If the operation represented by the Future object failed.
            FutureTimeout:
                If the result does not become available within the amount of
                time specified via ``timeout``.
        """
        import time
        with self.__lock:
            if timeout > 0:
                # Track an absolute deadline: the previous implementation
                # raised FutureTimeout after the first wakeup without a
                # result, so a spurious wakeup caused a premature timeout.
                deadline = time.time() + timeout
            while not self.__done:
                if timeout <= 0:
                    self.__condition.wait()
                else:
                    remaining = deadline - time.time()
                    if remaining <= 0:
                        raise FutureTimeout(
                            'Timeout while waiting for result; '
                            'Waited %s seconds.' % timeout)
                    self.__condition.wait(timeout=remaining)
            if self.__error:
                raise FutureExecutionError('Failed to execute operation: %s' %
                                           self.__result)
            return self.__result
    def set(self, result):
        """
        Set the result of the :obj:`Future` to ``result`` and wake all
        threads waiting for the result.
        Args:
            result:
                The result of the :obj:`Future` object.
        """
        with self.__lock:
            self.__result = result
            self.__done = True
            self.__condition.notify_all()
    def setError(self, message):
        """
        Mark the operation represented by the :obj:`Future` object as
        failed, set ``message`` as the error message and notify all
        threads waiting for the result.
        Args:
            message (str):
                An error message that explains why/how the operation failed.
        """
        with self.__lock:
            self.__result = message
            self.__error = True
            self.__done = True
            # notify_all, not notify: every waiting thread must observe
            # the failure, not just a single one (the original only woke
            # one waiter here).
            self.__condition.notify_all()
    def __str__(self):
        with self.__lock:
            if not self.__done:
                state = 'running'
            elif self.__error:
                state = 'failed'
            else:
                state = 'completed'
            return '<%s %s at 0x%x>' % (type(self).__name__, state, id(self))
    def __repr__(self):
        return str(self)
class DataFuture(Future):
    """
    Instances of this class are like ordinary :obj:`Future` s, the only
    difference being that the :obj:`get` method returns the payload of
    an :obj:`Event` object instead of the event itself.
    .. codeauthor:: jmoringe
    """
    def get(self, timeout=0):
        """Wait for the event (see :obj:`Future.get`) and return its
        payload."""
        # The final line also had unrelated build-metadata text fused
        # onto it, which made it a syntax error; it has been removed.
        event = super(DataFuture, self).get(timeout=timeout)
        return event.data
import abc
import threading
from rsb.util import getLoggerByClass
class Connector(object):
    """
    Superclass for transport-specific connector classes.
    .. codeauthor:: jwienke
    """
    # Python-2-style metaclass declaration (ignored on Python 3).
    __metaclass__ = abc.ABCMeta
    def __init__(self, wireType=None, **kwargs):
        """
        Creates a new connector with a serialization type wireType.
        Args:
            wireType (types.TypeType):
                the type of serialized data used by this connector.
        Raises:
            ValueError:
                if ``wireType`` is ``None``
        """
        self.__logger = getLoggerByClass(self.__class__)
        self.__wireType = None
        self.__scope = None
        if wireType is None:
            raise ValueError("Wire type must be a type object, None given")
        self.__logger.debug("Using specified converter map for wire-type %s",
                            wireType)
        self.__wireType = wireType
        # fails if still some arguments are left over
        super(Connector, self).__init__(**kwargs)
    def getWireType(self):
        """
        Returns the serialization type used for this connector.
        Returns:
            python serialization type
        """
        return self.__wireType
    wireType = property(getWireType)
    def getScope(self):
        # Current scope; None until assigned via setScope().
        return self.__scope
    def setScope(self, newValue):
        """
        Sets the scope this connector will receive events from to
        ``newValue``. Called before #activate.
        Args:
            newValue (rsb.Scope):
                scope of the connector
        """
        self.__scope = newValue
    scope = property(getScope, setScope)
    @abc.abstractmethod
    def activate(self):
        pass
    @abc.abstractmethod
    def deactivate(self):
        pass
    @abc.abstractmethod
    def setQualityOfServiceSpec(self, qos):
        pass
class InPushConnector(Connector):
    """
    Superclass for in-direction (that is, dealing with incoming
    events) connector implementations.
    .. codeauthor:: jmoringe
    """
    @abc.abstractmethod
    def filterNotify(self, filter, action):
        """
        Notifies the connector about a change of the filter
        configuration.
        Args:
            filter:
                the filter that was added or removed
            action:
                the action performed on the filter, e.g.
                ``rsb.filter.FilterAction.ADD`` or ``REMOVE``
        """
        pass
    @abc.abstractmethod
    def setObserverAction(self, action):
        """
        Sets the action used by the connector to notify about incoming
        events. The call to this method must be thread-safe.
        Args:
            action:
                action called if a new message is received from the connector.
                Must accept an :obj:`Event` as parameter.
                ``None`` may be passed to remove the current action.
        """
        pass
class InPullConnector(Connector):
    """
    Superclass for connectors that receive events using a pull style.
    .. codeauthor:: jwienke
    """
    @abc.abstractmethod
    def raiseEvent(self, block):
        # NOTE: despite the name, this method retrieves the next event;
        # it does not raise an exception.
        """
        Returns the next received event.
        Args:
            block (bool):
                If ``True``, wait for the next event, else immediately return,
                possibly ``None``.
        Returns:
            rsb.Event or ``None``
            The next event or ``None`` if ``block`` is ``False``.
        """
        pass
class OutConnector(Connector):
    """
    Superclass for out-direction (that is, dealing with outgoing
    events) connector implementations.
    .. codeauthor:: jmoringe
    """
    def handle(self, event):
        """
        Sends ``event`` and adapts its meta data instance with the
        actual send time.
        Args:
            event:
                event to send
        """
        # Abstract by convention: concrete connectors must override.
        raise NotImplementedError()
class ConverterSelectingConnector(object):
    """
    Mixin class for connector classes that store a map of converters and
    select converters for (de)serialization.
    The mixing class must provide a ``wireType`` attribute that matches
    the wire-type of the supplied converter selection strategy.
    .. codeauthor:: jmoringe
    """
    def __init__(self, converters, **kwargs):
        """
        Creates a new connector that uses the converters in
        ``converters`` to deserialize notification and/or serialize
        events.
        Args:
            converters (rsb.converter.ConverterSelectionStrategy):
                The converter selection strategy that should be used by the
                connector. If ``None``, the global map of converters for the
                wire-type of the connector is used.
        """
        # Sanity check: the strategy must produce data of our wire-type.
        assert converters.getWireType() == self.wireType
        self.__converterMap = converters
    def getConverterForDataType(self, dataType):
        """
        Returns a converter that can convert the supplied data to the
        wire-type.
        Args:
            dataType:
                the type of the object for which a suitable converter should
                returned.
        Returns:
            converter
        Raises:
            KeyError:
                no converter is available for the supplied data.
        """
        return self.__converterMap.getConverterForDataType(dataType)
    def getConverterForWireSchema(self, wireSchema):
        """
        Returns a suitable converter for the ``wireSchema``.
        Args:
            wireSchema (str):
                the wire-schema to or from which the returned converter should
                convert
        Returns:
            converter
        Raises:
            KeyError:
                no converter is available for the specified wire-schema.
        """
        return self.__converterMap.getConverterForWireSchema(wireSchema)
    def getConverterMap(self):
        """Return the converter selection strategy in use."""
        return self.__converterMap
    converterMap = property(getConverterMap)
class TransportFactory(object):
    """
    Interface for factories which are able to create :obj:`Connector` instances
    for a certain transport.
    """
    # Python-2-style metaclass declaration (ignored on Python 3).
    __metaclass__ = abc.ABCMeta
    @abc.abstractmethod
    def getName(self):
        """
        Returns the name representing this transport.
        Returns:
            str:
                name of the transport, non-empty
        """
        pass
    @abc.abstractmethod
    def isRemote(self):
        """
        Returns true is the transport performs remote communication.
        Returns:
            bool:
                does the transport perform remote communication?
        """
        pass
    @abc.abstractmethod
    def createInPushConnector(self, converters, options):
        """
        Creates a new instance of an :obj:`InPushConnector` for the represented
        transport.
        Args:
            converters (ConverterSelectionStrategy):
                the converters to use for this type
            options (dict of str):
                options for the new connector
        Returns:
            rsb.transport.InPushConnector:
                the new connector instance
        """
        pass
    @abc.abstractmethod
    def createInPullConnector(self, converters, options):
        """
        Creates a new instance of an :obj:`InPullConnector` for the represented
        transport.
        Args:
            converters (ConverterSelectionStrategy):
                the converters to use for this type
            options (dict of str):
                options for the new connector
        Returns:
            rsb.transport.InPullConnector:
                the new connector instance
        """
        pass
    @abc.abstractmethod
    def createOutConnector(self, converters, options):
        """
        Creates a new instance of an :obj:`OutConnector` for the represented
        transport.
        Args:
            converters (ConverterSelectionStrategy):
                the converters to use for this type
            options (dict of str):
                options for the new connector
        Returns:
            rsb.transport.OutConnector:
                the new connector instance
        """
        pass
# Global registry mapping transport names to TransportFactory instances;
# guarded by __factoryLock.
__factoriesByName = {}
__factoryLock = threading.Lock()
def registerTransport(factory):
    """
    Registers a new transport.
    Args:
        factory (rsb.transport.TransportFactory):
            the factory for the transport
    Raises:
        ValueError:
            there is already a transport registered with this name or the given
            factory argument is invalid
    """
    if factory is None:
        raise ValueError("None cannot be a TransportFactory")
    with __factoryLock:
        if factory.getName() in __factoriesByName:
            raise ValueError(
                "There is already a transport with name {name}".format(
                    name=factory.getName()))
        __factoriesByName[factory.getName()] = factory
def getTransportFactory(name):
    """
    Returns a ``TransportFactory`` instance for the transport with the given
    name.
    Args:
        name (str):
            name of the transport
    Returns:
        rsb.transport.TransportFactory:
            the ``TransportFactory`` instance
    Raises:
        KeyError:
            there is no transport with the given name
    """
    # The final line also had unrelated build-metadata text fused onto
    # it, which made it a syntax error; it has been removed.
    with __factoryLock:
        return __factoriesByName[name]
__author__ = "Jens Kasten <jens@kasten-edv.de>"
__status__ = "beta"
__date__ = "08 July 2014"
__all__ = ["Converter", "Paths", "PathsFilter", "PathsExclude", "PathsShow",
"Shields"]
import os
import sys
import logging
# Install a basic console handler showing level, logger name and line.
logging.basicConfig(format='%(levelname)s:%(name)s:line %(lineno)s: %(message)s')
# Module-level logger used throughout this file.
log = logging.getLogger(__name__)
try:
from rsbactools import config, converter, rsbac
except ImportError as error:
print(error)
sys.exit(-1)
# Path where ff config files located
RSBAC_FF_CONFIG_DIR = "/etc/rsbac/ff"
# maps given keywords form ff config to a class
RSBAC_FF_CLASS_MAPPER = {
    "paths": "Paths",
    "paths-exclude": "PathsExclude",
}
# internal values for module FF to display readable values
# (keys are the decimal flag bit values, as strings)
RSBAC_FF_FLAGS = {
    "1": "read_only",
    "2": "execute_only",
    "4": "search_only",
    "8": "write_only",
    "16": "secure_delete",
    "32": "no_execute",
    "64": "no_delete_or_rename",
    "128": "add_inherited",
    "256": "append_only",
    "512": "no_mount",
    "1024": "no_search",
}
# Flag values used when disabling (add_inherited) and when marking
# excluded paths read-only.
RSBAC_FF_ADD_INHERITED = 128
RSBAC_FF_READ_ONLY = 1
# an approximate counter of processed policies
POLICY_COUNTER = 0


def update_counter(file_dir_list=False):
    """Increase the policy counter.

    Adds ``len(file_dir_list)`` when a non-empty list is given,
    otherwise counts a single policy.
    """
    global POLICY_COUNTER
    POLICY_COUNTER += len(file_dir_list) if file_dir_list else 1


def get_counter():
    """Return the current value of the policy counter."""
    return POLICY_COUNTER
"""
RSBAC ff_flags has a special output values.
This Converter prepare the output and add human readable ff_flags values
to the output.
Expect rsbac output off
/proc: Rückgabewert: 128
/bin: Rückgabewert: 128
"""
def convert(output):
    """Parse ``rsbac`` attribute output into a dict keyed by path.

    Each non-empty line of ``output`` is expected to look like
    ``<path>: <text>: <ff_flags>``.  The numeric flag value is expanded
    via converter.Converter().int_to_bin into the set bits and mapped to
    the human readable names from RSBAC_FF_FLAGS.

    Returns:
        dict mapping path -> {"ff_flags": value, "ff_list": [(name, bit)]}
        or ``None`` when the output cannot be parsed (errors are logged).
    """
    result = {}
    # split into lines, then split each non-empty line on spaces
    output = [x.split(" ") for x in output.split("\n") if len(x) > 0]
    try:
        for x in output:
            # first token is "<path>:" -- strip the trailing colon
            path = x[0].split(":")[0]
            # the flag value is the last token on the line
            ff_value = x[-1]
            ff_list = converter.Converter().int_to_bin(ff_value, 32)
            # Use a distinct variable name here: the original re-used
            # "x", which shadows (and, under Python 2 comprehension
            # scoping, clobbers) the loop variable.
            ff_names = [(RSBAC_FF_FLAGS[str(bit)], bit) for bit in ff_list]
            result[path] = {
                "ff_flags": ff_value,
                "ff_list": ff_names
            }
        return result
    except IndexError as error:
        log.error("convert: %s" % error)
    except KeyError as error:
        log.error("Value not in RSBAC_FF_FLAGS: %s" % error)
def format_result(result):
    """Print the formatted flag listing for raw ``rsbac`` output.

    A path with a single flag is printed on one line; a path with
    several flags gets a header line with the combined value followed
    by one line per flag name.
    """
    output = convert(result)
    if not output:
        return
    for path, value in sorted(output.items()):
        if len(value["ff_list"]) == 1:
            print("%-54s%-20s% +4s" % (path, value["ff_list"][0][0],
                                       value["ff_list"][0][1]))
            continue
        print("%-54s%-20s% +4s" % (path, "", value["ff_flags"]))
        # Iterate the (name, bit) pairs directly instead of indexing
        # with range(len(...)).
        for flag_name, flag_bit in value["ff_list"]:
            print("%-54s%-20s% +4s" % ("", flag_name, flag_bit))
class Paths(object):
    """Handle the config key ``paths``."""
    def __init__(self, policies):
        """Argument policies must be a dict with the keyword ``paths``
        mapping file/directory paths to ff_flags values."""
        self.policies = policies["paths"]
        # Base arguments for the rsbac attr_set_fd/attr_get_fd calls.
        self.args = ["FF", "FD", "ff_flags"]
    def on(self):
        """Apply the configured ff_flags to every path.

        Returns:
            bool: True when all paths were processed, False on bad config.
        """
        try:
            for file_dir, ff_flags in self.policies.items():
                # list() around a list concatenation was redundant.
                args = self.args + [str(ff_flags), file_dir]
                result, error = rsbac.Rsbac().attr_set_fd(args)
                if error:
                    log.error(result)
                else:
                    update_counter()
            return True
        except KeyError as error:
            log.error(error)
        return False
    def off(self):
        """Reset every configured path to add_inherited.

        Returns:
            bool: True when all paths were processed, False on bad config.
        """
        try:
            for file_dir in self.policies.keys():
                args = self.args + [str(RSBAC_FF_ADD_INHERITED), file_dir]
                result, error = rsbac.Rsbac().attr_set_fd(args)
                if error:
                    log.error(result)
                else:
                    update_counter()
            return True
        except KeyError as error:
            log.error(error)
        return False
    def show(self):
        """Query and print the current flags for every configured path.

        Returns:
            bool: True when all paths were processed, False on bad config.
        """
        try:
            for file_dir in sorted(self.policies.keys()):
                args = self.args + [file_dir]
                result, error = rsbac.Rsbac().attr_get_fd(args)
                if error:
                    log.error(result)
                else:
                    update_counter()
                    format_result(result)
            return True
        except KeyError as error:
            log.error(error)
        return False
class PathsFilter(object):
    """Filter "exclude" and "exclude-startswith" entries out of a
    file/directory listing built from the configured paths."""

    def __init__(self, policies):
        """policies: dict mapping a directory path to its filter options."""
        self.policies = policies

    def get_paths(self):
        """Return the configured directory paths."""
        return self.policies.keys()

    def get_exclude(self, path):
        """Return the list of exact names to exclude for *path*, or []."""
        if "exclude" in self.policies[path]:
            if len(self.policies[path]["exclude"]) > 0:
                return self.policies[path]["exclude"]
        return []

    def get_exclude_startswith(self, path):
        """Return the list of name prefixes to exclude for *path*, or []."""
        if "exclude-startswith" in self.policies[path]:
            if len(self.policies[path]["exclude-startswith"]) > 0:
                return self.policies[path]["exclude-startswith"]
        return []

    def filter_exclude(self, list_to_filter, exclude):
        """Return list_to_filter without the entries named in exclude."""
        return list(set(list_to_filter) - set(exclude))

    def filter_exclude_startswith(self, list_to_filter, exclude):
        """Return list_to_filter without entries starting with any prefix.

        Works on a copy: the original version mutated the caller's list
        in place via aliasing.
        """
        return [entry for entry in list_to_filter
                if not any(entry.startswith(prefix) for prefix in exclude)]

    def get_filtered_list(self):
        """Return the filtered, joined paths for all configured directories.

        Unreadable directories are logged and skipped.
        """
        result = []
        for path in self.get_paths():
            try:
                entries = os.listdir(path)
            except OSError as error:
                log.error(error)
                continue
            exclude = self.get_exclude(path)
            if exclude:
                entries = self.filter_exclude(entries, exclude)
            exclude_startswith = self.get_exclude_startswith(path)
            if exclude_startswith:
                # BUG FIX: the filtered result was previously assigned to a
                # misspelled variable ("list_to_filer"), silently dropping
                # the exclude-startswith filter.
                entries = self.filter_exclude_startswith(entries,
                                                         exclude_startswith)
            result.extend([os.path.join(path, entry) for entry in entries])
        return result
class PathsExclude(object):
    """Handle the config key paths-exclude."""

    def __init__(self, policies):
        """Argument policies must be a dict with keyword paths-exclude."""
        self.policies = policies["paths-exclude"]
        self.args = ["FF", "FD", "ff_flags"]

    def on(self):
        """Set RSBAC_FF_READ_ONLY on every filtered path."""
        try:
            file_dir_list = PathsFilter(self.policies).get_filtered_list()
            # BUG FIX: copy self.args -- the original aliased the instance
            # list, so every call permanently appended flags and paths to it.
            args = list(self.args)
            args.append(str(RSBAC_FF_READ_ONLY))
            args.extend(file_dir_list)
            result, error = rsbac.Rsbac().attr_set_fd(args)
            if error:
                log.error(result)
            else:
                update_counter(file_dir_list)
            return True
        except KeyError as error:
            log.error(error)
        return False

    def off(self):
        """Reset every filtered path back to RSBAC_FF_ADD_INHERITED."""
        try:
            file_dir_list = PathsFilter(self.policies).get_filtered_list()
            # BUG FIX: copy self.args (see on()).
            args = list(self.args)
            args.append(str(RSBAC_FF_ADD_INHERITED))
            args.extend(file_dir_list)
            result, error = rsbac.Rsbac().attr_set_fd(args)
            if error:
                log.error(result)
            else:
                update_counter(file_dir_list)
            return True
        except KeyError as error:
            log.error(error)
        return False

    def show(self):
        """Fetch and print the FF attributes of every filtered path."""
        try:
            file_dir_list = PathsFilter(self.policies).get_filtered_list()
            # BUG FIX: copy self.args (see on()).
            args = list(self.args)
            args.extend(file_dir_list)
            result, error = rsbac.Rsbac().attr_get_fd(args)
            if error:
                log.error(result)
            else:
                update_counter(file_dir_list)
                format_result(result)
            return True
        except KeyError as error:
            log.error(error)
        return False
class Shields(object):
    """Load policy configuration files and dispatch actions to handlers.

    ``args`` maps config keys (with "_" normalized to "-") to the action to
    invoke (e.g. "on", "off", "show"); ``policies`` holds the merged content
    of all loaded JSON config files.
    """
    def __init__(self):
        # command line arguments keyed by normalized option name
        self.args = {}
        # merged policy values from all loaded config files
        self.policies = {}
    def add_args(self, args):
        """Merge arguments, normalizing "_" to "-" in every key."""
        for key, value in args.items():
            self.args[key.replace("_", "-")] = value
    def set_log_level(self, log_level):
        """Set the module logger's level."""
        log.setLevel(log_level)
    def get_log_level(self):
        """Return the module logger's effective level."""
        return log.getEffectiveLevel()
    def update_policies(self, config_file):
        """Load one JSON config file and merge its values into policies."""
        log.debug("loading config: %s" % config_file)
        jl = config.JsonLoader()
        jl.set_log_level(self.get_log_level())
        if jl.add_config(config_file) and jl.load_config():
            log.debug("add policies values")
            self.policies.update(jl.get_values())
            return True
        else:
            return False
    def get_policies(self, key=False):
        """Return the policies for *key*, all policies when no key is given,
        or False when the key is unknown.  Keys starting with "@" yield an
        empty dict (treated as reserved/meta keys).
        """
        try:
            if key and key.startswith("@"):
                return {}
            elif key:
                log.debug("Found key in policies: %s" % key)
                return self.policies[key]
            else:
                return self.policies
        except KeyError as error:
            log.debug("Skip key is not in policies: %s" % key)
            return False
    def load_configs(self, config_dir):
        """Load every *.json file from *config_dir* in sorted order."""
        try:
            for config_file in sorted(os.listdir(config_dir)):
                if config_file.endswith(".json"):
                    self.update_policies(os.path.join(config_dir, config_file))
            return True
        except OSError as error:
            log.warning(error)
            return False
    def run(self):
        """Dispatch each requested action to its mapped handler class.

        Returns True on success, False when a mapped action call fails.
        """
        # set all keys to status off full
        if self.args["full"]:
            for key in self.get_policies().keys():
                self.args[key] = self.args["full"]
        for key in self.args.keys():
            if not self.get_policies(key):
                continue
            if not self.args[key]:
                continue
            for i in self.get_policies(key).keys():
                try:
                    log.debug("call: %s()" % RSBAC_FF_CLASS_MAPPER[i])
                    # instantiate the handler class named in the mapper,
                    # looked up on this module, with the policies for *key*
                    obj = getattr(sys.modules[__name__],
                            RSBAC_FF_CLASS_MAPPER[i])(self.get_policies(key))
                except KeyError as error:
                    log.debug(error)
                    log.debug("No class mapper found for: %s" % i)
                    continue
                try:
                    log.debug("call: %s()" % self.args[key])
                    # invoke the requested action method (e.g. on/off/show)
                    getattr(obj, self.args[key])()
                except TypeError as error:
                    log.debug("failed: %s()" % self.args[key])
                    log.debug(error)
                    return False
        return True
    def get_policies_counter(self):
        """Return the module-level counter of processed entries."""
        return get_counter()
import argparse
class Converter(object):
    """Convert between integer flag values and their set-bit representations."""

    def int_to_bin(self, value, bitmask_len):
        """Return the powers of two whose bits are set in *value*.

        At most *bitmask_len* bit positions are scanned; when the value
        cannot be covered within that many bits, None is returned (the
        scan runs off the end without completing).
        """
        value = int(value)
        found = []
        for position in range(bitmask_len):
            weight = 1 << position
            if (value >> position) & 1:
                found.append(weight)
            # once the current weight reaches the value, all set bits
            # have been seen
            if weight >= value:
                return found

    def show_bits_from_int(self, value, as_dual=False):
        """Return the set-bit positions of *value* as a list, or its
        binary digit string (MSB first) when *as_dual* is true.

        Zero yields [] (or "" in dual mode).
        """
        value = int(value)
        if as_dual:
            return bin(value)[2:] if value else ""
        positions = []
        index = 0
        while value:
            if value & 1:
                positions.append(index)
            value >>= 1
            index += 1
        return positions

    def bin_to_int(self, value):
        """Return the list of set-bit positions of the integer *value*."""
        value = int(value)
        positions = []
        index = 0
        while value:
            if value & 1:
                positions.append(index)
            value >>= 1
            index += 1
        return positions
def main():
    """Command-line entry point: parse arguments and run the converter."""
    parser = argparse.ArgumentParser(description="Converter")
    parser.add_argument("-i", "--int-to-bin", type=int,
        metavar="integer count",
        help="Convert an integer to binary representation")
    parser.add_argument("-I", "--show-bits-from-int", type=int,
        metavar="integer count",
        help="Show the bit representation of an integer. The shown"
        " values are the places where bits are set.")
    parser.add_argument("-b", "--bin-to-int", type=str,
        metavar="dual string",
        help="Convert a binary to integer representation")
    # BUG FIX: this option previously used action="store_true", so passing
    # -B replaced the numeric default 32 with the boolean True and broke
    # int_to_bin's bitmask scan.
    parser.add_argument("-B", "--bitmask-length", type=int, default=32,
        help="The length of bitmask to use. Default length is 32.")
    parser.add_argument("-S", default=False, action="store_true",
        help="Show result as string")
    options = parser.parse_args()
    # show usage when no conversion was requested (previously --bin-to-int
    # alone still printed the usage text)
    if not options.int_to_bin and not options.show_bits_from_int \
            and not options.bin_to_int:
        parser.print_usage()
    c = Converter()
    if options.int_to_bin:
        print(c.int_to_bin(options.int_to_bin, options.bitmask_length))
    if options.show_bits_from_int:
        print(c.show_bits_from_int(options.show_bits_from_int, options.S))
    if options.bin_to_int:
        print(c.bin_to_int(options.bin_to_int))


if __name__ == "__main__":
    main()
# The rsconnect-python Library
This package is a library used by the [`rsconnect-jupyter`](https://github.com/rstudio/rsconnect-jupyter)
package to deploy Jupyter notebooks to RStudio Connect. It contains a full deployment
API so can also be used by other Python-based deployment tools. Other types of content
supported by RStudio Connect may also be deployed by this package, including WSGi-style
APIs and Dash applications.
> **Important:** Dash support in RStudio Connect is currently in beta. You should not
> rely on it for deployments in production.
A command-line deployment tool is also provided that can be used directly to deploy
Jupyter notebooks, Python APIs and apps. Content types not directly supported by the
CLI can also be deployed if they include a prepared `manifest.json` file. See
["Deploying R or Other Content"](#deploying-r-or-other-content) for details.
## Deploying Python Content to RStudio Connect
In addition to various kinds of R content, RStudio Connect also supports the
deployment of Jupyter notebooks, Python APIs (such as `flask`-based) and apps (such
as Dash). Much like deploying R content to RStudio Connect, there are some
caveats to understand when replicating your environment on the RStudio Connect server:
RStudio Connect insists on matching <MAJOR.MINOR> versions of Python. For example,
a server with only Python 3.5 installed will fail to match content deployed with
Python 3.4. Your administrator may also enable exact Python version matching which
will be stricter and require matching major, minor, and patch versions. For more
information see the [RStudio Connect Admin Guide chapter titled Python Version
Matching](https://docs.rstudio.com/connect/admin/python.html#python-version-matching).
### Installation
To install `rsconnect-python` from this repository:
```bash
git clone https://github.com/rstudio/rsconnect-python
cd rsconnect-python
python setup.py install
```
To install the current version directly from pip:
```bash
pip install rsconnect-python
```
### Using the rsconnect CLI
Here's an example command that deploys a Jupyter notebook to RStudio Connect.
```bash
rsconnect deploy notebook \
--server https://my.connect.server:3939 \
--api-key my-api-key \
my-notebook.ipynb
```
> **Note:** The examples here use long command line options, but there are short
> options (`-s`, `-k`, etc.) available also. Run `rsconnect deploy notebook --help`
> for details.
### Setting up `rsconnect` CLI auto-completion
If you would like to use your shell's tab completion support with the `rsconnect`
command, use the command below for the shell you are using.
#### `bash`
If you are using the `bash` shell, use this to enable tab completion.
```bash
#~/.bashrc
eval "$(_RSCONNECT_COMPLETE=source rsconnect)"
```
#### `zsh`
If you are using the `zsh` shell, use this to enable tab completion.
```zsh
#~/.zshrc
eval "$(_RSCONNECT_COMPLETE=source_zsh rsconnect)"
```
If you get `command not found: compdef`, you need to add the following lines to your
`.zshrc` before the completion setup:
```zsh
#~/.zshrc
autoload -Uz compinit
compinit
```
### Managing Server Information
The information used by the `rsconnect` command to communicate with an RStudio Connect
server can be tedious to repeat on every command. To help, the CLI supports the idea
of saving this information, making it usable by a simple nickname.
> **Important:** One item of information saved is the API key used to authenticate with
> RStudio Connect. Although the file where this information is saved is marked as
> accessible by the owner only, it's important to remember that the key is present
> in the file as plain text so care must be taken to prevent any unauthorized access
> to the server information file.
#### TLS Support and RStudio Connect
Usually, an RStudio Connect server will be set up to be accessed in a secure manner,
using the `https` protocol rather than simple `http`. If RStudio Connect is set up
with a self-signed certificate, you will need to include the `--insecure` flag on
all commands. If RStudio Connect is set up to require a client-side certificate chain,
you will need to include the `--cacert` option that points to your certificate
authority (CA) trusted certificates file. Both of these options can be saved along
with the URL and API Key for a server.
> **Note:** When certificate information is saved for the server, the specified file
> is read and its _contents_ are saved under the server's nickname. If the CA file's
> contents are ever changed, you will need to add the server information again.
See the [Network Options](#network-options) section for more details about these options.
#### Remembering Server Information
Use the `add` command to store information about an RStudio Connect server:
```bash
rsconnect add \
--api-key my-api-key \
--server https://my.connect.server:3939 \
--name myserver
```
> **Note:** The `rsconnect` CLI will verify that the server URL and API key
> are valid. If either is found not to be, no information will be saved.
If any of the access information for the server changes, simply rerun the
`add` command with the new information and it will replace the original
information.
Once the server's information is saved, you can refer to it by its nickname:
```bash
rsconnect deploy notebook --name myserver my-notebook.ipynb
```
If there is information for only one server saved, this will work too:
```bash
rsconnect deploy notebook my-notebook.ipynb
```
#### Listing Server Information
You can see the list of saved server information with:
```
rsconnect list
```
#### Removing Server Information
You can remove information about a server with:
```
rsconnect remove --name myserver
```
Removing may be done by its nickname (`--name`) or URL (`--server`).
### Verifying Server Information
You can verify that a URL refers to a running instance of RStudio Connect by using
the `details` command:
```bash
rsconnect details --server https://my.connect.server:3939
```
In this form, `rsconnect` will only tell you whether the URL given does, in fact, refer
to a running RStudio Connect instance. If you include a valid API key:
```bash
rsconnect details --server https://my.connect.server:3939 --api-key my-api-key
```
the tool will provide the version of RStudio Connect (if the server is configured to
divulge that information) and environmental information including versions of Python
that are installed on the server.
You can also use nicknames with the `details` command if you want to verify that the
stored information is still valid.
### Notebook Deployment Options
There are a variety of options available to you when deploying a Jupyter notebook to
RStudio Connect.
#### Including Extra Files
You can include extra files in the deployment bundle to make them available when your
notebook is run by the RStudio Connect server. Just specify them on the command line
after the notebook file:
```bash
rsconnect deploy notebook my-notebook.ipynb data.csv
```
#### Package Dependencies
If a `requirements.txt` file exists in the same directory as the notebook file, it will
be included in the bundle. It must specify the package dependencies needed to execute
the notebook. RStudio Connect will reconstruct the Python environment using the
specified package list.
If there is no `requirements.txt` file or the `--force-generate` option is specified,
the package dependencies will be determined from the current Python environment, or
from an alternative Python executable specified via the `--python` option or via the
`RETICULATE_PYTHON` environment variable:
```bash
rsconnect deploy notebook --python /path/to/python my-notebook.ipynb
```
You can see the packages list that will be included by running `pip freeze` yourself,
ensuring that you use the same Python that you use to run your Jupyter Notebook:
```bash
/path/to/python -m pip freeze
```
#### Static (Snapshot) Deployment
By default, `rsconnect` deploys the original notebook with all its source code. This
enables the RStudio Connect server to re-run the notebook upon request or on a schedule.
If you just want to publish an HTML snapshot of the notebook, you can use the `--static`
option. This will cause `rsconnect` to execute your notebook locally to produce the HTML
file, then publish the HTML file to the RStudio Connect server:
```bash
rsconnect deploy notebook --static my-notebook.ipynb
```
### Creating a Manifest for Future Deployment
You can create a `manifest.json` file for a Jupyter Notebook, then use that manifest
in a later deployment. Use the `write-manifest` command to do this.
The `write-manifest` command will also create a `requirements.txt` file, if it does
not already exist or the `--force-generate` option is specified. It will contain the
package dependencies from the current Python environment, or from an alternative
Python executable specified in the `--python` option or via the `RETICULATE_PYTHON`
environment variable.
Here is an example of the `write-manifest` command:
```bash
rsconnect write-manifest notebook my-notebook.ipynb
```
> **Note:** Manifests for static (pre-rendered) notebooks cannot be created.
### API/Application Deployment Options
There are a variety of options available to you when deploying a Python WSGi-style
API or Dash application. All options below apply equally to `api` and `dash`
sub-commands.
#### Including Extra Files
You can include extra files in the deployment bundle to make them available when your
API or application is run by the RStudio Connect server. Just specify them on the
command line after the API or application directory:
```bash
rsconnect deploy api flask-api/ data.csv
```
Since deploying an API or application starts at a directory level, there will be times
when some files under that directory subtree should not be included in the deployment
or manifest. Use the `--exclude` option to specify files to exclude. An exclusion may
be a glob pattern and the `--exclude` option may be repeated.
```bash
rsconnect deploy dash --exclude "workfiles/*" dash-app/ data.csv
```
You should always quote a glob pattern so that it will be passed to `rsconnect` as-is
instead of letting the shell expand it. If a file is specifically listed as an extra
file that also matches an exclusion pattern, the file will still be included in the
deployment (i.e., extra files trumps exclusions).
#### Package Dependencies
If a `requirements.txt` file exists in the API/application directory, it will be
included in the bundle. It must specify the package dependencies needed to execute
the API or application. RStudio Connect will reconstruct the Python environment using
the specified package list.
If there is no `requirements.txt` file or the `--force-generate` option is specified,
the package dependencies will be determined from the current Python environment, or
from an alternative Python executable specified via the `--python` option or via the
`RETICULATE_PYTHON` environment variable:
```bash
rsconnect deploy api --python /path/to/python my-api/
```
You can see the packages list that will be included by running `pip freeze` yourself,
ensuring that you use the same Python that you use to run your API or application:
```bash
/path/to/python -m pip freeze
```
### Creating a Manifest for Future Deployment
You can create a `manifest.json` file for an API or application, then use that
manifest in a later deployment. Use the `write-manifest` command to do this.
The `write-manifest` command will also create a `requirements.txt` file, if it does
not already exist or the `--force-generate` option is specified. It will contain
the package dependencies from the current Python environment, or from an alternative
Python executable specified in the `--python` option or via the `RETICULATE_PYTHON`
environment variable.
Here is an example of the `write-manifest` command:
```bash
rsconnect write-manifest api my-api/
```
### Deploying R or Other Content
You can deploy other content that has an existing RStudio Connect `manifest.json`
file. For example, if you download and unpack a source bundle from RStudio Connect,
you can deploy the resulting directory. The options are similar to notebook or
API/application deployment; see `rsconnect deploy manifest --help` for details.
Here is an example of the `deploy manifest` command:
```bash
rsconnect deploy manifest /path/to/manifest.json
```
> **Note:** In this case, the existing content is deployed as-is. Python environment
> inspection and notebook pre-rendering, if needed, are assumed to be done already
> and represented in the manifest.
The argument to `deploy manifest` may also be a directory so long as that directory
contains a `manifest.json` file.
If you have R content but don't have a `manifest.json` file, you can use the RStudio
IDE to create the manifest. See the help for the `rsconnect::writeManifest` R function:
```r
install.packages('rsconnect')
library(rsconnect)
?rsconnect::writeManifest
```
### Options for All Types of Deployments
These options apply to any type of content deployment.
#### Title
The title of the deployed content is, by default, derived from the filename. For
example, if you deploy `my-notebook.ipynb`, the title will be `my-notebook`. To change
this, use the `--title` option:
```
rsconnect deploy notebook --title "My Notebook" my-notebook.ipynb
```
When using `rsconnect deploy api` or `rsconnect deploy dash`, the title is derived
from the directory containing the API or application.
When using `rsconnect deploy manifest`, the title is derived from the primary
filename referenced in the manifest.
### Network Options
When specifying information that `rsconnect` needs to be able to interact with RStudio
Connect, you can tailor how transport layer security is performed.
#### TLS/SSL Certificates
RStudio Connect servers can be configured to use TLS/SSL. If your server's certificate
is trusted by your Jupyter Notebook server, API client or user's browser, then you
don't need to do anything special. You can test this out with the `details` command:
```bash
rsconnect details --api-key my-api-key --server https://my.connect.server:3939
```
If this fails with a TLS Certificate Validation error, then you have two options.
* Provide the Root CA certificate that is at the root of the signing chain for your
RStudio Connect server. This will enable `rsconnect` to securely validate the
server's TLS certificate.
```bash
rsconnect details \
--api-key my-api-key \
--server https://my.connect.server:3939 \
--cacert /path/to/certificate.pem
```
* RStudio Connect is in "insecure mode". This disables TLS certificate verification,
which results in a less secure connection.
```bash
rsconnect add \
--api-key my-api-key \
--server https://my.connect.server:3939 \
--insecure
```
Once you work out the combination of options that allow you to successfully work with
an instance of RStudio Connect, you'll probably want to use the `add` command to have
`rsconnect` remember those options and allow you to just use a nickname.
### Updating a Deployment
If you deploy a file again to the same server, `rsconnect` will update the previous
deployment. This means that you can keep running `rsconnect deploy notebook my-notebook.ipynb`
as you develop new versions of your notebook. The same applies to other Python content
types.
#### Forcing a New Deployment
To bypass this behavior and force a new deployment, use the `--new` option:
```bash
rsconnect deploy dash --new my-app/
```
#### Updating a Different Deployment
If you want to update an existing deployment but don't have the saved deployment data,
you can provide the app's numeric ID or GUID on the command line:
```bash
rsconnect deploy notebook --app-id 123456 my-notebook.ipynb
```
You must be the owner of the target deployment, or a collaborator with permission to
change the content. The type of content (static notebook, notebook with source code,
API, or application) must match the existing deployment.
> **Note:** There is no confirmation required to update a deployment. If you do so
> accidentally, use the "Source Versions" dialog in the RStudio Connect dashboard to
> activate the previous version and remove the erroneous one.
##### Finding the App ID
The App ID associated with a piece of content you have previously deployed from the
`rsconnect` command line interface can be found easily by querying the deployment
information using the `info` command. For more information, see the
[Showing the Deployment Information](#showing-the-deployment-information) section.
If the content was deployed elsewhere or `info` does not return the correct App ID,
but you can open the content on RStudio Connect, find the content and open it in a
browser. The URL in your browser's location bar will contain `#/apps/NNN` where `NNN`
is your App ID. The GUID identifier for the app may be found on the **Info** tab for
the content in the RStudio Connect UI.
#### Showing the Deployment Information
You can see the information that the `rsconnect` command has saved for the most recent
deployment with the `info` command:
```bash
rsconnect info my-notebook.ipynb
```
If you have deployed to multiple servers, the most recent deployment information for
each server will be shown. This command also displays the path to the file where the
deployment data is stored.
## Stored Information Files
Stored information files are stored in a platform-specific directory:
| Platform | Location |
| -------- | ------------------------------------------------------------------ |
| Mac | `$HOME/Library/Application Support/rsconnect-python/` |
| Linux | `$HOME/.rsconnect-python/` or `$XDG_CONFIG_HOME/rsconnect-python/` |
| Windows | `$APPDATA/rsconnect-python` |
Remembered server information is stored in the `servers.json` file in that directory.
### Deployment Data
After a deployment is completed, information about the deployment is saved
to enable later redeployment. This data is stored alongside the deployed file,
in an `rsconnect-python` subdirectory, if possible. If that location is not writable
during deployment, then the deployment data will be stored in the global configuration
directory specified above.
| /rsconnect_python-1.4.4.1.tar.gz/rsconnect_python-1.4.4.1/README.md | 0.654011 | 0.805326 | README.md | pypi |
import hashlib
import io
import json
import os
import subprocess
import tarfile
import tempfile
from os.path import basename, dirname, exists, isdir, join, relpath, splitext
from rsconnect.log import logger
from rsconnect.models import AppModes, GlobSet
# From https://github.com/rstudio/rsconnect/blob/485e05a26041ab8183a220da7a506c9d3a41f1ff/R/bundle.R#L85-L88
# noinspection SpellCheckingInspection
directories_to_ignore = ['rsconnect/', 'rsconnect-python/', 'packrat/', '.svn/', '.git/', '.Rproj.user/']
# noinspection SpellCheckingInspection
def make_source_manifest(entrypoint, environment, app_mode):
    """Build a manifest dict describing a source deployment.

    :param entrypoint: the file that the server should execute.
    :param environment: dict describing the Python environment (as produced
        by environment inspection): package manager, locale, versions, etc.
    :param app_mode: the AppModes member for this deployment.
    :return: the manifest dictionary with an empty files section.
    """
    pkg_manager = environment['package_manager']
    # noinspection SpellCheckingInspection
    return {
        "version": 1,
        "metadata": {
            "appmode": app_mode.name(),
            "entrypoint": entrypoint,
        },
        "locale": environment['locale'],
        "python": {
            "version": environment['python'],
            "package_manager": {
                "name": pkg_manager,
                "version": environment[pkg_manager],
                "package_file": environment['filename'],
            },
        },
        "files": {},
    }
def manifest_add_file(manifest, rel_path, base_dir):
    """Record *rel_path* in the manifest files section with its checksum.

    The path must be relative to *base_dir* (the notebook directory).
    """
    full_path = join(base_dir, rel_path)
    manifest['files'][rel_path] = {'checksum': file_checksum(full_path)}
def manifest_add_buffer(manifest, filename, buf):
    """Record an in-memory buffer in the manifest files section."""
    entry = {'checksum': buffer_checksum(buf)}
    manifest['files'][filename] = entry
def file_checksum(path):
    """Calculate the md5 hex digest of the file at *path*.

    Reads in 64 KiB chunks so large files are not loaded into memory.
    """
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(64 * 1024), b''):
            digest.update(chunk)
    return digest.hexdigest()
def buffer_checksum(buf):
    """Calculate the md5 hex digest of a buffer (str or bytes)."""
    digest = hashlib.md5()
    # str input is encoded to UTF-8; bytes pass through unchanged
    data = buf.encode('utf-8') if hasattr(buf, 'encode') else buf
    digest.update(data)
    return digest.hexdigest()
def to_bytes(s):
    """Return *s* encoded as UTF-8 bytes; bytes-like input passes through."""
    encoder = getattr(s, 'encode', None)
    if encoder is None:
        return s
    return encoder('utf-8')
def bundle_add_file(bundle, rel_path, base_dir):
    """Add the file at base_dir/rel_path to the tarball.

    The archive entry name is the relative path itself.
    """
    logger.debug('adding file: %s', rel_path)
    bundle.add(join(base_dir, rel_path), arcname=rel_path)
def bundle_add_buffer(bundle, filename, contents):
    """Add an in-memory buffer (str or bytes) to the tarball as *filename*."""
    logger.debug('adding file: %s', filename)
    payload = io.BytesIO(to_bytes(contents))
    info = tarfile.TarInfo(filename)
    info.size = len(payload.getvalue())
    bundle.addfile(info, payload)
def write_manifest(relative_dir, nb_name, environment, output_dir):
    """Create a manifest for source publishing the specified notebook.

    The manifest is written to `manifest.json` in the output directory.
    The environment file is written as well, unless its contents came from
    an existing file on disk.
    Returns two lists of relative paths: (created, skipped).
    """
    created, skipped = [], []

    manifest_filename = 'manifest.json'
    manifest = make_source_manifest(nb_name, environment, AppModes.JUPYTER_NOTEBOOK)
    manifest_file = join(output_dir, manifest_filename)
    manifest_relative_path = join(relative_dir, manifest_filename)

    # never overwrite a pre-existing manifest
    if exists(manifest_file):
        skipped.append(manifest_relative_path)
    else:
        with open(manifest_file, 'w') as f:
            f.write(json.dumps(manifest, indent=2))
        created.append(manifest_relative_path)
        logger.debug('wrote manifest file: %s', manifest_file)

    environment_filename = environment['filename']
    environment_file = join(output_dir, environment_filename)
    environment_relative_path = join(relative_dir, environment_filename)

    # an environment read from a file already exists on disk
    if environment['source'] == 'file':
        skipped.append(environment_relative_path)
    else:
        with open(environment_file, 'w') as f:
            f.write(environment['contents'])
        created.append(environment_relative_path)
        logger.debug('wrote environment file: %s', environment_file)

    return created, skipped
def list_files(base_dir, include_sub_dirs, walk=os.walk):
    """List the files under *base_dir*, relative to it.

    When *include_sub_dirs* is true, subdirectories are traversed
    (checkpoint and VCS directories are skipped); otherwise only the top
    level is listed.  *walk* is an injection point for testing.
    """
    skip_dirs = ['.ipynb_checkpoints', '.git']
    paths = []
    for root, sub_dirs, files in walk(base_dir):
        if include_sub_dirs:
            # prune in place so walk() does not descend into skipped dirs
            sub_dirs[:] = [d for d in sub_dirs if d not in skip_dirs]
        else:
            # tell walk not to traverse any subdirectories
            sub_dirs[:] = []
        for filename in files:
            paths.append(relpath(join(root, filename), base_dir))
    return paths
def make_notebook_source_bundle(file, environment, extra_files=None):
    """Create a bundle containing the specified notebook and python environment.

    Returns a file-like object containing the bundle tarball.
    """
    extra_files = extra_files or []
    base_dir = dirname(file)
    nb_name = basename(file)

    manifest = make_source_manifest(nb_name, environment, AppModes.JUPYTER_NOTEBOOK)
    manifest_add_file(manifest, nb_name, base_dir)
    manifest_add_buffer(manifest, environment['filename'], environment['contents'])

    # never bundle duplicates of the files added explicitly below
    reserved = {nb_name, environment['filename'], 'manifest.json'}
    extra_files = sorted(set(extra_files) - reserved)
    for rel_path in extra_files:
        manifest_add_file(manifest, rel_path, base_dir)
    logger.debug('manifest: %r', manifest)

    bundle_file = tempfile.TemporaryFile(prefix='rsc_bundle')
    with tarfile.open(mode='w:gz', fileobj=bundle_file) as bundle:
        # add the manifest first in case we want to partially untar the bundle for inspection
        bundle_add_buffer(bundle, 'manifest.json', json.dumps(manifest, indent=2))
        bundle_add_buffer(bundle, environment['filename'], environment['contents'])
        bundle_add_file(bundle, nb_name, base_dir)
        for rel_path in extra_files:
            bundle_add_file(bundle, rel_path, base_dir)

    bundle_file.seek(0)
    return bundle_file
def make_html_manifest(filename):
    """Return a minimal manifest for a static (pre-rendered) HTML file."""
    # noinspection SpellCheckingInspection
    metadata = {
        "appmode": "static",
        "primary_html": filename,
    }
    return {"version": 1, "metadata": metadata}
def make_notebook_html_bundle(filename, python, check_output=subprocess.check_output):
    """Render the notebook to HTML and bundle it as static content.

    :param filename: path of the notebook to render.
    :param python: the python executable whose jupyter/nbconvert is used.
    :param check_output: injection point for subprocess.check_output (tests).
    :return: a file-like object containing the bundle tarball.
    :raises subprocess.CalledProcessError: when nbconvert fails.
    """
    # noinspection SpellCheckingInspection
    cmd = [
        python, '-m', 'jupyter',
        'nbconvert', '--execute', '--stdout',
        '--log-level', 'ERROR', filename
    ]
    # nbconvert failures propagate to the caller; the original wrapped this
    # call in a try/except that only re-raised (a no-op).
    output = check_output(cmd)

    nb_name = basename(filename)
    filename = splitext(nb_name)[0] + '.html'

    bundle_file = tempfile.TemporaryFile(prefix='rsc_bundle')

    with tarfile.open(mode='w:gz', fileobj=bundle_file) as bundle:
        bundle_add_buffer(bundle, filename, output)

        # manifest
        manifest = make_html_manifest(filename)
        bundle_add_buffer(bundle, 'manifest.json', json.dumps(manifest))

    # rewind file pointer
    bundle_file.seek(0)
    return bundle_file
def keep_manifest_specified_file(relative_path):
    """
    A helper to see if the relative path given, which is assumed to have come
    from a manifest.json file, should be kept or ignored.

    :param relative_path: the relative path name to check.
    :return: True, if the path should kept or False, if it should be ignored.
    """
    return not any(
        relative_path.startswith(prefix) for prefix in directories_to_ignore
    )
def read_manifest_file(manifest_path):
    """
    Read a manifest's content from its file.  The content is provided as both
    a raw string and a parsed dictionary.

    :param manifest_path: the path to the file to read.
    :return: the parsed manifest data and the raw file content as a string.
    """
    with open(manifest_path, 'rb') as manifest_file:
        raw_manifest = manifest_file.read().decode('utf-8')
    return json.loads(raw_manifest), raw_manifest
def make_manifest_bundle(manifest_path):
    """Create a bundle, given a manifest.

    :return: a file-like object containing the bundle tarball.
    """
    manifest, raw_manifest = read_manifest_file(manifest_path)
    base_dir = dirname(manifest_path)
    # Keep only the manifest-listed files we care about; 'manifest.json'
    # itself is skipped because it is written from the raw content below.
    files = [
        path for path in manifest.get('files', {}).keys()
        if keep_manifest_specified_file(path) and path != 'manifest.json'
    ]
    bundle_file = tempfile.TemporaryFile(prefix='rsc_bundle')
    with tarfile.open(mode='w:gz', fileobj=bundle_file) as bundle:
        # add the manifest first in case we want to partially untar the
        # bundle for inspection
        bundle_add_buffer(bundle, 'manifest.json', raw_manifest)
        for relative_path in files:
            bundle_add_file(bundle, relative_path, base_dir)
    # rewind file pointer
    bundle_file.seek(0)
    return bundle_file
def create_glob_set(directory, excludes):
    """
    Takes a list of glob strings and produces a GlobSet for path matching.

    **Note:** we don't use Python's glob support because it takes way too
    long to run when large file trees are involved in conjunction with the
    '**' pattern.

    :param directory: the directory the globs are relative to.
    :param excludes: the list of globs to expand.
    :return: a GlobSet ready for path matching.
    """
    patterns = []
    for pattern in (excludes or []):
        file_pattern = join(directory, pattern)
        # Special handling, if they gave us just a dir then "do the right
        # thing" and exclude everything beneath it.
        if isdir(file_pattern):
            file_pattern = join(file_pattern, '**/*')
        patterns.append(file_pattern)
    return GlobSet(patterns)
def _create_api_file_list(directory, requirements_file_name, extra_files=None, excludes=None):
    """
    Builds a full list of files under the given directory that should be included
    in a manifest or bundle. Extra files and excludes are relative to the given
    directory and work as you'd expect.
    :param directory: the directory to walk for files.
    :param requirements_file_name: the name of the requirements file for the current
    Python environment.
    :param extra_files: a sequence of any extra files to include in the bundle.
    :param excludes: a sequence of glob patterns that will exclude matched files.
    :return: the list of relevant files, relative to the given directory.
    """
    # Don't let these top-level files be added via the extra files list.
    extra_files = extra_files or []
    skip = [requirements_file_name, 'manifest.json']
    extra_files = sorted(list(set(extra_files) - set(skip)))
    # Don't include these top-level files.
    excludes = list(excludes) if excludes else []
    excludes.append('manifest.json')
    excludes.append(requirements_file_name)
    glob_set = create_glob_set(directory, excludes)
    file_list = []
    for subdir, dirs, files in os.walk(directory):
        for file in files:
            abs_path = os.path.join(subdir, file)
            rel_path = os.path.relpath(abs_path, directory)
            # A file is kept when it is not in an ignored directory AND
            # either it was explicitly requested (extra_files overrides the
            # exclude globs) or it doesn't match any exclude pattern.
            if keep_manifest_specified_file(rel_path) and (rel_path in extra_files or not glob_set.matches(abs_path)):
                file_list.append(rel_path)
            # Don't add extra files more than once.
            if rel_path in extra_files:
                extra_files.remove(rel_path)
    # Any extra files left here were not found on disk during the walk but
    # are still appended to the result — presumably intentional so callers
    # see them listed; TODO confirm this is the desired behavior.
    for rel_path in extra_files:
        file_list.append(rel_path)
    return sorted(file_list)
def make_api_manifest(directory, entry_point, app_mode, environment, extra_files=None, excludes=None):
    """
    Makes a manifest for an API.

    :param directory: the directory containing the files to deploy.
    :param entry_point: the main entry point for the API.
    :param app_mode: the app mode to use.
    :param environment: the Python environment information.
    :param extra_files: a sequence of any extra files to include in the bundle.
    :param excludes: a sequence of glob patterns that will exclude matched files.
    :return: the manifest and a list of the files involved.
    """
    manifest = make_source_manifest(entry_point, environment, app_mode)
    manifest_add_buffer(manifest, environment['filename'], environment['contents'])
    relevant_files = _create_api_file_list(
        directory, environment['filename'], extra_files, excludes)
    for relative_path in relevant_files:
        manifest_add_file(manifest, relative_path, directory)
    return manifest, relevant_files
def make_api_bundle(directory, entry_point, app_mode, environment, extra_files=None, excludes=None):
    """
    Create an API bundle, given a directory path and a manifest.

    :param directory: the directory containing the files to deploy.
    :param entry_point: the main entry point for the API.
    :param app_mode: the app mode to use.
    :param environment: the Python environment information.
    :param extra_files: a sequence of any extra files to include in the bundle.
    :param excludes: a sequence of glob patterns that will exclude matched files.
    :return: a file-like object containing the bundle tarball.
    """
    manifest, relevant_files = make_api_manifest(
        directory, entry_point, app_mode, environment, extra_files, excludes)
    bundle_file = tempfile.TemporaryFile(prefix='rsc_bundle')
    with tarfile.open(mode='w:gz', fileobj=bundle_file) as bundle:
        # The manifest goes in first so a partial untar can inspect it.
        bundle_add_buffer(bundle, 'manifest.json', json.dumps(manifest, indent=2))
        bundle_add_buffer(bundle, environment['filename'], environment['contents'])
        for relative_path in relevant_files:
            bundle_add_file(bundle, relative_path, directory)
    # rewind file pointer so callers can read the tarball from the start
    bundle_file.seek(0)
    return bundle_file
import datetime
import json
import locale
import os
import re
import subprocess
import sys
version_re = re.compile(r'\d+\.\d+(\.\d+)?')
exec_dir = os.path.dirname(sys.executable)
class EnvironmentException(Exception):
    """Raised when Python environment introspection fails."""
    pass
def detect_environment(dirname, force_generate=False, compatibility_mode=False, conda=None):
    """Determine the python dependencies in the environment.

    `pip freeze` will be used to introspect the environment.

    Returns a dictionary containing the package spec filename
    and contents if successful, or a dictionary containing 'error'
    on failure.

    :param: dirname Directory name
    :param: force_generate Force the generation of an environment
    :param: compatibility_mode Force the usage of `pip freeze` for older
            connect versions which do not support conda.
    """
    if not compatibility_mode:
        conda = get_conda(conda)
    if conda:
        # Prefer an existing environment.yml unless generation is forced.
        result = (None if force_generate
                  else output_file(dirname, 'environment.yml', 'conda'))
        if not result:
            result = conda_env_export(conda)
    else:
        # Prefer an existing requirements.txt unless generation is forced.
        result = (None if force_generate
                  else output_file(dirname, 'requirements.txt', 'pip'))
        if not result:
            result = pip_freeze()
    if result is not None:
        result['python'] = get_python_version()
        result['pip'] = get_version('pip')
        if conda:
            result['conda'] = get_conda_version(conda)
        result['locale'] = get_default_locale()
    return result
def get_conda(conda=None):
    """Locate the conda executable if we appear to be in a conda environment.

    :param conda: an explicit path to conda, or None to auto-detect.
    :returns: the conda path (explicit value wins over CONDA_EXE), or None
        when no path was given and CONDA_PREFIX is not set.
    """
    in_conda_env = os.environ.get('CONDA_PREFIX', None) is not None
    if conda is None and not in_conda_env:
        return None
    return conda or os.environ.get('CONDA_EXE', None)
def get_python_version():
    """Return the running interpreter's version as 'major.minor.micro'."""
    major, minor, micro = sys.version_info[:3]
    return "{0}.{1}.{2}".format(major, minor, micro)
def get_conda_version(conda):
    """Return the version string reported by `conda -V`.

    :param conda: path to the conda executable.
    :raises: EnvironmentException if the version cannot be determined.
    """
    try:
        args = [conda, '-V']
        process = subprocess.Popen(
            args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            universal_newlines=True)
        stdout, stderr = process.communicate()
        # conda may print its version on either stream
        match = version_re.search(stdout or stderr)
        if match:
            return match.group()
        # NOTE: raising inside the try is intentional (preserved behavior) —
        # the message is re-wrapped by the handler below.
        msg = "Failed to get version of conda from the output of: %s - standard output: %s; standard error: %s" % \
            (' '.join(args), stdout, stderr)
        raise EnvironmentException(msg)
    except Exception as exception:
        raise EnvironmentException("Error getting conda version: %s" % str(exception))
def get_default_locale(locale_source=locale.getdefaultlocale):
    """Return the default locale as 'language.encoding', or '' if unknown.

    :param locale_source: callable returning a (language, encoding) pair;
        defaults to locale.getdefaultlocale (injectable for testing).
    """
    pieces = [piece or '' for piece in locale_source()]
    joined = '.'.join(pieces)
    # Both parts missing yields a lone '.', which is normalized to ''.
    return '' if joined == '.' else joined
def get_version(module):
    """Return the version reported by `python -m <module> --version`.

    :param module: the module name to query (e.g. 'pip').
    :raises: EnvironmentException if the version cannot be determined.
    """
    try:
        args = [sys.executable, '-m', module, '--version']
        process = subprocess.Popen(
            args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            universal_newlines=True)
        stdout, stderr = process.communicate()
        match = version_re.search(stdout)
        if match:
            return match.group()
        # NOTE: raising inside the try is intentional (preserved behavior) —
        # the message is re-wrapped by the handler below.
        msg = "Failed to get version of '%s' from the output of: %s" % (module, ' '.join(args))
        raise EnvironmentException(msg)
    except Exception as exception:
        raise EnvironmentException("Error getting '%s' version: %s" % (module, str(exception)))
def output_file(dirname, filename, package_manager):
    """Read an existing package spec file, filtering out rsconnect lines.

    :param dirname: directory containing the spec file.
    :param filename: name of the spec file to read.
    :param package_manager: the package manager label to record.
    :returns: a dictionary with the filename, filtered contents, source
        and package manager, or None if the file does not exist.
    :raises: EnvironmentException if reading the file fails.
    """
    try:
        path = os.path.join(dirname, filename)
        if not os.path.exists(path):
            return None
        with open(path, 'r') as spec_file:
            raw = spec_file.read()
        # Drop any line that mentions rsconnect (e.g. rsconnect-python
        # itself) so the deployed environment does not pin it.
        contents = '\n'.join(
            line for line in raw.split('\n') if 'rsconnect' not in line)
        return {
            'filename': filename,
            'contents': contents,
            'source': 'file',
            'package_manager': package_manager,
        }
    except Exception as exception:
        raise EnvironmentException('Error reading %s: %s' % (filename, str(exception)))
def pip_freeze():
    """Inspect the environment using `pip freeze`.

    Returns a dictionary containing the filename
    (always 'requirements.txt') and the frozen package list,
    with rsconnect entries removed and a generation header prepended.

    :raises: EnvironmentException if pip cannot be run or exits non-zero.
    """
    try:
        process = subprocess.Popen(
            [sys.executable, '-m', 'pip', 'freeze'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            universal_newlines=True)
        pip_stdout, pip_stderr = process.communicate()
        pip_status = process.returncode
    except Exception as exception:
        raise EnvironmentException('Error during pip freeze: %s' % str(exception))
    if pip_status != 0:
        msg = pip_stderr or ('exited with code %d' % pip_status)
        raise EnvironmentException('Error during pip freeze: %s' % msg)
    # Drop any line mentioning rsconnect so the package doesn't pin itself.
    filtered = '\n'.join(
        line for line in pip_stdout.split('\n') if 'rsconnect' not in line)
    header = ('# requirements.txt generated by rsconnect-python on '
              + str(datetime.datetime.utcnow()) + '\n')
    return {
        'filename': 'requirements.txt',
        'contents': header + filtered,
        'source': 'pip_freeze',
        'package_manager': 'pip',
    }
def conda_env_export(conda):
    """Inspect the environment using `conda env export`.

    :param: conda path to the `conda` tool
    :return: dictionary containing the key "environment.yml" and the data
        inside
    :raises: EnvironmentException if conda cannot be run or exits non-zero.
    """
    try:
        process = subprocess.Popen(
            [conda, 'env', 'export'], stdout=subprocess.PIPE,
            stderr=subprocess.PIPE, universal_newlines=True)
        conda_stdout, conda_stderr = process.communicate()
        conda_status = process.returncode
    except Exception as exception:
        raise EnvironmentException('Error during conda env export: %s' % str(exception))
    if conda_status != 0:
        msg = conda_stderr or ('exited with code %d' % conda_status)
        raise EnvironmentException('Error during conda env export: %s' % msg)
    return {
        'filename': 'environment.yml',
        'contents': conda_stdout,
        'source': 'conda_env_export',
        'package_manager': 'conda'
    }
def main():
    """Command-line entry point: print environment info for a directory.

    Usage: ``environment.py [-fc] DIRECTORY`` — the optional flag string may
    contain 'f' (force generation) and/or 'c' (compatibility mode). Writes a
    JSON document (or a JSON error object) to stdout.
    """
    try:
        if len(sys.argv) < 2:
            raise EnvironmentException('Usage: %s [-fc] DIRECTORY' % sys.argv[0])
        # directory is always the last argument; flags, if any, come first
        directory = sys.argv[-1]
        flags = sys.argv[1] if len(sys.argv) > 2 else ''
        result = detect_environment(directory, 'f' in flags, 'c' in flags)
    except EnvironmentException as exception:
        result = dict(error=str(exception))
    json.dump(result, sys.stdout, indent=4)

if __name__ == '__main__':
    main()
import copy
from sushy.resources import base
from rsd_lib import utils as rsd_lib_utils
class DynamicField(base.Field):
    """Base class for fields consisting of several dynamic attributes."""
    def __init__(self, *args, **kwargs):
        super(DynamicField, self).__init__(*args, **kwargs)
        # Reserved for lazily-discovered sub-fields; not populated here.
        self._subfields = None
    def _load(self, body, resource, nested_in=None):
        """Load the all attributes.
        :param body: parent JSON body.
        :param resource: parent resource.
        :param nested_in: parent resource name (for error reporting only).
        :returns: a new object with sub-fields attached to it.
        """
        nested_in = (nested_in or []) + self._path
        value = super(DynamicField, self)._load(body, resource)
        if value is None:
            return None
        # We need a new instance, as this method is called a singleton instance
        # that is attached to a class (not instance) of a resource or another
        # CompositeField. We don't want to end up modifying this instance.
        instance = copy.copy(self)
        # Attach each JSON attribute to the copy under a snake_case name,
        # e.g. "MemberId" becomes instance.member_id.
        for name, attr in value.items():
            setattr(
                instance,
                rsd_lib_utils.camelcase_to_underscore_joined(name),
                attr,
            )
        return instance
class StatusField(base.CompositeField):
    """This Field describes the status of a resource and its children."""
    # Parses the Health/HealthRollup/State members of a "Status" JSON object.
    health = base.Field("Health")
    """Represents health of resource w/o considering its dependent resources"""
    health_rollup = base.Field("HealthRollup")
    """Represents health state of resource and its dependent resources"""
    state = base.Field("State")
    """Indicates the known state of the resource, such as if it is enabled."""
class ReferenceableMemberField(base.ListField):
    """A list field whose entries carry a per-member identifier."""
    member_id = base.Field("MemberId")
    """This is the identifier for the member within the collection."""
class LocationField(base.CompositeField):
    """Composite field describing a single resource location."""
    info = base.Field("Info")
    """This indicates the location of the resource."""
    info_format = base.Field("InfoFormat")
    """This represents the format of the Info property."""
class LocationCollectionField(ReferenceableMemberField):
    """List of locations; each entry also carries an inherited MemberId."""
    info = base.Field("Info")
    """This indicates the location of the resource."""
    info_format = base.Field("InfoFormat")
    """This represents the format of the Info property."""
class IdentifierField(base.CompositeField):
    """Composite field holding a durable (persistent) name identifier."""
    durable_name = base.Field("DurableName")
    """This indicates the world wide, persistent name of the resource."""
    durable_name_format = base.Field("DurableNameFormat")
    """This represents the format of the DurableName property."""
class IdentifierCollectionField(ReferenceableMemberField):
    """List of durable identifiers; each entry carries an inherited MemberId."""
    durable_name = base.Field("DurableName")
    """This indicates the world wide, persistent name of the resource."""
    durable_name_format = base.Field("DurableNameFormat")
    """This represents the format of the DurableName property."""
class ResourceBase(base.ResourceBase):
    """Base class for rsd_lib resources, adding raw HTTP verb helpers."""
    identity = base.Field("Id")
    """The resource identity string"""
    name = base.Field("Name")
    """The resource name"""
    description = base.Field("Description")
    """The resource description"""
    def post(self, path="", data=None):
        """Issue HTTP POST request to this resource"""
        # An empty path targets this resource itself.
        self._conn.post(path if path != "" else self.path, data=data)
    def patch(self, path="", data=None):
        """Issue HTTP PATCH request to this resource"""
        self._conn.patch(path if path != "" else self.path, data=data)
    def put(self, path="", data=None):
        """Issue HTTP PUT request to this resource"""
        self._conn.put(path if path != "" else self.path, data=data)
    def delete(self):
        """Delete this resource"""
        self._conn.delete(self._path)
class ResourceCollectionBase(base.ResourceCollectionBase):
    """Base class for rsd_lib resource collections with raw HTTP helpers."""
    name = base.Field("Name")
    """The resource collection name"""
    description = base.Field("Description")
    """The resource collection description"""
    def post(self, path="", data=None):
        """Issue HTTP POST request to this resource"""
        # An empty path targets this collection itself.
        self._conn.post(path if path != "" else self.path, data=data)
    def patch(self, path="", data=None):
        """Issue HTTP PATCH request to this resource"""
        self._conn.patch(path if path != "" else self.path, data=data)
    def put(self, path="", data=None):
        """Issue HTTP PUT request to this resource"""
        self._conn.put(path if path != "" else self.path, data=data)
    def delete(self):
        """Delete this resource collection"""
        self._conn.delete(self._path)
from distutils import version
from sushy import connector
from sushy.resources import base
from rsd_lib.resources import v2_1
from rsd_lib.resources import v2_2
from rsd_lib.resources import v2_3
from rsd_lib.resources import v2_4
class RSDLib(base.ResourceBase):
    """Root service entry point that dispatches to a version-specific client."""
    _redfish_version = base.Field(["RedfishVersion"], required=True)
    """Redfish protocol version reported by the service root"""
    _rsd_api_version = base.Field(
        ["Oem", "Intel_RackScale", "ApiVersion"], required=True
    )
    """RSD API version"""
    def __init__(
        self,
        base_url,
        username=None,
        password=None,
        root_prefix="/redfish/v1/",
        verify=True,
    ):
        """A class representing a RootService
        :param base_url: The base URL to the Redfish controller. It
            should include scheme and authority portion of the URL. For
            example: https://mgmt.vendor.com
        :param username: User account with admin/server-profile access
            privilege
        :param password: User account password
        :param root_prefix: The default URL prefix. This part includes
            the root service and version. Defaults to /redfish/v1
        :param verify: Either a boolean value, a path to a CA_BUNDLE
            file or directory with certificates of trusted CAs. If set to
            True the driver will verify the host certificates; if False
            the driver will ignore verifying the SSL certificate; if it's
            a path the driver will use the specified certificate or one of
            the certificates in the directory. Defaults to True.
        """
        self._root_prefix = root_prefix
        super(RSDLib, self).__init__(
            connector.Connector(base_url, username, password, verify),
            path=self._root_prefix,
        )
    def factory(self):
        """Return different resource module according to RSD API Version
        :returns: a resource module
        """
        rsd_version = version.StrictVersion(self._rsd_api_version)
        # Each entry maps an exclusive upper bound to the client class that
        # handles all versions below it; entries are checked in order, so
        # e.g. anything below 2.2.0 uses the 2.1 client.
        dispatch = [
            ("2.2.0", v2_1.RSDLibV2_1),
            ("2.3.0", v2_2.RSDLibV2_2),
            ("2.4.0", v2_3.RSDLibV2_3),
            ("2.5.0", v2_4.RSDLibV2_4),
        ]
        for upper_bound, library_class in dispatch:
            if rsd_version < version.StrictVersion(upper_bound):
                return library_class(
                    self._conn,
                    self._root_prefix,
                    redfish_version=self._redfish_version,
                )
        raise NotImplementedError(
            "The rsd-lib library doesn't support RSD API "
            "version {0}.".format(self._rsd_api_version)
        )
from copy import deepcopy
from rsd_lib.resources.v2_2.types import RESOURCE_CLASS as RESOURCE_CLASS_V22
from rsd_lib.resources.v2_3.chassis import chassis
from rsd_lib.resources.v2_3.ethernet_switch import ethernet_switch
from rsd_lib.resources.v2_3.fabric import endpoint
from rsd_lib.resources.v2_3.fabric import fabric
from rsd_lib.resources.v2_3.fabric import zone
from rsd_lib.resources.v2_3.manager import manager
from rsd_lib.resources.v2_3.node import attach_action_info
from rsd_lib.resources.v2_3.node import node
from rsd_lib.resources.v2_3.storage_service import drive
from rsd_lib.resources.v2_3.storage_service import drive_metrics
from rsd_lib.resources.v2_3.storage_service import storage_pool
from rsd_lib.resources.v2_3.storage_service import storage_service
from rsd_lib.resources.v2_3.storage_service import volume
from rsd_lib.resources.v2_3.storage_service import volume_metrics
from rsd_lib.resources.v2_3.system import ethernet_interface
from rsd_lib.resources.v2_3.system import system
# Start from a copy of the complete v2.2 resource map so every unchanged
# v2.2 type remains resolvable, then overlay the classes updated in v2.3.
RESOURCE_CLASS = deepcopy(RESOURCE_CLASS_V22)
RESOURCE_CLASS.update(
    {
        "ActionInfo": attach_action_info.AttachResourceActionInfo,
        "Chassis": chassis.Chassis,
        "ChassisCollection": chassis.ChassisCollection,
        "ComposedNode": node.Node,
        "ComposedNodeCollection": node.NodeCollection,
        "ComputerSystem": system.System,
        "ComputerSystemCollection": system.SystemCollection,
        "Drive": drive.Drive,
        "DriveCollection": drive.DriveCollection,
        "DriveMetrics": drive_metrics.DriveMetrics,
        "Endpoint": endpoint.Endpoint,
        "EndpointCollection": endpoint.EndpointCollection,
        "EthernetInterface": ethernet_interface.EthernetInterface,
        "EthernetInterfaceCollection":
            ethernet_interface.EthernetInterfaceCollection,
        "EthernetSwitch": ethernet_switch.EthernetSwitch,
        "EthernetSwitchCollection": ethernet_switch.EthernetSwitchCollection,
        "Fabric": fabric.Fabric,
        "FabricCollection": fabric.FabricCollection,
        "Manager": manager.Manager,
        "ManagerCollection": manager.ManagerCollection,
        "StoragePool": storage_pool.StoragePool,
        "StoragePoolCollection": storage_pool.StoragePoolCollection,
        "StorageService": storage_service.StorageService,
        "StorageServiceCollection": storage_service.StorageServiceCollection,
        "Volume": volume.Volume,
        "VolumeCollection": volume.VolumeCollection,
        "VolumeMetrics": volume_metrics.VolumeMetrics,
        "Zone": zone.Zone,
        "ZoneCollection": zone.ZoneCollection,
    }
)
# These resource types inherited from the v2.2 map are not part of the
# v2.3 API, so their entries are removed.
for k in (
    "LogicalDrive",
    "LogicalDriveCollection",
    "PhysicalDrive",
    "PhysicalDriveCollection",
    "RemoteTarget",
    "RemoteTargetCollection",
):
    RESOURCE_CLASS.pop(k)
from sushy.resources import base
from rsd_lib import exceptions as rsd_lib_exceptions
from rsd_lib.resources import v2_2
from rsd_lib.resources.v2_3.chassis import chassis
from rsd_lib.resources.v2_3.ethernet_switch import ethernet_switch
from rsd_lib.resources.v2_3.fabric import fabric
from rsd_lib.resources.v2_3.manager import manager
from rsd_lib.resources.v2_3.node import node
from rsd_lib.resources.v2_3.storage_service import storage_service
from rsd_lib.resources.v2_3.system import system
from rsd_lib.resources.v2_3.types import RESOURCE_CLASS
class RSDLibV2_3(v2_2.RSDLibV2_2):
    """Client entry point for the RSD v2.3 API.

    Extends the v2.2 client, overriding the accessor methods to return the
    v2.3 resource classes and adding ethernet switch and storage service
    collection paths.
    """
    _ethernet_switches_path = base.Field(
        ["Oem", "Intel_RackScale", "EthernetSwitches", "@odata.id"]
    )
    """EthernetSwitchCollecton path"""
    _storage_service_path = base.Field(["StorageServices", "@odata.id"])
    """StorageServiceCollection path"""
    def get_chassis_collection(self):
        """Get the ChassisCollection object
        :raises: MissingAttributeError, if the collection attribute is
            not found
        :returns: a ChassisCollection object
        """
        return chassis.ChassisCollection(
            self._conn,
            self._chassis_path,
            redfish_version=self.redfish_version,
        )
    def get_chassis(self, identity):
        """Given the identity return a Chassis object
        :param identity: The identity of the Chassis resource
        :returns: The Chassis object
        """
        return chassis.Chassis(
            self._conn, identity, redfish_version=self.redfish_version
        )
    def get_system(self, identity):
        """Given the identity return a System object
        :param identity: The identity of the System resource
        :returns: The System object
        """
        return system.System(
            self._conn, identity, redfish_version=self.redfish_version
        )
    def get_system_collection(self):
        """Get the SystemCollection object
        :raises: MissingAttributeError, if the collection attribute is
            not found
        :returns: a SystemCollection object
        """
        return system.SystemCollection(
            self._conn,
            self._systems_path,
            redfish_version=self.redfish_version,
        )
    def get_node_collection(self):
        """Get the NodeCollection object
        :raises: MissingAttributeError, if the collection attribute is
            not found
        :returns: a NodeCollection object
        """
        return node.NodeCollection(
            self._conn, self._nodes_path, redfish_version=self.redfish_version
        )
    def get_node(self, identity):
        """Given the identity return a Node object
        :param identity: The identity of the Node resource
        :returns: The Node object
        """
        return node.Node(
            self._conn, identity, redfish_version=self.redfish_version
        )
    def get_manager_collection(self):
        """Get the ManagerCollection object
        :raises: MissingAttributeError, if the collection attribute is
            not found
        :returns: a ManagerCollection object
        """
        return manager.ManagerCollection(
            self._conn,
            self._managers_path,
            redfish_version=self.redfish_version,
        )
    def get_manager(self, identity):
        """Given the identity return a Manager object
        :param identity: The identity of the Manager resource
        :returns: The Manager object
        """
        return manager.Manager(
            self._conn, identity, redfish_version=self.redfish_version
        )
    def get_storage_service_collection(self):
        """Get the StorageServiceCollection object
        :raises: MissingAttributeError, if the collection attribute is
            not found
        :returns: a StorageServiceCollection object
        """
        return storage_service.StorageServiceCollection(
            self._conn,
            self._storage_service_path,
            redfish_version=self.redfish_version,
        )
    def get_storage_service(self, identity):
        """Given the identity return a StorageService object
        :param identity: The identity of the StorageService resource
        :returns: The StorageService object
        """
        return storage_service.StorageService(
            self._conn, identity, redfish_version=self.redfish_version
        )
    def get_fabric_collection(self):
        """Get the FabricCollection object
        :raises: MissingAttributeError, if the collection attribute is
            not found
        :returns: a FabricCollection object
        """
        return fabric.FabricCollection(
            self._conn,
            self._fabrics_path,
            redfish_version=self.redfish_version,
        )
    def get_fabric(self, identity):
        """Given the identity return a Fabric object
        :param identity: The identity of the Fabric resource
        :returns: The Fabric object
        """
        return fabric.Fabric(
            self._conn, identity, redfish_version=self.redfish_version
        )
    def get_ethernet_switch_collection(self):
        """Get the EthernetSwitchCollection object
        :raises: MissingAttributeError, if the collection attribute is
            not found
        :returns: a EthernetSwitchCollection object
        """
        return ethernet_switch.EthernetSwitchCollection(
            self._conn,
            self._ethernet_switches_path,
            redfish_version=self.redfish_version,
        )
    def get_ethernet_switch(self, identity):
        """Given the identity return a EthernetSwitch object
        :param identity: The identity of the EthernetSwitch resource
        :returns: The EthernetSwitch object
        """
        return ethernet_switch.EthernetSwitch(
            self._conn, identity, redfish_version=self.redfish_version
        )
    def get_resource(self, path):
        """Return corresponding resource object from path
        :param path: The path of a resource or resource collection
        :returns: corresponding resource or resource collection object
        """
        # Map the path to a class via the v2.3 RESOURCE_CLASS table.
        resource_class = self._get_resource_class_from_path(
            path, RESOURCE_CLASS
        )
        if not resource_class:
            raise rsd_lib_exceptions.NoMatchingResourceError(uri=path)
        return resource_class(
            self._conn, path, redfish_version=self.redfish_version
        )
# JSON Schema for the "Identifiers" field of an endpoint request: a list of
# durable names, each restricted to the NQN or iQN format.
identifiers_req_schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "DurableNameFormat": {"type": "string", "enum": ["NQN", "iQN"]},
            "DurableName": {"type": "string"},
        },
        "required": ["DurableNameFormat", "DurableName"],
    },
}
# JSON Schema for the "ConnectedEntities" field: each entry links an entity
# with its role (Initiator/Target/Both) and optional identifiers, which
# allow a wider set of durable name formats than identifiers_req_schema.
connected_entities_req_schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "EntityLink": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
                "required": ["@odata.id"],
            },
            "EntityRole": {
                "type": "string",
                "enum": ["Initiator", "Target", "Both"],
            },
            "Identifiers": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "DurableNameFormat": {
                            "type": "string",
                            "enum": [
                                "NQN",
                                "iQN",
                                "FC_WWN",
                                "UUID",
                                "EUI",
                                "NAA",
                                "NSID",
                                "SystemPath",
                                "LUN",
                            ],
                        },
                        "DurableName": {"type": "string"},
                    },
                    "required": ["DurableNameFormat", "DurableName"],
                },
            },
        },
        "required": ["EntityLink", "EntityRole"],
    },
}
# JSON Schema for the "Protocol" field (free-form string).
protocol_req_schema = {"type": "string"}
# JSON Schema for the "IPTransportDetails" field: transport protocol plus
# optional IPv4/IPv6 addresses and a numeric port.
ip_transport_details_req_schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "TransportProtocol": {"type": "string"},
            "IPv4Address": {
                "type": "object",
                "properties": {"Address": {"type": "string"}},
            },
            "IPv6Address": {
                "type": "object",
                "properties": {"Address": {"type": "string"}},
            },
            "Port": {"type": "number"},
        },
    },
}
# JSON Schema for the "Interface" field (free-form string).
interface_req_schema = {"type": "string"}
# JSON Schema for the "Authentication" field: optional credentials.
authentication_req_schema = {
    "type": "object",
    "properties": {
        "Username": {"type": "string"},
        "Password": {"type": "string"},
    },
}
from jsonschema import validate
import logging
from sushy import exceptions
from sushy.resources import base
from rsd_lib.resources.v2_1.node import node as v2_1_node
from rsd_lib.resources.v2_2.node import node as v2_2_node
from rsd_lib.resources.v2_3.node import attach_action_info
from rsd_lib.resources.v2_3.node import schemas as node_schemas
from rsd_lib import utils as rsd_lib_utils
LOG = logging.getLogger(__name__)
class AttachEndpointActionField(base.CompositeField):
    """Parses the #ComposedNode.AttachResource action description."""
    # URI to POST to in order to invoke the action.
    target_uri = base.Field("target", required=True)
    # Path of the ActionInfo resource describing allowed parameters.
    action_info_path = base.Field(
        "@Redfish.ActionInfo", adapter=rsd_lib_utils.get_resource_identity
    )
    # Lazily populated with an AttachResourceActionInfo instance.
    action_info = None
class DetachEndpointActionField(base.CompositeField):
    """Parses the #ComposedNode.DetachResource action description."""
    # URI to POST to in order to invoke the action.
    target_uri = base.Field("target", required=True)
    # Path of the ActionInfo resource describing allowed parameters.
    action_info_path = base.Field(
        "@Redfish.ActionInfo", adapter=rsd_lib_utils.get_resource_identity
    )
    # Lazily populated with an AttachResourceActionInfo instance.
    action_info = None
class NodeActionsField(v2_1_node.NodeActionsField):
    """Extends the v2.1 node actions with resource attach/detach actions."""
    attach_endpoint = AttachEndpointActionField("#ComposedNode.AttachResource")
    detach_endpoint = DetachEndpointActionField("#ComposedNode.DetachResource")
class Node(v2_1_node.Node):
clear_tpm_on_delete = base.Field("ClearTPMOnDelete", adapter=bool)
"""This is used to specify if TPM module should be cleared on composed node
DELETE request
"""
_actions = NodeActionsField("Actions", required=True)
def update(self, clear_tpm_on_delete):
"""Update properties of this composed node
:param clear_tpm_on_delete: This is used to specify if TPM module
should be cleared on composed node DELETE request.
:raises: InvalidParameterValueError, if any information passed is
invalid.
"""
if not isinstance(clear_tpm_on_delete, bool):
raise exceptions.InvalidParameterValueError(
parameter="clear_tpm_on_delete",
value=clear_tpm_on_delete,
valid_values=[True, False],
)
data = {"ClearTPMOnDelete": clear_tpm_on_delete}
self._conn.patch(self.path, data=data)
def _get_attach_endpoint_action_element(self):
attach_endpoint_action = self._actions.attach_endpoint
if not attach_endpoint_action:
raise exceptions.MissingActionError(
action="#ComposedNode.AttachResource", resource=self._path
)
if attach_endpoint_action.action_info is None:
attach_endpoint_action.action_info = attach_action_info.\
AttachResourceActionInfo(
self._conn,
attach_endpoint_action.action_info_path,
redfish_version=self.redfish_version,
)
return attach_endpoint_action
def get_allowed_attach_endpoints(self):
"""Get the allowed endpoints for attach action.
:returns: A set with the allowed attach endpoints.
"""
attach_action = self._get_attach_endpoint_action_element()
for i in attach_action.action_info.parameters:
if i["name"] == "Resource":
return i["allowable_values"]
return ()
def attach_endpoint(self, resource, protocol=None):
"""Attach endpoint from available pool to composed node
:param resource: Link to endpoint to attach.
:param protocol: Protocol of the remote drive.
:raises: InvalidParameterValueError
"""
attach_action = self._get_attach_endpoint_action_element()
valid_endpoints = self.get_allowed_attach_endpoints()
target_uri = attach_action.target_uri
if resource and resource not in valid_endpoints:
raise exceptions.InvalidParameterValueError(
parameter="resource",
value=resource,
valid_values=valid_endpoints,
)
data = {}
if resource is not None:
data["Resource"] = {"@odata.id": resource}
if protocol is not None:
data["Protocol"] = protocol
self._conn.post(target_uri, data=data)
def _get_detach_endpoint_action_element(self):
    """Return the #ComposedNode.DetachResource action element.

    Lazily fetches the action's ActionInfo resource on first use and
    caches it on the action object itself.

    :raises: MissingActionError if this node exposes no detach action.
    """
    detach_endpoint_action = self._actions.detach_endpoint
    if not detach_endpoint_action:
        raise exceptions.MissingActionError(
            action="#ComposedNode.DetachResource", resource=self._path
        )
    # The detach action reuses the AttachResourceActionInfo class for
    # its ActionInfo document; refresh() resets action_info to None so
    # it is re-fetched after a refresh.
    if detach_endpoint_action.action_info is None:
        detach_endpoint_action.action_info = attach_action_info.\
            AttachResourceActionInfo(
                self._conn,
                detach_endpoint_action.action_info_path,
                redfish_version=self.redfish_version,
            )
    return detach_endpoint_action
def get_allowed_detach_endpoints(self):
    """Get the allowed endpoints for detach action.

    :returns: A set with the allowed detach endpoints.
    """
    action = self._get_detach_endpoint_action_element()
    # Pick the allowable values of the first "Resource" parameter,
    # falling back to an empty tuple when no such parameter exists.
    matches = (
        param["allowable_values"]
        for param in action.action_info.parameters
        if param["name"] == "Resource"
    )
    return next(matches, ())
def detach_endpoint(self, resource):
    """Detach endpoint from available pool to composed node

    :param resource: Link to endpoint to detach.
    :raises: InvalidParameterValueError if the resource is not one of
        the endpoints allowed by the detach action.
    """
    detach_action = self._get_detach_endpoint_action_element()
    valid_endpoints = self.get_allowed_detach_endpoints()
    target_uri = detach_action.target_uri
    # Unlike attach_endpoint(), the resource is always validated here,
    # so a None resource will normally fail this membership check.
    if resource not in valid_endpoints:
        raise exceptions.InvalidParameterValueError(
            parameter="resource",
            value=resource,
            valid_values=valid_endpoints,
        )
    data = {}
    # NOTE(review): by this point resource has passed the membership
    # check above, so this guard is effectively always True; kept for
    # symmetry with attach_endpoint().
    if resource is not None:
        data["Resource"] = {"@odata.id": resource}
    self._conn.post(target_uri, data=data)
def refresh(self, force=True):
    """Refresh this composed node resource.

    In addition to the parent-class refresh, drops any cached
    ActionInfo objects so they are re-fetched lazily on next use.

    :param force: passed through to the parent class refresh.
    """
    super(Node, self).refresh(force)
    if self._actions.attach_endpoint:
        self._actions.attach_endpoint.action_info = None
    if self._actions.detach_endpoint:
        self._actions.detach_endpoint.action_info = None
class NodeCollection(v2_2_node.NodeCollection):
    """Collection of v2.3 composed node resources."""

    @property
    def _resource_type(self):
        # Members of this collection are v2.3 Node resources.
        return Node

    def _create_compose_request(
        self,
        name=None,
        description=None,
        processor_req=None,
        memory_req=None,
        remote_drive_req=None,
        local_drive_req=None,
        ethernet_interface_req=None,
        security_req=None,
        total_system_core_req=None,
        total_system_memory_req=None,
    ):
        """Build and validate the JSON body for a compose request.

        Each supplied requirement block is validated against its JSON
        schema before being added to the request; invalid input raises
        a jsonschema validation error.
        """
        request = {}
        if name is not None:
            request["Name"] = name
        if description is not None:
            request["Description"] = description
        if processor_req is not None:
            validate(processor_req, node_schemas.processor_req_schema)
            request["Processors"] = processor_req
        if memory_req is not None:
            validate(memory_req, node_schemas.memory_req_schema)
            request["Memory"] = memory_req
        if remote_drive_req is not None:
            validate(remote_drive_req, node_schemas.remote_drive_req_schema)
            request["RemoteDrives"] = remote_drive_req
        if local_drive_req is not None:
            validate(local_drive_req, node_schemas.local_drive_req_schema)
            request["LocalDrives"] = local_drive_req
        if ethernet_interface_req is not None:
            validate(
                ethernet_interface_req,
                node_schemas.ethernet_interface_req_schema,
            )
            request["EthernetInterfaces"] = ethernet_interface_req
        if security_req is not None:
            validate(security_req, node_schemas.security_req_schema)
            request["Security"] = security_req
        if total_system_core_req is not None:
            validate(
                total_system_core_req,
                node_schemas.total_system_core_req_schema,
            )
            request["TotalSystemCoreCount"] = total_system_core_req
        if total_system_memory_req is not None:
            validate(
                total_system_memory_req,
                node_schemas.total_system_memory_req_schema,
            )
            request["TotalSystemMemoryMiB"] = total_system_memory_req
        return request

    def compose_node(
        self,
        name=None,
        description=None,
        processor_req=None,
        memory_req=None,
        remote_drive_req=None,
        local_drive_req=None,
        ethernet_interface_req=None,
        security_req=None,
        total_system_core_req=None,
        total_system_memory_req=None,
    ):
        """Compose a node from RackScale hardware

        :param name: Name of node
        :param description: Description of node
        :param processor_req: JSON for node processors
        :param memory_req: JSON for node memory modules
        :param remote_drive_req: JSON for node remote drives
        :param local_drive_req: JSON for node local drives
        :param ethernet_interface_req: JSON for node ethernet ports
        :param security_req: JSON for node security requirements
        :param total_system_core_req: Total processor cores available in
            composed node
        :param total_system_memory_req: Total memory available in composed node
        :returns: The location (service-relative URI) of the composed node

        When 'processor_req' is not None, the composed system must
        contain processors that each meet all conditions in the value.
        When 'total_system_core_req' is not None, the composed system
        must contain processors whose cores sum up to a number equal to
        or greater than 'total_system_core_req'.  When both values are
        not None, all conditions must be met.  'memory_req' and
        'total_system_memory_req' behave in the same way.
        """
        target_uri = self._get_compose_action_element().target_uri
        properties = self._create_compose_request(
            name=name,
            description=description,
            processor_req=processor_req,
            memory_req=memory_req,
            remote_drive_req=remote_drive_req,
            local_drive_req=local_drive_req,
            ethernet_interface_req=ethernet_interface_req,
            security_req=security_req,
            total_system_core_req=total_system_core_req,
            total_system_memory_req=total_system_memory_req,
        )
        resp = self._conn.post(target_uri, data=properties)
        LOG.info("Node created at %s", resp.headers["Location"])
        node_url = resp.headers["Location"]
        # Strip scheme/host from the absolute Location header, keeping
        # the path starting at this collection's path.
        return node_url[node_url.find(self._path):]
# JSON schema for a composed-node processor requirement list.
#
# NOTE: "items" is a single schema object (not a one-element list).  In
# JSON Schema, a *list* value for "items" enables tuple validation, in
# which only the element at the matching position is checked and any
# further array entries are unconstrained; the object form validates
# EVERY element of the request array.
processor_req_schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "Model": {"type": "string"},
            "TotalCores": {"type": "number"},
            "AchievableSpeedMHz": {"type": "number"},
            "InstructionSet": {
                "type": "string",
                "enum": [
                    "x86",
                    "x86-64",
                    "IA-64",
                    "ARM-A32",
                    "ARM-A64",
                    "MIPS32",
                    "MIPS64",
                    "OEM",
                ],
            },
            "Oem": {
                "type": "object",
                "properties": {
                    "Brand": {
                        "type": "string",
                        "enum": [
                            "E3",
                            "E5",
                            "E7",
                            "X3",
                            "X5",
                            "X7",
                            "I3",
                            "I5",
                            "I7",
                            "Silver",
                            "Gold",
                            "Platinum",
                            "Unknown",
                        ],
                    },
                    # Every capability string is validated (object form
                    # of "items", see note above).
                    "Capabilities": {
                        "type": "array",
                        "items": {"type": "string"},
                    },
                },
            },
            "Resource": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
            },
            "Chassis": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
            },
            "ProcessorType": {
                "type": "string",
                "enum": [
                    "CPU",
                    "FPGA",
                    "GPU",
                    "DSP",
                    "Accelerator",
                    "OEM",
                ],
            },
        },
        "additionalProperties": False,
    },
}
# JSON schema for a composed-node memory requirement list.
#
# NOTE: "items" is a single schema object so that every element of the
# request array is validated (the previous one-element list form only
# validated the first element; see JSON Schema tuple validation).
memory_req_schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "CapacityMiB": {"type": "number"},
            "MemoryDeviceType": {
                "type": "string",
                "enum": [
                    "DDR",
                    "DDR2",
                    "DDR3",
                    "DDR4",
                    "DDR4_SDRAM",
                    "DDR4E_SDRAM",
                    "LPDDR4_SDRAM",
                    "DDR3_SDRAM",
                    "LPDDR3_SDRAM",
                    "DDR2_SDRAM",
                    "DDR2_SDRAM_FB_DIMM",
                    "DDR2_SDRAM_FB_DIMM_PROBE",
                    "DDR_SGRAM",
                    "DDR_SDRAM",
                    "ROM",
                    "SDRAM",
                    "EDO",
                    "FastPageMode",
                    "PipelinedNibble",
                ],
            },
            "SpeedMHz": {"type": "number"},
            "Manufacturer": {"type": "string"},
            "DataWidthBits": {"type": "number"},
            "Resource": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
            },
            "Chassis": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
            },
        },
        "additionalProperties": False,
    },
}
# JSON schema for a composed-node remote drive requirement list.
#
# NOTE: "items" is a single schema object so that every element of the
# request array is validated (the previous one-element list form only
# validated the first element; see JSON Schema tuple validation).
remote_drive_req_schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "CapacityGiB": {"type": "number"},
            "Protocol": {
                "type": "string",
                "enum": ["iSCSI", "NVMeOverFabrics"],
            },
            "Master": {
                "type": "object",
                "properties": {
                    "Type": {
                        "type": "string",
                        "enum": ["Snapshot", "Clone"],
                    },
                    "Resource": {
                        "type": "object",
                        "properties": {"@odata.id": {"type": "string"}},
                    },
                },
            },
            "Resource": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
            },
        },
        "additionalProperties": False,
    },
}
# JSON schema for a composed-node local drive requirement list.
#
# NOTE: "items" is a single schema object so that every element of the
# request array is validated (the previous one-element list form only
# validated the first element; see JSON Schema tuple validation).
local_drive_req_schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "CapacityGiB": {"type": "number"},
            "Type": {"type": "string", "enum": ["HDD", "SSD"]},
            "MinRPM": {"type": "number"},
            "SerialNumber": {"type": "string"},
            "Interface": {
                "type": "string",
                "enum": ["SAS", "SATA", "NVMe"],
            },
            "Resource": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
            },
            "Chassis": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
            },
            "FabricSwitch": {"type": "boolean"},
        },
        "additionalProperties": False,
    },
}
# JSON schema for a composed-node ethernet interface requirement list.
#
# NOTE: "items" is a single schema object so that every element of the
# request array is validated (the previous one-element list form only
# validated the first element).  The nested "VLANs" array previously
# used "additionalItems" without "items", which validates nothing in
# JSON Schema draft-04; it now uses "items" so each VLAN entry is
# actually checked.
ethernet_interface_req_schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "SpeedMbps": {"type": "number"},
            "PrimaryVLAN": {"type": "number"},
            "VLANs": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "VLANId": {"type": "number"},
                        "Tagged": {"type": "boolean"},
                    },
                },
            },
            "Resource": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
            },
            "Chassis": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
            },
        },
        "additionalProperties": False,
    },
}
# JSON schema for the composed-node security requirements.  Unlike the
# other requirement schemas this is a single object, not an array.
security_req_schema = {
    "type": "object",
    "properties": {
        "TpmPresent": {"type": "boolean"},
        "TpmInterfaceType": {"type": "string"},
        "TxtEnabled": {"type": "boolean"},
        "ClearTPMOnDelete": {"type": "boolean"},
    },
    "additionalProperties": False,
}

# Total core-count / total memory requirements are plain numbers.
total_system_core_req_schema = {"type": "number"}
total_system_memory_req_schema = {"type": "number"}
from sushy.resources import base
from rsd_lib import base as rsd_lib_base
from rsd_lib import utils as rsd_lib_utils
class LifeTimeField(base.CompositeField):
    """The "LifeTime" composite field of a drive metrics resource."""

    unit_size_bytes = base.Field(
        "UnitSizeBytes", adapter=rsd_lib_utils.num_or_none
    )
    """The size of a unit in bytes used by UnitsRead and UnitsWritten"""

    # NOTE(review): unlike unit_size_bytes, the counters below carry no
    # num_or_none adapter and are exposed as received from the service;
    # confirm whether they are guaranteed to be numeric.
    units_read = base.Field("UnitsRead")
    """The number of units read since reset"""

    units_written = base.Field("UnitsWritten")
    """The number of units written since reset"""

    host_read_commands = base.Field("HostReadCommands")
    """The number of read commands completed by the disk controller"""

    host_write_commands = base.Field("HostWriteCommands")
    """The number of write commands completed by the disk controller"""

    power_cycles = base.Field("PowerCycles")
    """The number of power cycles of this drive"""

    power_on_hours = base.Field("PowerOnHours")
    """The number of power-on hours of this drive"""

    controller_busy_time_minutes = base.Field("ControllerBusyTimeMinutes")
    """The amount of time in minutes the drive controller is busy"""
class HealthDataField(base.CompositeField):
    """The "HealthData" composite field of a drive metrics resource."""

    available_spare_percentage = base.Field("AvailableSparePercentage")
    """The percentage of the remaining spare capacity available"""

    predicted_media_life_used_percent = base.Field(
        "PredictedMediaLifeUsedPercent"
    )
    """The percentage of life used in the drive's media"""

    unsafe_shutdowns = base.Field("UnsafeShutdowns")
    """The number of unsafe shutdowns of this drive"""

    media_errors = base.Field("MediaErrors")
    """The number of media and data integrity errors of this drive"""
class DriveMetrics(rsd_lib_base.ResourceBase):
    """DriveMetrics resource class

    Telemetry metrics reported for a single drive.
    """

    name = base.Field("Name")
    """Drive metrics name"""

    identity = base.Field("Id")
    """Drive metrics id"""

    description = base.Field("Description")
    """Drive metrics description"""

    life_time = LifeTimeField("LifeTime")
    """The life time metrics for this drive"""

    health_data = HealthDataField("HealthData")
    """The health data metrics for this drive"""

    temperature_kelvin = base.Field("TemperatureKelvin")
    """The temperature in Kelvin degrees of this drive"""
from sushy.resources import base
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib import common as rsd_lib_common
from rsd_lib.resources.v2_3.storage_service import drive_metrics
from rsd_lib import utils as rsd_lib_utils
class LinksField(base.CompositeField):
    """The "Links" composite field of a drive resource."""

    chassis = base.Field(
        "Chassis", adapter=rsd_lib_utils.get_resource_identity
    )
    """Link to related chassis of this drive"""

    volumes = base.Field(
        "Volumes", default=(), adapter=utils.get_members_identities
    )
    """Link to related volumes of this drive"""

    endpoints = base.Field(
        "Endpoints", default=(), adapter=utils.get_members_identities
    )
    """Link to related endpoints of this drive"""
class OemField(base.CompositeField):
    """Intel RackScale OEM extensions of a drive resource."""

    erased = base.Field(
        ["Intel_RackScale", "DriveErased"], adapter=bool, required=True
    )
    erase_on_detach = base.Field(
        ["Intel_RackScale", "EraseOnDetach"], adapter=bool
    )
    firmware_version = base.Field(["Intel_RackScale", "FirmwareVersion"])
    storage = base.Field(["Intel_RackScale", "Storage"])
    pcie_function = base.Field(["Intel_RackScale", "PCIeFunction"])
class IdentifiersField(base.ListField):
    """A list of durable name identifiers of a drive."""

    durable_name = base.Field("DurableName")
    durable_name_format = base.Field("DurableNameFormat")


class LocationField(base.ListField):
    """A list of location descriptors of a drive."""

    info = base.Field("Info")
    info_format = base.Field("InfoFormat")
class Drive(rsd_lib_base.ResourceBase):
    """Drive resource class

    A physical drive exposed by a v2.3 storage service.
    """

    identity = base.Field("Id", required=True)
    """The drive identity string"""

    name = base.Field("Name")
    """The drive name string"""

    protocol = base.Field("Protocol")
    """The protocol of this drive"""

    drive_type = base.Field("Type")
    """The type of this drive"""

    media_type = base.Field("MediaType")
    """The media type of this drive"""

    capacity_bytes = base.Field(
        "CapacityBytes", adapter=rsd_lib_utils.num_or_none
    )
    """The capacity in Bytes of this drive"""

    manufacturer = base.Field("Manufacturer")
    """The manufacturer of this drive"""

    model = base.Field("Model")
    """The drive model"""

    revision = base.Field("Revision")
    """The revision of this drive"""

    sku = base.Field("SKU")
    """The sku of this drive"""

    serial_number = base.Field("SerialNumber")
    """The serial number of this drive"""

    part_number = base.Field("PartNumber")
    """The part number of this drive"""

    asset_tag = base.Field("AssetTag")
    """The asset tag of this drive"""

    rotation_speed_rpm = base.Field("RotationSpeedRPM")
    """The rotation speed of this drive"""

    identifiers = IdentifiersField("Identifiers")
    """These identifiers list of this drive"""

    location = LocationField("Location")
    """The location of this drive"""

    status = rsd_lib_common.StatusField("Status")
    """The drive status"""

    oem = OemField("Oem")
    """The OEM additional info of this drive"""

    status_indicator = base.Field("StatusIndicator")
    """The status indicator state for the status indicator associated
    with this drive"""

    indicator_led = base.Field("IndicatorLED")
    """The indicator light state for the indicator light associated
    with the drive"""

    capable_speed_gbs = base.Field("CapableSpeedGbs")
    """The fastest capable bus speed of the associated drive"""

    negotiated_speed_gbs = base.Field("NegotiatedSpeedGbs")
    """The current bus speed of the associated drive"""

    predicted_media_life_left_percent = base.Field(
        "PredictedMediaLifeLeftPercent"
    )
    """An indicator of the percentage of life remaining in the drive's media"""

    links = LinksField("Links")
    """These links to related components of this volume"""

    def _get_metrics_path(self):
        """Helper function to find the Metrics path"""
        return utils.get_sub_resource_path_by(
            self, ["Links", "Oem", "Intel_RackScale", "Metrics"]
        )

    @property
    @utils.cache_it
    def metrics(self):
        """Property to provide reference to `Metrics` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return drive_metrics.DriveMetrics(
            self._conn,
            self._get_metrics_path(),
            redfish_version=self.redfish_version,
        )
class DriveCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection of drive resources."""

    @property
    def _resource_type(self):
        # Members of this collection are Drive resources.
        return Drive
import logging
from sushy.resources import base
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib import common as rsd_lib_common
from rsd_lib.resources.v2_3.fabric import endpoint
from rsd_lib.resources.v2_3.storage_service import drive
from rsd_lib.resources.v2_3.storage_service import storage_pool
from rsd_lib.resources.v2_3.storage_service import volume
LOG = logging.getLogger(__name__)
class StorageService(rsd_lib_base.ResourceBase):
    """StorageService resource class

    A v2.3 storage service, exposing volumes, storage pools, drives and
    endpoints as cached sub-collections.
    """

    description = base.Field("Description")
    """The storage service description"""

    identity = base.Field("Id", required=True)
    """The storage service identity string"""

    name = base.Field("Name")
    """The storage service name"""

    status = rsd_lib_common.StatusField("Status")
    """The storage service status"""

    def _get_volume_collection_path(self):
        """Helper function to find the VolumeCollection path"""
        return utils.get_sub_resource_path_by(self, "Volumes")

    @property
    @utils.cache_it
    def volumes(self):
        """Property to provide reference to `VolumeCollection` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return volume.VolumeCollection(
            self._conn,
            self._get_volume_collection_path(),
            redfish_version=self.redfish_version,
        )

    def _get_storage_pool_collection_path(self):
        """Helper function to find the StoragePoolCollection path"""
        return utils.get_sub_resource_path_by(self, "StoragePools")

    @property
    @utils.cache_it
    def storage_pools(self):
        """Property to provide reference to `StoragePoolCollection` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return storage_pool.StoragePoolCollection(
            self._conn,
            self._get_storage_pool_collection_path(),
            redfish_version=self.redfish_version,
        )

    def _get_drive_collection_path(self):
        """Helper function to find the DriveCollection path"""
        return utils.get_sub_resource_path_by(self, "Drives")

    @property
    @utils.cache_it
    def drives(self):
        """Property to provide reference to `DriveCollection` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return drive.DriveCollection(
            self._conn,
            self._get_drive_collection_path(),
            redfish_version=self.redfish_version,
        )

    def _get_endpoint_collection_path(self):
        """Helper function to find the EndpointCollection path"""
        return utils.get_sub_resource_path_by(self, "Endpoints")

    @property
    @utils.cache_it
    def endpoints(self):
        """Property to provide reference to `EndpointCollection` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return endpoint.EndpointCollection(
            self._conn,
            self._get_endpoint_collection_path(),
            redfish_version=self.redfish_version,
        )
class StorageServiceCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection of storage service resources."""

    @property
    def _resource_type(self):
        # Members of this collection are StorageService resources.
        return StorageService
import jsonschema
import logging
from sushy import exceptions
from sushy.resources import base
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib import common as rsd_lib_common
from rsd_lib.resources.v2_3.storage_service import volume_metrics
from rsd_lib.resources.v2_3.storage_service import volume_schemas
from rsd_lib import utils as rsd_lib_utils
LOG = logging.getLogger(__name__)
class CapacitySourcesField(base.ListField):
    """Capacity sources of a volume."""

    providing_pools = base.Field(
        "ProvidingPools", adapter=utils.get_members_identities
    )
    # NOTE(review): "allocated_Bytes" keeps its original mixed-case
    # spelling because it is part of the public attribute interface.
    allocated_Bytes = base.Field(
        ["ProvidedCapacity", "Data", "AllocatedBytes"],
        adapter=rsd_lib_utils.num_or_none,
    )
class LinksField(base.CompositeField):
    """The "Links" composite field of a volume resource."""

    endpoints = base.Field(
        ["Oem", "Intel_RackScale", "Endpoints"],
        default=(),
        adapter=utils.get_members_identities,
    )
    """Link to related endpoints of this volume"""

    metrics = base.Field(
        ["Oem", "Intel_RackScale", "Metrics"],
        adapter=rsd_lib_utils.get_resource_identity,
    )
    """Link to telemetry metrics of this volume"""
class IdentifiersField(base.ListField):
    """A list of durable name identifiers of a volume."""

    durable_name = base.Field("DurableName")
    durable_name_format = base.Field("DurableNameFormat")


class ReplicaInfosField(base.ListField):
    """Replica relationship descriptors of a volume."""

    replica_readonly_access = base.Field("ReplicaReadOnlyAccess")
    replica_type = base.Field("ReplicaType")
    replica_role = base.Field("ReplicaRole")
    replica = base.Field(
        "Replica", adapter=rsd_lib_utils.get_resource_identity
    )
class InitializeActionField(base.CompositeField):
    """The #Volume.Initialize action descriptor."""

    target_uri = base.Field("target", required=True)


class VolumeActionsField(base.CompositeField):
    """The "Actions" composite field of a volume resource."""

    initialize = InitializeActionField("#Volume.Initialize")
class Volume(rsd_lib_base.ResourceBase):
    """Volume resource class

    A volume exposed by a v2.3 storage service.
    """

    identity = base.Field("Id", required=True)
    """The volume identity string"""

    description = base.Field("Description")
    """The volume description string"""

    name = base.Field("Name")
    """The volume name string"""

    model = base.Field("Model")
    """The volume model"""

    manufacturer = base.Field("Manufacturer")
    """The volume manufacturer"""

    access_capabilities = base.Field("AccessCapabilities")
    """The access capabilities of volume"""

    capacity_bytes = base.Field(
        "CapacityBytes", adapter=rsd_lib_utils.num_or_none
    )
    """The capacity of volume in bytes"""

    allocated_Bytes = base.Field(
        ["Capacity", "Data", "AllocatedBytes"],
        adapter=rsd_lib_utils.num_or_none,
    )
    """The allocated capacity of volume in bytes"""

    capacity_sources = CapacitySourcesField("CapacitySources")
    """The volume capacity sources"""

    identifiers = IdentifiersField("Identifiers")
    """These identifiers list of this volume"""

    links = LinksField("Links")
    """These links to related components of this volume"""

    replica_infos = ReplicaInfosField("ReplicaInfos")
    """These replica related info of this volume"""

    status = rsd_lib_common.StatusField("Status")
    """The volume status"""

    bootable = base.Field(["Oem", "Intel_RackScale", "Bootable"], adapter=bool)
    """The bootable info of this volume"""

    erased = base.Field(["Oem", "Intel_RackScale", "Erased"])
    """The erased info of this volume"""

    erase_on_detach = base.Field(
        ["Oem", "Intel_RackScale", "EraseOnDetach"], adapter=bool
    )
    """The erase on detach info of this volume"""

    _actions = VolumeActionsField("Actions", required=True)

    def update(self, bootable=None, erased=None):
        """Update volume properties

        :param bootable: Change bootable ability of the volume
        :param erased: Provide information if the drive was erased
        :raises: ValueError if neither parameter is specified
        :raises: InvalidParameterValueError if a supplied parameter is
            not a boolean
        """
        if bootable is None and erased is None:
            raise ValueError(
                'At least "bootable" or "erased" parameter has '
                "to be specified"
            )

        # Compare against None rather than truthiness so that falsy
        # non-bool values (0, "", []) are rejected instead of being
        # sent to the service unvalidated.
        if bootable is not None and not isinstance(bootable, bool):
            raise exceptions.InvalidParameterValueError(
                parameter="bootable",
                value=bootable,
                valid_values=[True, False],
            )

        if erased is not None and not isinstance(erased, bool):
            raise exceptions.InvalidParameterValueError(
                parameter="erased", value=erased, valid_values=[True, False]
            )

        data = {"Oem": {"Intel_RackScale": {}}}
        if bootable is not None:
            data["Oem"]["Intel_RackScale"]["Bootable"] = bootable
        if erased is not None:
            data["Oem"]["Intel_RackScale"]["Erased"] = erased

        self._conn.patch(self.path, data=data)

    def _get_initialize_action_element(self):
        """Return the #Volume.Initialize action element.

        :raises: MissingActionError if the volume has no such action.
        """
        initialize_action = self._actions.initialize
        if not initialize_action:
            raise exceptions.MissingActionError(
                action="#Volume.Initialize", resource=self._path
            )
        return initialize_action

    def initialize(self, init_type):
        """Initialize this volume

        :param init_type: volume initialize type ("Fast" or "Slow")
        :raises: InvalidParameterValueError if invalid "init_type"
            parameter
        """
        allowed_init_type_values = ["Fast", "Slow"]
        if init_type not in allowed_init_type_values:
            raise exceptions.InvalidParameterValueError(
                parameter="init_type",
                value=init_type,
                valid_values=allowed_init_type_values,
            )

        data = {"InitializeType": init_type}

        target_uri = self._get_initialize_action_element().target_uri
        self._conn.post(target_uri, data=data)

    def _get_metrics_path(self):
        """Helper function to find the Metrics path"""
        return utils.get_sub_resource_path_by(
            self, ["Links", "Oem", "Intel_RackScale", "Metrics"]
        )

    @property
    @utils.cache_it
    def metrics(self):
        """Property to provide reference to `Metrics` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return volume_metrics.VolumeMetrics(
            self._conn,
            self._get_metrics_path(),
            redfish_version=self.redfish_version,
        )
class VolumeCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection of volume resources, with volume creation support."""

    @property
    def _resource_type(self):
        # Members of this collection are Volume resources.
        return Volume

    def _create_volume_request(
        self,
        capacity,
        access_capabilities=None,
        capacity_sources=None,
        replica_infos=None,
        bootable=None,
    ):
        """Build and validate the JSON body for a volume-create request.

        Every supplied argument is validated against its JSON schema;
        invalid input raises a jsonschema validation error.
        """
        request = {}

        jsonschema.validate(capacity, volume_schemas.capacity_req_schema)
        request["CapacityBytes"] = capacity

        if access_capabilities is not None:
            jsonschema.validate(
                access_capabilities,
                volume_schemas.access_capabilities_req_schema,
            )
            request["AccessCapabilities"] = access_capabilities

        if capacity_sources is not None:
            jsonschema.validate(
                capacity_sources, volume_schemas.capacity_sources_req_schema
            )
            request["CapacitySources"] = capacity_sources

        if replica_infos is not None:
            jsonschema.validate(
                replica_infos, volume_schemas.replica_infos_req_schema
            )
            request["ReplicaInfos"] = replica_infos

        if bootable is not None:
            jsonschema.validate(bootable, volume_schemas.bootable_req_schema)
            request["Oem"] = {"Intel_RackScale": {"Bootable": bootable}}

        return request

    def create_volume(
        self,
        capacity,
        access_capabilities=None,
        capacity_sources=None,
        replica_infos=None,
        bootable=None,
    ):
        """Create a new volume

        :param capacity: Requested volume capacity in bytes
        :param access_capabilities: List of volume access capabilities
        :param capacity_sources: JSON for volume providing source
        :param replica_infos: JSON for volume replica infos
        :param bootable: Determines if the volume should be bootable
        :returns: The uri (service-relative) of the new volume
        """
        properties = self._create_volume_request(
            capacity=capacity,
            access_capabilities=access_capabilities,
            capacity_sources=capacity_sources,
            replica_infos=replica_infos,
            bootable=bootable,
        )
        resp = self._conn.post(self._path, data=properties)
        LOG.info("Volume created at %s", resp.headers["Location"])
        volume_url = resp.headers["Location"]
        # Strip scheme/host from the absolute Location header, keeping
        # the path starting at this collection's path.
        return volume_url[volume_url.find(self._path):]
import logging
from sushy.resources import base
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib import common as rsd_lib_common
from rsd_lib.resources.v2_3.storage_service import volume
from rsd_lib import utils as rsd_lib_utils
LOG = logging.getLogger(__name__)
class CapacityField(base.CompositeField):
    """Capacity information (in bytes) of a storage pool."""

    allocated_bytes = base.Field(
        ["Data", "AllocatedBytes"], adapter=rsd_lib_utils.num_or_none
    )
    consumed_bytes = base.Field(
        ["Data", "ConsumedBytes"], adapter=rsd_lib_utils.num_or_none
    )
    guaranteed_bytes = base.Field(
        ["Data", "GuaranteedBytes"], adapter=rsd_lib_utils.num_or_none
    )
    provisioned_bytes = base.Field(
        ["Data", "ProvisionedBytes"], adapter=rsd_lib_utils.num_or_none
    )
class CapacitySourcesField(base.ListField):
    """Capacity sources of a storage pool."""

    providing_drives = base.Field(
        "ProvidingDrives", default=(), adapter=utils.get_members_identities
    )
    provided_capacity = CapacityField("ProvidedCapacity")


class IdentifierField(base.CompositeField):
    """The durable name identifier of a storage pool."""

    durable_name = base.Field("DurableName")
    durable_name_format = base.Field("DurableNameFormat")
class StoragePool(rsd_lib_base.ResourceBase):
    """StoragePool resource class

    A pool of storage capacity exposed by a v2.3 storage service.
    """

    identity = base.Field("Id", required=True)
    """The storage pool identity string"""

    description = base.Field("Description")
    """The storage pool description string"""

    name = base.Field("Name")
    """The storage pool name string"""

    status = rsd_lib_common.StatusField("Status")
    """The storage pool status"""

    capacity = CapacityField("Capacity")
    """The storage pool capacity info"""

    capacity_sources = CapacitySourcesField("CapacitySources")
    """The storage pool capacity source info"""

    identifier = IdentifierField("Identifier")
    """The durable name identifier of this storage pool"""

    def _get_allocated_volumes_path(self):
        """Helper function to find the AllocatedVolumes path"""
        return utils.get_sub_resource_path_by(self, "AllocatedVolumes")

    @property
    @utils.cache_it
    def allocated_volumes(self):
        """Property to provide reference to `AllocatedVolumes` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return volume.VolumeCollection(
            self._conn,
            self._get_allocated_volumes_path(),
            redfish_version=self.redfish_version,
        )

    def _get_allocated_pools_path(self):
        """Helper function to find the AllocatedPools path"""
        return utils.get_sub_resource_path_by(self, "AllocatedPools")

    @property
    @utils.cache_it
    def allocated_pools(self):
        """Property to provide reference to `AllocatedPools` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return StoragePoolCollection(
            self._conn,
            self._get_allocated_pools_path(),
            redfish_version=self.redfish_version,
        )
class StoragePoolCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection of storage pool resources."""

    @property
    def _resource_type(self):
        # Members of this collection are StoragePool resources.
        return StoragePool
from sushy.resources import base
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib.resources.v2_1.chassis import chassis
from rsd_lib.resources.v2_1.chassis import log_service
from rsd_lib.resources.v2_2.chassis import power
from rsd_lib.resources.v2_2.chassis import thermal
from rsd_lib import utils as rsd_lib_utils
class LinksField(chassis.LinksField):
    """The v2.3 chassis "Links" field, extending the v2.1 field."""

    pcie_devices = base.Field(
        "PCIeDevices", adapter=utils.get_members_identities
    )
    """An array of references to the PCIe Devices located in this Chassis."""
class Chassis(rsd_lib_base.ResourceBase):
    """Chassis resource class

    A Chassis represents the physical components for any system.  This
    resource represents the sheet-metal confined spaces and logical zones
    like racks, enclosures, chassis and all other containers. Subsystems
    (like sensors), which operate outside of a system's data plane (meaning
    the resources are not accessible to software running on the system) are
    linked either directly or indirectly through this resource.
    """

    chassis_type = base.Field("ChassisType")
    """This property indicates the type of physical form factor of this
    resource.
    """

    manufacturer = base.Field("Manufacturer")
    """This is the manufacturer of this chassis."""

    model = base.Field("Model")
    """This is the model number for the chassis."""

    sku = base.Field("SKU")
    """This is the SKU for this chassis."""

    serial_number = base.Field("SerialNumber")
    """The serial number for this chassis."""

    part_number = base.Field("PartNumber")
    """The part number for this chassis."""

    asset_tag = base.Field("AssetTag")
    """The user assigned asset tag for this chassis."""

    indicator_led = base.Field("IndicatorLED")
    """The state of the indicator LED, used to identify the chassis."""

    links = LinksField("Links")
    """Contains references to other resources that are related to this
    resource.
    """

    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """

    power_state = base.Field("PowerState")
    """This is the current power state of the chassis."""

    physical_security = chassis.PhysicalSecurityField("PhysicalSecurity")
    """The state of the physical security sensor."""

    location = rsd_lib_base.LocationField("Location")
    """Location of a resource"""

    height_mm = base.Field("HeightMm", adapter=rsd_lib_utils.num_or_none)
    """The height of the chassis."""

    width_mm = base.Field("WidthMm", adapter=rsd_lib_utils.num_or_none)
    """The width of the chassis."""

    depth_mm = base.Field("DepthMm", adapter=rsd_lib_utils.num_or_none)
    """The depth of the chassis."""

    weight_kg = base.Field("WeightKg", adapter=rsd_lib_utils.num_or_none)
    """The weight of the chassis."""

    oem = chassis.OemField("Oem")
    """Oem specific properties."""

    @property
    @utils.cache_it
    def log_services(self):
        """Property to provide reference to `LogServiceCollection` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return log_service.LogServiceCollection(
            self._conn,
            utils.get_sub_resource_path_by(self, "LogServices"),
            redfish_version=self.redfish_version,
        )

    @property
    @utils.cache_it
    def thermal(self):
        """Property to provide reference to `Thermal` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return thermal.Thermal(
            self._conn,
            utils.get_sub_resource_path_by(self, "Thermal"),
            redfish_version=self.redfish_version,
        )

    @property
    @utils.cache_it
    def power(self):
        """Property to provide reference to `Power` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return power.Power(
            self._conn,
            utils.get_sub_resource_path_by(self, "Power"),
            redfish_version=self.redfish_version,
        )

    def update(self, asset_tag=None, location_id=None):
        """Update AssetTag and Location->Id properties

        :param asset_tag: The user assigned asset tag for this chassis
        :param location_id: The user assigned location id for this chassis.
            It can be changed only for a Rack Chassis
        """
        # NOTE(review): if both parameters are None an empty PATCH body
        # is sent; confirm whether the service accepts that.
        data = {}
        if asset_tag is not None:
            data["AssetTag"] = asset_tag
        if location_id is not None:
            data["Oem"] = {
                "Intel_RackScale": {"Location": {"Id": location_id}}
            }
        self._conn.patch(self.path, data=data)
class ChassisCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection resource whose members are `Chassis` instances."""

    @property
    def _resource_type(self):
        # Class instantiated for each member of the collection.
        return Chassis
import logging
from rsd_lib.resources.v2_2.ethernet_switch import ethernet_switch\
as v2_2_ethernet_switch
from sushy.resources import base
from rsd_lib import base as rsd_lib_base
from rsd_lib import utils as rsd_lib_utils
LOG = logging.getLogger(__name__)
class ClassToPriorityMappingField(base.ListField):
    """One traffic-class to priority mapping entry."""

    priority = base.Field("Priority", adapter=rsd_lib_utils.num_or_none)
    traffic_class = base.Field(
        "TrafficClass", adapter=rsd_lib_utils.num_or_none
    )
class PriorityFlowControlField(base.CompositeField):
    """Priority flow control settings of the switch."""

    enabled = base.Field("Enabled", adapter=bool)
    lossless_priorities = base.Field("LosslessPriorities")
class PriorityToClassMappingField(base.ListField):
    """One priority to traffic-class mapping entry."""

    priority = base.Field("Priority", adapter=rsd_lib_utils.num_or_none)
    traffic_class = base.Field(
        "TrafficClass", adapter=rsd_lib_utils.num_or_none
    )
# NOTE(review): class name misspells "Classification"; kept because it is
# a public name other code may reference.
class TrafficClassficationField(base.ListField):
    """One traffic classification rule (port/protocol -> class)."""

    port = base.Field("Port", adapter=rsd_lib_utils.num_or_none)
    protocol = base.Field("Protocol")
    traffic_class = base.Field(
        "TrafficClass", adapter=rsd_lib_utils.num_or_none
    )
class TransmissionSelectionField(base.ListField):
    """One transmission selection (bandwidth allocation) entry."""

    bandwidth_percent = base.Field(
        "BandwidthPercent", adapter=rsd_lib_utils.num_or_none
    )
    traffic_class = base.Field(
        "TrafficClass", adapter=rsd_lib_utils.num_or_none
    )
class EthernetSwitch(v2_2_ethernet_switch.EthernetSwitch):
    """v2.3 EthernetSwitch resource with DCB/QoS extension fields."""

    class_to_priority_mapping = ClassToPriorityMappingField(
        "ClassToPriorityMapping"
    )
    """The ethernet switch class to priority mapping"""
    dcbx_enabled = base.Field("DCBXEnabled", adapter=bool)
    """Whether DCBX is enabled on this switch"""
    ets_enabled = base.Field("ETSEnabled", adapter=bool)
    """Whether ETS (Enhanced Transmission Selection) is enabled"""
    lldp_enabled = base.Field("LLDPEnabled", adapter=bool)
    """Whether LLDP is enabled on this switch"""
    # Use num_or_none like every other numeric field in this module;
    # the original omitted the adapter.
    max_acl_number = base.Field(
        "MaxACLNumber", adapter=rsd_lib_utils.num_or_none
    )
    """The maximum number of ACLs supported by this switch"""
    metrics = base.Field(
        "Metrics", default=(), adapter=rsd_lib_utils.get_resource_identity
    )
    """The ethernet switch metrics"""
    priority_flow_control = PriorityFlowControlField("PriorityFlowControl")
    """The ethernet switch priority flow control"""
    priority_to_class_mapping = PriorityToClassMappingField(
        "PriorityToClassMapping"
    )
    """The ethernet switch priority to class mapping"""
    traffic_classification = TrafficClassficationField("TrafficClassification")
    """The ethernet switch traffic classification"""
    transmission_selection = TransmissionSelectionField(
        "TransmissionSelection"
    )
    """The ethernet switch transmission selection"""
class EthernetSwitchCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection resource whose members are `EthernetSwitch` instances."""

    @property
    def _resource_type(self):
        # Class instantiated for each member of the collection.
        return EthernetSwitch
from sushy.resources import base
from rsd_lib import exceptions as rsd_lib_exceptions
from rsd_lib.resources import v2_3
from rsd_lib.resources.v2_4.fabric import fabric
from rsd_lib.resources.v2_4.node import node
from rsd_lib.resources.v2_4.storage_service import storage_service
from rsd_lib.resources.v2_4.system import system
from rsd_lib.resources.v2_4.types import RESOURCE_CLASS
class RSDLibV2_4(v2_3.RSDLibV2_3):
    """Root service client for the v2.4 Rack Scale Design REST API."""

    _telemetry_service_path = base.Field(
        ["Oem", "Intel_RackScale", "TelemetryService", "@odata.id"]
    )
    """TelemetryService path"""

    def get_system(self, identity):
        """Given the identity return a System object

        :param identity: The identity of the System resource
        :returns: The System object
        """
        return system.System(
            self._conn, identity, redfish_version=self.redfish_version
        )

    def get_system_collection(self):
        """Get the SystemCollection object

        :raises: MissingAttributeError, if the collection attribute is
            not found
        :returns: a SystemCollection object
        """
        return system.SystemCollection(
            self._conn,
            self._systems_path,
            redfish_version=self.redfish_version,
        )

    def get_storage_service_collection(self):
        """Get the StorageServiceCollection object

        :raises: MissingAttributeError, if the collection attribute is
            not found
        :returns: a StorageServiceCollection object
        """
        return storage_service.StorageServiceCollection(
            self._conn,
            self._storage_service_path,
            redfish_version=self.redfish_version,
        )

    def get_storage_service(self, identity):
        """Given the identity return a StorageService object

        :param identity: The identity of the StorageService resource
        :returns: The StorageService object
        """
        return storage_service.StorageService(
            self._conn, identity, redfish_version=self.redfish_version
        )

    def get_node_collection(self):
        """Get the NodeCollection object

        :raises: MissingAttributeError, if the collection attribute is
            not found
        :returns: a NodeCollection object
        """
        return node.NodeCollection(
            self._conn, self._nodes_path, redfish_version=self.redfish_version
        )

    def get_node(self, identity):
        """Given the identity return a Node object

        :param identity: The identity of the Node resource
        :returns: The Node object
        """
        return node.Node(
            self._conn, identity, redfish_version=self.redfish_version
        )

    def get_fabric_collection(self):
        """Get the FabricCollection object

        :raises: MissingAttributeError, if the collection attribute is
            not found
        :returns: a FabricCollection object
        """
        return fabric.FabricCollection(
            self._conn,
            self._fabrics_path,
            redfish_version=self.redfish_version,
        )

    def get_fabric(self, identity):
        """Given the identity return a Fabric object

        :param identity: The identity of the Fabric resource
        :returns: The Fabric object
        """
        return fabric.Fabric(
            self._conn, identity, redfish_version=self.redfish_version
        )

    def get_resource(self, path):
        """Return corresponding resource object from path

        :param path: The path of a resource or resource collection
        :returns: corresponding resource or resource collection object
        """
        # Resolve the concrete class from the resource's @odata.type.
        resource_class = self._get_resource_class_from_path(
            path, RESOURCE_CLASS
        )
        if not resource_class:
            raise rsd_lib_exceptions.NoMatchingResourceError(uri=path)
        return resource_class(
            self._conn, path, redfish_version=self.redfish_version
        )
from sushy.resources import base
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib.resources.v2_2.system import processor
from rsd_lib import utils as rsd_lib_utils
class OnPackageMemoryField(base.ListField):
    """One memory module integrated on the processor package."""

    memory_type = base.Field("Type")
    """Type of memory"""
    capacity_mb = base.Field("CapacityMB", adapter=rsd_lib_utils.num_or_none)
    """Memory capacity"""
    speed_mhz = base.Field("SpeedMHz", adapter=rsd_lib_utils.num_or_none)
    """Memory speed"""
class ReconfigurationSlotsDetailsField(base.ListField):
    """Details of one FPGA reconfiguration slot."""

    slot_id = base.Field("SlotId")
    """Reconfiguration slot identity"""
    uuid = base.Field("UUID")
    """Reconfiguration slot uuid"""
    programmable_from_host = base.Field("ProgrammableFromHost", adapter=bool)
    """Indicates whether the slot is programmable from the host"""
class FpgaField(base.CompositeField):
    """FPGA-specific properties of a Processor resource."""

    fpga_type = base.Field("Type")
    """Type of FPGA"""
    model = base.Field("Model")
    """Model of FPGA"""
    fw_id = base.Field("FwId")
    """Firmware identity of FPGA"""
    fw_manufacturer = base.Field("FwManufacturer")
    """Firmware manufacturer of FPGA"""
    fw_version = base.Field("FwVersion")
    """Firmware version of FPGA"""
    host_interface = base.Field("HostInterface")
    """Host interface of FPGA"""
    external_interfaces = base.Field("ExternalInterfaces")
    """External interfaces of FPGA"""
    sideband_interface = base.Field("SidebandInterface")
    """Sideband interface of FPGA"""
    pcie_virtual_functions = base.Field(
        "PCIeVirtualFunctions", adapter=rsd_lib_utils.num_or_none
    )
    """PCIe Virtual functions of FPGA"""
    programmable_from_host = base.Field("ProgrammableFromHost", adapter=bool)
    """Indicates whether the FPGA is programmable from the host"""
    reconfiguration_slots = base.Field(
        "ReconfigurationSlots", adapter=rsd_lib_utils.num_or_none
    )
    """Number of supported reconfiguration slots"""
    reconfiguration_slots_details = ReconfigurationSlotsDetailsField(
        "ReconfigurationSlotsDetails"
    )
    """Details of supported reconfiguration slots"""
    # TODO(linyang): might need to return instance instead of URI
    acceleration_functions = base.Field(
        "AccelerationFunctions", adapter=rsd_lib_utils.get_resource_identity
    )
    """The reference to a resource of type AccelerationFunctions"""
class IntelRackScaleField(processor.IntelRackScaleField):
    """v2.4 Intel_RackScale OEM extensions for a Processor."""

    on_package_memory = OnPackageMemoryField("OnPackageMemory")
    """An array of memory modules integrated on the processor package"""
    thermal_design_power_watt = base.Field(
        "ThermalDesignPowerWatt", adapter=rsd_lib_utils.num_or_none
    )
    """Thermal Design Power (TDP) of this processor"""
    metrics = base.Field(
        "Metrics", adapter=rsd_lib_utils.get_resource_identity
    )
    """A reference to the Metrics associated with this Processor"""
    extended_identification_registers = rsd_lib_base.DynamicField(
        "ExtendedIdentificationRegisters"
    )
    """Extended contents of the Identification Registers (CPUID) for this
    processor
    """
    fpga = FpgaField("FPGA")
    """FPGA specific properties for FPGA ProcessorType"""
    # TODO(linyang): might need to return instance instead of URI
    pcie_function = base.Field(
        "PCIeFunction", adapter=rsd_lib_utils.get_resource_identity
    )
    """The reference to a resource of type PCIeFunction"""
class OemField(base.CompositeField):
    """Oem wrapper holding the Intel_RackScale extension object."""

    intel_rackscale = IntelRackScaleField("Intel_RackScale")
    """Intel Rack Scale Design extensions ('Intel_RackScale' object)"""
class LinksIntelRackScaleField(base.CompositeField):
    """Intel_RackScale-specific links of a Processor."""

    connected_port = base.Field(
        "ConnectedPort", adapter=rsd_lib_utils.get_resource_identity
    )
    """The reference to a resource of type ConnectedPort"""
    endpoints = base.Field("Endpoints", adapter=utils.get_members_identities)
    """The reference to a list of type Endpoints"""
    connected_processors = base.Field(
        "ConnectedProcessors", adapter=utils.get_members_identities
    )
    """The reference to a list of type ConnectedProcessors"""
class LinksOemField(base.CompositeField):
    """Oem wrapper for the Intel Rack Scale reference links."""

    intel_rackscale = LinksIntelRackScaleField("Intel_RackScale")
    """The Intel Rack Scale specific reference links"""
class LinksField(base.CompositeField):
    """Links from a Processor to related resources."""

    chassis = base.Field(
        "Chassis", adapter=rsd_lib_utils.get_resource_identity
    )
    """The reference to a resource of type Chassis that represent the physical
    container associated with this processor
    """
    oem = LinksOemField("Oem")
    """The Oem specific reference links"""
class Processor(processor.Processor):
    """v2.4 Processor resource with SubProcessors support."""

    links = LinksField("Links")
    """Contain references to resources that are related to, but not contained
    by (subordinate to), this processor
    """
    oem = OemField("Oem")
    """Oem extension object"""

    def _get_sub_processors_path(self):
        """Helper function to find the SubProcessors collection path"""
        return utils.get_sub_resource_path_by(self, "SubProcessors")

    @property
    @utils.cache_it
    def sub_processors(self):
        """Property to provide reference to `ProcessorCollection` instance

        It is calculated once the first time it is queried. On refresh,
        this property is reset.
        """
        return ProcessorCollection(
            self._conn,
            self._get_sub_processors_path(),
            redfish_version=self.redfish_version,
        )
class ProcessorCollection(processor.ProcessorCollection):
    """Collection resource whose members are v2.4 `Processor` instances."""

    @property
    def _resource_type(self):
        # Class instantiated for each member of the collection.
        return Processor
# JSON schema for the "Identifiers" property of an endpoint creation
# request: durable-name descriptors, each requiring both format and name.
identifiers_req_schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "DurableNameFormat": {"type": "string"},
            "DurableName": {"type": "string"},
        },
        "required": ["DurableNameFormat", "DurableName"],
    },
}
# JSON schema for the "ConnectedEntities" property of an endpoint creation
# request. Only "EntityRole" is mandatory for each entry.
connected_entities_req_schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "EntityLink": {
                "type": ["object", "null"],
                "properties": {"@odata.id": {"type": "string"}},
                "required": ["@odata.id"],
            },
            "EntityRole": {
                "type": "string",
                "enum": ["Initiator", "Target", "Both"],
            },
            "EntityType": {
                "type": "string",
                "enum": [
                    "StorageInitiator",
                    "RootComplex",
                    "NetworkController",
                    "Drive",
                    "StorageExpander",
                    "DisplayController",
                    "Bridge",
                    "Processor",
                    "Volume",
                ],
            },
            "EntityPciId": {
                "type": "object",
                "properties": {
                    "DeviceId": {"type": "string"},
                    "VendorId": {"type": "string"},
                    "SubsystemId": {"type": "string"},
                    "SubsystemVendorId": {"type": "string"},
                },
            },
            "PciFunctionNumber": {"type": "number"},
            "PciClassCode": {"type": "string"},
            "Identifiers": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "DurableNameFormat": {
                            "type": "string",
                            "enum": [
                                "NQN",
                                "iQN",
                                "FC_WWN",
                                "UUID",
                                "EUI",
                                "NAA",
                                "NSID",
                                "SystemPath",
                                "LUN",
                            ],
                        },
                        "DurableName": {"type": "string"},
                    },
                    "required": ["DurableNameFormat", "DurableName"],
                },
            },
        },
        "required": ["EntityRole"],
    },
}
# Endpoint protocol is a plain string (e.g. "NVMeOverFabrics").
protocol_req_schema = {"type": "string"}

# PCI identity descriptor of the endpoint.
pci_id_req_schema = {
    "type": "object",
    "properties": {
        "DeviceId": {"type": "string"},
        "VendorId": {"type": "string"},
        "SubsystemId": {"type": "string"},
        "SubsystemVendorId": {"type": "string"},
    },
}

# Amount of host memory (bytes) to reserve for this endpoint.
host_reservation_memory_bytes_req_schema = {"type": "number"}
# Transport details (protocol, IPv4/IPv6 address, port) used to reach
# the endpoint.
ip_transport_details_req_schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "TransportProtocol": {"type": "string"},
            "IPv4Address": {
                "type": "object",
                "properties": {"Address": {"type": "string"}},
            },
            "IPv6Address": {
                "type": "object",
                "properties": {"Address": {"type": "string"}},
            },
            "Port": {"type": "number"},
        },
    },
}
# Links to the fabric ports backing this endpoint.
links_req_schema = {
    "type": "object",
    "properties": {
        "Ports": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
            },
        }
    },
}

# Intel RackScale OEM extension: endpoint protocol plus optional
# username/password authentication.
oem_req_schema = {
    "type": "object",
    "properties": {
        "Intel_RackScale": {
            "type": "object",
            "properties": {
                "EndpointProtocol": {"type": "string"},
                "Authentication": {
                    "type": "object",
                    "properties": {
                        "Username": {"type": "string"},
                        "Password": {"type": "string"},
                    },
                },
            },
        }
    },
}
# JSON schema for the "Processors" requirement block of a compose-node
# request.
processor_req_schema = {
    "type": "array",
    "items": [
        {
            "type": "object",
            "properties": {
                "@odata.type": {"type": "string"},
                "Model": {"type": "string"},
                "TotalCores": {"type": "number"},
                "AchievableSpeedMHz": {"type": "number"},
                "InstructionSet": {
                    "type": "string",
                    "enum": [
                        "x86",
                        "x86-64",
                        "IA-64",
                        "ARM-A32",
                        "ARM-A64",
                        "MIPS32",
                        "MIPS64",
                        "OEM",
                    ],
                },
                "Oem": {
                    "type": "object",
                    "properties": {
                        "Intel_RackScale": {
                            "type": "object",
                            "properties": {
                                "Brand": {
                                    "type": "string",
                                    "enum": [
                                        "E3",
                                        "E5",
                                        "E7",
                                        "X3",
                                        "X5",
                                        "X7",
                                        "I3",
                                        "I5",
                                        "I7",
                                        "Silver",
                                        "Gold",
                                        "Platinum",
                                        "Unknown",
                                    ],
                                },
                                "Capabilities": {
                                    "type": "array",
                                    "items": [{"type": "string"}],
                                },
                            },
                        }
                    },
                },
                "Resource": {
                    "type": "object",
                    "properties": {"@odata.id": {"type": "string"}},
                },
                "Chassis": {
                    "type": "object",
                    "properties": {"@odata.id": {"type": "string"}},
                },
                "ProcessorType": {
                    "type": "string",
                    "enum": [
                        "CPU",
                        "FPGA",
                        "GPU",
                        "DSP",
                        "Accelerator",
                        "OEM",
                    ],
                },
                "Connectivity": {
                    "type": "array",
                    "items": [
                        {
                            "type": "string",
                            "enum": ["Local", "Ethernet", "RemotePCIe"],
                        }
                    ],
                },
            },
            "additionalProperties": False,
        }
    ],
}
# JSON schema for the "Memory" requirement block of a compose-node request.
# NOTE(review): "NVMDIMM_P" looks like a misspelling of "NVDIMM_P" — kept
# as-is; confirm against the RSD PODM API spec before changing.
memory_req_schema = {
    "type": "array",
    "items": [
        {
            "type": "object",
            "properties": {
                "@odata.type": {"type": "string"},
                "CapacityMiB": {"type": "number"},
                "MemoryType": {
                    "type": "string",
                    "enum": [
                        "DRAM",
                        "NVDIMM_N",
                        "NVDIMM_F",
                        "NVMDIMM_P",
                        "IntelOptane",
                    ],
                },
                "MemoryDeviceType": {
                    "type": "string",
                    "enum": [
                        "DDR",
                        "DDR2",
                        "DDR3",
                        "DDR4",
                        "DDR4_SDRAM",
                        "DDR4E_SDRAM",
                        "LPDDR4_SDRAM",
                        "DDR3_SDRAM",
                        "LPDDR3_SDRAM",
                        "DDR2_SDRAM",
                        "DDR2_SDRAM_FB_DIMM",
                        "DDR2_SDRAM_FB_DIMM_PROBE",
                        "DDR_SGRAM",
                        "DDR_SDRAM",
                        "ROM",
                        "SDRAM",
                        "EDO",
                        "FastPageMode",
                        "PipelinedNibble",
                    ],
                },
                "SpeedMHz": {"type": "number"},
                "Manufacturer": {"type": "string"},
                "DataWidthBits": {"type": "number"},
                "Resource": {
                    "type": "object",
                    "properties": {"@odata.id": {"type": "string"}},
                },
                "Chassis": {
                    "type": "object",
                    "properties": {"@odata.id": {"type": "string"}},
                },
            },
            "additionalProperties": False,
        }
    ],
}
# JSON schema for the "RemoteDrives" requirement block of a compose-node
# request.
remote_drive_req_schema = {
    "type": "array",
    "items": [
        {
            "type": "object",
            "properties": {
                "@odata.type": {"type": "string"},
                "CapacityGiB": {"type": "number"},
                "Protocol": {
                    "type": "string",
                    "enum": ["iSCSI", "NVMeOverFabrics"],
                },
                "Master": {
                    "type": "object",
                    "properties": {
                        "Type": {
                            "type": "string",
                            "enum": ["Snapshot", "Clone"],
                        },
                        "Resource": {
                            "type": "object",
                            "properties": {"@odata.id": {"type": "string"}},
                        },
                    },
                },
                "Resource": {
                    "type": "object",
                    "properties": {"@odata.id": {"type": "string"}},
                },
            },
            "additionalProperties": False,
        }
    ],
}
# JSON schema for the "LocalDrives" requirement block of a compose-node
# request.
local_drive_req_schema = {
    "type": "array",
    "items": [
        {
            "type": "object",
            "properties": {
                "@odata.type": {"type": "string"},
                "CapacityGiB": {"type": "number"},
                "Type": {"type": "string", "enum": ["HDD", "SSD"]},
                "MinRPM": {"type": "number"},
                "SerialNumber": {"type": "string"},
                "Interface": {
                    "type": "string",
                    "enum": ["SAS", "SATA", "NVMe"],
                },
                "Resource": {
                    "type": "object",
                    "properties": {"@odata.id": {"type": "string"}},
                },
                "Chassis": {
                    "type": "object",
                    "properties": {"@odata.id": {"type": "string"}},
                },
                "FabricSwitch": {"type": "boolean"},
            },
            "additionalProperties": False,
        }
    ],
}
# JSON schema for the "EthernetInterfaces" requirement block of a
# compose-node request.
ethernet_interface_req_schema = {
    "type": "array",
    "items": [
        {
            "type": "object",
            "properties": {
                "@odata.type": {"type": "string"},
                "SpeedMbps": {"type": "number"},
                "PrimaryVLAN": {"type": "number"},
                "VLANs": {
                    "type": "array",
                    "additionalItems": {
                        "type": "object",
                        "properties": {
                            "VLANId": {"type": "number"},
                            "Tagged": {"type": "boolean"},
                        },
                    },
                },
                "Resource": {
                    "type": "object",
                    "properties": {"@odata.id": {"type": "string"}},
                },
                "Chassis": {
                    "type": "object",
                    "properties": {"@odata.id": {"type": "string"}},
                },
            },
            "additionalProperties": False,
        }
    ],
}
# JSON schema for the "Security" requirement block (TPM/TXT options) of a
# compose-node request.
security_req_schema = {
    "type": "object",
    "properties": {
        "@odata.type": {"type": "string"},
        "TpmPresent": {"type": "boolean"},
        "TpmInterfaceType": {"type": "string"},
        "TxtEnabled": {"type": "boolean"},
        "ClearTPMOnDelete": {"type": "boolean"},
        "PersistentMemoryOperationOnDelete": {
            "type": "string",
            "enum": ["PreserveConfiguration", "SecureErase", "OverwritePCD"],
        },
    },
    "additionalProperties": False,
}

# Scalar totals requested for the composed node.
total_system_core_req_schema = {"type": "number"}
total_system_memory_req_schema = {"type": "number"}
from sushy.resources import base
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib.resources.v2_3.storage_service import drive
from rsd_lib.resources.v2_3.storage_service import storage_pool
from rsd_lib import utils as rsd_lib_utils
class CapacityInfoField(base.CompositeField):
    """CapacityInfo field

    The capacity of specific data type in a data store.
    """

    consumed_bytes = base.Field(
        "ConsumedBytes", adapter=rsd_lib_utils.num_or_none
    )
    """The number of bytes consumed in this data store for this data type."""
    allocated_bytes = base.Field(
        "AllocatedBytes", adapter=rsd_lib_utils.num_or_none
    )
    """The number of bytes currently allocated by the storage system in this
    data store for this data type.
    """
    guaranteed_bytes = base.Field(
        "GuaranteedBytes", adapter=rsd_lib_utils.num_or_none
    )
    """The number of bytes the storage system guarantees can be allocated in
    this data store for this data type.
    """
    provisioned_bytes = base.Field(
        "ProvisionedBytes", adapter=rsd_lib_utils.num_or_none
    )
    """The maximum number of bytes that can be allocated in this data store
    for this data type.
    """
class CapacityField(base.CompositeField):
    """Capacity field

    This is the schema definition for the Capacity of a device. It
    represents the properties for capacity for any data store.
    """

    data = CapacityInfoField("Data")
    """The capacity information relating to the user data."""
    metadata = CapacityInfoField("Metadata")
    """The capacity information relating to metadata."""
    snapshot = CapacityInfoField("Snapshot")
    """The capacity information relating to snapshot or backup data."""
    is_thin_provisioned = base.Field("IsThinProvisioned", adapter=bool)
    """Marks that the capacity is not necessarily fully allocated."""
class CapacitySource(rsd_lib_base.ResourceBase):
    """CapacitySource resource class

    A description of the type and source of storage.
    """

    provided_capacity = CapacityField("ProvidedCapacity")
    """The amount of space that has been provided from the ProvidingDrives,
    ProvidingVolumes, ProvidingMemory or ProvidingPools.
    """

    # TODO(lin.yang): Add property for references in CapacitySource resource

    @property
    @utils.cache_it
    def providing_volumes(self):
        """A list of `VolumeCollection` instances providing this capacity

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        # Imported locally — presumably to avoid a circular import with
        # the volume module; confirm before hoisting to module level.
        from rsd_lib.resources.v2_4.storage_service import volume

        return [
            volume.VolumeCollection(
                self._conn, path, redfish_version=self.redfish_version
            )
            for path in utils.get_sub_resource_path_by(
                self, "ProvidingVolumes", is_collection=True
            )
        ]

    @property
    @utils.cache_it
    def providing_pools(self):
        """A list of `StoragePoolCollection` instances providing this capacity

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return [
            storage_pool.StoragePoolCollection(
                self._conn, path, redfish_version=self.redfish_version
            )
            for path in utils.get_sub_resource_path_by(
                self, "ProvidingPools", is_collection=True
            )
        ]

    @property
    @utils.cache_it
    def providing_drives(self):
        """A list of `DriveCollection` instances providing this capacity

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return [
            drive.DriveCollection(
                self._conn, path, redfish_version=self.redfish_version
            )
            for path in utils.get_sub_resource_path_by(
                self, "ProvidingDrives", is_collection=True
            )
        ]
from sushy import exceptions
from sushy.resources import base
from rsd_lib import exceptions as rsd_lib_exceptions
from rsd_lib.resources.v2_1.chassis import chassis
from rsd_lib.resources.v2_1.ethernet_switch import ethernet_switch
from rsd_lib.resources.v2_1.event_service import event_service
from rsd_lib.resources.v2_1.fabric import fabric
from rsd_lib.resources.v2_1.manager import manager
from rsd_lib.resources.v2_1.node import node
from rsd_lib.resources.v2_1.registries import message_registry_file
from rsd_lib.resources.v2_1.storage_service import storage_service
from rsd_lib.resources.v2_1.system import system
from rsd_lib.resources.v2_1.task import task_service
from rsd_lib.resources.v2_1.types import RESOURCE_CLASS
class RSDLibV2_1(base.ResourceBase):
    """Root service client for the v2.1 Rack Scale Design REST API."""

    _systems_path = base.Field(["Systems", "@odata.id"], required=True)
    """SystemCollection path"""
    _nodes_path = base.Field(["Nodes", "@odata.id"], required=True)
    """NodeCollection path"""
    _chassis_path = base.Field(["Chassis", "@odata.id"])
    """ChassisCollection path"""
    _storage_service_path = base.Field(["Services", "@odata.id"])
    """StorageServiceCollection path"""
    _fabrics_path = base.Field(["Fabrics", "@odata.id"])
    """FabricCollection path"""
    _managers_path = base.Field(["Managers", "@odata.id"])
    """ManagerCollection path"""
    _ethernet_switches_path = base.Field(["EthernetSwitches", "@odata.id"])
    """EthernetSwitchCollecton path"""
    _task_service_path = base.Field(["Tasks", "@odata.id"])
    """Task Service path"""
    _registries_path = base.Field(["Registries", "@odata.id"])
    """RegistriesCollection path"""
    _event_service_path = base.Field(["EventService", "@odata.id"])
    """Event Service path"""
    _redfish_version = base.Field(["RedfishVersion"])
    """Redfish version"""
    _rsd_api_version = base.Field(["Oem", "Intel_RackScale", "ApiVersion"])
    """RSD API version"""

    def __init__(
        self, connector, identity="/redfish/v1/", redfish_version=None
    ):
        """A class representing the RSD v2.1 root service

        :param connector: A Connector instance
        :param identity: The identity of the root service resource
        :param redfish_version: The version of RedFish. Used to construct
            the object according to schema of the given version.
        """
        super(RSDLibV2_1, self).__init__(connector, identity, redfish_version)

    def get_system_collection(self):
        """Get the SystemCollection object

        :raises: MissingAttributeError, if the collection attribute is
            not found
        :returns: a SystemCollection object
        """
        return system.SystemCollection(
            self._conn,
            self._systems_path,
            redfish_version=self.redfish_version,
        )

    def get_system(self, identity):
        """Given the identity return a System object

        :param identity: The identity of the System resource
        :returns: The System object
        """
        return system.System(
            self._conn, identity, redfish_version=self.redfish_version
        )

    def get_node_collection(self):
        """Get the NodeCollection object

        :raises: MissingAttributeError, if the collection attribute is
            not found
        :returns: a NodeCollection object
        """
        return node.NodeCollection(
            self._conn, self._nodes_path, redfish_version=self.redfish_version
        )

    def get_node(self, identity):
        """Given the identity return a Node object

        :param identity: The identity of the Node resource
        :returns: The Node object
        """
        return node.Node(
            self._conn, identity, redfish_version=self.redfish_version
        )

    def get_storage_service_collection(self):
        """Get the StorageServiceCollection object

        :raises: MissingAttributeError, if the collection attribute is
            not found
        :returns: a StorageServiceCollection object
        """
        return storage_service.StorageServiceCollection(
            self._conn,
            self._storage_service_path,
            redfish_version=self.redfish_version,
        )

    def get_storage_service(self, identity):
        """Given the identity return a StorageService object

        :param identity: The identity of the StorageService resource
        :returns: The StorageService object
        """
        return storage_service.StorageService(
            self._conn, identity, redfish_version=self.redfish_version
        )

    def get_chassis_collection(self):
        """Get the ChassisCollection object

        :raises: MissingAttributeError, if the collection attribute is
            not found
        :returns: a ChassisCollection object
        """
        return chassis.ChassisCollection(
            self._conn,
            self._chassis_path,
            redfish_version=self.redfish_version,
        )

    def get_chassis(self, identity):
        """Given the identity return a Chassis object

        :param identity: The identity of the Chassis resource
        :returns: The Chassis object
        """
        return chassis.Chassis(
            self._conn, identity, redfish_version=self.redfish_version
        )

    def get_fabric_collection(self):
        """Get the FabricCollection object

        :raises: MissingAttributeError, if the collection attribute is
            not found
        :returns: a FabricCollection object
        """
        return fabric.FabricCollection(
            self._conn,
            self._fabrics_path,
            redfish_version=self.redfish_version,
        )

    def get_fabric(self, identity):
        """Given the identity return a Fabric object

        :param identity: The identity of the Fabric resource
        :returns: The Fabric object
        """
        return fabric.Fabric(
            self._conn, identity, redfish_version=self.redfish_version
        )

    def get_manager_collection(self):
        """Get the ManagerCollection object

        :raises: MissingAttributeError, if the collection attribute is
            not found
        :returns: a ManagerCollection object
        """
        return manager.ManagerCollection(
            self._conn,
            self._managers_path,
            redfish_version=self.redfish_version,
        )

    def get_manager(self, identity):
        """Given the identity return a Manager object

        :param identity: The identity of the Manager resource
        :returns: The Manager object
        """
        return manager.Manager(
            self._conn, identity, redfish_version=self.redfish_version
        )

    def get_ethernet_switch_collection(self):
        """Get the EthernetSwitchCollection object

        :raises: MissingAttributeError, if the collection attribute is
            not found
        :returns: a EthernetSwitchCollection object
        """
        return ethernet_switch.EthernetSwitchCollection(
            self._conn,
            self._ethernet_switches_path,
            redfish_version=self.redfish_version,
        )

    def get_ethernet_switch(self, identity):
        """Given the identity return a EthernetSwitch object

        :param identity: The identity of the EthernetSwitch resource
        :returns: The EthernetSwitch object
        """
        return ethernet_switch.EthernetSwitch(
            self._conn, identity, redfish_version=self.redfish_version
        )

    def get_task_service(self):
        """Get the TaskService object

        :returns: The TaskService object
        """
        return task_service.TaskService(
            self._conn,
            self._task_service_path,
            redfish_version=self.redfish_version,
        )

    def get_registries_collection(self):
        """Get the RegistriesCollection object

        :raises: MissingAttributeError, if the collection attribute is
            not found
        :returns: a RegistriesCollection object
        """
        return message_registry_file.MessageRegistryFileCollection(
            self._conn,
            self._registries_path,
            redfish_version=self.redfish_version,
        )

    def get_registries(self, identity):
        """Given the identity return a Registries object

        :param identity: The identity of the registries resource
        :returns: The Registries object
        """
        return message_registry_file.MessageRegistryFile(
            self._conn, identity, redfish_version=self.redfish_version
        )

    def get_event_service(self):
        """Return a EventService object

        :returns: The EventService object
        """
        return event_service.EventService(
            self._conn,
            self._event_service_path,
            redfish_version=self.redfish_version,
        )

    def _get_resource_class_from_path(self, path, resource_class):
        """Get resource class from a given path

        :param path: Path of any rsd resource
        :param resource_class: Mapping for looking up resource class by
            entity_type string
        :returns: Corresponding resource class
        """
        body = self._conn.get(path=path).json()
        if not body.get("@odata.type"):
            raise exceptions.MissingAttributeError(
                attribute="@odata.type", resource=path
            )
        # Normally the format of resource_type is '#{namespace}.{entity_type}'
        # Here we use entity_type to find the corresponding resource class
        entity_type = body["@odata.type"].split(".")[-1]
        return resource_class.get(entity_type)

    def get_resource(self, path):
        """Return corresponding resource object from path

        :param path: The path of a resource or resource collection
        :returns: corresponding resource or resource collection object
        """
        resource_class = self._get_resource_class_from_path(
            path, RESOURCE_CLASS
        )
        if not resource_class:
            raise rsd_lib_exceptions.NoMatchingResourceError(uri=path)
        return resource_class(
            self._conn, path, redfish_version=self.redfish_version
        )
from sushy.resources import base
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib import utils as rsd_lib_utils
class LinksField(base.CompositeField):
    """Links from a PCIeFunction to the resources it produces."""

    ethernet_interfaces = base.Field(
        "EthernetInterfaces", adapter=utils.get_members_identities
    )
    """An array of references to the ethernet interfaces which the PCIe device
    produces
    """
    drives = base.Field("Drives", adapter=utils.get_members_identities)
    """An array of references to the drives which the PCIe device produces"""
    storage_controllers = base.Field(
        "StorageControllers", adapter=utils.get_members_identities
    )
    """An array of references to the storage controllers which the PCIe device
    produces
    """
    pcie_device = base.Field(
        "PCIeDevice", adapter=rsd_lib_utils.get_resource_identity
    )
    """A reference to the PCIeDevice on which this function resides."""
class PCIeFunction(rsd_lib_base.ResourceBase):
    """PCIeFunction resource class

    This is the schema definition for the PCIeFunction resource. It
    represents the properties of a PCIeFunction attached to a System.
    """
    function_id = base.Field("FunctionId", adapter=rsd_lib_utils.num_or_none)
    """The PCIe Function identifier."""
    function_type = base.Field("FunctionType")
    """The type of the PCIe Function."""
    device_class = base.Field("DeviceClass")
    """The class for this PCIe Function."""
    device_id = base.Field("DeviceId")
    """The Device ID of this PCIe function."""
    vendor_id = base.Field("VendorId")
    """The Vendor ID of this PCIe function."""
    class_code = base.Field("ClassCode")
    """The Class Code of this PCIe function."""
    revision_id = base.Field("RevisionId")
    """The Revision ID of this PCIe function."""
    subsystem_id = base.Field("SubsystemId")
    """The Subsystem ID of this PCIe function."""
    subsystem_vendor_id = base.Field("SubsystemVendorId")
    """The Subsystem Vendor ID of this PCIe function."""
    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """
    links = LinksField("Links")
    """The links object contains the links to other resources that are related
    to this resource.
    """
from sushy.resources import base
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib.resources.v2_1.common import ip_addresses
from rsd_lib.resources.v2_1.ethernet_switch import vlan_network_interface
from rsd_lib import utils as rsd_lib_utils
class LinksIntelRackScaleField(base.CompositeField):
    """Intel Rack Scale OEM links of an ethernet interface."""
    neighbor_port = base.Field(
        "NeighborPort", adapter=rsd_lib_utils.get_resource_identity
    )
    """The Intel Rack Scale neighbor port of this interface"""
class LinksOemField(base.CompositeField):
    """OEM extension wrapper inside an ethernet interface's Links object."""
    # NOTE(review): the attribute name uses camelCase ("intel_rackScale")
    # while sibling modules expose "intel_rackscale"; renaming would break
    # existing callers, so the inconsistency is only flagged here.
    intel_rackScale = LinksIntelRackScaleField("Intel_RackScale")
    """The Oem Intel_RackScale"""
class LinksField(base.CompositeField):
    """References from an ethernet interface to its related resources."""
    endpoints = base.Field("Endpoints", adapter=utils.get_members_identities)
    """An array of references to the endpoints that connect to this ethernet
    interface.
    """
    oem = LinksOemField("Oem")
    """The OEM extensions of Links"""
class IPv6AddressPolicyEntryCollectionField(base.ListField):
    """One row of the RFC 6724 address selection policy table."""
    prefix = base.Field("Prefix")
    """The IPv6 Address Prefix (as defined in RFC 6724 section 2.1)"""
    precedence = base.Field("Precedence", adapter=rsd_lib_utils.num_or_none)
    """The IPv6 Precedence (as defined in RFC 6724 section 2.1)"""
    label = base.Field("Label", adapter=rsd_lib_utils.num_or_none)
    """The IPv6 Label (as defined in RFC 6724 section 2.1)"""
class EthernetInterface(rsd_lib_base.ResourceBase):
    """EthernetInterface resource class

    This schema defines a simple ethernet NIC resource.
    """
    uefi_device_path = base.Field("UefiDevicePath")
    """The UEFI device path for this interface"""
    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """
    interface_enabled = base.Field("InterfaceEnabled", adapter=bool)
    """This indicates whether this interface is enabled."""
    permanent_mac_address = base.Field("PermanentMACAddress")
    """This is the permanent MAC address assigned to this interface (port)"""
    mac_address = base.Field("MACAddress")
    """This is the currently configured MAC address of the (logical port)
    interface.
    """
    speed_mbps = base.Field("SpeedMbps", adapter=rsd_lib_utils.num_or_none)
    """This is the current speed in Mbps of this interface."""
    auto_neg = base.Field("AutoNeg", adapter=bool)
    """This indicates if the speed and duplex are automatically negotiated and
    configured on this interface.
    """
    full_duplex = base.Field("FullDuplex", adapter=bool)
    """This indicates if the interface is in Full Duplex mode or not."""
    mtu_size = base.Field("MTUSize", adapter=rsd_lib_utils.num_or_none)
    """This is the currently configured Maximum Transmission Unit (MTU) in
    bytes on this interface.
    """
    host_name = base.Field("HostName")
    """The DNS Host Name, without any domain information"""
    fqdn = base.Field("FQDN")
    """This is the complete, fully qualified domain name obtained by DNS for
    this interface.
    """
    max_ipv6_static_addresses = base.Field(
        "MaxIPv6StaticAddresses", adapter=rsd_lib_utils.num_or_none
    )
    """This indicates the maximum number of Static IPv6 addresses that can be
    configured on this interface.
    """
    vlan = vlan_network_interface.VLANField("VLAN")
    """If this Network Interface supports more than one VLAN, this property
    will not be present and the client should look for VLANs collection in
    the link section of this resource.
    """
    ipv4_addresses = ip_addresses.IPv4AddressCollectionField("IPv4Addresses")
    """The IPv4 addresses assigned to this interface"""
    ipv6_address_policy_table = IPv6AddressPolicyEntryCollectionField(
        "IPv6AddressPolicyTable"
    )
    """An array representing the RFC 6724 Address Selection Policy Table."""
    ipv6_addresses = ip_addresses.IPv6AddressCollectionField("IPv6Addresses")
    """This array of objects enumerates all of the currently assigned IPv6
    addresses on this interface.
    """
    ipv6_static_addresses = ip_addresses.IPv6StaticAddressCollectionField(
        "IPv6StaticAddresses"
    )
    """This array of objects represents all of the IPv6 static addresses to be
    assigned on this interface.
    """
    ipv6_default_gateway = base.Field("IPv6DefaultGateway")
    """This is the IPv6 default gateway address that is currently in use on
    this interface.
    """
    name_servers = base.Field("NameServers")
    """This represents DNS name servers that are currently in use on this
    interface.
    """
    link_status = base.Field("LinkStatus")
    """The link status of this interface (port)"""
    links = LinksField("Links")
    """Contains references to other resources that are related to this
    resource.
    """
    @property
    @utils.cache_it
    def vlans(self):
        """Property to provide reference to `VLanNetworkInterfaceCollection` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.

        :returns: VLanNetworkInterfaceCollection built from the "VLANs"
            sub-resource of this interface
        """
        return vlan_network_interface.VLanNetworkInterfaceCollection(
            self._conn,
            utils.get_sub_resource_path_by(self, "VLANs"),
            redfish_version=self.redfish_version,
        )
class EthernetInterfaceCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection of EthernetInterface resources."""
    @property
    def _resource_type(self):
        # Members of this collection are parsed as EthernetInterface.
        return EthernetInterface
from sushy.resources import base
from rsd_lib import base as rsd_lib_base
from rsd_lib import utils as rsd_lib_utils
class IntelRackScaleField(base.CompositeField):
    """Intel Rack Scale OEM properties of a Memory resource."""
    voltage_volt = base.Field("VoltageVolt", adapter=rsd_lib_utils.num_or_none)
    """This indicates current voltage of memory module"""
class OemField(base.CompositeField):
    """OEM extension wrapper of a Memory resource."""
    intel_rackscale = IntelRackScaleField("Intel_RackScale")
    """Intel Rack Scale Design specific properties."""
class MemoryLocationField(base.CompositeField):
    """Physical location of a Memory module on the platform."""
    socket = base.Field("Socket", adapter=rsd_lib_utils.num_or_none)
    """Socket number in which Memory is connected"""
    memory_controller = base.Field(
        "MemoryController", adapter=rsd_lib_utils.num_or_none
    )
    """Memory controller number in which Memory is connected"""
    channel = base.Field("Channel", adapter=rsd_lib_utils.num_or_none)
    """Channel number in which Memory is connected"""
    slot = base.Field("Slot", adapter=rsd_lib_utils.num_or_none)
    """Slot number in which Memory is connected"""
class RegionSetCollectionField(base.ListField):
    """RegionSet field

    Memory region information within a Memory resource.
    """
    region_id = base.Field("RegionId")
    """Unique region ID representing a specific region within the Memory"""
    memory_classification = base.Field("MemoryClassification")
    """Classification of memory occupied by the given memory region"""
    offset_mib = base.Field("OffsetMiB", adapter=rsd_lib_utils.num_or_none)
    """Offset with in the Memory that corresponds to the starting of this
    memory region in MiB
    """
    size_mib = base.Field("SizeMiB", adapter=rsd_lib_utils.num_or_none)
    """Size of this memory region in MiB"""
    passphrase_state = base.Field("PassphraseState", adapter=bool)
    """State of the passphrase for this region"""
class Memory(rsd_lib_base.ResourceBase):
    """Memory resource class

    This is the schema definition of a Memory module and its
    configuration.
    """
    memory_type = base.Field("MemoryType")
    """The type of Memory"""
    memory_device_type = base.Field("MemoryDeviceType")
    """Type details of the Memory"""
    base_module_type = base.Field("BaseModuleType")
    """The base module type of Memory"""
    memory_media = base.Field("MemoryMedia")
    """Media of this Memory"""
    capacity_mib = base.Field("CapacityMiB", adapter=rsd_lib_utils.num_or_none)
    """Memory Capacity in MiB."""
    data_width_bits = base.Field(
        "DataWidthBits", adapter=rsd_lib_utils.num_or_none
    )
    """Data Width in bits."""
    bus_width_bits = base.Field(
        "BusWidthBits", adapter=rsd_lib_utils.num_or_none
    )
    """Bus Width in bits."""
    manufacturer = base.Field("Manufacturer")
    """The Memory manufacturer"""
    serial_number = base.Field("SerialNumber")
    """The product serial number of this device"""
    part_number = base.Field("PartNumber")
    """The product part number of this device"""
    allowed_speeds_mhz = base.Field("AllowedSpeedsMHz")
    """Speed bins supported by this Memory"""
    firmware_revision = base.Field("FirmwareRevision")
    """Revision of firmware on the Memory controller"""
    firmware_api_version = base.Field("FirmwareApiVersion")
    """Version of API supported by the firmware"""
    function_classes = base.Field("FunctionClasses")
    """Function Classes by the Memory"""
    vendor_id = base.Field("VendorID")
    """Vendor ID"""
    device_id = base.Field("DeviceID")
    """Device ID"""
    subsystem_vendor_id = base.Field("SubsystemVendorID")
    """SubSystem Vendor ID"""
    subsystem_device_id = base.Field("SubsystemDeviceID")
    """Subsystem Device ID"""
    max_tdp_milli_watts = base.Field("MaxTDPMilliWatts")
    """Maximum TDPs in milli Watts"""
    spare_device_count = base.Field(
        "SpareDeviceCount", adapter=rsd_lib_utils.num_or_none
    )
    """Number of unused spare devices available in the Memory"""
    rank_count = base.Field("RankCount", adapter=rsd_lib_utils.num_or_none)
    """Number of ranks available in the Memory"""
    device_locator = base.Field("DeviceLocator")
    """Location of the Memory in the platform"""
    memory_location = MemoryLocationField("MemoryLocation")
    """Memory connection information to sockets and memory controllers."""
    error_correction = base.Field("ErrorCorrection")
    """Error correction scheme supported for this memory"""
    operating_speed_mhz = base.Field(
        "OperatingSpeedMhz", adapter=rsd_lib_utils.num_or_none
    )
    """Operating speed of Memory in MHz"""
    volatile_region_size_limit_mib = base.Field(
        "VolatileRegionSizeLimitMiB", adapter=rsd_lib_utils.num_or_none
    )
    """Total size of volatile regions in MiB"""
    persistent_region_size_limit_mib = base.Field(
        "PersistentRegionSizeLimitMiB", adapter=rsd_lib_utils.num_or_none
    )
    """Total size of persistent regions in MiB"""
    regions = RegionSetCollectionField("Regions")
    """Memory regions information within the Memory"""
    operating_memory_modes = base.Field("OperatingMemoryModes")
    """Memory modes supported by the Memory"""
    is_spare_device_enabled = base.Field("IsSpareDeviceEnabled", adapter=bool)
    """Spare device enabled status"""
    is_rank_spare_enabled = base.Field("IsRankSpareEnabled", adapter=bool)
    """Rank spare enabled status"""
    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """
    oem = OemField("Oem")
    """Oem specific properties."""
    # TODO(linyang): Add Action Field
class MemoryCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection of Memory resources."""
    @property
    def _resource_type(self):
        # Members of this collection are parsed as Memory.
        return Memory
from sushy import exceptions
from sushy.resources import base
from sushy.resources import common
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib.resources.v2_1.system import volume
from rsd_lib import utils as rsd_lib_utils
class IntelRackScaleField(base.CompositeField):
    """Intel Rack Scale OEM properties of a Drive resource."""
    erase_on_detach = base.Field("EraseOnDetach", adapter=bool)
    """This indicates if drive should be erased when detached from PCI switch.
    """
    drive_erased = base.Field("DriveErased", adapter=bool)
    """This indicates whether drive was cleared after assignment to composed
    node.
    """
    firmware_version = base.Field("FirmwareVersion")
    """This indicates drive firmware version."""
    storage = base.Field(
        "Storage", adapter=rsd_lib_utils.get_resource_identity
    )
    """A reference to the storage controller where this drive is connected."""
    pcie_function = base.Field(
        "PCIeFunction", adapter=rsd_lib_utils.get_resource_identity
    )
    """A reference to the PCIe function that provides this drive functionality.
    """
class LinksField(base.CompositeField):
    """References from a Drive to its related resources."""
    volumes = base.Field("Volumes", adapter=utils.get_members_identities)
    """An array of references to the volumes contained in this drive. This
    will reference Volumes that are either wholly or only partly contained
    by this drive.
    """
    endpoints = base.Field("Endpoints", adapter=utils.get_members_identities)
    """An array of references to the endpoints that connect to this drive."""
class OemField(base.CompositeField):
    """OEM extension wrapper of a Drive resource."""
    intel_rackscale = IntelRackScaleField("Intel_RackScale")
    """Intel Rack Scale Design specific properties."""
class ActionsField(base.CompositeField):
    """Available Redfish actions on a Drive resource."""
    # Descriptor (target URI etc.) of the "#Drive.SecureErase" action.
    secure_erase = common.ActionField("#Drive.SecureErase")
class Drive(rsd_lib_base.ResourceBase):
    """Drive resource class

    Drive contains properties describing a single physical disk drive for
    any system, along with links to associated Volumes.
    """
    status_indicator = base.Field("StatusIndicator")
    """The state of the status indicator, used to communicate status
    information about this drive.
    """
    indicator_led = base.Field("IndicatorLED")
    """The state of the indicator LED, used to identify the drive."""
    model = base.Field("Model")
    """This is the model number for the drive."""
    revision = base.Field("Revision")
    """The revision of this Drive"""
    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """
    capacity_bytes = base.Field(
        "CapacityBytes", adapter=rsd_lib_utils.num_or_none
    )
    """The size in bytes of this Drive"""
    failure_predicted = base.Field("FailurePredicted", adapter=bool)
    """Is this drive currently predicting a failure in the near future"""
    protocol = base.Field("Protocol")
    """The protocol this drive is using to communicate to the storage
    controller
    """
    media_type = base.Field("MediaType")
    """The type of media contained in this drive"""
    manufacturer = base.Field("Manufacturer")
    """This is the manufacturer of this drive."""
    sku = base.Field("SKU")
    """This is the SKU for this drive."""
    serial_number = base.Field("SerialNumber")
    """The serial number for this drive."""
    part_number = base.Field("PartNumber")
    """The part number for this drive."""
    asset_tag = base.Field("AssetTag")
    """The user assigned asset tag for this drive."""
    identifiers = rsd_lib_base.IdentifierCollectionField("Identifiers")
    """The Durable names for the drive"""
    location = rsd_lib_base.LocationCollectionField("Location")
    """The Location of the drive"""
    hotspare_type = base.Field("HotspareType")
    """The type of hotspare this drive is currently serving as"""
    encryption_ability = base.Field("EncryptionAbility")
    """The encryption abilities of this drive"""
    encryption_status = base.Field("EncryptionStatus")
    """The status of the encryption of this drive"""
    rotation_speed_rpm = base.Field(
        "RotationSpeedRPM", adapter=rsd_lib_utils.num_or_none
    )
    """The rotation speed of this Drive in Revolutions per Minute (RPM)"""
    block_size_bytes = base.Field(
        "BlockSizeBytes", adapter=rsd_lib_utils.num_or_none
    )
    """The size of the smallest addressable unit (Block) of this drive in bytes
    """
    capable_speed_gbs = base.Field(
        "CapableSpeedGbs", adapter=rsd_lib_utils.num_or_none
    )
    """The speed which this drive can communicate to a storage controller in
    ideal conditions in Gigabits per second
    """
    negotiated_speed_gbs = base.Field(
        "NegotiatedSpeedGbs", adapter=rsd_lib_utils.num_or_none
    )
    """The speed which this drive is currently communicating to the storage
    controller in Gigabits per second
    """
    predicted_media_life_left_percent = base.Field(
        "PredictedMediaLifeLeftPercent", adapter=rsd_lib_utils.num_or_none
    )
    """The percentage of reads and writes that are predicted to still be
    available for the media
    """
    links = LinksField("Links")
    """Contains references to other resources that are related to this
    resource.
    """
    operations = volume.OperationsCollectionField("Operations")
    """The operations currently running on the Drive."""
    oem = OemField("Oem")
    """Oem specific properties."""
    _actions = ActionsField("Actions")
    def _get_secure_erase_action_element(self):
        """Return the SecureErase action descriptor for this drive

        :returns: the "#Drive.SecureErase" action field
        :raises: MissingActionError if the drive exposes no such action
        """
        secure_erase_action = self._actions.secure_erase
        if not secure_erase_action:
            raise exceptions.MissingActionError(
                action="#Drive.SecureErase", resource=self._path
            )
        return secure_erase_action
    def secure_erase(self):
        """Secure erase the drive.

        :raises: MissingActionError, if no secure erase action exists.
        """
        target_uri = self._get_secure_erase_action_element().target_uri
        # The action takes no parameters; POST an empty body to trigger it.
        self._conn.post(target_uri, data={})
    def update(self, asset_tag=None, erase_on_detach=None, erased=None):
        """Update drive properties

        Only the arguments that are not None are included in the PATCH
        payload sent to the service.

        :param asset_tag: The user assigned asset tag for this drive
        :param erase_on_detach: Indicates if drive should be erased when
            detached from Composed Node.
        :param erased: Indicate whether drive was cleared after assignment to
            composed node
        :raises: InvalidParameterValueError if one param is incorrect
        """
        data = {}
        if asset_tag is not None:
            data["AssetTag"] = asset_tag
        # Both OEM flags live under Oem/Intel_RackScale in the payload.
        if erase_on_detach is not None or erased is not None:
            data["Oem"] = {"Intel_RackScale": {}}
        if erase_on_detach is not None:
            if not isinstance(erase_on_detach, bool):
                raise exceptions.InvalidParameterValueError(
                    parameter="erase_on_detach",
                    value=erase_on_detach,
                    valid_values=[True, False],
                )
            else:
                data["Oem"]["Intel_RackScale"][
                    "EraseOnDetach"
                ] = erase_on_detach
        if erased is not None:
            if not isinstance(erased, bool):
                raise exceptions.InvalidParameterValueError(
                    parameter="erased",
                    value=erased,
                    valid_values=[True, False],
                )
            else:
                data["Oem"]["Intel_RackScale"]["DriveErased"] = erased
        self._conn.patch(self.path, data=data)
from sushy.resources import base
from sushy import utils
from rsd_lib import base as rsd_lib_base
class LinksField(base.CompositeField):
    """References from a PCIeDevice to its related resources."""
    chassis = base.Field("Chassis", adapter=utils.get_members_identities)
    """An array of references to the chassis in which the PCIe device is
    contained
    """
    pcie_functions = base.Field(
        "PCIeFunctions", adapter=utils.get_members_identities
    )
    """An array of references to PCIeFunctions exposed by this device."""
class PCIeDevice(rsd_lib_base.ResourceBase):
    """PCIeDevice resource class

    This is the schema definition for the PCIeDevice resource. It
    represents the properties of a PCIeDevice attached to a System.
    """
    manufacturer = base.Field("Manufacturer")
    """This is the manufacturer of this PCIe device."""
    model = base.Field("Model")
    """This is the model number for the PCIe device."""
    sku = base.Field("SKU")
    """This is the SKU for this PCIe device."""
    serial_number = base.Field("SerialNumber")
    """The serial number for this PCIe device."""
    part_number = base.Field("PartNumber")
    """The part number for this PCIe device."""
    asset_tag = base.Field("AssetTag")
    """The user assigned asset tag for this PCIe device."""
    device_type = base.Field("DeviceType")
    """The device type for this PCIe device."""
    firmware_version = base.Field("FirmwareVersion")
    """The version of firmware for this PCIe device."""
    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """
    links = LinksField("Links")
    """The links object contains the links to other resources that are related
    to this resource.
    """
    def update(self, asset_tag):
        """Update the AssetTag property of this device via PATCH

        :param asset_tag: The user assigned asset tag for this PCIe device
        """
        data = {"AssetTag": asset_tag}
        self._conn.patch(self.path, data=data)
from sushy.resources import base
from rsd_lib import base as rsd_lib_base
from rsd_lib import utils as rsd_lib_utils
class iSCSIBootField(base.CompositeField):
    """iSCSI boot capabilities, status, and configuration values."""
    ip_address_type = base.Field("IPAddressType")
    """The type of IP address (IPv6 or IPv4) being populated in the iSCSIBoot
    IP address fields.
    """
    initiator_ip_address = base.Field("InitiatorIPAddress")
    """The IPv6 or IPv4 address of the iSCSI initiator."""
    initiator_name = base.Field("InitiatorName")
    """The iSCSI initiator name."""
    initiator_default_gateway = base.Field("InitiatorDefaultGateway")
    """The IPv6 or IPv4 iSCSI boot default gateway."""
    initiator_netmask = base.Field("InitiatorNetmask")
    """The IPv6 or IPv4 netmask of the iSCSI boot initiator."""
    target_info_via_dhcp = base.Field("TargetInfoViaDHCP", adapter=bool)
    """Whether the iSCSI boot target name, LUN, IP address, and netmask should
    be obtained from DHCP.
    """
    primary_target_name = base.Field("PrimaryTargetName")
    """The name of the iSCSI primary boot target."""
    primary_target_ip_address = base.Field("PrimaryTargetIPAddress")
    """The IP address (IPv6 or IPv4) for the primary iSCSI boot target."""
    primary_target_tcp_port = base.Field(
        "PrimaryTargetTCPPort", adapter=rsd_lib_utils.num_or_none
    )
    """The TCP port for the primary iSCSI boot target."""
    primary_lun = base.Field("PrimaryLUN", adapter=rsd_lib_utils.num_or_none)
    """The logical unit number (LUN) for the primary iSCSI boot target."""
    primary_vlan_enable = base.Field("PrimaryVLANEnable", adapter=bool)
    """This indicates if the primary VLAN is enabled."""
    primary_vlan_id = base.Field(
        "PrimaryVLANId", adapter=rsd_lib_utils.num_or_none
    )
    """The 802.1q VLAN ID to use for iSCSI boot from the primary target."""
    primary_dns = base.Field("PrimaryDNS")
    """The IPv6 or IPv4 address of the primary DNS server for the iSCSI boot
    initiator.
    """
    secondary_target_name = base.Field("SecondaryTargetName")
    """The name of the iSCSI secondary boot target."""
    secondary_target_ip_address = base.Field("SecondaryTargetIPAddress")
    """The IP address (IPv6 or IPv4) for the secondary iSCSI boot target."""
    secondary_target_tcp_port = base.Field(
        "SecondaryTargetTCPPort", adapter=rsd_lib_utils.num_or_none
    )
    """The TCP port for the secondary iSCSI boot target."""
    secondary_lun = base.Field(
        "SecondaryLUN", adapter=rsd_lib_utils.num_or_none
    )
    """The logical unit number (LUN) for the secondary iSCSI boot target."""
    secondary_vlan_enable = base.Field("SecondaryVLANEnable", adapter=bool)
    """This indicates if the secondary VLAN is enabled."""
    secondary_vlan_id = base.Field(
        "SecondaryVLANId", adapter=rsd_lib_utils.num_or_none
    )
    """The 802.1q VLAN ID to use for iSCSI boot from the secondary target."""
    secondary_dns = base.Field("SecondaryDNS")
    """The IPv6 or IPv4 address of the secondary DNS server for the iSCSI boot
    initiator.
    """
    ip_mask_dns_via_dhcp = base.Field("IPMaskDNSViaDHCP", adapter=bool)
    """Whether the iSCSI boot initiator uses DHCP to obtain the initiator name,
    IP address, and netmask.
    """
    router_advertisement_enabled = base.Field(
        "RouterAdvertisementEnabled", adapter=bool
    )
    """Whether IPv6 router advertisement is enabled for the iSCSI boot target.
    """
    authentication_method = base.Field("AuthenticationMethod")
    """The iSCSI boot authentication method for this network device function.
    """
    chap_username = base.Field("CHAPUsername")
    """The username for CHAP authentication."""
    chap_secret = base.Field("CHAPSecret")
    """The shared secret for CHAP authentication."""
    mutual_chap_username = base.Field("MutualCHAPUsername")
    """The CHAP Username for 2-way CHAP authentication."""
    mutual_chap_secret = base.Field("MutualCHAPSecret")
    """The CHAP Secret for 2-way CHAP authentication."""
class EthernetField(base.CompositeField):
    """Ethernet capabilities of a network device function."""
    mac_address = base.Field("MACAddress")
    """This is the currently configured MAC address of the (logical port)
    network device function.
    """
class NetworkDeviceFunction(rsd_lib_base.ResourceBase):
    """NetworkDeviceFunction resource class

    A Network Device Function represents a logical interface exposed by the
    network adapter.
    """
    status = rsd_lib_base.StatusField("Status")
    """The known state of the resource, such as whether it is enabled."""
    device_enabled = base.Field("DeviceEnabled", adapter=bool)
    """Whether the network device function is enabled."""
    ethernet = EthernetField("Ethernet")
    """Ethernet capabilities of this network device function."""
    iscsi_boot = iSCSIBootField("iSCSIBoot")
    """iSCSI boot capabilities, status, and configuration values."""
    def update(self, ethernet=None, iscsi_boot=None):
        """Patch this network device function's configuration

        Only the arguments that are not None are sent to the service;
        typically used to enable iSCSI boot of a compute node.

        :param ethernet: Ethernet capabilities for this network device
            function
        :param iscsi_boot: iSCSI boot capabilities, status, and configuration
            values for this network device function
        """
        payload = {
            key: value
            for key, value in (
                ("Ethernet", ethernet),
                ("iSCSIBoot", iscsi_boot),
            )
            if value is not None
        }
        self._conn.patch(self.path, data=payload)
class NetworkDeviceFunctionCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection of NetworkDeviceFunction resources."""
    @property
    def _resource_type(self):
        # Members of this collection are parsed as NetworkDeviceFunction.
        return NetworkDeviceFunction
from sushy.resources import base
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib.resources.v2_1.common import redundancy
from rsd_lib.resources.v2_1.system import drive
from rsd_lib.resources.v2_1.system import volume
from rsd_lib import utils as rsd_lib_utils
class StorageControllerLinksField(base.CompositeField):
    """References from a StorageController to its related resources."""
    endpoints = base.Field("Endpoints", adapter=utils.get_members_identities)
    """An array of references to the endpoints that connect to this controller.
    """
class LinksField(base.CompositeField):
    """References from a Storage subsystem to its related resources."""
    enclosures = base.Field("Enclosures", adapter=utils.get_members_identities)
    """An array of references to the chassis to which this storage subsystem
    is attached
    """
class StorageControllerCollectionField(rsd_lib_base.ReferenceableMemberField):
    """StorageController field

    This schema defines a storage controller and its respective properties.
    A storage controller represents a storage device (physical or virtual)
    that produces Volumes.
    """
    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """
    speed_gbps = base.Field("SpeedGbps", adapter=rsd_lib_utils.num_or_none)
    """The speed of the storage controller interface."""
    firmware_version = base.Field("FirmwareVersion")
    """The firmware version of this storage Controller"""
    manufacturer = base.Field("Manufacturer")
    """This is the manufacturer of this storage controller."""
    model = base.Field("Model")
    """This is the model number for the storage controller."""
    sku = base.Field("SKU")
    """This is the SKU for this storage controller."""
    serial_number = base.Field("SerialNumber")
    """The serial number for this storage controller."""
    part_number = base.Field("PartNumber")
    """The part number for this storage controller."""
    asset_tag = base.Field("AssetTag")
    """The user assigned asset tag for this storage controller."""
    supported_controller_protocols = base.Field("SupportedControllerProtocols")
    """This represents the protocols by which this storage controller can be
    communicated to.
    """
    supported_device_protocols = base.Field("SupportedDeviceProtocols")
    """This represents the protocols which the storage controller can use to
    communicate with attached devices.
    """
    identifiers = rsd_lib_base.IdentifierCollectionField("Identifiers")
    """The Durable names for the storage controller"""
    links = StorageControllerLinksField("Links")
    """Contains references to other resources that are related to this
    resource.
    """
class Storage(rsd_lib_base.ResourceBase):
    """Storage resource class

    This schema defines a storage subsystem and its respective properties.
    A storage subsystem represents a set of storage controllers (physical
    or virtual) and the resources such as volumes that can be accessed from
    that subsystem.
    """
    links = LinksField("Links")
    """Contains references to other resources that are related to this
    resource.
    """
    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """
    storage_controllers = StorageControllerCollectionField(
        "StorageControllers"
    )
    """The set of storage controllers represented by this resource."""
    redundancy = redundancy.RedundancyCollectionField("Redundancy")
    """Redundancy information for the storage subsystem"""
    # TODO(linyang): Add Action Field
    @property
    @utils.cache_it
    def drives(self):
        """Property to provide a list of `Drive` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.

        :returns: list of Drive objects built from the "Drives" references
        """
        return [
            drive.Drive(self._conn, path, redfish_version=self.redfish_version)
            for path in rsd_lib_utils.get_sub_resource_path_list_by(
                self, "Drives"
            )
        ]
    @property
    @utils.cache_it
    def volumes(self):
        """Property to provide reference to `VolumeCollection` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.

        :returns: VolumeCollection built from the "Volumes" sub-resource
        """
        return volume.VolumeCollection(
            self._conn,
            utils.get_sub_resource_path_by(self, "Volumes"),
            redfish_version=self.redfish_version,
        )
class StorageCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection of Storage resources."""
    @property
    def _resource_type(self):
        # Members of this collection are parsed as Storage.
        return Storage
from sushy.resources import base
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib import utils as rsd_lib_utils
class LinksField(base.CompositeField):
    """References from a Volume to its related resources."""
    drives = base.Field("Drives", adapter=utils.get_members_identities)
    """An array of references to the drives which contain this volume. This
    will reference Drives that either wholly or only partly contain this
    volume.
    """
class OperationsCollectionField(base.ListField):
    """Operations currently running on a storage resource."""
    operation_name = base.Field("OperationName")
    """The name of the operation."""
    percentage_complete = base.Field(
        "PercentageComplete", adapter=rsd_lib_utils.num_or_none
    )
    """The percentage of the operation that has been completed."""
    associated_task = base.Field(
        "AssociatedTask", adapter=rsd_lib_utils.get_resource_identity
    )
    """A reference to the task associated with the operation if any."""
class Volume(rsd_lib_base.ResourceBase):
    """Volume resource class

    Volume contains properties used to describe a volume, virtual disk,
    LUN, or other logical storage entity for any system.
    """
    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """
    capacity_bytes = base.Field(
        "CapacityBytes", adapter=rsd_lib_utils.num_or_none
    )
    """The size in bytes of this Volume"""
    volume_type = base.Field("VolumeType")
    """The type of this volume"""
    encrypted = base.Field("Encrypted", adapter=bool)
    """Is this Volume encrypted"""
    encryption_types = base.Field("EncryptionTypes")
    """The types of encryption used by this Volume"""
    identifiers = rsd_lib_base.IdentifierCollectionField("Identifiers")
    """The Durable names for the volume"""
    block_size_bytes = base.Field(
        "BlockSizeBytes", adapter=rsd_lib_utils.num_or_none
    )
    """The size of the smallest addressable unit (Block) of this volume in
    bytes
    """
    operations = OperationsCollectionField("Operations")
    """The operations currently running on the Volume"""
    optimum_io_size_bytes = base.Field(
        "OptimumIOSizeBytes", adapter=rsd_lib_utils.num_or_none
    )
    """The size in bytes of this Volume's optimum IO size."""
    links = LinksField("Links")
    """Contains references to other resources that are related to this
    resource.
    """
    # TODO(linyang): Add Action Field
class VolumeCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection of Volume resources."""
    @property
    def _resource_type(self):
        # Members of this collection are parsed as Volume.
        return Volume
from sushy.resources import base
from rsd_lib import base as rsd_lib_base
from rsd_lib import utils as rsd_lib_utils
class IntelRackScaleField(base.CompositeField):
    """Intel Rack Scale OEM extension properties of a processor."""

    brand = base.Field("Brand")
    """This indicates processor brand"""

    capabilities = base.Field("Capabilities")
    """This indicates array of processor capabilities"""
class ProcessorIdField(base.CompositeField):
    """Identification information (CPUID data) for a processor."""

    vendor_id = base.Field("VendorId")
    """The Vendor Identification for this processor"""

    identification_registers = base.Field("IdentificationRegisters")
    """The contents of the Identification Registers (CPUID) for this processor
    """

    effective_family = base.Field("EffectiveFamily")
    """The effective Family for this processor"""

    effective_model = base.Field("EffectiveModel")
    """The effective Model for this processor"""

    step = base.Field("Step")
    """The Step value for this processor"""

    microcode_info = base.Field("MicrocodeInfo")
    """The Microcode Information for this processor"""
class OemField(base.CompositeField):
    """Vendor-specific (OEM) properties of a processor."""

    intel_rackscale = IntelRackScaleField("Intel_RackScale")
    """Intel Rack Scale Design specific properties."""
class Processor(rsd_lib_base.ResourceBase):
    """Processor resource class

    This is the schema definition for the Processor resource. It
    represents the properties of a processor attached to a System.
    """

    socket = base.Field("Socket")
    """The socket or location of the processor"""

    processor_type = base.Field("ProcessorType")
    """The type of processor"""

    processor_architecture = base.Field("ProcessorArchitecture")
    """The architecture of the processor"""

    instruction_set = base.Field("InstructionSet")
    """The instruction set of the processor"""

    processor_id = ProcessorIdField("ProcessorId")
    """Identification information for this processor."""

    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """

    manufacturer = base.Field("Manufacturer")
    """The processor manufacturer"""

    model = base.Field("Model")
    """The product model number of this device"""

    max_speed_mhz = base.Field(
        "MaxSpeedMHz", adapter=rsd_lib_utils.num_or_none
    )
    """The maximum clock speed of the processor"""

    total_cores = base.Field("TotalCores", adapter=rsd_lib_utils.num_or_none)
    """The total number of cores contained in this processor"""

    total_threads = base.Field(
        "TotalThreads", adapter=rsd_lib_utils.num_or_none
    )
    """The total number of execution threads supported by this processor"""

    oem = OemField("Oem")
    """Oem specific properties."""
class ProcessorCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection resource whose members are `Processor` instances."""

    @property
    def _resource_type(self):
        # Class instantiated for each member of this collection.
        return Processor
import logging
from sushy import exceptions
from sushy.resources import base
from sushy.resources import common
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib import constants
from rsd_lib.resources.v2_1.chassis import log_service
from rsd_lib.resources.v2_1.system import ethernet_interface
from rsd_lib.resources.v2_1.system import memory
from rsd_lib.resources.v2_1.system import network_interface
from rsd_lib.resources.v2_1.system import pcie_device
from rsd_lib.resources.v2_1.system import pcie_function
from rsd_lib.resources.v2_1.system import processor
from rsd_lib.resources.v2_1.system import simple_storage
from rsd_lib.resources.v2_1.system import storage
from rsd_lib import utils as rsd_lib_utils
LOG = logging.getLogger(__name__)
class ProcessorSummaryField(base.CompositeField):
    """ProcessorSummary field

    This object describes the central processors of the system in general
    detail.
    """

    count = base.Field("Count", adapter=rsd_lib_utils.num_or_none)
    """The number of processors in the system."""

    model = base.Field("Model")
    """The processor model for the primary or majority of processors in this
    system.
    """

    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """
class LinksField(base.CompositeField):
    """Links from a ComputerSystem to the resources related to it."""

    chassis = base.Field("Chassis", adapter=utils.get_members_identities)
    """An array of references to the chassis in which this system is contained
    """

    managed_by = base.Field("ManagedBy", adapter=utils.get_members_identities)
    """An array of references to the Managers responsible for this system"""

    powered_by = base.Field("PoweredBy", adapter=utils.get_members_identities)
    """An array of ID[s] of resources that power this computer system.
    Normally the ID will be a chassis or a specific set of powerSupplies
    """

    cooled_by = base.Field("CooledBy", adapter=utils.get_members_identities)
    """An array of ID[s] of resources that cool this computer system. Normally
    the ID will be a chassis or a specific set of fans.
    """

    endpoints = base.Field("Endpoints", adapter=utils.get_members_identities)
    """An array of references to the endpoints that connect to this system."""
class BootField(base.CompositeField):
    """Boot field

    This object contains the boot information for the current resource.
    """

    boot_source_override_target = base.Field("BootSourceOverrideTarget")
    """The current boot source to be used at next boot instead of the normal
    boot device, if BootSourceOverrideEnabled is true.
    """

    # Boot source override targets this service allows for the system.
    boot_source_override_target_allowed_values = base.Field(
        "BootSourceOverrideTarget@Redfish.AllowableValues"
    )

    boot_source_override_enabled = base.Field("BootSourceOverrideEnabled")
    """Describes the state of the Boot Source Override feature"""

    uefi_target_boot_source_override = base.Field(
        "UefiTargetBootSourceOverride"
    )
    """This property is the UEFI Device Path of the device to boot from when
    BootSourceOverrideSupported is UefiTarget.
    """

    boot_source_override_mode = base.Field("BootSourceOverrideMode")
    """The BIOS Boot Mode (either Legacy or UEFI) to be used when
    BootSourceOverrideTarget boot source is booted from.
    """

    # Boot modes this service allows for BootSourceOverrideMode.
    boot_source_override_mode_allowed_values = base.Field(
        "BootSourceOverrideMode@Redfish.AllowableValues"
    )
class MemorySummaryField(base.CompositeField):
    """MemorySummary field

    This object describes the memory of the system in general detail.
    """

    total_system_memory_gib = base.Field(
        "TotalSystemMemoryGiB", adapter=rsd_lib_utils.num_or_none
    )
    """The total installed, operating system-accessible memory (RAM), measured
    in GiB.
    """

    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """
class PciDeviceCollectionField(base.ListField):
    """A list of PCI devices present in the computer system."""

    # PCI vendor identifier of the device.
    vendor_id = base.Field("VendorId")

    # PCI device identifier of the device.
    device_id = base.Field("DeviceId")
class IntelRackScaleField(base.CompositeField):
    """Intel Rack Scale OEM extension properties of a computer system."""

    pcie_connection_id = base.Field("PCIeConnectionId")
    """This property is an array of IDs of cable or cables connected to this
    port.
    """

    pci_devices = PciDeviceCollectionField("PciDevices")
    """This indicates array of PCI devices present in computer system"""

    discovery_state = base.Field("DiscoveryState")
    """This indicates computer system discovery state"""

    processor_sockets = base.Field(
        "ProcessorSockets", adapter=rsd_lib_utils.num_or_none
    )
    """This indicates number of processor sockets available in system"""

    memory_sockets = base.Field(
        "MemorySockets", adapter=rsd_lib_utils.num_or_none
    )
    """This indicates number of memory sockets available in system"""
class OemField(base.CompositeField):
    """Vendor-specific (OEM) properties of a computer system."""

    intel_rackscale = IntelRackScaleField("Intel_RackScale")
    """Intel Rack Scale Design specific properties."""
class ActionsField(base.CompositeField):
    """Available actions on a ComputerSystem resource."""

    # The '#ComputerSystem.Reset' action payload.
    reset = common.ResetActionField("#ComputerSystem.Reset")
class System(rsd_lib_base.ResourceBase):
    """ComputerSystem resource class

    This schema defines a computer system and its respective properties. A
    computer system represents a machine (physical or virtual) and the
    local resources such as memory, cpu and other devices that can be
    accessed from that machine.
    """

    system_type = base.Field("SystemType")
    """The type of computer system represented by this resource."""

    links = LinksField("Links")
    """Contains references to other resources that are related to this
    resource.
    """

    asset_tag = base.Field("AssetTag")
    """The user definable tag that can be used to track this computer system
    for inventory or other client purposes
    """

    manufacturer = base.Field("Manufacturer")
    """The manufacturer or OEM of this system."""

    model = base.Field("Model")
    """The model number for this system"""

    sku = base.Field("SKU")
    """The manufacturer SKU for this system"""

    serial_number = base.Field("SerialNumber")
    """The serial number for this system"""

    part_number = base.Field("PartNumber")
    """The part number for this system"""

    uuid = base.Field("UUID")
    """The universal unique identifier (UUID) for this system"""

    host_name = base.Field("HostName")
    """The DNS Host Name, without any domain information"""

    indicator_led = base.Field("IndicatorLED")
    """The state of the indicator LED, used to identify the system"""

    power_state = base.Field("PowerState")
    """This is the current power state of the system"""

    boot = BootField("Boot")
    """Information about the boot settings for this system"""

    bios_version = base.Field("BiosVersion")
    """The version of the system BIOS or primary system firmware."""

    processor_summary = ProcessorSummaryField("ProcessorSummary")
    """This object describes the central processors of the system in general
    detail.
    """

    memory_summary = MemorySummaryField("MemorySummary")
    """This object describes the central memory of the system in general
    detail.
    """

    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """

    hosting_roles = base.Field("HostingRoles")
    """The hosting roles that this computer system supports."""

    oem = OemField("Oem")
    """Oem specific properties."""

    # Required actions payload; reset is exposed through reset_system().
    _actions = ActionsField("Actions", required=True)

    def _get_reset_action_element(self):
        """Return the '#ComputerSystem.Reset' action field.

        :raises: MissingActionError if the service does not advertise
            the reset action for this system.
        """
        reset_action = self._actions.reset
        if not reset_action:
            raise exceptions.MissingActionError(
                action="#ComputerSystem.Reset", resource=self._path
            )
        return reset_action

    def get_allowed_reset_system_values(self):
        """Get the allowed values for resetting the system.

        :returns: A set with the allowed values.
        """
        reset_action = self._get_reset_action_element()

        if not reset_action.allowed_values:
            LOG.warning(
                "Could not figure out the allowed values for the "
                "reset system action for System %s",
                self.identity,
            )
            # Fall back to every reset type rsd-lib knows about.
            return set(constants.RESET_TYPE_VALUE)

        return set(constants.RESET_TYPE_VALUE).intersection(
            reset_action.allowed_values
        )

    def reset_system(self, value):
        """Reset the system.

        :param value: The target value.
        :raises: InvalidParameterValueError, if the target value is not
            allowed.
        """
        valid_resets = self.get_allowed_reset_system_values()
        if value not in valid_resets:
            raise exceptions.InvalidParameterValueError(
                parameter="value", value=value, valid_values=valid_resets
            )

        target_uri = self._get_reset_action_element().target_uri

        self._conn.post(target_uri, data={"ResetType": value})

    def get_allowed_system_boot_source_values(self):
        """Get the allowed values for changing the boot source.

        :returns: A set with the allowed values.
        """
        if not self.boot.boot_source_override_target_allowed_values:
            LOG.warning(
                "Could not figure out the allowed values for "
                "configuring the boot source for System %s",
                self.identity,
            )
            # Fall back to every boot source target rsd-lib knows about.
            return set(constants.BOOT_SOURCE_TARGET_VALUE)

        return set(constants.BOOT_SOURCE_TARGET_VALUE).intersection(
            self.boot.boot_source_override_target_allowed_values
        )

    def get_allowed_system_boot_mode_values(self):
        """Get the allowed values for the boot source mode.

        :returns: A set with the allowed values.
        """
        if not self.boot.boot_source_override_mode_allowed_values:
            LOG.warning(
                "Could not figure out the allowed values for "
                "configuring the boot mode for System %s",
                self.identity,
            )
            # Fall back to every boot mode rsd-lib knows about.
            return set(constants.BOOT_SOURCE_MODE_VALUE)

        return set(constants.BOOT_SOURCE_MODE_VALUE).intersection(
            self.boot.boot_source_override_mode_allowed_values
        )

    def set_system_boot_source(self, target, enabled="Once", mode=None):
        """Set the boot source.

        Set the boot source to use on next reboot of the System.

        :param target: The target boot source.
        :param enabled: The frequency, whether to set it for the next
            reboot only ("Once") or persistent to all future reboots
            ("Continuous") or disabled ("Disabled").
        :param mode: The boot mode, UEFI ("UEFI") or BIOS ("Legacy").
        :raises: InvalidParameterValueError, if any information passed is
            invalid.
        """
        valid_targets = self.get_allowed_system_boot_source_values()
        if target not in valid_targets:
            raise exceptions.InvalidParameterValueError(
                parameter="target", value=target, valid_values=valid_targets
            )

        if enabled not in constants.BOOT_SOURCE_ENABLED_VALUE:
            raise exceptions.InvalidParameterValueError(
                parameter="enabled",
                value=enabled,
                valid_values=constants.BOOT_SOURCE_ENABLED_VALUE,
            )

        data = {
            "Boot": {
                "BootSourceOverrideTarget": target,
                "BootSourceOverrideEnabled": enabled,
            }
        }

        # The mode is only validated and sent when explicitly requested.
        if mode is not None:
            valid_modes = self.get_allowed_system_boot_mode_values()
            if mode not in valid_modes:
                raise exceptions.InvalidParameterValueError(
                    parameter="mode", value=mode, valid_values=valid_modes
                )
            data["Boot"]["BootSourceOverrideMode"] = mode

        self._conn.patch(self.path, data=data)

    def update(self, asset_tag):
        """Update AssetTag

        :param asset_tag: The user assigned asset tag for this computer system
        """
        data = {"AssetTag": asset_tag}

        self._conn.patch(self.path, data=data)

    @property
    @utils.cache_it
    def processors(self):
        """Property to provide reference to `ProcessorCollection` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return processor.ProcessorCollection(
            self._conn,
            utils.get_sub_resource_path_by(self, "Processors"),
            redfish_version=self.redfish_version,
        )

    @property
    @utils.cache_it
    def ethernet_interfaces(self):
        """Property to provide reference to `EthernetInterfaceCollection` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return ethernet_interface.EthernetInterfaceCollection(
            self._conn,
            utils.get_sub_resource_path_by(self, "EthernetInterfaces"),
            redfish_version=self.redfish_version,
        )

    @property
    @utils.cache_it
    def simple_storage(self):
        """Property to provide reference to `SimpleStorageCollection` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return simple_storage.SimpleStorageCollection(
            self._conn,
            utils.get_sub_resource_path_by(self, "SimpleStorage"),
            redfish_version=self.redfish_version,
        )

    @property
    @utils.cache_it
    def log_services(self):
        """Property to provide reference to `LogServiceCollection` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return log_service.LogServiceCollection(
            self._conn,
            utils.get_sub_resource_path_by(self, "LogServices"),
            redfish_version=self.redfish_version,
        )

    @property
    @utils.cache_it
    def memory(self):
        """Property to provide reference to `MemoryCollection` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return memory.MemoryCollection(
            self._conn,
            utils.get_sub_resource_path_by(self, "Memory"),
            redfish_version=self.redfish_version,
        )

    @property
    @utils.cache_it
    def storage(self):
        """Property to provide reference to `StorageCollection` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return storage.StorageCollection(
            self._conn,
            utils.get_sub_resource_path_by(self, "Storage"),
            redfish_version=self.redfish_version,
        )

    @property
    @utils.cache_it
    def pcie_devices(self):
        """Property to provide a list of `PCIeDevice` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return [
            pcie_device.PCIeDevice(
                self._conn, path, redfish_version=self.redfish_version
            )
            for path in rsd_lib_utils.get_sub_resource_path_list_by(
                self, "PCIeDevices"
            )
        ]

    @property
    @utils.cache_it
    def pcie_functions(self):
        """Property to provide a list of `PCIeFunction` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return [
            pcie_function.PCIeFunction(
                self._conn, path, redfish_version=self.redfish_version
            )
            for path in rsd_lib_utils.get_sub_resource_path_list_by(
                self, "PCIeFunctions"
            )
        ]

    @property
    @utils.cache_it
    def network_interfaces(self):
        """Property to provide reference to `NetworkInterfaceCollection` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return network_interface.NetworkInterfaceCollection(
            self._conn,
            utils.get_sub_resource_path_by(self, "NetworkInterfaces"),
            redfish_version=self.redfish_version,
        )
class SystemCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection resource whose members are `System` instances."""

    @property
    def _resource_type(self):
        # Class instantiated for each member of this collection.
        return System
import logging
from sushy.resources import base
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib.resources.v2_1.fabric import endpoint
from rsd_lib.resources.v2_1.fabric import switch
from rsd_lib.resources.v2_1.fabric import zone
from rsd_lib import utils as rsd_lib_utils
LOG = logging.getLogger(__name__)
class Fabric(rsd_lib_base.ResourceBase):
    """Fabric resource class

    A Fabric describes a simple fabric made up of one or more switches,
    zero or more endpoints, and zero or more zones.
    """

    fabric_type = base.Field("FabricType")
    """The protocol being sent over this fabric."""

    status = rsd_lib_base.StatusField("Status")
    """The known state of the resource, such as whether it is enabled."""

    max_zones = base.Field("MaxZones", adapter=rsd_lib_utils.num_or_none)
    """The maximum number of zones the switch can currently configure."""

    # TODO(linyang): Add Action Field

    @property
    @utils.cache_it
    def zones(self):
        """Reference to the `ZoneCollection` of this fabric.

        Computed on first access and cached; the cache is cleared when
        the resource is refreshed.
        """
        zones_path = utils.get_sub_resource_path_by(self, "Zones")
        return zone.ZoneCollection(
            self._conn, zones_path, redfish_version=self.redfish_version
        )

    @property
    @utils.cache_it
    def endpoints(self):
        """Reference to the `EndpointCollection` of this fabric.

        Computed on first access and cached; the cache is cleared when
        the resource is refreshed.
        """
        endpoints_path = utils.get_sub_resource_path_by(self, "Endpoints")
        return endpoint.EndpointCollection(
            self._conn, endpoints_path, redfish_version=self.redfish_version
        )

    @property
    @utils.cache_it
    def switches(self):
        """Reference to the `SwitchCollection` of this fabric.

        Computed on first access and cached; the cache is cleared when
        the resource is refreshed.
        """
        switches_path = utils.get_sub_resource_path_by(self, "Switches")
        return switch.SwitchCollection(
            self._conn, switches_path, redfish_version=self.redfish_version
        )
class FabricCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection resource whose members are `Fabric` instances."""

    @property
    def _resource_type(self):
        # Class instantiated for each member of this collection.
        return Fabric
import logging
from sushy.resources import base
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib.resources.v2_1.common import redundancy
from rsd_lib import utils as rsd_lib_utils
LOG = logging.getLogger(__name__)
class LinksField(base.CompositeField):
    """Links from an Endpoint to the resources related to it."""

    mutually_exclusive_endpoints = base.Field(
        "MutuallyExclusiveEndpoints", adapter=utils.get_members_identities
    )
    """An array of references to the endpoints that may not be used in zones
    if this endpoint is used in a zone.
    """

    ports = base.Field("Ports", adapter=utils.get_members_identities)
    """An array of references to the physical ports associated with this
    endpoint.
    """
class PciIdField(base.CompositeField):
    """The PCI identifiers of a PCIe function."""

    device_id = base.Field("DeviceId")
    """The Device ID of this PCIe function."""

    vendor_id = base.Field("VendorId")
    """The Vendor ID of this PCIe function."""

    subsystem_id = base.Field("SubsystemId")
    """The Subsystem ID of this PCIe function."""

    subsystem_vendor_id = base.Field("SubsystemVendorId")
    """The Subsystem Vendor ID of this PCIe function."""
class ConnectedEntityCollectionField(base.ListField):
    """ConnectedEntity field

    Represents a remote resource that is connected to the network
    accessible to this endpoint.
    """

    entity_type = base.Field("EntityType")
    """The type of the connected entity."""

    entity_role = base.Field("EntityRole")
    """The role of the connected entity."""

    entity_link = base.Field(
        "EntityLink", adapter=rsd_lib_utils.get_resource_identity
    )
    """A link to the associated entity."""

    entity_pci_id = PciIdField("EntityPciId")
    """The PCI ID of the connected entity."""

    pci_function_number = base.Field(
        "PciFunctionNumber", adapter=rsd_lib_utils.num_or_none
    )
    """The PCI function number of the connected entity."""

    pci_class_code = base.Field("PciClassCode")
    """The Class Code and Subclass code of this PCIe function."""

    identifiers = rsd_lib_base.IdentifierCollectionField("Identifiers")
    """Identifiers for the remote entity."""
class Endpoint(rsd_lib_base.ResourceBase):
    """Endpoint resource class

    This is the schema definition for the Endpoint resource. It represents
    the properties of an entity that sends or receives protocol defined
    messages over a transport.
    """

    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """

    endpoint_protocol = base.Field("EndpointProtocol")
    """The protocol supported by this endpoint."""

    connected_entities = ConnectedEntityCollectionField("ConnectedEntities")
    """All the entities connected to this endpoint."""

    identifiers = rsd_lib_base.IdentifierCollectionField("Identifiers")
    """Identifiers for this endpoint"""

    pci_id = PciIdField("PciId")
    """The PCI ID of the endpoint."""

    host_reservation_memory_bytes = base.Field(
        "HostReservationMemoryBytes", adapter=rsd_lib_utils.num_or_none
    )
    """The amount of memory in Bytes that the Host should allocate to connect
    to this endpoint.
    """

    links = LinksField("Links")
    """The links object contains the links to other resources that are related
    to this resource.
    """

    redundancy = redundancy.RedundancyCollectionField("Redundancy")
    """Redundancy information for the lower level endpoints supporting this
    endpoint
    """

    # TODO(linyang): Add Action Field
class EndpointCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection resource whose members are `Endpoint` instances."""

    @property
    def _resource_type(self):
        # Class instantiated for each member of this collection.
        return Endpoint
import logging
from sushy import exceptions
from sushy.resources import base
from sushy.resources import common
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib.resources.v2_1.chassis import log_service
from rsd_lib.resources.v2_1.common import redundancy
from rsd_lib.resources.v2_1.fabric import port
from rsd_lib import utils as rsd_lib_utils
LOG = logging.getLogger(__name__)
class LinksField(base.CompositeField):
    """Links from a Switch to the resources related to it."""

    chassis = base.Field(
        "Chassis", adapter=rsd_lib_utils.get_resource_identity
    )
    """A reference to the chassis which contains this switch."""

    managed_by = base.Field("ManagedBy", adapter=utils.get_members_identities)
    """An array of references to the managers that manage this switch."""
class ActionsField(base.CompositeField):
    """Available actions on a Switch resource."""

    reset = common.ResetActionField("#Switch.Reset")
    """The '#Switch.Reset' action payload."""
class Switch(rsd_lib_base.ResourceBase):
    """Switch resource class

    Describes a single fabric switch and the operations it supports.
    """

    switch_type = base.Field("SwitchType")
    """The protocol being sent over this switch."""

    status = rsd_lib_base.StatusField("Status")
    """The known state of the resource, such as whether it is enabled."""

    manufacturer = base.Field("Manufacturer")
    """This is the manufacturer of this switch."""

    model = base.Field("Model")
    """The product model number of this switch."""

    sku = base.Field("SKU")
    """This is the SKU for this switch."""

    serial_number = base.Field("SerialNumber")
    """The serial number for this switch."""

    part_number = base.Field("PartNumber")
    """The part number for this switch."""

    asset_tag = base.Field("AssetTag")
    """The user assigned asset tag for this switch."""

    domain_id = base.Field("DomainID", adapter=rsd_lib_utils.num_or_none)
    """The Domain ID for this switch."""

    is_managed = base.Field("IsManaged", adapter=bool)
    """Whether the switch is in a managed or unmanaged state."""

    total_switch_width = base.Field(
        "TotalSwitchWidth", adapter=rsd_lib_utils.num_or_none
    )
    """The total number of lanes, phys, or other physical transport links
    that this switch contains.
    """

    indicator_led = base.Field("IndicatorLED")
    """The state of the indicator LED, used to identify the switch."""

    power_state = base.Field("PowerState")
    """This is the current power state of the switch."""

    links = LinksField("Links")
    """References to other resources that are related to this switch."""

    redundancy = redundancy.RedundancyCollectionField("Redundancy")
    """Redundancy information for the switches."""

    actions = ActionsField("Actions")
    """The actions available on this switch."""

    def _get_reset_action_element(self):
        """Return the '#Switch.Reset' action, raising if it is absent."""
        action = self.actions.reset
        if action:
            return action
        raise exceptions.MissingActionError(
            action="#Switch.Reset", resource=self._path
        )

    def get_allowed_reset_switch_values(self):
        """Get the values allowed for resetting the switch.

        :returns: the allowed reset values advertised by the service.
        """
        return self._get_reset_action_element().allowed_values

    def reset_switch(self, value):
        """Reset the switch.

        :param value: The target reset type.
        :raises: InvalidParameterValueError, if the target value is not
            allowed.
        """
        allowed = self.get_allowed_reset_switch_values()
        if value not in allowed:
            raise exceptions.InvalidParameterValueError(
                parameter="value", value=value, valid_values=allowed
            )
        action = self._get_reset_action_element()
        self._conn.post(action.target_uri, data={"ResetType": value})

    @property
    @utils.cache_it
    def ports(self):
        """Reference to the `PortCollection` of this switch.

        Computed on first access and cached until the resource is
        refreshed.
        """
        ports_path = utils.get_sub_resource_path_by(self, "Ports")
        return port.PortCollection(
            self._conn, ports_path, redfish_version=self.redfish_version
        )

    @property
    @utils.cache_it
    def log_services(self):
        """Reference to the `LogServiceCollection` of this switch.

        Computed on first access and cached until the resource is
        refreshed.
        """
        log_path = utils.get_sub_resource_path_by(self, "LogServices")
        return log_service.LogServiceCollection(
            self._conn, log_path, redfish_version=self.redfish_version
        )
class SwitchCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection resource whose members are `Switch` instances."""

    @property
    def _resource_type(self):
        # Class instantiated for each member of this collection.
        return Switch
import logging
from sushy import exceptions
from sushy.resources import base
from sushy.resources import common
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib import utils as rsd_lib_utils
LOG = logging.getLogger(__name__)
class LinksIntelRackScaleField(base.CompositeField):
    """Intel Rack Scale OEM extension of a port's links."""

    neighbor_port = base.Field(
        "NeighborPort", adapter=utils.get_members_identities
    )
    """This indicates neighbor port which is connected to this port"""
class LinksOemField(base.CompositeField):
    """Vendor-specific (OEM) extension of a port's links."""

    intel_rackscale = LinksIntelRackScaleField("Intel_RackScale")
    """Intel Rack Scale Design specific properties."""
class ActionsField(base.CompositeField):
    """Available actions on a Port resource."""

    reset = common.ResetActionField("#Port.Reset")
    """The '#Port.Reset' action payload."""
class IntelRackScaleField(base.CompositeField):
    """Intel Rack Scale OEM extension properties of a port."""

    pcie_connection_id = base.Field("PCIeConnectionId")
    """An array of references to the PCIe connection identifiers (e.g. cable
    ID).
    """
class LinksField(base.CompositeField):
    """Links from a Port to the resources related to it."""

    associated_endpoints = base.Field(
        "AssociatedEndpoints", adapter=utils.get_members_identities
    )
    """An array of references to the endpoints that connect to the switch
    through this port.
    """

    connected_switches = base.Field(
        "ConnectedSwitches", adapter=utils.get_members_identities
    )
    """An array of references to the switches that connect to the switch
    through this port.
    """

    connected_switch_ports = base.Field(
        "ConnectedSwitchPorts", adapter=utils.get_members_identities
    )
    """An array of references to the ports that connect to the switch through
    this port.
    """

    oem = LinksOemField("Oem")
    """Oem specific properties."""
class OemField(base.CompositeField):
    """Vendor-specific (OEM) properties of a port."""

    intel_rackscale = IntelRackScaleField("Intel_RackScale")
    """Intel Rack Scale Design specific properties."""
class Port(rsd_lib_base.ResourceBase):
    """Port resource class

    Describes a single port of a fabric switch.
    """

    status = rsd_lib_base.StatusField("Status")
    """The known state of the resource, such as whether it is enabled."""

    port_id = base.Field("PortId")
    """This is the label of this port on the physical switch package."""

    port_protocol = base.Field("PortProtocol")
    """The protocol being sent over this port."""

    port_type = base.Field("PortType")
    """This is the type of this port."""

    current_speed_gbps = base.Field(
        "CurrentSpeedGbps", adapter=rsd_lib_utils.num_or_none
    )
    """The current speed of this port."""

    max_speed_gbps = base.Field(
        "MaxSpeedGbps", adapter=rsd_lib_utils.num_or_none
    )
    """The maximum speed of this port as currently configured."""

    width = base.Field("Width", adapter=rsd_lib_utils.num_or_none)
    """The number of lanes, phys, or other physical transport links that
    this port contains.
    """

    links = LinksField("Links")
    """References to other resources that are related to this port."""

    oem = OemField("Oem")
    """Oem specific properties."""

    actions = ActionsField("Actions")
    """The actions available on this port."""

    def _get_reset_action_element(self):
        """Return the '#Port.Reset' action, raising if it is absent."""
        action = self.actions.reset
        if action:
            return action
        raise exceptions.MissingActionError(
            action="#Port.Reset", resource=self._path
        )

    def get_allowed_reset_port_values(self):
        """Get the values allowed for resetting the port.

        :returns: the allowed reset values advertised by the service.
        """
        return self._get_reset_action_element().allowed_values

    def reset_port(self, value):
        """Reset the port.

        :param value: The target reset type.
        :raises: InvalidParameterValueError, if the target value is not
            allowed.
        """
        allowed = self.get_allowed_reset_port_values()
        if value not in allowed:
            raise exceptions.InvalidParameterValueError(
                parameter="value", value=value, valid_values=allowed
            )
        action = self._get_reset_action_element()
        self._conn.post(action.target_uri, data={"ResetType": value})
class PortCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection resource whose members are `Port` instances."""

    @property
    def _resource_type(self):
        # Class instantiated for each member of this collection.
        return Port
from jsonschema import validate
import logging
from sushy import exceptions
from sushy.resources import base
from sushy.resources import common
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib import constants
from rsd_lib.resources.v2_1.node import schemas as node_schemas
from rsd_lib.resources.v2_1.system import system
from rsd_lib import utils as rsd_lib_utils
LOG = logging.getLogger(__name__)
class AssembleActionField(base.CompositeField):
    """The '#ComposedNode.Assemble' action descriptor."""

    # URI to POST to in order to trigger the assemble action.
    target_uri = base.Field("target", required=True)
class AttachEndpointActionField(common.ActionField):
    """The '#ComposedNode.AttachEndpoint' action descriptor."""

    # Identities of the resources that may be attached via this action.
    allowed_values = base.Field(
        "Resource@Redfish.AllowableValues",
        default=(),
        adapter=utils.get_members_identities,
    )
class DetachEndpointActionField(common.ActionField):
    """The '#ComposedNode.DetachEndpoint' action descriptor."""

    # Identities of the resources that may be detached via this action.
    allowed_values = base.Field(
        "Resource@Redfish.AllowableValues",
        default=(),
        adapter=utils.get_members_identities,
    )
class NodeActionsField(base.CompositeField):
    """Available actions on a composed node."""

    # The '#ComposedNode.Reset' action payload.
    reset = common.ResetActionField("#ComposedNode.Reset")

    # The '#ComposedNode.Assemble' action payload.
    assemble = common.ActionField("#ComposedNode.Assemble")

    # The '#ComposedNode.AttachEndpoint' action payload.
    attach_endpoint = AttachEndpointActionField("#ComposedNode.AttachEndpoint")

    # The '#ComposedNode.DetachEndpoint' action payload.
    detach_endpoint = DetachEndpointActionField("#ComposedNode.DetachEndpoint")
class NodeCollectionActionsField(base.CompositeField):
    """Actions exposed by the composed node collection."""

    # Allocation (composition) of a new node happens via this action.
    compose = common.ActionField("#ComposedNodeCollection.Allocate")
class LinksField(base.CompositeField):
    """Links from a composed node to its constituent resources."""

    computer_system = base.Field(
        "ComputerSystem", adapter=rsd_lib_utils.get_resource_identity
    )
    """Link to base computer system of this node"""

    processors = base.Field("Processors", adapter=utils.get_members_identities)
    """Link to processors of this node"""

    memory = base.Field("Memory", adapter=utils.get_members_identities)
    """Link to memory of this node"""

    ethernet_interfaces = base.Field(
        "EthernetInterfaces", adapter=utils.get_members_identities
    )
    """Link to ethernet interfaces of this node"""

    local_drives = base.Field(
        "LocalDrives", adapter=utils.get_members_identities
    )
    """Link to local driver of this node"""

    remote_drives = base.Field(
        "RemoteDrives", adapter=utils.get_members_identities
    )
    """Link to remote drives of this node"""

    # Identities of the manager resources responsible for this node.
    managed_by = base.Field("ManagedBy", adapter=utils.get_members_identities)
class Node(rsd_lib_base.ResourceBase):
    """ComposedNode resource class

    This schema defines a node and its respective properties.
    """

    links = LinksField("Links")
    """Contains links to other resources that are related to this resource."""

    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """

    # Lifecycle state of the composed node as reported by the service.
    composed_node_state = base.Field("ComposedNodeState")

    asset_tag = base.Field("AssetTag")
    """The user definable tag that can be used to track this computer system
    for inventory or other client purposes
    """

    uuid = base.Field("UUID")
    """The universal unique identifier (UUID) for this system"""

    power_state = base.Field("PowerState")
    """This is the current power state of the system"""

    boot = system.BootField("Boot")
    """Information about the boot settings for this system"""

    processors = system.ProcessorSummaryField("Processors")
    """This object describes the central processors of the system in general
    detail.
    """

    memory = system.MemorySummaryField("Memory")
    """This object describes the central memory of the system in general
    detail.
    """

    # Actions advertised by the service for this node; required in payload.
    _actions = NodeActionsField("Actions", required=True)

    def _get_reset_action_element(self):
        """Return the reset action descriptor.

        :raises: MissingActionError if the service does not expose a
            #ComposedNode.Reset action.
        """
        reset_action = self._actions.reset
        if not reset_action:
            raise exceptions.MissingActionError(
                action="#ComposedNode.Reset", resource=self._path
            )
        return reset_action

    def _get_assemble_action_element(self):
        """Return the assemble action descriptor.

        :raises: MissingActionError if the service does not expose a
            #ComposedNode.Assemble action.
        """
        assemble_action = self._actions.assemble
        if not assemble_action:
            raise exceptions.MissingActionError(
                action="#ComposedNode.Assemble", resource=self._path
            )
        return assemble_action

    def get_allowed_reset_node_values(self):
        """Get the allowed values for resetting the node.

        :returns: A set with the allowed values.
        """
        reset_action = self._get_reset_action_element()
        if not reset_action.allowed_values:
            # Service gave no allowable values; fall back to the full set
            # of known reset types.
            LOG.warning(
                "Could not figure out the allowed values for the "
                "reset node action for Node %s",
                self.identity,
            )
            return set(constants.RESET_TYPE_VALUE)
        return set(constants.RESET_TYPE_VALUE).intersection(
            reset_action.allowed_values
        )

    def reset_node(self, value):
        """Reset the node.

        :param value: The target value.
        :raises: InvalidParameterValueError, if the target value is not
            allowed.
        """
        valid_resets = self.get_allowed_reset_node_values()
        if value not in valid_resets:
            raise exceptions.InvalidParameterValueError(
                parameter="value", value=value, valid_values=valid_resets
            )
        target_uri = self._get_reset_action_element().target_uri
        self._conn.post(target_uri, data={"ResetType": value})

    def assemble_node(self):
        """Assemble the composed node."""
        target_uri = self._get_assemble_action_element().target_uri
        self._conn.post(target_uri)

    def get_allowed_node_boot_source_values(self):
        """Get the allowed values for changing the boot source.

        :returns: A set with the allowed values.
        """
        if not self.boot.boot_source_override_target_allowed_values:
            LOG.warning(
                "Could not figure out the allowed values for "
                "configuring the boot source for Node %s",
                self.identity,
            )
            return set(constants.BOOT_SOURCE_TARGET_VALUE)
        return set(constants.BOOT_SOURCE_TARGET_VALUE).intersection(
            self.boot.boot_source_override_target_allowed_values
        )

    def get_allowed_node_boot_mode_values(self):
        """Get the allowed values for the boot source mode.

        :returns: A set with the allowed values.
        """
        if not self.boot.boot_source_override_mode_allowed_values:
            LOG.warning(
                "Could not figure out the allowed values for "
                "configuring the boot mode for Node %s",
                self.identity,
            )
            return set(constants.BOOT_SOURCE_MODE_VALUE)
        return set(constants.BOOT_SOURCE_MODE_VALUE).intersection(
            self.boot.boot_source_override_mode_allowed_values
        )

    def set_node_boot_source(self, target, enabled="Once", mode=None):
        """Set the boot source.

        Set the boot source to use on next reboot of the Node.

        :param target: The target boot source.
        :param enabled: The frequency, whether to set it for the next
            reboot only ("Once") or persistent to all future reboots
            ("Continuous") or disabled ("Disabled").
        :param mode: The boot mode, UEFI ("UEFI") or BIOS ("Legacy").
        :raises: InvalidParameterValueError, if any information passed is
            invalid.
        """
        valid_targets = self.get_allowed_node_boot_source_values()
        if target not in valid_targets:
            raise exceptions.InvalidParameterValueError(
                parameter="target", value=target, valid_values=valid_targets
            )
        if enabled not in constants.BOOT_SOURCE_ENABLED_VALUE:
            raise exceptions.InvalidParameterValueError(
                parameter="enabled",
                value=enabled,
                valid_values=constants.BOOT_SOURCE_ENABLED_VALUE,
            )
        data = {
            "Boot": {
                "BootSourceOverrideTarget": target,
                "BootSourceOverrideEnabled": enabled,
            }
        }
        # Mode is optional; only validate and send it when supplied.
        if mode is not None:
            valid_modes = self.get_allowed_node_boot_mode_values()
            if mode not in valid_modes:
                raise exceptions.InvalidParameterValueError(
                    parameter="mode", value=mode, valid_values=valid_modes
                )
            data["Boot"]["BootSourceOverrideMode"] = mode
        self._conn.patch(self.path, data=data)

    def _get_attach_endpoint_action_element(self):
        """Return the attach-endpoint action descriptor.

        :raises: MissingActionError if the service does not expose a
            #ComposedNode.AttachEndpoint action.
        """
        attach_endpoint_action = self._actions.attach_endpoint
        if not attach_endpoint_action:
            raise exceptions.MissingActionError(
                action="#ComposedNode.AttachEndpoint", resource=self._path
            )
        return attach_endpoint_action

    def get_allowed_attach_endpoints(self):
        """Get the allowed endpoints for attach action.

        :returns: A set with the allowed attach endpoints.
        """
        attach_action = self._get_attach_endpoint_action_element()
        return attach_action.allowed_values

    def attach_endpoint(self, endpoint=None, capacity=None):
        """Attach endpoint from available pool to composed node

        :param endpoint: Link to endpoint to attach.
        :param capacity: Requested capacity of the drive in GiB.
        :raises: InvalidParameterValueError
        :raises: BadRequestError if at least one param isn't specified
        """
        attach_action = self._get_attach_endpoint_action_element()
        valid_endpoints = attach_action.allowed_values
        target_uri = attach_action.target_uri
        # Only validate the endpoint if one was given; capacity-only
        # requests are passed straight through to the service.
        if endpoint and endpoint not in valid_endpoints:
            raise exceptions.InvalidParameterValueError(
                parameter="endpoint",
                value=endpoint,
                valid_values=valid_endpoints,
            )
        data = {}
        if endpoint is not None:
            data["Resource"] = {"@odata.id": endpoint}
        if capacity is not None:
            data["CapacityGiB"] = capacity
        self._conn.post(target_uri, data=data)

    def _get_detach_endpoint_action_element(self):
        """Return the detach-endpoint action descriptor.

        :raises: MissingActionError if the service does not expose a
            #ComposedNode.DetachEndpoint action.
        """
        detach_endpoint_action = self._actions.detach_endpoint
        if not detach_endpoint_action:
            raise exceptions.MissingActionError(
                action="#ComposedNode.DetachEndpoint", resource=self._path
            )
        return detach_endpoint_action

    def get_allowed_detach_endpoints(self):
        """Get the allowed endpoints for detach action.

        :returns: A set with the allowed detach endpoints.
        """
        detach_action = self._get_detach_endpoint_action_element()
        return detach_action.allowed_values

    def detach_endpoint(self, endpoint):
        """Detach already attached endpoint from composed node

        :param endpoint: Link to endpoint to detach
        :raises: InvalidParameterValueError
        :raises: BadRequestError
        """
        detach_action = self._get_detach_endpoint_action_element()
        valid_endpoints = detach_action.allowed_values
        target_uri = detach_action.target_uri
        if endpoint not in valid_endpoints:
            raise exceptions.InvalidParameterValueError(
                parameter="endpoint",
                value=endpoint,
                valid_values=valid_endpoints,
            )
        data = {"Resource": endpoint}
        self._conn.post(target_uri, data=data)

    def delete_node(self):
        """Delete (disassemble) the node.

        When this action is called several tasks are performed. A graceful
        shutdown is sent to the computer system, all VLANs except reserved
        ones are removed from associated ethernet switch ports, the computer
        system is deallocated and the remote target is deallocated.
        """
        self._conn.delete(self.path)
class NodeCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection of composed nodes; supports the Allocate (compose) action."""

    _actions = NodeCollectionActionsField("Actions", required=True)

    @property
    def _resource_type(self):
        # Each member of this collection deserializes as a Node.
        return Node

    def _get_compose_action_element(self):
        """Return the allocate (compose) action descriptor.

        :raises: MissingActionError if the service does not expose a
            #ComposedNodeCollection.Allocate action.
        """
        compose_action = self._actions.compose
        if not compose_action:
            raise exceptions.MissingActionError(
                action="#ComposedNodeCollection.Allocate", resource=self._path
            )
        return compose_action

    def _create_compose_request(
        self,
        name=None,
        description=None,
        processor_req=None,
        memory_req=None,
        remote_drive_req=None,
        local_drive_req=None,
        ethernet_interface_req=None,
        total_system_core_req=None,
        total_system_memory_req=None,
    ):
        """Build the allocation request body.

        Each supplied requirement is validated against its JSON schema from
        ``node_schemas`` before being added; ``jsonschema.validate`` raises
        on malformed input.  Omitted (None) arguments are left out of the
        request entirely.

        :returns: dict ready to be POSTed to the Allocate action URI
        """
        request = {}

        if name is not None:
            request["Name"] = name
        if description is not None:
            request["Description"] = description

        if processor_req is not None:
            validate(processor_req, node_schemas.processor_req_schema)
            request["Processors"] = processor_req

        if memory_req is not None:
            validate(memory_req, node_schemas.memory_req_schema)
            request["Memory"] = memory_req

        if remote_drive_req is not None:
            validate(remote_drive_req, node_schemas.remote_drive_req_schema)
            request["RemoteDrives"] = remote_drive_req

        if local_drive_req is not None:
            validate(local_drive_req, node_schemas.local_drive_req_schema)
            request["LocalDrives"] = local_drive_req

        if ethernet_interface_req is not None:
            validate(
                ethernet_interface_req,
                node_schemas.ethernet_interface_req_schema,
            )
            request["EthernetInterfaces"] = ethernet_interface_req

        if total_system_core_req is not None:
            validate(
                total_system_core_req,
                node_schemas.total_system_core_req_schema,
            )
            request["TotalSystemCoreCount"] = total_system_core_req

        if total_system_memory_req is not None:
            validate(
                total_system_memory_req,
                node_schemas.total_system_memory_req_schema,
            )
            request["TotalSystemMemoryMiB"] = total_system_memory_req

        return request

    def compose_node(
        self,
        name=None,
        description=None,
        processor_req=None,
        memory_req=None,
        remote_drive_req=None,
        local_drive_req=None,
        ethernet_interface_req=None,
        total_system_core_req=None,
        total_system_memory_req=None,
    ):
        """Compose a node from RackScale hardware

        :param name: Name of node
        :param description: Description of node
        :param processor_req: JSON for node processors
        :param memory_req: JSON for node memory modules
        :param remote_drive_req: JSON for node remote drives
        :param local_drive_req: JSON for node local drives
        :param ethernet_interface_req: JSON for node ethernet ports
        :param total_system_core_req: Total processor cores available in
            composed node
        :param total_system_memory_req: Total memory available in composed
            node
        :returns: The location of the composed node

        When the 'processor_req' is not none: it need a computer system
        contains processors whose each processor meet all conditions in the
        value.

        When the 'total_system_core_req' is not none: it need a computer
        system contains processors whose cores sum up to number equal or
        greater than 'total_system_core_req'.

        When both values are not none: it need meet all conditions.

        'memory_req' and 'total_system_memory_req' is the same.
        """
        target_uri = self._get_compose_action_element().target_uri
        properties = self._create_compose_request(
            name=name,
            description=description,
            processor_req=processor_req,
            memory_req=memory_req,
            remote_drive_req=remote_drive_req,
            local_drive_req=local_drive_req,
            ethernet_interface_req=ethernet_interface_req,
            total_system_core_req=total_system_core_req,
            total_system_memory_req=total_system_memory_req,
        )
        resp = self._conn.post(target_uri, data=properties)
        LOG.info("Node created at %s", resp.headers["Location"])
        node_url = resp.headers["Location"]
        # Return the service-relative part of the Location header so the
        # caller can fetch the new node through the same connection.
        return node_url[node_url.find(self._path):]
# JSON schema for the "Processors" requirements of a compose request.
# ``items`` is a single schema (object form) so that *every* element of the
# request array is validated.  The previous one-element-list ("tuple") form
# only validated the first element under JSON Schema draft-04 semantics,
# silently accepting malformed entries after it.  Same fix applied to the
# nested "Capabilities" array.
processor_req_schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "Model": {"type": "string"},
            "TotalCores": {"type": "number"},
            "AchievableSpeedMHz": {"type": "number"},
            "InstructionSet": {
                "type": "string",
                "enum": [
                    "x86",
                    "x86-64",
                    "IA-64",
                    "ARM-A32",
                    "ARM-A64",
                    "MIPS32",
                    "MIPS64",
                    "OEM",
                ],
            },
            "Oem": {
                "type": "object",
                "properties": {
                    "Brand": {
                        "type": "string",
                        "enum": [
                            "E3",
                            "E5",
                            "E7",
                            "X3",
                            "X5",
                            "X7",
                            "I3",
                            "I5",
                            "I7",
                            "Unknown",
                        ],
                    },
                    "Capabilities": {
                        "type": "array",
                        "items": {"type": "string"},
                    },
                },
            },
            "Resource": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
            },
            "Chassis": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
            },
        },
        "additionalProperties": False,
    },
}
# JSON schema for the "Memory" requirements of a compose request.
# ``items`` is a single schema (object form) so every array element is
# validated; the previous one-element-list ("tuple") form only validated
# the first element under JSON Schema draft-04 semantics.
memory_req_schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "CapacityMiB": {"type": "number"},
            "MemoryDeviceType": {
                "type": "string",
                "enum": [
                    "DDR",
                    "DDR2",
                    "DDR3",
                    "DDR4",
                    "DDR4_SDRAM",
                    "DDR4E_SDRAM",
                    "LPDDR4_SDRAM",
                    "DDR3_SDRAM",
                    "LPDDR3_SDRAM",
                    "DDR2_SDRAM",
                    "DDR2_SDRAM_FB_DIMM",
                    "DDR2_SDRAM_FB_DIMM_PROBE",
                    "DDR_SGRAM",
                    "DDR_SDRAM",
                    "ROM",
                    "SDRAM",
                    "EDO",
                    "FastPageMode",
                    "PipelinedNibble",
                ],
            },
            "SpeedMHz": {"type": "number"},
            "Manufacturer": {"type": "string"},
            "DataWidthBits": {"type": "number"},
            "Resource": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
            },
            "Chassis": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
            },
        },
        "additionalProperties": False,
    },
}
# JSON schema for the "RemoteDrives" requirements of a compose request.
# ``items`` is a single schema (object form) so every array element is
# validated; the previous one-element-list ("tuple") form only validated
# the first element under JSON Schema draft-04 semantics.
remote_drive_req_schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "CapacityGiB": {"type": "number"},
            "iSCSIAddress": {"type": "string"},
            "Master": {
                "type": "object",
                "properties": {
                    "Type": {
                        "type": "string",
                        "enum": ["Snapshot", "Clone"],
                    },
                    "Address": {
                        "type": "object",
                        "properties": {"@odata.id": {"type": "string"}},
                    },
                },
            },
        },
        "additionalProperties": False,
    },
}
# JSON schema for the "LocalDrives" requirements of a compose request.
# ``items`` is a single schema (object form) so every array element is
# validated; the previous one-element-list ("tuple") form only validated
# the first element under JSON Schema draft-04 semantics.
local_drive_req_schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "CapacityGiB": {"type": "number"},
            "Type": {"type": "string", "enum": ["HDD", "SSD"]},
            "MinRPM": {"type": "number"},
            "SerialNumber": {"type": "string"},
            "Interface": {
                "type": "string",
                "enum": ["SAS", "SATA", "NVMe"],
            },
            "Resource": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
            },
            "Chassis": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
            },
            "FabricSwitch": {"type": "boolean"},
        },
        "additionalProperties": False,
    },
}
# JSON schema for the "EthernetInterfaces" requirements of a compose request.
# Two fixes: (1) ``items`` is a single schema (object form) so every array
# element is validated -- the previous one-element-list ("tuple") form only
# validated the first element; (2) the nested VLANs array now uses ``items``
# instead of ``additionalItems`` -- under JSON Schema draft-04,
# ``additionalItems`` has no effect when ``items`` is absent, so VLAN
# entries were previously not validated at all.
ethernet_interface_req_schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "SpeedMbps": {"type": "number"},
            "PrimaryVLAN": {"type": "number"},
            "VLANs": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "VLANId": {"type": "number"},
                        "Tagged": {"type": "boolean"},
                    },
                },
            },
            "Resource": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
            },
            "Chassis": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
            },
        },
        "additionalProperties": False,
    },
}
# Total processor core count requested for the composed node.
total_system_core_req_schema = {"type": "number"}

# Total memory (MiB) requested for the composed node.
total_system_memory_req_schema = {"type": "number"}
from sushy.resources import base
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib.resources.v2_1.common import redundancy
from rsd_lib import utils as rsd_lib_utils
class PowerMetricField(base.CompositeField):
    """Power readings measured over a rolling time window."""

    interval_in_min = base.Field(
        "IntervalInMin", adapter=rsd_lib_utils.num_or_none
    )
    """The time interval (or window) in which the PowerMetrics are measured
    over.
    """

    min_consumed_watts = base.Field(
        "MinConsumedWatts", adapter=rsd_lib_utils.num_or_none
    )
    """The lowest power consumption level over the measurement window (the
    last IntervalInMin minutes).
    """

    max_consumed_watts = base.Field(
        "MaxConsumedWatts", adapter=rsd_lib_utils.num_or_none
    )
    """The highest power consumption level that has occurred over the
    measurement window (the last IntervalInMin minutes).
    """

    average_consumed_watts = base.Field(
        "AverageConsumedWatts", adapter=rsd_lib_utils.num_or_none
    )
    """The average power level over the measurement window (the last
    IntervalInMin minutes).
    """
class PowerLimitField(base.CompositeField):
    """PowerLimit field

    This object contains power limit status and configuration information
    for the chassis.
    """

    limit_in_watts = base.Field(
        "LimitInWatts", adapter=rsd_lib_utils.num_or_none
    )
    """The Power limit in watts. Set to null to disable power capping."""

    limit_exception = base.Field("LimitException")
    """The action that is taken if the power cannot be maintained below the
    LimitInWatts.
    """

    correction_in_ms = base.Field(
        "CorrectionInMs", adapter=rsd_lib_utils.num_or_none
    )
    """The time required for the limiting process to reduce power consumption
    to below the limit.
    """
class InputRangeCollectionField(base.ListField):
    """Line input ranges over which a power supply can operate."""

    input_type = base.Field("InputType")
    """The Input type (AC or DC)"""

    minimum_voltage = base.Field(
        "MinimumVoltage", adapter=rsd_lib_utils.num_or_none
    )
    """The minimum line input voltage at which this power supply input range
    is effective
    """

    maximum_voltage = base.Field(
        "MaximumVoltage", adapter=rsd_lib_utils.num_or_none
    )
    """The maximum line input voltage at which this power supply input range
    is effective
    """

    minimum_frequency_hz = base.Field(
        "MinimumFrequencyHz", adapter=rsd_lib_utils.num_or_none
    )
    """The minimum line input frequency at which this power supply input range
    is effective
    """

    maximum_frequency_hz = base.Field(
        "MaximumFrequencyHz", adapter=rsd_lib_utils.num_or_none
    )
    """The maximum line input frequency at which this power supply input range
    is effective
    """

    output_wattage = base.Field(
        "OutputWattage", adapter=rsd_lib_utils.num_or_none
    )
    """The maximum capacity of this Power Supply when operating in this input
    range
    """
class VoltageCollectionField(rsd_lib_base.ReferenceableMemberField):
    """Voltage sensor readings and their alarm thresholds."""

    name = base.Field("Name")
    """Voltage sensor name."""

    sensor_number = base.Field(
        "SensorNumber", adapter=rsd_lib_utils.num_or_none
    )
    """A numerical identifier to represent the voltage sensor"""

    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """

    reading_volts = base.Field(
        "ReadingVolts", adapter=rsd_lib_utils.num_or_none
    )
    """The current value of the voltage sensor."""

    upper_threshold_non_critical = base.Field(
        "UpperThresholdNonCritical", adapter=rsd_lib_utils.num_or_none
    )
    """Above normal range"""

    upper_threshold_critical = base.Field(
        "UpperThresholdCritical", adapter=rsd_lib_utils.num_or_none
    )
    """Above normal range but not yet fatal."""

    upper_threshold_fatal = base.Field(
        "UpperThresholdFatal", adapter=rsd_lib_utils.num_or_none
    )
    """Above normal range and is fatal"""

    lower_threshold_non_critical = base.Field(
        "LowerThresholdNonCritical", adapter=rsd_lib_utils.num_or_none
    )
    """Below normal range"""

    lower_threshold_critical = base.Field(
        "LowerThresholdCritical", adapter=rsd_lib_utils.num_or_none
    )
    """Below normal range but not yet fatal."""

    lower_threshold_fatal = base.Field(
        "LowerThresholdFatal", adapter=rsd_lib_utils.num_or_none
    )
    """Below normal range and is fatal"""

    min_reading_range = base.Field(
        "MinReadingRange", adapter=rsd_lib_utils.num_or_none
    )
    """Minimum value for CurrentReading"""

    max_reading_range = base.Field(
        "MaxReadingRange", adapter=rsd_lib_utils.num_or_none
    )
    """Maximum value for CurrentReading"""

    physical_context = base.Field("PhysicalContext")
    """Describes the area or device to which this voltage measurement applies.
    """

    related_item = base.Field(
        "RelatedItem", adapter=utils.get_members_identities
    )
    """Describes the areas or devices to which this voltage measurement
    applies.
    """
class PowerControlCollectionField(rsd_lib_base.ReferenceableMemberField):
    """Power control (reading / limiting) function of the chassis."""

    name = base.Field("Name")
    """Power Control Function name."""

    power_consumed_watts = base.Field(
        "PowerConsumedWatts", adapter=rsd_lib_utils.num_or_none
    )
    """The actual power being consumed by the chassis."""

    power_requested_watts = base.Field(
        "PowerRequestedWatts", adapter=rsd_lib_utils.num_or_none
    )
    """The potential power that the chassis resources are requesting which may
    be higher than the current level being consumed since requested power
    includes budget that the chassis resource wants for future use.
    """

    power_available_watts = base.Field(
        "PowerAvailableWatts", adapter=rsd_lib_utils.num_or_none
    )
    """The amount of power not already budgeted and therefore available for
    additional allocation. (powerCapacity - powerAllocated). This
    indicates how much reserve power capacity is left.
    """

    power_capacity_watts = base.Field(
        "PowerCapacityWatts", adapter=rsd_lib_utils.num_or_none
    )
    """The total amount of power available to the chassis for allocation.
    This may be the power supply capacity, or power budget assigned to the
    chassis from an up-stream chassis.
    """

    power_allocated_watts = base.Field(
        "PowerAllocatedWatts", adapter=rsd_lib_utils.num_or_none
    )
    """The total amount of power that has been allocated (or budgeted) to
    chassis resources.
    """

    power_metrics = PowerMetricField("PowerMetrics")
    """Power readings for this chassis."""

    power_limit = PowerLimitField("PowerLimit")
    """Power limit status and configuration information for this chassis"""

    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """

    related_item = base.Field(
        "RelatedItem", adapter=utils.get_members_identities
    )
    """The ID(s) of the resources associated with this Power Limit"""
class PowerSupplyCollectionField(rsd_lib_base.ReferenceableMemberField):
    """PowerSupply field

    Details of a power supplies associated with this system or device
    """

    name = base.Field("Name")
    """The name of the Power Supply"""

    power_supply_type = base.Field("PowerSupplyType")
    """The Power Supply type (AC or DC)"""

    line_input_voltage_type = base.Field("LineInputVoltageType")
    """The line voltage type supported as an input to this Power Supply"""

    line_input_voltage = base.Field(
        "LineInputVoltage", adapter=rsd_lib_utils.num_or_none
    )
    """The line input voltage at which the Power Supply is operating"""

    power_capacity_watts = base.Field(
        "PowerCapacityWatts", adapter=rsd_lib_utils.num_or_none
    )
    """The maximum capacity of this Power Supply"""

    last_power_output_watts = base.Field(
        "LastPowerOutputWatts", adapter=rsd_lib_utils.num_or_none
    )
    """The average power output of this Power Supply"""

    model = base.Field("Model")
    """The model number for this Power Supply"""

    firmware_version = base.Field("FirmwareVersion")
    """The firmware version for this Power Supply"""

    serial_number = base.Field("SerialNumber")
    """The serial number for this Power Supply"""

    part_number = base.Field("PartNumber")
    """The part number for this Power Supply"""

    spare_part_number = base.Field("SparePartNumber")
    """The spare part number for this Power Supply"""

    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """

    related_item = base.Field(
        "RelatedItem", adapter=utils.get_members_identities
    )
    """The ID(s) of the resources associated with this Power Limit"""

    redundancy = redundancy.RedundancyCollectionField("Redundancy")
    """This structure is used to show redundancy for power supplies. The
    Component ids will reference the members of the redundancy groups.
    """

    manufacturer = base.Field("Manufacturer")
    """This is the manufacturer of this power supply."""

    input_ranges = InputRangeCollectionField("InputRanges")
    """This is the input ranges that the power supply can use."""
class Power(rsd_lib_base.ResourceBase):
    """Power resource class

    This is the schema definition for the Power Metrics. It represents the
    properties for Power Consumption and Power Limiting.
    """

    power_control = PowerControlCollectionField("PowerControl")
    """This is the definition for power control function (power
    reading/limiting).
    """

    voltages = VoltageCollectionField("Voltages")
    """This is the definition for voltage sensors."""

    power_supplies = PowerSupplyCollectionField("PowerSupplies")
    """Details of the power supplies associated with this system or device"""

    redundancy = redundancy.RedundancyCollectionField("Redundancy")
    """Redundancy information for the power subsystem of this system or
    device
    """
from sushy.resources import base
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib.resources.v2_1.common import redundancy
from rsd_lib import utils as rsd_lib_utils
class FanCollectionField(rsd_lib_base.ReferenceableMemberField):
    """Fan readings and their alarm thresholds."""

    # NOTE(review): both "FanName" and "Name" are mapped below; "FanName"
    # appears to be the legacy property -- confirm against the Redfish
    # Thermal schema.
    fan_name = base.Field("FanName")
    """Name of the fan"""

    physical_context = base.Field("PhysicalContext")
    """Describes the area or device associated with this fan."""

    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """

    reading = base.Field("Reading", adapter=rsd_lib_utils.num_or_none)
    """Current fan speed"""

    upper_threshold_non_critical = base.Field(
        "UpperThresholdNonCritical", adapter=rsd_lib_utils.num_or_none
    )
    """Above normal range"""

    upper_threshold_critical = base.Field(
        "UpperThresholdCritical", adapter=rsd_lib_utils.num_or_none
    )
    """Above normal range but not yet fatal"""

    upper_threshold_fatal = base.Field(
        "UpperThresholdFatal", adapter=rsd_lib_utils.num_or_none
    )
    """Above normal range and is fatal"""

    lower_threshold_non_critical = base.Field(
        "LowerThresholdNonCritical", adapter=rsd_lib_utils.num_or_none
    )
    """Below normal range"""

    lower_threshold_critical = base.Field(
        "LowerThresholdCritical", adapter=rsd_lib_utils.num_or_none
    )
    """Below normal range but not yet fatal"""

    lower_threshold_fatal = base.Field(
        "LowerThresholdFatal", adapter=rsd_lib_utils.num_or_none
    )
    """Below normal range and is fatal"""

    min_reading_range = base.Field(
        "MinReadingRange", adapter=rsd_lib_utils.num_or_none
    )
    """Minimum value for Reading"""

    max_reading_range = base.Field(
        "MaxReadingRange", adapter=rsd_lib_utils.num_or_none
    )
    """Maximum value for Reading"""

    related_item = base.Field(
        "RelatedItem", adapter=utils.get_members_identities
    )
    """The ID(s) of the resources serviced with this fan"""

    redundancy = redundancy.RedundancyCollectionField("Redundancy")
    """This structure is used to show redundancy for fans. The Component ids
    will reference the members of the redundancy groups.
    """

    reading_units = base.Field("ReadingUnits")
    """Units in which the reading and thresholds are measured."""

    name = base.Field("Name")
    """Name of the fan"""
class TemperatureCollectionField(rsd_lib_base.ReferenceableMemberField):
    """Temperature sensor readings and their alarm thresholds."""

    name = base.Field("Name")
    """Temperature sensor name."""

    sensor_number = base.Field(
        "SensorNumber", adapter=rsd_lib_utils.num_or_none
    )
    """A numerical identifier to represent the temperature sensor"""

    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """

    reading_celsius = base.Field(
        "ReadingCelsius", adapter=rsd_lib_utils.num_or_none
    )
    """Temperature"""

    upper_threshold_non_critical = base.Field(
        "UpperThresholdNonCritical", adapter=rsd_lib_utils.num_or_none
    )
    """Above normal range"""

    upper_threshold_critical = base.Field(
        "UpperThresholdCritical", adapter=rsd_lib_utils.num_or_none
    )
    """Above normal range but not yet fatal."""

    upper_threshold_fatal = base.Field(
        "UpperThresholdFatal", adapter=rsd_lib_utils.num_or_none
    )
    """Above normal range and is fatal"""

    lower_threshold_non_critical = base.Field(
        "LowerThresholdNonCritical", adapter=rsd_lib_utils.num_or_none
    )
    """Below normal range"""

    lower_threshold_critical = base.Field(
        "LowerThresholdCritical", adapter=rsd_lib_utils.num_or_none
    )
    """Below normal range but not yet fatal."""

    lower_threshold_fatal = base.Field(
        "LowerThresholdFatal", adapter=rsd_lib_utils.num_or_none
    )
    """Below normal range and is fatal"""

    min_reading_range_temp = base.Field(
        "MinReadingRangeTemp", adapter=rsd_lib_utils.num_or_none
    )
    """Minimum value for ReadingCelsius"""

    max_reading_range_temp = base.Field(
        "MaxReadingRangeTemp", adapter=rsd_lib_utils.num_or_none
    )
    """Maximum value for ReadingCelsius"""

    physical_context = base.Field("PhysicalContext")
    """Describes the area or device to which this temperature measurement
    applies.
    """

    related_item = base.Field(
        "RelatedItem", adapter=utils.get_members_identities
    )
    """Describes the areas or devices to which this temperature measurement
    applies.
    """
class Thermal(rsd_lib_base.ResourceBase):
    """Thermal resource class

    This is the schema definition for the Thermal properties. It
    represents the properties for Temperature and Cooling.
    """

    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """

    temperatures = TemperatureCollectionField("Temperatures")
    """This is the definition for temperature sensors."""

    fans = FanCollectionField("Fans")
    """This is the definition for fans."""

    redundancy = redundancy.RedundancyCollectionField("Redundancy")
    """This structure is used to show redundancy for fans. The Component ids
    will reference the members of the redundancy groups.
    """
from sushy.resources import base
from rsd_lib import base as rsd_lib_base
from rsd_lib.resources.v2_1.common import rack_location
from rsd_lib import utils as rsd_lib_utils
class PowerSupplyCollectionField(base.ListField):
    """Power supply units belonging to a power zone."""

    name = base.Field("Name")
    """The Power Supply name"""

    power_capacity_watts = base.Field(
        "PowerCapacityWatts", adapter=rsd_lib_utils.num_or_none
    )
    """The maximum capacity of this Power Supply"""

    last_power_output_watts = base.Field(
        "LastPowerOutputWatts", adapter=rsd_lib_utils.num_or_none
    )
    """The average power output of this Power Supply"""

    manufacturer = base.Field("Manufacturer")
    """The manufacturer of this Power Supply"""

    model_number = base.Field("ModelNumber")
    """The model number for this Power Supply"""

    firmware_revision = base.Field("FirmwareRevision")
    """The firmware version for this Power Supply"""

    serial_number = base.Field("SerialNumber")
    """The serial number for this Power Supply"""

    part_number = base.Field("PartNumber")
    """The part number for this Power Supply"""

    status = rsd_lib_base.StatusField("Status")
    """The Power supply status"""

    rack_location = rack_location.RackLocationField("RackLocation")
    """The PowerZone physical location"""
class PowerZone(rsd_lib_base.ResourceBase):
    """PowerZone resource class -- aggregated power domain of a rack."""

    status = rsd_lib_base.StatusField("Status")
    """The PowerZone status"""

    rack_location = rack_location.RackLocationField("RackLocation")
    """The PowerZone physical location"""

    max_psus_supported = base.Field(
        "MaxPSUsSupported", adapter=rsd_lib_utils.num_or_none
    )
    """The maximum number of Power Supply Units supported by PowerZone"""

    presence = base.Field("Presence")
    """Indicates the aggregated Power Supply Unit presence information

    Aggregated Power Supply Unit presence format: Length of string indicates
    total slot of Power Supply Units in PowerZone.

    For each byte of the string:
    "1" means present
    "0" means not present
    """

    number_of_psus_present = base.Field(
        "NumberOfPSUsPresent", adapter=rsd_lib_utils.num_or_none
    )
    """Indicates the number of existing Power Supply Units in PowerZone"""

    power_consumed_watts = base.Field(
        "PowerConsumedWatts", adapter=rsd_lib_utils.num_or_none
    )
    """The total power consumption of PowerZone, sum of trays'
    power consumption
    """

    power_output_watts = base.Field(
        "PowerOutputWatts", adapter=rsd_lib_utils.num_or_none
    )
    """The total power production of PowerZone, sum of PSUs' output"""

    power_capacity_watts = base.Field(
        "PowerCapacityWatts", adapter=rsd_lib_utils.num_or_none
    )
    """The maximum power capacity supported by PowerZone"""

    power_supplies = PowerSupplyCollectionField("PowerSupplies")
    """Details of the power supplies associated with this system or device"""
class PowerZoneCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection of `PowerZone` resources."""

    @property
    def _resource_type(self):
        # Tells the base collection which resource class to instantiate
        # for each member entry.
        return PowerZone
from sushy.resources import base
from rsd_lib import base as rsd_lib_base
from rsd_lib.resources.v2_1.common import rack_location
from rsd_lib import utils as rsd_lib_utils
class TemperatureSensorCollectionField(base.ListField):
    """List field describing the temperature sensors of a thermal zone."""

    name = base.Field("Name")
    """The temperature sensor name"""

    reading_celsius = base.Field(
        "ReadingCelsius", adapter=rsd_lib_utils.num_or_none
    )
    """Current value of the temperature sensor's reading"""

    physical_context = base.Field("PhysicalContext")
    """Describes the area or device to which this temperature measurement
    applies:
    "Intake" - The intake point of the chassis
    "Exhaust" - The exhaust point of the chassis
    "Backplane" - A backplane within the chassis
    "PowerSupply" - A power supply
    "SystemBoard" - The system board (PCB)
    "ComputeBay" - Within a compute bay
    "PowerSupplyBay" - Within a power supply bay
    """

    status = rsd_lib_base.StatusField("Status")
    """The temperature sensors status"""
class FanCollectionField(base.ListField):
    """List field describing the fans of a thermal zone."""

    name = base.Field("Name")
    """The Fan name"""

    reading_rpm = base.Field("ReadingRPM", adapter=rsd_lib_utils.num_or_none)
    """Fan RPM reading"""

    status = rsd_lib_base.StatusField("Status")
    """The Fan status"""

    rack_location = rack_location.RackLocationField("RackLocation")
    """The Fan physical location"""
class ThermalZone(rsd_lib_base.ResourceBase):
    """ThermalZone resource class

    Represents a thermal zone inside a Rack Scale Design chassis,
    aggregating its fans and temperature sensors together with fan-speed
    and airflow telemetry.
    """

    status = rsd_lib_base.StatusField("Status")
    """The ThermalZone status"""

    rack_location = rack_location.RackLocationField("RackLocation")
    """The ThermalZone physical location"""

    # NOTE(review): the docstring below appears copy-pasted from the
    # PowerZone resource; for a ThermalZone, Presence presumably describes
    # fan slots rather than Power Supply Units -- confirm against the RSD
    # ThermalZone schema.
    presence = base.Field("Presence")
    """Indicates the aggregated Power Supply Unit presence information

    Aggregated Power Supply Unit presence format: Length of string indicate
    total slot of Power Supply Units in PowerZone.

    For each byte the string:
    "1" means present
    "0" means not present
    """

    desired_speed_pwm = base.Field(
        "DesiredSpeedPWM", adapter=rsd_lib_utils.num_or_none
    )
    """The desired FAN speed in current ThermalZone present in PWM unit"""

    desired_speed_rpm = base.Field(
        "DesiredSpeedRPM", adapter=rsd_lib_utils.num_or_none
    )
    """The desired FAN speed in current ThermalZone present in RPM unit"""

    max_fans_supported = base.Field(
        "MaxFansSupported", adapter=rsd_lib_utils.num_or_none
    )
    """Number of maximum fans that can be installed in a given Thermal Zone"""

    number_of_fans_present = base.Field(
        "NumberOfFansPresent", adapter=rsd_lib_utils.num_or_none
    )
    """The existing number of fans in current ThermalZone"""

    volumetric_airflow = base.Field(
        "VolumetricAirflow", adapter=rsd_lib_utils.num_or_none
    )
    """Rack Level PTAS Telemetry - Volumetric airflow in current ThermalZone"""

    fans = FanCollectionField("Fans")
    """Details of the fans associated with this thermal zone"""

    temperatures = TemperatureSensorCollectionField("Temperatures")
    """Array of temperature sensors"""
class ThermalZoneCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection of `ThermalZone` resources."""

    @property
    def _resource_type(self):
        # Resource class instantiated for each member of the collection.
        return ThermalZone
from sushy.resources import base
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib.resources.v2_1.chassis import log_service
from rsd_lib.resources.v2_1.chassis import power
from rsd_lib.resources.v2_1.chassis import power_zone
from rsd_lib.resources.v2_1.chassis import thermal
from rsd_lib.resources.v2_1.chassis import thermal_zone
from rsd_lib import utils as rsd_lib_utils
class LocationField(base.CompositeField):
    """Intel RackScale chassis location: this chassis' id and its parent."""

    identity = base.Field("Id")
    """The location ID of the chassis"""

    parent_id = base.Field("ParentId")
    """The location ID of parent chassis"""
class LinksIntelRackScaleField(base.CompositeField):
    """Intel Rack Scale Design specific links for a Chassis."""

    switches = base.Field("Switches", adapter=utils.get_members_identities)
    """An array of references to the ethernet switches located in this Chassis.
    """
class LinksOemField(base.CompositeField):
    """OEM wrapper for the vendor-specific part of the Links object."""

    intel_rackscale = LinksIntelRackScaleField("Intel_RackScale")
    """Intel Rack Scale Design specific properties."""
class LinksField(base.CompositeField):
    """References from a Chassis to the resources related to it."""

    computer_systems = base.Field(
        "ComputerSystems", adapter=utils.get_members_identities
    )
    """An array of references to the computer systems contained in this
    chassis. This will only reference ComputerSystems that are directly
    and wholly contained in this chassis.
    """

    managed_by = base.Field("ManagedBy", adapter=utils.get_members_identities)
    """An array of references to the Managers responsible for managing this
    chassis.
    """

    contained_by = base.Field(
        "ContainedBy", adapter=rsd_lib_utils.get_resource_identity
    )
    """A reference to the chassis that this chassis is contained by."""

    contains = base.Field("Contains", adapter=utils.get_members_identities)
    """An array of references to any other chassis that this chassis has in it.
    """

    powered_by = base.Field("PoweredBy", adapter=utils.get_members_identities)
    """An array of ID[s] of resources that power this chassis. Normally the ID
    will be a chassis or a specific set of powerSupplies
    """

    cooled_by = base.Field("CooledBy", adapter=utils.get_members_identities)
    """An array of ID[s] of resources that cool this chassis. Normally the ID
    will be a chassis or a specific set of fans.
    """

    managers_in_chassis = base.Field(
        "ManagersInChassis", adapter=utils.get_members_identities
    )
    """An array of references to the managers located in this Chassis."""

    drives = base.Field("Drives", adapter=utils.get_members_identities)
    """An array of references to the disk drives located in this Chassis."""

    storage = base.Field("Storage", adapter=utils.get_members_identities)
    """An array of references to the storage subsystems connected to or inside
    this Chassis.
    """

    oem = LinksOemField("Oem")
    """Oem specific properties."""
class PhysicalSecurityField(base.CompositeField):
    """State of the chassis' physical (intrusion) security sensor."""

    intrusion_sensor_number = base.Field(
        "IntrusionSensorNumber", adapter=rsd_lib_utils.num_or_none
    )
    """A numerical identifier to represent the physical security sensor."""

    intrusion_sensor = base.Field("IntrusionSensor")
    """This indicates the known state of the physical security sensor, such as
    if it is hardware intrusion detected.
    """

    intrusion_sensor_re_arm = base.Field("IntrusionSensorReArm")
    """This indicates how the Normal state to be restored."""
class IntelRackScaleField(base.CompositeField):
    """Intel Rack Scale Design OEM extension for a Chassis resource."""

    location = LocationField("Location")
    """Property that shows this chassis ID and its parent"""

    rmm_present = base.Field("RMMPresent", adapter=bool)
    """RMM presence in a rack"""

    rack_supports_disaggregated_power_cooling = base.Field(
        "RackSupportsDisaggregatedPowerCooling", adapter=bool
    )
    """Indicates if Rack support is disaggregated (shared) power and cooling
    capabilities
    """

    uuid = base.Field("UUID")
    """Chassis unique ID"""

    geo_tag = base.Field("GeoTag")
    """Provides info about the geographical location of this chassis"""
class OemField(base.CompositeField):
    """OEM wrapper holding the Intel RackScale chassis extension."""

    intel_rackscale = IntelRackScaleField("Intel_RackScale")
    """Intel Rack Scale Design specific properties."""
class Chassis(rsd_lib_base.ResourceBase):
    """Chassis resource class

    A Chassis represents the physical components for any system. This
    resource represents the sheet-metal confined spaces and logical zones
    like racks, enclosures, chassis and all other containers. Subsystems
    (like sensors), which operate outside of a system's data plane (meaning
    the resources are not accessible to software running on the system) are
    linked either directly or indirectly through this resource.
    """

    chassis_type = base.Field("ChassisType")
    """This property indicates the type of physical form factor of this
    resource.
    """

    manufacturer = base.Field("Manufacturer")
    """This is the manufacturer of this chassis."""

    model = base.Field("Model")
    """This is the model number for the chassis."""

    sku = base.Field("SKU")
    """This is the SKU for this chassis."""

    serial_number = base.Field("SerialNumber")
    """The serial number for this chassis."""

    part_number = base.Field("PartNumber")
    """The part number for this chassis."""

    asset_tag = base.Field("AssetTag")
    """The user assigned asset tag for this chassis."""

    indicator_led = base.Field("IndicatorLED")
    """The state of the indicator LED, used to identify the chassis."""

    links = LinksField("Links")
    """Contains references to other resources that are related to this
    resource.
    """

    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """

    power_state = base.Field("PowerState")
    """This is the current power state of the chassis."""

    physical_security = PhysicalSecurityField("PhysicalSecurity")
    """The state of the physical security sensor."""

    # Physical location of the chassis (parsed by the shared LocationField
    # helper from rsd_lib_base).
    location = rsd_lib_base.LocationField("Location")

    oem = OemField("Oem")
    """Oem specific properties."""

    @property
    @utils.cache_it
    def log_services(self):
        """Property to provide reference to `LogServiceCollection` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return log_service.LogServiceCollection(
            self._conn,
            utils.get_sub_resource_path_by(self, "LogServices"),
            redfish_version=self.redfish_version,
        )

    @property
    @utils.cache_it
    def thermal_zones(self):
        """Property to provide reference to `ThermalZoneCollection` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return thermal_zone.ThermalZoneCollection(
            self._conn,
            utils.get_sub_resource_path_by(self, "ThermalZones"),
            redfish_version=self.redfish_version,
        )

    @property
    @utils.cache_it
    def power_zones(self):
        """Property to provide reference to `PowerZoneCollection` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return power_zone.PowerZoneCollection(
            self._conn,
            utils.get_sub_resource_path_by(self, "PowerZones"),
            redfish_version=self.redfish_version,
        )

    @property
    @utils.cache_it
    def thermal(self):
        """Property to provide reference to `Thermal` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return thermal.Thermal(
            self._conn,
            utils.get_sub_resource_path_by(self, "Thermal"),
            redfish_version=self.redfish_version,
        )

    @property
    @utils.cache_it
    def power(self):
        """Property to provide reference to `Power` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return power.Power(
            self._conn,
            utils.get_sub_resource_path_by(self, "Power"),
            redfish_version=self.redfish_version,
        )

    def update(self, asset_tag=None, location_id=None):
        """Update AssetTag and Location->Id properties

        :param asset_tag: The user assigned asset tag for this chassis
        :param location_id: The user assigned location id for this chassis.
            It can be changed only for a Rack Chassis
        """
        data = {}
        if asset_tag is not None:
            data["AssetTag"] = asset_tag
        if location_id is not None:
            # Location id is only writable through the Intel RackScale OEM
            # extension of the Chassis resource.
            data["Oem"] = {
                "Intel_RackScale": {"Location": {"Id": location_id}}
            }
        # NOTE(review): when both arguments are None an empty PATCH body is
        # sent -- confirm the service tolerates a no-op PATCH.
        self._conn.patch(self.path, data=data)
class ChassisCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection of `Chassis` resources."""

    @property
    def _resource_type(self):
        # Resource class instantiated for each member of the collection.
        return Chassis
from sushy.resources import base
from rsd_lib import base as rsd_lib_base
from rsd_lib import utils as rsd_lib_utils
class LinksField(base.CompositeField):
    """References from a LogEntry to related resources."""

    origin_of_condition = base.Field(
        "OriginOfCondition", adapter=rsd_lib_utils.get_resource_identity
    )
    """This is the URI of the resource that caused the log entry"""
class LogEntry(rsd_lib_base.ResourceBase):
    """LogEntry resource class

    This resource defines the record format for a log. It is designed to
    be used for SEL logs (from IPMI) as well as Event Logs and OEM-specific
    log formats. The EntryType field indicates the type of log and the
    resource includes several additional properties dependent on the
    EntryType.
    """

    severity = base.Field("Severity")
    """This is the severity of the log entry."""

    created = base.Field("Created")
    """The time the log entry was created."""

    entry_type = base.Field("EntryType")
    """This is the type of log entry."""

    oem_record_format = base.Field("OemRecordFormat")
    """If the entry type is Oem, this will contain more information about the
    record format from the Oem.
    """

    entry_code = base.Field("EntryCode")
    """If the EntryType is SEL, this will have the entry code for the log
    entry.
    """

    sensor_type = base.Field("SensorType")
    """If the EntryType is SEL, this will have the sensor type that the log
    entry pertains to.
    """

    sensor_number = base.Field(
        "SensorNumber", adapter=rsd_lib_utils.num_or_none
    )
    """This property decodes from EntryType: If it is SEL, it is the sensor
    number; if Event then the count of events. Otherwise, it is Oem
    specific.
    """

    message = base.Field("Message")
    """This property decodes from EntryType: If it is Event then it is a
    message string. Otherwise, it is SEL or Oem specific. In most cases,
    this will be the actual Log Entry.
    """

    message_id = base.Field("MessageId")
    """This property decodes from EntryType: If it is Event then it is a
    message id. Otherwise, it is SEL or Oem specific. This value is only
    used for registries - for more information, see the specification.
    """

    message_args = base.Field("MessageArgs")
    """The values of this property shall be any arguments for the message."""

    links = LinksField("Links")
    """Contains references to other resources that are related to this
    resource.
    """

    event_type = base.Field("EventType")
    """This indicates the type of an event recorded in this log."""

    event_id = base.Field("EventId")
    """This is a unique instance identifier of an event."""

    event_timestamp = base.Field("EventTimestamp")
    """This is time the event occurred."""
class LogEntryCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection of `LogEntry` resources."""

    @property
    def _resource_type(self):
        # Resource class instantiated for each member of the collection.
        return LogEntry
from sushy.resources import base
from rsd_lib import base as rsd_lib_base
from rsd_lib import utils as rsd_lib_utils
class SSDProtocolField(base.CompositeField):
    """Settings for the manager's SSDP protocol support.

    NOTE(review): the class name is missing a 'P' (SSDP); it is kept as-is
    because it is referenced elsewhere in this module.
    """

    protocol_enabled = base.Field("ProtocolEnabled", adapter=bool)
    """Indicates if the protocol is enabled or disabled"""

    port = base.Field("Port", adapter=rsd_lib_utils.num_or_none)
    """Indicates the protocol port."""

    notify_multicast_interval_seconds = base.Field(
        "NotifyMulticastIntervalSeconds", adapter=rsd_lib_utils.num_or_none
    )
    """Indicates how often the Multicast is done from this service for SSDP."""

    notify_ttl = base.Field("NotifyTTL", adapter=rsd_lib_utils.num_or_none)
    """Indicates the time to live hop count for SSDPs Notify messages."""

    notify_ipv6_scope = base.Field("NotifyIPv6Scope")
    """Indicates the scope for the IPv6 Notify messages for SSDP."""
class ProtocolField(base.CompositeField):
    """Generic enabled/port settings shared by the simple network protocols."""

    protocol_enabled = base.Field("ProtocolEnabled", adapter=bool)
    """Indicates if the protocol is enabled or disabled"""

    port = base.Field("Port", adapter=rsd_lib_utils.num_or_none)
    """Indicates the protocol port."""
class ManagerNetworkProtocol(rsd_lib_base.ResourceBase):
    """ManagerNetworkProtocol resource class

    This resource is used to obtain or modify the network services managed
    by a given manager.
    """

    host_name = base.Field("HostName")
    """The DNS Host Name of this manager, without any domain information"""

    fqdn = base.Field("FQDN")
    """This is the fully qualified domain name for the manager obtained by DNS
    including the host name and top-level domain name.
    """

    http = ProtocolField("HTTP")
    """Settings for this Manager's HTTP protocol support"""

    https = ProtocolField("HTTPS")
    """Settings for this Manager's HTTPS protocol support"""

    snmp = ProtocolField("SNMP")
    """Settings for this Manager's SNMP support"""

    virtual_media = ProtocolField("VirtualMedia")
    """Settings for this Manager's Virtual Media support"""

    telnet = ProtocolField("Telnet")
    """Settings for this Manager's Telnet protocol support"""

    ssdp = SSDProtocolField("SSDP")
    """Settings for this Manager's SSDP support"""

    ipmi = ProtocolField("IPMI")
    """Settings for this Manager's IPMI-over-LAN protocol support"""

    ssh = ProtocolField("SSH")
    """Settings for this Manager's SSH (Secure Shell) protocol support"""

    kvmip = ProtocolField("KVMIP")
    """Settings for this Manager's KVM-IP protocol support"""

    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """
from sushy.resources import base
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib.resources.v2_1.chassis import log_service
from rsd_lib.resources.v2_1.common import redundancy
from rsd_lib.resources.v2_1.manager import manager_network_protocol
from rsd_lib.resources.v2_1.manager import serial_interface
from rsd_lib.resources.v2_1.manager import virtual_media
from rsd_lib.resources.v2_1.system import ethernet_interface
from rsd_lib import utils as rsd_lib_utils
class LinksIntelRackScaleField(base.CompositeField):
    """Intel Rack Scale Design specific links for a Manager."""

    manager_for_services = base.Field(
        "ManagerForServices", adapter=utils.get_members_identities
    )
    """Identities referenced by the ManagerForServices link array."""

    manager_for_switches = base.Field(
        "ManagerForSwitches", adapter=utils.get_members_identities
    )
    """Identities referenced by the ManagerForSwitches link array."""
class LinksOemField(base.CompositeField):
    """OEM wrapper for the vendor-specific part of the Manager Links."""

    intel_rackscale = LinksIntelRackScaleField("Intel_RackScale")
    """Intel Rack Scale Design specific properties."""
class LinksField(base.CompositeField):
    """References from a Manager to the resources it manages or lives in."""

    manager_for_servers = base.Field(
        "ManagerForServers", adapter=utils.get_members_identities
    )
    """This property is an array of references to the systems that this
    manager has control over.
    """

    manager_for_chassis = base.Field(
        "ManagerForChassis", adapter=utils.get_members_identities
    )
    """This property is an array of references to the chassis that this
    manager has control over.
    """

    manager_in_chassis = base.Field(
        "ManagerInChassis", adapter=rsd_lib_utils.get_resource_identity
    )
    """This property is a reference to the chassis that this manager is
    located in.
    """

    oem = LinksOemField("Oem")
    """Oem specific properties."""
class SerialConsoleField(base.CompositeField):
    """SerialConsole field

    Used for describing services like Serial Console, Command Shell or
    Graphical Console
    """

    service_enabled = base.Field("ServiceEnabled", adapter=bool)
    """Indicates if the service is enabled for this manager."""

    max_concurrent_sessions = base.Field(
        "MaxConcurrentSessions", adapter=rsd_lib_utils.num_or_none
    )
    """Indicates the maximum number of service sessions, regardless of
    protocol, this manager is able to support.
    """

    connect_types_supported = base.Field("ConnectTypesSupported")
    """This object is used to enumerate the Serial Console connection types
    allowed by the implementation.
    """
class GraphicalConsoleField(base.CompositeField):
    """GraphicalConsole field

    Used for describing services like Serial Console, Command Shell or
    Graphical Console
    """

    service_enabled = base.Field("ServiceEnabled", adapter=bool)
    """Indicates if the service is enabled for this manager."""

    max_concurrent_sessions = base.Field(
        "MaxConcurrentSessions", adapter=rsd_lib_utils.num_or_none
    )
    """Indicates the maximum number of service sessions, regardless of
    protocol, this manager is able to support.
    """

    connect_types_supported = base.Field("ConnectTypesSupported")
    """This object is used to enumerate the Graphical Console connection types
    allowed by the implementation.
    """
class CommandShellField(base.CompositeField):
    """CommandShell field

    Used for describing services like Serial Console, Command Shell or
    Graphical Console
    """

    service_enabled = base.Field("ServiceEnabled", adapter=bool)
    """Indicates if the service is enabled for this manager."""

    max_concurrent_sessions = base.Field(
        "MaxConcurrentSessions", adapter=rsd_lib_utils.num_or_none
    )
    """Indicates the maximum number of service sessions, regardless of
    protocol, this manager is able to support.
    """

    connect_types_supported = base.Field("ConnectTypesSupported")
    """This object is used to enumerate the Command Shell connection types
    allowed by the implementation.
    """
class Manager(rsd_lib_base.ResourceBase):
    """Manager resource class

    This is the schema definition for a Manager. Examples of managers are
    BMCs, Enclosure Managers, Management Controllers and other subsystems
    assigned manageability functions.
    """

    manager_type = base.Field("ManagerType")
    """This property represents the type of manager that this resource
    represents.
    """

    links = LinksField("Links")
    """Contains references to other resources that are related to this
    resource.
    """

    service_entry_point_uuid = base.Field("ServiceEntryPointUUID")
    """The UUID of the Redfish Service provided by this manager"""

    uuid = base.Field("UUID")
    """The Universal Unique Identifier (UUID) for this Manager"""

    model = base.Field("Model")
    """The model information of this Manager as defined by the manufacturer"""

    date_time = base.Field("DateTime")
    """The current DateTime (with offset) for the manager, used to set or read
    time.
    """

    date_time_local_offset = base.Field("DateTimeLocalOffset")
    """The time offset from UTC that the DateTime property is set to in
    format: +06:00 .
    """

    firmware_version = base.Field("FirmwareVersion")
    """The firmware version of this Manager"""

    serial_console = SerialConsoleField("SerialConsole")
    """Information about the Serial Console service provided by this manager.
    """

    command_shell = CommandShellField("CommandShell")
    """Information about the Command Shell service provided by this manager."""

    graphical_console = GraphicalConsoleField("GraphicalConsole")
    """The value of this property shall contain the information about the
    Graphical Console (KVM-IP) service of this manager.
    """

    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """

    # NOTE: this class attribute shadows the imported ``redundancy`` module
    # within the class body; the module remains accessible at module scope.
    redundancy = redundancy.RedundancyCollectionField("Redundancy")
    """Redundancy information for the managers of this system"""

    power_state = base.Field("PowerState")
    """This is the current power state of the Manager."""

    # TODO(linyang): Add Action Field

    @property
    @utils.cache_it
    def ethernet_interfaces(self):
        """Property to provide reference to `EthernetInterfaceCollection` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return ethernet_interface.EthernetInterfaceCollection(
            self._conn,
            utils.get_sub_resource_path_by(self, "EthernetInterfaces"),
            redfish_version=self.redfish_version,
        )

    @property
    @utils.cache_it
    def serial_interfaces(self):
        """Property to provide reference to `SerialInterfaceCollection` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return serial_interface.SerialInterfaceCollection(
            self._conn,
            utils.get_sub_resource_path_by(self, "SerialInterfaces"),
            redfish_version=self.redfish_version,
        )

    @property
    @utils.cache_it
    def network_protocol(self):
        """Property to provide reference to `ManagerNetworkProtocol` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return manager_network_protocol.ManagerNetworkProtocol(
            self._conn,
            utils.get_sub_resource_path_by(self, "NetworkProtocol"),
            redfish_version=self.redfish_version,
        )

    @property
    @utils.cache_it
    def log_services(self):
        """Property to provide reference to `LogServiceCollection` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return log_service.LogServiceCollection(
            self._conn,
            utils.get_sub_resource_path_by(self, "LogServices"),
            redfish_version=self.redfish_version,
        )

    @property
    @utils.cache_it
    def virtual_media(self):
        """Property to provide reference to `VirtualMediaCollection` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return virtual_media.VirtualMediaCollection(
            self._conn,
            utils.get_sub_resource_path_by(self, "VirtualMedia"),
            redfish_version=self.redfish_version,
        )
class ManagerCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection of `Manager` resources."""

    @property
    def _resource_type(self):
        # Resource class instantiated for each member of the collection.
        return Manager
from jsonschema import validate
import logging
from sushy.resources import base
from rsd_lib import base as rsd_lib_base
from rsd_lib.resources.v2_1.event_service import schemas \
as event_service_schemas
LOG = logging.getLogger(__name__)
class HttpHeaderPropertyCollectionField(base.ListField):
    """HttpHeaderProperty field

    The value of the HTTP header is the property value. The header name is
    the property name.
    """

    # NOTE(review): these fields mirror the "Pattern"/"Type" keys of the
    # HttpHeaderProperty schema object -- confirm against the Redfish
    # EventDestination schema.
    pattern = base.Field("Pattern")

    type = base.Field("Type")
class EventDestination(rsd_lib_base.ResourceBase):
    """EventDestination resource class

    An Event Destination describes the target of an event subscription,
    including the types of events subscribed and context to provide to the
    target in the Event payload.
    """

    destination = base.Field("Destination")
    """The URI of the destination Event Service."""

    event_types = base.Field("EventTypes")
    """This property shall contain the types of events that shall be sent to
    the desination.
    """

    context = base.Field("Context")
    """A client-supplied string that is stored with the event destination
    subscription.
    """

    protocol = base.Field("Protocol")
    """The protocol type of the event connection."""

    http_headers = HttpHeaderPropertyCollectionField("HttpHeaders")
    """This is for setting HTTP headers, such as authorization information.
    This object will be null on a GET.
    """

    message_ids = base.Field("MessageIds")
    """A list of MessageIds that the service will only send."""
class EventDestinationCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection of `EventDestination` (event subscription) resources."""

    @property
    def _resource_type(self):
        # Resource class instantiated for each member of the collection.
        return EventDestination

    def create_event_subscription(self, event_subscription_req):
        """Create a new event subscription

        :param event_subscription_req: JSON for event subscription
        :returns: The uri of the new event subscription
        """
        target_uri = self._path
        # Reject malformed requests client-side before POSTing.
        validate(
            event_subscription_req,
            event_service_schemas.event_subscription_req_schema,
        )
        resp = self._conn.post(target_uri, data=event_subscription_req)
        event_subscription_url = resp.headers["Location"]
        LOG.info("event subscription created at %s", event_subscription_url)
        # Strip any scheme/host prefix from the Location header, keeping
        # the path starting at this collection's URI.
        # NOTE(review): if the Location header does not contain self._path,
        # find() returns -1 and only the last character is returned --
        # confirm the service always echoes the collection path.
        return event_subscription_url[
            event_subscription_url.find(self._path):
        ]
import logging
from jsonschema import validate
from sushy.resources import base
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib.resources.v2_1.ethernet_switch import ethernet_switch_port
from rsd_lib.resources.v2_1.ethernet_switch import schemas as acl_rule_schema
from rsd_lib import utils as rsd_lib_utils
LOG = logging.getLogger(__name__)
class PortConditionTypeField(base.CompositeField):
    """L4 port match condition: port value plus bit mask."""

    port = base.Field("Port", adapter=rsd_lib_utils.num_or_none)

    mask = base.Field("Mask", adapter=rsd_lib_utils.num_or_none)
class VlanIdConditionTypeField(base.CompositeField):
    """VLAN id match condition: id value plus bit mask."""

    identity = base.Field("Id", adapter=rsd_lib_utils.num_or_none)

    mask = base.Field("Mask", adapter=rsd_lib_utils.num_or_none)
class MACConditionTypeField(base.CompositeField):
    """MAC address match condition: address plus mask."""

    mac_address = base.Field("MACAddress")

    mask = base.Field("Mask")
class IPConditionTypeField(base.CompositeField):
    """IPv4 address match condition: address plus mask."""

    ipv4_address = base.Field("IPv4Address")

    mask = base.Field("Mask")
class ConditionTypeField(base.CompositeField):
    """Set of packet-match conditions that trigger an ACL rule's action."""

    ip_source = IPConditionTypeField("IPSource")

    ip_destination = IPConditionTypeField("IPDestination")

    mac_source = MACConditionTypeField("MACSource")

    mac_destination = MACConditionTypeField("MACDestination")

    vlan_id = VlanIdConditionTypeField("VLANId")

    l4_source_port = PortConditionTypeField("L4SourcePort")

    l4_destination_port = PortConditionTypeField("L4DestinationPort")

    l4_protocol = base.Field("L4Protocol", adapter=rsd_lib_utils.num_or_none)
class EthernetSwitchACLRule(rsd_lib_base.ResourceBase):
    """EthernetSwitchACLRule resource class

    A Ethernet Switch ACL Rule represents Access Control List rule for
    switch.
    """

    rule_id = base.Field("RuleId", adapter=rsd_lib_utils.num_or_none)
    """This is ACL rule ID which determine rule priority."""

    action = base.Field("Action")
    """Action that will be executed when rule condition will be met."""

    mirror_type = base.Field("MirrorType")
    """Type of mirroring that should be use for Mirror action."""

    condition = ConditionTypeField("Condition")
    """Property contain set of conditions that should be met to trigger Rule
    action.
    """

    @property
    @utils.cache_it
    def forward_mirror_interface(self):
        """Property to provide reference to `EthernetSwitchPort` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return ethernet_switch_port.EthernetSwitchPort(
            self._conn,
            utils.get_sub_resource_path_by(self, "ForwardMirrorInterface"),
            redfish_version=self.redfish_version,
        )

    @property
    @utils.cache_it
    def mirror_port_region(self):
        """Property to provide a list of `EthernetSwitchPort` instance

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return [
            ethernet_switch_port.EthernetSwitchPort(
                self._conn, path, redfish_version=self.redfish_version
            )
            for path in rsd_lib_utils.get_sub_resource_path_list_by(
                self, "MirrorPortRegion"
            )
        ]

    def update(self, data=None):
        """Update an existing ACL rule

        Calling with no (or empty) data is a no-op.

        :param data: JSON for acl_rule
        """
        # Build a validation schema without the top-level "required"
        # constraint so partial updates are accepted. A shallow filtered
        # copy is used on purpose: the previous implementation ran
        # ``del update_schema["required"]`` on the shared module-level
        # schema, permanently corrupting it for subsequent
        # create_acl_rule() validations.
        update_schema = {
            key: value
            for key, value in acl_rule_schema.acl_rule_req_schema.items()
            if key != "required"
        }
        # The previous condition used ``or``, which evaluated len(None)
        # and raised TypeError whenever update() was called without data.
        if data:
            validate(data, update_schema)
            self._conn.patch(self.path, data=data)
class EthernetSwitchACLRuleCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection of `EthernetSwitchACLRule` resources."""

    @property
    def _resource_type(self):
        # Resource class instantiated for each member of the collection.
        return EthernetSwitchACLRule

    def create_acl_rule(self, acl_rule_req):
        """Create a new ACL rule

        :param acl_rule: JSON for acl_rule
        :returns: The location of the acl rule
        """
        target_uri = self._path
        # Reject malformed requests client-side before POSTing.
        validate(acl_rule_req, acl_rule_schema.acl_rule_req_schema)
        resp = self._conn.post(target_uri, data=acl_rule_req)
        acl_rule_url = resp.headers["Location"]
        LOG.info("Create ACL Rule at %s", acl_rule_url)
        # Strip any scheme/host prefix from the Location header, keeping
        # the path starting at this collection's URI.
        return acl_rule_url[acl_rule_url.find(self._path):]
# JSON schema used to validate VLAN network interface creation requests.
# The Intel_RackScale OEM extension (with its "Tagged" flag) is mandatory.
vlan_network_interface_req_schema = {
    "type": "object",
    "properties": {
        "VLANId": {"type": "number"},
        "VLANEnable": {"type": "boolean"},
        "Oem": {
            "type": "object",
            "properties": {
                "Intel_RackScale": {
                    "type": "object",
                    "properties": {"Tagged": {"type": "boolean"}},
                    "required": ["Tagged"],
                }
            },
            "required": ["Intel_RackScale"],
        },
    },
    "required": ["VLANId", "VLANEnable", "Oem"],
    "additionalProperties": False,
}
# JSON schema used to validate ACL rule create/update requests.  The
# "oneOf" branch ties the mandatory sub-properties to the chosen Action.
acl_rule_req_schema = {
    "type": "object",
    "oneOf": [
        {
            "properties": {"Action": {"enum": ["Forward"]}},
            "required": ["ForwardMirrorInterface"],
        },
        {
            "properties": {"Action": {"enum": ["Mirror"]}},
            "required": [
                "ForwardMirrorInterface",
                "MirrorPortRegion",
                "MirrorType",
            ],
        },
        {"properties": {"Action": {"enum": ["Permit", "Deny"]}}},
    ],
    "properties": {
        "RuleId": {"type": "number"},
        "Action": {
            "type": "string",
            "enum": ["Permit", "Deny", "Forward", "Mirror"],
        },
        "ForwardMirrorInterface": {
            "type": "object",
            "properties": {"@odata.id": {"type": "string"}},
            "required": ["@odata.id"],
        },
        "MirrorPortRegion": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
                "required": ["@odata.id"],
            },
        },
        "MirrorType": {
            "type": "string",
            "enum": ["Egress", "Ingress", "Bidirectional", "Redirect"],
        },
        "Condition": {
            "type": "object",
            "properties": {
                "IPSource": {
                    "type": "object",
                    "properties": {
                        # Fixed property name: was "IPv4Addresses", which
                        # contradicted the "required" list below and the
                        # parallel IPDestination definition.
                        "IPv4Address": {"type": "string"},
                        "Mask": {"type": ["string", "null"]},
                    },
                    "required": ["IPv4Address"],
                },
                "IPDestination": {
                    "type": "object",
                    "properties": {
                        "IPv4Address": {"type": "string"},
                        "Mask": {"type": ["string", "null"]},
                    },
                    "required": ["IPv4Address"],
                },
                "MACSource": {
                    "type": "object",
                    "properties": {
                        "MACAddress": {"type": "string"},
                        "Mask": {"type": ["string", "null"]},
                    },
                    "required": ["MACAddress"],
                },
                "MACDestination": {
                    "type": "object",
                    "properties": {
                        "MACAddress": {"type": "string"},
                        "Mask": {"type": ["string", "null"]},
                    },
                    "required": ["MACAddress"],
                },
                "VLANId": {
                    "type": "object",
                    "properties": {
                        "Id": {"type": "number"},
                        "Mask": {"type": ["number", "null"]},
                    },
                    "required": ["Id"],
                },
                "L4SourcePort": {
                    "type": "object",
                    "properties": {
                        "Port": {"type": "number"},
                        "Mask": {"type": ["number", "null"]},
                    },
                    "required": ["Port"],
                },
                "L4DestinationPort": {
                    "type": "object",
                    "properties": {
                        "Port": {"type": "number"},
                        "Mask": {"type": ["number", "null"]},
                    },
                    "required": ["Port"],
                },
                "L4Protocol": {"type": ["number", "null"]},
            },
        },
    },
    "required": ["Action", "Condition"],
    "additionalProperties": False,
}
# JSON schema used to validate Ethernet switch port create/update request
# bodies (see EthernetSwitchPortCollection.create_port and
# EthernetSwitchPort.update).  Only "PortId" is mandatory.
port_req_schema = {
    "type": "object",
    "properties": {
        "PortId": {"type": "number"},
        "LinkType": {"type": "string", "enum": ["Ethernet", "PCIe"]},
        "OperationalState": {"type": "string", "enum": ["Up", "Down"]},
        "AdministrativeState": {"type": "string", "enum": ["Up", "Down"]},
        "LinkSpeedMbps": {"type": ["number", "null"]},
        # Upstream-neighbor identification (switch/port/cable).
        "NeighborInfo": {
            "type": "object",
            "properties": {
                "SwitchId": {"type": "string"},
                "PortId": {"type": "string"},
                "CableId": {"type": "string"},
            },
            "additionalProperties": False,
        },
        "NeighborMAC": {"type": "string"},
        "FrameSize": {"type": ["number", "null"]},
        "Autosense": {"type": "boolean"},
        "FullDuplex": {"type": "boolean"},
        "MACAddress": {"type": "string"},
        "IPv4Addresses": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "Address": {"type": "string"},
                    "SubnetMask": {"type": "string"},
                    "AddressOrigin": {
                        "type": "string",
                        "enum": ["Static", "DHCP", "BOOTP", "IPv4LinkLocal"],
                    },
                    "Gateway": {"type": "string"},
                },
                "additionalProperties": False,
            },
        },
        "IPv6Addresses": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "Address": {"type": "string"},
                    # IPv6 prefix length is 1..128 bits.
                    "PrefixLength": {
                        "type": "number",
                        "minimum": 1,
                        "maximum": 128,
                    },
                    "AddressOrigin": {
                        "type": "string",
                        "enum": ["Static", "DHCPv6", "LinkLocal", "SLAAC"],
                    },
                    "AddressState": {
                        "type": "string",
                        "enum": [
                            "Preferred",
                            "Deprecated",
                            "Tentative",
                            "Failed",
                        ],
                    },
                },
                "additionalProperties": False,
            },
        },
        "PortClass": {
            "type": "string",
            "enum": ["Physical", "Logical", "Reserved"],
        },
        "PortMode": {
            "type": "string",
            "enum": [
                "LinkAggregationStatic",
                "LinkAggregationDynamic",
                "Unknown",
            ],
        },
        "PortType": {
            "type": "string",
            "enum": ["Upstream", "Downstream", "MeshPort", "Unknown"],
        },
        # Standard Redfish Status object (State/Health/HealthRollup).
        "Status": {
            "type": "object",
            "properties": {
                "State": {
                    "type": "string",
                    "enum": [
                        "Enabled",
                        "Disabled",
                        "StandbyOffline",
                        "StandbySpare",
                        "InTest",
                        "Starting",
                        "Absent",
                        "UnavailableOffline",
                        "Deferring",
                        "Quiesced",
                        "Updating",
                    ],
                },
                "HealthRollup": {
                    "type": "string",
                    "enum": ["OK", "Warning", "Critical"],
                },
                "Health": {
                    "type": "string",
                    "enum": ["OK", "Warning", "Critical"],
                },
            },
            "additionalProperties": False,
        },
        "VLANs": {
            "type": "object",
            "properties": {"@odata.id": {"type": "string"}},
            "required": ["@odata.id"],
        },
        "StaticMACs": {
            "type": "object",
            "properties": {"@odata.id": {"type": "string"}},
            "required": ["@odata.id"],
        },
        # References to related resources (VLAN, switch, aggregation, ACLs).
        "Links": {
            "type": "object",
            "properties": {
                "PrimaryVLAN": {
                    "type": "object",
                    "properties": {"@odata.id": {"type": "string"}},
                    "required": ["@odata.id"],
                },
                "Switch": {
                    "type": "object",
                    "properties": {"@odata.id": {"type": "string"}},
                    "required": ["@odata.id"],
                },
                "MemberOfPort": {
                    "type": "object",
                    "properties": {"@odata.id": {"type": "string"}},
                    "required": ["@odata.id"],
                },
                "PortMembers": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {"@odata.id": {"type": "string"}},
                        "required": ["@odata.id"],
                    },
                },
                "ActiveACLs": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {"@odata.id": {"type": "string"}},
                        "required": ["@odata.id"],
                    },
                },
            },
            "additionalProperties": False,
        },
    },
    "required": ["PortId"],
    "additionalProperties": False,
}
from jsonschema import validate
import logging
from sushy import exceptions
from sushy.resources import base
from rsd_lib import base as rsd_lib_base
from rsd_lib import utils as rsd_lib_utils
LOG = logging.getLogger(__name__)
class EthernetSwitchStaticMAC(rsd_lib_base.ResourceBase):
    """EthernetSwitchStaticMAC resource class

    A static MAC forwarding entry on an Ethernet switch port.
    (The previous docstring incorrectly described this class as an ACL.)
    """

    vlan_id = base.Field("VLANId", adapter=rsd_lib_utils.num_or_none)
    """The static mac vlan id"""

    mac_address = base.Field("MACAddress")
    """The static mac address"""

    def update(self, mac_address, vlan_id=None):
        """Update attributes of static MAC

        :param mac_address: MAC address that should be forwarded to this port
        :param vlan_id: If specified, defines which packets tagged with
            specific VLANId should be forwarded to this port
        :raises: InvalidParameterValueError if mac_address is not a string
            or vlan_id is not an int
        """
        # `str` replaces the obscure `type("")` idiom.
        if not isinstance(mac_address, str):
            raise exceptions.InvalidParameterValueError(
                parameter="mac_address",
                value=mac_address,
                valid_values="string",
            )
        data = {"MACAddress": mac_address}
        if vlan_id is not None:
            if not isinstance(vlan_id, int):
                raise exceptions.InvalidParameterValueError(
                    parameter="vlan_id", value=vlan_id, valid_values="int"
                )
            data["VLANId"] = vlan_id
        self._conn.patch(self.path, data=data)
class EthernetSwitchStaticMACCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection of `EthernetSwitchStaticMAC` resources."""

    @property
    def _resource_type(self):
        # Member class instantiated for each entry in the collection.
        return EthernetSwitchStaticMAC

    def create_static_mac(self, mac_address, vlan_id=None):
        """Create new static MAC entry

        :param mac_address: MAC address that should be forwarded to this port
        :param vlan_id: If specified, defines which packets tagged with
            specific VLANId should be forwarded to this port
        :returns: The location of new static MAC entry
        :raises: jsonschema.ValidationError on invalid argument types
        """
        validate(mac_address, {"type": "string"})
        payload = {"MACAddress": mac_address}
        if vlan_id is not None:
            validate(vlan_id, {"type": "number"})
            payload["VLANId"] = vlan_id
        response = self._conn.post(self.path, data=payload)
        location = response.headers["Location"]
        LOG.info("Static MAC created at %s", location)
        # Strip any scheme/host prefix so only the resource path remains.
        return location[location.find(self._path):]
import logging
from sushy import exceptions
from sushy.resources import base
from sushy.resources import common
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib.resources.v2_1.ethernet_switch import ethernet_switch_acl_rule
LOG = logging.getLogger(__name__)
class BindActionField(common.ActionField):
    """#EthernetSwitchACL.Bind action descriptor."""

    # Port identities accepted by the Bind action
    # (from Port@Redfish.AllowableValues).
    allowed_values = base.Field(
        "Port@Redfish.AllowableValues", adapter=utils.get_members_identities
    )
class UnbindActionField(common.ActionField):
    """#EthernetSwitchACL.Unbind action descriptor."""

    # Port identities accepted by the Unbind action
    # (from Port@Redfish.AllowableValues).
    allowed_values = base.Field(
        "Port@Redfish.AllowableValues", adapter=utils.get_members_identities
    )
class EthernetSwitchACLActionsField(base.CompositeField):
    """Actions available on an EthernetSwitchACL resource."""

    bind = BindActionField("#EthernetSwitchACL.Bind")
    unbind = UnbindActionField("#EthernetSwitchACL.Unbind")
class LinksField(base.CompositeField):
    """Links section of an EthernetSwitchACL resource."""

    # Identities of the switch ports this ACL is currently bound to.
    bound_ports = base.Field(
        "BoundPorts", adapter=utils.get_members_identities
    )
class EthernetSwitchACL(rsd_lib_base.ResourceBase):
    """EthernetSwitchACL resource class

    A Ethernet Switch ACL represents Access Control List for switch.
    """

    links = LinksField("Links")

    _actions = EthernetSwitchACLActionsField("Actions")

    def _get_bind_action_element(self):
        """Return the Bind action element, raising if it is absent."""
        action = self._actions.bind
        if action:
            return action
        raise exceptions.MissingActionError(
            action="#EthernetSwitchACL.Bind", resource=self._path
        )

    def get_allowed_bind_ports(self):
        """Get the allowed ports for bind action.

        :returns: A set with the allowed bind ports.
        """
        return self._get_bind_action_element().allowed_values

    def bind_port(self, port):
        """Bind a port to this switch ACL

        :param port: Link to port to bind.
        :raises: InvalidParameterValueError if port is not an allowed value
        """
        action = self._get_bind_action_element()
        allowed = action.allowed_values
        if port and port not in allowed:
            raise exceptions.InvalidParameterValueError(
                parameter="port", value=port, valid_values=allowed
            )
        self._conn.post(action.target_uri, data={"Port": {"@odata.id": port}})

    def _get_unbind_action_element(self):
        """Return the Unbind action element, raising if it is absent."""
        action = self._actions.unbind
        if action:
            return action
        raise exceptions.MissingActionError(
            action="#EthernetSwitchACL.Unbind", resource=self._path
        )

    def get_allowed_unbind_ports(self):
        """Get the allowed ports for unbind action.

        :returns: A set with the allowed unbind ports.
        """
        return self._get_unbind_action_element().allowed_values

    def unbind_port(self, port):
        """Unbind a port from this switch ACL

        :param port: Link to port to unbind.
        :raises: InvalidParameterValueError if port is not an allowed value
        """
        action = self._get_unbind_action_element()
        allowed = action.allowed_values
        if port and port not in allowed:
            raise exceptions.InvalidParameterValueError(
                parameter="port", value=port, valid_values=allowed
            )
        self._conn.post(action.target_uri, data={"Port": {"@odata.id": port}})

    @property
    @utils.cache_it
    def rules(self):
        """Reference to this ACL's `EthernetSwitchACLRuleCollection`

        Calculated once when queried for the first time; reset on refresh.
        """
        path = utils.get_sub_resource_path_by(self, "Rules")
        return ethernet_switch_acl_rule.EthernetSwitchACLRuleCollection(
            self._conn, path, redfish_version=self.redfish_version
        )
class EthernetSwitchACLCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection of `EthernetSwitchACL` resources."""

    @property
    def _resource_type(self):
        # Member class instantiated for each entry in the collection.
        return EthernetSwitchACL

    def create_acl(self):
        """Create a new ACL

        :returns: The location of the new ACL
        """
        response = self._conn.post(self._path, data={})
        location = response.headers["Location"]
        LOG.info("Create ACL at %s", location)
        # Strip any scheme/host prefix so only the resource path remains.
        return location[location.find(self._path):]
import logging
from jsonschema import validate
from sushy.resources import base
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib.resources.v2_1.common import ip_addresses
from rsd_lib.resources.v2_1.ethernet_switch import ethernet_switch_static_mac
from rsd_lib.resources.v2_1.ethernet_switch import schemas as port_schema
from rsd_lib.resources.v2_1.ethernet_switch import vlan_network_interface
from rsd_lib import utils as rsd_lib_utils
LOG = logging.getLogger(__name__)
class NeighborInfoField(base.CompositeField):
    """Neighbor switch/port/cable identification for an upstream port."""

    switch_id = base.Field("SwitchId")
    port_id = base.Field("PortId")
    cable_id = base.Field("CableId")
class LinksIntelRackScaleField(base.CompositeField):
    """Intel Rack Scale OEM extension of a port's Links section."""

    # Identity of the neighbor Ethernet interface connected to this port.
    neighbor_interface = base.Field(
        "NeighborInterface", adapter=rsd_lib_utils.get_resource_identity
    )
class LinksOemField(base.CompositeField):
    """Oem wrapper within a port's Links section."""

    intel_rackscale = LinksIntelRackScaleField("Intel_RackScale")
    """Intel Rack Scale Design specific properties."""
class LinksField(base.CompositeField):
    """Links section of an EthernetSwitchPort resource."""

    # Identity of the primary (untagged) VLAN on this port.
    primary_vlan = base.Field(
        "PrimaryVLAN", adapter=rsd_lib_utils.get_resource_identity
    )
    # Identity of the switch this port belongs to.
    switch = base.Field("Switch", adapter=rsd_lib_utils.get_resource_identity)
    # Identity of the logical (aggregated) port this port is a member of.
    member_of_port = base.Field(
        "MemberOfPort", adapter=rsd_lib_utils.get_resource_identity
    )
    # Identities of physical ports aggregated under this logical port.
    port_members = base.Field(
        "PortMembers", adapter=utils.get_members_identities
    )
    # Identities of ACLs currently applied to this port.
    active_acls = base.Field(
        "ActiveACLs", adapter=utils.get_members_identities
    )
    oem = LinksOemField("Oem")
    """Oem specific properties."""
class EthernetSwitchPort(rsd_lib_base.ResourceBase):
    """EthernetSwitchPort resource class

    A single port of an Ethernet switch.
    """

    port_id = base.Field("PortId")
    """Switch port unique identifier."""

    link_type = base.Field("LinkType")
    """Type of port link"""

    operational_state = base.Field("OperationalState")
    """Port link operational state"""

    administrative_state = base.Field("AdministrativeState")
    """Port link state forced by user."""

    link_speed_mbps = base.Field(
        "LinkSpeedMbps", adapter=rsd_lib_utils.num_or_none
    )
    """Port speed"""

    neighbor_info = NeighborInfoField("NeighborInfo")
    """For Upstream port type this property provide information about neighbor
    switch (and switch port if available) connected to this port
    """

    neighbor_mac = base.Field("NeighborMAC")
    """For Downstream port type this property provide MAC address of NIC
    connected to this port.
    """

    frame_size = base.Field("FrameSize", adapter=rsd_lib_utils.num_or_none)
    """MAC frame size in bytes"""

    autosense = base.Field("Autosense", adapter=bool)
    """Indicates if the speed and duplex is automatically configured by the NIC
    """

    full_duplex = base.Field("FullDuplex", adapter=bool)
    """Indicates if port is in Full Duplex mode or not"""

    mac_address = base.Field("MACAddress")
    """MAC address of port."""

    ipv4_addresses = ip_addresses.IPv4AddressCollectionField("IPv4Addresses")
    """Array of following IPv4 address"""

    ipv6_addresses = ip_addresses.IPv6AddressCollectionField("IPv6Addresses")
    """Array of following IPv6 address"""

    port_class = base.Field("PortClass")
    """Port class"""

    port_mode = base.Field("PortMode")
    """Port working mode. The value shall correspond to the port class
    (especially to the logical port definition).
    """

    port_type = base.Field("PortType")
    """PortType"""

    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """

    links = LinksField("Links")

    @property
    @utils.cache_it
    def vlans(self):
        """Property to provide reference to `VLanNetworkInterfaceCollection`

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return vlan_network_interface.VLanNetworkInterfaceCollection(
            self._conn,
            utils.get_sub_resource_path_by(self, "VLANs"),
            redfish_version=self.redfish_version,
        )

    @property
    @utils.cache_it
    def static_macs(self):
        """Property to provide reference to `EthernetSwitchStaticMACCollection`

        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return ethernet_switch_static_mac.EthernetSwitchStaticMACCollection(
            self._conn,
            utils.get_sub_resource_path_by(self, "StaticMACs"),
            redfish_version=self.redfish_version,
        )

    def update(self, data=None):
        """Update this switch port

        :param data: JSON payload with the port properties to change
        :raises: jsonschema.ValidationError if data does not conform to
            the port request schema
        """
        # Build a copy of the request schema without the top-level
        # "required" list so partial updates validate.  The previous
        # `del update_schema["required"]` permanently mutated the shared
        # port_schema.port_req_schema object, which would silently relax
        # validation for every later create_port() call.
        update_schema = {
            k: v
            for k, v in port_schema.port_req_schema.items()
            if k != "required"
        }
        # Validate only when a non-empty payload was supplied.  The
        # previous `data is not None or len(data) > 0` raised TypeError
        # for data=None because `or` still evaluated len(None).
        if data:
            validate(data, update_schema)
        self._conn.patch(self.path, data=data)
class EthernetSwitchPortCollection(rsd_lib_base.ResourceCollectionBase):
    """Collection of `EthernetSwitchPort` resources."""

    @property
    def _resource_type(self):
        # Member class instantiated for each entry in the collection.
        return EthernetSwitchPort

    def create_port(self, port_req):
        """Create a new Port

        :param port_req: JSON payload describing the new port
        :returns: The location of the Port
        :raises: jsonschema.ValidationError if the payload is invalid
        """
        validate(port_req, port_schema.port_req_schema)
        response = self._conn.post(self._path, data=port_req)
        location = response.headers["Location"]
        LOG.info("Create Port at %s", location)
        # Strip any scheme/host prefix so only the resource path remains.
        return location[location.find(self._path):]
from copy import deepcopy
from rsd_lib.resources.v2_1.types import RESOURCE_CLASS as RESOURCE_CLASS_V21
from rsd_lib.resources.v2_2.chassis import chassis
from rsd_lib.resources.v2_2.chassis import power
from rsd_lib.resources.v2_2.chassis import thermal
from rsd_lib.resources.v2_2.ethernet_switch import ethernet_switch
from rsd_lib.resources.v2_2.ethernet_switch import ethernet_switch_metrics
from rsd_lib.resources.v2_2.ethernet_switch import ethernet_switch_port
from rsd_lib.resources.v2_2.ethernet_switch import ethernet_switch_port_metrics
from rsd_lib.resources.v2_2.fabric import fabric
from rsd_lib.resources.v2_2.fabric import port
from rsd_lib.resources.v2_2.fabric import port_metrics
from rsd_lib.resources.v2_2.fabric import switch
from rsd_lib.resources.v2_2.node import node
from rsd_lib.resources.v2_2.system import computer_system_metrics
from rsd_lib.resources.v2_2.system import memory
from rsd_lib.resources.v2_2.system import memory_metrics
from rsd_lib.resources.v2_2.system import processor
from rsd_lib.resources.v2_2.system import processor_metrics
from rsd_lib.resources.v2_2.system import system
from rsd_lib.resources.v2_2.telemetry_service import metric
from rsd_lib.resources.v2_2.telemetry_service import metric_definition
from rsd_lib.resources.v2_2.telemetry_service import metric_report
from rsd_lib.resources.v2_2.telemetry_service import metric_report_definition
from rsd_lib.resources.v2_2.telemetry_service import telemetry_service
from rsd_lib.resources.v2_2.telemetry_service import triggers
from rsd_lib.resources.v2_2.update_service import action_info
from rsd_lib.resources.v2_2.update_service import update_service
# Map of Redfish resource type names to the rsd-lib classes that implement
# them for RSD v2.2.  Starts from a deep copy of the v2.1 map (so the v2.1
# dict is never mutated) and overrides/extends the entries that changed.
RESOURCE_CLASS = deepcopy(RESOURCE_CLASS_V21)
RESOURCE_CLASS.update(
    {
        "Chassis": chassis.Chassis,
        "ChassisCollection": chassis.ChassisCollection,
        "ComposedNodeCollection": node.NodeCollection,
        "ComputerSystem": system.System,
        "ComputerSystemCollection": system.SystemCollection,
        "ActionInfo": action_info.ActionInfo,
        "ComputerSystemMetrics": computer_system_metrics.ComputerSystemMetrics,
        "EthernetSwitch": ethernet_switch.EthernetSwitch,
        "EthernetSwitchCollection": ethernet_switch.EthernetSwitchCollection,
        "EthernetSwitchMetrics": ethernet_switch_metrics.EthernetSwitchMetrics,
        "EthernetSwitchPort": ethernet_switch_port.EthernetSwitchPort,
        "EthernetSwitchPortCollection":
            ethernet_switch_port.EthernetSwitchPortCollection,
        "EthernetSwitchPortMetrics":
            ethernet_switch_port_metrics.EthernetSwitchPortMetrics,
        "Fabric": fabric.Fabric,
        "FabricCollection": fabric.FabricCollection,
        "Memory": memory.Memory,
        "MemoryCollection": memory.MemoryCollection,
        "MemoryMetrics": memory_metrics.MemoryMetrics,
        "Metric": metric.Metric,
        "MetricDefinition": metric_definition.MetricDefinition,
        "MetricDefinitionCollection":
            metric_definition.MetricDefinitionCollection,
        "MetricReport": metric_report.MetricReport,
        "MetricReportCollection": metric_report.MetricReportCollection,
        "MetricReportDefinition":
            metric_report_definition.MetricReportDefinition,
        "MetricReportDefinitionCollection":
            metric_report_definition.MetricReportDefinitionCollection,
        "Port": port.Port,
        "PortCollection": port.PortCollection,
        "PortMetrics": port_metrics.PortMetrics,
        "Power": power.Power,
        "Processor": processor.Processor,
        "ProcessorCollection": processor.ProcessorCollection,
        "ProcessorMetrics": processor_metrics.ProcessorMetrics,
        "Switch": switch.Switch,
        "SwitchCollection": switch.SwitchCollection,
        "TelemetryService": telemetry_service.TelemetryService,
        "Thermal": thermal.Thermal,
        "Triggers": triggers.Triggers,
        "TriggersCollection": triggers.TriggersCollection,
        "UpdateService": update_service.UpdateService,
    }
)
from sushy.resources import base
from rsd_lib import exceptions as rsd_lib_exceptions
from rsd_lib.resources import v2_1
from rsd_lib.resources.v2_2.chassis import chassis
from rsd_lib.resources.v2_2.ethernet_switch import ethernet_switch
from rsd_lib.resources.v2_2.fabric import fabric
from rsd_lib.resources.v2_2.node import node
from rsd_lib.resources.v2_2.system import system
from rsd_lib.resources.v2_2.telemetry_service import telemetry_service
from rsd_lib.resources.v2_2.types import RESOURCE_CLASS
from rsd_lib.resources.v2_2.update_service import update_service
class RSDLibV2_2(v2_1.RSDLibV2_1):
    """Entry point for an RSD v2.2 service root.

    Extends the v2.1 entry point with v2.2 resource classes and the
    telemetry and update services.
    """

    _ethernet_switches_path = base.Field(
        ["Oem", "Intel_RackScale", "EthernetSwitches", "@odata.id"]
    )
    """EthernetSwitchCollecton path"""

    _nodes_path = base.Field(
        ["Oem", "Intel_RackScale", "Nodes", "@odata.id"], required=True
    )
    """NodeCollection path"""

    _storage_service_path = base.Field(
        ["Oem", "Intel_RackScale", "Services", "@odata.id"]
    )
    """StorageServiceCollection path"""

    _telemetry_service_path = base.Field(["TelemetryService", "@odata.id"])
    """Telemetry Service path"""

    _update_service_path = base.Field(["UpdateService", "@odata.id"])
    """Update Service path"""

    def get_chassis_collection(self):
        """Get the ChassisCollection object

        :raises: MissingAttributeError, if the collection attribute is
            not found
        :returns: a ChassisCollection object
        """
        return chassis.ChassisCollection(
            self._conn,
            self._chassis_path,
            redfish_version=self.redfish_version,
        )

    def get_chassis(self, identity):
        """Given the identity return a Chassis object

        :param identity: The identity of the Chassis resource
        :returns: The Chassis object
        """
        return chassis.Chassis(
            self._conn, identity, redfish_version=self.redfish_version
        )

    def get_system(self, identity):
        """Given the identity return a System object

        :param identity: The identity of the System resource
        :returns: The System object
        """
        return system.System(
            self._conn, identity, redfish_version=self.redfish_version
        )

    def get_system_collection(self):
        """Get the SystemCollection object

        :raises: MissingAttributeError, if the collection attribute is
            not found
        :returns: a SystemCollection object
        """
        return system.SystemCollection(
            self._conn,
            self._systems_path,
            redfish_version=self.redfish_version,
        )

    def get_node_collection(self):
        """Get the NodeCollection object

        :raises: MissingAttributeError, if the collection attribute is
            not found
        :returns: a NodeCollection object
        """
        return node.NodeCollection(
            self._conn, self._nodes_path, redfish_version=self.redfish_version
        )

    def get_telemetry_service(self):
        """Get the TelemetryService object of this service root

        :returns: The Telemetry Service object
        """
        return telemetry_service.TelemetryService(
            self._conn,
            self._telemetry_service_path,
            redfish_version=self.redfish_version,
        )

    def get_ethernet_switch_collection(self):
        """Get the EthernetSwitchCollection object

        :raises: MissingAttributeError, if the collection attribute is
            not found
        :returns: a EthernetSwitchCollection object
        """
        return ethernet_switch.EthernetSwitchCollection(
            self._conn,
            self._ethernet_switches_path,
            redfish_version=self.redfish_version,
        )

    def get_ethernet_switch(self, identity):
        """Given the identity return a EthernetSwitch object

        :param identity: The identity of the EthernetSwitch resource
        :returns: The EthernetSwitch object
        """
        return ethernet_switch.EthernetSwitch(
            self._conn, identity, redfish_version=self.redfish_version
        )

    def get_update_service(self):
        """Get a UpdateService object

        :returns: The UpdateService object
        """
        return update_service.UpdateService(
            self._conn,
            self._update_service_path,
            redfish_version=self.redfish_version,
        )

    def get_fabric_collection(self):
        """Get the FabricCollection object

        :raises: MissingAttributeError, if the collection attribute is
            not found
        :returns: a FabricCollection object
        """
        return fabric.FabricCollection(
            self._conn,
            self._fabrics_path,
            redfish_version=self.redfish_version,
        )

    def get_fabric(self, identity):
        """Given the identity return a Fabric object

        :param identity: The identity of the Fabric resource
        :returns: The Fabric object
        """
        return fabric.Fabric(
            self._conn, identity, redfish_version=self.redfish_version
        )

    def get_resource(self, path):
        """Return corresponding resource object from path

        :param path: The path of a resource or resource collection
        :raises: NoMatchingResourceError if the path maps to no known
            resource class
        :returns: corresponding resource or resource collection object
        """
        resource_class = self._get_resource_class_from_path(
            path, RESOURCE_CLASS
        )
        if not resource_class:
            raise rsd_lib_exceptions.NoMatchingResourceError(uri=path)
        return resource_class(
            self._conn, path, redfish_version=self.redfish_version
        )
from sushy import exceptions
from sushy.resources import base
from sushy.resources import common
from rsd_lib import base as rsd_lib_base
from rsd_lib import utils as rsd_lib_utils
class CurrentPeriodField(base.CompositeField):
    """CurrentPeriod field

    This object contains the Memory metrics since last reset or
    ClearCurrentPeriod action.
    """

    # Counters below accumulate only since the last reset/clear.
    blocks_read = base.Field("BlocksRead", adapter=rsd_lib_utils.num_or_none)
    """Number of blocks read since reset."""

    blocks_written = base.Field(
        "BlocksWritten", adapter=rsd_lib_utils.num_or_none
    )
    """Number of blocks written since reset."""
class LifeTimeField(base.CompositeField):
    """LifeTime field

    This object contains the Memory metrics for the lifetime of the Memory.
    """

    # Counters below accumulate over the device's entire lifetime.
    blocks_read = base.Field("BlocksRead", adapter=rsd_lib_utils.num_or_none)
    """Number of blocks read for the lifetime of the Memory."""

    blocks_written = base.Field(
        "BlocksWritten", adapter=rsd_lib_utils.num_or_none
    )
    """Number of blocks written for the lifetime of the Memory."""
class AlarmTripsField(base.CompositeField):
    """AlarmTrips field

    Alarm trip information about the memory.  Each flag is True when the
    corresponding alarm threshold crossing has been detected.
    """

    temperature = base.Field("Temperature", adapter=bool)
    """Temperature threshold crossing alarm trip detected status."""

    spare_block = base.Field("SpareBlock", adapter=bool)
    """Spare block capacity crossing alarm trip detected status."""

    uncorrectable_ecc_error = base.Field("UncorrectableECCError", adapter=bool)
    """Uncorrectable data error threshold crossing alarm trip detected status.
    """

    correctable_ecc_error = base.Field("CorrectableECCError", adapter=bool)
    """Correctable data error threshold crossing alarm trip detected status."""

    address_parity_error = base.Field("AddressParityError", adapter=bool)
    """Address parity error detected status."""
class HealthDataField(base.CompositeField):
    """HealthData field

    This type describes the health information of the memory.
    """

    remaining_spare_block_percentage = base.Field(
        "RemainingSpareBlockPercentage", adapter=rsd_lib_utils.num_or_none
    )
    """Remaining spare blocks in percentage."""

    last_shutdown_success = base.Field("LastShutdownSuccess", adapter=bool)
    """Status of last shutdown."""

    data_loss_detected = base.Field("DataLossDetected", adapter=bool)
    """Data loss detection status."""

    performance_degraded = base.Field("PerformanceDegraded", adapter=bool)
    """Performance degraded mode status."""

    alarm_trips = AlarmTripsField("AlarmTrips")
    """Alarm trip information about the memory."""

    predicted_media_life_left_percent = base.Field(
        "PredictedMediaLifeLeftPercent", adapter=rsd_lib_utils.num_or_none
    )
    """The percentage of reads and writes that are predicted to still be
    available for the media.
    """
class IntelRackScaleField(base.CompositeField):
    """Intel Rack Scale OEM extension of MemoryMetrics."""

    temperature_celsius = base.Field(
        "TemperatureCelsius", adapter=rsd_lib_utils.num_or_none
    )
    """Temperature of the Memory resource"""

    bandwidth_percent = base.Field(
        "BandwidthPercent", adapter=rsd_lib_utils.num_or_none
    )
    """Memory Bandwidth in Percent"""

    throttled_cycles_percent = base.Field(
        "ThrottledCyclesPercent", adapter=rsd_lib_utils.num_or_none
    )
    """The percentage of memory cycles that were throttled due to power
    limiting.
    """

    consumed_power_watt = base.Field(
        "ConsumedPowerWatt", adapter=rsd_lib_utils.num_or_none
    )
    """Power consumed by Memory domain resource"""

    thermal_margin_celsius = base.Field(
        "ThermalMarginCelsius", adapter=rsd_lib_utils.num_or_none
    )
    """Memory Thermal Margin in degree Celsius"""

    ecc_errors_count = base.Field(
        "ECCErrorsCount", adapter=rsd_lib_utils.num_or_none
    )
    """Number of ECC Errors found on this Memory module"""

    health = base.Field("Health")
    """Memory module Health as a discrete sensor reading"""
class OemField(base.CompositeField):
    """Oem wrapper of MemoryMetrics."""

    intel_rackscale = IntelRackScaleField("Intel_RackScale")
    """Intel Rack Scale Design specific properties."""
class ActionsField(base.CompositeField):
    """Actions available on a MemoryMetrics resource."""

    # Action that resets the CurrentPeriod counters.
    clear_current_period = common.ActionField(
        "#MemoryMetrics.ClearCurrentPeriod"
    )
class MemoryMetrics(rsd_lib_base.ResourceBase):
    """MemoryMetrics resource class

    MemoryMetrics contains usage and health statistics for a single Memory
    module or device instance.
    """

    block_size_bytes = base.Field(
        "BlockSizeBytes", adapter=rsd_lib_utils.num_or_none
    )
    """Block size in bytes."""

    current_period = CurrentPeriodField("CurrentPeriod")
    """This object contains the Memory metrics since last reset or
    ClearCurrentPeriod action.
    """

    life_time = LifeTimeField("LifeTime")
    """This object contains the Memory metrics for the lifetime of the Memory.
    """

    health_data = HealthDataField("HealthData")
    """This object describes the health information of the memory."""

    oem = OemField("Oem")
    """Oem specific properties."""

    _actions = ActionsField("Actions")
    """The Actions property shall contain the
    available actions for this resource.
    """

    def _get_clear_current_period_action_element(self):
        """Return the ClearCurrentPeriod action element, raising if absent."""
        action = self._actions.clear_current_period
        if action:
            return action
        raise exceptions.MissingActionError(
            action="#MemoryMetrics.ClearCurrentPeriod", resource=self._path
        )

    def clear_current_period(self):
        """Clear the current the period of memory_metrics.

        :raises: MissingActionError, if no clear_current_period action exists.
        """
        action = self._get_clear_current_period_action_element()
        self._conn.post(action.target_uri, data={})
from sushy.resources import base
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib.resources.v2_1.system import processor
from rsd_lib.resources.v2_2.system import processor_metrics
from rsd_lib import utils as rsd_lib_utils
class ProcessorMemoryCollectionField(base.ListField):
    """ProcessorMemory field

    On die processor memory
    """

    type = base.Field("Type")
    """Type of memory."""

    capacity_mb = base.Field("CapacityMB", adapter=rsd_lib_utils.num_or_none)
    """Memory capacity."""

    speed_mhz = base.Field("SpeedMHz", adapter=rsd_lib_utils.num_or_none)
    """Memory speed."""
class FPGAField(base.CompositeField):
    """FPGA field
    FPGA-specific properties of a processor.
    """
    type = base.Field("Type")
    """FPGA type."""
    bit_stream_version = base.Field("BitStreamVersion")
    """Version of BitStream loaded on FPGA."""
    hssi_configuration = base.Field("HSSIConfiguration")
    """High Speed Serial Interface configuration."""
    hssi_sideband = base.Field("HSSISideband")
    """High Speed Serial Interface sideband interface type."""
    reconfiguration_slots = base.Field(
        "ReconfigurationSlots", adapter=rsd_lib_utils.num_or_none
    )
    """Number of supported reconfiguration slots."""
class IntelRackScaleField(processor.IntelRackScaleField):
    """Intel Rack Scale Design extensions for a v2.2 Processor."""
    brand = base.Field("Brand")
    """This indicates processor brand"""
    capabilities = base.Field("Capabilities")
    """This indicates array of processor capabilities"""
    on_package_memory = ProcessorMemoryCollectionField("OnPackageMemory")
    """An array of the on-die (on-package) memory modules of this processor.
    """
    thermal_design_power_watt = base.Field(
        "ThermalDesignPowerWatt", adapter=rsd_lib_utils.num_or_none
    )
    """Thermal Design Power (TDP) of this processor."""
    fpga = FPGAField("FPGA")
    """FPGA specific properties for FPGA ProcessorType."""
    extended_identification_registers = rsd_lib_base.DynamicField(
        "ExtendedIdentificationRegisters"
    )
    """Extended contents of the Identification Registers (CPUID) for this
    processor
    """
    metrics = base.Field(
        "Metrics", adapter=rsd_lib_utils.get_resource_identity
    )
    """A reference to the Metrics associated with this Processor"""
class OemField(base.CompositeField):
    """Oem field carrying the Intel Rack Scale Design extensions."""
    intel_rackscale = IntelRackScaleField("Intel_RackScale")
    """Intel Rack Scale Design extensions ('Intel_RackScale' object)"""
class Processor(processor.Processor):

    oem = OemField("Oem")
    """Oem extension object"""

    @property
    @utils.cache_it
    def metrics(self):
        """Reference to the `ProcessorMetrics` instance of this processor.

        Computed on first access and cached; the cache is dropped when the
        resource is refreshed.
        """
        metrics_path = utils.get_sub_resource_path_by(
            self, ["Oem", "Intel_RackScale", "Metrics"]
        )
        return processor_metrics.ProcessorMetrics(
            self._conn, metrics_path, redfish_version=self.redfish_version
        )
class ProcessorCollection(processor.ProcessorCollection):

    @property
    def _resource_type(self):
        # Resolve members to the v2.2 Processor resource defined above,
        # which adds Oem metrics support on top of the v2.1 resource.
        return Processor
from sushy.resources import base
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib.resources.v2_1.system import system
from rsd_lib.resources.v2_2.system import computer_system_metrics
from rsd_lib.resources.v2_2.system import memory
from rsd_lib.resources.v2_2.system import processor
from rsd_lib import utils as rsd_lib_utils
class IntelRackScaleField(system.IntelRackScaleField):
    """Intel Rack Scale Design extensions for a v2.2 ComputerSystem."""
    user_mode_enabled = base.Field("UserModeEnabled", adapter=bool)
    """This indicates if platform user mode is enabled"""
    trusted_execution_technology_enabled = base.Field(
        "TrustedExecutionTechnologyEnabled", adapter=bool
    )
    """This indicates if TXT (Trusted Execution Technology) mode is enabled"""
    metrics = base.Field(
        "Metrics", adapter=rsd_lib_utils.get_resource_identity
    )
    """A reference to the Metrics associated with this ComputerSystem"""
class OemField(base.CompositeField):
    """Oem field carrying the Intel Rack Scale Design extensions."""
    intel_rackscale = IntelRackScaleField("Intel_RackScale")
    """Intel Rack Scale Design specific properties."""
class TrustedModulesCollectionField(base.ListField):
    """TrustedModules field
    This object describes the inventory of a Trusted Modules installed in
    the system.
    """
    firmware_version = base.Field("FirmwareVersion")
    """The firmware version of this Trusted Module."""
    interface_type = base.Field("InterfaceType")
    """This property indicates the interface type of the Trusted Module."""
    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """
    oem = base.Field("Oem")
    """Oem specific properties of this Trusted Module."""
    firmware_version2 = base.Field("FirmwareVersion2")
    """The 2nd firmware version of this Trusted Module, if applicable."""
    interface_type_selection = base.Field("InterfaceTypeSelection")
    """The Interface Type selection supported by this Trusted Module."""
class System(system.System):

    trusted_modules = TrustedModulesCollectionField("TrustedModules")
    """This object describes the array of Trusted Modules in the system."""

    oem = OemField("Oem")
    """Oem specific properties."""

    def _get_metrics_path(self):
        """Helper function to find the System metrics path"""
        return utils.get_sub_resource_path_by(
            self, ["Oem", "Intel_RackScale", "Metrics"]
        )

    @property
    @utils.cache_it
    def metrics(self):
        """Reference to the `ComputerSystemMetrics` of this system.

        Computed on first access and cached; the cache is dropped when the
        resource is refreshed.
        """
        return computer_system_metrics.ComputerSystemMetrics(
            self._conn,
            self._get_metrics_path(),
            redfish_version=self.redfish_version,
        )

    @property
    @utils.cache_it
    def processors(self):
        """Reference to the `ProcessorCollection` of this system.

        Computed on first access and cached; the cache is dropped when the
        resource is refreshed.
        """
        processors_path = utils.get_sub_resource_path_by(self, "Processors")
        return processor.ProcessorCollection(
            self._conn, processors_path, redfish_version=self.redfish_version
        )

    @property
    @utils.cache_it
    def memory(self):
        """Reference to the `MemoryCollection` of this system.

        Computed on first access and cached; the cache is dropped when the
        resource is refreshed.
        """
        memory_path = utils.get_sub_resource_path_by(self, "Memory")
        return memory.MemoryCollection(
            self._conn, memory_path, redfish_version=self.redfish_version
        )
class SystemCollection(system.SystemCollection):

    @property
    def _resource_type(self):
        # Resolve members to the v2.2 System resource defined above.
        return System
from sushy.resources import base
from rsd_lib import base as rsd_lib_base
from rsd_lib import utils as rsd_lib_utils
class IntelRackScaleField(base.CompositeField):
    """Intel Rack Scale Design extensions of a MetricDefinition."""
    calculation_precision = base.Field("CalculationPrecision")
    """This property specifies the precision of a calculated metric
    (the calculated metric shall be aligned to a value specified by this
    property).
    """
    discrete_metric_type = base.Field("DiscreteMetricType")
    """This array property specifies possible values of a discrete metric."""
class OemField(base.CompositeField):
    """Oem field carrying the Intel Rack Scale Design extensions."""
    intel_rackscale = IntelRackScaleField("Intel_RackScale")
    """Intel Rack Scale Design specific properties."""
class CalculationParamsTypeCollectionField(base.ListField):
    """CalculationParams field
    Maps a source metric to the metric storing the calculation result.
    """
    source_metric = base.Field("SourceMetric")
    """The metric property used as the input into the calculation."""
    result_metric = base.Field("ResultMetric")
    """The metric property used to store the results of the calculation."""
class WildcardCollectionField(base.ListField):
    """Wildcard field
    A named wildcard and the values substituted for it.
    """
    name = base.Field("Name")
    """The name of Wildcard."""
    values = base.Field("Values")
    """An array of values to substitute for the wildcard."""
class MetricDefinition(rsd_lib_base.ResourceBase):
    """MetricDefinition resource class
    A definition of a metric.
    """
    implementation = base.Field("Implementation")
    """Specifies how the sensor is implemented."""
    calculable = base.Field("Calculable")
    """Calculability of this Metric."""
    units = base.Field("Units")
    """Units of measure for this metric."""
    data_type = base.Field("DataType")
    """The data type of the corresponding metric values."""
    is_linear = base.Field("IsLinear", adapter=bool)
    """Indicates linear or non-linear values."""
    metric_type = base.Field("MetricType")
    """Specifies the type of metric provided."""
    wildcards = WildcardCollectionField("Wildcards")
    """Wildcards used to replace values in AppliesTo and Calculates metric
    property arrays.
    """
    metric_properties = base.Field("MetricProperties")
    """A collection of URI for the properties on which this metric definition
    is defined.
    """
    calculation_parameters = CalculationParamsTypeCollectionField(
        "CalculationParameters"
    )
    """Specifies the resource properties (metric) which are characterized by
    this definition.
    """
    physical_context = base.Field("PhysicalContext")
    """Specifies the physical context of the sensor."""
    sensor_type = base.Field("SensorType")
    """This property represents the type of sensor that this resource
    represents.
    """
    sensing_interval = base.Field("SensingInterval")
    """This property specifies the time interval between when a metric or
    sensor reading is updated.
    """
    discrete_values = base.Field("DiscreteValues")
    """This array property specifies possible values of a discrete metric."""
    precision = base.Field("Precision", adapter=rsd_lib_utils.num_or_none)
    """Number of significant digits in the Reading described by
    MetricProperties field.
    """
    accuracy = base.Field("Accuracy", adapter=rsd_lib_utils.num_or_none)
    """Estimated percent error of measured vs. actual values."""
    calibration = base.Field("Calibration", adapter=rsd_lib_utils.num_or_none)
    """Specifies the calibration offset added to the Reading to obtain an
    accurate value.
    """
    time_stamp_accuracy = base.Field("TimeStampAccuracy")
    """Accuracy of the timestamp."""
    min_reading_range = base.Field(
        "MinReadingRange", adapter=rsd_lib_utils.num_or_none
    )
    """Minimum value for Reading."""
    max_reading_range = base.Field(
        "MaxReadingRange", adapter=rsd_lib_utils.num_or_none
    )
    """Maximum value for Reading."""
    calculation_algorithm = base.Field("CalculationAlgorithm")
    """This property specifies the calculation which is performed on a source
    metric to obtain the metric being defined.
    """
    calculation_time_interval = base.Field("CalculationTimeInterval")
    """This property specifies the time interval over which a calculated
    metric algorithm is performed.
    """
    oem = OemField("Oem")
    """Oem specific properties."""
class MetricDefinitionCollection(rsd_lib_base.ResourceCollectionBase):

    @property
    def _resource_type(self):
        # Members of this collection are MetricDefinition resources.
        return MetricDefinition
from sushy.resources import base
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib import utils as rsd_lib_utils
class MetricValueCollectionField(base.ListField):
    """MetricValue field
    A metric Value.
    """
    metric_id = base.Field("MetricId")
    """The value shall be the MetricId of the source metric within the
    associated MetricDefinition
    """
    metric_value = base.Field("MetricValue")
    """The value of the metric represented as a string. Its data type is
    specified by the associated MetricDefinition.
    """
    time_stamp = base.Field("TimeStamp")
    """The value shall be an ISO 8601 date time for when the metric value was
    computed. Note that this may be different from the time when this
    instance is created. If Volatile is true for a given metric value
    instance, the TimeStamp changes whenever a new measurement snapshot
    is taken. A management application may establish a time series of metric
    data by retrieving the instances of metric value and sorting them
    according to their TimeStamp.
    """
    metric_property = base.Field("MetricProperty")
    """The value shall be a URI of a property contained in the scope of the
    MetricScope
    """
    metric_definition = base.Field(
        "MetricDefinition", adapter=rsd_lib_utils.get_resource_identity
    )
    """The value shall be a URI to the metric definition of the property"""
class MetricReport(rsd_lib_base.ResourceBase):
    """MetricReport resource class

    A metric report resource that is output from Metric Report Definition.
    """

    metric_values = MetricValueCollectionField("MetricValues")
    """An array of metric values for the metered items of this Metric."""

    @property
    @utils.cache_it
    def metric_report_definition(self):
        """Reference to the associated `MetricReportDefinition` instance.

        Computed on first access and cached; the cache is dropped when the
        resource is refreshed.
        """
        # Imported locally: metric_report_definition imports this module,
        # so a top-level import would be circular.
        from rsd_lib.resources.v2_2.telemetry_service import \
            metric_report_definition

        definition_path = utils.get_sub_resource_path_by(
            self, "MetricReportDefinition"
        )
        return metric_report_definition.MetricReportDefinition(
            self._conn, definition_path, redfish_version=self.redfish_version
        )
class MetricReportCollection(rsd_lib_base.ResourceCollectionBase):

    @property
    def _resource_type(self):
        # Members of this collection are MetricReport resources.
        return MetricReport
from jsonschema import validate
import logging
from sushy.resources import base
from rsd_lib import base as rsd_lib_base
from rsd_lib.resources.v2_2.telemetry_service import trigger_schemas
from rsd_lib import utils as rsd_lib_utils
LOG = logging.getLogger(__name__)
class DiscreteTriggerCollectionField(base.ListField):
    """DiscreteTrigger field
    A discrete trigger.
    """
    name = base.Field("Name")
    """The name of trigger."""
    value = base.Field("Value")
    """This property contains the value of the trigger."""
    dwell_tim_msec = base.Field(
        "DwellTimMsec", adapter=rsd_lib_utils.num_or_none
    )
    """The time (in milliseconds) the excursion persists before a trigger is
    determined.  NOTE: the JSON key is spelled "DwellTimMsec" (sic) in the
    service payload.
    """
    severity = base.Field("Severity")
    """This property contains the value of the Severity property in the Event
    message.
    """
class WildcardCollectionField(base.ListField):
    """Wildcard field
    Wildcards used to replace values in MetricProperties array property.
    A named wildcard with its substitution values.
    """
    name = base.Field("Name")
    """The name of Wildcard."""
    values = base.Field("Values")
    """An array of values to substitute for the wildcard."""
class NumericTriggerCollectionField(base.ListField):
    """NumericTrigger field
    A numeric trigger.
    """
    name = base.Field("Name")
    """The name of trigger."""
    value = base.Field("Value", adapter=rsd_lib_utils.num_or_none)
    """This property contains the value of the trigger."""
    direction_of_crossing = base.Field("DirectionOfCrossing")
    """The direction of crossing (relative to the trigger value) that
    activates this trigger.
    """
    dwell_tim_msec = base.Field(
        "DwellTimMsec", adapter=rsd_lib_utils.num_or_none
    )
    """The time (in milliseconds) the excursion persists before a trigger is
    determined.
    """
    severity = base.Field("Severity")
    """This property contains the value of the Severity property in the Event
    message.
    """
class Triggers(rsd_lib_base.ResourceBase):
    """Triggers resource class
    This is the schema definition for a Triggers.
    """
    metric_type = base.Field("MetricType")
    """The type of trigger."""
    trigger_actions = base.Field("TriggerActions")
    """This property specifies what action is performed when the
    MetricTrigger occurs.
    """
    numeric_triggers = NumericTriggerCollectionField("NumericTriggers")
    """List of numeric triggers."""
    discrete_trigger_condition = base.Field("DiscreteTriggerCondition")
    """The condition for a discrete trigger."""
    discrete_triggers = DiscreteTriggerCollectionField("DiscreteTriggers")
    """List of discrete triggers."""
    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """
    wildcards = WildcardCollectionField("Wildcards")
    """Wildcards used to replace values in MetricProperties array property."""
    metric_properties = base.Field("MetricProperties")
    """A collection of URI for the properties on which this metric definition
    is defined.
    """
class TriggersCollection(rsd_lib_base.ResourceCollectionBase):

    @property
    def _resource_type(self):
        # Members of this collection are Triggers resources.
        return Triggers

    def create_trigger(
        self,
        name=None,
        description=None,
        metric_type=None,
        trigger_actions=None,
        numeric_triggers=None,
        discrete_trigger_condition=None,
        discrete_triggers=None,
        status=None,
        wildcards=None,
        metric_properties=None,
    ):
        """Create a new trigger

        Every argument is optional; arguments left as None are omitted from
        the request.  All schema-backed arguments are validated against
        their JSON schema before the request is issued.

        :param name: The trigger name
        :param description: The trigger description
        :param metric_type: The type of trigger
        :param trigger_actions: The metric report description
        :param numeric_triggers: List of numeric triggers
        :param discrete_trigger_condition: The value shall indicate how the
            corresponding metric
        :param discrete_triggers: List of discrete triggers
        :param status: The trigger status
        :param wildcards: Wildcards used to replace values in
            MetricProperties array property
        :param metric_properties: The report definition metric properties
        :raises: jsonschema.ValidationError if an argument does not match
            its schema
        :returns: The uri of the new trigger
        """
        data = {}
        # Name and Description are free-form strings with no schema.
        if name is not None:
            data["Name"] = name
        if description is not None:
            data["Description"] = description

        # (value, schema, request key) triples for all schema-validated
        # arguments; each non-None value is validated then added.
        validated_fields = (
            (metric_type, trigger_schemas.metric_type_schema, "MetricType"),
            (trigger_actions, trigger_schemas.trigger_actions_schema,
             "TriggerActions"),
            (numeric_triggers, trigger_schemas.numeric_triggers_schema,
             "NumericTriggers"),
            (discrete_trigger_condition,
             trigger_schemas.discrete_trigger_condition_schema,
             "DiscreteTriggerCondition"),
            (discrete_triggers, trigger_schemas.discrete_triggers_schema,
             "DiscreteTriggers"),
            (status, trigger_schemas.status_schema, "Status"),
            (wildcards, trigger_schemas.wildcards_schema, "Wildcards"),
            (metric_properties, trigger_schemas.metric_properties_schema,
             "MetricProperties"),
        )
        for value, schema, key in validated_fields:
            if value is not None:
                validate(value, schema)
                data[key] = value

        # Issue POST request to create new trigger
        resp = self._conn.post(self._path, data=data)
        trigger_url = resp.headers["Location"]
        # NOTE: the original log message said "Node created"; this resource
        # is a trigger, not a composed node.
        LOG.info("Trigger created at %s", trigger_url)
        return trigger_url[trigger_url.find(self._path):]
from sushy.resources import base
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib.resources.v2_2.telemetry_service import metric_definition
from rsd_lib.resources.v2_2.telemetry_service import metric_report
from rsd_lib.resources.v2_2.telemetry_service import metric_report_definition
from rsd_lib.resources.v2_2.telemetry_service import triggers
from rsd_lib import utils as rsd_lib_utils
class TelemetryService(rsd_lib_base.ResourceBase):
    """TelemetryService resource class

    This is the schema definition for the Metrics Service. It represents
    the properties for the service itself and has links to collections of
    metric definitions and metric report definitions.
    """

    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """

    max_reports = base.Field("MaxReports", adapter=rsd_lib_utils.num_or_none)
    """The maximum number of MetricReports that are supported by this service.
    """

    min_collection_interval = base.Field("MinCollectionInterval")
    """The minimum supported interval between collections."""

    supported_collection_functions = base.Field("SupportedCollectionFunctions")
    """Function to perform over each sample."""

    def _sub_resource(self, resource_class, attribute_name):
        """Instantiate ``resource_class`` for the named sub-resource link."""
        return resource_class(
            self._conn,
            utils.get_sub_resource_path_by(self, attribute_name),
            redfish_version=self.redfish_version,
        )

    @property
    @utils.cache_it
    def metric_definitions(self):
        """The `MetricDefinitionCollection` of this service.

        Computed on first access and cached; reset on refresh.
        """
        return self._sub_resource(
            metric_definition.MetricDefinitionCollection, "MetricDefinitions"
        )

    @property
    @utils.cache_it
    def metric_report_definitions(self):
        """The `MetricReportDefinitionCollection` of this service.

        Computed on first access and cached; reset on refresh.
        """
        return self._sub_resource(
            metric_report_definition.MetricReportDefinitionCollection,
            "MetricReportDefinitions",
        )

    @property
    @utils.cache_it
    def metric_reports(self):
        """The `MetricReportCollection` of this service.

        Computed on first access and cached; reset on refresh.
        """
        return self._sub_resource(
            metric_report.MetricReportCollection, "MetricReports"
        )

    @property
    @utils.cache_it
    def triggers(self):
        """The `TriggersCollection` of this service.

        Computed on first access and cached; reset on refresh.
        """
        return self._sub_resource(triggers.TriggersCollection, "Triggers")
from jsonschema import validate
import logging
from sushy.resources import base
from sushy import utils
from rsd_lib import base as rsd_lib_base
from rsd_lib.resources.v2_2.telemetry_service import metric
from rsd_lib.resources.v2_2.telemetry_service import metric_report
from rsd_lib.resources.v2_2.telemetry_service \
import metric_report_definition_schemas
from rsd_lib import utils as rsd_lib_utils
LOG = logging.getLogger(__name__)
class ScheduleField(base.CompositeField):
    """Schedule field
    Schedule a series of occurrences.
    """
    name = base.Field("Name")
    """The Schedule name."""
    lifetime = base.Field("Lifetime")
    """The time after provisioning when the schedule as a whole expires."""
    max_occurrences = base.Field(
        "MaxOccurrences", adapter=rsd_lib_utils.num_or_none
    )
    """Maximum number of scheduled occurrences."""
    initial_start_time = base.Field("InitialStartTime")
    """Time for initial occurrence."""
    recurrence_interval = base.Field("RecurrenceInterval")
    """The interval until the next occurrence."""
    enabled_days_of_week = base.Field("EnabledDaysOfWeek")
    """Days of the week when scheduled occurrences are enabled, for enabled
    days of month and months of year.
    """
    enabled_days_of_month = base.Field("EnabledDaysOfMonth")
    """Days of month when scheduled occurrences are enabled."""
    enabled_months_of_year = base.Field("EnabledMonthsOfYear")
    """Months of year when scheduled occurrences are enabled."""
    enabled_intervals = base.Field("EnabledIntervals")
    """Intervals when scheduled occurrences are enabled."""
class WildcardCollectionField(base.ListField):
    """Wildcard field
    A named wildcard and the key values substituted for it.
    """
    name = base.Field("Name")
    """The name of Wildcard."""
    keys = base.Field("Keys")
    """An array of Key values to substitute for the wildcard."""
class MetricReportDefinition(rsd_lib_base.ResourceBase):
    """MetricReportDefinition resource class
    A set of metrics that are collected periodically.
    """
    schedule = ScheduleField("Schedule")
    """A schedule for collecting metric values."""
    metric_report_type = base.Field("MetricReportType")
    """The collection type for the corresponding metric values."""
    collection_time_scope = base.Field("CollectionTimeScope")
    """Time scope for collecting the corresponding metric values."""
    report_actions = base.Field("ReportActions")
    """This property specifies what action is performed when a metric report
    is generated.
    """
    volatile = base.Field("Volatile", adapter=bool)
    """Entries in the resulting metric value properties are reused on each
    scheduled interval.
    """
    status = rsd_lib_base.StatusField("Status")
    """This indicates the known state of the resource, such as if it is
    enabled.
    """
    wildcards = WildcardCollectionField("Wildcards")
    """Wildcards used to replace values in MetricProperties array property."""
    metric_properties = base.Field("MetricProperties")
    """A collection of URI that relates to the metric properties that will be
    included in the metric report.
    """
    @property
    @utils.cache_it
    def metrics(self):
        """Property to provide collection to `Metric`
        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        # "Metrics" resolves to a list of links; build one Metric resource
        # per link.
        return [
            metric.Metric(
                self._conn, path, redfish_version=self.redfish_version
            )
            for path in utils.get_sub_resource_path_by(
                self, "Metrics", is_collection=True
            )
        ]
    @property
    @utils.cache_it
    def metric_report(self):
        """Property to provide reference to `MetricReport` instance
        It is calculated once when it is queried for the first time. On
        refresh, this property is reset.
        """
        return metric_report.MetricReport(
            self._conn,
            utils.get_sub_resource_path_by(self, "MetricReport"),
            redfish_version=self.redfish_version,
        )
class MetricReportDefinitionCollection(rsd_lib_base.ResourceCollectionBase):

    @property
    def _resource_type(self):
        # Members of this collection are MetricReportDefinition resources.
        return MetricReportDefinition

    def create_metric_report_definition(self, metric_report_definition_req):
        """Create a new report definition

        :param metric_report_definition_req: JSON for event subscription
        :returns: The uri of the new event report definition
        """
        validate(
            metric_report_definition_req,
            metric_report_definition_schemas.report_definition_req_schema,
        )
        response = self._conn.post(
            self._path, data=metric_report_definition_req
        )
        location = response.headers["Location"]
        LOG.info("report definition created at %s", location)
        return location[location.find(self._path):]
from sushy.resources import base
from rsd_lib import base as rsd_lib_base
from rsd_lib import utils as rsd_lib_utils
class DiscreteTriggerConditionCollectionField(base.ListField):
    """DiscreteTriggerCondition field
    A discrete trigger condition.
    """
    name = base.Field("Name")
    """The name of trigger."""
    trigger_value = base.Field("TriggerValue")
    """This property contains the value that sets a trigger."""
    previous_value = base.Field("PreviousValue")
    """This property contains the previous value of the trigger."""
class NumericTriggerConditionCollectionField(base.ListField):
    """NumericTriggerCondition field
    A numeric trigger condition.
    """
    name = base.Field("Name")
    """The name of trigger."""
    value = base.Field("Value", adapter=rsd_lib_utils.num_or_none)
    """This property contains the value of the trigger."""
    direction_of_crossing = base.Field("DirectionOfCrossing")
    """This property contains the direction that the previous value came from.
    """
class TriggerConditionField(base.CompositeField):
    """TriggerCondition field
    A trigger condition.
    NOTE(review): presumably only the condition kind matching TriggerType
    (discrete / numeric / filter) is populated — confirm against the
    service payload.
    """
    dwell_interval = base.Field("DwellInterval")
    """The time in the triggering state before the trigger is invoked."""
    trigger_type = base.Field("TriggerType")
    """The type of trigger."""
    discrete_trigger_conditions = DiscreteTriggerConditionCollectionField(
        "DiscreteTriggerConditions"
    )
    """A Trigger condition based on TriggerDiscreteCondition."""
    filter_trigger_condition = base.Field("FilterTriggerCondition")
    """A filter on the elements specified by OriginResources."""
    numeric_trigger_conditions = NumericTriggerConditionCollectionField(
        "NumericTriggerConditions"
    )
    """A Trigger condition based on TriggerNumericCondition."""
class Metric(rsd_lib_base.ResourceBase):
    """Metric resource class

    Defines the use of a set of properties as metrics.
    """

    metric_properties = base.Field("MetricProperties")
    """A collection of URI for the properties on which this metric is
    collected.
    """

    collection_function = base.Field("CollectionFunction")
    """Function to perform over each sample."""

    collection_duration = base.Field("CollectionDuration")
    """The value is the collection duration for each metric value."""

    trigger_condition = TriggerConditionField("TriggerCondition")
    """A Triggering condition for the event."""
# JSON schema for the "Processors" requirements of a composition request.
# NOTE: "items" uses the single-schema form so that EVERY element of the
# array is validated; the previous tuple form ("items": [schema]) only
# validated the first element, leaving the rest unchecked.
processor_req_schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "Model": {"type": "string"},
            "TotalCores": {"type": "number"},
            "AchievableSpeedMHz": {"type": "number"},
            "InstructionSet": {
                "type": "string",
                "enum": [
                    "x86",
                    "x86-64",
                    "IA-64",
                    "ARM-A32",
                    "ARM-A64",
                    "MIPS32",
                    "MIPS64",
                    "OEM",
                ],
            },
            "Oem": {
                "type": "object",
                "properties": {
                    "Brand": {
                        "type": "string",
                        "enum": [
                            "E3",
                            "E5",
                            "E7",
                            "X3",
                            "X5",
                            "X7",
                            "I3",
                            "I5",
                            "I7",
                            "Silver",
                            "Gold",
                            "Platinum",
                            "Unknown",
                        ],
                    },
                    "Capabilities": {
                        "type": "array",
                        # Single-schema form: validate every capability.
                        "items": {"type": "string"},
                    },
                },
            },
            "Resource": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
            },
            "Chassis": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
            },
            "ProcessorType": {
                "type": "string",
                "enum": [
                    "CPU",
                    "FPGA",
                    "GPU",
                    "DSP",
                    "Accelerator",
                    "OEM",
                ],
            },
        },
        "additionalProperties": False,
    },
}
# JSON schema for the "Memory" requirements of a composition request.
# NOTE: "items" uses the single-schema form so every array element is
# validated (the previous tuple form only covered the first element).
memory_req_schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "CapacityMiB": {"type": "number"},
            "MemoryDeviceType": {
                "type": "string",
                "enum": [
                    "DDR",
                    "DDR2",
                    "DDR3",
                    "DDR4",
                    "DDR4_SDRAM",
                    "DDR4E_SDRAM",
                    "LPDDR4_SDRAM",
                    "DDR3_SDRAM",
                    "LPDDR3_SDRAM",
                    "DDR2_SDRAM",
                    "DDR2_SDRAM_FB_DIMM",
                    "DDR2_SDRAM_FB_DIMM_PROBE",
                    "DDR_SGRAM",
                    "DDR_SDRAM",
                    "ROM",
                    "SDRAM",
                    "EDO",
                    "FastPageMode",
                    "PipelinedNibble",
                ],
            },
            "SpeedMHz": {"type": "number"},
            "Manufacturer": {"type": "string"},
            "DataWidthBits": {"type": "number"},
            "Resource": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
            },
            "Chassis": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
            },
        },
        "additionalProperties": False,
    },
}
# JSON schema for the "RemoteDrives" requirements of a composition request.
# NOTE: "items" uses the single-schema form so every array element is
# validated (the previous tuple form only covered the first element).
remote_drive_req_schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "CapacityGiB": {"type": "number"},
            "iSCSIAddress": {"type": "string"},
            "Master": {
                "type": "object",
                "properties": {
                    "Type": {
                        "type": "string",
                        "enum": ["Snapshot", "Clone"],
                    },
                    "Address": {
                        "type": "object",
                        "properties": {"@odata.id": {"type": "string"}},
                    },
                },
            },
        },
        "additionalProperties": False,
    },
}
# JSON schema for the "LocalDrives" requirements of a composition request.
# NOTE: "items" uses the single-schema form so every array element is
# validated (the previous tuple form only covered the first element).
local_drive_req_schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "CapacityGiB": {"type": "number"},
            "Type": {"type": "string", "enum": ["HDD", "SSD"]},
            "MinRPM": {"type": "number"},
            "SerialNumber": {"type": "string"},
            "Interface": {
                "type": "string",
                "enum": ["SAS", "SATA", "NVMe"],
            },
            "Resource": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
            },
            "Chassis": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
            },
            "FabricSwitch": {"type": "boolean"},
        },
        "additionalProperties": False,
    },
}
# JSON schema for the "EthernetInterfaces" requirements of a composition
# request.
# NOTE: "items" uses the single-schema form so every array element is
# validated.  The nested "VLANs" array previously used "additionalItems"
# without "items", which (per JSON Schema draft-4 semantics) left its
# elements completely unvalidated.
ethernet_interface_req_schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "SpeedMbps": {"type": "number"},
            "PrimaryVLAN": {"type": "number"},
            "VLANs": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "VLANId": {"type": "number"},
                        "Tagged": {"type": "boolean"},
                    },
                },
            },
            "Resource": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
            },
            "Chassis": {
                "type": "object",
                "properties": {"@odata.id": {"type": "string"}},
            },
        },
        "additionalProperties": False,
    },
}
# JSON schema for the "Security" requirements of a composition request:
# all properties are optional and no extra properties are allowed.
security_req_schema = {
    "type": "object",
    "properties": {
        "TpmPresent": {"type": "boolean"},
        "TpmInterfaceType": {"type": "string"},
        "TxtEnabled": {"type": "boolean"},
    },
    "additionalProperties": False,
}
# Schemas for the scalar composition constraints: both are plain numbers.
total_system_core_req_schema = {"type": "number"}
total_system_memory_req_schema = {"type": "number"}
# rsdict
[](https://github.com/kihiyuki/python-rsdict/actions/workflows/python-package.yml)
[](https://badge.fury.io/py/rsdict)
[](https://pepy.tech/project/rsdict)
<!-- ref: rsdict.__doc__ -->
rsdict is a **restricted** and **resetable** dictionary,
a subclass of `dict` (inherits from built-in dictionary).
```python
>>> from rsdict import rsdict
>>> d = {"foo": 0, "bar": "baz"}
>>> rd = rsdict(d)
>>> rd
rsdict({'foo': 0, 'bar': 'baz'}, frozen=False, fixkey=True, fixtype=True, cast=False)
# fixkey=True: key restriction
>>> rd["newkey"] = 1
AttributeError
# fixtype=True: type restriction
>>> rd["foo"] = "str.value"
TypeError
>>> rd["foo"] = 999
>>> rd == d
False
# reset values to initial
>>> rd.reset()
>>> rd == d
True
```
## Installation
```sh
pip install rsdict
```
## Features
- Type-restrict(able): If activated, every type of value is fixed to its initial type.
- Key-restrict(able): If activated, cannot add or delete keys.
- Resettable: to initial value(s).
### Arguments
`rsdict(items, frozen=False, fixkey=True, fixtype=True, cast=False)`
<!-- ref: rsdict.__init__.__doc__ -->
- items (dict): Initial items (data).
Built-in dictionary only. kwargs are not supported.
- frozen (bool, optional): If True,
the instance will be frozen (immutable).
- fixkey (bool, optional): If True,
cannot add or delete keys.
- fixtype (bool, optional): If True,
cannot change type of keys.
- cast (bool, optional): If True,
  cast values to the initial type (if possible).
  If False, allow only the same type as the initial value.
### Subclasses
```python
# rsdict(frozen=True) as default
from rsdict import rsdict_frozen as rsdict
# rsdict(fixkey=False, fixtype=False) as default
from rsdict import rsdict_unfix as rsdict
# rsdict(fixkey=True, fixtype=False) as default
from rsdict import rsdict_fixkey as rsdict
# rsdict(fixkey=False, fixtype=True) as default
from rsdict import rsdict_fixtype as rsdict
```
### Additional methods
- `set(key, value)`: Alias of `__setitem__`.
- `to_dict() -> dict`: Convert to dict instance.
- `reset(key: Optional[Any]) -> None`: Reset value to the initial value.
If key is None, reset all values.
- `is_changed(key: Optional[Any]) -> bool`: If True,
the values are changed from initial.
If key is not None, check the key only.
- `get_initial(key: Optional[Any]) -> dict | Any`: Return initial value(s).
If key is None, Return dict of all initial values.
## Examples
### Create (Initialize)
<!-- from rsdict.__init__.__doc__ -->
```python
>>> from rsdict import rsdict
>>> d = dict(
... name = "John",
... enable = True,
... count = 0,
... )
>>> rd = rsdict(d)
>>> rd
rsdict({'name': 'John', 'enable': True, 'count': 0},
    frozen=False, fixkey=True, fixtype=True, cast=False)
>>> type(rd) is dict
False
>>> isinstance(rd, dict)
True
>>> rd.frozen
False
```
### Get
Same as `dict`.
```python
>>> rd["count"] == d["count"]
True
>>> rd["xyz"]
KeyError
>>> rd.get("count") == d.get("count")
True
>>> rd.get("xyz")
None
```
### Set
```python
>>> rd["enable"] = False
>>> rd.set("enable", False)
```
```python
# If frozen, always raise an exception.
>>> rd_frozen = rsdict(d, frozen=True)
>>> rd_frozen["count"] = 2
AttributeError
```
```python
# If fixtype and not cast, a value of a different type raises an exception.
>>> rd["count"] = "2"
TypeError
# If fixtype and cast, cast value to initial type.
>>> rd_cast = rsdict(d, cast=True)
>>> rd_cast["count"] = "2"
>>> rd_cast["count"]
2
>>> rd_cast["count"] = "abc"
ValueError
# If not fixtype, anything can be set.
>>> rd_typefree = rsdict(d, fixtype=False)
>>> rd_typefree["count"] = "2"
>>> rd_typefree["count"]
'2'
```
```python
# If fixkey, setting with a new key raises an exception.
>>> rd["location"] = 9
AttributeError
# If not fixkey, a new key can be set.
>>> rd_keyfree = rsdict(d, fixkey=False)
>>> rd_keyfree["location"] = 9
>>> rd_keyfree["location"]
9
```
### Delete
```python
# If frozen or fixkey, deleting key raises an exception.
>>> del rd["count"]
AttributeError
# Else, delete both current and initial values.
>>> rd_keyfree = rsdict(dict(a=1, b=2, c=3), fixkey=False)
>>> del rd_keyfree["b"]
>>> rd_keyfree.keys()
dict_keys(['a', 'c'])
>>> rd_keyfree.get_initial().keys()
dict_keys(['a', 'c'])
```
### Reset
```python
# Check whether the values are changed from initial.
>>> rd.is_changed()
False
# (Change some values.)
>>> rd["enable"] = False
>>> rd["count"] = 5
>>> rd.is_changed()
True
# Reset with a key.
>>> rd.reset("count")
>>> rd["count"]
0
>>> rd.is_changed()
True
# Reset all values.
>>> rd.reset()
>>> rd.is_changed()
False
```
### Copy
```python
# Create a new rsdict with different optional arguments.
# If reset, copy initial values only.
>>> rd["name"] = "Mike"
>>> rd2 = rd.copy(reset=True)
>>> rd2 == rd.get_initial()
True
# If frozen and not reset, copy current values as new initial values.
>>> rd3 = rd.copy(frozen=True)
>>> rd3
rsdict({'name': 'Mike', 'enable': True, 'count': 0},
frozen=True, fixkey=True, fixtype=False, cast=False)
>>> rd3 == rd
True
>>> rd3.get_initial() == rd.get_initial()
False
```
### Compare
```python
>>> rd1 = rsdict({"key1": 10, "key2": "abc"})
>>> rd2 = rsdict({"key1": 20, "key2": "abc"})
# Change current value.
>>> rd2["key1"] = 10
# Current values are equal.
>>> rd1 == rd2
True
# Initial values are not equal.
>>> rd1.get_initial() == rd2.get_initial()
False
# If compare with dict, use current values.
>>> d2 = rd2.to_dict()
>>> rd2 == d2
```
### Union
(Python3.9 or later)
```python
>>> rd = rsdict({"key1": 10, "key2": "abc"}, fixkey=False)
>>> d = {"key2": 20, "key3": False}
# Return: dict
>>> rd | d
{'key1': 10, 'key2': 20, 'key3': False}
>>> d | rd
{'key2': 'abc', 'key3': False, 'key1': 10}
>>> rd |= d
>>> rd
rsdict({'key1': 10, 'key2': 20, 'key3': False},
frozen=False, fixkey=False, fixtype=True, cast=False)
# Add initial values of new keys only.
>>> rd.get_initial()
{'key1': 10, 'key2': 'abc', 'key3': False}
```
## Note
- Expected types of value:
`int`, `float`, `str`, `bool`, `None`,
`list`, `dict`, `tuple`,
`pathlib.Path`
- Some types (e.g. `numpy.ndarray`) cannot be cast.
- [Tested in Python3.5, 3.6, 3.7, 3.8, 3.9, 3.10.](https://github.com/kihiyuki/python-rsdict/actions/workflows/python-package.yml)
- Only initial items are deepcopied.
```python
>>> d = dict(a=[1])
>>> rd = rsdict(d)
>>> rd["a"].append(2)
>>> rd
rsdict({'a': [1, 2]}, frozen=False, fixkey=True, fixtype=True, cast=False)
>>> d
{'a': [1, 2]}
>>> rd.get_initial()
{'a': [1]}
```
### Performance
rsdict is slower than `dict`
due to its additional checking.

## Changelog
->
[CHANGELOG.md](https://github.com/kihiyuki/python-rsdict/blob/main/CHANGELOG.md)
| /rsdict-0.1.8.tar.gz/rsdict-0.1.8/README.md | 0.746971 | 0.779993 | README.md | pypi |
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
    def __init__(self, mu=0, sigma=1):
        # Initialize the base Distribution with the given mean and standard
        # deviation; defaults describe the standard normal N(0, 1).
        Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        # NOTE(review): does not call plt.show(); the caller is expected to
        # render or save the figure -- confirm intended.
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[0].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev) | /rsdist-0.1.tar.gz/rsdist-0.1/distributions/Gaussiandistribution.py | 0.688364 | 0.853058 | Gaussiandistribution.py | pypi |
# Research Software Engineering
[](https://badge.fury.io/py/rseng)
[](https://rseng.github.io/) [](https://good-labs.github.io/greater-good-affirmation)
Criteria and taxonomy for research software engineering (rseng).

## Overview
This repository serves a taxonomy and criteria for research software,
intended to be used with the [research software encyclopedia](https://github.com/rseng/rse).
The two are maintained separately for development, and because it might
be the case that the criteria and taxonomy would want to be used separately
from the encyclopedia.
## How do I contribute?
You can edit [taxonomy](rseng/main/taxonomy) or [criteria](rseng/main/criteria) items
by way of opening a pull request against the master branch. When it is merged,
an [automated task](https://github.com/rseng/rseng/blob/master/.github/workflows/staging.yml)
will update the interface served at [https://rseng.github.io/rseng](https://rseng.github.io/rseng). You can also interact with the rseng software for your own needs, shown below.
## Usage
Usage of the library includes programmatic (within Python or command line)
interaction with criteria or taxonomy, and generation of output files.
- [Criteria](#criteria)
- [Taxonomy](#taxonomy)
- [Generate](#generate)
### Criteria
For usage within Python, you will first want to instantiate a `CriteriaSet`. If you
don't provide a default file, the library default will be used.
```python
from rseng.main.criteria import CriteriaSet
cset = CriteriaSet()
# [CriteriaSet:6]
```
You can then see questions loaded. Each has a unique id that gives a sense of
what is being asked:
```python
cset.criteria
{'RSE-research-intention': <rseng.main.criteria.base.Criteria at 0x7f3d2e85d410>,
'RSE-domain-intention': <rseng.main.criteria.base.Criteria at 0x7f3d2dab8490>,
'RSE-question-intention': <rseng.main.criteria.base.Criteria at 0x7f3d2dab8910>,
'RSE-citation': <rseng.main.criteria.base.Criteria at 0x7f3d2db34810>,
'RSE-usage': <rseng.main.criteria.base.Criteria at 0x7f3d2db340d0>,
'RSE-absence': <rseng.main.criteria.base.Criteria at 0x7f3d2db34850>}
```
You can inspect any particular criteria:
```python
cset.criteria['RSE-usage']
<rseng.main.criteria.base.Criteria at 0x7f3d2db340d0>
cset.criteria['RSE-usage'].uid
# 'RSE-usage'
cset.criteria['RSE-usage'].question
# 'Has the software been used by researchers?'
cset.criteria['RSE-usage'].options
# ['yes', 'no']
```
And further interact with the CriteriaSet, for example export to a tabular file:
```python
print(cset.export()) # You can also define a "filename" and/or "sep" here.
RSE-research-intention Is the software intended for research? yes,no
RSE-domain-intention Is the software intended for a particular domain? yes,no
RSE-question-intention Was the software created with intention to solve a research question? yes,no
RSE-citation Has the software been cited? yes,no
RSE-usage Has the software been used by researchers? yes,no
RSE-absence Would taking away the software be a detriment to research? yes,no
```
or iterate through the criteria, or get a list of all of them.
```python
> list(cset)
[[Criteria:RSE-research-intention,Is the software intended for research?],
[Criteria:RSE-domain-intention,Is the software intended for a particular domain?],
[Criteria:RSE-question-intention,Was the software created with intention to solve a research question?],
[Criteria:RSE-citation,Has the software been cited?],
[Criteria:RSE-usage,Has the software been used by researchers?],
[Criteria:RSE-absence,Would taking away the software be a detriment to research?]]
for criteria in cset:
print(criteria)
[Criteria:RSE-research-intention,Is the software intended for research?]
[Criteria:RSE-domain-intention,Is the software intended for a particular domain?]
[Criteria:RSE-question-intention,Was the software created with intention to solve a research question?]
[Criteria:RSE-citation,Has the software been cited?]
[Criteria:RSE-usage,Has the software been used by researchers?]
[Criteria:RSE-absence,Would taking away the software be a detriment to research?]
```
### Taxonomy
The taxonomy is interacted with in a similar fashion.
```python
from rseng.main.taxonomy import Taxonomy
tax = Taxonomy()
```
It will show you the total number of nodes (nested too):
```python
from rseng.main.taxonomy import Taxonomy
tax = Taxonomy()
# [Taxonomy:24]
```
Validation happens as the default file is loaded. Akin to criteria, the files
are located in [rseng/main/taxonomy](rseng/main/taxonomy) in yaml format, and
are dated. You can quickly print an easily viewable, human understandable
version of the tree:
```python
for name in tax.flatten():
...: print(name)
...:
Software to directly conduct research >> Domain-specific software >> Domain-specific hardware
Software to directly conduct research >> Domain-specific software >> Domain-specific optimized software
Software to directly conduct research >> Domain-specific software >> Domain-specific analysis software
Software to directly conduct research >> General software >> Numerical libraries
Software to directly conduct research >> General software >> Data collection
Software to directly conduct research >> General software >> Visualization
Software to support research >> Explicitly for research >> Workflow managers
Software to support research >> Explicitly for research >> Interactive development environments for research
Software to support research >> Explicitly for research >> Provenance and metadata collection tools
Software to support research >> Used for research but not explicitly for it >> Databases
Software to support research >> Used for research but not explicitly for it >> Application Programming Interfaces
Software to support research >> Used for research but not explicitly for it >> Frameworks
Software to support research >> Incidentally used for research >> Operating systems
Software to support research >> Incidentally used for research >> Personal scheduling and task management
Software to support research >> Incidentally used for research >> Version control
Software to support research >> Incidentally used for research >> Text editors and integrated development environments
Software to support research >> Incidentally used for research >> Communication tools or platforms
```
As of version 0.0.13 there are assigned colors for each taxonomy item to ensure
more consistency across interface generation. The colors to choose from
can be imported from `rse.utils.colors.browser_palette`, and include those
with "medium" or "dark" in the name. This one hasn't been used yet, and the
list should be consulted for others.
```
mediumvioletred
```
### Generate
After you install rseng, the `rseng` executable should be in your path.
You can generate output files for the taxonomy or criteria to a folder
path that doesn't exist yet. For example, to generate the markdown
files for the static documentation for each of the taxonomy and criteria
we do:
#### Markdown Jekyll Pages
```bash
# rseng generate <type> <path> <version>
$ rseng generate taxonomy docs/_taxonomy
docs/_taxonomy/RSE-taxonomy-domain-hardware.md
docs/_taxonomy/RSE-taxonomy-optimized.md
docs/_taxonomy/RSE-taxonomy-analysis.md
docs/_taxonomy/RSE-taxonomy-numerical libraries.md
docs/_taxonomy/RSE-taxonomy-data-collection.md
docs/_taxonomy/RSE-taxonomy-visualization.md
docs/_taxonomy/RSE-taxonomy-workflow-managers.md
docs/_taxonomy/RSE-taxonomy-ide-research.md
docs/_taxonomy/RSE-taxonomy-provenance-metadata-tools.md
docs/_taxonomy/RSE-taxonomy-databases.md
docs/_taxonomy/RSE-taxonomy-application-programming-interfaces.md
docs/_taxonomy/RSE-taxonomy-frameworks.md
docs/_taxonomy/RSE-taxonomy-operating-systems.md
docs/_taxonomy/RSE-taxonomy-personal-scheduling-task-management.md
docs/_taxonomy/RSE-taxonomy-version-control.md
docs/_taxonomy/RSE-taxonomy-text-editors-ides.md
docs/_taxonomy/RSE-taxonomy-communication-tools.md
```
The default version generated for each is "latest" but you can add another
version as the last argument to change that. Here is generation
of the criteria, showing using latest:
```bash
# rseng generate <type> <path> <version>
$ rseng generate criteria docs/_criteria
docs/_criteria/RSE-research-intention.md
docs/_criteria/RSE-domain-intention.md
docs/_criteria/RSE-question-intention.md
docs/_criteria/RSE-citation.md
docs/_criteria/RSE-usage.md
docs/_criteria/RSE-absence.md
```
#### Intended for Visualization (json)
You can also generate a (non flat) version of the taxonomy, specifically a json
file that plugs easily into the d3 hierarchy plots.
```
# rseng generate taxonomy-json <filename>
$ rseng generate taxonomy-json taxonomy.json
```
#### GitHub Issue Templates
If you want an issue template that can work with a GitHub workflow
(both placed in your software repository) to annotate items via GitHub, both can be produced
with updated criteria or taxonomy items via:
```bash
$ rseng generate criteria-annotation-template
```
And the template will be generated (with default filename) in the present working
directory:
```markdown
---
name: Annotate Criteria
about: Select this template to annotate criteria for a software repository
title: "[CRITERIA]"
labels: ''
assignees: ''
---
## Repository
<!-- write the name of the repository here-->
## Criteria
<!-- check boxes for criteria to indicate "yes" -->
- [ ] criteria-RSE-research-intention
- [ ] criteria-RSE-domain-intention
- [ ] criteria-RSE-question-intention
- [ ] criteria-RSE-citation
- [ ] criteria-RSE-usage
- [ ] criteria-RSE-absence
```
You can do the same for a GitHub issues taxonomy annotation template:
```bash
$ rseng generate taxonomy-annotation-template
```
```
---
name: Annotate Taxonomy
about: Select this template to annotate software with taxonomy categories
title: "[TAXONOMY]"
labels: ''
assignees: ''
---
## Repository
<!-- write the name of the repository here-->
## Taxonomy
<!-- check one or more boxes for categories to indicate "yes" -->
- [ ] RSE-taxonomy-domain-hardware
Software to directly conduct research >> Domain-specific software >> Domain-specific hardware
- [ ] RSE-taxonomy-optimized
Software to directly conduct research >> Domain-specific software >> Domain-specific optimized software
- [ ] RSE-taxonomy-analysis
Software to directly conduct research >> Domain-specific software >> Domain-specific analysis software
- [ ] RSE-taxonomy-numerical libraries
Software to directly conduct research >> General software >> Numerical libraries
- [ ] RSE-taxonomy-data-collection
Software to directly conduct research >> General software >> Data collection
- [ ] RSE-taxonomy-visualization
Software to directly conduct research >> General software >> Visualization
- [ ] RSE-taxonomy-workflow-managers
Software to support research >> Explicitly for research >> Workflow managers
- [ ] RSE-taxonomy-ide-research
Software to support research >> Explicitly for research >> Interactive development environments for research
- [ ] RSE-taxonomy-provenance-metadata-tools
Software to support research >> Explicitly for research >> Provenance and metadata collection tools
- [ ] RSE-taxonomy-databases
Software to support research >> Used for research but not explicitly for it >> Databases
- [ ] RSE-taxonomy-application-programming-interfaces
Software to support research >> Used for research but not explicitly for it >> Application Programming Interfaces
- [ ] RSE-taxonomy-frameworks
Software to support research >> Used for research but not explicitly for it >> Frameworks
- [ ] RSE-taxonomy-operating-systems
Software to support research >> Incidentally used for research >> Operating systems
- [ ] RSE-taxonomy-personal-scheduling-task-management
Software to support research >> Incidentally used for research >> Personal scheduling and task management
- [ ] RSE-taxonomy-version-control
Software to support research >> Incidentally used for research >> Version control
- [ ] RSE-taxonomy-text-editors-ides
Software to support research >> Incidentally used for research >> Text editors and integrated development environments
- [ ] RSE-taxonomy-communication-tools
Software to support research >> Incidentally used for research >> Communication tools or platforms
```
Example in the wild include [this one for criteria](https://github.com/rseng/software/blob/master/.github/ISSUE_TEMPLATE/annotate-criteria.md) and [this one for the taxonomy](https://github.com/rseng/software/blob/master/.github/ISSUE_TEMPLATE/annotate-taxonomy.md).
Note that you should add the templates along with creating labels, one for each
of `taxonomy` and `criteria`. A workflow to automatically update criteria/taxonomy items
is being written and will be added soon.
## License
* Free software: MPL 2.0 License
| /rseng-0.0.18.tar.gz/rseng-0.0.18/README.md | 0.530236 | 0.974288 | README.md | pypi |
from functools import wraps
from time import time as tm
import resource
# Default message templates for the timing / memory decorators below.
# Placeholders: {0} = function name, remaining fields = measured values.
DEFAULT_TIMING_FMT = "@timecall: {0} took {1} seconds"
DEFAULT_MEMORY_FMT = "@trackmem: {0} start: {1} end: {2} used: {3} {4}"
def timethis(fmt_func=None, verbose=True):
    """
    Parameterized decorator for tracking time of a function call.

    :fmt_func is a string formatting function that should take func_name,
    duration parameters and return a string to print the timing info.
    :verbose when False, the call is still timed but nothing is printed.
    """
    def decorator(func):
        @wraps(func)
        def time_call(*args, **kwargs):
            t1 = tm()
            result = func(*args, **kwargs)
            d = tm() - t1
            if verbose:
                # BUG FIX: func.func_name is Python 2 only; on Python 3 the
                # attribute is func.__name__ (preserved here by @wraps).
                if fmt_func is None:
                    print(DEFAULT_TIMING_FMT.format(func.__name__, d))
                else:
                    print(fmt_func(func.__name__, d))
            return result
        return time_call
    return decorator
def trackmem(fmt_func=None, verbose=True, unit="MB"):
    """
    Parameterized decorator for tracking memory usage of function calls via
    the resource module (Unix only).

    Note that there has not been any specific review of how garbage
    collection affects this profiling. Also note that ru_maxrss is reported
    in kilobytes on Linux but bytes on macOS, so the converted figures are
    platform dependent -- TODO confirm intended units.

    :fmt_func callable taking (func_name, start, end, used, unit) and
        returning the string to print.
    :verbose when False, nothing is printed (and no conversion is done).
    :unit one of 'B', 'KB', 'MB', 'GB' (case-insensitive).
    """
    def decorator(func):
        @wraps(func)
        def memtrack_call(*args, **kwargs):
            m1 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
            result = func(*args, **kwargs)
            m2 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
            if verbose:
                # only convert when actually reporting
                cv = size([m1, m2, m2 - m1], unit)
                # BUG FIX: func.func_name is Python 2 only; use __name__.
                if fmt_func is None:
                    print(DEFAULT_MEMORY_FMT.format(func.__name__, cv[0], cv[1], cv[2], unit.upper()))
                else:
                    print(fmt_func(func.__name__, cv[0], cv[1], cv[2], unit.upper()))
            return result
        return memtrack_call
    return decorator
def timethis_returnstats(func):
    """
    This decorator changes the function signature of `func` passed in:
    callers receive a tuple where the first item is the normal result and
    the second is a dictionary of timing info
    ({'func_name': ..., 'duration': ...}).
    """
    @wraps(func)
    def time_call(*args, **kwargs):
        t1 = tm()
        result = func(*args, **kwargs)
        d = tm() - t1
        # BUG FIX: func.func_name is Python 2 only; use __name__.
        print("@timecall: %s took %s seconds" % (func.__name__, d))
        return result, {'func_name': func.__name__, 'duration': d}
    return time_call
def size(values, unit):
    """Convert a list of byte counts into the given unit.

    Args:
        values: iterable of numeric byte counts.
        unit: target unit, one of 'b', 'kb', 'mb', 'gb' (case-insensitive).

    Returns:
        list: the converted values.

    Raises:
        ValueError: if unit is not one of the supported units.
    """
    converted = []
    u = unit.lower()
    UNITS = ('b','kb','mb','gb')
    if u not in UNITS:
        raise ValueError("Must provide unit as one of %s" % list(UNITS))
    for v in values:
        if u == 'b':
            cv = v
        elif u == 'kb':
            cv = v / 1024.0
        elif u == 'mb':
            cv = v / 1024 / 1024.0
        elif u == 'gb':
            cv = v / 1024 / 1024 / 1024.0
        else:
            # unreachable: membership was already validated against UNITS
            raise ValueError("case not handled for unit")
        converted.append(cv)
return converted | /rsformat-0.0.1.tar.gz/rsformat-0.0.1/rformat/utils/bench.py | 0.644784 | 0.355216 | bench.py | pypi |
[](https://github.com/nlessmann/rsgt/actions)
[](https://codecov.io/gh/nlessmann/rsgt)
[](https://rsgt.readthedocs.io/en/latest/?badge=latest)
[](https://pypi.org/project/rsgt/)
# Random Smooth Grayvalue Transformations
Convolutional neural networks trained for a detection or segmentation task in a specific type of medical gray value images, such as CT or MR images, typically
fail in other medical gray value images, even if the target structure *looks* similar in both types of images. Random smooth gray value transformations are a
data augmentation technique aimed at forcing the network to become gray value invariant. During training, the gray values of the training images or patches are
randomly changed, but using a smooth and continuous transfer function so that shape and texture information is largely retained.
API documentation: http://rsgt.readthedocs.io/
## Installation
To use data augmentation with random smooth gray value transformations in your own project, simply install the `rsgt` package:
```
pip install rsgt
```
* Requires Python 2.7+ or 3.5+
* Numpy is the only other dependency
## Data augmentation
The expected input is a numpy array with integer values, which is usually the case for medical gray value images, such as CT and MR scans.
```python
from rsgt.augmentation import random_smooth_grayvalue_transform
# Apply gray value transformation to a numpy array
new_image = random_smooth_grayvalue_transform(image, dtype='float32')
```
The returned numpy array will have a floating point dataype and values in the range [0,1].
### Mini-batches
While the function supports input data with any number of dimensions, it does currently not support mini-batches. A mini-batch of 3D images can be treated as a
4D input array, but all images in the mini-batch will then be subject to the same transformation. This means that a single random look up table will be computed
and applied to all images in the mini-batch. There is currently no vectorized implementation of the transformation function, so a for loop is at this point the
only way to transform images in a mini-batch with different transformation functions:
```python
for i in range(minibatch.shape[0]):
minibatch[i] = random_smooth_grayvalue_transform(minibatch[i], dtype='float32')
```
### Examples
<img alt="Original CT scan" src="/examples/ct0.png" width="216"><img alt="Transformed CT scan #1" src="/examples/ct1.png" width="216"><img alt="Transformed CT scan #2" src="/examples/ct2.png" width="216"><img alt="Transformed CT scan #3" src="/examples/ct3.png" width="216">
The left most image is the original CT slice. The other images show the same slice with random smooth gray value transformations applied. The transformation
function is shown below the transformed image.
This CT scan is from the [kits19 challenge](https://kits-challenge.org) ([CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/) license).
## Normalization functions
Because the augmentation function returns values in the range [0,1], it is necessary to either also apply the gray value transformation at inference time, or to
normalize input images at inference time to [0,1]. The `rsgt` package comes with helper functions for CT and MR scans:
### CT scans
Expected values of the original image are Hounsfield units ranging from -1000 for air (and below for background outside the image area) to around 3000 for very
dense structures like metal implants.
```python
from rsgt.normalization import normalize_ct_scan
normalized_image = normalize_ct_scan(image, dtype='float32')
```
### MR scans
Because values of MR scans are not standardized like those of CT scans, the normalization is based on the 5% and the 95% percentiles of the input values. Values
below and above are clipped.
```python
from rsgt.normalization import normalize_mr_scan
normalized_image = normalize_mr_scan(image, dtype='float32')
```
This normalization can also be used in combination with the augmentation technique:
```python
from rsgt.augmentation import random_smooth_grayvalue_transform
from rsgt.normalization import normalize_mr_scan
N = 4096 # number of bins
normalized_integer_image = (normalize_mr_scan(image, dtype='float32') * N).round().astype(int)
new_image = random_smooth_grayvalue_transform(normalized_integer_image, min_max_val=(0, N), dtype='float32')
```
## Citation
Please cite our short paper describing random smooth gray value transformations for data augmentation when using this technique in your work:
> N. Lessmann and B. van Ginneken, "Random smooth gray value transformations for cross modality learning with gray value invariant networks",
> [arXiv:2003.06158](https://arxiv.org/abs/2003.06158)
## License
This package is released under the [MIT license](LICENSE), as found in the LICENSE file, with the exception of the images in the `/examples` directory, which
are released under a Creative Commons license ([CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/)).
| /rsgt-1.1.0.tar.gz/rsgt-1.1.0/README.md | 0.811452 | 0.993461 | README.md | pypi |
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
    def __init__(self, mu=0, sigma=1):
        # Initialize the base Distribution with the given mean and standard
        # deviation; defaults describe the standard normal N(0, 1).
        Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        # NOTE(review): does not call plt.show(); the caller is expected to
        # render or save the figure -- confirm intended.
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[0].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev) | /rsherer_udacity_gaussian_distributions-0.9.tar.gz/rsherer_udacity_gaussian_distributions-0.9/rsherer_udacity_gaussian_distributions/Gaussiandistribution.py | 0.688364 | 0.853058 | Gaussiandistribution.py | pypi |
import re
from .connector import Connector
from .interface import ICommand
class Stats(ICommand):
    """Scrape RSI crowdfunding statistics and current game-version strings."""
    # Crowdfunding statistics JSON endpoint (funds, fans, fleet).
    __url_crow_stat = "https://robertsspaceindustries.com/api/stats/getCrowdfundStats"
    # Roadmap bootstrap endpoint; its text body mentions LIVE/PTU/ETF versions.
    __url_roadmap = "https://robertsspaceindustries.com/api/roadmap/v1/init"
    # NOTE(review): the getters below read m.group(3) (third capturing group),
    # not the named groups declared here -- the group index must be kept in
    # sync if any pattern is edited.
    __regex_api_live = r'(live\s*version\s*:\s*(?P<live_api>(\d+(\.\d+)*))?)'
    __regex_api_ptu = r'(ptu\s*version\s*:\s*(?P<ptu_version>(\d+(\.\d+)*))?)'
    __regex_api_etf = r'(ptu\s*version\s*:\s*(?P<etf_version>(\d+(\.\d+)*)(.*?)((ETF)|(Evocati))))'
    async def execute_async(self):
        # No awaitable work needed; delegate to the synchronous implementation.
        return self.execute()
    def execute(self):
        """Get general info.

        Returns:
            dict: fans/funds/fleet counters plus the current LIVE, PTU and
            ETF version strings, or None when any request fails.
        """
        # Get stats info
        stats = self.__get_crow_stats()
        if stats is None:
            return None
        fleet = stats['fleet']
        if fleet is not None:
            fleet = int(fleet)
        req = Connector().request(self.__url_roadmap, method="get")
        if req is None or req.status_code != 200:
            return None
        text = req.text
        live_pu = self.__get_live_pu(text)
        live_ptu = self.__get_live_ptu(text)
        live_etf = self.__get_live_etf(text)
        return {
            "fans": int(stats['fans']),
            # funds are reported in cents; convert to US dollars
            "funds": float(stats['funds']) / 100,
            "fleet": fleet,
            'current_live': live_pu,
            'current_ptu': live_ptu,
            'current_etf': live_etf,
        }
    def __get_live_pu(self, text):
        # Extract the LIVE version number from the roadmap body, if present.
        m = re.search(self.__regex_api_live, text, re.IGNORECASE)
        if m:
            return m.group(3)
        return None
    def __get_live_ptu(self, text):
        # Extract the PTU version number from the roadmap body, if present.
        m = re.search(self.__regex_api_ptu, text, re.IGNORECASE)
        if m:
            return m.group(3)
        return None
    def __get_live_etf(self, text):
        # Extract the ETF/Evocati version number, if present.
        m = re.search(self.__regex_api_etf, text, re.IGNORECASE)
        if m:
            return m.group(3)
        return None
    def __get_crow_stats(self, chart="day"):
        """ Get all Crow stats
        Returns:
            dict: assoc array of stats, or None on any request/response error.
        """
        # flags select which counters the endpoint should include
        data = {
            "alpha_slots": True,
            "chart": chart,
            "fans": True,
            "fleet": True,
            "funds": True,
        }
        req = Connector().request(self.__url_crow_stat, method="post", json_data=data)
        if req is None or req.status_code != 200:
            return None
        resp = req.json()
        # check response
        if resp['success'] != 1 or "data" not in resp:
            return None
        return resp['data']
import re
import asyncio
from lxml import html
from .connector import Connector
from .interface import ICommand
class Ship(ICommand):
    """Search and fetch ships from the RSI pledge store / ship matrix.
    """
    __url_search_ships = "https://robertsspaceindustries.com/api/store/getShips"
    __url_ships_id = "https://robertsspaceindustries.com/ship-matrix/index"
    def __init__(self, **kwargs):
        """Get ships
        Args:
            **kwargs: Arbitrary keyword arguments.
        Keyword Args:
            name (str):
                The name of the ship to search.
            classification (str):
                The type of ship to get, you can pass this parameter at multiple times. ( combat , transport , exploration , industrial , support , competition , ground , multi ).
            length_min (int):
                The minimal length (in meters).
            length_max (int):
                The maximal length (in meters).
            crew_min (int):
                The minimal crew count.
            crew_max (int):
                The maximal crew count.
            price_min (int):
                The minimal price (in US dollars).
            price_max (int):
                The maximal price (in US dollars).
            mass_min (int):
                The minimal mass (Kg).
            mass_max (int):
                The maximal mass (Kg).
            page_max (int):
                The numbers of pages to process (1 page contains 10 ships), one page takes around 15s to process.
            page (int):
                The page to fetch (starts at 1).
        """
        self.kwargs = kwargs
    async def execute_async(self):
        # BUG FIX: asyncio.run() raises RuntimeError when called from a
        # running event loop; await the coroutine directly instead.
        return await self.get_ships_pages_async()
    def execute(self):
        return asyncio.run(self.get_ships_pages_async())
    async def get_ships_pages_async(self):
        """Get all ships using advanced search through pages.
        Returns:
            list: Ships.
        """
        name = self.convert_val(self.kwargs.get("name"))
        classification = self.convert_val(self.kwargs.get("classification"))
        length_min = self.convert_val(self.kwargs.get("length_min"))
        length_max = self.convert_val(self.kwargs.get("length_max"))
        crew_min = self.convert_val(self.kwargs.get("crew_min"))
        crew_max = self.convert_val(self.kwargs.get("crew_max"))
        price_min = self.convert_val(self.kwargs.get("price_min"))
        price_max = self.convert_val(self.kwargs.get("price_max"))
        mass_min = self.convert_val(self.kwargs.get("mass_min"))
        mass_max = self.convert_val(self.kwargs.get("mass_max"))
        page = self.convert_val(self.kwargs.get("page"))
        page_max = self.convert_val(self.kwargs.get("page_max"))
        # page numbers default to 1 when absent or not a positive integer
        if re.match(r"^\d+?$", str(page)):
            page = int(page)
        else:
            page = 1
        # BUG FIX: page_max was passed to re.match without str(), which
        # raises TypeError whenever the caller does not provide it.
        if re.match(r"^\d+?$", str(page_max)):
            page_max = int(page_max)
        else:
            page_max = 1
        data = {
            'classification': classification,
            'itemType': 'ships',
            'length': self.http_formatter(length_min, length_max),
            'manufacturer_id': [],
            'mass': self.http_formatter(mass_min, mass_max),
            'max_crew': self.http_formatter(crew_min, crew_max),
            'msrp': self.http_formatter(price_min, price_max),
            'search': name,
            'page': page,
            'sort': 'id',
            'storefront': 'pledge',
            'type': '',
        }
        req = Connector().request(self.__url_search_ships, data)
        if req is None or req.status_code == 404:
            return {}
        elif req.status_code != 200:
            return None
        resp = req.json()
        # get html contents
        if resp['success'] != 1:
            return None
        ships = []
        tree = html.fromstring(resp['data']['html'])
        tasks_ship = []
        ships_res = []
        for v in tree.xpath("//*[contains(@class, 'ship-item')]/@data-ship-id"):
            # fetch every ship detail page concurrently
            tasks_ship.append(asyncio.create_task(self.get_ships_async(v.strip())))
        # Wait tasks to finish
        for t in tasks_ship:
            await t
            result = t.result()
            if result is not None:
                ships_res.append(result)
        for resp_ship, req_page in ships_res:
            page_tree = html.fromstring(req_page.content)
            # Get the lowest ship price; the page value is in cents.
            price = 0
            for v in page_tree.xpath("//*[@class='final-price']/@data-value"):
                p = int(str(v)) / 100
                if price == 0 or price > p:
                    price = p
            resp_ship['price'] = price
            ships.append(resp_ship)
        if resp['data']['rowcount'] != 0 and page < page_max:
            # BUG FIX: execute() takes no arguments, so the previous
            # self.execute(**self.kwargs) call raised TypeError; recurse on
            # the coroutine itself after bumping the page number (kwargs are
            # re-read at the top of this method).
            self.kwargs['page'] = page + 1
            val = await self.get_ships_pages_async()
            if val != []:
                ships.extend(val)
        return ships
    async def get_ships_async(self, ship_id: int):
        """Get ship by its id.
        Args:
            ship_id (int): The ship id.
        Returns:
            tuple: (ship dict, detail-page response), or None on failure.
        """
        resp_ship = await self.get_ship_by_id(ship_id)
        req_page = None
        if resp_ship is not None:
            req_page = (await asyncio.gather(Connector().request_async(Connector().url_host + resp_ship['url'])))[0]
        if req_page is None or req_page.status_code != 200:
            return None
        return (resp_ship, req_page)
    async def get_ship_by_id(self, ship_id: int = None, get_price=False):
        """Get a ship by his ship_id.
        Args:
            ship_id (int, optional): The ship ID. Default to None.
            get_price (bool, optional): if the price need to be retrieved. Default to False.
        Returns:
            dict or list: one ship when ship_id is given, all ships otherwise.
        """
        parameters = {}
        if ship_id is not None:
            parameters = {'id': ship_id}
        req = Connector().request(self.__url_ships_id, parameters)
        if req is None:
            return None
        elif req.status_code == 404:
            return {}
        elif req.status_code != 200:
            return None
        resp_ship = req.json()
        if resp_ship['success'] != 1 or 'data' not in resp_ship or len(resp_ship['data']) == 0:
            return None
        if ship_id is None:
            res = resp_ship['data']
        else:
            res = resp_ship['data'][0]
        if get_price:
            # BUG FIX: when ship_id is given `res` is a single dict; iterate
            # a uniform list so we never loop over dict keys.
            targets = res if isinstance(res, list) else [res]
            for s in targets:
                req_page = Connector().request(Connector().url_host + s['url'])
                page_tree = html.fromstring(req_page.content)
                # Get the Ship price (page value is in cents)
                price = 0
                for v in page_tree.xpath("//*[@class='final-price']/@data-value"):
                    p = int(str(v)) / 100
                    if price == 0 or price > p:
                        price = p
                s['price'] = price
        return res
from .connector import Connector
from .interface import ICommand
class StarmapSystems(ICommand):
    """Look up star systems on the starmap, optionally filtered by name."""
    def __init__(self, name: str = None):
        """
        Args:
            name (str, optional): The name of the System. Default to None.
        """
        self.name = name
    async def execute_async(self):
        return self.execute()
    def execute(self):
        # Fetch every known system, then narrow down if a name was given.
        systems = get_data("systems")
        if systems is None:
            return None
        if self.name is None:
            return systems
        wanted = str(self.name).lower()
        matches = [s for s in systems if str(s['name']).lower() == wanted]
        return matches if matches else None
class StarmapTunnels(ICommand):
    """Look up jump-point tunnels on the starmap, optionally by identifier."""
    def __init__(self, tid: str = None):
        """
        Args:
            tid (str, optional): The tunnel identifier (tid). Default to None.
        """
        self.tid = tid
    async def execute_async(self):
        return self.execute()
    def execute(self):
        # Fetch every known tunnel, then narrow down if a tid was given.
        tunnels = get_data("tunnels")
        if tunnels is None:
            return None
        if self.tid is None:
            return tunnels
        wanted = str(self.tid).lower()
        hits = [t for t in tunnels if str(t['id']).lower() == wanted]
        return hits if hits else None
class StarmapSpecies(ICommand):
    """Look up species on the starmap, optionally filtered by name or code."""
    def __init__(self, name: str = None):
        """
        Args:
            name (str, optional): The name of the specie. Default to None.
        """
        self.name = name
    async def execute_async(self):
        return self.execute()
    def execute(self):
        # Fetch every known species, then narrow down if a name was given.
        species = get_data("species")
        if species is None:
            return None
        if self.name is None:
            return species
        wanted = str(self.name).lower()
        hits = [s for s in species
                if str(s['name']).lower() == wanted or str(s['code']).lower() == wanted]
        return hits if hits else None
class StarmapAffiliations(ICommand):
    """Look up affiliations on the starmap, optionally by name or code."""
    def __init__(self, name: str = None):
        """
        Args:
            name (str, optional): The name of the affiliation. Default to None.
        """
        self.name = name
    async def execute_async(self):
        return self.execute()
    def execute(self):
        """Return all affiliations, or those matching self.name, or None."""
        affiliations = get_data("affiliations")
        if affiliations is None:
            return None
        if self.name is None:
            res = affiliations
        else:
            # CONSISTENCY FIX: match case-insensitively on name or code, like
            # the other Starmap* lookups (this was the only case-sensitive
            # filter in the module).
            wanted = str(self.name).lower()
            res = [a for a in affiliations
                   if str(a['name']).lower() == wanted or str(a['code']).lower() == wanted]
        if len(res) == 0:
            return None
        return res
class StarmapStarSystems(ICommand):
    """Fetch a single star-system record by its code."""
    __url_star_system = "https://robertsspaceindustries.com/api/starmap/star-systems/{0}"
    def __init__(self, code: str):
        """
        Args:
            code (str): The code of the star system.
        """
        self.code = code
    async def execute_async(self):
        return self.execute()
    def execute(self):
        # Query the star-system endpoint and unwrap the result set.
        response = Connector().request(self.__url_star_system.format(self.code))
        if response is None or response.status_code != 200:
            return None
        payload = response.json()
        if payload['success'] != 1 or "data" not in payload:
            return None
        return payload['data']['resultset']
class StarmapCelestialObjects(ICommand):
    """Fetch a single celestial-object record by its code."""
    __url_celestial = "https://robertsspaceindustries.com/api/starmap/celestial-objects/{0}"
    def __init__(self, code: str):
        """
        Args:
            code (str): The code field of the object.
        """
        self.code = code
    async def execute_async(self):
        return self.execute()
    def execute(self):
        # Query the celestial-objects endpoint and unwrap the result set.
        response = Connector().request(self.__url_celestial.format(self.code))
        if response is None or response.status_code != 200:
            return None
        payload = response.json()
        if payload['success'] != 1 or "data" not in payload:
            return None
        return payload['data']['resultset']
class StarmapSearch(ICommand):
    """Free-text search over starmap systems and celestial objects."""
    __url_find = "https://robertsspaceindustries.com/api/starmap/find"
    def __init__(self, search: str):
        """
        Args:
            search (str): The search.
        """
        self.search = search
    async def execute_async(self):
        return self.execute()
    def execute(self):
        # POST the query and split the answer into systems and objects.
        response = Connector().request(self.__url_find, {"query": self.search})
        if response is None or response.status_code != 200:
            return None
        payload = response.json()
        if payload['success'] != 1 or "data" not in payload:
            return None
        return {
            "systems": payload['data']['systems']['resultset'],
            "objects": payload['data']['objects']['resultset'],
        }
class StarmapRouteSearch(ICommand):
    """Find travel routes between two starmap objects."""
    __url_find = "https://robertsspaceindustries.com/api/starmap/routes/find"
    def __init__(self, _from: str, to: str, ship_size: str):
        """
        Args:
            _from (str): The object code for the departing system
            to (str): The object code for the destination system
            ship_size (str): The size of the ship traveling. Valid values are S, M, L
        """
        self._from = _from
        self._to = to
        self._size = ship_size
    async def execute_async(self):
        return self.execute()
    def execute(self):
        # POST the route query and unwrap both route variants.
        query = {
            "departure": self._from,
            "destination": self._to,
            "ship_size": self._size
        }
        response = Connector().request(self.__url_find, query)
        if response is None or response.status_code != 200:
            return None
        payload = response.json()
        if payload['success'] != 1 or "data" not in payload:
            return None
        return {
            "shortest": payload['data']['shortest'],
            "leastjumps": payload['data']['leastjumps'],
        }
__url_systems = "https://robertsspaceindustries.com/api/starmap/bootup"
def get_data(field: str):
    """Fetch the starmap bootup payload and return one of its result sets.
    Args:
        field (str): The top-level field whose resultset to return
    Returns:
        dict: The selected data, or None on any failure
    """
    response = Connector().request(__url_systems)
    if response is None or response.status_code != 200:
        return None
    payload = response.json()
    # reject unsuccessful or malformed responses
    if payload['success'] != 1 or "data" not in payload:
        return None
    return payload['data'][field]['resultset']
from .connector import Connector
from .interface import ICommand
class ProgressTracker(ICommand):
    """Fetch development teams from the RSI progress-tracker GraphQL API."""
    __url_progress_tracker = "https://robertsspaceindustries.com/graphql"
    def __init__(self, date_min="2020-01-01", date_max="2022-12-31"):
        """
        Args:
            date_min (str): The roadmap start date.
            date_max (str): The roadmap end date.
        """
        self.date_min = date_min
        self.date_max = date_max
        # pagination defaults for the GraphQL `teams` query
        self.limit = 20
        self.offset = 0
    async def execute_async(self):
        return self.execute()
    def execute(self):
        return self.get_teams()
    def get_teams(self):
        """Return the list of team metadata dicts, or None on any failure."""
        data = [
            {
                "operationName": "teams",
                "query": """query
                teams(
                    $startDate: String!,
                    $endDate: String!,
                    $search: String,
                    $teamSlug: String,
                    $deliverableSlug: String,
                    $projectSlugs: [String],
                    $disciplineSlugs: [String],
                    $sortBy: SortMethod,
                    $offset: Int,
                    $limit: Int
                ) {
                progressTracker {
                    teams(
                        startDate: $startDate
                        endDate: $endDate
                        search: $search
                        teamSlug: $teamSlug
                        deliverableSlug: $deliverableSlug
                        projectSlugs: $projectSlugs
                        disciplineSlugs: $disciplineSlugs
                        sortBy: $sortBy
                        offset: $offset
                        limit: $limit) {
                            totalCount
                            metaData {
                                ...Team
                                timeAllocations {
                                    ...TimeAllocation
                                    __typename
                                }
                                __typename
                            }
                            __typename
                        }
                        __typename
                    }
                }
                fragment Team on Team {
                    title
                    description
                    uuid
                    abbreviation
                    startDate
                    endDate
                    numberOfDeliverables
                    slug
                    __typename
                }
                fragment TimeAllocation on TimeAllocation {
                    startDate
                    endDate
                    uuid
                    partialTime
                    __typename
                }
                """,
                "variables": {
                    "startDate": self.date_min,
                    "endDate": self.date_max,
                    "offset": self.offset,
                    "limit": self.limit
                }
            }
        ]
        req = Connector().request(self.__url_progress_tracker, method="post", json_data=data)
        if req is None or req.status_code != 200:
            return None
        try:
            resp = req.json()
        except Exception as e:
            # best-effort: a non-JSON body is treated as a failed request
            print(e, flush=True)
            return None
        # BUG FIX: an empty response list previously fell through to resp[0]
        # below and raised IndexError; reject it explicitly.
        if len(resp) == 0 or 'data' not in resp[0]:
            return None
        return resp[0]['data']['progressTracker']['teams']['metaData']
class ProgressTrackerInfo(ICommand):
    """Fetch a team's deliverables and their discipline breakdown from the
    RSI progress-tracker GraphQL API."""
    __url_progress_tracker = "https://robertsspaceindustries.com/graphql"
    def __init__(self, slug: str, date_min="2020-01-01", date_max="2022-12-31"):
        """
        Args:
            slug (str): The slug identifier to get.
            date_min (str): The roadmap start date.
            date_max (str): The roadmap end date.
        """
        self.team_slug = slug
        self.date_min = date_min
        self.date_max = date_max
    async def execute_async(self):
        return self.execute()
    def execute(self):
        """Return the deliverables with a 'disciplines' entry attached,
        or None when the deliverables request fails."""
        deliverables = self.get_deliverables(self.team_slug)
        # ROBUSTNESS FIX: a failed request previously crashed the list
        # comprehension below with a TypeError on None.
        if deliverables is None:
            return None
        disciplines_slugs = [deliv['slug'] for deliv in deliverables]
        disciplines = self.get_disciplines(self.team_slug, disciplines_slugs)
        if disciplines is None:
            return deliverables
        # attach each deliverable's discipline breakdown (lists are parallel)
        for deliverable, discipline in zip(deliverables, disciplines):
            deliverable['disciplines'] = discipline
        return deliverables
    def get_deliverables(self, team_slug):
        """Return the team's deliverable metadata list, or None on failure."""
        data = [
            {
                "operationName": "deliverables",
                "query": """query
                deliverables(
                    $startDate: String!,
                    $endDate: String!,
                    $search: String,
                    $deliverableSlug: String,
                    $teamSlug: String,
                    $projectSlugs: [String],
                    $categoryIds: [Int],
                    $sortBy: SortMethod,
                    $offset: Int,
                    $limit: Int
                ) {
                progressTracker {
                    deliverables(
                        startDate: $startDate
                        endDate: $endDate
                        search: $search
                        deliverableSlug: $deliverableSlug
                        teamSlug: $teamSlug
                        projectSlugs: $projectSlugs
                        categoryIds: $categoryIds
                        sortBy: $sortBy
                        offset: $offset
                        limit: $limit
                        ) {
                            totalCount
                            metaData {
                                ...Deliverable card {
                                    ...Card
                                    __typename
                                }
                                projects {
                                    ...Project
                                    __typename
                                }
                                __typename
                            }
                            __typename
                        }
                        __typename
                    }
                }
                fragment Deliverable on Deliverable {
                    uuid
                    slug
                    title
                    description
                    startDate
                    endDate
                    numberOfDisciplines
                    numberOfTeams
                    updateDate
                    totalCount
                    __typename
                }
                fragment Card on Card {
                    id
                    title
                    description
                    category
                    release {
                        id
                        title
                        __typename
                    }
                    board {
                        id
                        title
                        __typename
                    }
                    updateDate
                    thumbnail
                    __typename
                }
                fragment Project on Project {
                    title
                    logo
                    __typename
                }
                """,
                "variables": {
                    "teamSlug": team_slug,
                    "startDate": self.date_min,
                    "endDate": self.date_max,
                }
            }
        ]
        req = Connector().request(self.__url_progress_tracker, method="post", json_data=data)
        if req is None or req.status_code != 200:
            return None
        try:
            resp = req.json()
        except Exception as e:
            print(e, flush=True)
            return None
        # BUG FIX: an empty response list previously fell through to resp[0]
        # below and raised IndexError; reject it explicitly.
        if len(resp) == 0 or 'data' not in resp[0]:
            return None
        return resp[0]['data']['progressTracker']['deliverables']['metaData']
    def get_disciplines(self, team_slug: str, delivery_slugs: list):
        """Return one disciplines record per deliverable slug, or None.

        Note: the annotation was `list()` (an empty-list *instance* used as
        an annotation); fixed to the `list` type.
        """
        data = []
        # one GraphQL operation per deliverable, batched into a single POST
        for del_slug in delivery_slugs:
            data.append({
                "operationName": "disciplines",
                "query": """query
                disciplines($teamSlug: String! $deliverableSlug: String! $startDate: String! $endDate: String! ) {
                    progressTracker {
                        disciplines(teamSlug: $teamSlug deliverableSlug: $deliverableSlug startDate: $startDate endDate: $endDate ) {
                            ...Discipline
                            timeAllocations {
                                ...TimeAllocation
                                __typename
                            }
                            __typename
                        }
                        __typename
                    }
                }
                fragment Discipline on Discipline {
                    title
                    color
                    uuid
                    numberOfMembers
                    __typename
                }
                fragment TimeAllocation on TimeAllocation {
                    startDate
                    endDate
                    uuid
                    partialTime
                    __typename
                }
                """,
                "variables": {
                    "teamSlug": team_slug,
                    "deliverableSlug": del_slug,
                    "startDate": self.date_min,
                    "endDate": self.date_max,
                }
            })
        req = Connector().request(self.__url_progress_tracker, method="post", json_data=data)
        if req is None or req.status_code != 200:
            return None
        try:
            resp = req.json()
        except Exception as e:
            print(e, flush=True)
            return None
        # BUG FIX: same empty-response guard as above.
        if len(resp) == 0 or 'data' not in resp[0]:
            return None
        return [d['data']['progressTracker']['disciplines'][0] for d in resp]
import logging
import os.path
import struct
from time import sleep
from typing import List, Union, Tuple
from rsl_comm_py.um7_serial import RslException
class SpiCommunication:
    """Register-level SPI access shared by all concrete SPI transports.

    Subclasses must implement :meth:`xfer`, the raw full-duplex transfer of a
    list of bytes.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(**kwargs)
    def connect(self, *args, **kwargs):
        """Open the underlying SPI device; no-op in the base class."""
        pass
    def xfer(self, msg):
        # BUG FIX: `NotImplemented` is a value, not an exception class;
        # raising it actually raises a TypeError. Use NotImplementedError.
        raise NotImplementedError("This method should be implemented in child classes!")
    def spi_xfer(self, msg):
        """Perform one transfer via :meth:`xfer` and log the exchange."""
        # self.ssn_pin.state = False
        # sleep(0.05)
        response = self.xfer(msg)
        logging.info(f'msg: {msg}\t\tresponse: {response}')
        # sleep(0.01)
        # self.ssn_pin.state = True
        return response
    def read_register(self, reg_addr: int, **kw) -> Tuple[bool, bytes]:
        """Read one 32-bit register.

        Returns:
            (True, 4 payload bytes) -- the first two response bytes echo the
            command/address and are stripped.
        """
        msg = [0x00, reg_addr] + [0x00] * 4
        response = self.spi_xfer(msg)
        return True, bytes(response[2:])
    def write_register(self, reg_addr: int, reg_value: Union[int, bytes, float], **kw):
        """Write one 32-bit register; the value is serialized big-endian.

        Accepts a float (packed as IEEE-754 single), raw bytes, or an int.
        """
        # isinstance (rather than type(...) ==) also accepts subclasses
        if isinstance(reg_value, float):
            reg_value = struct.pack('>f', reg_value)
        elif isinstance(reg_value, bytes):
            reg_value = list(reg_value)
        elif isinstance(reg_value, int):
            reg_value = int.to_bytes(reg_value, length=4, byteorder='big', signed=False)
        msg = [0x01, reg_addr] + list(reg_value)
        self.spi_xfer(msg)
        return True
    def read_consecutive_registers(self, reg_addr: int, num_registers: int):
        """Burst-read `num_registers` consecutive 32-bit registers."""
        msg = [0x00, reg_addr] + [0x00] * 4 * num_registers
        response = self.spi_xfer(msg)
        return True, bytes(response[2:])
class RslSpiLinuxPort(SpiCommunication):
    # SPI transport backed by the Linux spidev kernel driver.
    def __init__(self, *args, **kwargs):
        # Imported lazily so the module stays importable on non-Linux hosts.
        import spidev
        # NOTE(review): args/kwargs are forwarded positionally (not unpacked
        # with * / **); harmless with the current base __init__, but confirm
        # this was intended.
        super().__init__(args, kwargs)
        # Default to bus 0, chip-select 0 -> /dev/spidev0.0
        self.bus = kwargs.get('bus') if kwargs.get('bus') is not None else 0
        self.device = kwargs.get('device') if kwargs.get('device') is not None else 0
        self.spi_device_path = f'/dev/spidev{self.bus}.{self.device}'
        if not os.path.exists(self.spi_device_path):
            raise RslException(f'SPI device not found: {self.spi_device_path}')
        self.spi = spidev.SpiDev()
        self.connect()
    def connect(self, *args, **kwargs):
        # Open the spidev node and cap the SPI clock at 500 kHz.
        self.spi.open(self.bus, self.device)
        self.spi.max_speed_hz = 500000
    def xfer(self, bytes_to_send: List[int]) -> List[int]:
        # Full-duplex transfer of a copy of the message.
        # NOTE(review): this returns the copied TX buffer rather than
        # spidev's return value -- confirm spidev.xfer mutates the list in
        # place on this platform; otherwise received data is discarded.
        tx_bytes = bytes_to_send[:]
        self.spi.xfer(tx_bytes)
        return tx_bytes
class RslSpiUsbIss(SpiCommunication):
    # SPI transport via a USB-ISS USB-to-SPI adapter (usb_iss package).
    def __init__(self, **kwargs):
        # Imported lazily so the module stays importable without usb_iss.
        from usb_iss import UsbIss
        super().__init__(**kwargs)
        self.iss = UsbIss()
        # USB-ISS enumerates as a CDC-ACM serial device; default to ACM0.
        self.port = kwargs.get('port') if kwargs.get('port') is not None else '/dev/ttyACM0'
        import platform
        # The device-node existence check only makes sense on Linux.
        if platform.system().lower() == 'linux':
            if not os.path.exists(self.port):
                raise RslException(f'SPI port not found: {self.port}: USB-ISS needs to connect to ACM'
                                   f' port for SPI communication. The port does not exist, specify `port` argument!')
        self.connect()
    def connect(self, *args, **kwargs):
        # Open the serial port and configure SPI mode at 200 kHz.
        self.iss.open(self.port)
        self.iss.setup_spi(clock_khz=200)
    def xfer(self, bytes_to_send: List[int]) -> List[int]:
        # Full-duplex transfer; USB-ISS returns the bytes clocked in.
        recv_bytes = self.iss.spi.transfer(bytes_to_send)
        return recv_bytes
# Library module: no behavior when executed directly.
if __name__ == '__main__':
    pass
# Author: Dr. Konstantin Selyunin
# License: MIT
import textwrap
from typing import Tuple
from pathlib import Path
from jinja2 import Environment, DictLoader
from .rsl_xml_svd.rsl_svd_parser import Register, RslSvdParser
class RslGenerator(RslSvdParser):
    """Generate register-access property source code from SVD metadata.

    Renders Jinja2 templates (see ./templates) with register descriptions
    parsed by RslSvdParser to produce getter/setter properties per register.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def retrieve_payload_description(self, register: Register) -> str:
        """Build a human-readable bit-layout summary, one line per field."""
        payload_description = ''
        for field in register.fields:
            field_description = ""
            if field.bit_range[0] > field.bit_range[1]:
                # multi-bit field: render as [msb:lsb]
                field_description += f"[{field.bit_range[0]}:{field.bit_range[1]}]"
            else:
                # single-bit field
                field_description += f"[{field.bit_range[0]}]"
            # pad the bit-range column to 8 characters
            indent_spaces = 8 - len(field_description)
            field_description += " " * indent_spaces + f": {field.name} -- {field.description}\n"
            payload_description += field_description
        # strip the trailing newline
        return payload_description[:-1]
    @staticmethod
    def render_template_to_str(template_file: Path, params_dict: dict) -> str:
        """Render the given Jinja2 template file with `params_dict`."""
        if not template_file.exists():
            raise FileNotFoundError("Template file to render is not found!")
        with open(template_file, 'r') as fd:
            jinja_template = fd.read()
        template = Environment(loader=DictLoader({'jinja_template': jinja_template}))
        return template.get_template('jinja_template').render(params_dict)
    def retrieve_return_description(self, register: Register):
        """Summarize the returned fields as 'NAME as TYPE; ...'."""
        return_description = ' '
        for idx, field in enumerate(register.fields):
            return_description += f"{field.name} as {field.data_type}; "
        return return_description
    def get_struct_fmt_for_data_type(self, el: str):
        """Map a C-style scalar type name to its `struct` format character."""
        data_types_mapping = {
            'uint8_t': 'B',
            'int8_t': 'b',
            'uint16_t': 'H',
            'int16_t': 'h',
            'uint32_t': 'I',
            'int32_t': 'i',
            'uint64_t': 'Q',
            'int64_t': 'q',
            'float': 'f',
            'double': 'd',
            'string': 's'
        }
        return data_types_mapping.get(el)
    def get_struct_fmt_for_register(self, register: Register):
        """Derive the big-endian `struct` format covering a register's
        packed (non-bitField) fields, with 'x' pad bytes for gaps."""
        struct_fmt = '>'  # big-endian
        # bit numbers of each byte of the 32-bit register, MSB byte first
        dword_bit_sets = set(range(31, 24, -1)), set(range(23, 16, -1)), set(range(15, 8, -1)), set(range(7, 0, -1))
        packed_fields = [field for field in register.fields if field.data_type != 'bitField']
        fields = [None] * 4
        # assign to each byte the packed field (if any) that fully covers it
        for idx, bits in enumerate(dword_bit_sets):
            for field in packed_fields:
                if bits.issubset(set(range(*field.bit_range, -1))):
                    fields[idx] = field.name
        # remove duplicates in field names while keeping the MSB..LSB order
        fields_name_uniq = []
        field_name_seen = set()
        for field in fields:
            if field is None:
                fields_name_uniq.append(field)
            elif field in field_name_seen:
                continue
            else:
                fields_name_uniq.append(field)
                field_name_seen.add(field)
        # iterate over uniq field names of packed (i.e. non-bitField data)
        for field_name in fields_name_uniq:
            if field_name is None:
                # byte not covered by any field -> pad byte
                struct_fmt += 'x'
            else:
                field = register.find_field_by(name=field_name)
                struct_fmt += self.get_struct_fmt_for_data_type(field.data_type)
        return struct_fmt
    def interpret_bitfields(self, register: Register) -> Tuple[str, str]:
        """Generate source that decodes every bitField of `register`.

        Returns:
            (comma-joined return-variable names, generated source code)
        """
        generated_code = ""
        return_vars = ['reg']
        register_bitfields = [field for field in register.fields if field.data_type == 'bitField']
        generated_code += f"reg.raw_value, = struct.unpack('>I', payload[0:4])\n"
        for field in register_bitfields:
            generated_code += f"# find value for {field.name} bit field\n"
            # mask with as many ones as the field is wide
            bit_mask = 2 ** (field.bit_range[0] - field.bit_range[1] + 1) - 1
            field_value_var = f"{field.name.lower()}_val"
            generated_code += f"{field_value_var} = (reg.raw_value >> {field.bit_range[1]}) & 0x{bit_mask:04X}\n"
            return_var = f"{field.name.lower()}_enum"
            return_vars.append(return_var)
            generated_code += f"{return_var} = reg.find_field_by(name='{field.name}')"\
                              f".find_enum_entry_by(value={field_value_var})\n"
        return ", ".join(return_vars), generated_code
    def interpret_packed_data(self, register: Register) -> Tuple[str, str]:
        """Generate source that struct-unpacks the non-bitField fields."""
        return_vars = tuple(field.name.lower() for field in register.fields if field.data_type != 'bitField')
        generated_code = ''
        if len(return_vars) == 1:
            # NOTE(review): this branch pads return_vars with an empty name
            # but the generated code assigns only reg.raw_value -- confirm
            # the consuming template handles the single-field case.
            return_vars = tuple((*return_vars, ''))
            generated_code += f"reg.raw_value, = struct.unpack('{self.get_struct_fmt_for_register(register)}', payload[0:4])\n"
        else:
            generated_code += f"reg.raw_value, = struct.unpack('>I', payload[0:4])\n"
            generated_code += ", ".join(return_vars) + \
                f" = struct.unpack('{self.get_struct_fmt_for_register(register)}', payload[0:4])"
        return "reg, " + ", ".join(return_vars), generated_code
    def interpret_string_data(self, register: Register) -> Tuple[str, str]:
        """Generate source that decodes a 4-byte string register."""
        return_vars = tuple(field.name.lower() for field in register.fields)
        if len(return_vars) > 1:
            raise NotImplementedError(f"Multiple string fields in register is not supported! Check {register.name}!")
        field_name = return_vars[0]
        generated_code = f"reg.raw_value, = struct.unpack('>I', payload[0:4])\n"
        generated_code += f"{field_name} = struct.unpack('>4s', payload[0:4])[0].decode('utf-8')"
        return f'{field_name}', generated_code
    def interpret_payload(self, register: Register) -> Tuple[str, str]:
        """Dispatch payload decoding by the register's mix of field types."""
        field_type = [el.data_type for el in register.fields]
        if all([el == 'bitField' for el in field_type]):
            # all fields are bitfields
            return self.interpret_bitfields(register)
        elif not any([el == 'bitField' for el in field_type]) and not any([el == 'string' for el in field_type]):
            # all fields are of: uint8, int8, uint16, int16, uint32, int32, or float, i.e. in struct.fmt
            return self.interpret_packed_data(register)
        elif all([el == 'string' for el in field_type]):
            # fields are strings
            return self.interpret_string_data(register)
        elif any([el == 'bitField' for el in field_type]) and not any([el == 'string' for el in field_type]):
            # fields are combination of "bitFields" with types: uint8, int8, uint16, int16, uint32, int32, or float
            return_vars_packed, generated_code_packed = self.interpret_packed_data(register)
            return_vars_bitfields, generated_code_bitfields = self.interpret_bitfields(register)
            return return_vars_packed + ', ' + return_vars_bitfields, \
                generated_code_packed + '\n' + generated_code_bitfields
        else:
            return '', f'Not Implemented for {register}'
    def create_getter_property(self, register: Register, is_hidden: bool = False) -> str:
        """Render the getter property source for `register`."""
        return_vars, generated_code = self.interpret_payload(register)
        params_dict = {
            'register_name': register.name.lower(),
            'register_svd_name': register.name,
            'comment_short': textwrap.indent("\n".join(textwrap.wrap(register.description, 110)), ' ' * 4),
            'payload_structure_description': textwrap.indent(self.retrieve_payload_description(register), ' ' * 4),
            'return_field_description': self.retrieve_return_description(register),
            'register_addr': register.address,
            'hidden': is_hidden,
            'interpreted_receive_fields': textwrap.indent(generated_code, ' ' * 8),
            'return_values': return_vars
        }
        # templates live next to this module
        script_folder = Path(__file__).parent
        getter_template_file = script_folder / 'templates' / 'getter_template.jinja2'
        getter = RslGenerator.render_template_to_str(getter_template_file, params_dict)
        return getter
    def create_default_getter(self, register: Register) -> str:
        """Render a stub getter for write-only registers."""
        params_dict = {
            'register_name': register.name.lower()
        }
        script_folder = Path(__file__).parent
        no_getter_template_file = script_folder / 'templates' / 'no_getter_template.jinja2'
        no_getter = RslGenerator.render_template_to_str(no_getter_template_file, params_dict)
        return no_getter
    def create_setter_property(self, register: Register, is_hidden: bool = False):
        """Render the setter property source for `register`."""
        params_dict = {
            'register_name': register.name.lower(),
            'register_addr': register.address,
            'hidden': is_hidden
        }
        script_folder = Path(__file__).parent
        setter_template_file = script_folder / 'templates' / 'setter_template.jinja2'
        setter = RslGenerator.render_template_to_str(setter_template_file, params_dict)
        return setter
    def generate_props_for_register(self, register: Register, is_hidden: bool = False):
        """Render getter and/or setter depending on the register's access."""
        if register.access == 'read-only':
            return self.create_getter_property(register, is_hidden)
        elif register.access == 'write-only':
            return self.create_default_getter(register) + \
                self.create_setter_property(register, is_hidden)
        elif register.access == 'read-write':
            return self.create_getter_property(register, is_hidden) + \
                self.create_setter_property(register, is_hidden)
        else:
            raise NotImplementedError(f"register access can only be: `read-write`, `read-only`, `write-only`, "
                                      f"but you provide: {register.access}")
    def generate_props_for_main_register_map(self):
        """Concatenate generated properties for all visible registers."""
        generated_main_register_map = ''
        for reg in self.regs:
            generated_main_register_map += self.generate_props_for_register(reg, is_hidden=False)
        return generated_main_register_map
    def generate_props_for_hidden_registers(self):
        """Concatenate generated properties for all hidden registers."""
        generated_hidden_register_map = ''
        for reg in self.hidden_regs:
            generated_hidden_register_map += self.generate_props_for_register(reg, is_hidden=True)
        return generated_hidden_register_map
# Library module: no behavior when executed directly.
if __name__ == '__main__':
    pass
from dataclasses import dataclass
@dataclass
class ShearWaterAllRawPacket:
    """One raw (unscaled) sensor sample from the ShearWater board.

    Every sensor reading is accompanied by its own on-board timestamp
    (the *_time fields). Values are raw sensor counts, not physical units.
    """
    # gyro 1 raw counts per axis + sample timestamp
    gyro_1_raw_x: int
    gyro_1_raw_y: int
    gyro_1_raw_z: int
    gyro_1_raw_time: float
    # gyro 2 raw counts per axis + sample timestamp
    gyro_2_raw_x: int
    gyro_2_raw_y: int
    gyro_2_raw_z: int
    gyro_2_raw_time: float
    # accelerometer raw counts per axis + sample timestamp
    accel_1_raw_x: int
    accel_1_raw_y: int
    accel_1_raw_z: int
    accel_1_raw_time: float
    # magnetometer 1 raw counts per axis + sample timestamp
    mag_1_raw_x: int
    mag_1_raw_y: int
    mag_1_raw_z: int
    mag_1_raw_time: float
    # magnetometer 2 raw counts per axis + sample timestamp
    mag_2_raw_x: int
    mag_2_raw_y: int
    mag_2_raw_z: int
    mag_2_raw_time: float
    # temperature reading + its timestamp
    temperature: float
    temperature_time: float
    def __repr__(self):
        # Compact fixed-width single-line view (overrides the dataclass repr).
        return f"RawPacket("\
               f"gyro_1=[{self.gyro_1_raw_x:>+5d}, {self.gyro_1_raw_y:>+5d}, {self.gyro_1_raw_z:>+5d}], "\
               f"gyro_1_t={self.gyro_1_raw_time:>6.3f}; " \
               f"gyro_2=[{self.gyro_2_raw_x:>+5d}, {self.gyro_2_raw_y:>+5d}, {self.gyro_2_raw_z:>+5d}], " \
               f"gyro_2_t={self.gyro_2_raw_time:>6.3f}; " \
               f"accel_1=[{self.accel_1_raw_x:>+5d}, {self.accel_1_raw_y:>+5d}, {self.accel_1_raw_z:>+5d}], " \
               f"accel_1_t={self.accel_1_raw_time:>6.3f}; " \
               f"mag_1=[{self.mag_1_raw_x:>+8d}, {self.mag_1_raw_y:>+8d}, {self.mag_1_raw_z:>+8d}], " \
               f"mag_1_t={self.mag_1_raw_time:>6.3f}; " \
               f"mag_2=[{self.mag_2_raw_x:>+5d}, {self.mag_2_raw_y:>+5d}, {self.mag_2_raw_z:>+5d}], " \
               f"mag_2_t={self.mag_2_raw_time:>6.3f}; " \
               f"T={self.temperature:>+3.2f}, " \
               f"T_t={self.temperature_time:>6.3f})"
    def to_csv(self):
        # One semicolon-separated row matching csv_header(), newline-terminated.
        return f"{self.gyro_1_raw_x:>+5d};{self.gyro_1_raw_y:>+5d};{self.gyro_1_raw_z:>+5d};" \
               f"{self.gyro_1_raw_time:>6.5f};" \
               f"{self.gyro_2_raw_x:>+5d};{self.gyro_2_raw_y:>+5d};{self.gyro_2_raw_z:>+5d};" \
               f"{self.gyro_2_raw_time:>6.5f};" \
               f"{self.accel_1_raw_x:>+5d};{self.accel_1_raw_y:>+5d};{self.accel_1_raw_z:>+5d};" \
               f"{self.accel_1_raw_time:>6.5f};" \
               f"{self.mag_1_raw_x:>+8d};{self.mag_1_raw_y:>+8d};{self.mag_1_raw_z:>+8d};" \
               f"{self.mag_1_raw_time:>6.5f};" \
               f"{self.mag_2_raw_x:>+5d};{self.mag_2_raw_y:>+5d};{self.mag_2_raw_z:>+5d};" \
               f"{self.mag_2_raw_time:>6.5f};" \
               f"{self.temperature:>+3.2f};" \
               f"{self.temperature_time:>6.5f};\n"
    @staticmethod
    def csv_header():
        # Column names corresponding to the to_csv() row layout.
        return "gyro_1_raw_x;gyro_1_raw_y;gyro_1_raw_z;gyro_1_raw_time;" \
               "gyro_2_raw_x;gyro_2_raw_y;gyro_2_raw_z;gyro_2_raw_time;" \
               "accel_1_raw_x;accel_1_raw_y;accel_1_raw_z;accel_1_raw_time;" \
               "mag_1_raw_x;mag_1_raw_y;mag_1_raw_z;mag_1_raw_time;" \
               "mag_2_raw_x;mag_2_raw_y;mag_2_raw_z;mag_2_raw_time;" \
               "temperature;temperature_time;\n"
@dataclass
class ShearWaterAllProcPacket:
    """All processed broadcast channels of the shearwater board in one packet.

    Holds one processed x/y/z sample plus a per-sensor time stamp for both
    gyros, accelerometer 1 and both magnetometers; the magnetometer groups
    also carry the field norm. Units are firmware-defined -- confirm against
    the device documentation.
    """
    gyro_1_proc_x: float
    gyro_1_proc_y: float
    gyro_1_proc_z: float
    gyro_1_proc_time: float
    gyro_2_proc_x: float
    gyro_2_proc_y: float
    gyro_2_proc_z: float
    gyro_2_proc_time: float
    accel_1_proc_x: float
    accel_1_proc_y: float
    accel_1_proc_z: float
    accel_1_proc_time: float
    mag_1_proc_x: float
    mag_1_proc_y: float
    mag_1_proc_z: float
    mag_1_norm: float
    mag_1_proc_time: float
    mag_2_proc_x: float
    mag_2_proc_y: float
    mag_2_proc_z: float
    mag_2_norm: float
    mag_2_proc_time: float

    def __repr__(self):
        """Return a one-line, fixed-width summary of the packet."""
        # NOTE(review): mag_1 is printed with 6 decimals but mag_2 with only 3;
        # kept as-is since existing logs/parsers may rely on the exact widths.
        return f"ProcPacket("\
               f"gyro_1=[{self.gyro_1_proc_x:>+8.3f}, {self.gyro_1_proc_y:>+8.3f}, {self.gyro_1_proc_z:>+8.3f}], "\
               f"gyro_1_t={self.gyro_1_proc_time:>6.3f}; " \
               f"gyro_2=[{self.gyro_2_proc_x:>+8.3f}, {self.gyro_2_proc_y:>+8.3f}, {self.gyro_2_proc_z:>+8.3f}], " \
               f"gyro_2_t={self.gyro_2_proc_time:>6.3f}; " \
               f"accel_1=[{self.accel_1_proc_x:>+8.3f}, {self.accel_1_proc_y:>+8.3f}, {self.accel_1_proc_z:>+8.3f}], " \
               f"accel_1_t={self.accel_1_proc_time:>6.3f}; " \
               f"mag_1=[{self.mag_1_proc_x:>+8.6f}, {self.mag_1_proc_y:>+8.6f}, {self.mag_1_proc_z:>+8.6f}], " \
               f"mag_1_norm={self.mag_1_norm:>+8.6f}, " \
               f"mag_1_t={self.mag_1_proc_time:>6.3f}; " \
               f"mag_2=[{self.mag_2_proc_x:>+8.3f}, {self.mag_2_proc_y:>+8.3f}, {self.mag_2_proc_z:>+8.3f}], " \
               f"mag_2_norm={self.mag_2_norm:>+8.3f}, " \
               f"mag_2_t={self.mag_2_proc_time:>6.3f})"

    def to_csv(self):
        """Serialize the packet as one semicolon-separated CSV row.

        Every cell, including the last, is terminated by ';'; the row ends
        with a newline. Column order matches csv_header().
        """
        return f"{self.gyro_1_proc_x:>+8.3f};{self.gyro_1_proc_y:>+8.3f};{self.gyro_1_proc_z:>+8.3f};" \
               f"{self.gyro_1_proc_time:>6.5f};" \
               f"{self.gyro_2_proc_x:>+8.3f};{self.gyro_2_proc_y:>+8.3f};{self.gyro_2_proc_z:>+8.3f};" \
               f"{self.gyro_2_proc_time:>6.5f};" \
               f"{self.accel_1_proc_x:>+8.3f};{self.accel_1_proc_y:>+8.3f};{self.accel_1_proc_z:>+8.3f};" \
               f"{self.accel_1_proc_time:>6.5f};" \
               f"{self.mag_1_proc_x:>+8.6f};{self.mag_1_proc_y:>+8.6f};{self.mag_1_proc_z:>+8.6f};" \
               f"{self.mag_1_norm:>+8.6f};" \
               f"{self.mag_1_proc_time:>6.5f};" \
               f"{self.mag_2_proc_x:>+8.3f};{self.mag_2_proc_y:>+8.3f};{self.mag_2_proc_z:>+8.3f};" \
               f"{self.mag_2_norm:>+8.3f};" \
               f"{self.mag_2_proc_time:>6.5f};\n"

    @staticmethod
    def csv_header():
        """Return the CSV column names matching to_csv(), in the same order."""
        # Fixed: the mag_2 norm column was previously mislabeled "mag_1_norm",
        # producing a duplicate header name and no "mag_2_norm" column.
        return "gyro_1_proc_x;gyro_1_proc_y;gyro_1_proc_z;gyro_1_proc_time;" \
               "gyro_2_proc_x;gyro_2_proc_y;gyro_2_proc_z;gyro_2_proc_time;" \
               "accel_1_proc_x;accel_1_proc_y;accel_1_proc_z;accel_1_proc_time;" \
               "mag_1_proc_x;mag_1_proc_y;mag_1_proc_z;mag_1_norm;mag_1_proc_time;" \
               "mag_2_proc_x;mag_2_proc_y;mag_2_proc_z;mag_2_norm;mag_2_proc_time;\n"
@dataclass
class ShearWaterEulerPacket:
    """Attitude as Euler angles plus their rates and a time stamp.

    Angle and rate units are firmware-defined (degrees, presumably) -- confirm
    against the device documentation.
    """
    roll: float
    pitch: float
    yaw: float
    roll_rate: float
    pitch_rate: float
    yaw_rate: float
    time_stamp: float

    def __repr__(self):
        """Return a one-line, fixed-width summary of the packet."""
        named = (
            ("roll", self.roll), ("pitch", self.pitch), ("yaw", self.yaw),
            ("roll_rate", self.roll_rate), ("pitch_rate", self.pitch_rate),
            ("yaw_rate", self.yaw_rate),
        )
        body = "; ".join(f"{name}={value:>+8.3f}" for name, value in named)
        return f"EulerPacket({body}; time_stamp={self.time_stamp:>6.3f})"

    def to_csv(self):
        """Serialize the packet as one semicolon-separated CSV row."""
        values = (self.roll, self.pitch, self.yaw,
                  self.roll_rate, self.pitch_rate, self.yaw_rate)
        angles = ";".join(f"{v:>+8.3f}" for v in values)
        return f"{angles};{self.time_stamp:>6.5f};\n"

    @staticmethod
    def csv_header():
        """Return the CSV column names matching to_csv(), in the same order."""
        return ";".join(("roll", "pitch", "yaw", "roll_rate",
                         "pitch_rate", "yaw_rate", "time_stamp")) + ";\n"
@dataclass
class ShearWaterHealthPacket:
    """Sensor health/status word; each bit flags one condition.

    Bit meanings below are those used by the display code; exact firmware
    semantics should be confirmed against the device documentation.
    """
    health: int  # packed status bits (bit 8 .. bit 0)

    def __repr__(self):
        """Render the raw word in hex plus every named flag as a boolean."""
        # (flag name, bit position) in display order, MSB first.
        flag_bits = (
            ("OVF", 8), ("ACC1_N", 7), ("MAG1_N", 6), ("MAG2_N", 5),
            ("ACCEL1", 4), ("GYRO1", 3), ("GYRO2", 2), ("MAG1", 1), ("MAG2", 0),
        )
        flags = ", ".join(
            f"{name}={bool((self.health >> bit) & 0x01)}"
            for name, bit in flag_bits
        )
        return f"HealthPacket(raw_value=0x{self.health:04X}; {flags})"
@dataclass
class ShearWaterRawAccel1Packet:
    """Broadcast packet: one raw accelerometer-1 sample (x/y/z) and its time stamp."""
    accel_1_raw_x: int  # raw x-axis reading (sensor counts, presumably -- confirm units)
    accel_1_raw_y: int  # raw y-axis reading
    accel_1_raw_z: int  # raw z-axis reading
    accel_1_raw_time: float  # sample time stamp (time base firmware-defined)
@dataclass
class ShearWaterRawGyro1Packet:
    """Broadcast packet: one raw gyro-1 sample (x/y/z) and its time stamp."""
    gyro_1_raw_x: int  # raw x-axis reading (sensor counts, presumably -- confirm units)
    gyro_1_raw_y: int  # raw y-axis reading
    gyro_1_raw_z: int  # raw z-axis reading
    gyro_1_raw_time: float  # sample time stamp (time base firmware-defined)
@dataclass
class ShearWaterRawGyro2Packet:
    """Broadcast packet: one raw gyro-2 sample (x/y/z) and its time stamp."""
    gyro_2_raw_x: int  # raw x-axis reading (sensor counts, presumably -- confirm units)
    gyro_2_raw_y: int  # raw y-axis reading
    gyro_2_raw_z: int  # raw z-axis reading
    gyro_2_raw_time: float  # sample time stamp (time base firmware-defined)
@dataclass
class ShearWaterRawMag1Packet:
    """Broadcast packet: one raw magnetometer-1 sample (x/y/z) and its time stamp."""
    mag_1_raw_x: int  # raw x-axis reading (sensor counts, presumably -- confirm units)
    mag_1_raw_y: int  # raw y-axis reading
    mag_1_raw_z: int  # raw z-axis reading
    mag_1_raw_time: float  # sample time stamp (time base firmware-defined)
@dataclass
class ShearWaterRawMag2Packet:
    """Broadcast packet: one raw magnetometer-2 sample (x/y/z) and its time stamp."""
    mag_2_raw_x: int  # raw x-axis reading (sensor counts, presumably -- confirm units)
    mag_2_raw_y: int  # raw y-axis reading
    mag_2_raw_z: int  # raw z-axis reading
    mag_2_raw_time: float  # sample time stamp (time base firmware-defined)
@dataclass
class ShearWaterTemperaturePacket:
    """Broadcast packet: one temperature reading and its time stamp."""
    temperature: float  # temperature value (units firmware-defined, degrees C presumably -- confirm)
    temperature_time: float  # sample time stamp
@dataclass
class ShearWaterProcAccel1Packet:
    """Broadcast packet: one processed accelerometer-1 sample (x/y/z) and its time stamp."""
    accel_1_proc_x: float  # processed x-axis value (units firmware-defined -- confirm)
    accel_1_proc_y: float  # processed y-axis value
    accel_1_proc_z: float  # processed z-axis value
    accel_1_proc_time: float  # sample time stamp
@dataclass
class ShearWaterProcGyro1Packet:
    """Broadcast packet: one processed gyro-1 sample (x/y/z) and its time stamp."""
    gyro_1_proc_x: float  # processed x-axis value (units firmware-defined -- confirm)
    gyro_1_proc_y: float  # processed y-axis value
    gyro_1_proc_z: float  # processed z-axis value
    gyro_1_proc_time: float  # sample time stamp
@dataclass
class ShearWaterProcGyro2Packet:
    """Broadcast packet: one processed gyro-2 sample (x/y/z) and its time stamp."""
    gyro_2_proc_x: float  # processed x-axis value (units firmware-defined -- confirm)
    gyro_2_proc_y: float  # processed y-axis value
    gyro_2_proc_z: float  # processed z-axis value
    gyro_2_proc_time: float  # sample time stamp
@dataclass
class ShearWaterProcMag1Packet:
    """Broadcast packet: one processed magnetometer-1 sample, its norm and time stamp."""
    mag_1_proc_x: float  # processed x-axis value (units firmware-defined -- confirm)
    mag_1_proc_y: float  # processed y-axis value
    mag_1_proc_z: float  # processed z-axis value
    mag_1_proc_norm: float  # norm of the processed field vector
    mag_1_proc_time: float  # sample time stamp
@dataclass
class ShearWaterProcMag2Packet:
    """Broadcast packet: one processed magnetometer-2 sample, its norm and time stamp."""
    mag_2_proc_x: float  # processed x-axis value (units firmware-defined -- confirm)
    mag_2_proc_y: float  # processed y-axis value
    mag_2_proc_z: float  # processed z-axis value
    mag_2_proc_norm: float  # norm of the processed field vector
    mag_2_proc_time: float  # sample time stamp
@dataclass
class ShearWaterQuaternionPacket:
    """Orientation quaternion (w, x, y, z) with a time stamp."""
    q_w: float
    q_x: float
    q_y: float
    q_z: float
    q_time: float

    def __repr__(self):
        """Return a one-line, fixed-precision summary of the packet."""
        comps = "; ".join(
            f"q_{axis}={value:>+3.5f}"
            for axis, value in zip("wxyz", (self.q_w, self.q_x, self.q_y, self.q_z))
        )
        return f"QuaternionPacket({comps}; time_stamp={self.q_time:>6.5f})"

    def to_csv(self):
        """Serialize the packet as one semicolon-separated CSV row."""
        comps = ";".join(
            f"{value:>+3.5f}" for value in (self.q_w, self.q_x, self.q_y, self.q_z)
        )
        return f"{comps};{self.q_time:>6.5f};\n"

    @staticmethod
    def csv_header():
        """Return the CSV column names matching to_csv(), in the same order."""
        return ";".join(("q_w", "q_x", "q_y", "q_z", "q_time")) + ";\n"
@dataclass
class ShearWaterEulerPosePacket:
    """Broadcast packet: attitude (Euler angles and rates) plus position, each time-stamped."""
    roll: float  # attitude angles; units firmware-defined (degrees, presumably) -- confirm
    pitch: float
    yaw: float
    roll_rate: float  # angular rates corresponding to the three angles above
    pitch_rate: float
    yaw_rate: float
    euler_time: float  # time stamp of the attitude estimate
    position_north: float  # position components; field names suggest a north/east/up frame
    position_east: float
    position_up: float
    position_time: float  # time stamp of the position estimate
@dataclass
class ShearWaterPosePacket:
    """Broadcast packet: position estimate and its time stamp."""
    position_north: float  # position components; field names suggest a north/east/up frame
    position_east: float
    position_up: float
    position_time: float  # time stamp of the position estimate
@dataclass
class ShearWaterVelocityPacket:
    """Broadcast packet: velocity estimate and its time stamp."""
    velocity_north: float  # velocity components; field names suggest a north/east/up frame
    velocity_east: float
    velocity_up: float
    velocity_time: float  # time stamp of the velocity estimate
@dataclass
class ShearWaterGyro1BiasPacket:
    """Broadcast packet: estimated gyro-1 bias along x/y/z (units firmware-defined -- confirm)."""
    gyro_1_bias_x: float  # x-axis bias estimate
    gyro_1_bias_y: float  # y-axis bias estimate
    gyro_1_bias_z: float  # z-axis bias estimate
@dataclass
class ShearWaterGyro2BiasPacket:
    """Broadcast packet: estimated gyro-2 bias along x/y/z (units firmware-defined -- confirm)."""
    gyro_2_bias_x: float  # x-axis bias estimate
    gyro_2_bias_y: float  # y-axis bias estimate
    gyro_2_bias_z: float  # z-axis bias estimate
# NOTE(review): dataset-viewer residue ("Subsets and Splits ...") removed --
# it was not part of this module and is invalid Python.