code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
# noqa: E501
from typing import Union, Optional
from remotior_sensus.core import configurations as cfg
from remotior_sensus.core.bandset_catalog import BandSet
from remotior_sensus.core.bandset_catalog import BandSetCatalog
from remotior_sensus.core.output_manager import OutputManager
from remotior_sensus.util import shared_tools, raster_vector
def band_resample(
        input_bands: Union[list, int, BandSet],
        output_path: Optional[str] = None,
        epsg_code: Optional[str] = None,
        align_raster: Optional[Union[str, BandSet, int]] = None,
        overwrite: Optional[bool] = False,
        resampling: Optional[str] = None,
        nodata_value: Optional[int] = None,
        x_y_resolution: Optional[Union[list, int]] = None,
        resample_pixel_factor: Optional[float] = None,
        output_data_type: Optional[str] = None,
        same_extent: Optional[bool] = False,
        virtual_output: Optional[bool] = None,
        compress=None, compress_format=None,
        prefix: Optional[str] = '',
        extent_list: Optional[list] = None,
        n_processes: Optional[int] = None,
        available_ram: Optional[int] = None,
        bandset_catalog: Optional[BandSetCatalog] = None
):
    """Performs band resample and reprojection.

    This tool performs the resampling and reprojection of raster bands.

    Available resampling methods are:

    - nearest_neighbour
    - average
    - sum
    - maximum
    - minimum
    - mode
    - median
    - first_quartile
    - third_quartile

    Args:
        input_bands: input of type BandSet or list of paths or integer
            number of BandSet.
        output_path: string of output path directory or list of paths.
        epsg_code: optional EPSG code for output.
        align_raster: string path of raster used for aligning output pixels
            and projections.
        overwrite: if True, output overwrites existing files.
        resampling: method of resample such as nearest_neighbour (default),
            average, sum, maximum, minimum, mode, median, first_quartile,
            third_quartile.
        nodata_value: value to be considered as nodata.
        x_y_resolution: integer pixel size of output raster or pixel size as
            list of x, y.
        resample_pixel_factor: define output resolution by multiplying
            original pixel size to this value.
        output_data_type: optional raster output data type, if None the data
            type is the same as input raster.
        same_extent: if True, output extent is the same as align_raster.
        virtual_output: if True (and output_path is directory), save output
            as virtual raster of multiprocess parts.
        prefix: optional string for output name prefix.
        extent_list: list of boundary coordinates left top right bottom.
        compress: if True, compress output.
        compress_format: format of compressions such as LZW or DEFLATE.
        n_processes: number of parallel processes.
        available_ram: number of megabytes of RAM available to processes.
        bandset_catalog: optional type BandSetCatalog for BandSet number.

    Returns:
        Object :func:`~remotior_sensus.core.output_manager.OutputManager` with

        - paths = output list

    Examples:
        Perform the band resample
            >>> band_resample(input_bands=['path_1', 'path_2'],
            ...     output_path='output_path', resampling='mode',
            ...     resample_pixel_factor=2)
    """  # noqa: E501
    cfg.logger.log.info('start')
    cfg.progress.update(
        process=__name__.split('.')[-1].replace('_', ' '), message='starting',
        start=True
    )
    # map the public resampling name to the GDAL resampling keyword;
    # unknown or None values fall back to nearest neighbour
    if resampling == 'nearest_neighbour':
        resample = 'near'
    elif resampling == 'average':
        resample = 'average'
    elif resampling == 'sum':
        resample = 'sum'
    elif resampling == 'maximum':
        resample = 'max'
    elif resampling == 'minimum':
        resample = 'min'
    elif resampling == 'mode':
        resample = 'mode'
    elif resampling == 'median':
        resample = 'med'
    elif resampling == 'first_quartile':
        resample = 'q1'
    elif resampling == 'third_quartile':
        resample = 'q3'
    else:
        resample = 'near'
    # prepare process files
    prepared = shared_tools.prepare_process_files(
        input_bands=input_bands, output_path=output_path, overwrite=overwrite,
        n_processes=n_processes, box_coordinate_list=extent_list,
        bandset_catalog=bandset_catalog, prefix=prefix,
        multiple_output=True, multiple_input=True,
        virtual_output=virtual_output
    )
    input_raster_list = prepared['input_raster_list']
    raster_info = prepared['raster_info']
    n_processes = prepared['n_processes']
    nodata_list = prepared['nodata_list']
    output_list = prepared['output_list']
    # a scalar resolution is applied to both x and y; when x_y_resolution is
    # None this becomes [None, None] and the float() conversions below fail,
    # silently leaving the -tr parameter unset (see no-op handlers below)
    if type(x_y_resolution) is not list:
        x_y_resolution = [x_y_resolution, x_y_resolution]
    if resample_pixel_factor is None:
        resample_pixel_factor = 1
    try:
        resample_pixel_factor = float(resample_pixel_factor)
    except Exception as err:
        cfg.logger.log.error(str(err))
        cfg.messages.error(str(err))
        return OutputManager(check=False)
    # epsg acts as a tri-state flag below: False means "keep the input
    # projection", None means "no projection handling yet", an int is the
    # target EPSG code
    if epsg_code is None:
        epsg = False
    else:
        epsg = None
    resample_parameters = None
    if align_raster is None:
        left = top = right = bottom = p_x = p_y = align_sys_ref = None
    else:
        # raster extent and pixel size of the alignment raster
        (gt, crs, crs_unit, xy_count, nd, number_of_bands, block_size,
         scale_offset, data_type) = raster_vector.raster_info(align_raster)
        # copy raster
        left = gt[0]
        top = gt[3]
        right = gt[0] + gt[1] * xy_count[0]
        bottom = gt[3] + gt[5] * xy_count[1]
        p_x = gt[1]
        p_y = abs(gt[5])
        # check projections
        align_sys_ref = raster_vector.auto_set_epsg(align_raster)
        epsg = False
        new_p_x = p_x * resample_pixel_factor
        new_p_y = p_y * resample_pixel_factor
        # NOTE(review): this -tr/-te string is recomputed per band inside the
        # loop below using p_x/p_y (not new_p_x/new_p_y), so
        # resample_pixel_factor appears to have no effect when align_raster
        # is set — confirm intended
        resample_parameters = '-tr %s %s -te %s %s %s %s' % (
            str(new_p_x), str(new_p_y), str(left), str(bottom), str(right),
            str(top))
    # expand the DEFLATE21 shortcut to explicit GDAL creation options
    if compress_format == 'DEFLATE21':
        compress_format = 'DEFLATE -co PREDICTOR=2 -co ZLEVEL=1'
    min_progress = 1
    one_progress = int((99 - 1) / len(input_raster_list))
    max_progress = one_progress
    for band in range(0, len(input_raster_list)):
        # raster extent and pixel size of the current input band
        info = raster_vector.image_geotransformation(input_raster_list[band])
        left_input = info['left']
        top_input = info['top']
        right_input = info['right']
        bottom_input = info['bottom']
        p_x_input = info['pixel_size_x']
        p_y_input = info['pixel_size_y']
        proj_input = info['projection']
        # NOTE(review): once set from the first band, output_data_type is
        # reused for all following bands — confirm intended
        if output_data_type is None:
            output_data_type = raster_info[band][8]
        # calculate minimal extent
        if align_raster is not None:
            input_sys_ref = raster_vector.get_spatial_reference(proj_input)
            # project the input corners into the alignment reference system
            left_projected, top_projected = (
                raster_vector.project_point_coordinates(
                    left_input, top_input, input_sys_ref, align_sys_ref
                )
            )
            right_projected, bottom_projected = (
                raster_vector.project_point_coordinates(
                    right_input, bottom_input, input_sys_ref, align_sys_ref
                )
            )
            if same_extent is False:
                # minimum extent snapped to the align raster grid,
                # padded/shrunk by two pixels on each side
                if left_projected < left:
                    left_output = left - int(
                        2 + (left - left_projected) / p_x
                    ) * p_x
                else:
                    left_output = left + int(
                        (left_projected - left) / p_x - 2
                    ) * p_x
                if right_projected > right:
                    right_output = right + int(
                        2 + (right_projected - right) / p_x
                    ) * p_x
                else:
                    right_output = right - int(
                        (right - right_projected) / p_x - 2
                    ) * p_x
                if top_projected > top:
                    top_output = top + int(
                        2 + (top_projected - top) / p_y
                    ) * p_y
                else:
                    top_output = top - int(
                        (top - top_projected) / p_y - 2
                    ) * p_y
                if bottom_projected > bottom:
                    bottom_output = bottom + int(
                        (bottom_projected - bottom) / p_y - 2
                    ) * p_y
                else:
                    bottom_output = bottom - int(
                        2 + (bottom - bottom_projected) / p_y
                    ) * p_y
            else:
                # same extent as the alignment raster
                left_output = left
                top_output = top
                right_output = right
                bottom_output = bottom
            resample_parameters = '-tr %s %s -te %s %s %s %s ' % (
                str(p_x), str(p_y), str(left_output), str(bottom_output),
                str(right_output), str(top_output))
        # use epsg
        elif epsg_code is not None:
            # spatial reference
            resample_parameters = None
            try:
                epsg = int(epsg_code)
            except Exception as err:
                cfg.logger.log.error(str(err))
                cfg.messages.error(str(err))
                return OutputManager(check=False)
            if same_extent is False:
                try:
                    resample_parameters = '-tr %s %s' % (
                        str(float(x_y_resolution[0])),
                        str(float(x_y_resolution[1])))
                except Exception as err:
                    # no-op: x_y_resolution may be [None, None]; the -tr
                    # parameter is deliberately left unset in that case
                    str(err)
            else:
                # NOTE(review): left/top/right/bottom are None in this branch
                # (align_raster is None here), so the -te string would
                # contain 'None' — confirm this path is intended
                left_output = left
                top_output = top
                right_output = right
                bottom_output = bottom
                try:
                    resample_parameters = '-tr %s %s -te %s %s %s %s ' % (
                        str(float(x_y_resolution[0])),
                        str(float(x_y_resolution[1])),
                        str(left_output), str(bottom_output),
                        str(right_output), str(top_output)
                    )
                except Exception as err:
                    # no-op: see note above
                    str(err)
        # resample
        else:
            if epsg is False:
                epsg = None
            # output resolution from input pixel size and factor
            p_x = p_x_input * resample_pixel_factor
            p_y = p_y_input * resample_pixel_factor
            if same_extent is False:
                try:
                    resample_parameters = '-tr %s %s' % (str(p_x), str(p_y))
                except Exception as err:
                    # no-op: error deliberately ignored
                    str(err)
            else:
                # NOTE(review): left/top/right/bottom are None in this branch
                # as well — confirm this path is intended
                left_output = left
                top_output = top
                right_output = right
                bottom_output = bottom
                try:
                    resample_parameters = '-tr %s %s -te %s %s %s %s ' % (
                        str(p_x), str(p_y), str(left_output),
                        str(bottom_output), str(right_output), str(top_output)
                    )
                except Exception as err:
                    # no-op: error deliberately ignored
                    str(err)
        # target projection string for gdal warping (-t_srs)
        if epsg is not None:
            if epsg is False:
                epsg_output = proj_input
            else:
                epsg_output = 'epsg:%s' % str(epsg)
        else:
            epsg_output = None
        # NOTE(review): once set from the first band, nodata_value is reused
        # for all following bands — confirm intended
        if nodata_value is None:
            nodata_value = nodata_list[band]
        cfg.logger.log.debug('resample_parameters: %s' % resample_parameters)
        # calculation
        raster_vector.gdal_warping(
            input_raster=input_raster_list[band],
            output=output_list[band],
            output_format='GTiff',
            resample_method=resample,
            t_srs=epsg_output,
            compression=compress,
            compress_format=compress_format,
            additional_params=resample_parameters,
            raster_data_type=output_data_type,
            dst_nodata=nodata_value,
            available_ram=available_ram,
            n_processes=n_processes,
            min_progress=min_progress, max_progress=max_progress
        )
        min_progress = max_progress
        max_progress += one_progress
    cfg.progress.update(end=True)
    cfg.logger.log.info('end; band resample: %s' % str(output_list))
    return OutputManager(paths=output_list)
from typing import Union, Optional
from remotior_sensus.core import configurations as cfg
from remotior_sensus.core.bandset_catalog import BandSet
from remotior_sensus.core.bandset_catalog import BandSetCatalog
from remotior_sensus.core.output_manager import OutputManager
from remotior_sensus.util.files_directories import move_file, parent_directory
from remotior_sensus.util.shared_tools import prepare_process_files
from remotior_sensus.util.raster_vector import get_vector_values
from remotior_sensus.core.processor_functions import clip_raster
def band_clip(
        input_bands: Union[list, int, BandSet],
        output_path: Union[list, str] = None,
        vector_path: Optional[str] = '',
        vector_field: Optional[str] = None,
        overwrite: Optional[bool] = False,
        prefix: Optional[str] = '',
        extent_list: Optional[list] = None,
        n_processes: Optional[int] = None,
        available_ram: Optional[int] = None,
        bandset_catalog: Optional[BandSetCatalog] = None,
        virtual_output: Optional[bool] = None
) -> OutputManager:
    """Perform band clip.

    This tool allows for clipping the bands of a BandSet based on a vector
    or a list of boundary coordinates left top right bottom.

    Args:
        input_bands: input of type BandSet or list of paths or integer
            number of BandSet.
        output_path: string of output path directory or list of paths.
        overwrite: if True, output overwrites existing files.
        vector_path: path of vector used to clip.
        vector_field: vector field name used to clip for every unique ID.
        virtual_output: if True (and output_path is directory), save output
            as virtual raster of multiprocess parts.
        prefix: optional string for output name prefix.
        extent_list: list of boundary coordinates left top right bottom.
        n_processes: number of parallel processes.
        available_ram: number of megabytes of RAM available to processes.
        bandset_catalog: optional type BandSetCatalog for BandSet number.

    Returns:
        Object :func:`~remotior_sensus.core.output_manager.OutputManager` with

        - paths = output list

    Examples:
        Clip using vector
            >>> # start the process
            >>> clip = band_clip(input_bands=['path_1', 'path_2'],
            ...     output_path='output_path', vector_path='vector_path',
            ...     prefix='clip_')
    """  # noqa: E501
    cfg.logger.log.info('start')
    cfg.progress.update(
        process=__name__.split('.')[-1].replace('_', ' '), message='starting',
        start=True
    )
    # fall back to configured defaults for processes and RAM
    n_processes = cfg.n_processes if n_processes is None else n_processes
    if available_ram is None:
        available_ram = cfg.available_ram
    # RAM budget per process
    ram = int(available_ram / n_processes)
    # prepare process files
    prepared = prepare_process_files(
        input_bands=input_bands, output_path=output_path, overwrite=overwrite,
        n_processes=n_processes, bandset_catalog=bandset_catalog,
        box_coordinate_list=extent_list,
        prefix=prefix, multiple_output=True, multiple_input=True,
        virtual_output=virtual_output
    )
    sources = prepared['input_raster_list']
    destinations = prepared['output_list']

    def _clip_arguments(source, destination, box, where):
        # build one argument dictionary for a single clip_raster task
        return {
            'input_raster': source,
            'extent_list': box,
            'vector_path': vector_path,
            'available_ram': ram,
            'output': destination,
            'gdal_path': cfg.gdal_path,
            'compress_format': 'LZW',
            'where': where
        }

    # build function and argument lists of the parallel tasks
    argument_list = []
    function_list = []
    output_raster_list = []
    if virtual_output is True and extent_list is not None:
        # outputs were already produced as virtual raster parts during
        # preparation; only move them to the .vrt output names
        for source, destination in zip(sources, destinations):
            virtual_file = '%s.vrt' % (destination[:-4])
            move_file(source, virtual_file)
            output_raster_list.append(virtual_file)
    elif vector_field is not None:
        # one clip per unique value of vector_field per input raster
        unique_values = get_vector_values(
            vector_path=vector_path, field_name=vector_field)
        for value in unique_values:
            for source, destination in zip(sources, destinations):
                output_p = '%s_%s_%s.tif' % (
                    destination[:-4], str(vector_field), str(value))
                output_raster_list.append(output_p)
                argument_list.append(
                    _clip_arguments(
                        source, output_p, None,
                        "%s = %s" % (vector_field, value)
                    )
                )
                function_list.append(clip_raster)
    else:
        # one clip per input raster using the vector or coordinate extent
        for source, destination in zip(sources, destinations):
            argument_list.append(
                _clip_arguments(source, destination, extent_list, None)
            )
            function_list.append(clip_raster)
            output_raster_list.append(destination)
    cfg.multiprocess.run_iterative_process(
        function_list=function_list, argument_list=argument_list
    )
    cfg.progress.update(end=True)
    cfg.logger.log.info('end; band clip: %s' % output_raster_list)
    return OutputManager(paths=output_raster_list)
from typing import Union, Optional
from remotior_sensus.core import configurations as cfg
from remotior_sensus.core.bandset_catalog import BandSet
from remotior_sensus.core.bandset_catalog import BandSetCatalog
from remotior_sensus.core.output_manager import OutputManager
from remotior_sensus.util import shared_tools
def band_sieve(
        input_bands: Union[list, int, BandSet], size: int,
        output_path: Union[list, str] = None, connected: Optional[bool] = None,
        overwrite: Optional[bool] = False,
        prefix: Optional[str] = '', extent_list: Optional[list] = None,
        n_processes: Optional[int] = None,
        available_ram: Optional[int] = None,
        bandset_catalog: Optional[BandSetCatalog] = None,
        virtual_output: Optional[bool] = None
) -> OutputManager:
    """Perform band sieve.

    This tool allows for performing the sieve of raster bands removing
    patches having size lower than a threshold (i.e. number of pixels).

    Args:
        input_bands: input of type BandSet or list of paths or integer
            number of BandSet.
        output_path: string of output path directory or list of paths.
        overwrite: if True, output overwrites existing files.
        size: sieve threshold in pixels; patches smaller than this size
            are removed.
        virtual_output: if True (and output_path is directory), save output
            as virtual raster of multiprocess parts.
        connected: if True, consider 8 pixel connection; if False or None,
            consider 4 pixel connection.
        prefix: optional string for output name prefix.
        extent_list: list of boundary coordinates left top right bottom.
        n_processes: number of parallel processes.
        available_ram: number of megabytes of RAM available to processes.
        bandset_catalog: optional type BandSetCatalog for BandSet number.

    Returns:
        Object :func:`~remotior_sensus.core.output_manager.OutputManager` with

        - paths = output list

    Examples:
        Perform the sieve of size 3 with connected pixel (8 connection)
            >>> sieve = band_sieve(input_bands=['file1.tif', 'file2.tif'],size=3,output_path='directory_path',connected=True,prefix='sieve_')
    """  # noqa: E501
    cfg.logger.log.info('start')
    cfg.progress.update(
        process=__name__.split('.')[-1].replace('_', ' '), message='starting',
        start=True
    )
    # prepare process files
    prepared = shared_tools.prepare_process_files(
        input_bands=input_bands, output_path=output_path, overwrite=overwrite,
        n_processes=n_processes, bandset_catalog=bandset_catalog,
        box_coordinate_list=extent_list,
        prefix=prefix, multiple_output=True, multiple_input=True,
        virtual_output=virtual_output
    )
    input_raster_list = prepared['input_raster_list']
    raster_info = prepared['raster_info']
    n_processes = prepared['n_processes']
    nodata_list = prepared['nodata_list']
    output_list = prepared['output_list']
    # pixel connectivity: 8 when connected is True, otherwise 4
    # (the original if/elif/else chain had an unreachable else branch)
    connected = 8 if connected else 4
    # process calculation
    n = 0
    min_p = 1
    max_p = int((99 - 1) / len(input_raster_list))
    for i in input_raster_list:
        cfg.progress.update(message='processing raster %s' % (n + 1))
        out = output_list[n]
        nd = nodata_list[n]
        # keep the input raster data type for the output
        data_type = raster_info[n][8]
        # perform sieve
        cfg.multiprocess.multiprocess_raster_sieve(
            raster_path=i, n_processes=n_processes,
            available_ram=available_ram, sieve_size=size,
            connected=connected, output_nodata_value=nd, output=out,
            output_data_type=data_type, compress=cfg.raster_compression,
            min_progress=min_p + max_p * n,
            max_progress=min_p + max_p * (n + 1)
        )
        n += 1
    cfg.progress.update(end=True)
    cfg.logger.log.info('end; band sieve: %s' % output_list)
    return OutputManager(paths=output_list)
from typing import Union, Optional
from remotior_sensus.core import configurations as cfg
from remotior_sensus.core.bandset_catalog import BandSet
from remotior_sensus.core.bandset_catalog import BandSetCatalog
from remotior_sensus.core.output_manager import OutputManager
from remotior_sensus.core.processor_functions import raster_neighbor
from remotior_sensus.util import shared_tools
def band_neighbor_pixels(
        input_bands: Union[list, int, BandSet], size: int,
        output_path: Optional[str] = None,
        overwrite: Optional[bool] = False,
        stat_name: Optional[str] = None, structure: Optional[any] = None,
        circular_structure: Optional[bool] = True,
        stat_percentile: Optional[Union[int, str]] = None,
        output_data_type: Optional[str] = None,
        virtual_output: Optional[bool] = None, prefix: Optional[str] = '',
        extent_list: Optional[list] = None,
        n_processes: Optional[int] = None, available_ram: Optional[int] = None,
        bandset_catalog: Optional[BandSetCatalog] = None
) -> OutputManager:
    """Performs band neighbor pixels.

    This tool calculates a function over neighbor pixels defined by
    size (i.e. number of pixels) or structure.
    A new raster is created for each input band, where each pixel is the
    result of the calculation of the function over the neighbor pixels
    (e.g. the mean of the pixel values of a 3x3 window around the pixel).

    Available functions are:

    - Count
    - Max
    - Mean
    - Median
    - Min
    - Percentile
    - StandardDeviation
    - Sum

    Args:
        input_bands: input of type BandSet or list of paths or integer
            number of BandSet.
        output_path: string of output path directory or list of paths.
        overwrite: if True, output overwrites existing files.
        size: neighborhood radius in pixels used to build the structure.
        structure: optional path to csv file of structures, if None then the
            structure is created from size.
        circular_structure: if True use circular structure.
        stat_percentile: integer value for percentile parameter.
        stat_name: statistic name as in configurations.statistics_list.
        output_data_type: optional raster output data type, if None the data
            type is the same as input raster.
        virtual_output: if True (and output_path is directory), save output
            as virtual raster of multiprocess parts.
        prefix: optional string for output name prefix.
        extent_list: list of boundary coordinates left top right bottom.
        n_processes: number of parallel processes.
        available_ram: number of megabytes of RAM available to processes.
        bandset_catalog: optional type BandSetCatalog for BandSet number.

    Returns:
        Object :func:`~remotior_sensus.core.output_manager.OutputManager` with

        - paths = output list

    Examples:
        Perform the band neighbor of size 10 pixels with the function Sum
            >>> neighbor = band_neighbor_pixels(input_bands=['file1.tif', 'file2.tif'],size=10,output_path='directory_path',stat_name='Sum',circular_structure=True)
    """  # noqa: E501
    cfg.logger.log.info('start')
    cfg.progress.update(
        process=__name__.split('.')[-1].replace('_', ' '), message='starting',
        start=True
    )
    # prepare process files
    prepared = shared_tools.prepare_process_files(
        input_bands=input_bands, output_path=output_path, overwrite=overwrite,
        n_processes=n_processes, box_coordinate_list=extent_list,
        bandset_catalog=bandset_catalog, prefix=prefix,
        multiple_output=True, multiple_input=True,
        virtual_output=virtual_output
    )
    input_raster_list = prepared['input_raster_list']
    raster_info = prepared['raster_info']
    n_processes = prepared['n_processes']
    nodata_list = prepared['nodata_list']
    output_list = prepared['output_list']
    vrt_list = prepared['vrt_list']
    # find the numpy expression template matching stat_name
    # (case-insensitive)
    stat_numpy = None
    if stat_name is not None:
        for stat in cfg.statistics_list:
            if stat[0].lower() == stat_name.lower():
                stat_numpy = stat[1]
                break
    if stat_numpy is None:
        # previously a missing or unmatched stat_name crashed with
        # AttributeError/TypeError; fail explicitly instead, consistent
        # with the other error paths of this function
        cfg.logger.log.error('invalid stat_name: %s' % str(stat_name))
        cfg.messages.error('invalid stat_name: %s' % str(stat_name))
        return OutputManager(check=False)
    cfg.logger.log.debug('stat_numpy: %s' % str(stat_numpy))
    if cfg.stat_percentile in stat_numpy:
        # percentile-style statistic: substitute the percentile value
        function_numpy = stat_numpy.replace('array', 'A')
        try:
            stat_percentile = int(stat_percentile)
            function_numpy = function_numpy.replace(
                cfg.stat_percentile, str(stat_percentile)
            )
        except Exception as err:
            cfg.logger.log.error(err)
            cfg.messages.error(str(err))
            return OutputManager(check=False)
    else:
        # regular statistic computed along the neighborhood axis
        function_numpy = stat_numpy.replace('array', 'A, axis=2')
    cfg.logger.log.debug('function_numpy: %s' % str(function_numpy))
    if structure is None:
        # build the structure from size
        if not circular_structure:
            structure = shared_tools.create_base_structure(size * 2 + 1)
        else:
            structure = shared_tools.create_circular_structure(size)
    else:
        # load the structure from csv file
        try:
            structure = shared_tools.open_structure(structure)
        except Exception as err:
            cfg.logger.log.error(err)
            cfg.messages.error(str(err))
            return OutputManager(check=False)
    # process calculation
    n = 0
    min_p = 1
    max_p = int((99 - 1) / len(input_raster_list))
    # dummy bands for memory calculation
    dummy_bands = 3
    for i in input_raster_list:
        out = output_list[n]
        nd = nodata_list[n]
        # NOTE: once set from the first band, output_data_type is reused for
        # all following bands
        if output_data_type is None:
            output_data_type = raster_info[n][8]
        cfg.multiprocess.run(
            raster_path=i, function=raster_neighbor,
            function_argument=structure,
            function_variable=[function_numpy], output_raster_path=out,
            output_data_type=output_data_type, output_nodata_value=nd,
            compress=cfg.raster_compression, dummy_bands=dummy_bands,
            n_processes=n_processes, available_ram=available_ram,
            boundary_size=structure.shape[0] + 1, virtual_raster=vrt_list[n],
            progress_message='processing raster %s' % str(n + 1),
            min_progress=min_p + max_p * n,
            max_progress=min_p + max_p * (n + 1)
        )
        n += 1
    cfg.progress.update(end=True)
    cfg.logger.log.info('end; neighbor pixel: %s' % str(output_list))
    return OutputManager(paths=output_list)
from typing import Union, Optional
from remotior_sensus.core import configurations as cfg
from remotior_sensus.core.bandset_catalog import BandSet
from remotior_sensus.core.bandset_catalog import BandSetCatalog
from remotior_sensus.core.output_manager import OutputManager
from remotior_sensus.core.processor_functions import raster_erosion
from remotior_sensus.util import shared_tools
def band_erosion(
        input_bands: Union[list, int, BandSet], value_list: list, size: int,
        output_path: Union[list, str] = None,
        overwrite: Optional[bool] = False,
        circular_structure: Optional[bool] = None,
        prefix: Optional[str] = '', extent_list: Optional[list] = None,
        n_processes: Optional[int] = None,
        available_ram: Optional[int] = None,
        bandset_catalog: Optional[BandSetCatalog] = None,
        virtual_output: Optional[bool] = None
):
    """Perform erosion of band pixels.

    This tool performs the erosion of pixels identified by a list of values.
    A new raster is created for each input band.

    Args:
        input_bands: input of type BandSet or list of paths or integer
            number of BandSet.
        output_path: string of output path directory or list of paths.
        overwrite: if True, output overwrites existing files.
        value_list: list of pixel values to be eroded.
        size: size of erosion in pixels.
        virtual_output: if True (and output_path is directory), save output
            as virtual raster of multiprocess parts.
        circular_structure: if True, use circular structure; if False or
            None, square structure.
        prefix: optional string for output name prefix.
        extent_list: list of boundary coordinates left top right bottom.
        n_processes: number of parallel processes.
        available_ram: number of megabytes of RAM available to processes.
        bandset_catalog: optional type BandSetCatalog for BandSet number.

    Returns:
        Object :func:`~remotior_sensus.core.output_manager.OutputManager` with

        - paths = output list

    Examples:
        Perform the erosion of size 1 for value 1 and 2
            >>> dilation = band_erosion(input_bands=['path_1', 'path_2'],value_list=[1, 2],size=1,output_path='directory_path',circular_structure=True)
    """  # noqa: E501
    cfg.logger.log.info('start')
    cfg.progress.update(
        process=__name__.split('.')[-1].replace('_', ' '), message='starting',
        start=True
    )
    # prepare process files
    prepared = shared_tools.prepare_process_files(
        input_bands=input_bands, output_path=output_path, overwrite=overwrite,
        n_processes=n_processes, bandset_catalog=bandset_catalog,
        prefix=prefix, box_coordinate_list=extent_list,
        multiple_output=True, multiple_input=True,
        virtual_output=virtual_output
    )
    input_raster_list = prepared['input_raster_list']
    raster_info = prepared['raster_info']
    n_processes = prepared['n_processes']
    nodata_list = prepared['nodata_list']
    output_list = prepared['output_list']
    vrt_list = prepared['vrt_list']
    # unit structure used per erosion step; size is passed to raster_erosion
    # as the number of iterations
    if not circular_structure:
        structure = shared_tools.create_base_structure(3)
    else:
        structure = shared_tools.create_circular_structure(1)
    # process calculation
    n = 0
    min_p = 1
    max_p = int((99 - 1) / len(input_raster_list))
    # dummy bands for memory calculation (loop invariant, hoisted)
    dummy_bands = 7
    for i in input_raster_list:
        cfg.progress.update(message='processing raster %s' % (n + 1))
        out = output_list[n]
        nd = nodata_list[n]
        # keep the input raster data type for the output
        data_type = raster_info[n][8]
        cfg.multiprocess.run(
            raster_path=i, function=raster_erosion,
            function_argument=structure, function_variable=[size, value_list],
            output_raster_path=out, output_data_type=data_type,
            n_processes=n_processes, available_ram=available_ram,
            output_nodata_value=nd, compress=cfg.raster_compression,
            dummy_bands=dummy_bands, boundary_size=structure.shape[0] + 1,
            virtual_raster=vrt_list[n],
            progress_message='processing raster %s' % (n + 1),
            min_progress=min_p + max_p * n,
            max_progress=min_p + max_p * (n + 1)
        )
        n += 1
    cfg.progress.update(end=True)
    cfg.logger.log.info('end; band erosion: %s' % output_list)
    return OutputManager(paths=output_list)
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from . import diagnostics_api_pb2 as diagnostics__api__pb2
# NOTE: generated by the gRPC Python protoc plugin; do not edit by hand.
class DiagnosticsServiceStub(object):
    """*
    Read data by service identifier

    Data identifiers:
    - 0x22 read data by identifier (Service id).
    - 0x1f90 DID (Data identifier) for VIN number.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # unary-unary RPC: one DiagnosticsRequest in, one
        # DiagnosticsResponse out
        self.SendDiagnosticsQuery = channel.unary_unary(
                '/base.DiagnosticsService/SendDiagnosticsQuery',
                request_serializer=diagnostics__api__pb2.DiagnosticsRequest.SerializeToString,
                response_deserializer=diagnostics__api__pb2.DiagnosticsResponse.FromString,
                )
# NOTE: generated by the gRPC Python protoc plugin; do not edit by hand.
# Subclass this and override the methods to implement the server side.
class DiagnosticsServiceServicer(object):
    """*
    Read data by service identifier

    Data identifiers:
    - 0x22 read data by identifier (Service id).
    - 0x1f90 DID (Data identifier) for VIN number.
    """

    def SendDiagnosticsQuery(self, request, context):
        """/ Send diagnostics request
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
# NOTE: generated by the gRPC Python protoc plugin; do not edit by hand.
def add_DiagnosticsServiceServicer_to_server(servicer, server):
    # Register the servicer's RPC method handlers with a grpc.Server.
    rpc_method_handlers = {
            'SendDiagnosticsQuery': grpc.unary_unary_rpc_method_handler(
                    servicer.SendDiagnosticsQuery,
                    request_deserializer=diagnostics__api__pb2.DiagnosticsRequest.FromString,
                    response_serializer=diagnostics__api__pb2.DiagnosticsResponse.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'base.DiagnosticsService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
# NOTE: generated by the gRPC Python protoc plugin; do not edit by hand.
class DiagnosticsService(object):
    """*
    Read data by service identifier

    Data identifiers:
    - 0x22 read data by identifier (Service id).
    - 0x1f90 DID (Data identifier) for VIN number.
    """

    @staticmethod
    def SendDiagnosticsQuery(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # Convenience wrapper: perform the unary-unary RPC without an
        # explicitly created stub (experimental gRPC API).
        return grpc.experimental.unary_unary(request, target, '/base.DiagnosticsService/SendDiagnosticsQuery',
            diagnostics__api__pb2.DiagnosticsRequest.SerializeToString,
            diagnostics__api__pb2.DiagnosticsResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from . import common_pb2 as common__pb2
from . import traffic_api_pb2 as traffic__api__pb2
# NOTE: generated by the gRPC Python protoc plugin; do not edit by hand.
class TrafficServiceStub(object):
    """/ Handle playback and recordings.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # unary-unary RPC: start playback/recording
        self.PlayTraffic = channel.unary_unary(
                '/base.TrafficService/PlayTraffic',
                request_serializer=traffic__api__pb2.PlaybackInfos.SerializeToString,
                response_deserializer=traffic__api__pb2.PlaybackInfos.FromString,
                )
        # unary-stream RPC: continuous playback status updates
        self.PlayTrafficStatus = channel.unary_stream(
                '/base.TrafficService/PlayTrafficStatus',
                request_serializer=common__pb2.Empty.SerializeToString,
                response_deserializer=traffic__api__pb2.PlaybackInfos.FromString,
                )
# NOTE: generated by the gRPC Python protoc plugin; do not edit by hand.
# Subclass this and override the methods to implement the server side.
class TrafficServiceServicer(object):
    """/ Handle playback and recordings.
    """

    def PlayTraffic(self, request, context):
        """/ Play a playback or start recording.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def PlayTrafficStatus(self, request, context):
        """/ Get continious status of playback states.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
# NOTE: generated by the gRPC Python protoc plugin; do not edit by hand.
def add_TrafficServiceServicer_to_server(servicer, server):
    # Register the servicer's RPC method handlers with a grpc.Server.
    rpc_method_handlers = {
            'PlayTraffic': grpc.unary_unary_rpc_method_handler(
                    servicer.PlayTraffic,
                    request_deserializer=traffic__api__pb2.PlaybackInfos.FromString,
                    response_serializer=traffic__api__pb2.PlaybackInfos.SerializeToString,
            ),
            'PlayTrafficStatus': grpc.unary_stream_rpc_method_handler(
                    servicer.PlayTrafficStatus,
                    request_deserializer=common__pb2.Empty.FromString,
                    response_serializer=traffic__api__pb2.PlaybackInfos.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'base.TrafficService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
# NOTE: generated by the gRPC Python protoc plugin; do not edit by hand.
class TrafficService(object):
    """/ Handle playback and recordings.
    """

    @staticmethod
    def PlayTraffic(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # Convenience wrapper: perform the unary-unary RPC without an
        # explicitly created stub (experimental gRPC API).
        return grpc.experimental.unary_unary(request, target, '/base.TrafficService/PlayTraffic',
            traffic__api__pb2.PlaybackInfos.SerializeToString,
            traffic__api__pb2.PlaybackInfos.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def PlayTrafficStatus(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # Convenience wrapper: perform the unary-stream RPC without an
        # explicitly created stub (experimental gRPC API).
        return grpc.experimental.unary_stream(request, target, '/base.TrafficService/PlayTrafficStatus',
            common__pb2.Empty.SerializeToString,
            traffic__api__pb2.PlaybackInfos.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
import os
import signal
import binascii
import queue
from threading import Thread
import remotivelabs.broker.sync as br
import remotivelabs.broker.sync.helper as br_helper
class Broker:
def __init__(self, url, api_key):
self.url = url
self.api_key = api_key
self.q = queue.Queue()
"""Main function, checking arguments passed to script, setting up stubs, configuration and starting Threads."""
# Setting up stubs and configuration
self.intercept_channel = br.create_channel(url, api_key)
self.network_stub = br.network_api_pb2_grpc.NetworkServiceStub(self.intercept_channel)
self.system_stub = br.system_api_pb2_grpc.SystemServiceStub(self.intercept_channel)
self.traffic_stub = br.traffic_api_pb2_grpc.TrafficServiceStub(self.intercept_channel)
self.signal_creator = br.SignalCreator(self.system_stub)
def play(self, namespace: str, path: str):
playback_list = [
{
"namespace": namespace,
"path": path,
"mode": br.traffic_api_pb2.Mode.PLAY,
}]
status = self.traffic_stub.PlayTraffic(
br.traffic_api_pb2.PlaybackInfos(
playbackInfo=list(map(self.__create_playback_config, playback_list))
)
)
def download(self, file:str, dest:str):
br_helper.download_file(system_stub=self.system_stub, path=file, dest_path=dest)
def list_namespaces(self):
# Lists available signals
configuration = self.system_stub.GetConfiguration(br.common_pb2.Empty())
namespaces = []
for networkInfo in configuration.networkInfo:
namespaces.append(networkInfo.namespace.name)
return namespaces
def list_signal_names(self, namespace):
# Lists available signals
configuration = self.system_stub.GetConfiguration(br.common_pb2.Empty())
signal_names = []
for networkInfo in configuration.networkInfo:
res = self.system_stub.ListSignals(networkInfo.namespace)
for finfo in res.frame:
for sinfo in finfo.childInfo:
signal_names.append({'signal': sinfo.id.name, 'namespace' : networkInfo.namespace.name})
return signal_names
def list_signal_names(self):
# Lists available signals
configuration = self.system_stub.GetConfiguration(br.common_pb2.Empty())
signal_names = []
for networkInfo in configuration.networkInfo:
res = self.system_stub.ListSignals(networkInfo.namespace)
for finfo in res.frame:
for sinfo in finfo.childInfo:
signal_names.append({'signal': sinfo.id.name, 'namespace' : networkInfo.namespace.name})
return signal_names
def subscribe(self, signals: list, namespace, on_frame, changed_values_only: bool = True):
client_id = br.common_pb2.ClientId(id="cloud_demo")
signals_to_subscribe_on = \
map(lambda signal: self.signal_creator.signal(signal, namespace), signals)
Thread(
target=br.act_on_signal,
args=(
client_id,
self.network_stub,
signals_to_subscribe_on,
changed_values_only, # True: only report when signal changes
lambda frame: self.__each_signal(frame, on_frame),
lambda sub: (self.q.put(("cloud_demo", sub))),
),
).start()
# Wait for subscription
ecu, subscription = self.q.get()
return subscription
@classmethod
def connect(cls, url, api_key):
return Broker(url, api_key)
def __each_signal(self, signals, callback):
callback(map(lambda s: {
'timestamp_nanos': s.timestamp,
'name': s.id.name,
'value': self.__get_value(s)
}, signals))
@staticmethod
def __get_value(signal):
if signal.raw != b"":
return "0x" + binascii.hexlify(signal.raw).decode("ascii")
elif signal.HasField("integer"):
return signal.integer
elif signal.HasField("double"):
return signal.double
elif signal.HasField("arbitration"):
return signal.arbitration
else:
return "empty"
@staticmethod
def __create_playback_config(item):
"""Creating configuration for playback
Parameters
----------
item : dict
Dictionary containing 'path', 'namespace' and 'mode'
Returns
-------
PlaybackInfo
Object instance of class
"""
playbackConfig = br.traffic_api_pb2.PlaybackConfig(
fileDescription=br.system_api_pb2.FileDescription(path=item["path"]),
namespace=br.common_pb2.NameSpace(name=item["namespace"]),
)
return br.traffic_api_pb2.PlaybackInfo(
playbackConfig=playbackConfig,
playbackMode=br.traffic_api_pb2.PlaybackMode(mode=item["mode"]),
)
Broker.connect = classmethod(Broker.connect) | /remotivelabs_cli-0.0.1a15-py3-none-any.whl/cli/lib/broker.py | 0.511473 | 0.214609 | broker.py | pypi |
import lxml.html
import logging
from functools import total_ordering
import sys
import re
ATTRIBUTES_PATTERNS = [
"cookie",
"notice",
"qc",
"didomi",
"consent",
"cybot",
"policy",
"privacy",
"advert",
"popup",
"advert",
"alert",
"dismiss",
"banner",
"modal",
"directive",
"notification",
"cnil",
"cc",
"page",
"disclaimer",
"content",
]
ATTRIBUTES_LIST = ["id", "class", "data-cookie-banner", "data-testid"]
TEXT_PATTERNS = [
"cookie",
"j'accepte",
"conditions générales",
"contenu personnalisé",
"nos partenaires",
"informations personnelles",
"analyser l'audience",
"campagnes de publicité ciblées",
"configuration des cookies",
"personnaliser le contenu",
"politique de confidentialité",
"publicités",
"politique d’utilisation des cookies",
"vous nous autorisez à collecter des informations",
"consent to our use of cookies",
"cookie policy",
"meilleure expérience",
"optimiser votre expérience",
"adapter la publicité",
"consentez à l’utilisation de ces cookies",
"publicités personnalisée",
"vous acceptez leur utilisation",
"politique relative aux cookies",
"statistiques de visites",
"proposer",
"services",
"offres",
"publicités",
"partenaires tiers",
"centres d’intérêt",
"centre d’intérêt",
"utilisation de cookies",
"personnaliser votre expérience",
"analyser notre trafic",
"partageons des informations",
"partenaires",
"médias sociaux",
"publicité",
"analyse",
"traitements de données",
"accéder à des informations sur votre appareil",
]
TO_KEEP_TAGS = ["body"]
logger = logging.getLogger("CookiesNoticeRemover")
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
handler.setFormatter(formatter)
logger.addHandler(handler)
@total_ordering
class ToRemoveTag:
def __init__(self, lxml_element, weight: int):
self.lxml_element = lxml_element
self.tag = lxml_element.tag.lower()
self.weight = weight
self.attributes = lxml_element.attrib
def __eq__(self, other):
if not isinstance(other, ToRemoveTag):
return False
return self.lxml_element == other.lxml_element
def __neq__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if not isinstance(other, ToRemoveTag):
return True
return self.weight < other.weight
def __repr__(self):
return f"{self.tag} with weight {self.weight} and attributes {self.attributes}"
class CookiesNoticeRemover:
def __init__(
self,
minimum_attribute_hints=2,
minimum_density=0.1,
no_childrens_evidence_treshold=2,
verbose=False,
):
self.__attribute_patterns_set = {
attribute_pattern.lower()
for attribute_pattern in ATTRIBUTES_PATTERNS
}
self.__text_patterns = sorted(
TEXT_PATTERNS, key=lambda t: len(t), reverse=True
)
self.__logger = None
if verbose:
self.__logger = logger
self.__minimum_attribute_hints = minimum_attribute_hints
self.__minimum_density = minimum_density
self.__no_childrens_evidence_treshold = no_childrens_evidence_treshold
def remove(self, content):
tree = lxml.html.fromstring(content)
attribute_matching_elements = self.__search_attribute_patterns(tree)
self.__write_log(
f"Will remove {attribute_matching_elements} attribute matching elements"
)
self.__remove_matching_elements(attribute_matching_elements)
text_matching_elements = self.__search_text_patterns(
tree, self.__minimum_density
)
self.__write_log(
f"Will remove {text_matching_elements} text matching elements"
)
self.__remove_matching_elements(text_matching_elements)
return lxml.html.tostring(tree, pretty_print=True)
def __search_attribute_patterns(self, element):
return self.__search_attribute_patterns_acc(element, [])
def __search_attribute_patterns_acc(self, element, matching_elements):
element_attributes = self.__get_element_attributes(element)
attributes_found_hints = self.__compute_attributes_hints(
element_attributes
)
matching_attributes = [
attribute
for attribute, nb_hints in attributes_found_hints.items()
if nb_hints >= self.__minimum_attribute_hints
]
if len(matching_attributes) > 0:
matching_elements.append(
ToRemoveTag(
element, 1
) # Attributes matched elements all have same weight
)
return matching_elements
else:
for child in element:
self.__search_attribute_patterns_acc(child, matching_elements)
return matching_elements
def __get_element_attributes(self, element):
return {
attribute_value.lower()
for attribute_name, attribute_string_value in element.attrib.items()
for attribute_value in attribute_string_value.split(" ")
if attribute_name.lower() in ATTRIBUTES_LIST
}
def __compute_attributes_hints(self, element_attributes):
attributes_found_hints = {
attribute: 0 for attribute in element_attributes
}
for attribute_hint in self.__attribute_patterns_set:
for element_attribute in element_attributes:
if attribute_hint in element_attribute:
attributes_found_hints[element_attribute] += 1
return attributes_found_hints
def __remove_matching_elements(self, elements):
sorted_elements = sorted(elements, reverse=True)
for element in sorted_elements:
self.__remove_matching_element(element)
def __remove_matching_element(self, element):
if element.tag not in TO_KEEP_TAGS:
element.lxml_element.getparent().remove(element.lxml_element)
else:
self.__write_log("Will not remove element because in TO_KEEP_TAGS")
def __search_text_patterns(self, element, minimum_density):
return self.__search_text_patterns_acc(element, [], minimum_density)
def __search_text_patterns_acc(
self, element, matching_elements, minimum_density
):
matching_text_patterns = self.__search_matching_text_patterns(element)
matched_density = self.__compute_matched_density(
element, matching_text_patterns
)
if len(matching_text_patterns) == 0:
# Optimisation to cut empty tree branches
return matching_elements
elif matched_density >= minimum_density:
# Matched text is dense enough for removal
matching_elements.append(
ToRemoveTag(element, len(matching_text_patterns))
)
return matching_elements
elif (
len(element) == 0
and len(matching_text_patterns)
> self.__no_childrens_evidence_treshold
):
# Element with no child is not dense enough
# But it has enough pattern to be sure
matching_elements.append(
ToRemoveTag(element, len(matching_text_patterns))
)
return matching_elements
else:
# Matched text is not dense enough
# Continue searching dense enough regions in childrens
for child in element:
self.__search_text_patterns_acc(
child, matching_elements, minimum_density
)
return matching_elements
def __search_matching_text_patterns(self, element):
text = self.__get_element_text(element)
detected_patterns = []
for pattern in self.__text_patterns:
regex_pattern = f".*{pattern}.*"
match = re.match(regex_pattern, text, re.IGNORECASE)
if match:
detected_patterns.append(pattern)
text = re.sub(pattern, " ", text, flags=re.IGNORECASE)
return detected_patterns
def __compute_matched_density(self, element, matching_text_patterns):
"""Compute density by text coverage.
Density = (text_covered_by_patterns) / total_length
"""
text_length = len(self.__get_element_text(element))
if text_length == 0:
return 0
total_matched_length = sum(
[len(pattern) for pattern in matching_text_patterns]
)
return total_matched_length / text_length
def __get_element_text(self, element):
"""Clean an lxml element text."""
try:
text = element.text_content()
text = re.sub("\t", " ", text)
text = re.sub("\r", " ", text)
text = re.sub(" ?\n ?", "\n", text)
text = re.sub("\n{2,}", "\n", text)
text = re.sub("\n", ". ", text)
text = re.sub(r"\.+", ".", text)
text = re.sub(
r"([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)", "", text
) # Removes emails
text = re.sub(
r"([a-zA-Z0-9_.+-]+ ?\[ ?a ?\] ?[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)",
"",
text,
) # Remove obfuscated emails
text = re.sub(
r"(http[s]?://)?([A-Z0-9a-z]+\.)+[A-Z0-9a-z]{1,6}", "", text,
) # Remove urls
text = re.sub(" +", " ", text)
text = text.strip()
return text
except Exception:
return ""
def __write_log(self, message):
if self.__logger is not None:
self.__logger.info(message) | /remove_cookies_notice-0.0.2.tar.gz/remove_cookies_notice-0.0.2/remove_cookies_notice/CookiesNoticeRemover.py | 0.426083 | 0.266605 | CookiesNoticeRemover.py | pypi |
import warnings
from pathlib import Path
from typing import List, Optional, Union
import numpy as np
from pymoab import core, types
from pymoab.types import MBENTITYSET
def create_moab_core(input):
moab_core = core.Core()
moab_core.load_file(str(input))
tag_name = moab_core.tag_get_handle(str(types.NAME_TAG_NAME))
tag_category = moab_core.tag_get_handle(str(types.CATEGORY_TAG_NAME))
root = moab_core.get_root_set()
# An array of tag values to be matched for entities returned by the
# following call.
group_tag_values = np.array(["Group"])
# Retrieve all EntitySets with a category tag of the user input value.
group_categories = list(moab_core.get_entities_by_type_and_tag(
root, MBENTITYSET, tag_category, group_tag_values))
return moab_core, group_categories, tag_name
def find_tags(
input: Optional[str] = 'dagmc.h5m',
) -> List[str]:
"""Removes a specific tag from a dagmc h5m file and saves the remaining
geometry as a new h5m file. Useful for visulising the geometry by removing
the graveyard tag and then the vtk file can be made without a bounding box
graveyard obstructing the view. Adapted from
https://github.com/svalinn/DAGMC-viz source code
Arguments:
input: The name of the h5m file to remove the dagmc tags from
output: The name of the outfile file(s) with the tags removed.
Supported extentions are .vtk and .h5m
tags: The tag or tags to remove.
verbose:
Returns:
filename of the new dagmc h5m file with the tags removed
"""
moab_core, group_categories, tag_name = create_moab_core(input)
# Retrieve all EntitySets with a name tag.
group_names = moab_core.tag_get_data(tag_name, group_categories, flat=True)
return group_names
def remove_tags(
input: Optional[str] = 'dagmc.h5m',
output: Optional[Union[str, List[str]]] = 'dagmc_removed_tag.vtk',
tags: Optional[Union[str, List[str]]] = 'graveyard',
verbose: Optional[bool] = False,
):
# -> List[List[str], List[str], List[str]]:
"""Removes a specific tag from a dagmc h5m file and saves the remaining
geometry as a new h5m file. Useful for visulising the geometry by removing
the graveyard tag and then the vtk file can be made without a bounding box
graveyard obstructing the view. Adapted from
https://github.com/svalinn/DAGMC-viz source code
Arguments:
input: The name of the h5m file to remove the dagmc tags from.
output: The name of the outfile file(s) with the tags removed.
Supported extentions are .vtk and .h5m.
tags: The tag(s) to be removed.
verbose: Print out additional information (True) or not (False).
Returns:
filename(s) of the output files produced, names of tags removed, names
of all the tags available.
"""
moab_core, group_categories, tag_name = create_moab_core(input)
group_names = find_tags(input=input)
if isinstance(tags, str):
tags_to_remove = [tags]
else:
tags_to_remove = tags
if verbose is True:
print('\ntag names that will be remove:')
for tag in tags_to_remove:
print(' ', tag, end='\n\n')
# Find the EntitySet whose name includes tag provided
sets_to_remove = []
names_to_remove = []
names_not_remove_removed = []
for group_set, name in zip(group_categories, group_names):
for tag_to_remove in tags_to_remove:
if tag_to_remove in str(name.lower()):
names_to_remove.append(name.lower())
sets_to_remove.append(group_set)
else:
names_not_remove_removed.append(name.lower())
names_to_remove = list(sorted(set(names_to_remove)))
if len(sets_to_remove) == 0:
warnings.warn('No tags removed.')
# prints out
if verbose is True:
print('tag names found in h5m file:')
for name in sorted(set(group_names)):
if str(name.lower()) in tags_to_remove:
print(' ', str(name.lower()), ' ---- > Removing tag')
else:
print(' ', str(name.lower()))
print()
# Remove the EntitySet from the data.
groups_to_write = [
group_set for group_set in group_categories if group_set not in sets_to_remove]
if isinstance(output, (str, Path)):
output = [output]
for out in output:
if verbose is True:
print('Writing', out, end='\n\n')
moab_core.write_file(str(out), output_sets=groups_to_write)
return output, names_to_remove, sorted(set(group_names)) | /remove_dagmc_tags-0.0.5.tar.gz/remove_dagmc_tags-0.0.5/remove_dagmc_tags/core.py | 0.862294 | 0.445349 | core.py | pypi |
Если что, я знаю про gettext"""
from abc import ABC, abstractmethod
import csv
import sqlite3
class IDataProvider(ABC):
"""Поставщик данных (ключ-значение)"""
@abstractmethod
def get_value(self, key: str) -> str:
...
def __call__(self, key: str):
return self.get_value(key)
@abstractmethod
def __len__(self):
...
class CSVProvider(IDataProvider):
"""CSV internationalization data provider"""
def __init__(self, file_name: str, key_field_name: str, lang: str, default_lang="EN"):
self._storage = file_name
self._key_field = key_field_name
self._value_field = lang
self._vals = dict()
# заполнение словаря
try:
self._fill_stor(self._key_field, lang)
except IndexError:
pass
except LookupError:
pass
except ValueError:
pass
else:
return # исключения не было!
# было исключение, возможно задан неверный язык локализации
# последняя попытка с языком по умолчанию
self._fill_stor(self._key_field, default_lang)
def _fill_stor(self, key: str, value: str):
self._vals.clear()
for k, v in self._get_fields_by_names((key, value)):
self._vals[k] = v
def _get_fields_by_names(self, column_names: [tuple, list], delimiter: str = ',') -> tuple:
"""Итератор, который возвращает за каждый вызов кортеж из полей csv файла, имена которых (первая строка),
в виде строк, содержит последовательность field_names"""
with open(self._storage, mode='r', encoding="utf-8", newline='') as csv_file:
row_reader = csv.reader(csv_file, delimiter=delimiter)
_b = True
for _row in row_reader:
if _b: # первая строка cvs файла должна содержать названия столбцов! создание кортежа индексов столбцов
column_indexes = [_row.index(column_name) for column_name in column_names]
_b = False
continue
# кортеж значений строк нужных столбцов
yield tuple([_row[_index] for _index in column_indexes])
def get_value(self, key: str) -> str:
return self._vals[key]
def __len__(self):
return len(self._vals)
class SQLiteDataProvider(IDataProvider):
"""SQLite internationalization data provider"""
def __init__(self, connection_string: str, key_field_name: str, lang: str, default_lang="EN"):
self._conn_str = connection_string
self._key_field = key_field_name
self._value_field = lang
self._vals = dict()
# заполнение словаря
try:
self._fill_stor(self._key_field, lang)
except sqlite3.Error:
pass
except ValueError:
pass
else:
return # исключения не было!
# было исключение, возможно задан неверный язык локализации
# последняя попытка с языком по умолчанию
self._fill_stor(self._key_field, default_lang)
def _fill_stor(self, key: str, value: str):
self._vals.clear()
for k, v in self._get_fields_by_names(key, value):
self._vals[k] = v
def _get_fields_by_names(self, str_id_column_name: str, lang_column_name: str) -> tuple:
"""Итератор, который возвращает за каждый вызов кортеж из полей csv файла, имена которых (первая строка),
в виде строк, содержит последовательность field_names"""
with sqlite3.connect(f"file:{self._conn_str}?mode=ro") as connection: # open as read only!
str_sql = f"select {str_id_column_name}, {lang_column_name} from istrings;"
for row in connection.execute(str_sql):
# кортеж значений строк нужных столбцов
yield row
def get_value(self, key: str) -> str:
return self._vals[key]
def __len__(self):
return len(self._vals) | /remove_duplicates-1.0.5-py3-none-any.whl/remove_duplicates/internationalization.py | 0.429908 | 0.264979 | internationalization.py | pypi |
# removestar
[](https://github.com/asmeurer/removestar/actions?query=branch:master)
Tool to automatically replace `import *` imports in Python files with explicit imports
Requires pyflakes.
Current limitations:
- Assumes only names in the current file are used by star imports (e.g., it
won't work to replace star imports in `__init__.py`).
For files within the same module, removestar determines missing imported names
statically. For external library imports, including imports of standard
library modules, it dynamically imports the module to determine the names.
This can be disabled with the `--no-dynamic-importing` flag.
See the [issue tracker](https://github.com/asmeurer/removestar/issues). Pull
requests are welcome.
## Installation
```
pip install removestar
```
or if you use conda
```
conda install -c conda-forge removestar
```
## Usage
```
$ removestar file.py # Shows diff but does not edit file.py
$ removestar -i file.py # Edits file.py in-place
$ removestar -i module/ # Modifies every Python file in module/ recursively
```
## Why is `import *` so bad?
Doing `from module import *` is generally frowned upon in Python. It is
considered acceptable when working interactively at a `python` prompt, or in
`__init__.py` files (removestar skips `__init__.py` files by default).
Some reasons why `import *` is bad:
- It hides which names are actually imported.
- It is difficult both for human readers and static analyzers such as
pyflakes to tell where a given name comes from when `import *` is used. For
example, pyflakes cannot detect unused names (for instance, from typos) in
the presence of `import *`.
- If there are multiple `import *` statements, it may not be clear which names
come from which module. In some cases, both modules may have a given name,
but only the second import will end up being used. This can break people's
intuition that the order of imports in a Python file generally does not
matter.
- `import *` often imports more names than you would expect. Unless the module
you import defines `__all__` or carefully `del`s unused names at the module
level, `import *` will import every public (doesn't start with an
underscore) name defined in the module file. This can often include things
like standard library imports or loop variables defined at the top-level of
the file. For imports from modules (from `__init__.py`), `from module import
*` will include every submodule defined in that module. Using `__all__` in
modules and `__init__.py` files is also good practice, as these things are
also often confusing even for interactive use where `import *` is
acceptable.
- In Python 3, `import *` is syntactically not allowed inside of a function.
Here are some official Python references stating not to use `import *` in
files:
- [The official Python
FAQ](https://docs.python.org/3/faq/programming.html?highlight=faq#what-are-the-best-practices-for-using-import-in-a-module):
> In general, don’t use `from modulename import *`. Doing so clutters the
> importer’s namespace, and makes it much harder for linters to detect
> undefined names.
- [PEP 8](https://www.python.org/dev/peps/pep-0008/#imports) (the official
Python style guide):
> Wildcard imports (`from <module> import *`) should be avoided, as they
> make it unclear which names are present in the namespace, confusing both
> readers and many automated tools.
Unfortunately, if you come across a file in the wild that uses `import *`, it
can be hard to fix it, because you need to find every name in the file that is
imported from the `*`. Removestar makes this easy by finding which names come
from `*` imports and replacing the import lines in the file automatically.
## Example
Suppose you have a module `mymod` like
```
mymod/
| __init__.py
| a.py
| b.py
```
With
```py
# mymod/a.py
from .b import *
def func(x):
return x + y
```
```py
# mymod/b.py
x = 1
y = 2
```
Then `removestar` works like:
```
$ removestar mymod/
--- original/mymod/a.py
+++ fixed/mymod/a.py
@@ -1,5 +1,5 @@
# mymod/a.py
-from .b import *
+from .b import y
def func(x):
return x + y
```
This does not edit `a.py` by default. The `-i` flag causes it to edit `a.py` in-place:
```
$ removestar -i mymod/
$ cat mymod/a.py
# mymod/a.py
from .b import y
def func(x):
return x + y
```
## Command line options
<!-- TODO: Autogenerate this somehow -->
```
$ removestar --help
usage: removestar [-h] [-i] [--version] [--no-skip-init]
[--no-dynamic-importing] [-v] [-q]
[--max-line-length MAX_LINE_LENGTH]
PATH [PATH ...]
Tool to automatically replace "import *" imports with explicit imports
Requires pyflakes.
Usage:
$ removestar file.py # Shows diff but does not edit file.py
$ removestar -i file.py # Edits file.py in-place
$ removestar -i module/ # Modifies every Python file in module/ recursively
positional arguments:
PATH Files or directories to fix
optional arguments:
-h, --help show this help message and exit
-i, --in-place Edit the files in-place. (default: False)
--version Show removestar version number and exit.
--no-skip-init Don't skip __init__.py files (they are skipped by
default) (default: True)
--no-dynamic-importing
Don't dynamically import modules to determine the list
of names. This is required for star imports from
external modules and modules in the standard library.
(default: True)
-v, --verbose Print information about every imported name that is
replaced. (default: False)
-q, --quiet Don't print any warning messages. (default: False)
--max-line-length MAX_LINE_LENGTH
The maximum line length for replaced imports before
they are wrapped. Set to 0 to disable line wrapping.
(default: 100)
```
## Whitelisting star imports
`removestar` does not replace star import lines that are marked with
[Flake8 `noqa` comments][noqa-comments] that permit star imports (`F401` or
`F403`).
[noqa-comments]: https://flake8.pycqa.org/en/3.1.1/user/ignoring-errors.html#in-line-ignoring-errors
For example, the star imports in this module would be kept:
```py
from os import * # noqa: F401
from .b import * # noqa
def func(x):
return x + y
```
## Changelog
See the [CHANGELOG](CHANGELOG.md) file.
## License
[MIT](LICENSE)
| /removestar-1.3.1.tar.gz/removestar-1.3.1/README.md | 0.561455 | 0.831759 | README.md | pypi |
.. _example02:
===============================
Example 02 -- Simple model fits
===============================
Aims
====
* Create a :class:`.LikelihoodCalculator` with the response matrix
and experiment data
* Calculate likelihoods of model predictions
* Create a :class:`.HypothesisTester` and calculate p-values
* Fit parameters and calculate p-values of composite hypotheses
* Construct confidence intervals of parameters of composite hypotheses
Instructions
============
The calculation of likelihoods and p-values is handled by the classes in the
:mod:`.likelihood` module::
import numpy as np
from remu import binning
from remu import plotting
from remu import likelihood
Some calculations handled in this module can be parallelized by setting the
``mapper`` function to something that uses parallel processes or threads, e.g.
the map function of a :class:`multiprocess.Pool` object::
from multiprocess import Pool
pool = Pool(8)
likelihood.mapper = pool.map
This is completely optional, but can speed up the calculation of p-values
considerably. Please note the use of the ``multiprocess`` package instead of
Python's native ``multiprocessing``. The latter does not support the pickling
of arbitrary functions, so it does not work.
First we will create :class:`.DataModel` and :class:`.ResponseMatrixPredictor`
objects from the information of the previous examples::
response_matrix = "../01/response_matrix.npz"
with open("../01/reco-binning.yml", 'rt') as f:
reco_binning = binning.yaml.full_load(f)
with open("../01/optimised-truth-binning.yml", 'rt') as f:
truth_binning = binning.yaml.full_load(f)
reco_binning.fill_from_csv_file("../00/real_data.txt")
data = reco_binning.get_entries_as_ndarray()
data_model = likelihood.PoissonData(data)
matrix_predictor = likelihood.ResponseMatrixPredictor(response_matrix)
The data model knows how to compare event rate predictions to the data and
calculate the respective likelihoods, in this case using Poisson statistics.
The matrix predictor contains the information of the previously built response
matrix and is used to predict reco-space event rates from truth-space event
rates. These two can now be combined into a :class:`.LikelihoodCalculator` and
:class:`.HypothesisTester`::
calc = likelihood.LikelihoodCalculator(data_model, matrix_predictor)
test = likelihood.HypothesisTester(calc)
Likelihood calculators are in charge of computing likelihoods of parameter
sets. In this case, it will calculate the likelihoods of truth-space
event-rates, as that is what the predictor is expecting as parameters.
Hypothesis testers use the likelihood calculator to do statistical tests and
calculate p-values.
Now we need some models to test against the data. We will use the models A and
B of the previous steps, but we will turn them into area-normalised templates::
truth_binning.fill_from_csv_file("../00/modelA_truth.txt")
modelA = truth_binning.get_values_as_ndarray()
modelA /= np.sum(modelA)
truth_binning.reset()
truth_binning.fill_from_csv_file("../00/modelB_truth.txt")
modelB = truth_binning.get_values_as_ndarray()
modelB /= np.sum(modelB)
Let us calculate some likelihoods and p-values with these templates, assuming
a total of 1000 expected events in the truth space (i.e. before efficiency
effects)::
print(calc(modelA*1000))
print(test.likelihood_p_value(modelA*1000))
print(calc(modelB*1000))
print(test.likelihood_p_value(modelB*1000))
.. include:: simple_hypotheses.txt
:literal:
The exact results may vary due to statistical fluctuations. Especially the
p-values are calculated by generating random data sets assuming the tested
model is true. The fraction of data sets with a worse likelihood than the
actual measured one is the p-value. Depending on the required confidence level,
we could exclude the "1000*A" hypothesis, while the "1000*B" hypothesis is more
compatible with the data.
Models that predict the true distribution of events usually have some free
parameters. For example we could assume that the shapes of models A and B are
well motivated but the total number of events is not well predicted. To test
these more flexible models, we can create predictors that take the free
parameters of the models as inputs and predict event rates in truth space.
By composing (i.e. "chaining") these predictors to the matrix predictor,
we can then build a likelihood calculator that takes these parameters as
inputs directly::
modelA_shape = likelihood.TemplatePredictor([modelA])
modelA_reco_shape = matrix_predictor.compose(modelA_shape)
calcA = likelihood.LikelihoodCalculator(data_model, modelA_reco_shape)
This example uses the :class:`.TemplatePredictor` class, which takes a list of
templates as its initialisation parameter and creates a predictor with one
template weight parameter per template. Since we only provide one template
here, it only takes one parameter.
We can now do a maximum likelihood fit with the model, using a
:class:`.BasinHoppingMaximiser`::
maxi = likelihood.BasinHoppingMaximizer()
retA = maxi(calcA)
print(retA)
.. include:: modelA_fit.txt
:literal:
The parameter values of the maximum likelihood solution are returned as
``ret.x``. The actual maximum log likelihood is stored in
``ret.log_likelihood``. The other properties of the returned object show the
status of the optimisation and are not important for this example.
Instead of composing the predictors and building a new likelihood calculator
with the result, it is also possible to directly compose the model predictor
with the likelihood calculator::
modelB_shape = likelihood.TemplatePredictor([modelB])
calcB = calc.compose(modelB_shape)
retB = maxi(calcB)
print(retB)
.. include:: modelB_fit.txt
:literal:
The maximum likelihood solutions for model A shows a lower number of events
than that of model B. This is due to the higher average efficiency of
reconstructing the events of model A, i.e. their distribution in ``y``. The
maximum log likelihood of model B is higher than for model A. So model B is
able to describe the given data better than model A. This is also reflected in
the p-values::
testA = likelihood.HypothesisTester(calcA, maximizer=maxi)
testB = likelihood.HypothesisTester(calcB, maximizer=maxi)
print(testA.max_likelihood_p_value())
print(testB.max_likelihood_p_value())
.. include:: fit_p-values.txt
:literal:
Here we explicitly told the hypothesis testers which maximiser to use, but this
is optional.
Again the p-value is calculated from randomly generated data sets assuming the
given model is true. This time it is the ratio of data sets that yield a worse
*maximum* likelihood though. This means a fit is performed for each mock data
set.
We can also take a qualitative look at the fit of data and the two models by
plotting the result in reco space::
pltr = plotting.get_plotter(reco_binning)
pltr.plot_entries(label='data', hatch=None)
modelA_reco, modelA_weights = modelA_reco_shape(retA.x)
modelA_logL = calcA(retA.x)
modelA_p = testA.likelihood_p_value(retA.x)
modelB_reco, modelB_weights = calcB.predictor(retB.x)
modelB_logL = calcB(retB.x)
modelB_p = testB.likelihood_p_value(retB.x)
pltr.plot_array(modelA_reco,
label='model A: $\log L=%.1f$, $p=%.3f$'%(modelA_logL, modelA_p),
hatch=None, linestyle='dashed')
pltr.plot_array(modelB_reco,
label='model B: $\log L=%.1f$, $p=%.3f$'%(modelB_logL, modelB_p),
hatch=None, linestyle='dotted')
pltr.legend(loc='lower center')
pltr.savefig("reco-comparison.png")
.. image:: reco-comparison.png
The p-value shown in this plot is again the :meth:`.likelihood_p_value` (as
opposed to the :meth:`.max_likelihood_p_value`). This is a better
representation of the goodness of fit of the maximum likelihood solution,
roughly equivalent to checking for a "chi-square" close to the number of bins.
The return values of the predictors are formatted to accommodate multiple
weighted predictions per set of parameters, i.e. systematic uncertainties. We
can ignore the weights for now.
Usually there is more than one template to be fitted to the data. Let's see
what happens if we allow combinations of model A and B::
mix_model = likelihood.TemplatePredictor([modelA, modelB])
calc_mix = calc.compose(mix_model)
ret = maxi.maximize_log_likelihood(calc_mix)
print(ret)
.. include:: mix_model_fit.txt
:literal:
::
test = likelihood.HypothesisTester(calc_mix)
print(test.max_likelihood_p_value())
.. include:: mix_model_p_value.txt
:literal:
The two parameters of this new combined model are the weights of model A and B
respectively. This allows a contribution of model A in the maximum likelihood
solution.
It might be useful to calculate a confidence interval for a parameter embedded
in a larger hypothesis with more parameters. This can be done by fixing that
parameter at different values (reducing the number of free parameters) and
calculating the likelihood ratio of this new embedded hypothesis and the
embedding original hypothesis. Comparing this likelihood ratio with the
expected distribution of likelihood ratios assuming the embedded hypothesis is
true yields p-values that can be used to construct the confidence interval::
p_values = []
A_values = np.linspace(0, 1000, 11)
for A in A_values:
p = test.max_likelihood_ratio_p_value((A,None))
print(A, p)
p_values.append(p)
Calculating these might take a while. The method
:meth:`.max_likelihood_ratio_p_value` fixes the specified parameters and
generates toy data sets at the best fit point of the remaining parameters. It
then computes the maximum likelihoods for both the fixed and unfixed version of
the predictors for all toy data sets. The fraction of toy data sets with an
equal or worse maximum likelihood ratio than the real data is the p-value.
This p-value is sometimes called the “profile plug-in p-value”, as one “plugs
in” the maximum likelihood estimate of the hypothesis' (nuisance) parameters to
generate the toy data and calculate the p-value. Its coverage properties are
not exact, so care has to be taken to make sure it performs as expected (e.g.
by testing it with simulated data).
In the limit of "large statistics", the maximum log likelihood ratio should be
distributed like a chi-square distribution, according to Wilks' theorem. This
can be used to speed up the calculation of p-values considerably, as it skips
the generation of, and fits to, toy data sets::
wilks_p_values = []
fine_A_values = np.linspace(0, 1000, 100)
for A in fine_A_values:
p = test.wilks_max_likelihood_ratio_p_value((A,None))
print(A, p)
wilks_p_values.append(p)
This can then be plotted with your usual plotting libraries::
from matplotlib import pyplot as plt
fig, ax = plt.subplots()
ax.set_xlabel("Model A weight")
ax.set_ylabel("p-value")
ax.plot(A_values, p_values, label="Profile plug-in")
ax.plot(fine_A_values, wilks_p_values, label="Wilks")
ax.axvline(ret.x[0], color='k', linestyle='solid')
ax.axhline(0.32, color='k', linestyle='dashed')
ax.axhline(0.05, color='k', linestyle='dashed')
ax.legend(loc='best')
fig.savefig("p-values.png")
.. image:: p-values.png
The confidence interval is the region of the parameter space with a p-value
over the desired test significance. The maximum likelihood solution is shown as
vertical line. Please note that this assumes that the full hypothesis with no
fixed parameters is true, i.e. that some combination of model A and model B
templates actually describes reality.
| /remu-0.8.0.tar.gz/remu-0.8.0/docs/examples/02/README.rst | 0.956917 | 0.851274 | README.rst | pypi |
import numpy as np
from multiprocess import Pool

from remu import binning, likelihood, plotting

# Parallelise likelihood evaluations: remu dispatches internal map operations
# through ``likelihood.mapper``, so point it at a process pool's map.
pool = Pool(8)
likelihood.mapper = pool.map

# Response matrix exported in example 01.
response_matrix = "../01/response_matrix.npz"

# Load the reco and optimised truth binnings produced in example 01.
with open("../01/reco-binning.yml") as f:
    reco_binning = binning.yaml.full_load(f)
with open("../01/optimised-truth-binning.yml") as f:
    truth_binning = binning.yaml.full_load(f)

# Fill the measured data and assemble the likelihood machinery:
# Poisson data model + response-matrix predictor -> calculator + tester.
reco_binning.fill_from_csv_file("../00/real_data.txt")
data = reco_binning.get_entries_as_ndarray()
data_model = likelihood.PoissonData(data)
matrix_predictor = likelihood.ResponseMatrixPredictor(response_matrix)
calc = likelihood.LikelihoodCalculator(data_model, matrix_predictor)
test = likelihood.HypothesisTester(calc)

# Turn the true distributions of models A and B into area-normalised templates.
truth_binning.fill_from_csv_file("../00/modelA_truth.txt")
modelA = truth_binning.get_values_as_ndarray()
modelA /= np.sum(modelA)
truth_binning.reset()
truth_binning.fill_from_csv_file("../00/modelB_truth.txt")
modelB = truth_binning.get_values_as_ndarray()
modelB /= np.sum(modelB)

# Simple (fixed) hypotheses: each template scaled to 1000 expected true events.
with open("simple_hypotheses.txt", "w") as f:
    print(calc(modelA * 1000), file=f)
    print(test.likelihood_p_value(modelA * 1000), file=f)
    print(calc(modelB * 1000), file=f)
    print(test.likelihood_p_value(modelB * 1000), file=f)

# Model A with a free template weight: compose the template predictor with the
# response-matrix predictor and maximise the likelihood over the weight.
modelA_shape = likelihood.TemplatePredictor([modelA])
modelA_reco_shape = matrix_predictor.compose(modelA_shape)
calcA = likelihood.LikelihoodCalculator(data_model, modelA_reco_shape)
maxi = likelihood.BasinHoppingMaximizer()
retA = maxi(calcA)
with open("modelA_fit.txt", "w") as f:
    print(retA, file=f)

# Model B: compose the template predictor directly with the calculator instead.
modelB_shape = likelihood.TemplatePredictor([modelB])
calcB = calc.compose(modelB_shape)
retB = maxi(calcB)
with open("modelB_fit.txt", "w") as f:
    print(retB, file=f)

# Max-likelihood p-values of the two fitted models (explicit maximiser).
testA = likelihood.HypothesisTester(calcA, maximizer=maxi)
testB = likelihood.HypothesisTester(calcB, maximizer=maxi)
with open("fit_p-values.txt", "w") as f:
    print(testA.max_likelihood_p_value(), file=f)
    print(testB.max_likelihood_p_value(), file=f)

# Compare data and the two best-fit models in reco space.
pltr = plotting.get_plotter(reco_binning)
pltr.plot_entries(label="data", hatch=None)
# Predictors return (prediction, weights); the weights are ignored here.
modelA_reco, modelA_weights = modelA_reco_shape(retA.x)
modelA_logL = calcA(retA.x)
modelA_p = testA.likelihood_p_value(retA.x)
modelB_reco, modelB_weights = calcB.predictor(retB.x)
modelB_logL = calcB(retB.x)
modelB_p = testB.likelihood_p_value(retB.x)
pltr.plot_array(
    modelA_reco,
    label=rf"model A: $\log L={modelA_logL:.1f}$, $p={modelA_p:.3f}$",
    hatch=None,
    linestyle="dashed",
)
pltr.plot_array(
    modelB_reco,
    label=rf"model B: $\log L={modelB_logL:.1f}$, $p={modelB_p:.3f}$",
    hatch=None,
    linestyle="dotted",
)
pltr.legend(loc="lower center")
pltr.savefig("reco-comparison.png")
del pltr

# Combined model: one free weight per template (A and B).
mix_model = likelihood.TemplatePredictor([modelA, modelB])
calc_mix = calc.compose(mix_model)
ret = maxi.maximize_log_likelihood(calc_mix)
with open("mix_model_fit.txt", "w") as f:
    print(ret, file=f)
test = likelihood.HypothesisTester(calc_mix)
with open("mix_model_p_value.txt", "w") as f:
    print(test.max_likelihood_p_value(), file=f)

# Scan the model A weight, keeping the B weight free (None = unfixed):
# profile plug-in p-values from toy data sets (slow) ...
p_values = []
A_values = np.linspace(0, 1000, 11)
for A in A_values:
    p = test.max_likelihood_ratio_p_value((A, None))
    print(A, p)
    p_values.append(p)

# ... and the Wilks'-theorem approximation on a finer grid (fast).
wilks_p_values = []
fine_A_values = np.linspace(0, 1000, 100)
for A in fine_A_values:
    p = test.wilks_max_likelihood_ratio_p_value((A, None))
    print(A, p)
    wilks_p_values.append(p)

# Plot both p-value curves with the best-fit point and common test levels.
from matplotlib import pyplot as plt

fig, ax = plt.subplots()
ax.set_xlabel("Model A weight")
ax.set_ylabel("p-value")
ax.plot(A_values, p_values, label="Profile plug-in")
ax.plot(fine_A_values, wilks_p_values, label="Wilks")
ax.axvline(ret.x[0], color="k", linestyle="solid")
ax.axhline(0.32, color="k", linestyle="dashed")
ax.axhline(0.05, color="k", linestyle="dashed")
ax.legend(loc="best")
fig.savefig("p-values.png")

# Avoid thread cleanup messes
likelihood.mapper = map
del pool | /remu-0.8.0.tar.gz/remu-0.8.0/docs/examples/02/fit_models.py | 0.5144 | 0.55103 | fit_models.py | pypi |
import numpy as np
from scipy.special import ndtr # 1/2[1 + erf(z/sqrt(2))]
def build_array(dictionary):
    """Turn a dict of arrays into a structured array.

    Each key becomes a field of the output dtype; each value supplies that
    field's dtype and data. All arrays must have the same length. An empty
    dict yields an empty (length-0, field-less) structured array instead of
    raising an ``UnboundLocalError`` like the previous implementation did.
    """
    keys = list(dictionary.keys())
    dtype = np.dtype([(k, dictionary[k].dtype) for k in keys])
    # Size the output from the dict itself rather than relying on a loop
    # variable surviving past its loop.
    length = len(dictionary[keys[0]]) if keys else 0
    arr = np.empty(length, dtype=dtype)
    for k in keys:
        arr[k] = dictionary[k]
    return arr
class Generator:
    """Generates "true" events according to model."""

    def __init__(self, cross_section=100.0):
        self.cross_section = cross_section

    def generate_exposed(self, exposure, **kwargs):
        """Generate events according to experiment exposure."""
        # Expected event count scales with exposure; actual count is Poisson.
        expected_events = self.cross_section * exposure
        n_events = np.random.poisson(lam=expected_events)
        return self.generate(n_events, **kwargs)

    def generate(self, n, **kwargs):
        """Generate n events."""
        # Delegate the actual sampling to the subclass implementation.
        return self._generate(n, **kwargs)
class ModelAGenerator(Generator):
    """Model A

    x, y ~ Normal(mean=[0.1, 0.20], cov=[[1.0,0.0],[0.0,1.0]])
    """

    def _generate(self, n):
        # Draw n (x, y) pairs from the model A bivariate normal.
        sample = np.random.multivariate_normal(
            mean=[0.1, 0.20], cov=[[1.0, 0.0], [0.0, 1.0]], size=n
        )
        return build_array({"true_x": sample[:, 0], "true_y": sample[:, 1]})
class ModelBGenerator(Generator):
    """Model B

    x, y ~ Normal(mean=[0.0, 0.0], cov=[[1.0,0.5],[0.5,1.0]])
    """

    def _generate(self, n):
        # Draw n (x, y) pairs from the model B bivariate normal (correlated).
        sample = np.random.multivariate_normal(
            mean=[0.0, 0.0], cov=[[1.0, 0.5], [0.5, 1.0]], size=n
        )
        return build_array({"true_x": sample[:, 0], "true_y": sample[:, 1]})
class BGGenerator(Generator):
    """Background

    x, y ~ Normal(mean=[0.5, 0.5], cov=[[0.5,0.0],[0.0,0.5]])
    """

    def _generate(self, n):
        # Draw n (x, y) pairs from the background bivariate normal.
        sample = np.random.multivariate_normal(
            mean=[0.5, 0.5], cov=[[0.5, 0.0], [0.0, 0.5]], size=n
        )
        return build_array({"true_x": sample[:, 0], "true_y": sample[:, 1]})
class NoiseGenerator(Generator):
    """Noise

    x, y ~ Normal(mean=[0.0, 0.0], cov=[[4.0,0.0],[0.0,4.0]])
    """

    def _generate(self, n):
        # Noise events carry reco-level fields directly (no truth columns).
        sample = np.random.multivariate_normal(
            mean=[0.0, 0.0], cov=[[4.0, 0.0], [0.0, 4.0]], size=n
        )
        return build_array({"reco_x": sample[:, 0], "reco_y": sample[:, 1]})
class Detector:
    """Turn truth data into reconstructed events.

    ``x`` is smeared with a normal ``sigma=1``.
    The efficiency depends on ``y``: ``eff = ndtr(slope*(y-y0))`` with ``slope=1.``
    """

    def __init__(self, smear_sigma=1, eff_slope=1.0, eff_offset=0.0, max_eff=0.9):
        self.smear_sigma = smear_sigma
        self.eff_slope = eff_slope
        self.eff_offset = eff_offset
        self.max_eff = max_eff

    def reconstruct(self, events, keep_truth=False):
        """Turn events into reconstructed events."""
        # Accept each event with probability equal to its efficiency,
        # then smear the surviving ones.
        accept_prob = self.efficiency(events)
        draws = np.random.uniform(low=0.0, high=1.0, size=events.shape)
        accepted = events[draws <= accept_prob]
        return self.smear(accepted, keep_truth=keep_truth)

    def efficiency(self, events):
        """Return efficiency of given true events."""
        shifted_y = events["true_y"] - self.eff_offset
        return self.max_eff * ndtr(self.eff_slope * shifted_y)

    def smear(self, events, keep_truth=False):
        # ``y`` is passed through unsmeared; ``x`` gets Gaussian smearing.
        reco_y = np.array(events["true_y"])
        reco_x = np.array(events["true_x"])
        n_events = len(reco_x)
        reco_x = reco_x + np.random.normal(
            loc=0.0, scale=self.smear_sigma, size=n_events
        )
        dic = {"reco_x": reco_x, "reco_y": reco_y}
        if keep_truth:
            dic["true_x"] = events["true_x"]
            dic["true_y"] = events["true_y"]
        return build_array(dic)
import argparse
import csv

import experiment
import numpy as np
import numpy.lib.recfunctions as rfn

# Command line interface: input simulation file and output file for the
# detector-varied events.
parser = argparse.ArgumentParser(
    description="Modify the reconstructed events of a simulation."
)
parser.add_argument("inputfilename", help="where to get the data")
parser.add_argument("datafilename", help="where to store the data")
args = parser.parse_args()

# Nominal detector
nominal_detector = experiment.Detector()

# Toy parameters: random Gaussian variations of the detector parameters
# around their nominal values, one value per toy.
np.random.seed(1337)  # Make sure the variations are always the same
n_toys = 100
eff_slopes = np.abs(1.0 + 0.1 * np.random.randn(n_toys))
eff_offsets = 0.1 * np.random.randn(n_toys)
smear_sigmas = np.abs(1.0 + 0.1 * np.random.randn(n_toys))
max_effs = 1.0 - np.abs(0.1 + 0.03 * np.random.randn(n_toys))

# Toy detectors: one Detector object per set of toy parameters.
toy_detectors = []
for slope, offset, sigma, max_eff in zip(
    eff_slopes, eff_offsets, smear_sigmas, max_effs
):
    toy_detectors.append(
        experiment.Detector(
            eff_slope=slope, eff_offset=offset, smear_sigma=sigma, max_eff=max_eff
        )
    )

# Load the simulated events (CSV with a header row -> structured array).
events = np.genfromtxt(args.inputfilename, names=True, delimiter=",")

# Calculate weights as efficiency ratio of nominal and toy detectors
weights = []
nominal = nominal_detector.efficiency(events)
for i, toy in enumerate(toy_detectors):
    # One weight column per toy: "weight_0", "weight_1", ...
    weights.append(
        np.array(toy.efficiency(events) / nominal, dtype=[("weight_%i" % (i,), float)])
    )
weights = rfn.merge_arrays(weights, flatten=True, usemask=False)

# Modify x smearing by sigma ratio
reco_x = []
nominal = 1.0  # nominal smearing sigma
for i, toy in enumerate(smear_sigmas):
    tru = events["true_x"]
    dif = events["reco_x"] - tru
    # Scale the truth->reco difference by the ratio of toy to nominal sigma.
    new = toy / nominal * dif + tru
    reco_x.append(np.array(new, dtype=[("reco_x_%i" % (i,), float)]))
reco_x = rfn.merge_arrays(reco_x, flatten=True, usemask=False)

# Replace the nominal reco_x column with the per-toy variations and append
# the per-toy weight columns.
events = rfn.drop_fields(events, ["reco_x"])
events = rfn.merge_arrays([events, weights, reco_x], flatten=True, usemask=False)

# Write everything back out as CSV.
csvfields = events.dtype.names
with open(args.datafilename, "w") as f:
    writer = csv.DictWriter(f, csvfields, delimiter=",")
    writer.writerow({fn: fn for fn in csvfields})  # Write the field names
    for event in events:
writer.writerow({k: event[k] for k in event.dtype.names}) | /remu-0.8.0.tar.gz/remu-0.8.0/docs/examples/simple_experiment/vary_detector.py | 0.624752 | 0.387603 | vary_detector.py | pypi |
import argparse
import csv
import experiment
parser = argparse.ArgumentParser(
description="Simulate the experiment and get reconstructed and true events."
)
parser.add_argument(
"years", type=float, help="how many years worth of data should be geneated"
)
parser.add_argument(
"model",
help="what model to simulate",
choices=["modelA", "modelB", "background", "noise"],
)
parser.add_argument("datafilename", help="where to store the data")
parser.add_argument("truthfilename", help="where to store the truth")
args = parser.parse_args()
# Get the model
if args.model == "modelA":
gen = experiment.ModelAGenerator(cross_section=100)
elif args.model == "modelB":
gen = experiment.ModelBGenerator(cross_section=100)
elif args.model == "background":
gen = experiment.BGGenerator(cross_section=30)
elif args.model == "noise":
gen = experiment.NoiseGenerator(cross_section=20)
# Generate the events
true_events = gen.generate_exposed(args.years)
csvfields = true_events.dtype.names
with open(args.truthfilename, "w") as f:
writer = csv.DictWriter(f, csvfields, delimiter=",")
writer.writerow({fn: fn for fn in csvfields}) # Write the field names
for event in true_events:
writer.writerow({k: event[k] for k in event.dtype.names})
# Reconstruct events
if args.model == "background":
# Detector reacts differently to BG events
detector = experiment.Detector(smear_sigma=2.0)
reco_events = detector.reconstruct(true_events, keep_truth=True)
elif args.model == "noise":
# Noise is already reco
reco_events = true_events
else:
detector = experiment.Detector()
reco_events = detector.reconstruct(true_events, keep_truth=True)
csvfields = reco_events.dtype.names
with open(args.datafilename, "w") as f:
writer = csv.DictWriter(f, csvfields, delimiter=",")
writer.writerow({fn: fn for fn in csvfields}) # Write the field names
for event in reco_events:
writer.writerow({k: event[k] for k in event.dtype.names}) | /remu-0.8.0.tar.gz/remu-0.8.0/docs/examples/simple_experiment/simulate_experiment.py | 0.508056 | 0.305011 | simulate_experiment.py | pypi |
.. _example01:
========================================
Example 01 -- Building a response matrix
========================================
Aims
====
* Use simulated events to build detector response matrix
* Test the model-independence of the matrix
* Optimise the binning
Instructions
============
A response matrix describes the probabilities of true events to be
reconstructed with different reconstructed properties. It is defined by the
binning of events in truth and reco space, and the simulated events that are
used to fill the matrix.
Let us assume we would like to investigate the variable ``x`` of the example
data. The files ``reco-binning.yml`` and ``coarse-truth-binning.yml`` contain
the bin edges in ``x`` that we will use to build the response matrix. The
response matrix object is created using the respective binning objects::
import numpy as np
from remu import binning
from remu import migration
from remu import plotting
from remu import matrix_utils
with open("reco-binning.yml", 'rt') as f:
reco_binning = binning.yaml.full_load(f)
with open("coarse-truth-binning.yml", 'rt') as f:
truth_binning = binning.yaml.full_load(f)
respA = migration.ResponseMatrix(reco_binning, truth_binning)
This :class:`.ResponseMatrix` object provides methods to populate it with
simulated events::
respA.fill_from_csv_file("../00/modelA_data.txt")
Often, the true distributions of the events before the selection are stored in
a dedicated data set. To get the efficiencies right, the response matrix
objects have a method to "top up" only the truth distributions::
respA.fill_up_truth_from_csv_file("../00/modelA_truth.txt")
This will not affect the reconstructed distributions. It is assumed that the
reconstructed events are a subset of the truth events.
We can now take a look at the matrix using the various plotting methods in the
:mod:`.matrix_utils` and :mod:`.plotting` modules. The mean migration matrix
(ignoring statistical uncertainties) can be plotted like this::
matrix_utils.plot_mean_response_matrix(respA, "response_matrix_A.png")
.. image:: response_matrix_A.png
The sum of the matrix elements projected on the truth bins gives us the
efficiencies of the respective bins in the mean matrix. The very low and very
high bins seem to show a constant efficiency of exactly 0.5. This is caused by
the prior assumption of the response matrix. Before adding any actual events,
all efficiencies are considered to be equally likely and the mean of that flat
prior is 0.5. The low and high bins indeed have only very few events in them::
pltr = plotting.get_plotter(respA.truth_binning)
pltr.plot_entries()
pltr.savefig("entries_A.png")
.. image:: entries_A.png
Note that the y-axis of this plot shows the *density* of events, i.e. the
number of events in the bin divided by the relative bin width. This behaviour
can be switched off with the ``density`` argument::
pltr = plotting.get_plotter(respA.truth_binning)
pltr.plot_entries(density=False, hatch=None)
pltr.savefig("abs_entries_A.png")
.. image:: abs_entries_A.png
The efficiency of this matrix looks reasonably flat (within the statistical
fluctuations). This would seem to make sense, since the efficiency of the
example experiment does not depend on ``x``. In fact, this is misleading
though, since the plot implicitly integrates over ``y`` and its influence on
the efficiency. This can be seen when repeating the previous steps with a
different model ("model B")::
reco_binning = reco_binning.clone()
truth_binning = truth_binning.clone()
reco_binning.reset()
truth_binning.reset()
respB = migration.ResponseMatrix(reco_binning, truth_binning)
respB.fill_from_csv_file("../00/modelB_data.txt")
respB.fill_up_truth_from_csv_file("../00/modelB_truth.txt")
matrix_utils.plot_mean_response_matrix(respB, "response_matrix_B.png")
.. image:: response_matrix_B.png
Here the efficiency clearly varies over ``x``, despite the efficiency of the
detector not actually depending on it. The reason for this is simple: In model
B, ``x`` and ``y`` are correlated.
This implicit dependency on a variable that is not part of the truth binning,
leads to the two created response matrices being very different. We can see
that explicitly when plotting the Mahalanobis distance (i.e. "chi-square")
between the two matrices::
matrix_utils.plot_mahalanobis_distance(respA, respB, "mahalanobis_distance.png")
.. image:: mahalanobis_distance.png
The differences are much larger than would be expected from purely statistical
fluctuations. This is also reflected in the overall distance between the
matrices, which can be used to define a compatibility score::
matrix_utils.plot_compatibility(respA, respB, "compatibility.png")
.. image:: compatibility.png
The plot shows the expected distributions of distances according to the
statistical uncertainties, if the matrices were describing identical responses.
The vertical line shows the actual distance. The compatibility ``C`` is defined
as the fraction of matrices in the distributions that have a larger distance
than the actually measured one.
It is obvious that the chosen binning is not sufficient to ensure a
model-independent response matrix. We clearly need to bin the truth also in
``y`` to cover the differences in the detector response. It is important to
keep in mind that in general, this kind of dependence might not show up clearly
in all cases. If the models used to build the response matrix are too similar,
the matrices might evaluate as compatible despite remaining model dependence in
the binning. It is important to understand the detector and analysis from first
principles and use that understanding to identify possible causes of varying
responses.
So let us re-do the previous steps with a finer truth-binning, also taking
``y`` into account::
with open("reco-binning.yml", 'rt') as f:
reco_binning = binning.yaml.full_load(f)
with open("fine-truth-binning.yml", 'rt') as f:
truth_binning = binning.yaml.full_load(f)
respA = migration.ResponseMatrix(reco_binning, truth_binning)
reco_binning = reco_binning.clone()
truth_binning = truth_binning.clone()
respB = migration.ResponseMatrix(reco_binning, truth_binning)
respA.fill_from_csv_file("../00/modelA_data.txt")
respA.fill_up_truth_from_csv_file("../00/modelA_truth.txt")
respB.fill_from_csv_file("../00/modelB_data.txt")
respB.fill_up_truth_from_csv_file("../00/modelB_truth.txt")
The correlation between ``x`` and ``y`` is now apparent in the plot of events
in the truth binning of response matrix B::
pltr = plotting.get_plotter(respB.truth_binning)
pltr.plot_entries()
pltr.savefig("fine_entries_B.png")
.. image:: fine_entries_B.png
Because of the fine 2D binning of the true events, the matrix itself has become
a bit unwieldy::
matrix_utils.plot_mean_response_matrix(respB, "fine_response_matrix_A.png")
.. image:: fine_response_matrix_A.png
To get a better idea of how the mean efficiencies vary over the different truth
variables, we can plot the projected minimum, maximum, and median of the mean
(i.e. ignoring statistical uncertainties) efficiencies::
matrix_utils.plot_mean_efficiency(respA, "fine_efficiency_A.png")
.. image:: fine_efficiency_A.png
::
matrix_utils.plot_mean_efficiency(respB, "fine_efficiency_B.png")
.. image:: fine_efficiency_B.png
The efficiencies of the two models are not identical, due to statistical
fluctuations and different phase space coverage. The differences are very close
to the expectations though::
matrix_utils.plot_mahalanobis_distance(respA, respB, "fine_mahalanobis_distance.png")
.. image:: fine_mahalanobis_distance.png
In fact, the two matrices are perfectly compatible within the statistical
uncertainties::
matrix_utils.plot_compatibility(respA, respB, "fine_compatibility.png")
.. image:: fine_compatibility.png
Since the two matrices are compatible with each other, and we are confident
that the binning ensures a physics-model-independent matrix, we can use the
simulated data of both model A and model B to build the detector response matrix.
This can be done by filling a new matrix with the data from all the available
files::
resp.fill_from_csv_file(["../00/modelA_data.txt", "../00/modelB_data.txt"])
resp.fill_up_truth_from_csv_file(["../00/modelA_truth.txt", "../00/modelB_truth.txt"])
But since we already have the two individual matrices, it is easier to just
combine them::
resp = respA + respB
Another quality criterion for response matrices is the variation of the detector response
*within* a single bin. It is estimated from the differences between neighbouring bins and
can be plotted like this::
matrix_utils.plot_in_bin_variation(resp, "fine_inbin_var.png")
.. image:: fine_inbin_var.png
It should be compared to the statistical uncertainty of the matrix elements::
matrix_utils.plot_statistical_uncertainty(resp, "fine_stat_var.png")
.. image:: fine_stat_var.png
The easiest way to do that is by plotting the in-bin variance normalized by the
statistical uncertainty. If the in-bin variation is much larger than the
statistical uncertainty, this indicates that the bins are too wide to reflect a
rapid change of detector response::
matrix_utils.plot_relative_in_bin_variation(resp, "fine_rel_inbin_var.png")
.. image:: fine_rel_inbin_var.png
In this case, the in-bin variation is of the same order as the statistical uncertainty.
The combined matrix still has lots of bins with very low statistics at the
edges::
pltr = plotting.get_plotter(resp.truth_binning)
pltr.plot_entries()
pltr.savefig("fine_entries.png")
.. image:: fine_entries.png
We can increase the statistics in the sparsely populated bins by increasing the
size of those bins. This can be done manually by modifying the truth binning
file. Alternatively, the :mod:`.matrix_utils` module offers a function to merge
some bins to try to increase the minimum number of events per bin::
entries = resp.get_truth_entries_as_ndarray()
optimised = resp
while np.min(entries) < 10:
optimised = matrix_utils.improve_stats(optimised)
entries = optimised.get_truth_entries_as_ndarray()
This will keep removing bin edges of the bins with the lowest number of events
until the required minimum is reached. The result is a matrix with at least 10
events in every single truth bin::
pltr = plotting.get_plotter(optimised.truth_binning)
pltr.plot_entries()
pltr.savefig("optimised_entries.png")
.. image:: optimised_entries.png
To see how the bins compare, it is useful to plot the entries without an area
normalization::
pltr = plotting.get_plotter(optimised.truth_binning)
pltr.plot_entries(density=False, label="min", hatch=None, margin_function=np.min)
pltr.plot_entries(density=False, label="max", hatch=None, margin_function=np.max)
pltr.plot_entries(density=False, label="median", hatch=None, margin_function=np.median)
pltr.legend()
pltr.savefig("optimised_abs_entries.png")
.. image:: optimised_abs_entries.png
Let us quickly check that the efficiencies of the resulting matrix behave as
expected::
matrix_utils.plot_mean_efficiency(optimised, "optimised_efficiency.png")
.. image:: optimised_efficiency.png
As well as that the in-bin variation is reasonable::
matrix_utils.plot_relative_in_bin_variation(optimised, "optimised_rel_inbin_var.png")
.. image:: optimised_rel_inbin_var.png
And just to be sure, we can check again whether the matrices generated with the
single models A and B are still compatible with the optimised truth binning::
reco_binning = optimised.reco_binning.clone()
reco_binning.reset()
truth_binning = optimised.truth_binning.clone()
truth_binning.reset()
respA = migration.ResponseMatrix(reco_binning, truth_binning)
reco_binning = reco_binning.clone()
truth_binning = truth_binning.clone()
respB = migration.ResponseMatrix(reco_binning, truth_binning)
respA.fill_from_csv_file("../00/modelA_data.txt")
respA.fill_up_truth_from_csv_file("../00/modelA_truth.txt")
respB.fill_from_csv_file(["../00/modelB_data.txt"])
respB.fill_up_truth_from_csv_file(["../00/modelB_truth.txt"])
matrix_utils.plot_mahalanobis_distance(respA, respB, "optimised_mahalanobis_distance.png")
.. image:: optimised_mahalanobis_distance.png
::
matrix_utils.plot_compatibility(respA, respB, "optimised_compatibility.png")
.. image:: optimised_compatibility.png
Now that we are confident in the quality of the final response matrix, we
must save the necessary information for the model comparisons in later example
steps::
with open("optimised-truth-binning.yml", 'w') as f:
binning.yaml.dump(optimised.truth_binning, f)
optimised.export("response_matrix.npz")
This will export the mean response matrix, as well as some auxiliary
information that is required for the calculation of likelihoods and hypothesis
tests.
| /remu-0.8.0.tar.gz/remu-0.8.0/docs/examples/01/README.rst | 0.95403 | 0.889241 | README.rst | pypi |
# Build response matrices for the ReMU tutorial example: fill matrices from
# the generator output of models A and B, check their statistical
# compatibility, combine them, and iteratively optimise the truth binning
# before exporting the final response matrix.
import numpy as np
from remu import binning, matrix_utils, migration, plotting
# --- Coarse truth binning -------------------------------------------------
# Load the reco and coarse truth binnings from their YAML definitions.
with open("reco-binning.yml") as f:
    reco_binning = binning.yaml.full_load(f)
with open("coarse-truth-binning.yml") as f:
    truth_binning = binning.yaml.full_load(f)
# Fill a response matrix from the model A generator files and plot it.
respA = migration.ResponseMatrix(reco_binning, truth_binning)
respA.fill_from_csv_file("../00/modelA_data.txt")
# Also add truth-only (unreconstructed) events so efficiencies are correct.
respA.fill_up_truth_from_csv_file("../00/modelA_truth.txt")
matrix_utils.plot_mean_response_matrix(respA, "response_matrix_A.png")
pltr = plotting.get_plotter(respA.truth_binning)
pltr.plot_entries()
pltr.savefig("entries_A.png")
pltr = plotting.get_plotter(respA.truth_binning)
# Absolute (non-density) entry counts per truth bin.
pltr.plot_entries(density=False, hatch=None)
pltr.savefig("abs_entries_A.png")
# Clone and reset the binnings so model B is filled with independent
# statistics, then build and plot its response matrix.
reco_binning = reco_binning.clone()
truth_binning = truth_binning.clone()
reco_binning.reset()
truth_binning.reset()
respB = migration.ResponseMatrix(reco_binning, truth_binning)
respB.fill_from_csv_file("../00/modelB_data.txt")
respB.fill_up_truth_from_csv_file("../00/modelB_truth.txt")
matrix_utils.plot_mean_response_matrix(respB, "response_matrix_B.png")
# Compare the two matrices: are the models statistically compatible?
matrix_utils.plot_mahalanobis_distance(respA, respB, "mahalanobis_distance.png")
matrix_utils.plot_compatibility(respA, respB, "compatibility.png")
# --- Fine truth binning ---------------------------------------------------
# Repeat the exercise with a finer truth binning.
with open("reco-binning.yml") as f:
    reco_binning = binning.yaml.full_load(f)
with open("fine-truth-binning.yml") as f:
    truth_binning = binning.yaml.full_load(f)
respA = migration.ResponseMatrix(reco_binning, truth_binning)
reco_binning = reco_binning.clone()
truth_binning = truth_binning.clone()
respB = migration.ResponseMatrix(reco_binning, truth_binning)
respA.fill_from_csv_file("../00/modelA_data.txt")
respA.fill_up_truth_from_csv_file("../00/modelA_truth.txt")
respB.fill_from_csv_file("../00/modelB_data.txt")
respB.fill_up_truth_from_csv_file("../00/modelB_truth.txt")
pltr = plotting.get_plotter(respB.truth_binning)
pltr.plot_entries()
pltr.savefig("fine_entries_B.png")
matrix_utils.plot_mean_response_matrix(respB, "fine_response_matrix_A.png")
matrix_utils.plot_mean_efficiency(respA, "fine_efficiency_A.png")
matrix_utils.plot_mean_efficiency(respB, "fine_efficiency_B.png")
matrix_utils.plot_mahalanobis_distance(respA, respB, "fine_mahalanobis_distance.png")
matrix_utils.plot_compatibility(respA, respB, "fine_compatibility.png")
# Combine the statistics of both models into a single response matrix and
# inspect its in-bin variation and statistical uncertainty.
resp = respA + respB
matrix_utils.plot_in_bin_variation(resp, "fine_inbin_var.png")
matrix_utils.plot_statistical_uncertainty(resp, "fine_stat_var.png")
matrix_utils.plot_relative_in_bin_variation(resp, "fine_rel_inbin_var.png")
pltr = plotting.get_plotter(resp.truth_binning)
pltr.plot_entries()
pltr.savefig("fine_entries.png")
# --- Binning optimisation -------------------------------------------------
# Repeatedly merge low-statistics truth bins until every truth bin holds at
# least 10 entries.
entries = resp.get_truth_entries_as_ndarray()
optimised = resp
while np.min(entries) < 10:
    optimised = matrix_utils.improve_stats(optimised)
    entries = optimised.get_truth_entries_as_ndarray()
pltr = plotting.get_plotter(optimised.truth_binning)
pltr.plot_entries()
pltr.savefig("optimised_entries.png")
pltr = plotting.get_plotter(optimised.truth_binning)
# Overlay min/max/median entry counts of the marginalised bins.
pltr.plot_entries(density=False, label="min", hatch=None, margin_function=np.min)
pltr.plot_entries(density=False, label="max", hatch=None, margin_function=np.max)
pltr.plot_entries(density=False, label="median", hatch=None, margin_function=np.median)
pltr.legend()
pltr.savefig("optimised_abs_entries.png")
matrix_utils.plot_mean_efficiency(optimised, "optimised_efficiency.png")
matrix_utils.plot_relative_in_bin_variation(optimised, "optimised_rel_inbin_var.png")
# Re-check model compatibility with the optimised truth binning by refilling
# fresh matrices on cloned, reset copies of the optimised binnings.
reco_binning = optimised.reco_binning.clone()
reco_binning.reset()
truth_binning = optimised.truth_binning.clone()
truth_binning.reset()
respA = migration.ResponseMatrix(reco_binning, truth_binning)
reco_binning = reco_binning.clone()
truth_binning = truth_binning.clone()
respB = migration.ResponseMatrix(reco_binning, truth_binning)
respA.fill_from_csv_file("../00/modelA_data.txt")
respA.fill_up_truth_from_csv_file("../00/modelA_truth.txt")
respB.fill_from_csv_file(["../00/modelB_data.txt"])
respB.fill_up_truth_from_csv_file(["../00/modelB_truth.txt"])
matrix_utils.plot_mahalanobis_distance(
    respA, respB, "optimised_mahalanobis_distance.png"
)
matrix_utils.plot_compatibility(respA, respB, "optimised_compatibility.png")
# Save the optimised truth binning and export the mean response matrix plus
# the auxiliary information needed by the later example steps.
with open("optimised-truth-binning.yml", "w") as f:
    binning.yaml.dump(optimised.truth_binning, f)
optimised.export("response_matrix.npz") | /remu-0.8.0.tar.gz/remu-0.8.0/docs/examples/01/build_response_matrix.py | 0.413951 | 0.27181 | build_response_matrix.py | pypi
.. _example06:
=================================================
Example 06 -- Cross sections & flux uncertainties
=================================================
Aims
====
* Turn cross sections as input parameters into event numbers using fluxes
* Implement flux uncertainties
Instructions
============
So far we have concentrated on raw event numbers or simple template scaling
parameters to parameterize the number of events we expect to see in our
detector. But these raw numbers or templates themselves depend on other more
fundamental parameters.
E.g, if you are recording products of a radioactive decay, they will depend on
the number of radioactive nuclei inside the detector, the decay rates into the
different states you are distinguishing, and the total amount of time the
experiment was collecting data::
n_true_events_in_state_j = N_nuclei * time * decay_rate_to_state_j
Of these three parameters, the decay rate is usually the truly interesting one.
It is the physics one wants to examine with the experimental setup. The number
of nuclei and the time that the experiment ran on the other hand are just
properties of the experimental setup and the data that was recorded. So just
like the detector response, it would be good if we could absorb them into a
:class:`.Predictor`, so that we can do statistical tests with the interesting
physics parameters directly.
A slightly more complicated example is the measurement of interactions of a
particle beam with a target material, e.g. a neutrino beam with a water target.
Here the number of true events depends on the neutrino flux as a function of
the neutrino energy, the number of target molecules in the detector, and the
neutrino interaction cross sections for the different final states
distinguished by the detector::
n_true_events_j = T * sum_k(sigma_jk * F_k)
Here `T` is the number of targets, `F_k` is the integrated recorded neutrino
flux in neutrino energy bin `k` (unit: neutrinos/m^2), and `sigma_jk` is the
cross section for neutrinos in energy bin `k` to cause a reaction to the final
state `j` that can be recorded by the detector.
In general, a lot of experimental setups can be described with a matrix
multiplication in the form::
n_true_events_j = sum_k(physics_parameter_matrix_jk * exposure_k)
Depending on the details of the experiment, the parameter matrix and exposure
vector have slightly different meanings. In the case of the radioactive decay
measurement, the physics parameters would be the decay rates, and the exposure
would be the product of number of nuclei and the recording time. In the
neutrino case the parameters would be the cross sections, and the exposure
would be the product of integrated neutrino fluxes and target mass. In the
context of a collider experiment, the exposure would probably be called an
integrated luminosity.
We will extend the previous example and treat it like a neutrino beam experiment.
First we will need to get the binning in the true event properties::
import numpy as np
import pandas as pd
from remu import binning
with open("../05/truth-binning.yml") as f:
truth_binning = binning.yaml.full_load(f)
# Get truth binnings for BG and signal
bg_truth_binning = truth_binning.subbinnings[1].clone()
signal_truth_binning = truth_binning.subbinnings[2].clone()
As a binning for the flux, we will use a simple linear binning in the neutrino
energy:
.. include:: flux-binning.yml
:literal:
::
# Define flux binning
with open("flux-binning.yml") as f:
flux_binning = binning.yaml.full_load(f)
The binning of the cross section is done as a :class:`.CartesianProductBinning`.
Every possible combination of true kinematic bin and true neutrino energy bin
gets its own cross-section values::
# Create cross-section binnings
bg_flux_binning = flux_binning.clone()
bg_xsec_binning = binning.CartesianProductBinning((bg_truth_binning, bg_flux_binning))
signal_flux_binning = flux_binning.clone()
signal_xsec_binning = binning.CartesianProductBinning(
(signal_truth_binning, signal_flux_binning)
)
We can have a look at the structure of the resulting binning::
# Check binning structure
n_bg_truth = bg_truth_binning.data_size
n_signal_truth = signal_truth_binning.data_size
n_flux = flux_binning.data_size
n_bg_xsec = bg_xsec_binning.data_size
n_signal_xsec = signal_xsec_binning.data_size
print(n_bg_truth, n_signal_truth, n_flux)
print(n_bg_xsec, n_signal_xsec)
print(signal_xsec_binning.bins[0].data_indices)
print(signal_xsec_binning.bins[1].data_indices)
print(signal_xsec_binning.bins[n_flux].data_indices)
.. include:: check_binning.txt
:literal:
An increase of the bin index by one means that we increase the corresponding
data index in the flux binning by one. Once every flux bin has been stepped
through, the data index in the truth binning increases by one.
To create a :class:`.Predictor` that can turn cross sections into event
numbers, we need an actual neutrino flux. We can easily create one by filling
the flux binning::
# Fill flux with exposure units (proportional to neutrinos per m^2)
from remu import plotting
from numpy.random import default_rng
rng = default_rng()
E = rng.normal(loc=8.0, scale=2.0, size=1000)
df = pd.DataFrame({"E": E})
flux_binning.fill(df, weight=0.01)
We fill the binning with a weight of 0.01 so that the total amount of exposure
in the binning is 10, corresponding with the assumed 10 years of data taking
used for the event generation in the previous examples. So one "unit" of
exposure corresponds to one year of data taking.
The "simulated" flux looks like this::
pltr = plotting.get_plotter(flux_binning)
pltr.plot_values()
pltr.savefig("flux.png")
.. image:: flux.png
Of course the exact flux is never known (especially with neutrino experiments).
To simulate a flux uncertainty, we can just create lots of throws::
# Create fluctuated flux predictions
E_throws = rng.normal(loc=8.0, scale=2.0, size=(100,1000))
flux = []
for E in E_throws:
df = pd.DataFrame({"E": E})
flux_binning.reset()
flux_binning.fill(df, weight=0.01)
flux.append(flux_binning.get_values_as_ndarray())
flux = np.asfarray(flux)
print(flux.shape)
.. include:: flux_shape.txt
:literal:
Now we can use those flux predictions to create a
:class:`.LinearEinsumPredictor` that will do the correct matrix multiplication
to combine the cross sections and the flux into event numbers::
# Create event number predictors
from multiprocess import Pool
from remu import likelihood
pool = Pool(8)
likelihood.mapper = pool.map
bg_predictor = likelihood.LinearEinsumPredictor(
"ij,...kj->...ik",
flux,
reshape_parameters=(n_bg_truth, n_flux),
bounds=[(0.0, np.inf)] * bg_xsec_binning.data_size,
)
signal_predictor = likelihood.LinearEinsumPredictor(
"ij,...kj->...ik",
flux,
reshape_parameters=(n_signal_truth, n_flux),
bounds=[(0.0, np.inf)] * signal_xsec_binning.data_size,
)
Let us do a quick test of the predictions::
# Test cross-section predictions
signal_xsec_binning.reset()
signal_xsec_binning.fill({"E": 8.0, "true_x": 0.0, "true_y": 0.0}, 100.0)
signal_xsec = signal_xsec_binning.get_values_as_ndarray()
signal_events, weights = signal_predictor(signal_xsec)
signal_truth_binning.set_values_from_ndarray(signal_events)
pltr = plotting.get_plotter(signal_truth_binning)
pltr.plot_values(density=False)
pltr.savefig("many_events.png")
.. image:: many_events.png
::
signal_xsec_binning.reset()
signal_xsec_binning.fill({"E": 3.0, "true_x": 0.0, "true_y": 0.0}, 100.0)
signal_xsec = signal_xsec_binning.get_values_as_ndarray()
signal_events, weights = signal_predictor(signal_xsec)
signal_truth_binning.set_values_from_ndarray(signal_events)
pltr = plotting.get_plotter(signal_truth_binning)
pltr.plot_values(density=False)
pltr.savefig("few_events.png")
.. image:: few_events.png
Despite filling the same cross section to the same true kinematics in the two
cases, the resulting number of events was different. This is of course because
the flux at 3 is smaller than the flux at 8. So even if the cross sections at 3
and 8 are the same, the different flux will lead to a different number of
predicted events.
Finally we need a predictor for the noise events. These are events that do not
correspond to any interesting physics and are just weighted up or down with a
single parameter as input to the response matrix, so we will just pass through
that single parameter. We will use the :class:`.TemplatePredictor` for this,
since it sets the parameter limits to ``(0, np.inf)`` by default, which is
convenient here. In order for the systematics to line up, we will have to create
100 identical "variations"::
# Create noise predictor
noise_predictor = likelihood.TemplatePredictor([[[1.0]]] * 100)
Now that we have the three separate predictors for the noise, background, and
signal events, we need to combine them into a single
:class:`.ConcatenatedPredictor`::
# Combine into single predictor
event_predictor = likelihood.ConcatenatedPredictor(
[noise_predictor, bg_predictor, signal_predictor],
combine_systematics="same"
)
The input of the combined predictor will be a concatenation of the separate
inputs. I.e. first the single noise scaling parameter, then the background
cross-section parameters, and finally the signal cross-section parameters. The
output likewise will be a concatenation of the single output: first the
unmodified noise scaling parameter, then the background events numbers, and
finally the signal event numbers. If we have done everything right, this
corresponds exactly to the meaning of the data of the original `truth_binning`,
so it can be used as input for the response matrix.
At this point we could combine the event predictor with a response matrix and
likelihood calculator to do statistical tests with the cross-section
parameters. With hundreds or thousands of parameters, this is a computationally
intensive task though. So for this tutorial we will again define some models as
templates and investigate them by varying their weights.
In order to simplify that task, we will first create a binning that
encapsulates all input parameters for the `event_predictor`::
parameter_binning = truth_binning.marginalize_subbinnings()
parameter_binning = parameter_binning.insert_subbinning(1, bg_xsec_binning)
parameter_binning = parameter_binning.insert_subbinning(2, signal_xsec_binning)
We started by marginalizing out all subbinnings in the original
`truth_binning`, which yields a binning that only distinguishes the three event
types in order: noise, background, signal. Then we inserted the cross-section
binnings as subbinnings into their respective top level bins. This leaves us
with a binning with the desired data structure: first a single bin for the
noise, then the background cross-section bins as defined by the
`bg_xsec_binning`, and finally the signal cross-section bins as defined by
`signal_xsec_binning`.
The template for the noise events is simple. Again, we just want to scale the
noise parameter directly::
noise_template = np.zeros(parameter_binning.data_size)
noise_template[0] = 1.0
For the cross-section templates, we need to take a look at the cross-section
bins, so we can understand what values to put in them::
for i, b in enumerate(bg_xsec_binning.bins):
# Get truth and flux bin from Cartesian Product
truth_bin, flux_bin = b.get_marginal_bins()
print(i)
print(truth_bin)
print(flux_bin)
break
.. include:: marginal_bins.txt
:literal:
The method :meth:`.CartesianProductBin.get_marginal_bins` returns the bins of
the original binnings, that correspond to the given bin of the
:class:`.CartesianProductBinning`. In this case, these are the bin of the true
kinematics, and the bin of the neutrino Energy. For each we have the `edges` of
the bin, so we can use those to calculate the correct cross section: The number
of true events expected in the given truth bin, for each unit of exposure in
the given flux bin::
from scipy.stats import expon, norm, uniform
def calculate_bg_xsec(E_min, E_max, x_min, x_max, y_min, y_max):
"""Calculate the cross section for the BG process."""
# We need to make an assumption about the E distribution within the E bin
# Bin edges can be +/- np.inf
if np.isfinite(E_min) and np.isfinite(E_max):
# Uniform in given bounds
E_dist = uniform(loc=E_min, scale=E_max - E_min)
elif np.isfinite(E_min):
# Exponential from E_min to inf
E_dist = expon(loc=E_min)
else:
# Exponential from -inf to E_max
E_dist = expon(loc=E_max, scale=-1)
# Simple overall cross section: One unit of exposure yields 30 true events
xsec = 30.0
# True x is True E with a shift and scale
# Average XSEC in bin is proportional to overlap
E_0 = (x_min - 0.5) * 2 / np.sqrt(0.5) + 8
E_1 = (x_max - 0.5) * 2 / np.sqrt(0.5) + 8
lower = max(E_min, E_0)
upper = min(E_max, E_1)
if upper >= lower:
xsec *= E_dist.cdf(upper) - E_dist.cdf(lower)
else:
xsec = 0.0
# Differential XSEC in y is Gaussian
# Independent of x
y_dist = norm(loc=0.5, scale=np.sqrt(0.5))
xsec *= y_dist.cdf(y_max) - y_dist.cdf(y_min)
return xsec
bg_template = np.zeros(parameter_binning.data_size)
bg_xsec = np.zeros(bg_xsec_binning.data_size)
bg_offset = parameter_binning.get_bin_data_index(1)
for i, b in enumerate(bg_xsec_binning.bins):
# Get truth and flux bin from Cartesian Product
truth_bin, flux_bin = b.get_marginal_bins()
E_min, E_max = flux_bin.edges[0]
x_min, x_max = truth_bin.edges[0]
y_min, y_max = truth_bin.edges[1]
bg_xsec[i] = calculate_bg_xsec(E_min, E_max, x_min, x_max, y_min, y_max)
bg_template[i + bg_offset] = bg_xsec[i]
pltr = plotting.get_plotter(bg_xsec_binning)
pltr.plot_array(bg_xsec)
pltr.savefig("bg_xsec.png")
.. image:: bg_xsec.png
This plot of the template is not very intuitive, since the plotter of a general
:class:`.CartesianProductBinning` does not know the meaning of the single bins
in the constituent binnings. So it can only plot bin numbers vs one another. A
:class:`.CartesianProductBinning` consisting of only
:class:`.RectilinearBinning`, and :class:`.LinearBinning` without any
subbinnings has the same data structure as a :class:`.RectilinearBinning`
though, so we can use that plotter to get a slightly more readable plot::
bg_xsec_plot_binning = binning.RectilinearBinning(
bg_truth_binning.variables + (flux_binning.variable,),
bg_truth_binning.bin_edges + (flux_binning.bin_edges,),
)
pltr = plotting.get_plotter(bg_xsec_plot_binning)
pltr.plot_array(bg_xsec, density=[0, 1], hatch=None)
pltr.savefig("bg_xsec_pretty.png")
.. image:: bg_xsec_pretty.png
Here we told the plotter to only plot densities with relation to variables 0
and 1, i.e. `true_x` and `true_y`. This means the plots are "differential" in
`true_x` and `true_y` but not in E. The marginal plot of the E "distribution"
shows the total cross section of each bin, and not a density. The marginal
plots of `true_x` and `true_y` are technically differential cross sections, but
in this particular case they are flux integrated over a flux with one unit of
exposure in each energy bin. In order to see the flux integrated cross sections
in the actual flux, we need to use the predictors to actually predict the
number of events::
bg_truth_binning.reset()
bg_truth_binning.fill_from_csv_file("../05/bg_truth.txt", weight=0.1)
pltr = plotting.get_plotter(bg_truth_binning)
pltr.plot_values(scatter=500, label="generator")
bg_pred, w = bg_predictor(bg_xsec)
pltr.plot_array(bg_pred, scatter=500, label="xsec")
pltr.legend()
pltr.savefig("bg_prediction.png")
.. image:: bg_prediction.png
Here we also plotted the previously generated number of events for comparison.
The prediction from the cross-section model is not a perfect match, but it is
very close.
Now that we have a cross section for the background process, we need to repeat
this process for the two signal processes::
def calculate_model_A_xsec(E_min, E_max, x_min, x_max, y_min, y_max):
"""Calculate the cross section for the model A process."""
# We need to make an assumption about the E dsitribution within the E bin
# Bin edges can be +/- np.inf
if np.isfinite(E_min) and np.isfinite(E_max):
# Uniform in given bounds
E_dist = uniform(loc=E_min, scale=E_max - E_min)
elif np.isfinite(E_min):
# Exponential from E_min to inf
E_dist = expon(loc=E_min)
else:
# Exponential from -inf to E_max
E_dist = expon(loc=E_max, scale=-1)
# Simple overall cross section: One unit of exposure yields 100 true events
xsec = 100.0
# True x is True E with a shift and scale
# Average XSEC in bin is proportional to overlap
E_0 = (x_min - 0.1) * 2.0 + 8
E_1 = (x_max - 0.1) * 2.0 + 8
lower = max(E_min, E_0)
upper = min(E_max, E_1)
if upper >= lower:
xsec *= E_dist.cdf(upper) - E_dist.cdf(lower)
else:
xsec = 0.0
# Differential XSEC in y is Gaussian
# Independent of x
y_dist = norm(loc=0.2, scale=1.0)
xsec *= y_dist.cdf(y_max) - y_dist.cdf(y_min)
return xsec
model_A_xsec = np.zeros(signal_xsec_binning.data_size)
model_A_template = np.zeros(parameter_binning.data_size)
signal_offset = parameter_binning.get_bin_data_index(2)
for i, b in enumerate(signal_xsec_binning.bins):
# Get truth and flux bin from Cartesian Product
truth_bin, flux_bin = b.get_marginal_bins()
E_min, E_max = flux_bin.edges[0]
x_min, x_max = truth_bin.edges[0]
y_min, y_max = truth_bin.edges[1]
model_A_xsec[i] = calculate_model_A_xsec(E_min, E_max, x_min, x_max, y_min, y_max)
model_A_template[i + signal_offset] = model_A_xsec[i]
signal_xsec_plot_binning = binning.RectilinearBinning(
signal_truth_binning.variables + (flux_binning.variable,),
signal_truth_binning.bin_edges + (flux_binning.bin_edges,),
)
pltr = plotting.get_plotter(signal_xsec_plot_binning)
pltr.plot_array(model_A_xsec, density=[0, 1], hatch=None)
pltr.savefig("model_A_xsec.png")
.. image:: model_A_xsec.png
::
signal_truth_binning.reset()
signal_truth_binning.fill_from_csv_file("../00/modelA_truth.txt", weight=0.1)
pltr = plotting.get_plotter(signal_truth_binning)
pltr.plot_values(scatter=500, label="generator")
model_A_pred, w = signal_predictor(model_A_xsec)
pltr.plot_array(model_A_pred, scatter=500, label="xsec")
pltr.legend()
pltr.savefig("model_A_prediction.png")
.. image:: model_A_prediction.png
::
model_B_xsec = np.zeros(signal_xsec_binning.data_size)
model_B_template = np.zeros(parameter_binning.data_size)
signal_offset = 1 + bg_xsec_binning.data_size
def calculate_model_B_xsec(E_min, E_max, x_min, x_max, y_min, y_max):
"""Calculate the cross section for the model A process."""
# We need to make an assumption about the E distribution within the E bin
# Bin edges can be +/- np.inf
if np.isfinite(E_min) and np.isfinite(E_max):
# Uniform in given bounds
E_dist = uniform(loc=E_min, scale=E_max - E_min)
elif np.isfinite(E_min):
# Exponential from E_min to inf
E_dist = expon(loc=E_min)
else:
# Exponential from -inf to E_max
E_dist = expon(loc=E_max, scale=-1)
# Simple overall cross section: One unit of exposure yields 100 true events
xsec = 100.0
# True x is True E with a shift and scale
# Average XSEC in bin is proportional to overlap
E_0 = (x_min - 0.0) * 2.0 + 8
E_1 = (x_max - 0.0) * 2.0 + 8
lower = max(E_min, E_0)
upper = min(E_max, E_1)
if upper >= lower:
xsec *= E_dist.cdf(upper) - E_dist.cdf(lower)
else:
xsec = 0.0
# Differential XSEC in y is Gaussian
# Correlated with x
# Should integrate 2D distribution of x/E and y
# Instead, cheat and assume median E value
if np.isfinite(lower) and np.isfinite(upper):
E_m = (upper + lower) / 2
elif np.isfinite(lower):
E_m = lower + 1
elif np.isfinite(upper):
E_m = upper - 1
else:
E_m = 0.0
x_m = (E_m - 8.0) / 2
y_dist = norm(loc=0.5 * x_m, scale=1.0 - 0.5**2)
xsec *= y_dist.cdf(y_max) - y_dist.cdf(y_min)
return xsec
for i, b in enumerate(signal_xsec_binning.bins):
# Get truth and flux bin from Cartesian Product
truth_bin, flux_bin = b.get_marginal_bins()
E_min, E_max = flux_bin.edges[0]
x_min, x_max = truth_bin.edges[0]
y_min, y_max = truth_bin.edges[1]
model_B_xsec[i] = calculate_model_B_xsec(E_min, E_max, x_min, x_max, y_min, y_max)
model_B_template[i + signal_offset] = model_B_xsec[i]
pltr = plotting.get_plotter(signal_xsec_plot_binning)
pltr.plot_array(model_B_xsec, density=[0, 1], hatch=None)
pltr.savefig("model_B_xsec.png")
.. image:: model_B_xsec.png
::
signal_truth_binning.reset()
signal_truth_binning.fill_from_csv_file("../00/modelB_truth.txt", weight=0.1)
pltr = plotting.get_plotter(signal_truth_binning)
pltr.plot_values(scatter=500, label="generator")
model_B_pred, w = signal_predictor(model_B_xsec)
pltr.plot_array(model_B_pred, scatter=500, label="xsec")
pltr.legend()
pltr.savefig("model_B_prediction.png")
.. image:: model_B_prediction.png
Now that we have the templates, we can combine them into a
:class:`.TemplatePredictor`::
# Template predictor for noise, bg, model A, model B
xsec_template_predictor = likelihood.TemplatePredictor(
[noise_template, bg_template, model_A_template, model_B_template]
)
This can then be combined with the detector response and event predictor to get
the full chain to the reconstructed level::
# Load data and response matrix
with open("../01/reco-binning.yml") as f:
reco_binning = binning.yaml.full_load(f)
reco_binning.fill_from_csv_file("../05/real_data.txt")
data = reco_binning.get_entries_as_ndarray()
data_model = likelihood.PoissonData(data)
response_matrix = "../05/response_matrix.npz"
matrix_predictor = likelihood.ResponseMatrixPredictor(response_matrix)
# Combine into linear predictor
data_predictor = likelihood.ComposedPredictor(
[matrix_predictor, event_predictor],
combine_systematics="same",
)
template_predictor = likelihood.ComposedMatrixPredictor(
[data_predictor, xsec_template_predictor], combine_systematics="cartesian"
)
We composed the `matrix_predictor` and `event_predictor` with the ``same``
strategy for combining systematics. This means that the 100 flux variations are
matched one-to-one to the 100 detector response variations, for a total of 100
systematic variations. The default ``cartesian`` strategy would mean that each
of the 100 detector variations is combined with all of the 100 flux variations,
for a total of 10,000 systematic variations.
For the final `template_predictor` we compose the `data_predictor` and
`xsec_template_predictor` using a :class:`.ComposedMatrixPredictor`. This is a
special kind of :class:`.ComposedPredictor`, which pre-computes a linear
approximation of the composed predictors in the form of a matrix
multiplication. So for a prediction, it does _not_ need to call the original
predictors in order, but only needs to do a simple matrix multiplication, where
the matrix is the size of the number of output parameters times the number of
input parameters. This speeds things up considerably, since we effectively
circumvent the hundreds of cross-section parameters that would otherwise need
to be calculated each time the predictor is called. And since all predictors
that go into this final predictor are actually linear, the linear
"approximation" is actually exact (modulo numerical variations).
Now that we have a performant predictor of the reconstructed data, we can run
statistical analyses with it, like a simple maximum likelihood fit::
# Likelihood calculator and hypothesis tester
calc = likelihood.LikelihoodCalculator(data_model, template_predictor)
maxi = likelihood.BasinHoppingMaximizer()
# Fit everything
ret = maxi.maximize_log_likelihood(calc)
print(ret)
.. include:: fit.txt
:literal:
We can calculate the overall p-values for the two hypotheses::
# Calculate p-values for hypotheses overall
calc_A = calc.fix_parameters([None, None, None, 0.0])
calc_B = calc.fix_parameters([None, None, 0.0, None])
test_A = likelihood.HypothesisTester(calc_A)
test_B = likelihood.HypothesisTester(calc_B)
print(test_A.max_likelihood_p_value())
print(test_B.max_likelihood_p_value())
.. include:: p-values.txt
:literal:
Or we can do a scan of the Wilks p-value for the model template weights::
# Get Wilks' p-values for models
norms = np.linspace(0.5, 1.5, 50)
p_values_A = []
p_values_B = []
for n in norms:
p_values_A.append(test_A.wilks_max_likelihood_ratio_p_value([None, None, n]))
p_values_B.append(test_B.wilks_max_likelihood_ratio_p_value([None, None, n]))
from matplotlib import pyplot as plt
fig, ax = plt.subplots()
ax.set_xlabel("Model weight")
ax.set_ylabel("p-value")
ax.plot(norms, p_values_A, label="Model A", color="C1")
ax.plot(norms, p_values_B, label="Model B", color="C2")
ax.axhline(0.32, color="k", linestyle="dashed")
ax.axhline(0.05, color="k", linestyle="dashed")
ax.legend(loc="best")
fig.savefig("wilks-p-values.png")
.. image:: wilks-p-values.png
Since the flux integrations and uncertainty is now handled by the predictors,
the cross-section parameters (or templates of cross-section parameters) can be
used just like described in the previous examples.
| /remu-0.8.0.tar.gz/remu-0.8.0/docs/examples/06/README.rst | 0.960119 | 0.861305 | README.rst | pypi |
# Example 06 driver: build cross-section binnings, a simulated neutrino flux
# with uncertainty throws, and predictors that turn cross-section parameters
# into expected event numbers.
import numpy as np
import pandas as pd
from remu import binning
# Load the truth binning of example 05; its subbinnings hold the noise,
# background, and signal event categories.
with open("../05/truth-binning.yml") as f:
    truth_binning = binning.yaml.full_load(f)
# Get truth binnings for BG and signal
bg_truth_binning = truth_binning.subbinnings[1].clone()
signal_truth_binning = truth_binning.subbinnings[2].clone()
# Define flux binning
with open("flux-binning.yml") as f:
    flux_binning = binning.yaml.full_load(f)
# Create cross-section binnings
# Every combination of true kinematic bin and true neutrino energy bin gets
# its own cross-section value.
bg_flux_binning = flux_binning.clone()
bg_xsec_binning = binning.CartesianProductBinning((bg_truth_binning, bg_flux_binning))
signal_flux_binning = flux_binning.clone()
signal_xsec_binning = binning.CartesianProductBinning(
    (signal_truth_binning, signal_flux_binning)
)
# Check binning structure
n_bg_truth = bg_truth_binning.data_size
n_signal_truth = signal_truth_binning.data_size
n_flux = flux_binning.data_size
n_bg_xsec = bg_xsec_binning.data_size
n_signal_xsec = signal_xsec_binning.data_size
# Record the binning sizes and a few data-index examples for the docs.
with open("check_binning.txt", "w") as f:
    print(n_bg_truth, n_signal_truth, n_flux, file=f)
    print(n_bg_xsec, n_signal_xsec, file=f)
    print(signal_xsec_binning.bins[0].data_indices, file=f)
    print(signal_xsec_binning.bins[1].data_indices, file=f)
    print(signal_xsec_binning.bins[n_flux].data_indices, file=f)
from numpy.random import default_rng
# Fill flux with exposure units (proportional to neutrinos per m^2)
from remu import plotting
rng = default_rng()
E = rng.normal(loc=8.0, scale=2.0, size=1000)
df = pd.DataFrame({"E": E})
# Weight 0.01 -> total exposure of 10 units (one unit per year of data).
flux_binning.fill(df, weight=0.01)
pltr = plotting.get_plotter(flux_binning)
pltr.plot_values()
pltr.savefig("flux.png")
# Create fluctuated flux predictions
# 100 statistical throws of the flux simulate the flux uncertainty.
E_throws = rng.normal(loc=8.0, scale=2.0, size=(100, 1000))
flux = []
for E in E_throws:
    df = pd.DataFrame({"E": E})
    flux_binning.reset()
    flux_binning.fill(df, weight=0.01)
    flux.append(flux_binning.get_values_as_ndarray())
flux = np.asfarray(flux)
with open("flux_shape.txt", "w") as f:
    print(flux.shape, file=f)
# Create event number predictors
from multiprocess import Pool
from remu import likelihood
pool = Pool(8)
likelihood.mapper = pool.map
# The einsum contracts the flux (energy axis) with the cross-section
# parameters, reshaped to (truth bins, flux bins), yielding event numbers
# per truth bin for each flux variation.
bg_predictor = likelihood.LinearEinsumPredictor(
    "ij,...kj->...ik",
    flux,
    reshape_parameters=(n_bg_truth, n_flux),
    bounds=[(0.0, np.inf)] * bg_xsec_binning.data_size,
)
signal_predictor = likelihood.LinearEinsumPredictor(
    "ij,...kj->...ik",
    flux,
    reshape_parameters=(n_signal_truth, n_flux),
    bounds=[(0.0, np.inf)] * signal_xsec_binning.data_size,
)
# Test cross-section predictions
# Fill a cross section at E=8 (near the flux peak) and predict events.
signal_xsec_binning.reset()
signal_xsec_binning.fill({"E": 8.0, "true_x": 0.0, "true_y": 0.0}, 100.0)
signal_xsec = signal_xsec_binning.get_values_as_ndarray()
signal_events, weights = signal_predictor(signal_xsec)
signal_truth_binning.set_values_from_ndarray(signal_events)
pltr = plotting.get_plotter(signal_truth_binning)
pltr.plot_values(density=False)
pltr.savefig("many_events.png")
# The same cross section at a lower-flux energy yields fewer events.
signal_xsec_binning.reset()
signal_xsec_binning.fill({"E": 3.0, "true_x": 0.0, "true_y": 0.0}, 100.0)
signal_xsec = signal_xsec_binning.get_values_as_ndarray()
signal_events, weights = signal_predictor(signal_xsec)
signal_truth_binning.set_values_from_ndarray(signal_events)
pltr = plotting.get_plotter(signal_truth_binning)
pltr.plot_values(density=False)
pltr.savefig("few_events.png")
# Create noise predictor
# Pass the single noise scaling parameter through unchanged; 100 identical
# "variations" keep the systematics aligned with the flux throws.
noise_predictor = likelihood.TemplatePredictor([[[1.0]]] * 100)
# Combine into single predictor
# Input/output order: noise parameter, BG cross sections, signal cross
# sections -- matching the data layout of the original truth_binning.
event_predictor = likelihood.ConcatenatedPredictor(
    [noise_predictor, bg_predictor, signal_predictor], combine_systematics="same"
)
# Build a binning that describes the full parameter vector of
# event_predictor: one top-level bin per event type, with the cross-section
# binnings inserted as subbinnings for BG and signal.
parameter_binning = truth_binning.marginalize_subbinnings()
parameter_binning = parameter_binning.insert_subbinning(1, bg_xsec_binning)
parameter_binning = parameter_binning.insert_subbinning(2, signal_xsec_binning)
# Create some theory templates
noise_template = np.zeros(parameter_binning.data_size)
noise_template[0] = 1.0
for i, b in enumerate(bg_xsec_binning.bins):
# Get truth and flux bin from Cartesian Product
truth_bin, flux_bin = b.get_marginal_bins()
with open("marginal_bins.txt", "w") as f:
print(i, file=f)
print(truth_bin, file=f)
print(flux_bin, file=f)
break
from scipy.stats import expon, norm, uniform
def calculate_bg_xsec(E_min, E_max, x_min, x_max, y_min, y_max):
    """Calculate the cross section for the BG process.

    All six arguments are bin edges of a single (x, y, E) bin and may be
    +/- np.inf.
    """

    def energy_distribution(lo, hi):
        # Assumed E distribution inside the flux bin: uniform for a finite
        # bin, exponential tail for a half-open one.
        if np.isfinite(lo) and np.isfinite(hi):
            return uniform(loc=lo, scale=hi - lo)
        if np.isfinite(lo):
            return expon(loc=lo)
        # NOTE(review): scipy.stats requires scale > 0, so this branch would
        # yield NaN CDFs -- confirm it is never hit for the binnings used here.
        return expon(loc=hi, scale=-1)

    E_dist = energy_distribution(E_min, E_max)
    # Overall normalisation: one unit of exposure yields 30 true events.
    total = 30.0
    # True x maps linearly onto true E (shift + scale); the average cross
    # section in the bin is proportional to the overlap of the x band with
    # the E bin.
    slope = 2 / np.sqrt(0.5)
    band_lo = (x_min - 0.5) * slope + 8
    band_hi = (x_max - 0.5) * slope + 8
    overlap_lo = max(E_min, band_lo)
    overlap_hi = min(E_max, band_hi)
    if overlap_hi >= overlap_lo:
        total *= E_dist.cdf(overlap_hi) - E_dist.cdf(overlap_lo)
    else:
        total = 0.0
    # Differential cross section in y: Gaussian, independent of x.
    y_dist = norm(loc=0.5, scale=np.sqrt(0.5))
    return total * (y_dist.cdf(y_max) - y_dist.cdf(y_min))
# Evaluate the BG cross-section model in every (truth x flux) bin and store
# it both as a flat template over the full parameter binning and as a
# stand-alone array for plotting.
bg_template = np.zeros(parameter_binning.data_size)
bg_xsec = np.zeros(bg_xsec_binning.data_size)
bg_offset = parameter_binning.get_bin_data_index(1)
for i, b in enumerate(bg_xsec_binning.bins):
    # Get truth and flux bin from Cartesian Product
    truth_bin, flux_bin = b.get_marginal_bins()
    E_min, E_max = flux_bin.edges[0]
    x_min, x_max = truth_bin.edges[0]
    y_min, y_max = truth_bin.edges[1]
    bg_xsec[i] = calculate_bg_xsec(E_min, E_max, x_min, x_max, y_min, y_max)
    bg_template[i + bg_offset] = bg_xsec[i]
pltr = plotting.get_plotter(bg_xsec_binning)
pltr.plot_array(bg_xsec)
pltr.savefig("bg_xsec.png")
# Rectilinear binning with the same bin edges for a nicer 2D plot
bg_xsec_plot_binning = binning.RectilinearBinning(
    bg_truth_binning.variables + (flux_binning.variable,),
    bg_truth_binning.bin_edges + (flux_binning.bin_edges,),
)
pltr = plotting.get_plotter(bg_xsec_plot_binning)
pltr.plot_array(bg_xsec, density=[0, 1], hatch=None)
pltr.savefig("bg_xsec_pretty.png")
# Cross-check: compare the xsec-model prediction with the generator truth
bg_truth_binning.reset()
bg_truth_binning.fill_from_csv_file("../05/bg_truth.txt", weight=0.1)
pltr = plotting.get_plotter(bg_truth_binning)
pltr.plot_values(scatter=500, label="generator")
bg_pred, w = bg_predictor(bg_xsec)
pltr.plot_array(bg_pred, scatter=500, label="xsec")
pltr.legend()
pltr.savefig("bg_prediction.png")
def calculate_model_A_xsec(E_min, E_max, x_min, x_max, y_min, y_max):
    """Calculate the cross section for the model A process.

    All six arguments are bin edges of a single (x, y, E) bin and may be
    +/- np.inf.
    """
    # Assumed E distribution inside the flux bin: uniform for a finite bin,
    # exponential tail for a half-open one.
    if np.isfinite(E_min) and np.isfinite(E_max):
        E_dist = uniform(loc=E_min, scale=E_max - E_min)
    elif np.isfinite(E_min):
        E_dist = expon(loc=E_min)
    else:
        # NOTE(review): scipy.stats requires scale > 0, so this branch would
        # yield NaN CDFs -- confirm it is never hit for the binnings used here.
        E_dist = expon(loc=E_max, scale=-1)
    # Overall normalisation: one unit of exposure yields 100 true events.
    result = 100.0
    # True x maps linearly onto true E; the average cross section in the bin
    # is proportional to the overlap of the x band with the E bin.
    band_lo, band_hi = ((edge - 0.1) * 2.0 + 8 for edge in (x_min, x_max))
    overlap_lo = max(E_min, band_lo)
    overlap_hi = min(E_max, band_hi)
    if overlap_hi >= overlap_lo:
        result *= E_dist.cdf(overlap_hi) - E_dist.cdf(overlap_lo)
    else:
        result = 0.0
    # Differential cross section in y: Gaussian, independent of x.
    y_dist = norm(loc=0.2, scale=1.0)
    result *= y_dist.cdf(y_max) - y_dist.cdf(y_min)
    return result
# Evaluate model A in every signal (truth x flux) bin and store it as a flat
# template over the full parameter binning.
model_A_xsec = np.zeros(signal_xsec_binning.data_size)
model_A_template = np.zeros(parameter_binning.data_size)
signal_offset = parameter_binning.get_bin_data_index(2)
for i, b in enumerate(signal_xsec_binning.bins):
    # Get truth and flux bin from Cartesian Product
    truth_bin, flux_bin = b.get_marginal_bins()
    E_min, E_max = flux_bin.edges[0]
    x_min, x_max = truth_bin.edges[0]
    y_min, y_max = truth_bin.edges[1]
    model_A_xsec[i] = calculate_model_A_xsec(E_min, E_max, x_min, x_max, y_min, y_max)
    model_A_template[i + signal_offset] = model_A_xsec[i]
# Rectilinear binning with the same bin edges for a nicer 2D plot
signal_xsec_plot_binning = binning.RectilinearBinning(
    signal_truth_binning.variables + (flux_binning.variable,),
    signal_truth_binning.bin_edges + (flux_binning.bin_edges,),
)
pltr = plotting.get_plotter(signal_xsec_plot_binning)
pltr.plot_array(model_A_xsec, density=[0, 1], hatch=None)
pltr.savefig("model_A_xsec.png")
# Cross-check model A prediction against the generator truth
signal_truth_binning.reset()
signal_truth_binning.fill_from_csv_file("../00/modelA_truth.txt", weight=0.1)
pltr = plotting.get_plotter(signal_truth_binning)
pltr.plot_values(scatter=500, label="generator")
model_A_pred, w = signal_predictor(model_A_xsec)
pltr.plot_array(model_A_pred, scatter=500, label="xsec")
pltr.legend()
pltr.savefig("model_A_prediction.png")
model_B_xsec = np.zeros(signal_xsec_binning.data_size)
model_B_template = np.zeros(parameter_binning.data_size)
# NOTE(review): presumably equivalent to parameter_binning.get_bin_data_index(2)
# used above (one noise bin plus the BG block) -- confirm
signal_offset = 1 + bg_xsec_binning.data_size
def calculate_model_B_xsec(E_min, E_max, x_min, x_max, y_min, y_max):
    """Calculate the cross section for the model B process."""
    # We need to make an assumption about the E distribution within the E bin
    # Bin edges can be +/- np.inf
    if np.isfinite(E_min) and np.isfinite(E_max):
        # Uniform in given bounds
        E_dist = uniform(loc=E_min, scale=E_max - E_min)
    elif np.isfinite(E_min):
        # Exponential from E_min to inf
        E_dist = expon(loc=E_min)
    else:
        # Exponential from -inf to E_max
        # NOTE(review): scipy.stats requires scale > 0, so this branch would
        # yield NaN CDFs -- confirm it is never hit for the binnings used here
        E_dist = expon(loc=E_max, scale=-1)
    # Simple overall cross section: One unit of exposure yields 100 true events
    xsec = 100.0
    # True x is True E with a shift and scale
    # Average XSEC in bin is proportional to overlap
    E_0 = (x_min - 0.0) * 2.0 + 8
    E_1 = (x_max - 0.0) * 2.0 + 8
    lower = max(E_min, E_0)
    upper = min(E_max, E_1)
    if upper >= lower:
        xsec *= E_dist.cdf(upper) - E_dist.cdf(lower)
    else:
        xsec = 0.0
    # Differential XSEC in y is Gaussian
    # Correlated with x
    # Should integrate 2D distribution of x/E and y
    # Instead, cheat and assume median E value
    if np.isfinite(lower) and np.isfinite(upper):
        E_m = (upper + lower) / 2
    elif np.isfinite(lower):
        E_m = lower + 1
    elif np.isfinite(upper):
        E_m = upper - 1
    else:
        E_m = 0.0
    # Map the representative E back to x; the y mean then follows x
    # NOTE(review): loc=0.5*x_m with scale=1.0 - 0.5**2 looks like a
    # conditional Gaussian with rho=0.5, where the conditional sigma would
    # usually be sqrt(1 - rho**2) -- confirm the scale here is intended
    x_m = (E_m - 8.0) / 2
    y_dist = norm(loc=0.5 * x_m, scale=1.0 - 0.5**2)
    xsec *= y_dist.cdf(y_max) - y_dist.cdf(y_min)
    return xsec
# Evaluate model B in every signal (truth x flux) bin
for i, b in enumerate(signal_xsec_binning.bins):
    # Get truth and flux bin from Cartesian Product
    truth_bin, flux_bin = b.get_marginal_bins()
    E_min, E_max = flux_bin.edges[0]
    x_min, x_max = truth_bin.edges[0]
    y_min, y_max = truth_bin.edges[1]
    model_B_xsec[i] = calculate_model_B_xsec(E_min, E_max, x_min, x_max, y_min, y_max)
    model_B_template[i + signal_offset] = model_B_xsec[i]
pltr = plotting.get_plotter(signal_xsec_plot_binning)
pltr.plot_array(model_B_xsec, density=[0, 1], hatch=None)
pltr.savefig("model_B_xsec.png")
# Cross-check model B prediction against the generator truth
signal_truth_binning.reset()
signal_truth_binning.fill_from_csv_file("../00/modelB_truth.txt", weight=0.1)
pltr = plotting.get_plotter(signal_truth_binning)
pltr.plot_values(scatter=500, label="generator")
model_B_pred, w = signal_predictor(model_B_xsec)
pltr.plot_array(model_B_pred, scatter=500, label="xsec")
pltr.legend()
pltr.savefig("model_B_prediction.png")
# Template predictor for noise, bg, model A, model B
xsec_template_predictor = likelihood.TemplatePredictor(
    [noise_template, bg_template, model_A_template, model_B_template]
)
# Load data and response matrix
with open("../01/reco-binning.yml") as f:
    reco_binning = binning.yaml.full_load(f)
reco_binning.fill_from_csv_file("../05/real_data.txt")
data = reco_binning.get_entries_as_ndarray()
data_model = likelihood.PoissonData(data)
response_matrix = "../05/response_matrix.npz"
matrix_predictor = likelihood.ResponseMatrixPredictor(response_matrix)
# Combine into linear predictor
data_predictor = likelihood.ComposedPredictor(
    [matrix_predictor, event_predictor],
    combine_systematics="same",
)
template_predictor = likelihood.ComposedMatrixPredictor(
    [data_predictor, xsec_template_predictor], combine_systematics="cartesian"
)
# Likelihood calculator and hypothesis tester
calc = likelihood.LikelihoodCalculator(data_model, template_predictor)
maxi = likelihood.BasinHoppingMaximizer()
# Fit everything
ret = maxi.maximize_log_likelihood(calc)
with open("fit.txt", "w") as f:
    print(ret, file=f)
# Calculate p-values for hypotheses overall
# Test A fixes the model-B weight to 0; test B fixes the model-A weight to 0
calc_A = calc.fix_parameters([None, None, None, 0.0])
calc_B = calc.fix_parameters([None, None, 0.0, None])
test_A = likelihood.HypothesisTester(calc_A)
test_B = likelihood.HypothesisTester(calc_B)
with open("p-values.txt", "w") as f:
    print(test_A.max_likelihood_p_value(), file=f)
    print(test_B.max_likelihood_p_value(), file=f)
# Get Wilks' p-values for models
norms = np.linspace(0.5, 1.5, 50)
p_values_A = []
p_values_B = []
for n in norms:
    p_values_A.append(test_A.wilks_max_likelihood_ratio_p_value([None, None, n]))
    p_values_B.append(test_B.wilks_max_likelihood_ratio_p_value([None, None, n]))
from matplotlib import pyplot as plt
fig, ax = plt.subplots()
ax.set_xlabel("Model weight")
ax.set_ylabel("p-value")
ax.plot(norms, p_values_A, label="Model A", color="C1")
ax.plot(norms, p_values_B, label="Model B", color="C2")
# Reference levels at p = 0.32 and p = 0.05
ax.axhline(0.32, color="k", linestyle="dashed")
ax.axhline(0.05, color="k", linestyle="dashed")
ax.legend(loc="best")
fig.savefig("wilks-p-values.png")
pool.close() | /remu-0.8.0.tar.gz/remu-0.8.0/docs/examples/06/flux-integrate.py | 0.713931 | 0.406744 | flux-integrate.py | pypi |
.. _example03:
====================================
Example 03 -- Detector uncertainties
====================================
Aims
====
* Generate random detector response variations
* Include detector uncertainties in the response matrix by using a
:class:`.ResponseMatrixArrayBuilder`
* Include statistical uncertainties by generating randomly varied matrices
* Use varied response matrices for fits and hypothesis tests
Instructions
============
The properties of experimental setups are usually only known to a finite
precision. The remaining uncertainty on these properties, and thus the
uncertainty on the detector response to true events, must be reflected in the
response matrix.
We can create an event-by-event variation of the detector response with the
provided script::
$ ../simple_experiment/vary_detector.py ../00/modelA_data.txt modelA_data.txt
$ ../simple_experiment/vary_detector.py ../00/modelB_data.txt modelB_data.txt
It creates 100 randomly generated variations of the simulated detector and
varies the simulation that was created in example 00 accordingly. Each
variation describes one possible real detector and is often called a "toy
variation" or a "universe".
Events are varied in two ways: The values of reconstructed variables can be
different for each toy, and each event can get assigned a weight. The former
can be used to cover uncertainties in the detector precision and accuracy,
while the latter is a simple way to deal with uncertainties in the overall
probability of reconstructing an event.
In this case, the values of the reconstructed ``reco_x`` are varied and saved
as ``reco_x_0`` to ``reco_x_99``. The detection efficiency is also varied, and
the ratio of each event's nominal efficiency to its efficiency assuming the toy
detector is stored as ``weight_0`` up to ``weight_99``. Let us plot the different
reconstructed distributions we get from the toy variations:
.. include:: plot_data.py
:literal:
.. image:: data.png
Here we simply plot one histogram for each toy data set with a low alpha value
(i.e. with high transparency), as well as a solid histogram for the mean values.
Also we scaled the model predictions by a factor 10, simply because that makes
"experiment running time" of the model predictions correspond to the data.
We are using a few specific arguments to the ``fill_from_csv_file`` method:
``weightfield``
Tells the object which field to use as the weight of the events.
Does not need to follow any naming conventions.
Here we tell it to use the field corresponding to each toy.
``rename``
The binning of the response matrix expects a variable called ``reco_x``.
This variable is not present in the varied data sets. Here we can specify a
dictionary with columns in the data that will be renamed before filling the
matrix.
``buffer_csv_files``
Reading a CSV file from disk and turning it into an array of floating point
values takes quite a bit of time. We can speed up the process by buffering
the intermediate array on disk. This means the time consuming parsing of a
text file only has to happen once per CSV file.
ReMU handles the detector uncertainties by building a response matrix for each
toy detector separately. These matrices are then used in parallel to calculate
likelihoods. In the end, the marginal (i.e. average) likelihood is used as the
final answer. Some advanced features of ReMU (like sparse matrices, or nuisance
bins) require quite a bit of book-keeping to make the toy matrices behave
consistently and as expected. This is handled by the
:class:`.ResponseMatrixArrayBuilder`::
from remu import binning
from remu import migration
builder = migration.ResponseMatrixArrayBuilder(1)
Its only argument is the number of randomly drawn statistical variations per
toy response matrix. The number of simulated events that are used to build the
response matrix influences the statistical uncertainty of the response matrix
elements. This can be seen as an additional "systematic" uncertainty of the
detector response. By generating randomly fluctuated response matrices from
the toy matrices, it can be handled organically with the other systematics.
In this case we generate 1 randomly varied matrix per toy matrix, yielding a
total of 100 matrices in the end.
The toy matrices themselves are built like the nominal matrix in the previous
examples and then added to the builder::
with open("../01/reco-binning.yml", 'rt') as f:
reco_binning = binning.yaml.full_load(f)
with open("../01/optimised-truth-binning.yml", 'rt') as f:
truth_binning = binning.yaml.full_load(f)
resp = migration.ResponseMatrix(reco_binning, truth_binning)
n_toys = 100
for i in range(n_toys):
resp.reset()
resp.fill_from_csv_file(["modelA_data.txt", "modelB_data.txt"],
weightfield='weight_%i'%(i,), rename={'reco_x_%i'%(i,): 'reco_x'},
buffer_csv_files=True)
resp.fill_up_truth_from_csv_file(
["../00/modelA_truth.txt", "../00/modelB_truth.txt"],
buffer_csv_files=True)
builder.add_matrix(resp)
.. note::
The toy variations in ``modelA_data.txt`` and ``modelB_data.txt``
must be identical! Otherwise it is not possible to fill the toy matrices with
events from both files. It would mix events reconstructed with different
detectors.
This might create some warnings like this::
UserWarning: Filled-up values are less than the original filling in 1 bins. This should not happen!
return self._replace_smaller_truth(new_truth_binning)
This occurs when events in a bin with already high efficiency are re-weighted
to give the impression of an efficiency greater than 100%. If it does not
happen too often, it can be ignored.
The builder generates the actual array of floating point numbers as soon as the
:class:`.ResponseMatrix` object is added. It retains no connection to the object
itself.
Now we just need to save the set of response matrices for later use in the
likelihood fits::
builder.export("response_matrix.npz")
The :class:`.LikelihoodCalculator` is created just like in the previous example::
import numpy as np
from matplotlib import pyplot as plt
from remu import binning
from remu import plotting
from remu import likelihood
from multiprocess import Pool
pool = Pool(8)
likelihood.mapper = pool.map
with open("../01/reco-binning.yml", 'rt') as f:
reco_binning = binning.yaml.full_load(f)
with open("../01/optimised-truth-binning.yml", 'rt') as f:
truth_binning = binning.yaml.full_load(f)
reco_binning.fill_from_csv_file("../00/real_data.txt")
data = reco_binning.get_entries_as_ndarray()
data_model = likelihood.PoissonData(data)
# No systematics LikelihoodCalculator
response_matrix = "../01/response_matrix.npz"
matrix_predictor = likelihood.ResponseMatrixPredictor(response_matrix)
calc = likelihood.LikelihoodCalculator(data_model, matrix_predictor)
# Systematics LikelihoodCalculator
response_matrix_syst = "response_matrix.npz"
matrix_predictor_syst = likelihood.ResponseMatrixPredictor(response_matrix_syst)
calc_syst = likelihood.LikelihoodCalculator(data_model, matrix_predictor_syst)
To show the influence of the systematic uncertainties, we create two
:class:`.LikelihoodCalculator` objects here. One with the non-varied detector
response and one with the set of systematically varied responses.
Now we can test some models against the data, just like in the previous
example::
truth_binning.fill_from_csv_file("../00/modelA_truth.txt")
modelA = truth_binning.get_values_as_ndarray()
modelA /= np.sum(modelA)
truth_binning.reset()
truth_binning.fill_from_csv_file("../00/modelB_truth.txt")
modelB = truth_binning.get_values_as_ndarray()
modelB /= np.sum(modelB)
maxi = likelihood.BasinHoppingMaximizer()
modelA_shape = likelihood.TemplatePredictor([modelA])
calcA = calc.compose(modelA_shape)
retA = maxi(calcA)
print(retA)
.. include:: modelA_fit.txt
:literal:
::
calcA_syst = calc_syst.compose(modelA_shape)
retA_syst = maxi(calcA_syst)
print(retA_syst)
.. include:: modelA_fit_syst.txt
:literal:
::
modelB_shape = likelihood.TemplatePredictor([modelB])
calcB = calc.compose(modelB_shape)
retB = maxi(calcB)
print(retB)
.. include:: modelB_fit.txt
:literal:
::
calcB_syst = calc_syst.compose(modelB_shape)
retB_syst = maxi(calcB_syst)
print(retB_syst)
.. include:: modelB_fit_syst.txt
:literal:
Let us take another look at how the fitted templates and the data compare in
reco space. This time we are going to use `ReMU`'s built in functions to plot a
set of bin contents, i.e. the set of model predictions varied by the detector
systematics::
pltr = plotting.get_plotter(reco_binning)
pltr.plot_values(edgecolor='C0', label='data', hatch=None, linewidth=2.)
modelA_reco, modelA_weights = calcA.predictor(retA.x)
modelB_reco, modelB_weights = calcB.predictor(retB.x)
modelA_syst_reco, modelA_syst_weights = calcA_syst.predictor(retA.x)
modelB_syst_reco, modelB_syst_weights = calcB_syst.predictor(retB.x)
pltr.plot_array(modelA_reco, label='model A', edgecolor='C1', hatch=None)
pltr.plot_array(modelA_syst_reco, label='model A syst', edgecolor='C1',
hatch=r'//', stack_function=0.68)
pltr.plot_array(modelB_reco, label='model B', edgecolor='C2', hatch=None)
pltr.plot_array(modelB_syst_reco, label='model B syst', edgecolor='C2',
hatch=r'\\', stack_function=0.68)
pltr.legend()
pltr.savefig('reco-comparison.png')
.. image:: reco-comparison.png
The ``modelX_syst_reco`` arrays now contain 100 different predictions,
corresponding to the 100 detector variations. The parameter ``stack_function =
0.68`` tells the plotter to draw the area of the central 68% of the range of
predictions.
And of course we can compare the p-values of the two fitted models assuming the
nominal detector response::
testA = likelihood.HypothesisTester(calcA)
testB = likelihood.HypothesisTester(calcB)
print(testA.max_likelihood_p_value())
print(testB.max_likelihood_p_value())
.. include:: fit_p-values.txt
:literal:
As well as the p-values yielded when taking the systematic uncertainties into account::
testA_syst = likelihood.HypothesisTester(calcA_syst)
testB_syst = likelihood.HypothesisTester(calcB_syst)
print(testA_syst.max_likelihood_p_value())
print(testB_syst.max_likelihood_p_value())
.. include:: fit_p-values_syst.txt
:literal:
We can construct confidence intervals on the template weights of the models
using the maximum likelihood p-value just like before::
p_values_A = []
p_values_B = []
p_values_A_syst = []
p_values_B_syst = []
values = np.linspace(600, 1600, 21)
for v in values:
A = testA.max_likelihood_p_value([v])
A_syst = testA_syst.max_likelihood_p_value([v])
B = testB.max_likelihood_p_value([v])
B_syst = testB_syst.max_likelihood_p_value([v])
print(v, A, A_syst, B, B_syst)
p_values_A.append(A)
p_values_B.append(B)
p_values_A_syst.append(A_syst)
p_values_B_syst.append(B_syst)
fig, ax = plt.subplots()
ax.set_xlabel("Model weight")
ax.set_ylabel("p-value")
ax.plot(values, p_values_A, label="Model A", color='C1', linestyle='dotted')
ax.plot(values, p_values_A_syst, label="Model A syst", color='C1', linestyle='solid')
ax.plot(values, p_values_B, label="Model B", color='C2', linestyle='dotted')
ax.plot(values, p_values_B_syst, label="Model B syst", color='C2', linestyle='solid')
ax.axvline(retA.x[0], color='C1', linestyle='dotted')
ax.axvline(retA_syst.x[0], color='C1', linestyle='solid')
ax.axvline(retB.x[0], color='C2', linestyle='dotted')
ax.axvline(retB_syst.x[0], color='C2', linestyle='solid')
ax.axhline(0.32, color='k', linestyle='dashed')
ax.axhline(0.05, color='k', linestyle='dashed')
ax.legend(loc='best')
fig.savefig("p-values.png")
.. image:: p-values.png
The "fixed" models have no free parameters here, making them degenerate. This
means using the :meth:`.max_likelihood_p_value` is equivalent to testing the
corresponding simple hypotheses using the :meth:`.likelihood_p_value`. No
actual maximisation is taking place. Note that the p-values for model A are
generally smaller than those for model B. This is consistent with previous
results showing that the data is better described by model B.
Finally, let us construct confidence intervals of the template weights,
*assuming that the corresponding model is correct*::
p_values_A = []
p_values_B = []
p_values_A_syst = []
p_values_B_syst = []
values = np.linspace(600, 1600, 21)
for v in values:
A = testA.max_likelihood_ratio_p_value([v])
A_syst = testA_syst.max_likelihood_ratio_p_value([v])
B = testB.max_likelihood_ratio_p_value([v])
B_syst = testB_syst.max_likelihood_ratio_p_value([v])
print(v, A, A_syst, B, B_syst)
p_values_A.append(A)
p_values_B.append(B)
p_values_A_syst.append(A_syst)
p_values_B_syst.append(B_syst)
p_values_A_wilks = []
p_values_B_wilks = []
fine_values = np.linspace(600, 1600, 100)
for v in fine_values:
A = testA_syst.wilks_max_likelihood_ratio_p_value([v])
B = testB_syst.wilks_max_likelihood_ratio_p_value([v])
print(v, A, B)
p_values_A_wilks.append(A)
p_values_B_wilks.append(B)
fig, ax = plt.subplots()
ax.set_xlabel("Model weight")
ax.set_ylabel("p-value")
ax.plot(values, p_values_A, label="Model A", color='C1', linestyle='dotted')
ax.plot(values, p_values_A_syst, label="Model A syst", color='C1', linestyle='solid')
ax.plot(fine_values, p_values_A_wilks, label="Model A Wilks", color='C1', linestyle='dashed')
ax.plot(values, p_values_B, label="Model B", color='C2', linestyle='dotted')
ax.plot(values, p_values_B_syst, label="Model B syst", color='C2', linestyle='solid')
ax.plot(fine_values, p_values_B_wilks, label="Model B Wilks", color='C2', linestyle='dashed')
ax.axvline(retA.x[0], color='C1', linestyle='dotted')
ax.axvline(retA_syst.x[0], color='C1', linestyle='solid')
ax.axvline(retB.x[0], color='C2', linestyle='dotted')
ax.axvline(retB_syst.x[0], color='C2', linestyle='solid')
ax.axhline(0.32, color='k', linestyle='dashed')
ax.axhline(0.05, color='k', linestyle='dashed')
ax.legend(loc='best')
fig.savefig("ratio-p-values.png")
.. image:: ratio-p-values.png
Note that the p-values for both models go up to 1.0 here (within the
granularity of the parameter scan) and that the constructed confidence
intervals are very different from the ones before. This is because we are
asking a different question now. Before we asked the question "What is the
probability of getting a worse likelihood assuming that the tested
model-parameter is true?". Now we ask the question "What is the probability of
getting a worse best-fit likelihood ratio, assuming the tested model-parameter
is true?". Since the likelihood ratio is 1.0 at the best fit point and the
likelihood ratio of nested hypotheses is less than or equal to 1.0 by
construction, the p-value is 100% there.
The method :meth:`.wilks_max_likelihood_ratio_p_value` calculates the same
p-value as :meth:`.max_likelihood_ratio_p_value`, but does so assuming Wilks'
theorem holds. This does not require the generation of random data and
subsequent likelihood maximisations, so it is much faster.
| /remu-0.8.0.tar.gz/remu-0.8.0/docs/examples/03/README.rst | 0.957258 | 0.818084 | README.rst | pypi |
import numpy as np
from matplotlib import pyplot as plt
from multiprocess import Pool
from remu import binning, likelihood, plotting
# Parallelise likelihood evaluations over 8 worker processes
pool = Pool(8)
likelihood.mapper = pool.map
with open("../01/reco-binning.yml") as f:
    reco_binning = binning.yaml.full_load(f)
with open("../01/optimised-truth-binning.yml") as f:
    truth_binning = binning.yaml.full_load(f)
# Observed data and its Poisson likelihood model
reco_binning.fill_from_csv_file("../00/real_data.txt")
data = reco_binning.get_entries_as_ndarray()
data_model = likelihood.PoissonData(data)
# No systematics LikelihoodCalculator
response_matrix = "../01/response_matrix.npz"
matrix_predictor = likelihood.ResponseMatrixPredictor(response_matrix)
calc = likelihood.LikelihoodCalculator(data_model, matrix_predictor)
# Systematics LikelihoodCalculator
response_matrix_syst = "response_matrix.npz"
matrix_predictor_syst = likelihood.ResponseMatrixPredictor(response_matrix_syst)
calc_syst = likelihood.LikelihoodCalculator(data_model, matrix_predictor_syst)
# Model templates: normalised truth distributions of models A and B
truth_binning.fill_from_csv_file("../00/modelA_truth.txt")
modelA = truth_binning.get_values_as_ndarray()
modelA /= np.sum(modelA)
truth_binning.reset()
truth_binning.fill_from_csv_file("../00/modelB_truth.txt")
modelB = truth_binning.get_values_as_ndarray()
modelB /= np.sum(modelB)
maxi = likelihood.BasinHoppingMaximizer()
# Fit model A with and without detector systematics
modelA_shape = likelihood.TemplatePredictor([modelA])
calcA = calc.compose(modelA_shape)
retA = maxi(calcA)
with open("modelA_fit.txt", "w") as f:
    print(retA, file=f)
calcA_syst = calc_syst.compose(modelA_shape)
retA_syst = maxi(calcA_syst)
with open("modelA_fit_syst.txt", "w") as f:
    print(retA_syst, file=f)
# Fit model B with and without detector systematics
modelB_shape = likelihood.TemplatePredictor([modelB])
calcB = calc.compose(modelB_shape)
retB = maxi(calcB)
with open("modelB_fit.txt", "w") as f:
    print(retB, file=f)
calcB_syst = calc_syst.compose(modelB_shape)
retB_syst = maxi(calcB_syst)
with open("modelB_fit_syst.txt", "w") as f:
    print(retB_syst, file=f)
# Compare fitted predictions with data in reco space
pltr = plotting.get_plotter(reco_binning)
pltr.plot_values(edgecolor="C0", label="data", hatch=None, linewidth=2.0)
modelA_reco, modelA_weights = calcA.predictor(retA.x)
modelB_reco, modelB_weights = calcB.predictor(retB.x)
modelA_syst_reco, modelA_syst_weights = calcA_syst.predictor(retA.x)
modelB_syst_reco, modelB_syst_weights = calcB_syst.predictor(retB.x)
pltr.plot_array(modelA_reco, label="model A", edgecolor="C1", hatch=None)
# stack_function=0.68 draws the central 68% band of the toy predictions
pltr.plot_array(
    modelA_syst_reco,
    label="model A syst",
    edgecolor="C1",
    hatch=r"//",
    stack_function=0.68,
)
pltr.plot_array(modelB_reco, label="model B", edgecolor="C2", hatch=None)
pltr.plot_array(
    modelB_syst_reco,
    label="model B syst",
    edgecolor="C2",
    hatch=r"\\",
    stack_function=0.68,
)
pltr.legend()
pltr.savefig("reco-comparison.png")
del pltr
# Goodness-of-fit p-values with and without systematics
testA = likelihood.HypothesisTester(calcA)
testB = likelihood.HypothesisTester(calcB)
with open("fit_p-values.txt", "w") as f:
    print(testA.max_likelihood_p_value(), file=f)
    print(testB.max_likelihood_p_value(), file=f)
testA_syst = likelihood.HypothesisTester(calcA_syst)
testB_syst = likelihood.HypothesisTester(calcB_syst)
with open("fit_p-values_syst.txt", "w") as f:
    print(testA_syst.max_likelihood_p_value(), file=f)
    print(testB_syst.max_likelihood_p_value(), file=f)
# Scan template weights and record max-likelihood p-values
p_values_A = []
p_values_B = []
p_values_A_syst = []
p_values_B_syst = []
values = np.linspace(600, 1600, 21)
for v in values:
    A = testA.max_likelihood_p_value([v])
    A_syst = testA_syst.max_likelihood_p_value([v])
    B = testB.max_likelihood_p_value([v])
    B_syst = testB_syst.max_likelihood_p_value([v])
    print(v, A, A_syst, B, B_syst)
    p_values_A.append(A)
    p_values_B.append(B)
    p_values_A_syst.append(A_syst)
    p_values_B_syst.append(B_syst)
fig, ax = plt.subplots()
ax.set_xlabel("Model weight")
ax.set_ylabel("p-value")
ax.plot(values, p_values_A, label="Model A", color="C1", linestyle="dotted")
ax.plot(values, p_values_A_syst, label="Model A syst", color="C1", linestyle="solid")
ax.plot(values, p_values_B, label="Model B", color="C2", linestyle="dotted")
ax.plot(values, p_values_B_syst, label="Model B syst", color="C2", linestyle="solid")
# Vertical lines mark the best-fit points of each fit
ax.axvline(retA.x[0], color="C1", linestyle="dotted")
ax.axvline(retA_syst.x[0], color="C1", linestyle="solid")
ax.axvline(retB.x[0], color="C2", linestyle="dotted")
ax.axvline(retB_syst.x[0], color="C2", linestyle="solid")
# Reference levels at p = 0.32 and p = 0.05
ax.axhline(0.32, color="k", linestyle="dashed")
ax.axhline(0.05, color="k", linestyle="dashed")
ax.legend(loc="best")
fig.savefig("p-values.png")
# Same scan with likelihood-ratio p-values (confidence intervals assuming
# the respective model is correct)
p_values_A = []
p_values_B = []
p_values_A_syst = []
p_values_B_syst = []
values = np.linspace(600, 1600, 21)
for v in values:
    A = testA.max_likelihood_ratio_p_value([v])
    A_syst = testA_syst.max_likelihood_ratio_p_value([v])
    B = testB.max_likelihood_ratio_p_value([v])
    B_syst = testB_syst.max_likelihood_ratio_p_value([v])
    print(v, A, A_syst, B, B_syst)
    p_values_A.append(A)
    p_values_B.append(B)
    p_values_A_syst.append(A_syst)
    p_values_B_syst.append(B_syst)
# Finer scan using Wilks' theorem (no random data generation, much faster)
p_values_A_wilks = []
p_values_B_wilks = []
fine_values = np.linspace(600, 1600, 100)
for v in fine_values:
    A = testA_syst.wilks_max_likelihood_ratio_p_value([v])
    B = testB_syst.wilks_max_likelihood_ratio_p_value([v])
    print(v, A, B)
    p_values_A_wilks.append(A)
    p_values_B_wilks.append(B)
fig, ax = plt.subplots()
ax.set_xlabel("Model weight")
ax.set_ylabel("p-value")
ax.plot(values, p_values_A, label="Model A", color="C1", linestyle="dotted")
ax.plot(values, p_values_A_syst, label="Model A syst", color="C1", linestyle="solid")
ax.plot(
    fine_values, p_values_A_wilks, label="Model A Wilks", color="C1", linestyle="dashed"
)
ax.plot(values, p_values_B, label="Model B", color="C2", linestyle="dotted")
ax.plot(values, p_values_B_syst, label="Model B syst", color="C2", linestyle="solid")
ax.plot(
    fine_values, p_values_B_wilks, label="Model B Wilks", color="C2", linestyle="dashed"
)
# Vertical lines mark the best-fit points; horizontal dashed lines are
# reference levels at p = 0.32 and p = 0.05
ax.axvline(retA.x[0], color="C1", linestyle="dotted")
ax.axvline(retA_syst.x[0], color="C1", linestyle="solid")
ax.axvline(retB.x[0], color="C2", linestyle="dotted")
ax.axvline(retB_syst.x[0], color="C2", linestyle="solid")
ax.axhline(0.32, color="k", linestyle="dashed")
ax.axhline(0.05, color="k", linestyle="dashed")
ax.legend(loc="best")
fig.savefig("ratio-p-values.png")
# Restore the serial mapper before the pool goes away
likelihood.mapper = map
del pool | /remu-0.8.0.tar.gz/remu-0.8.0/docs/examples/03/fit_models.py | 0.483648 | 0.441553 | fit_models.py | pypi |
.. _example05:
=========================
Example 05 -- Backgrounds
=========================
Aims
====
* Understand and deal with background events in measurements
Instructions
============
Real experiments will almost always include some sort of background or noise
events in the recorded data. ReMU is well equipped to deal with those in a
consistent fashion.
Let us first create the noisy data as well as simulations of background and
noise events::
# Create "real" data
../simple_experiment/run_experiment.py 10 real_data.txt --enable-background
# Create BG simulation
../simple_experiment/simulate_experiment.py 100 background nominal_bg_data.txt bg_truth.txt
# Create Noise simulation
../simple_experiment/simulate_experiment.py 100 noise noise_data.txt noise_truth.txt
# Create Variations
../simple_experiment/vary_detector.py nominal_bg_data.txt bg_data.txt
In this context, we call "noise" events that are recorded, but that do not have
meaningful true information associated with them. "Background" on the other
hand can be understood as regular events in the detector, but which we are not
interested in (e.g. because they are created by a different process than our
main subject of study).
Background events can behave differently than signal events, but because they
also have a defined true information, we can treat them just like signal. The
only difference is that they will occupy different truth bins than the signal
events.
Noise events on the other hand do not have meaningful true properties, so we
will just assign them all to a single truth bin. The content of that single bin
will then determine the number of expected noise events in the reconstructed
binnings.
Starting from the truth binning we created for the signal models in a previous
example, let us create a binning for the background events::
import numpy as np
from remu import binning
from remu import migration
from remu import matrix_utils
from remu import plotting
builder = migration.ResponseMatrixArrayBuilder(1)
with open("../01/reco-binning.yml", 'rt') as f:
reco_binning = binning.yaml.full_load(f)
with open("../01/optimised-truth-binning.yml", 'rt') as f:
signal_binning = binning.yaml.full_load(f)
bg_binning = signal_binning.clone()
resp = migration.ResponseMatrix(reco_binning, bg_binning)
i = 0
resp.fill_from_csv_file("bg_data.txt", weightfield='weight_%i'%(i,),
rename={'reco_x_%i'%(i,): 'reco_x'}, buffer_csv_files=True)
resp.fill_up_truth_from_csv_file("bg_truth.txt", buffer_csv_files=True)
entries = resp.get_truth_entries_as_ndarray()
while np.min(entries) < 10:
resp = matrix_utils.improve_stats(resp)
entries = resp.get_truth_entries_as_ndarray()
bg_binning = resp.truth_binning
bg_binning.reset()
reco_binning.reset()
This ``bg_binning`` will now include at least 10 events in each truth bin, when
building the response matrix.
Next we will create a binning that can distinguish between noise, background
and signal events, according to a variable aptly called ``event_type``::
truth_binning = binning.LinearBinning(
variable = 'event_type',
bin_edges = [-1.5, -0.5, 0.5, 1.5],
subbinnings = {
1: bg_binning,
2: signal_binning,
}
)
with open("truth-binning.yml", 'wt') as f:
binning.yaml.dump(truth_binning, f)
The :class:`.LinearBinning` would have only three bins, but we insert the
previously created background and signal binnings as subbinnings into the
second and third bin respectively. This means that events which would fall into
those bins will get further subdivided by the subbinnings. We thus have a
binning that puts noise events (``event_type == -1``) into a single bin, sorts
background events (``event_type == 0``) according to ``bg_binning``, and sorts
signal events (``event_type == 1``) according to ``signal_binning``.
Now we create a :class:`.ResponseMatrix` with this binning, just like we did in
previous examples::
resp = migration.ResponseMatrix(reco_binning, truth_binning,
nuisance_indices=[0])
To fill the matrix, we need to tell it which ``event_type`` each event is,
though. This information might already be part of the simulated data, but
in this case we have to add that variable by hand.
For this we can use the ``cut_function`` parameter. A cut function takes the
data (a structured numpy array) as its only argument and returns the data that
should be filled into the binning::
import numpy.lib.recfunctions as rfn
def set_signal(data):
return rfn.append_fields(data, 'event_type', np.full_like(data['true_x'], 1.))
def set_bg(data):
return rfn.append_fields(data, 'event_type', np.full_like(data['true_x'], 0.))
def set_noise(data):
return rfn.append_fields(data, 'event_type', np.full_like(data['reco_x'], -1.))
n_toys = 100
for i in range(n_toys):
resp.reset()
resp.fill_from_csv_file(["../03/modelA_data.txt", "../03/modelB_data.txt"],
weightfield='weight_%i'%(i,), rename={'reco_x_%i'%(i,): 'reco_x'},
cut_function=set_signal, buffer_csv_files=True)
resp.fill_up_truth_from_csv_file(
["../00/modelA_truth.txt", "../00/modelB_truth.txt"],
cut_function=set_signal, buffer_csv_files=True)
resp.fill_from_csv_file("bg_data.txt", weightfield='weight_%i'%(i,),
rename={'reco_x_%i'%(i,): 'reco_x'}, cut_function=set_bg,
buffer_csv_files=True)
resp.fill_up_truth_from_csv_file("bg_truth.txt", cut_function=set_bg,
buffer_csv_files=True)
# Calling `fill_up_truth_from_csv` twice only works because
# the files fill completely different bins
resp.fill_from_csv_file("noise_data.txt", cut_function=set_noise,
buffer_csv_files=True)
builder.add_matrix(resp)
builder.export("response_matrix.npz")
We can take a look at the truth information that has been filled into the last
of the matrices::
pltr = plotting.get_plotter(truth_binning)
pltr.plot_values(density=False)
pltr.savefig('truth.png')
.. image:: truth.png
The base binning is a :class:`.LinearBinning` with only three bins. The
corresponding plotter does not know how to plot the subbinnings, so it just
marginalizes them out. To plot the content of all truth bins, we can use the
basic :class:`.BinningPlotter`, which simply plots the content of each bin::
pltr = plotting.BinningPlotter(truth_binning)
pltr.plot_values(density=False)
pltr.savefig('all_truth.png')
.. image:: all_truth.png
We can look at the content of the subbinnings directly for some nicer plots::
pltr = plotting.get_plotter(signal_binning)
pltr.plot_values()
pltr.savefig('signal_truth.png')
.. image:: signal_truth.png
::
pltr = plotting.get_plotter(bg_binning)
pltr.plot_values()
pltr.savefig('bg_truth.png')
.. image:: bg_truth.png
And we can take a look at the efficiencies using the corresponding convenience
function::
matrix_utils.plot_mean_efficiency(resp, "efficiency.png")
.. image:: efficiency.png
Next we can use the response matrix for some hypothesis tests. First we need to
create the :class:`.ResponseMatrixPredictor` and the templates to be used with
the :class:`.TemplatePredictor`::
from remu import likelihood
from multiprocess import Pool
pool = Pool(8)
likelihood.mapper = pool.map
with open("../01/reco-binning.yml", 'rt') as f:
reco_binning = binning.yaml.full_load(f)
with open("truth-binning.yml", 'rt') as f:
truth_binning = binning.yaml.full_load(f)
reco_binning.fill_from_csv_file("real_data.txt")
data = reco_binning.get_entries_as_ndarray()
data_model = likelihood.PoissonData(data)
response_matrix = "response_matrix.npz"
matrix_predictor = likelihood.ResponseMatrixPredictor(response_matrix)
calc = likelihood.LikelihoodCalculator(data_model, matrix_predictor)
maxi = likelihood.BasinHoppingMaximizer()
import numpy.lib.recfunctions as rfn
def set_signal(data):
return rfn.append_fields(data, 'event_type', np.full_like(data['true_x'], 1.))
def set_bg(data):
return rfn.append_fields(data, 'event_type', np.full_like(data['true_x'], 0.))
truth_binning.fill_from_csv_file("../00/modelA_truth.txt",
cut_function=set_signal)
modelA = truth_binning.get_values_as_ndarray()
modelA /= np.sum(modelA)
truth_binning.reset()
truth_binning.fill_from_csv_file("../00/modelB_truth.txt",
cut_function=set_signal)
modelB = truth_binning.get_values_as_ndarray()
modelB /= np.sum(modelB)
truth_binning.reset()
truth_binning.fill_from_csv_file("bg_truth.txt", cut_function=set_bg)
bg = truth_binning.get_values_as_ndarray()
bg /= np.sum(bg)
truth_binning.reset()
noise = truth_binning.get_values_as_ndarray()
noise[0] = 1.
Since we put all noise events into the first truth bin, the noise template is
just a value of 1 in that bin.
Now we start by trying to fit the model A template without background or noise
events::
modelA_only = likelihood.TemplatePredictor([modelA])
calcA_only = calc.compose(modelA_only)
retA_only = maxi(calcA_only)
print(retA_only)
.. include:: modelA_only_fit.txt
:literal:
To judge how well the result actually fits, we can consult the result's p-value::
testA_only = likelihood.HypothesisTester(calcA_only)
print(testA_only.likelihood_p_value(retA_only.x))
.. include:: modelA_only_gof.txt
:literal:
It clearly is a very bad fit, which is reflected in the maximum likelihood
p-value as well::
print(testA_only.max_likelihood_p_value())
.. include:: modelA_only_p_value.txt
:literal:
So the signal-only model A hypothesis can be excluded. Now let us try again
with background and noise templates added in::
modelA_bg = likelihood.TemplatePredictor([noise, bg, modelA])
calcA_bg = calc.compose(modelA_bg)
retA_bg = maxi(calcA_bg)
print(retA_bg)
.. include:: modelA_bg_fit.txt
:literal:
::
testA_bg = likelihood.HypothesisTester(calcA_bg)
print(testA_bg.likelihood_p_value(retA_bg.x))
.. include:: modelA_bg_gof.txt
:literal:
::
print(testA_bg.max_likelihood_p_value())
.. include:: modelA_bg_p_value.txt
:literal:
This fit is clearly much better. We can repeat the same with model B::
modelB_only = likelihood.TemplatePredictor([modelB])
calcB_only = calc.compose(modelB_only)
retB_only = maxi(calcB_only)
print(retB_only)
.. include:: modelB_only_fit.txt
:literal:
::
testB_only = likelihood.HypothesisTester(calcB_only)
print(testB_only.likelihood_p_value(retB_only.x))
.. include:: modelB_only_gof.txt
:literal:
::
print(testB_only.max_likelihood_p_value())
.. include:: modelB_only_p_value.txt
:literal:
::
modelB_bg = likelihood.TemplatePredictor([noise, bg, modelB])
calcB_bg = calc.compose(modelB_bg)
retB_bg = maxi(calcB_bg)
print(retB_bg)
.. include:: modelB_bg_fit.txt
:literal:
::
testB_bg = likelihood.HypothesisTester(calcB_bg)
print(testB_bg.likelihood_p_value(retB_bg.x))
.. include:: modelB_bg_gof.txt
:literal:
::
print(testB_bg.max_likelihood_p_value())
.. include:: modelB_bg_p_value.txt
:literal:
We can also take a qualitative look at the results by plotting the maximum
likelihood predictions in reco and truth space::
pltr = plotting.get_plotter(reco_binning)
modelA_reco, modelA_weights = calcA_only.predictor(retA_only.x)
modelB_reco, modelB_weights = calcB_only.predictor(retB_only.x)
modelA_bg_reco, modelA_bg_weights = calcA_bg.predictor(retA_bg.x)
modelB_bg_reco, modelB_bg_weights = calcB_bg.predictor(retB_bg.x)
pltr.plot_array(modelA_reco, label='model A only', stack_function=0.68,
hatch=r'//', edgecolor='C1')
pltr.plot_array(modelA_bg_reco, label='model A + bg', stack_function=0.68,
hatch=r'*', edgecolor='C1')
pltr.plot_array(modelB_reco, label='model B only', stack_function=0.68,
hatch=r'\\', edgecolor='C2')
pltr.plot_array(modelB_bg_reco, label='model B + bg', stack_function=0.68,
hatch=r'O', edgecolor='C2')
pltr.plot_entries(edgecolor='C0', label='data', hatch=None, linewidth=2.)
pltr.legend()
pltr.savefig('reco-comparison.png')
.. image:: reco-comparison.png
::
pltr = plotting.get_plotter(truth_binning)
pltr.plot_array(modelA_only(retA_only.x)[0], label='model A only',
hatch=r'//', edgecolor='C1', density=False)
pltr.plot_array(modelA_bg(retA_bg.x)[0], label='model A + bg',
hatch=r'*', edgecolor='C1', density=False)
pltr.plot_array(modelB_only(retB_only.x)[0], label='model B only',
hatch=r'\\', edgecolor='C2', density=False)
pltr.plot_array(modelB_bg(retB_bg.x)[0], label='model B + bg',
hatch=r'O', edgecolor='C2', density=False)
pltr.legend(loc='upper left')
pltr.savefig('truth-comparison.png')
.. image:: truth-comparison.png
| /remu-0.8.0.tar.gz/remu-0.8.0/docs/examples/05/README.rst | 0.898109 | 0.659981 | README.rst | pypi |
import numpy as np
from remu import binning, matrix_utils, migration, plotting
# Collect the toy-varied response matrices into one exportable array.
builder = migration.ResponseMatrixArrayBuilder(1)
# Re-use the reco binning and the optimised signal truth binning from example 01.
with open("../01/reco-binning.yml") as f:
    reco_binning = binning.yaml.full_load(f)
with open("../01/optimised-truth-binning.yml") as f:
    signal_binning = binning.yaml.full_load(f)
# Start the background truth binning as a copy of the signal binning and
# coarsen it below until it has enough simulated background statistics.
bg_binning = signal_binning.clone()
resp = migration.ResponseMatrix(reco_binning, bg_binning)
i = 0
# Fill with the first detector-systematics toy (weight_0 / reco_x_0 columns).
resp.fill_from_csv_file(
    "bg_data.txt",
    weightfield="weight_%i" % (i,),
    rename={"reco_x_%i" % (i,): "reco_x"},
    buffer_csv_files=True,
)
resp.fill_up_truth_from_csv_file("bg_truth.txt", buffer_csv_files=True)
entries = resp.get_truth_entries_as_ndarray()
# Improve the binning until every truth bin holds at least 10 background events.
while np.min(entries) < 10:
    resp = matrix_utils.improve_stats(resp)
    entries = resp.get_truth_entries_as_ndarray()
bg_binning = resp.truth_binning
# Keep only the binning structure; clear the fill statistics.
bg_binning.reset()
reco_binning.reset()
# Combined truth binning over `event_type`: a single bin for noise
# (event_type == -1), the background binning as subbinning for
# event_type == 0, and the signal binning for event_type == 1.
truth_binning = binning.LinearBinning(
    variable="event_type",
    bin_edges=[-1.5, -0.5, 0.5, 1.5],
    subbinnings={
        1: bg_binning,
        2: signal_binning,
    },
)
with open("truth-binning.yml", "w") as f:
    binning.yaml.dump(truth_binning, f)
# Truth bin 0 (noise) carries no meaningful truth information, so it is
# declared a nuisance index of the response matrix.
resp = migration.ResponseMatrix(reco_binning, truth_binning, nuisance_indices=[0])
import numpy.lib.recfunctions as rfn
def set_signal(data):
    """Tag every record in *data* as a signal event (``event_type == 1``)."""
    tags = np.full_like(data["true_x"], 1.0)
    return rfn.append_fields(data, "event_type", tags)
def set_bg(data):
    """Tag every record in *data* as a background event (``event_type == 0``)."""
    tags = np.full_like(data["true_x"], 0.0)
    return rfn.append_fields(data, "event_type", tags)
def set_noise(data):
    """Tag every record in *data* as a noise event (``event_type == -1``).

    Noise events have no true information, so the tag array is shaped
    after the ``reco_x`` column instead of ``true_x``.
    """
    tags = np.full_like(data["reco_x"], -1.0)
    return rfn.append_fields(data, "event_type", tags)
n_toys = 100
# Build one response matrix per detector-systematics toy; each toy i has
# its own weight_<i> and reco_x_<i> columns in the simulation files.
for i in range(n_toys):
    resp.reset()
    # Signal events, tagged with event_type == 1.
    resp.fill_from_csv_file(
        ["../03/modelA_data.txt", "../03/modelB_data.txt"],
        weightfield="weight_%i" % (i,),
        rename={"reco_x_%i" % (i,): "reco_x"},
        cut_function=set_signal,
        buffer_csv_files=True,
    )
    resp.fill_up_truth_from_csv_file(
        ["../00/modelA_truth.txt", "../00/modelB_truth.txt"],
        cut_function=set_signal,
        buffer_csv_files=True,
    )
    # Background events, tagged with event_type == 0.
    resp.fill_from_csv_file(
        "bg_data.txt",
        weightfield="weight_%i" % (i,),
        rename={"reco_x_%i" % (i,): "reco_x"},
        cut_function=set_bg,
        buffer_csv_files=True,
    )
    resp.fill_up_truth_from_csv_file(
        "bg_truth.txt", cut_function=set_bg, buffer_csv_files=True
    )
    # Calling `fill_up_truth_from_csv` twice only works because
    # the files fill completely different bins
    # Noise events (event_type == -1) all land in the single noise bin.
    resp.fill_from_csv_file(
        "noise_data.txt", cut_function=set_noise, buffer_csv_files=True
    )
    builder.add_matrix(resp)
builder.export("response_matrix.npz")
# Truth distribution of the last filled matrix; the default plotter for
# the base LinearBinning marginalises out the subbinnings.
pltr = plotting.get_plotter(truth_binning)
pltr.plot_values(density=False)
pltr.savefig("truth.png")
# Basic bin-by-bin plot that shows the content of all bins, including
# the subbinnings.
pltr = plotting.BinningPlotter(truth_binning)
pltr.plot_values(density=False)
pltr.savefig("all_truth.png")
# Signal and background subbinnings plotted with their native plotters.
pltr = plotting.get_plotter(signal_binning)
pltr.plot_values()
pltr.savefig("signal_truth.png")
pltr = plotting.get_plotter(bg_binning)
pltr.plot_values()
pltr.savefig("bg_truth.png")
# Mean efficiency of the response matrix.
matrix_utils.plot_mean_efficiency(resp, "efficiency.png")
import numpy as np
from multiprocess import Pool
from remu import binning, likelihood, plotting
# Evaluate likelihoods in parallel with a worker pool (restored to the
# serial `map` at the end of the script).
pool = Pool(8)
likelihood.mapper = pool.map
with open("../01/reco-binning.yml") as f:
    reco_binning = binning.yaml.full_load(f)
with open("truth-binning.yml") as f:
    truth_binning = binning.yaml.full_load(f)
# Poisson likelihood of the recorded (noisy) data.
reco_binning.fill_from_csv_file("real_data.txt")
data = reco_binning.get_entries_as_ndarray()
data_model = likelihood.PoissonData(data)
response_matrix = "response_matrix.npz"
matrix_predictor = likelihood.ResponseMatrixPredictor(response_matrix)
calc = likelihood.LikelihoodCalculator(data_model, matrix_predictor)
maxi = likelihood.BasinHoppingMaximizer()
import numpy.lib.recfunctions as rfn
def set_signal(data):
    """Append an ``event_type`` column equal to 1 (signal) to *data*."""
    signal_tag = np.full_like(data["true_x"], 1.0)
    return rfn.append_fields(data, "event_type", signal_tag)
def set_bg(data):
    """Append an ``event_type`` column equal to 0 (background) to *data*."""
    bg_tag = np.full_like(data["true_x"], 0.0)
    return rfn.append_fields(data, "event_type", bg_tag)
# Signal template for model A, normalised to unit sum.
truth_binning.fill_from_csv_file("../00/modelA_truth.txt", cut_function=set_signal)
modelA = truth_binning.get_values_as_ndarray()
modelA /= np.sum(modelA)
truth_binning.reset()
# Signal template for model B, normalised to unit sum.
truth_binning.fill_from_csv_file("../00/modelB_truth.txt", cut_function=set_signal)
modelB = truth_binning.get_values_as_ndarray()
modelB /= np.sum(modelB)
truth_binning.reset()
# Background template, normalised to unit sum.
truth_binning.fill_from_csv_file("bg_truth.txt", cut_function=set_bg)
bg = truth_binning.get_values_as_ndarray()
bg /= np.sum(bg)
truth_binning.reset()
# All noise events sit in the first truth bin, so the noise template is
# simply a value of 1 in that bin (binning left empty after reset).
noise = truth_binning.get_values_as_ndarray()
noise[0] = 1.0
def _fit_and_report(prefix, predictor):
    """Fit *predictor* against the data and write the results to disk.

    Composes the template predictor with the data likelihood, maximises
    the likelihood, and writes ``<prefix>_fit.txt``, ``<prefix>_gof.txt``
    (likelihood p-value at the best fit) and ``<prefix>_p_value.txt``
    (maximum-likelihood p-value).

    Returns the composed calculator, the fit result, and the tester.
    """
    composed = calc.compose(predictor)
    result = maxi(composed)
    with open("%s_fit.txt" % (prefix,), "w") as f:
        print(result, file=f)
    tester = likelihood.HypothesisTester(composed)
    with open("%s_gof.txt" % (prefix,), "w") as f:
        print(tester.likelihood_p_value(result.x), file=f)
    with open("%s_p_value.txt" % (prefix,), "w") as f:
        print(tester.max_likelihood_p_value(), file=f)
    return composed, result, tester


# Model A signal-only, then model A with noise and background templates.
modelA_only = likelihood.TemplatePredictor([modelA])
calcA_only, retA_only, testA_only = _fit_and_report("modelA_only", modelA_only)

modelA_bg = likelihood.TemplatePredictor([noise, bg, modelA])
calcA_bg, retA_bg, testA_bg = _fit_and_report("modelA_bg", modelA_bg)

# Same for model B.
modelB_only = likelihood.TemplatePredictor([modelB])
calcB_only, retB_only, testB_only = _fit_and_report("modelB_only", modelB_only)

modelB_bg = likelihood.TemplatePredictor([noise, bg, modelB])
calcB_bg, retB_bg, testB_bg = _fit_and_report("modelB_bg", modelB_bg)
# Compare the maximum-likelihood predictions with the data in reco space.
pltr = plotting.get_plotter(reco_binning)
modelA_reco, modelA_weights = calcA_only.predictor(retA_only.x)
modelB_reco, modelB_weights = calcB_only.predictor(retB_only.x)
modelA_bg_reco, modelA_bg_weights = calcA_bg.predictor(retA_bg.x)
modelB_bg_reco, modelB_bg_weights = calcB_bg.predictor(retB_bg.x)
pltr.plot_array(
    modelA_reco, label="model A only", stack_function=0.68, hatch=r"//", edgecolor="C1"
)
pltr.plot_array(
    modelA_bg_reco,
    label="model A + bg",
    stack_function=0.68,
    hatch=r"*",
    edgecolor="C1",
)
pltr.plot_array(
    modelB_reco, label="model B only", stack_function=0.68, hatch=r"\\", edgecolor="C2"
)
pltr.plot_array(
    modelB_bg_reco,
    label="model B + bg",
    stack_function=0.68,
    hatch=r"O",
    edgecolor="C2",
)
pltr.plot_entries(edgecolor="C0", label="data", hatch=None, linewidth=2.0)
pltr.legend()
pltr.savefig("reco-comparison.png")
# Same comparison in truth space.
pltr = plotting.get_plotter(truth_binning)
pltr.plot_array(
    modelA_only(retA_only.x)[0],
    label="model A only",
    hatch=r"//",
    edgecolor="C1",
    density=False,
)
pltr.plot_array(
    modelA_bg(retA_bg.x)[0],
    label="model A + bg",
    hatch=r"*",
    edgecolor="C1",
    density=False,
)
pltr.plot_array(
    modelB_only(retB_only.x)[0],
    label="model B only",
    hatch=r"\\",
    edgecolor="C2",
    density=False,
)
pltr.plot_array(
    modelB_bg(retB_bg.x)[0],
    label="model B + bg",
    hatch=r"O",
    edgecolor="C2",
    density=False,
)
pltr.legend(loc="upper left")
pltr.savefig("truth-comparison.png")
del pltr
# Restore the serial mapper and release the worker pool.
likelihood.mapper = map
del pool
.. _example04:
======================================
Example 04 -- Markov Chain Monte Carlo
======================================
Aims
====
* Use :class:`.LikelihoodCalculator` objects in a Markov Chain Monte Carlo
Sampling
Instructions
============
:class:`.LikelihoodCalculator` objects allow direct access to the likelihood
function of theoretical models given the measured data. As such they can easily
be used in Bayesian inference using Markov Chain Monte Carlo (MCMC) methods.
ReMU has some built-in support to simplify the use of
:class:`.LikelihoodCalculator` objects with ``emcee``, a MCMC package for
python:
https://emcee.readthedocs.io/
Before we can use the MCMC, we have to create the response matrix,
:class:`.LikelihoodCalculator` objects, just like in the previous examples::
import numpy as np
from matplotlib import pyplot as plt
from remu import binning
from remu import plotting
from remu import likelihood
from remu import likelihood_utils
import emcee
with open("../01/reco-binning.yml", 'rt') as f:
reco_binning = binning.yaml.full_load(f)
with open("../01/optimised-truth-binning.yml", 'rt') as f:
truth_binning = binning.yaml.full_load(f)
reco_binning.fill_from_csv_file("../00/real_data.txt")
data = reco_binning.get_entries_as_ndarray()
data_model = likelihood.PoissonData(data)
response_matrix = "../03/response_matrix.npz"
matrix_predictor = likelihood.ResponseMatrixPredictor(response_matrix)
calc = likelihood.LikelihoodCalculator(data_model, matrix_predictor)
truth_binning.fill_from_csv_file("../00/modelA_truth.txt")
modelA = truth_binning.get_values_as_ndarray()
modelA /= np.sum(modelA)
modelA_shape = likelihood.TemplatePredictor([modelA])
calcA = calc.compose(modelA_shape)
Now we can create a sampler and initial guesses for the parameters
using the :mod:`.likelihood_utils` module::
samplerA = likelihood_utils.emcee_sampler(calcA)
guessA = likelihood_utils.emcee_initial_guess(calcA)
These can then be used to draw from the posterior distribution. Since the
likelihood function used here is not modified by a prior, this is equivalent to
using flat priors for the parameters. See the emcee documentation for details
about how to use these objects::
state = samplerA.run_mcmc(guessA, 100)
chain = samplerA.get_chain(flat=True)
print(chain.shape)
.. include:: chain_shape.txt
:literal:
Note that the number of data points is higher than the requested chain length.
This is due to the fact that ``emcee`` will sample multiple chains in parallel,
which get combined into one long chain when using the ``flat`` option. The
number of chains depends on the number of free parameters.
We can now plot the distribution of the template weight parameter::
fig, ax = plt.subplots()
ax.hist(chain[:,0])
ax.set_xlabel("model A weight")
fig.savefig("burn_short.png")
.. image:: burn_short.png
There clearly is something wrong with this distribution. The MCMC has not
converged yet. The authors of ``emcee`` suggest to use the autocorrelation time
as a measure of whether a chain has converged::
try:
tau = samplerA.get_autocorr_time()
print(tau)
except emcee.autocorr.AutocorrError as e:
print(e)
.. include:: burn_short_tau.txt
:literal:
So let us try again with a longer chain::
samplerA.reset()
state = samplerA.run_mcmc(guessA, 200*50)
chain = samplerA.get_chain(flat=True)
try:
tau = samplerA.get_autocorr_time()
print(tau)
except emcee.autocorr.AutocorrError as e:
print(e)
.. include:: burn_long_tau.txt
:literal:
Now the chain is long enough and we can have another look at the distribution::
fig, ax = plt.subplots()
ax.hist(chain[:,0])
ax.set_xlabel("model A weight")
fig.savefig("burn_long.png")
.. image:: burn_long.png
Now we can discard these events and generate a new set from the last state of
the sampler::
samplerA.reset()
state = samplerA.run_mcmc(state, 100*50)
chain = samplerA.get_chain(flat=True)
try:
tau = samplerA.get_autocorr_time()
print(tau)
except emcee.autocorr.AutocorrError as e:
print(e)
.. include:: tauA.txt
:literal:
::
fig, ax = plt.subplots()
ax.hist(chain[:,0])
ax.set_xlabel("model A weight")
fig.savefig("weightA.png")
.. image:: weightA.png
We can also take a look at how these predictions look in truth and reco space::
truth, _ = modelA_shape(chain)
truth.shape = (np.prod(truth.shape[:-1]), truth.shape[-1])
pltr = plotting.get_plotter(truth_binning)
pltr.plot_array(truth, stack_function=np.median, label="Post. median", hatch=None)
pltr.plot_array(truth, stack_function=0.68, label="Post. 68%", scatter=0)
pltr.legend()
pltr.savefig("truthA.png")
.. image:: truthA.png
::
reco, _ = calcA.predictor(chain)
reco.shape = (np.prod(reco.shape[:-1]), reco.shape[-1])
pltr = plotting.get_plotter(reco_binning)
pltr.plot_array(reco, stack_function=np.median, label="Post. median", hatch=None)
pltr.plot_array(reco, stack_function=0.68, label="Post. 68%")
pltr.plot_array(data, label="Data", hatch=None, linewidth=2)
pltr.legend()
pltr.savefig("recoA.png")
.. image:: recoA.png
All of this also works with more parameters, of course::
truth_binning.reset()
truth_binning.fill_from_csv_file("../00/modelB_truth.txt")
modelB = truth_binning.get_values_as_ndarray()
modelB /= np.sum(modelB)
combined = likelihood.TemplatePredictor([modelA, modelB])
calcC = calc.compose(combined)
samplerC = likelihood_utils.emcee_sampler(calcC)
guessC = likelihood_utils.emcee_initial_guess(calcC)
state = samplerC.run_mcmc(guessC, 200*50)
chain = samplerC.get_chain(flat=True)
print(chain.shape)
.. include:: combined_chain_shape.txt
:literal:
::
try:
tau = samplerC.get_autocorr_time()
print(tau)
except emcee.autocorr.AutocorrError as e:
print(e)
.. include:: burn_combined_tau.txt
:literal:
::
samplerC.reset()
state = samplerC.run_mcmc(state, 100*50)
chain = samplerC.get_chain(flat=True)
try:
tau = samplerC.get_autocorr_time()
print(tau)
except emcee.autocorr.AutocorrError as e:
print(e)
.. include:: combined_tau.txt
:literal:
::
fig, ax = plt.subplots()
ax.hist2d(chain[:,0], chain[:,1])
ax.set_xlabel("model A weight")
ax.set_ylabel("model B weight")
fig.savefig("combined.png")
.. image:: combined.png
::
fig, ax = plt.subplots()
ax.hist(np.sum(chain, axis=-1))
ax.set_xlabel("model A weight + model B weight")
fig.savefig("total.png")
.. image:: total.png
| /remu-0.8.0.tar.gz/remu-0.8.0/docs/examples/04/README.rst | 0.92936 | 0.788135 | README.rst | pypi |
# MCMC posterior sampling script for ReMU docs/examples/04.
# Samples the posterior of template weight(s) with emcee and writes out the
# plots and text snippets that the example README includes verbatim.
import emcee
import numpy as np
from matplotlib import pyplot as plt
from remu import binning, likelihood, likelihood_utils, plotting
# Load the reco and optimised truth binnings created in example 01.
with open("../01/reco-binning.yml") as f:
    reco_binning = binning.yaml.full_load(f)
with open("../01/optimised-truth-binning.yml") as f:
    truth_binning = binning.yaml.full_load(f)
# Build a likelihood calculator from the measured data (Poisson statistics)
# and the response-matrix predictor produced in example 03.
reco_binning.fill_from_csv_file("../00/real_data.txt")
data = reco_binning.get_entries_as_ndarray()
data_model = likelihood.PoissonData(data)
response_matrix = "../03/response_matrix.npz"
matrix_predictor = likelihood.ResponseMatrixPredictor(response_matrix)
calc = likelihood.LikelihoodCalculator(data_model, matrix_predictor)
# Model A: a single normalised truth template with one free weight.
truth_binning.fill_from_csv_file("../00/modelA_truth.txt")
modelA = truth_binning.get_values_as_ndarray()
modelA /= np.sum(modelA)
modelA_shape = likelihood.TemplatePredictor([modelA])
calcA = calc.compose(modelA_shape)
samplerA = likelihood_utils.emcee_sampler(calcA)
guessA = likelihood_utils.emcee_initial_guess(calcA)
# Deliberately short run to illustrate an unconverged (burn-in) chain.
state = samplerA.run_mcmc(guessA, 100)
chain = samplerA.get_chain(flat=True)
with open("chain_shape.txt", "w") as f:
    print(chain.shape, file=f)
fig, ax = plt.subplots()
ax.hist(chain[:, 0])
ax.set_xlabel("model A weight")
fig.savefig("burn_short.png")
# The autocorrelation time estimate fails for chains this short; the
# resulting error message is recorded for the docs.
with open("burn_short_tau.txt", "w") as f:
    try:
        tau = samplerA.get_autocorr_time()
        print(tau, file=f)
    except emcee.autocorr.AutocorrError as e:
        print(e, file=f)
# Longer burn-in run (200 * 50 steps) after discarding the short chain.
samplerA.reset()
state = samplerA.run_mcmc(guessA, 200 * 50)
chain = samplerA.get_chain(flat=True)
with open("burn_long_tau.txt", "w") as f:
    try:
        tau = samplerA.get_autocorr_time()
        print(tau, file=f)
    except emcee.autocorr.AutocorrError as e:
        print(e, file=f)
fig, ax = plt.subplots()
ax.hist(chain[:, 0])
ax.set_xlabel("model A weight")
fig.savefig("burn_long.png")
# Production run for model A, continuing from the burnt-in sampler state.
samplerA.reset()
state = samplerA.run_mcmc(state, 100 * 50)
chain = samplerA.get_chain(flat=True)
with open("tauA.txt", "w") as f:
    try:
        tau = samplerA.get_autocorr_time()
        print(tau, file=f)
    except emcee.autocorr.AutocorrError as e:
        print(e, file=f)
fig, ax = plt.subplots()
ax.hist(chain[:, 0])
ax.set_xlabel("model A weight")
fig.savefig("weightA.png")
# Posterior predictions in truth space: evaluate the template predictor for
# every sampled weight, then plot the median and the 68% band.
truth, _ = modelA_shape(chain)
truth.shape = (np.prod(truth.shape[:-1]), truth.shape[-1])
pltr = plotting.get_plotter(truth_binning)
pltr.plot_array(truth, stack_function=np.median, label="Post. median", hatch=None)
pltr.plot_array(truth, stack_function=0.68, label="Post. 68%", scatter=0)
pltr.legend()
pltr.savefig("truthA.png")
# Posterior predictions folded into reco space, compared with the data.
reco, _ = calcA.predictor(chain)
reco.shape = (np.prod(reco.shape[:-1]), reco.shape[-1])
pltr = plotting.get_plotter(reco_binning)
pltr.plot_array(reco, stack_function=np.median, label="Post. median", hatch=None)
pltr.plot_array(reco, stack_function=0.68, label="Post. 68%")
pltr.plot_array(data, label="Data", hatch=None, linewidth=2)
pltr.legend()
pltr.savefig("recoA.png")
del pltr
# Combined fit: templates for model A and model B, one free weight each.
truth_binning.reset()
truth_binning.fill_from_csv_file("../00/modelB_truth.txt")
modelB = truth_binning.get_values_as_ndarray()
modelB /= np.sum(modelB)
combined = likelihood.TemplatePredictor([modelA, modelB])
calcC = calc.compose(combined)
samplerC = likelihood_utils.emcee_sampler(calcC)
guessC = likelihood_utils.emcee_initial_guess(calcC)
# Burn-in for the combined sampler.
state = samplerC.run_mcmc(guessC, 200 * 50)
chain = samplerC.get_chain(flat=True)
with open("combined_chain_shape.txt", "w") as f:
    print(chain.shape, file=f)
with open("burn_combined_tau.txt", "w") as f:
    try:
        tau = samplerC.get_autocorr_time()
        print(tau, file=f)
    except emcee.autocorr.AutocorrError as e:
        print(e, file=f)
# Production run for the combined fit.
samplerC.reset()
state = samplerC.run_mcmc(state, 100 * 50)
chain = samplerC.get_chain(flat=True)
with open("combined_tau.txt", "w") as f:
    try:
        tau = samplerC.get_autocorr_time()
        print(tau, file=f)
    except emcee.autocorr.AutocorrError as e:
        print(e, file=f)
# 2D posterior of the two template weights.
fig, ax = plt.subplots()
ax.hist2d(chain[:, 0], chain[:, 1])
ax.set_xlabel("model A weight")
ax.set_ylabel("model B weight")
fig.savefig("combined.png")
# Posterior of the total normalisation (sum of both weights).
fig, ax = plt.subplots()
ax.hist(np.sum(chain, axis=-1))
ax.set_xlabel("model A weight + model B weight")
fig.savefig("total.png") | /remu-0.8.0.tar.gz/remu-0.8.0/docs/examples/04/do_mcmc.py | 0.405802 | 0.44559 | do_mcmc.py | pypi |
============
Introduction
============
Forward folding
===============
ReMU is a framework for statistical analyses of binned counting experiments. It
follows a "forward-folding" approach to interpreting the data. To understand
the principle of forward folding we must first understand how we interpret the
process of measuring something:
.. image:: real.svg
Nature provides certain expectation values for how many events with different
properties will happen in the detector. These expectation values do not have to
be integers, as they only describe the average number of events that would
occur in each bin if you repeat the experiment many times. In the actual
experiment, we only get a sample from these expectation values. Since these are
actually occurring events, their numbers must be integers. They are randomly
distributed around the expectation values.
We cannot directly observe these events though. Depending on the properties of
the events and the detector that is recording them, some events are missed
(detection efficiency) and the reconstructed properties are not exactly the
true properties of the events (smearing). The only information that is
available to us is the reconstructed sample of events.
In general, we are interested in the expectation values in the true properties
space of the event (truth space), as the reconstructed properties depend on the
specific experiment (reco space). The best way of checking whether a certain
model prediction is compatible with the measured data, is to do a full detector
simulation:
.. image:: fullsim.svg
By simulating a large number of events it is possible to calculate the
expectation value in the reconstructed space. This can then be compared to the
recorded data using Poissonian statistics.
This is a very involved process that requires a lot of computing power and
expert knowledge of the detector. It is usually only done within the
experiments' collaborations and only for a few select models. This process can
be sped up though, if one can find a universal linear relation between the
expectation values in truth :math:`(\mu_j)` and in reco space :math:`(\nu_i)`:
.. math::
\nu_i = \sum_j P(\text{reco bin} = i \,|\, \text{truth bin} = j) \cdot \mu_j
That relation is the detector response matrix :math:`R_{ij} = P(\text{reco bin}
= i \,|\, \text{truth bin} = j)`:
.. image:: fold.svg
Multiplying a vector of truth expectation values with a matrix is a
computationally simple task. And with the right tools it should be very easily
done also by non-experts (of the detector). ReMU aims to be such a tool.
With the tools, the detector response matrix, and the actual data, it should
be possible to test many different models on short time scales:
.. image:: fasttest.svg
This multiplication of the model predictions with the response matrix is often
called "forward folding". It translates universal model predictions in truth
space to experiment-specific model predictions in reco space. It can be seen
as an alternative (or complementary) approach to "unfolding", where the measured
experiment-specific data in reco space is translated to an unfolded universal
spectrum in truth space.
Building the response matrix
============================
Using a response matrix does not require expert knowledge of the detector it
models. Creating the response matrix and making sure it has the desired
properties does.
The most important property of a "correct" response matrix is its
model-independence. No matter what physics model you use to generate the
truth-space expectation values :math:`\mu_j`, the linear coefficients
:math:`R_{ij}` must stay unchanged (within their uncertainties, see below).
Only then can you use the same matrix to test the many different models
that you are interested in.
In practice this usually means that the binning in truth space must be quite
fine, and in more variables than one is interested in for the measurement. For
example, if in a particle physics experiment one is interested in the momentum
of a particle, but the detection efficiency depends on the particle's
direction, it is necessary to also distinguish events by (i.e. "to bin in") the
true direction of the particle. The binning in reco space does not have to be
affected by this, and can be chosen by the physics aims and expected number of
events. This can lead to very asymmetric response matrices:
.. image:: asym.svg
ReMU provides :class:`.Binning` classes to define the truth and reco binning::
with open("reco-binning.yml", 'r') as f:
reco_binning = binning.yaml.full_load(f)
reco_binning.fill_from_csv_file("real_data.txt")
It also provides methods to plot the content of the bins::
pltr = plotting.get_plotter(reco_binning)
pltr.plot_values()
    pltr.savefig("real_data.png")
.. image:: ../examples/00/real_data.png
These are then combined into a :class:`.ResponseMatrix` object. The default
method to populate the response matrix is by filling it event by event from a
CSV file with the reconstructed and true properties of the events::
with open("reco-binning.yml", 'rt') as f:
reco_binning = binning.yaml.full_load(f)
with open("coarse-truth-binning.yml", 'rt') as f:
truth_binning = binning.yaml.full_load(f)
respA = migration.ResponseMatrix(reco_binning, truth_binning)
respA.fill_from_csv_file("../00/modelA_data.txt")
See :ref:`example00` and :ref:`example01` for details.
ReMU also supports pandas :class:`DataFrame` objects as input for filling
the matrices. Together with the `uproot` package, this allows it to read
in ROOT files. See :ref:`examplePD`.
Detector uncertainties
======================
ReMU bases all comparisons of data and model predictions on the Poissonian
likelihood, i.e. the probability of measuring the given reconstructed data,
assuming that the given model is true:
.. math::
L(\mu) = \prod_i \frac{\nu_i^{n_i} \exp(-\nu_i)}{n_i!}
= \prod_i \frac{(R_{ij}\mu_j)^{n_i} \exp(-R_{ij}\mu_j)}{n_i!}
Here we are using the Einstein summation convention and sum over all doubled
indices, i.e. :math:`R_{ij}\mu_j = \sum_j R_{ij}\mu_j`.
This likelihood assumes a perfect knowledge of the response matrix elements,
i.e. the detector performance. This is rarely the case. Usually the detector
properties (like resolutions and efficiencies) are only known within given
uncertainties. This knowledge can be incorporated into the likelihood:
.. math::
L(\mu) = \int_{\phi} P(\phi) \prod_i \frac{(R(\phi)_{ij}\mu_j)^{n_i} \exp(-R(\phi)_{ij}\mu_j)}{n_i!}
The response matrix :math:`R(\phi)_{ij}` now depends on the detector parameters
:math:`\phi`. The prior knowledge of these parameters lies in their probability
distribution :math:`P(\phi)`.
In practical terms it can be very hard to do the integration over all possible
response matrices. ReMU thus replaces the infinite integral with a sum over a
(sufficiently large) set of matrices sampled from :math:`P(\phi)`:
.. math::
L(\mu) = \frac{1}{N_{\text{toys}}} \sum_{t}^{N_{\text{toys}}} \prod_i \frac{(R^t_{ij}\mu_j)^{n_i} \exp(-R^t_{ij}\mu_j)}{n_i!}
It is common practice to do an analysis with many different assumed detector
properties to evaluate systematic uncertainties. The single instances of the
analysis are often called "universes" or "toys". Each toy (with index
:math:`t`) can be used to create its corresponding response matrix
:math:`R^t_{ij}`. The set of all toy matrices will then include the expert
knowledge of the detector uncertainties, and make it available for the use by
non-experts.
When testing models against the data, each toy matrix will yield its own reco
expectation values and its own Poissonian likelihood. The average over all
toy likelihoods yields the overall likelihood of the tested model:
.. image:: systematics.svg
ReMU handles all of this in the background in the provided
:class:`.LikelihoodCalculator` class. Its instances are created with the
measured data and the toy response matrices provided by the detector experts.
The user then only has to provide a model to be tested and it will return the
total likelihood including all detector effects::
data_model = likelihood.PoissonData(data)
matrix_predictor = likelihood.ResponseMatrixPredictor(response_matrix)
calc = likelihood.LikelihoodCalculator(data_model, matrix_predictor)
log_likelihood = calc(model)
See :ref:`example02` and :ref:`example03` for details.
Frequentist analyses
====================
ReMU offers a couple of methods and classes to help with the statistical
analysis and interpretation of the likelihood information obtained as described
above. For example, the likelihood is only well defined if the model has no
free parameters and all expectation values in truth space are known/predicted.
To deal with models that are not fully constrained, ReMU offers the option to
compose (or "chain") :class:`.Predictor` objects. This way it is possible to
create likelihood calculators that accept arbitrary model parameters, for
example template weights::
modelA_shape = likelihood.TemplatePredictor([modelA])
modelA_reco_shape = matrix_predictor.compose(modelA_shape)
calcA = likelihood.LikelihoodCalculator(data_model, modelA_reco_shape)
log_likelihood = calcA(template_weight)
To actually do hypothesis tests, ReMU provides the :class:`.HypothesisTester`
class::
testA = likelihood.HypothesisTester(calcA)
testA.likelihood_p_value(template_weight)
testA.max_likelihood_p_value()
testA.max_likelihood_ratio_p_value(template_weight)
Likelihood ratio p-values are especially useful to construct confidence
intervals for parameters of a model::
for v in values:
p = testA.max_likelihood_ratio_p_value([v])
p_values.append(p)
If multiple models share a parameter, this can be used to easily compare
how the model assumptions influence the resulting confidence intervals.
Here is a comparison of the total normalisation of two different templates,
each with and without considering the detector systematics:
.. image:: ../examples/03/ratio-p-values.png
See :ref:`example02` and :ref:`example03` for details.
Bayesian analyses
=================
ReMU also offers methods for Bayesian analyses, especially to do a
Markov Chain Monte Carlo (MCMC) sampling of the posterior probability
distribution of hypothesis parameters::
samplerA = likelihood_utils.emcee_sampler(calcA)
guessA = likelihood_utils.emcee_initial_guess(calcA)
state = samplerA.run_mcmc(guessA, 200*50)
chain = samplerA.get_chain(flat=True)
fig, ax = plt.subplots()
ax.hist(chain[:,0])
ax.set_xlabel("model A weight")
fig.savefig("burn_long.png")
.. image:: ../examples/04/burn_long.png
This can easily handle many free parameters at once::
combined = likelihood.TemplatePredictor([modelA, modelB])
calcC = calc.compose(combined)
samplerC = likelihood_utils.emcee_sampler(calcC)
guessC = likelihood_utils.emcee_initial_guess(calcC)
state = samplerC.run_mcmc(guessC, 200*50)
chain = samplerC.get_chain(flat=True)
fig, ax = plt.subplots()
ax.hist2d(chain[:,0], chain[:,1])
ax.set_xlabel("model A weight")
ax.set_ylabel("model B weight")
fig.savefig("combined.png")
.. image:: ../examples/04/combined.png
See :ref:`example04` for details.
Backgrounds
===========
Real experiments have to deal not only with the loss of events (efficiency) and
the slight mis-reconstruction of event properties (smearing), but also with the
erroneous inclusion of events in the data that are not actually part of the
signal definition (background). ReMU is able to handle these events
organically. For this, the response matrix must simply provide a set of truth
bins that correspond to the background events:
.. image:: folded-BG.svg
Depending on the type of background, the model builders might not be able to
predict the expectation values of the background. In this case, the background
expectation values can be left free-floating, as nuisance parameters
in the :class:`.CompositeHypothesis`.
This can lead to a high number of degrees of freedom that make likelihood fits
very difficult, though. Also, the background could be such that the measured
data is not good at constraining its contribution. To deal with that, the
detector experts can provide one or many background templates that describe the
background's shape and/or strength in truth space. These can then be added to
the signal predictions as is, or as part of a simultaneous fit::
combined = likelihood.TemplatePredictor([modelA, modelB, background])
calcC = calc.compose(combined)
For background that is detector specific and does not depend (much) on
(interesting) physics-model parameters, the background templates could also be
made a part of the response matrix:
.. image:: template-BG.svg
See :ref:`example05` for details.
Flux integration
================
So far we have only talked about the translation of expectation values of true
event numbers to expectation values of reconstructed event numbers. To
calculate the expected number of true events, it is often necessary to assume a
certain flux of incoming particles or exposure of an experiment. These
exposures themselves are not certain though and it is usually undesirable to
have the users of a data set deal with the flux uncertainties in their event
predictions.
In the forward-folding scheme, the flux uncertainties can naturally be
incorporated into the detector uncertainties. To calculate event number in
different true kinematic bins, a cross section matrix is multiplied with an
exposure vector. Each row of the matrix corresponds to a true kinematic bin for
which we want to calculate the event numbers, and each column corresponds to
one energy bin in the flux/exposure.
Uncertainties are again handled by providing multiple varied exposure vectors,
and those can then either be matched one-to-one or in a Cartesian product with
the response matrix variations to calculate the varied reconstruction
expectation values and likelihoods.
.. image:: flux-folding.svg
See :ref:`example06` for details.
| /remu-0.8.0.tar.gz/remu-0.8.0/docs/introduction/README.rst | 0.951369 | 0.960137 | README.rst | pypi |
class ReportParser:
"""
Parser with generic functionality for all Report Types (Tabular, Summary, Matrix)
Parameters
----------
report: dict, return value of Connection.get_report()
"""
def __init__(self, report):
self.data = report
self.type = self.data["reportMetadata"]["reportFormat"]
self.has_details = self.data["hasDetailRows"]
def get_grand_total(self):
return self.data["factMap"]["T!T"]["aggregates"][0]["value"]
@staticmethod
def _flatten_record(record):
return [field["label"] for field in record]
def _get_field_labels(self):
columns = self.data["reportMetadata"]["detailColumns"]
column_details = self.data["reportExtendedMetadata"]["detailColumnInfo"]
return {key: column_details[value]["label"] for key, value in enumerate(columns)}
def records(self):
"""
Return a list of all records included in the report. If detail rows are not included
in the report a ValueError is returned instead.
Returns
-------
records: list
"""
if not self.has_details:
raise ValueError('Report does not include details so cannot access individual records')
records = []
fact_map = self.data["factMap"]
for group in fact_map.values():
rows = group["rows"]
group_records = (self._flatten_record(row["dataCells"]) for row in rows)
for record in group_records:
records.append(record)
return records
def records_dict(self):
"""
Return a list of dictionaries for all records in the report in {field: value} format. If detail rows
are not included in the report a ValueError is returned instead.
Returns
-------
records: list of dictionaries in {field: value, field: value...} format
"""
if not self.has_details:
raise ValueError('Report does not include details so cannot access individual records')
records = []
fact_map = self.data["factMap"]
field_labels = self._get_field_labels()
for group in fact_map.values():
rows = group["rows"]
group_records = (self._flatten_record(row["dataCells"]) for row in rows)
for record in group_records:
labelled_record = {field_labels[key]: value for key, value in enumerate(record)}
records.append(labelled_record)
return records
class MatrixParser(ReportParser):
    """
    Parser with specific functionality for matrix reports

    Parameters
    ----------
    report: dict, return value of Connection.get_report()
    """
    def __init__(self, report):
        super().__init__(report)
        self.data = report
        self._check_type()

    def _check_type(self):
        # Fail fast if the report is not a matrix report; every other
        # method relies on the matrix-specific factMap key layout.
        expected = "MATRIX"
        if self.type != expected:
            raise ValueError("Incorrect report type. Expected {}, received {}.".format(expected, self.type))

    def get_col_total(self, col_label, default=None):
        """
        Return the total for the specified column. The default arg makes it possible to specify the return
        value if the column label is not found.

        Parameters
        ----------
        col_label: string
        default: string, optional, default None
            If column is not found determines the return value

        Returns
        -------
        total: int
        """
        grp_across_list = self.data["groupingsAcross"]["groupings"]
        col_dict = {grp['label']: int(grp['key']) for grp in grp_across_list}
        try:
            col_key = col_dict[col_label]
            # Column totals live in the "T!<col>" entries of the factMap.
            return self.data["factMap"]['T!{}'.format(col_key)]["aggregates"][0]["value"]
        except KeyError:
            return default

    def get_row_total(self, row_label, default=None):
        """
        Return the total for the specified row. The default arg makes it possible to specify the return
        value if the column label is not found.

        Parameters
        ----------
        row_label: string
        default: string, optional, default None
            If row is not found determines the return value

        Returns
        -------
        total: int
        """
        grp_down_list = self.data["groupingsDown"]["groupings"]
        row_dict = {grp["label"]: int(grp["key"]) for grp in grp_down_list}
        try:
            row_key = row_dict[row_label]
            # Row totals live in the "<row>!T" entries of the factMap.
            return self.data["factMap"]['{}!T'.format(row_key)]["aggregates"][0]["value"]
        except KeyError:
            return default

    @staticmethod
    def _convert_parameter(parameter):
        # Normalise a groups-of-interest argument: a bare string becomes a
        # one-element list and None becomes an empty list.
        if type(parameter) is str:
            new_parameter = [parameter]
        elif parameter is None:
            new_parameter = []
        elif type(parameter) is list:
            new_parameter = parameter
        else:
            raise ValueError(
                "Parameter must be a str, list or None, received {}".format(type(parameter)))
        return new_parameter

    @staticmethod
    def _get_subgroup_index(group_above, subgroup_name):
        # Position of the named subgroup within its parent grouping list.
        subgroups_with_index = {subgroup['label']: index for index, subgroup in enumerate(group_above)}
        index = subgroups_with_index[subgroup_name]
        return index

    def _get_grouping(self, groups_of_interest, start_grouping, count):
        # Walk `count - 1` levels down the nested grouping structure,
        # following the names in `groups_of_interest`, and return the
        # grouping list that is reached.
        # BUG FIX: the original body additionally made a recursive call
        # `self._get_grouping(group_name, current_grouping, count)` whose
        # return value was discarded and which passed a single group *name*
        # (a string) where a list of names is expected. It was a no-op for
        # depth-2 groupings and could raise a spurious KeyError for deeper
        # ones. The iterative walk alone is sufficient.
        current_grouping = start_grouping
        while count > 1:
            group_name = groups_of_interest[count - 2]
            subgroup_index = self._get_subgroup_index(current_grouping, group_name)
            current_grouping = current_grouping[subgroup_index]["groupings"]
            count -= 1
        return current_grouping

    def _get_static_key(self, groups_of_interest, static_grouping_key):
        # Resolve the factMap key fragment for the fixed (static) axis.
        grouping_depth = len(groups_of_interest)
        group_index = grouping_depth - 1
        top_grouping = self.data[static_grouping_key]["groupings"]
        grouping = self._get_grouping(groups_of_interest, top_grouping, grouping_depth)
        keys = {group['label']: group['key'] for group in grouping}
        static_key = keys[groups_of_interest[group_index]]
        return static_key

    def _get_dynamic_keys(self, groups_of_interest, dynamic_grouping_key):
        # Collect the factMap key fragments and display labels for every
        # entry along the varying (dynamic) axis.
        grouping_depth = len(groups_of_interest) + 1
        top_grouping = self.data[dynamic_grouping_key]["groupings"]
        grouping = self._get_grouping(groups_of_interest, top_grouping, grouping_depth)
        dynamic_keys = [group["key"] for group in grouping]
        labels = [group["label"] for group in grouping]
        return {"keys": dynamic_keys, "labels": labels}

    def _build_keys(self, static_groups_of_interest, dynamic_groups_of_interest, static_grouping_key,
                    dynamic_grouping_key):
        # Combine the single static key with each dynamic key into complete
        # factMap keys of the form "<down>!<across>".
        static_key = self._get_static_key(static_groups_of_interest, static_grouping_key)
        dynamic_keys = self._get_dynamic_keys(dynamic_groups_of_interest, dynamic_grouping_key)
        keys = []
        if static_grouping_key == "groupingsAcross":
            # Static part is the column, dynamic part is the row.
            for el in dynamic_keys["keys"]:
                keys.append("{}!{}".format(el, static_key))
        else:
            # Static part is the row, dynamic part is the column.
            for el in dynamic_keys["keys"]:
                keys.append("{}!{}".format(static_key, el))
        return {"keys": keys, "labels": dynamic_keys["labels"]}

    def _series(self, static_groups_of_interest, static_grouping_key, dynamic_grouping_key,
                dynamic_groups_of_interest=None, value_position=0):
        # Shared implementation behind series_down()/series_across().
        static_groups_of_interest = self._convert_parameter(static_groups_of_interest)
        dynamic_groups_of_interest = self._convert_parameter(dynamic_groups_of_interest)
        keys_labels = self._build_keys(static_groups_of_interest, dynamic_groups_of_interest,
                                       static_grouping_key, dynamic_grouping_key)
        labels = keys_labels["labels"]
        values = []
        for key in keys_labels["keys"]:
            value = self.data["factMap"][key]["aggregates"][value_position]["value"]
            values.append(value)
        series = dict(zip(labels, values))
        return series

    def series_down(self, column_groups, row_groups=None, value_position=0):
        """
        Return selected slice of a report on a vertical axis

        Parameters
        ----------
        column_groups: string or list
            The selected column to return series from
            If multiple grouping levels a list is used to identify grouping of interest
        row_groups: string, list or None, optional, default None
            Limits rows included in Series to those within specified grouping
        value_position: int, default 0
            Index of value of interest, if only one value included by default will select
            correct value

        Returns
        -------
        series: dict, {label: value, ...}
        """
        static_grouping_key = "groupingsAcross"
        dynamic_grouping_key = "groupingsDown"
        return self._series(column_groups, static_grouping_key, dynamic_grouping_key,
                            dynamic_groups_of_interest=row_groups, value_position=value_position)

    def series_across(self, row_groups, col_groups=None, value_position=0):
        """
        Return selected slice of a report on a horizontal axis

        Parameters
        ----------
        row_groups: string or list
            The selected row to return series from
            If multiple grouping levels a list is used to identify grouping of interest
        col_groups: string, list or None, optional, default None
            Limits cols included in Series to those within specified grouping
        value_position: int, default 0
            Index of value of interest, if only one value included by default will select
            correct value

        Returns
        -------
        series: dict, {label: value, ...}
        """
        static_grouping_key = "groupingsDown"
        dynamic_grouping_key = "groupingsAcross"
        return self._series(row_groups, static_grouping_key, dynamic_grouping_key,
                            dynamic_groups_of_interest=col_groups, value_position=value_position)
import nibabel as nib
import numpy as np
import os
from nibabel.processing import conform
from segment.data import fetch
from skimage.measure import label, regionprops
from skimage.transform import resize
from tensorflow.keras import backend as K
from tensorflow.keras.models import load_model
# Define Classes
class Tkv:
"""
A class to generate total kidney masks form T2-weighted images. These
are then used to calculate total kidney volume (TKV).
Attributes
----------
mask : np.ndarray
The estimated probability that each voxel is renal tissue
tkv : np.float64
Total kidney volume (in ml)
lkv : np.float64
Left kidney volume (in ml)
rkv : np.float64
Right kidney volume (in ml)
path : str
Full path to the raw data
directory : str
Directory the raw data is located in
data : np.ndarray
Numpy array of the raw data
affine : np.ndarray
A matrix giving the relationship between voxel coordinates and
world coordinates.
shape : tuple
The shape of the input data/output mask
zoom : tuple
Length of a single voxel along each axis
orientation : tuple
Radiological direction of each axis e.g. ('L', 'S', 'P') means that
increasing in index along the 0 axis is moving towards the left of
the body.
_img : nib.Nifti1Image
Nibabel object of the raw input data
_mask_img : nib.Nifti1Image
Nibabel object of the output mask
"""
    def __init__(self, path_or_buf):
        """
        Initialise the Tkv class instance.

        Parameters
        ----------
        path_or_buf : str or nibabel object
            Path to the input data, can be any data file nibabel is capable
            of reading or the nibabel object itself.
        """
        if type(path_or_buf) is str:
            # Given a filesystem path: remember where the data lives, then
            # load it from disk via _load_data().
            self.path = path_or_buf
            self.directory, self.base, self.extension = self._split_path(self.path)
            # Placeholders (the type objects themselves, not instances);
            # overwritten by _load_data() below.
            self._img = nib.Nifti1Image
            self.data = np.array
            self.affine = np.array
            self.shape = tuple
            self.zoom = tuple
            self.orientation = None
            self._load_data()
        else:
            # Given an already-loaded nibabel image object: pull all
            # geometry information straight from it.
            self._img = path_or_buf
            self.path = self._img.get_filename()
            self.directory, self.base, self.extension = self._split_path(self.path)
            self.data = self._img.get_fdata()
            self.affine = self._img.affine
            self.shape = self._img.shape
            self.zoom = self._img.header.get_zooms()
            self.orientation = nib.orientations.aff2axcodes(self.affine)
        # Mask-related attributes are populated by get_mask(); until then
        # they hold placeholder type objects / NaN volumes.
        self.mask = np.array
        self._mask_img = nib.Nifti1Image
        self.tkv = np.nan
        self.lkv = np.nan
        self.rkv = np.nan
@staticmethod
def _split_path(path):
"""
Split a path to a file into the files directory, file name and file
extension.
"""
if type(path) is not str:
directory, base, extension = None, None, None
else:
directory = os.path.dirname(path)
base = os.path.splitext(os.path.basename(path))[0]
extension = os.path.splitext(os.path.basename(path))[1]
if extension == '.gz' and base[-4:] == '.nii':
extension = '.nii.gz'
base = base[:-4]
return directory, base, extension
    def _load_data(self):
        """
        Load raw data into the class. Loads Philips PAR/REC in floating
        point mode.
        """
        if self.extension == '.PAR':
            # Philips PAR/REC: 'fp' asks nibabel for floating point scaling.
            self._img = nib.load(self.path, scaling='fp')
        else:
            self._img = nib.load(self.path)
        # Cache the voxel data and geometry so callers never need to touch
        # the nibabel image object directly.
        self.data = self._img.get_fdata()
        self.affine = self._img.affine
        self.shape = self._img.shape
        self.zoom = self._img.header.get_zooms()
        # Radiological axis codes for each array axis, e.g. ('L', 'S', 'P').
        self.orientation = nib.orientations.aff2axcodes(self.affine)
    def get_mask(self, weights_path=None, post_process=True,
                 binary=True, inplace=False):
        """
        Estimate a mask from the provided input data.

        Parameters
        ----------
        weights_path : str, optional
            Path to custom neural network weights. Defaults to segment home
            and will download the latest weights if nothing is specified.
        post_process : bool, optional
            Default True
            Keep only the two largest connected volumes in the mask. Note
            this may cause issue with subjects that have more or less than
            two kidneys.
        binary : bool, optional
            Default True.
            If True, the mask returned will be an array of ints, where 1
            represents voxels that are renal tissue and 0 represents
            voxels that are not renal tissue. If False, the mask returned
            will be the probability that each voxel is renal tissue.
        inplace : bool, optional
            Default False
            If true, no numpy array of the mask will be returned, instead
            only the mask attributes in the class will be updated. Can be
            useful if only kidney volumes are desired rather than the voxel
            by voxel masks.

        Returns
        -------
        mask : np.ndarray, optional
            The estimated probability that each voxel is/binary mask of renal
            tissue
        """
        if weights_path is None:
            # Fetch the default network weights via segment's fetch helper.
            weights_path = fetch.Weights().path
        # Resample onto a fixed grid: 240x240 in-plane at 1.458 mm, keeping
        # the original number of slices, in 'LIP' orientation.
        img = conform(self._img, out_shape=(240, 240, self.shape[-1]),
                      voxel_size=(1.458, 1.458, self.zoom[-1] * 0.998),
                      orientation='LIP')
        data = img.get_fdata()
        data = np.flip(data, 1)
        # Reorder axes so each slice is one sample: (slices, rows, cols).
        data = np.swapaxes(data, 0, 2)
        data = np.swapaxes(data, 1, 2)
        # Normalise intensities (default window: mean - 0.5 std to
        # mean + 4 std, see _rescale).
        data = self._rescale(data)
        # Network input is 256x256 per slice with a trailing channel axis.
        data = resize(data, (data.shape[0], 256, 256))
        data = data.reshape((data.shape[0], data.shape[1], data.shape[2], 1))
        # The saved model references custom Dice loss/metric, so they must
        # be supplied at load time.
        model = load_model(weights_path,
                           custom_objects={'dice_coef_loss':
                                           self._dice_coef_loss,
                                           'dice_coef': self._dice_coef})
        batch_size = 2 ** 3
        mask = model.predict(data, batch_size=batch_size)
        # Undo the axis manipulations so the mask matches the conformed
        # image grid again.
        mask = np.squeeze(mask)
        mask = np.swapaxes(mask, 0, 2)
        mask = np.swapaxes(mask, 0, 1)
        mask = np.flip(mask, 1)
        mask = resize(mask, (240, 240, self.shape[-1]))
        if post_process:
            # NOTE(review): _cleanup is not visible in this excerpt --
            # presumably it keeps only the largest connected components.
            # Voxels outside the cleaned mask are zeroed.
            cleaned_mask = self._cleanup(mask > 0.05)
            mask[cleaned_mask < 0.5] = 0.0
        # Resample the mask back onto the original image grid/orientation.
        mask_img = nib.Nifti1Image(mask, img.affine)
        self._mask_img = conform(mask_img,
                                 out_shape=self.shape,
                                 voxel_size=self.zoom,
                                 orientation=self.orientation)
        # conform interpolation can leave values outside [0, 1]; clip back.
        self.mask = self._rescale(self._mask_img.get_fdata(), 0, 1)
        if binary:
            self.mask = np.round(self.mask).astype(np.uint16)
            self._mask_img = nib.Nifti1Image(self.mask, self._mask_img.affine)
        # Kidney volumes in millilitres (voxel count * voxel volume / 1000).
        # NOTE(review): the left/right split at index 120 assumes axis 0 of
        # the native grid is the left-right axis split at its midpoint --
        # TODO confirm for non-240-wide or differently oriented inputs.
        self.tkv = (np.sum(self.mask > 0.5) *
                    np.prod(self.zoom))/1000
        self.lkv = (np.sum(self.mask[120:] > 0.5) *
                    np.prod(self.zoom))/1000
        self.rkv = (np.sum(self.mask[:120] > 0.5) *
                    np.prod(self.zoom)) / 1000
        if not inplace:
            return self.mask
def mask_to_nifti(self, path=None):
"""
Save the estimated mask as a nifti file.
Parameters
----------
path : str, optional
Path to the folder where the nifti file will be saved. Default
is the same as the raw data, with _mask appended to the filename.
"""
if path is None:
if self.directory is None:
raise TypeError('Directory could not be inferred from input '
'data, please specify the `path` argument in '
'mask_to_nifti.')
elif self.base is None:
raise TypeError('Filename could not be inferred from input '
'data, please specify the `path` argument in '
'mask_to_nifti.')
else:
path = os.path.join(self.directory, self.base + '_mask.nii.gz')
# Generate the mask if that hasn't already been done
if type(self._mask_img) is type:
self.get_mask(inplace=True)
nib.save(self._mask_img, path)
def data_to_nifti(self, path=None):
"""
Save the raw data as a nifti file.
Parameters
----------
path : str, optional
Path to the folder where the nifti file will be saved. Default
is the same as the raw data.
"""
if path is None:
if self.directory is None:
raise TypeError('Directory could not be inferred from input '
'data, please specify the `path` argument in '
'data_to_nifti.')
elif self.base is None:
raise TypeError('Filename could not be inferred from input '
'data, please specify the `path` argument in '
'data_to_nifti.')
else:
path = os.path.join(self.directory, self.base + '.nii.gz')
nib.save(self._img, path)
@staticmethod
def _rescale(data, black=None, white=None):
"""
Rescaled the intensity of a image so that the value of black is 0 and
the value of white is 1. If black and white values aren't specified,
they are set to half a standard deviation below the mean and four
standard deviations above the mean respectively.
"""
if black is None:
black = np.mean(data) - 0.5 * np.std(data)
if black < data.min():
black = data.min()
if white is None:
white = np.mean(data) + 4 * np.std(data)
if white > data.max():
white = data.max()
data = np.clip(data, black, white) - black
data = data / (white - black)
return data
@staticmethod
def _cleanup(mask):
"""
Removes all but the two largest connected areas in the mask.
"""
clean_mask = np.zeros(mask.shape, dtype=np.uint8)
label_mask = label(mask > 0.5, connectivity=1)
props = regionprops(label_mask)
areas = [region.area for region in props]
# This means there have to be two kidneys in the scan...
kidney_labels = np.argpartition(areas, -2)[-2:]
clean_mask[label_mask == props[kidney_labels[0]].label] = 1
clean_mask[label_mask == props[kidney_labels[1]].label] = 1
return clean_mask
@staticmethod
def _dice_coef(y_true, y_pred):
smooth = 1.0
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / (
K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def _dice_coef_loss(self, y_true, y_pred):
loss = 1 - self._dice_coef(y_true, y_pred)
return loss | /renalsegmentor-1.3.8-py3-none-any.whl/segment/segment.py | 0.775477 | 0.608478 | segment.py | pypi |
import re
import sys
import logging
from typing import Optional, List
import tqdm
import click
import requests
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
__version__ = None
logger = logging.getLogger(__name__)
GITHUB_API_URL = "https://api.github.com"
def match_repo_name(patterns: List[str], name: str) -> bool:
    """Return True if *name* matches any pattern, or if no patterns are given.

    NOTE(review): despite the ``List[str]`` annotation, callers pass
    pre-compiled regex objects (see ``list_repos``) -- confirm before
    tightening the annotation.
    """
    if not patterns:
        return True
    return any(pattern.search(name) for pattern in patterns)
def list_repos(
    session: requests.Session, current: str, patterns: List[str],
) -> List[str]:
    """Return full names of the user's non-fork repositories matching *patterns*.

    Follows GitHub's pagination via the ``Link: rel="next"`` header.
    NOTE(review): ``current`` is accepted but unused -- kept for signature
    compatibility with callers.
    """
    compiled = [re.compile(p, re.I) for p in patterns]
    # Without name filters, restrict the listing to repos the user owns.
    params = {} if compiled else {"affiliation": "owner"}
    response = session.get(GITHUB_API_URL + "/user/repos", params=params)
    response.raise_for_status()
    repos: List[str] = []
    while True:
        for repo in response.json():
            full_name = repo["full_name"]
            if not repo["fork"] and match_repo_name(compiled, full_name):
                repos.append(full_name)
        next_link = response.links.get("next")
        if next_link is None:
            break
        response = session.get(next_link["url"])
        response.raise_for_status()
    return repos
def rename_default_branch(
    session: requests.Session,
    repo_name: str,
    current: str,
    target: str,
    delete_current: bool = False,
) -> None:
    """Rename the default branch of *repo_name* from *current* to *target*.

    Creates *target* at the same commit as *current*, switches the repo's
    default branch, and optionally deletes *current*. Silently returns when
    *current* does not exist or the repo is read-only.
    """
    # First, look up the SHA for the current default branch
    r = session.get(
        GITHUB_API_URL + f"/repos/{repo_name}/git/refs/heads/{current}"
    )
    if r.status_code == 404:
        logger.info(f"no branch named {current} on {repo_name}")
        return
    r.raise_for_status()
    sha = r.json()["object"]["sha"]
    # Try to create a new branch with the name given by target
    r = session.post(
        GITHUB_API_URL + f"/repos/{repo_name}/git/refs",
        json={"ref": f"refs/heads/{target}", "sha": sha},
    )
    if r.status_code == 422:
        logger.info(f"branch {target} already exists on {repo_name}")
        # If the branch already exists, make sure that it points at the
        # same commit as the current default branch; abort if it doesn't.
        r = session.get(
            GITHUB_API_URL + f"/repos/{repo_name}/git/refs/heads/{target}"
        )
        r.raise_for_status()
        if r.json()["object"]["sha"] != sha:
            logger.warning(
                f"the SHA of branch {target} on {repo_name} does not match "
                f"{current}"
            )
            return
    else:
        # This happens if the repo is read-only
        if r.status_code == 403:
            return
        r.raise_for_status()
    # Rename the default branch
    r = session.patch(
        GITHUB_API_URL + f"/repos/{repo_name}",
        json={"name": repo_name.split("/")[1], "default_branch": target},
    )
    r.raise_for_status()
    # Delete the existing branch if requested
    if delete_current:
        r = session.delete(
            GITHUB_API_URL + f"/repos/{repo_name}/git/refs/heads/{current}"
        )
        r.raise_for_status()
@click.command()
@click.option(
    "--token", help="A personal access token for this user", type=str
)
@click.option(
    "--current",
    "-c",
    help="The current default branch name to change",
    type=str,
    default="master",
)
@click.option(
    "--target",
    "-t",
    help="The new default branch name to use",
    type=str,
    default="main",
)
@click.option(
    "--repo",
    "-r",
    help="The name of a specific repository",
    multiple=True,
    type=str,
)
@click.option(
    "--pattern",
    "-p",
    help="A regular expression to match against the repository name",
    multiple=True,
    type=str,
)
@click.option(
    "--delete",
    "-d",
    help="Should the current default branch be deleted?",
    is_flag=True,
)
@click.option("--version", help="Print the version number", is_flag=True)
def _main(
    token: Optional[str],
    current: str,
    target: str,
    repo: List[str],
    pattern: List[str],
    delete: bool,
    version: bool,
) -> None:
    """Rename the default branch of the selected GitHub repositories.

    NOTE(review): the int returns below are only meaningful when the command
    is invoked programmatically (standalone_mode=False); in click's default
    standalone mode the process exit code is handled by click -- confirm
    before relying on these as shell exit statuses.
    """
    if version:
        print(f"rename-github-default-branch v{__version__}")
        return 0
    if not token:
        print(
            "A GitHub.com personal access token must be provided either via "
            "the environment variable 'RENAME_GITHUB_TOKEN' or the command "
            "line flag '--token'"
        )
        return 1
    with requests.Session() as session:
        # Every API call shares one session carrying auth + JSON headers.
        session.headers.update(
            {
                "Authorization": f"token {token}",
                "Content-Type": "application/json",
                "Accept": "application/vnd.github.v3+json",
            }
        )
        if not repo:
            # No explicit repos given: fall back to every matching repo.
            repo = list_repos(session, current, pattern)
        with tqdm.tqdm(total=len(repo)) as bar:
            for r in repo:
                bar.set_description_str(r)
                rename_default_branch(
                    session, r, current, target, delete_current=delete
                )
                bar.update()
    return 0
def main():
    # Console-script entry point: lets click read options from
    # RENAME_GITHUB_* environment variables (e.g. RENAME_GITHUB_TOKEN).
    return _main(auto_envvar_prefix="RENAME_GITHUB")
if __name__ == "__main__":
sys.exit(_main(auto_envvar_prefix="RENAME_GITHUB")) | /rename_github_default_branch-0.0.3.tar.gz/rename_github_default_branch-0.0.3/rename_github_default_branch.py | 0.54819 | 0.159087 | rename_github_default_branch.py | pypi |
from pathlib import Path
from typing import Optional
import typer
from rename_kicad_project.manipulator import Manipulator
app = typer.Typer()
common_args = {"dry_run": False}
arg_src_dir = typer.Argument(..., help="Source KiCad project dir.")
manipulator = Manipulator()
@app.command()
def rename(
    src_dir: Path = arg_src_dir,
    new_project_name: str = typer.Argument(..., help="New project name."),
):
    """Rename KiCad project in place."""
    # Read the global --dry-run flag stashed by the app callback.
    dry_run = common_args["dry_run"]
    manipulator.check_src_dir(src_dir)
    prev_project_name, target_files = manipulator.list_target_files(src_dir)
    manipulator.rename_project(
        target_files, prev_project_name, new_project_name, dry_run
    )
@app.command()
def clone(
    src_dir: Path = arg_src_dir,
    dest_dir: Path = typer.Argument(..., help="New project dir."),
    new_project_name: Optional[str] = typer.Option(
        None,
        "--project-name",
        "-p",
        help="New project name. By default it's inferred from 'DEST'.",
    ),
):
    """Clone KiCad project with a new project name."""
    # Read the global --dry-run flag stashed by the app callback.
    dry_run = common_args["dry_run"]
    manipulator.check_src_dir(src_dir)
    prev_project_name, target_files = manipulator.list_target_files(src_dir)
    manipulator.check_dest_dir(dest_dir, dry_run)
    # Infer new_project_name from dest_dir if it's not specified
    if new_project_name is None:
        new_project_name = dest_dir.name
    manipulator.clone_project(
        target_files, prev_project_name, new_project_name, dest_dir, dry_run
    )
@app.callback()
def main(
    dry_run: bool = typer.Option(
        False, "-n", "--dry-run", help="Just shows possible manupilations and exits."
    )
):
    """
    Example of use:
    \b
    $ rename-kicad-project rename ../foo new_project_name
    >>> ../foo/old_project_name{.pro, .sch, ...}
        will be renamed with new_project_name.pro, ...
    \b
    $ rename-kicad-project -n rename ../project_dir new_project_name
    >>> Run the first example with --dry-run to see possible changes.
    \b
    $ rename-kicad-project clone ./foo /tmp/bar
    >>> ./foo/project_name{.pro, .sch, ...}
        will be copied into /tmp/bar/project_name.pro, ...
    >>> /tmp/bar will be automatically created if it doesn't exist yet.
    \b
    $ rename-kicad-project clone ./foo /tmp/bar -p new_project_name
    >>> ./foo/old_project_name{.pro, .sch, ...}
        will be copied into /tmp/bar/new_project_name,pro, ...
    """
    # Stash the app-level flag where the subcommands can read it.
    common_args["dry_run"] = dry_run
to the folder with the director’s name."""
import os
import subprocess as sp
from argparse import ArgumentParser
from itertools import islice
from pathlib import Path
import isle
import macos_tags
TMDB_API_KEY = os.environ.get("TMDB_API_KEY")
FORMAT = "{year} - {first_title}{second_title}"
YES_OR_NO = "(y/n, default y)"
STYLE = {
    "purple": "\033[95m",
    "cyan": "\033[96m",
    "darkcyan": "\033[36m",
    "blue": "\033[94m",
    "green": "\033[92m",
    "yellow": "\033[93m",
    "red": "\033[91m",
    "bold": "\033[1m",
    "underline": "\033[4m",
    "end": "\033[0m",
}
def stylized(style, string):
    """Wrap *string* in the ANSI escape codes for *style*."""
    return STYLE[style] + string + STYLE["end"]
def add_tag_to(path, *, tag):
    # Attach a macOS Finder tag to the file at *path*.
    macos_tags.add(tag, file=path)
def parse_args():
    """Parse CLI arguments into a dict: path, n (result count), api (TMDb key)."""
    parser = ArgumentParser()
    parser.add_argument("path", type=str, help="the path to a movie file")
    parser.add_argument("-n", type=int, default=5, help="number of search results")
    parser.add_argument("-k", "--api", type=str, default=None, help="TMDb API key")
    return vars(parser.parse_args())
def ask_title():
    # Prompt for the movie title to search for.
    return input(f"\n{stylized('bold', 'Title:')} ")
def ask_year():
    # Prompt for the release year (free-form; forwarded to the search call).
    return input(f"{stylized('bold', 'Year:')} ")
def _y_or_n(ask):
    """Prompt repeatedly until the user answers 'y' or 'n'.

    Empty input counts as 'y'. Returns True for yes, False for no.
    """
    while True:
        answer = input(f"\n{stylized('bold', ask)} ") or "y"
        if answer == "y":
            return True
        if answer == "n":
            return False
def ask_rename():
    # y/n: rename the file according to the chosen movie's metadata?
    return _y_or_n("Needs to be renamed (y/n, default y):")
def ask_genres():
    # y/n: add genre tags to the file?
    return _y_or_n("Add genres (y/n, default y):")
def ask_move():
    # y/n: move the file into a folder named after the director(s)?
    return _y_or_n("Move to the folder with the director’s name (y/n, default y):")
def print_movies(movies):
    # Display search results as a numbered "<year> - <original title>" list.
    print(f"\n{stylized('bold', 'SEARCH RESULTS 🔎')}\n")
    for i, movie in enumerate(movies, 1):
        print(f"{i}. {movie.year or '----'} - {movie.title['original']}")
def ask_movie(movies):
    """Prompt for a 1-based search-result index and return the chosen movie.

    Empty input selects 1. Re-prompts on out-of-range or non-numeric input.
    """
    ask = "Choose a movie (1 is by default):"
    while True:
        raw = input(f"\n{stylized('bold', ask)} ") or "1"
        try:
            i = int(raw)
        except ValueError:
            # BUG FIX: non-numeric input used to raise and crash the prompt.
            continue
        if i in range(1, len(movies) + 1):
            break
    return movies[i - 1]
def assemble_name(movie):
    """Build the new filename stem from FORMAT (year + original/US titles)."""
    frst_title = movie.title["original"]
    scnd_title = get_second_title(movie)
    name = FORMAT.format(
        year=movie.year, first_title=frst_title, second_title=scnd_title
    )
    return name
def get_second_title(movie):
    """Return ' (<US or default title>)', or '' if absent or a duplicate.

    The secondary title is dropped when it is falsy or equal to the
    original title ignoring case.
    """
    secondary = movie.title.get("US", movie.title["default"])
    if not secondary:
        return ""
    if secondary.lower() == movie.title["original"].lower():
        return ""
    return f" ({secondary})"
def get_directors_names(movie):
    """Return the movie's directors as sorted, '; '-joined 'Last, First' names."""
    last_first = []
    for person, credit in movie.crew:
        if credit.job != "Director":
            continue
        # Split off the last word as the surname and put it first.
        parts = person.name.rsplit(maxsplit=1)
        last_first.append(", ".join(reversed(parts)))
    return "; ".join(sorted(last_first))
def print_done():
    # Final confirmation message.
    print(f"\n{stylized('bold', 'All done!')} 👍")
def main():
    """Interactive flow: search TMDb, then optionally rename, tag and file the movie."""
    args = parse_args()
    path, n, tmdb_api_key = Path(args["path"]), args["n"], args["api"]
    # A key passed on the command line wins over the TMDB_API_KEY env var.
    isle.TMDB_API_KEY = tmdb_api_key or TMDB_API_KEY
    title = ask_title()
    year = ask_year()
    rename = ask_rename()
    genres = ask_genres()
    move = ask_move()
    # Take at most n search results.
    movies = list(islice(isle.search_movie(title, year=year), n))
    print_movies(movies)
    movie = ask_movie(movies)
    if rename:
        # Keep the original file extension.
        name = assemble_name(movie) + path.suffix
        new_path = path.parent / name
        path.rename(new_path)
        path = new_path
    if genres:
        # Tag with a generic 'Movie' tag plus each of the movie's genres.
        add_tag_to(path, tag="Movie")
        for genre in map(str, movie.genres):
            add_tag_to(path, tag=genre)
    if move:
        dirname = get_directors_names(movie)
        folder = path.parent / dirname
        folder.mkdir(exist_ok=True)
        new_path = folder / f"{path.name}"
        path.rename(new_path)
    print_done()
if __name__ == "__main__":
main() | /rename_movie-1.2.1.tar.gz/rename_movie-1.2.1/rename_movie.py | 0.552057 | 0.234385 | rename_movie.py | pypi |
# Introducing Jupyter Notebooks
_(The example used here is JamesALeedham's notebook: [intro.ipynb](https://github.com/JamesALeedham/Sphinx-Autosummary-Recursion/blob/master/docs/notebooks/intro.ipynb))_
First, set up the environment:
```
import matplotlib
import matplotlib.pyplot as pl
import numpy as np
try:
from IPython import get_ipython
get_ipython().run_line_magic('matplotlib', 'inline')
except AttributeError:
print('Magic function can only be used in IPython environment')
matplotlib.use('Agg')
pl.rcParams["figure.figsize"] = [15, 8]
```
Then, define a function that creates a pretty graph:
```
def SineAndCosineWaves():
# Get a large number of X values for a nice smooth curve. Using Pi as np.sin requires radians...
x = np.linspace(0, 2 * np.pi, 180)
# Convert radians to degrees to make for a meaningful X axis (1 radian = 57.29* degrees)
    # Convert radians to degrees to make for a meaningful X axis (1 radian ≈ 57.296 degrees)
    xdeg = 57.29577951308232 * np.array(x)
# Calculate the sine of each value of X
y = np.sin(x)
# Calculate the cosine of each value of X
z = np.cos(x)
# Plot the sine wave in blue, using degrees rather than radians on the X axis
pl.plot(xdeg, y, color='blue', label='Sine wave')
# Plot the cos wave in green, using degrees rather than radians on the X axis
pl.plot(xdeg, z, color='green', label='Cosine wave')
pl.xlabel("Degrees")
# More sensible X axis values
pl.xticks(np.arange(0, 361, 45))
pl.legend()
pl.show()
```
Finally, call that function to display the graph:
```
SineAndCosineWaves()
```
| /renamed_project_orl-2.0.0.tar.gz/renamed_project_orl-2.0.0/docs/notebooks/intro_notebook.ipynb | 0.799325 | 0.898678 | intro_notebook.ipynb | pypi |
from typing import Dict, List
from re import findall, compile, fullmatch
from math import log, ceil
from random import Random
from core.directory import *
from util.util import require_non_none
class RandomizeDecorator(Directory):
    """Decorator which randomly shuffles the numerical values of all files."""

    def __init__(self, decorated: Directory, seed: int = None):
        """
        :param decorated: Decorated Directory
        :param seed: Seed to be used ('None' for system's clock)
        """
        self.__decorated = require_non_none(decorated)
        self.__seed = seed

    def get_files(self) -> Dict[str, FileMetadata]:
        return self.__decorated.get_files()

    def operate(self) -> None:
        self.__decorated.operate()
        metas = list(self.get_files().values())
        rng = Random(self.__seed)
        # Fisher-Yates shuffle applied to the 'num' fields only, leaving
        # the metadata objects themselves in place.
        for idx in range(len(metas) - 1):
            swap_idx = rng.randrange(idx, len(metas))
            if swap_idx != idx:
                a, b = metas[idx], metas[swap_idx]
                a.num, b.num = b.num, a.num
class ExtensionDecorator(Directory):
    """Decorator which rewrites the file extension of every file."""

    def __init__(self, decorated: Directory, ext: str):
        """
        :param decorated: Decorated directory
        :param ext: Extension to be set
        """
        self.__decorated = require_non_none(decorated)
        self.__ext = require_non_none(ext)

    def get_files(self) -> Dict[str, FileMetadata]:
        return self.__decorated.get_files()

    def operate(self) -> None:
        self.__decorated.operate()
        new_ext = self.__ext
        for metadata in self.get_files().values():
            metadata.ext = new_ext
class FormatDecorator(Directory):
    __format_specifier = "${}"  # Substitution constant for optional input regex
    def __init__(self, decorated: Directory, fmt: List[str]):
        """
        Decorator which modifies the output filename into a specified format
        The format string must contain a '$d' specifier, designated for the numerical pattern.
        e.g. [4.doc, 5.doc] -> FormatDecorator("Homework ($d)") -> [Homework (4).doc, Homework (5).doc]
        :param decorated: Decorated directory
        :param fmt: List holding the output format and, optionally, an input
            capture regex (see operate); passed through from the CLI's
            '--fmt' option, which accepts one or two values
        """
        self.__decorated = require_non_none(decorated)
        self.__fmt = require_non_none(fmt)
    def get_files(self) -> Dict[str, FileMetadata]:
        return self.__decorated.get_files()
    def operate(self) -> None:
        self.__decorated.operate()
        files, fmt = self.get_files(), self.__fmt
        """
        Index 1: Output Format
        * Must contain '$d' format specifier for numerical pattern
        * May contain '$1', '$2', etc format specifiers for regex substitution
        Index 2: [Optional] Input Capture
        * Regex string which should include capture groups
        * Any captured group will be substituted into the output format
        * Captured groups correspond to their format specifiers, in order of capture
        * e.g. (Apple) Pear (Banana), '$1' corresponds to 'Apple', '$2' -> 'Banana'
        """
        output = fmt[0]
        if len(fmt) > 1:  # User wants to capture input
            pattern = compile(fmt[1])
            for k, v in files.items():
                # Start from the raw output format for each file, then
                # substitute $1, $2, ... with this file's captured groups.
                fmt = output
                m = fullmatch(pattern, k)
                if m is None:
                    raise Exception("Optional format regex does not match file: {}".format(k))
                groups = m.groups()
                num_groups = len(groups)
                for i in range(num_groups):
                    target = FormatDecorator.__format_specifier.format(i + 1)
                    fmt = fmt.replace(target, groups[i])
                v.fmt = fmt
        else:  # No regex capture provided, no need for substitution
            for k, v in files.items():
                v.fmt = output
class ZeroesDecorator(Directory):
    def __init__(self, decorated: Directory, digits: int = 0):
        """
        Decorator which inserts leading zeroes preceding the numerical value
        e.g. [7.png, 300.png] -> ZeroesDecorator(3) -> [007.png, 300.png]
        :param decorated: Decorated directory
        :param digits: Number of desired digits for the numerical value (0 for automatic)
        """
        self.__decorated = require_non_none(decorated)
        self.__digits = require_non_none(digits)

    def get_files(self) -> Dict[str, FileMetadata]:
        return self.__decorated.get_files()

    def operate(self) -> None:
        self.__decorated.operate()
        files = self.get_files()
        # Pad to the larger of the requested width and the widest number present.
        large = max(map(lambda x: x.num, files.values()))
        digits = max(self.__digits, self.__count_digits(large))
        for v in files.values():
            v.fnum = (digits - self.__count_digits(v.num)) * "0" + str(v.num)

    @staticmethod
    def __count_digits(num: int):
        """
        :param num: Number to count digits of
        :return: Number of digits present (sign excluded)
        """
        # BUG FIX: ceil(log10(|n| + 1)) returned 0 digits for n == 0 and is
        # vulnerable to float rounding; len(str()) is exact for all ints.
        return len(str(abs(num)))
class ShifterDecorator(Directory):
    def __init__(self, decorated: Directory, shift: int):
        """
        Decorator which shifts all numerical values in filenames by a specified offset
        e.g. [50.mkv] -> ShifterDecorator(-5) -> [45.mkv]
        :param decorated: Decorated directory
        :param shift: int Offset to shift by
        :raises ValueError: if shift is zero (a no-op shift)
        """
        self.__decorated = require_non_none(decorated)
        self.__shift = require_non_none(shift)
        if shift == 0:
            # BUG FIX: '"..." + shift' raised TypeError (str + int) instead
            # of the intended ValueError with a readable message.
            raise ValueError("File number shift is invalid: {}".format(shift))

    def get_files(self) -> Dict[str, FileMetadata]:
        return self.__decorated.get_files()

    def operate(self) -> None:
        self.__decorated.operate()
        shift = self.__shift
        for metadata in self.get_files().values():
            metadata.num = metadata.num + shift
class FlattenDecorator(Directory):
    def __init__(self, decorated: Directory):
        """
        Decorator which flattens the numerical pattern, ensuring all files are consecutive
        e.g. [ "15.avi", "24.avi", "101.avi" ] -> [ "15.avi", "16.avi", "17.avi" ]
        :param decorated: Decorated directory
        """
        self.__decorated = require_non_none(decorated)

    def get_files(self) -> Dict[str, FileMetadata]:
        return self.__decorated.get_files()

    def operate(self) -> None:
        self.__decorated.operate()
        files = self.get_files()
        if len(files) <= 1:
            return  # Zero or one file is trivially consecutive.
        ordered = sorted(files.values(), key=lambda m: m.num)
        base = int(ordered[0].num)  # Smallest number anchors the sequence.
        for offset, metadata in enumerate(ordered[1:], start=1):
            metadata.num = base + offset
class NumeratedDecorator(Directory):
    def __init__(self, decorated: Directory):
        """
        Decorator which initializes the numerical pattern according to the filenames in the directory.
        This decorator is an initialization operation and must be called before other decorators.
        e.g. [ "MyPhoto34HighRes.png", "MyPhoto36HighRes.png" ] ->
        { "MyPhoto34HighRes.png": 34, "MyPhoto36HighRes.png": 36 }
        :param decorated: Decorated directory
        """
        self.__decorated = require_non_none(decorated)
    def get_files(self) -> Dict[str, FileMetadata]:
        return self.__decorated.get_files()
    def operate(self) -> None:
        """
        Determine which 'run' of digits in the filenames forms the numbering
        pattern (the run whose values are unique across every file), then
        assign each file's 'num' from it.
        :raises Exception: if zero or more than one run is uniquely numbered
        """
        self.__decorated.operate()
        files = self.get_files()
        regexp = "([0-9]+)"  # Captures all 'runs' of integers in the filename
        runs_by_file = {f: [int(e) for e in findall(regexp, f)] for f in files}
        # Count of numerical 'runs' in which the filenames all share
        # NOTE(review): min() raises ValueError on an empty directory --
        # confirm callers guarantee at least one file.
        num_runs = min(map(lambda x: len(x), runs_by_file.values()))
        # Keep only run indices whose values are unique across all files.
        unique_runs = list(filter(
            lambda x: NumeratedDecorator.__is_unique_run_set(runs_by_file, x), list(range(0, num_runs))))
        if len(unique_runs) > 1:
            # Several candidate runs: report the ambiguity per file and abort.
            for t in runs_by_file.items():
                print("{}\n\tAmbiguous numbering: {}".format(t[0], ", ".join(map(lambda x: str(t[1][x]), unique_runs))))
            raise Exception("A numerated pattern could not be differentiated")
        if len(unique_runs) <= 0:
            raise Exception("A numerated pattern is not present in the directory")
        unique_runs = unique_runs[0]
        for k, v in files.items():
            v.num = runs_by_file[k][unique_runs]  # Pattern can now be determined
    @staticmethod  # Returns true if all runs at an index are unique
    def __is_unique_run_set(runs: Dict[str, List[int]], index: int):
        s = set()
        for nums in runs.values():
            e = nums[index]
            if e in s:
                return False
            s.add(e)
        return True
from abc import abstractmethod, ABC
from typing import Dict
from os import listdir, rename
from os.path import isdir, join
from re import compile
from util.util import require_non_none
class FileMetadata:
    # Pattern to extract extensions from filenames
    __ext_pattern = compile(r"^.*\.([^.]+)$")
    # Placeholder substituted with the (formatted) file number in __str__.
    __fmt_specifier = "$d"

    def __init__(self, filename: str):
        """
        Constructs metadata for a specified file.
        The following metadata is tracked:
        * name: Original filename with extension
        * fmt: Output format of the file, which must contain '$d' (file number)
        * num: File number (unknown during initialization)
        * fnum: Formatted number, used to override 'num'
        * ext: Extension of the file
        :param filename: Name of the file
        :raises ValueError: if the filename has no extension
        """
        self.__name = require_non_none(filename)
        self.__fmt = FileMetadata.__fmt_specifier
        self.__num = None
        self.__fnum = None
        match = FileMetadata.__ext_pattern.match(filename)
        if match is None:
            raise ValueError("The following file must contain an extension: " + filename)
        self.__ext = match.group(1)

    @property
    def name(self) -> str:
        return self.__name

    @property
    def fmt(self) -> str:
        return self.__fmt

    @fmt.setter
    def fmt(self, fmt: str) -> None:
        if FileMetadata.__fmt_specifier not in require_non_none(fmt):
            raise ValueError("Required format specified '{}' was not found in the following format: {}"
                             .format(FileMetadata.__fmt_specifier, fmt))
        self.__fmt = fmt

    @property
    def num(self) -> int:
        return self.__num

    @num.setter
    def num(self, num: int) -> None:
        # NOTE(review): a previously computed 'fnum' is NOT invalidated here;
        # callers are expected to set 'num' before any formatting happens
        # (as the decorators do) -- confirm before reordering operations.
        self.__num = require_non_none(num)

    @property
    def fnum(self) -> str:
        # Lazily defaults to the plain string form of 'num'.
        if self.__num is None:
            raise Exception("File's number cannot be formatted as it was uninitialized: " + self.__name)
        if self.__fnum is None:
            self.__fnum = str(self.__num)
        return self.__fnum

    @fnum.setter
    def fnum(self, fnum: str) -> None:
        self.__fnum = require_non_none(fnum)

    @property
    def ext(self) -> str:
        return self.__ext

    @ext.setter
    def ext(self, ext: str) -> None:
        self.__ext = require_non_none(ext)

    def __str__(self) -> str:
        """
        Formats the filename according to the object's metadata.
        :return: Formatted filename of the object
        """
        # CONSISTENCY FIX: use the class constant instead of a hard-coded
        # "$d", which would silently desync if the specifier ever changed.
        return self.fmt.replace(FileMetadata.__fmt_specifier, self.fnum) + "." + self.ext
class Directory(ABC):
    """Interface for a collection of files carrying renaming metadata."""

    @abstractmethod
    def get_files(self) -> Dict[str, FileMetadata]:
        """
        :return: Relation of filenames to their numerical ordering.
        """
        pass

    @abstractmethod
    def operate(self) -> None:
        """
        Performs an operation on the directory.
        :return: None
        """
        pass
class ConcreteDirectory(Directory):
    """Directory implementation backed by an actual folder on disk."""

    def __init__(self, path: str):
        """
        :param path: Path of an existing directory whose entries are loaded
        :raises Exception: if the path is not a directory
        """
        self.__path = require_non_none(path)
        if not isdir(path):
            raise Exception("Path is not a valid directory: {}".format(path))
        self.__files = {e: FileMetadata(e) for e in listdir(path)}

    def save_files(self) -> None:
        """
        Saves the directory to the storage medium.
        File name changes made to the directory object are renamed.

        Renames are ordered so that no rename overwrites a file that has not
        itself been renamed yet. BUG FIX: rename cycles (e.g. 'a' <-> 'b')
        were previously left unexecuted silently; they are now broken by
        moving one cycle member to a temporary name first.
        :return: None
        """
        # Map each current filename to its desired filename; identity renames
        # are dropped so they cannot block the dependency ordering below.
        operations = {k: str(v) for (k, v) in self.__files.items() if k != str(v)}
        performable = []  # Renames whose target name is currently free.
        conflicts = {}  # target name -> rename waiting for that name to free up
        for k, v in operations.items():
            if v not in operations:
                performable.append(k)
            else:
                conflicts[v] = k
        while performable or conflicts:
            if not performable:
                # Every remaining rename waits on another one: a cycle.
                # Move one member aside to a temporary name to break it.
                victim = next(iter(conflicts))
                temp = victim + ".rename_tmp"
                # NOTE(review): assumes no file created after the directory
                # was scanned occupies the temporary name.
                taken = set(self.__files) | set(operations) | set(operations.values())
                while temp in taken:
                    temp += "_"
                rename(join(self.__path, victim), join(self.__path, temp))
                operations[temp] = operations.pop(victim)
                # The rename that waited on 'victim' may now proceed.
                performable.append(conflicts.pop(victim))
                # Re-key the pending cycle entry whose source was 'victim'.
                for tgt, src in conflicts.items():
                    if src == victim:
                        conflicts[tgt] = temp
                        break
            op = performable.pop()
            rename(join(self.__path, op), join(self.__path, operations[op]))
            if op in conflicts:
                # Freed this name: the rename waiting on it can now run.
                performable.append(conflicts[op])
                del conflicts[op]

    def dag_topological_sort(self, vertices):
        # TODO: unimplemented helper kept for interface compatibility.
        pass

    def dag_break_cycle(self):
        # TODO: unimplemented helper kept for interface compatibility.
        pass

    def get_files(self) -> Dict[str, FileMetadata]:
        return self.__files

    def operate(self) -> None:
        pass  # Sentinel method.
from argparse import ArgumentParser
from core.directory import ConcreteDirectory
from core.decorators import *
def main():
    """Parse CLI arguments, build the decorator chain, and apply the renames."""
    args = ArgumentParser(description="CLI tool written in Python 3 used to systemically rename file "
                                      "in a directory while adhering to a variety of criteria")
    # Required arguments
    args.add_argument("path", type=str, help="Absolute or relative path to the directory")
    # Optional arguments
    args.add_argument("-s", "--shift", dest="shift", type=int,
                      help="Shifts all numerical values by the specified offset")
    args.add_argument("-z", "--zeroes", dest="zeroes", type=int, const=0, nargs="?",
                      help="Prepends numerical values with the specified or inferred number of leading zeroes")
    args.add_argument("-n", "--random", dest="random", type=int, const=None, nargs="?", default=False,
                      help="Shuffles numerical values using the specified seed, or randomly")
    args.add_argument("-f", "--fmt", dest="fmt", type=str, nargs="+",
                      help="Customizes filename output (see wiki for usage)")
    args.add_argument("-e", "--ext", dest="ext", type=str,
                      help="Changes the extension of all files to the specified extension")
    args.add_argument("-c", "--consecutive", dest="consecutive", action="store_true",
                      help="Flattens numerical values such that they are all consecutive")
    args.add_argument("-m", "--mute", dest="mute", action="store_false",
                      help="Squelches the console output of filenames and their renamed filename")
    args.add_argument("-y", "--yes", dest="confirm", action="store_true",
                      help="Confirms the operation and makes changes to your file system according to the parameters")
    args = args.parse_args()
    # Process arguments: wrap the directory in one decorator per flag.
    # NumeratedDecorator must come first -- it initializes each file's number.
    directory = ConcreteDirectory(args.path)
    dec = NumeratedDecorator(directory)
    if args.shift:
        dec = ShifterDecorator(dec, args.shift)
    if args.fmt:
        dec = FormatDecorator(dec, args.fmt)
    if args.consecutive:
        dec = FlattenDecorator(dec)
    # False means '-n' was absent; None means '-n' given with no seed.
    if args.random is not False:
        dec = RandomizeDecorator(dec, args.random)
    # None means '-z' was absent; 0 means '-z' given with no width (automatic).
    if args.zeroes is not None:
        dec = ZeroesDecorator(dec, args.zeroes)
    if args.ext:
        dec = ExtensionDecorator(dec, args.ext)
    dec.operate()  # Perform operations according to decorators
    # 'mute' is store_false: it stays True (print) unless -m was passed.
    if args.mute:
        for old, new in sorted(directory.get_files().items(), key=lambda x: x[1].num):
            print("Renaming [{}]\n\t--> [{}]".format(old, str(new)))
    # Only touch the file system when -y/--yes was given.
    if args.confirm:
        directory.save_files()
if __name__ == '__main__':
main() | /renamer_kt-1.0.1.tar.gz/renamer_kt-1.0.1/src/core/renamer.py | 0.784979 | 0.233106 | renamer.py | pypi |
from __future__ import annotations
from typing import TypeVar
from collections import deque
from util.util import require_non_none
T = TypeVar("T")
class DAG:
    def __init__(self):
        """
        Constructs a directed acyclic graph instance
        """
        self.__nodes = {}  # Map[data] -> Node

    def add_vertex(self, data: T) -> bool:
        """
        :param data: Data of the vertex
        :return: False if the graph already contains the vertex
        """
        if require_non_none(data) in self.__nodes:
            return False
        self.__nodes[data] = DAG.__DAGNode(data)
        return True

    def add_edge(self, vertex: T, edge: T) -> bool:
        """
        :param vertex: Vertex in which to add an edge
        :param edge: Connected vertex from the specified vertex
        :return: False if the vertex already contains the specified edge
        """
        if require_non_none(vertex) not in self.__nodes:
            raise ValueError("Vertex is not present within the graph: {}".format(vertex))
        if require_non_none(edge) not in self.__nodes:
            raise ValueError("Vertex edge is not present within the graph: {}".format(edge))
        if vertex == require_non_none(edge):
            raise ValueError("Vertex is already a strongly connected component with itself: {}".format(vertex))
        v, e = self.__nodes[vertex], self.__nodes[edge]
        if e in v.edges:
            return False
        v.edges.add(e)
        e.degree += 1
        return True

    def dfs_find_cycles(self) -> None:
        """
        Depth-first search verifying that the graph is acyclic.

        BUG FIX: the previous implementation crashed on ``len(white > 1)``
        (comparing a dict to an int) and never removed visited vertices, so
        it could not terminate.
        :raises ValueError: if a cycle is present in the graph
        """
        # White: unvisited; Grey: on the current DFS path; Black: finished.
        white = set(self.__nodes.values())
        grey, black = set(), set()

        def visit(node) -> None:
            white.discard(node)
            grey.add(node)
            for neighbour in node.edges:
                if neighbour in grey:
                    # Back-edge to a vertex on the current path => cycle.
                    raise ValueError(
                        "Cycle detected involving vertex: {}".format(neighbour.data))
                if neighbour not in black:
                    visit(neighbour)
            grey.discard(node)
            black.add(node)

        # Restart the search from every still-unvisited component.
        while white:
            visit(next(iter(white)))

    class __DAGNode:
        def __init__(self, data: T):
            self.data = data
            self.edges = set()
            self.degree = 0  # In-degree: number of edges pointing here.
# RenameRename


[](https://codecov.io/gh/mhmdkanj/RenameRename)
**RenameRename** is a command-line utility app that bulk renames a set of files based on some specifications.
What's special about this tool, among so many other similar ones, is that it's straightforward to use.
If you don't feel like dealing with complex regex's and just want the job done without any learning effort,
then hopefully RenameRename can cater to your bulk renaming needs. 😄
The main usage of RenameRename is as a CLI tool.
Nevertheless, it can also be used as a package of modules containing utility classes and functions for your own modules.
Feel free to take a look at the [API reference / documentation](https://mhmdkanj.github.io/RenameRename/html/index.html).
## Requirements
To run the CLI tool, you primarily need the following installed on your system:
- [Python 3.6+](https://www.python.org/)
## Install
To install the package, you can do so with [pip](https://pypi.org/project/pip/).
```sh
pip install renamerename
```
Otherwise, if you want to install from source, just clone the repository in the directory of your choice,
and then install it with `pip`, as such:
```sh
git clone https://github.com/mhmdkanj/RenameRename.git
pip install -e RenameRename
```
## Usage
In order to use RenameRename, run the Python package with:
```sh
renamerename [options]
```
### Filtering
By default, RenameRename searches for files in the current working directory.
Otherwise, a different one can be set using the `--dir` option.
Filtering of files is done via Unix filename patterns supplied with the `--filter` option, such as:
| Pattern | Usage | Example |
| ------- | ------ | ------- |
| *None* | exact characters | `file` -> `file` |
| `*` | any number of characters | `img*` -> `img_foo.tar.gz`, `img123.png`, `img`, etc. |
| `?` | any single character | `doc?file` -> `doc_file`, `doc1file`, `docsfile`, etc. |
| `[seq]` | any character in *seq* | `file_[abcdef].txt` -> `file_a.txt`, `file_b.txt`, etc. |
| `[!seq]` | any character not in *seq* | `file_[!abc].txt` -> `file_d.txt`, `file_1.txt`, `file__.txt`, etc. |
You can check which files match the filter by providing the filter option without any actions.
```sh
renamerename --filter "img_*"
# OUTPUT: filter all files beginning with: img_
```
**NOTES**:
- It is necessary to enclose filter arguments with double quotation marks `" "`, as this would allow you to pass the literal filter expression to the command (otherwise, the shell would process it, resolve the filenames itself, and pass invalid arguments to the command).
- RenameRename acts on non-hidden files inside a directory. Also, the file search is non-recursive and does not take into account directory names.
### Basic Actions
Some basic actions to rename filtered files are provided.
The more specific the action, the better.
| Action | Usage | Example |
| ------- | ------ | ------- |
| `--prefix PREFIX` | prepends `PREFIX` to the filename | `file.txt` -> `PREFIXfile.txt` |
| `--suffix SUFFIX` | appends `SUFFIX` to the filename | `file.txt` -> `fileSUFFIX.txt` |
| `--change-extension .ext` | changes the file extension | `file.txt` -> `file.ext` |
| `--add-numbering PREFIX` | changes the filename to `PREFIX` and appends a counter | `myarchive.tar.gz`, `myfile.txt` -> `PREFIX1.tar.gz`, `PREFIX2.txt` |
You can of course use multiple actions at the same time.
For instance, if you want to add a prefix, suffix, and change the extension of files beginning with "myfile" and ending with ".png", execute the following:
```sh
renamerename --filter "myfile*.png" --prefix foo_ --suffix _bar --change-extension .jpeg
# Filtered files: myfile_a.png , myfileee.png , myfile_somechars.png
# OUTPUT:
# myfile_a.png ---> foo_myfile_a_bar.jpeg
# myfileee.png ---> foo_myfileee_bar.jpeg
# myfile_somechars.png ---> foo_myfile_somechars_bar.jpeg
```
### Only Show Renaming Output without Execution
If you just want to *see* what would happen if some options and actions were supplied without actually renaming your files,
you can do so by supplying the `--only-output-results` or `-o` flag.
This way, you can review if the renaming will be done as intended and without any consequences.
```sh
renamerename [actions] -o
```
### Saving What Was Renamed to What
If you want to save all the source and target filenames for future reference (in case wrong files were renamed),
you can supply the `--save-renaming` flag to do that.
```sh
renamerename [actions] --save-renaming
```
This creates a `JSON` file in the directory supplied with `--dir` (or if no directory was supplied, the current directory) that contains the necessary changes.
### Rename by Loading a JSON file
Renaming can also be done by supplying a JSON file that dictates the source and target filenames needed.
In this case, no filtering is done, but rather the source filenames are manually entered in the JSON file.
The supplied file, via the `--from-json` option, should be a dictionary of source filenames mapped to target filenames. Example:
```json
{
"myfile_a.png": "foo_myfile_a_bar.jpeg",
"myfileee.png": "foo_myfileee_bar.jpeg",
"myfile_somechars.png": "foo_myfile_somechars_bar.jpeg"
}
```
Suppose this file was called `renaming.json`, you can execute the renaming by:
```sh
renamerename --dir DIR --from-json renaming.json
```
### Undo Renaming
In case you did not intend to execute the renaming of files, you can undo this with the `--undo-from-json` option,
in which RenameRename will reverse the renaming.
The renaming can only be undone if in the previous call to RenameRename, you used the `--save-renaming` option.
The JSON file created in that call needs to be supplied to the `--undo-from-json` option.
The mapping is reversed internally.
```sh
renamerename --dir DIR --undo-from-json renaming.json
```
Otherwise, if you do not have the JSON file, you can create one manually and execute RenameRename with the `--from-json` option.
### Synopsis
```
usage: renamerename [options]
Bulk renaming of files made easy.
optional arguments:
-h, --help show this help message and exit
--dir directory directory whose filenames are processed
--only-output-results, -o
only show renaming results without execution
--filter FILTER, -f FILTER
filter the directory contents according to Unix
patterns
--prefix PREFIX, -p PREFIX
add a prefix to filtered filenames
--suffix SUFFIX, -s SUFFIX
add a suffix to filtered filenames
--change-extension CHANGE_EXTENSION, -e CHANGE_EXTENSION
change the extension of the filtered filenames
--add-numbering ADD_NUMBERING, -n ADD_NUMBERING
change filtered filenames to same name suffixed with
increasing numbers
--save-renaming, -sr create JSON file containing all files renamed
--from-json JSON file path
rename set of files as described from JSON file
--undo-from-json JSON file path
undo renaming of set of files based on saved renaming
specification
--version show program's version number and exit
```
## Test
The RenameRename Python package includes unit tests for developers who wish to locally test it (especially upon contributing).
For that, you first need to install the testing dependencies via:
```sh
pip install -r requirements-dev.txt
```
The tests can be run with `pytest` and executed via the following:
```sh
cd repository_root # enter the root directory of the repository
pytest
```
## Documentation
The [documentation](https://mhmdkanj.github.io/RenameRename/html/index.html) (mostly the API reference) to the Python package currently resides on GitHub Pages.
For contributing developers, building the docs locally requires the following:
```sh
pip install -r docs/requirements-docs.txt
apt-get install make
```
You can then build the docs with:
```sh
cd docs # relative to repository root
make html
```
| /renamerename-1.0.0.tar.gz/renamerename-1.0.0/README.md | 0.474631 | 0.932207 | README.md | pypi |
from typing import List, Tuple, TypeVar, Collection, Iterable, Optional, Dict, cast
import re, os
from more_itertools import flatten
from more_itertools.more import windowed
import torch
from renard.pipeline.ner import NEREntity, ner_entities
T = TypeVar("T")
def spans(seq: Collection[T], max_len: int) -> List[Tuple[T, ...]]:
    """Cut the input sequence into all possible spans up to a maximum length

    .. note::

        spans are ordered from the smallest to the biggest,
        from the beginning of seq to the end of seq.

    :param seq: input sequence
    :param max_len: maximum span length, in number of elements
    :return: a list of spans, each span being a tuple of elements
    """
    elts = tuple(seq)
    out_spans = []
    # BUGFIX: the upper bound is now inclusive (min(len, max_len) + 1) so
    # that the span covering the whole sequence is returned when
    # max_len >= len(seq); the previous bound silently excluded it.
    for length in range(1, min(len(elts), max_len) + 1):
        for start in range(len(elts) - length + 1):
            out_spans.append(elts[start : start + length])
    return out_spans
def spans_indexs(seq: List, max_len: int) -> List[Tuple[int, int]]:
    """Compute the inclusive boundary indices of all possible spans of
    ``seq`` up to a maximum length.

    Indices are ordered as in :func:`spans`: from the smallest span to the
    biggest, from the beginning of ``seq`` to the end.

    :param seq: input sequence
    :param max_len: maximum span length, in number of elements
    :return: a list of ``(start, end)`` tuples, both indices inclusive
    """
    indexs = []
    # BUGFIX: inclusive upper bound (min(len, max_len) + 1), consistent
    # with :func:`spans`: the span covering the whole sequence is included
    # when max_len >= len(seq).
    for length in range(1, min(len(seq), max_len) + 1):
        for start in range(len(seq) - length + 1):
            indexs.append((start, start + length - 1))
    return indexs
def batch_index_select(
    input: torch.Tensor, dim: int, index: torch.Tensor
) -> torch.Tensor:
    """Batched version of :func:`torch.index_select`.

    Inspired by https://discuss.pytorch.org/t/batched-index-select/9115/8

    :param input: a torch tensor of shape ``(B, *)`` where ``*``
        is any number of additional dimensions.
    :param dim: the dimension in which to index
    :param index: index tensor of shape ``(B, I)``

    :return: a tensor which indexes ``input`` along dimension ``dim``
        using ``index``. This tensor has the same shape as ``input``,
        except in dimension ``dim``, where it has dimension ``I``.
    """
    # reshape the index so it broadcasts against input: batch size in
    # front, the index values along ``dim``, singleton everywhere else
    view_shape = [-1 if d == dim else 1 for d in range(input.dim())]
    view_shape[0] = input.shape[0]
    # expand the reshaped index to input's full shape, keeping the index
    # length along ``dim``
    expand_shape = list(input.shape)
    expand_shape[dim] = -1
    broadcast_index = index.view(view_shape).expand(expand_shape)
    return torch.gather(input, dim, broadcast_index)
R = TypeVar("R")
def search_pattern(seq: Iterable[R], pattern: List[R]) -> List[int]:
    """Search a pattern in sequence

    :param seq: sequence in which to search
    :param pattern: searched pattern
    :return: a list of patterns start index
    """
    # materialize once: ``seq`` may be a one-shot iterable
    items = list(seq)
    width = len(pattern)
    # BUGFIX: the previous more_itertools.windowed-based version padded a
    # too-short sequence with None fill values, which could spuriously
    # match a pattern containing None; with explicit slicing, a pattern
    # longer than the sequence simply yields no match.
    return [
        start
        for start in range(len(items) - width + 1)
        if items[start : start + width] == pattern
    ]
def load_conll2002_bio(
    path: str,
    tag_conversion_map: Optional[Dict[str, str]] = None,
    separator: str = "\t",
    **kwargs
) -> Tuple[List[List[str]], List[str], List[NEREntity]]:
    """Load a file under CoNLL2022 BIO format. Sentences are expected
    to be separated by end of lines. Tags should be in the CoNLL-2002
    format (such as 'B-PER I-PER') - If this is not the case, see the
    ``tag_conversion_map`` argument.

    .. note::

        a sentence is only flushed when a blank line is read: if the file
        does not end with a newline, the trailing sentence's tokens are
        silently dropped (while its tags are still collected).

    :param path: path to the CoNLL-2002 formatted file
    :param separator: separator between token and BIO tags
    :param tag_conversion_map: conversion map for tags found in the
        input file. Example : ``{'B': 'B-PER', 'I': 'I-PER'}``
    :param kwargs: additional kwargs for ``open`` (such as
        ``encoding`` or ``newline``).

    :return: ``(sentences, tokens, entities)``
    """
    if tag_conversion_map is None:
        tag_conversion_map = {}
    # '~' is expanded so paths such as '~/corpus.conll' work
    with open(os.path.expanduser(path), **kwargs) as f:
        raw_data = f.read()
    sents = []
    sent_tokens = []
    tags = []
    for line in raw_data.split("\n"):
        line = line.strip("\n")
        # a blank (or whitespace-only) line ends the current sentence
        if re.fullmatch(r"\s*", line):
            sents.append(sent_tokens)
            sent_tokens = []
            continue
        # raises ValueError if the line does not contain exactly one separator
        token, tag = line.split(separator)
        sent_tokens.append(token)
        # tags absent from the conversion map are kept as-is
        tags.append(tag_conversion_map.get(tag, tag))
    tokens = list(flatten(sents))
    entities = ner_entities(tokens, tags)
    # NOTE(review): the return flattens ``sents`` a second time instead of
    # reusing ``tokens`` — the values are identical
    return sents, list(flatten(sents)), entities
from typing import Dict, List, Set, Tuple
from collections import defaultdict
import os
script_dir = os.path.dirname(os.path.abspath(__file__))
class HypocorismGazetteer:
    """An hypocorism (nicknames) gazetteer

    .. note::

        datas used for this gazeeter come from
        https://github.com/carltonnorthern/nickname-and-diminutive-names-lookup
        and are licensed under the Apache 2.0 License
    """

    # languages for which a gazetteer data file is available
    supported_langs = {"eng"}

    def __init__(self, lang: str = "eng"):
        """
        :param lang: gazetteer language. Must be in
            ``HypocorismGazetteer.supported_langs``.
        """
        if not lang in HypocorismGazetteer.supported_langs:
            print(
                f"[warning] {lang} not supported by {type(self)} (supported languages: {HypocorismGazetteer.supported_langs})"
            )
        # both indexes only ever store lowercased keys and values
        self.name_to_nicknames = defaultdict(set)
        self.nickname_to_names = defaultdict(set)
        if lang == "eng":
            with open(f"{script_dir}/datas/hypocorisms.csv") as f:
                for line in f:
                    # it should be illegal to parse csv like that,
                    # however in this specific case we know there
                    # are no issues... right ?
                    line = line.strip()
                    splitted = line.split(",")
                    name = splitted[0]
                    nicknames = splitted[1:]
                    self._add_hypocorism_(name, nicknames)

    def _add_hypocorism_(self, name: str, nicknames: List[str]):
        """Add a name associated with several nicknames

        :param name:
        :param nicknames: nicknames to associate to the given name
        """
        name = name.lower()
        nicknames = [n.lower() for n in nicknames]
        for nickname in nicknames:
            self.nickname_to_names[nickname].add(name)
            self.name_to_nicknames[name].add(nickname)

    def get_nicknames(self, name: str) -> Set[str]:
        """Return all possible nickname for the given name"""
        # BUGFIX: the previous try/except KeyError was dead code — indexing
        # a defaultdict never raises, and worse, it *inserted* an empty set
        # into the index for every unknown name queried.  ``.get`` performs
        # a pure lookup instead.
        return self.name_to_nicknames.get(name.lower(), set())

    def get_possible_names(self, nickname: str) -> Set[str]:
        """Return all names that can correspond to the given nickname"""
        # same fix as in get_nicknames: lookup without defaultdict insertion
        return self.nickname_to_names.get(nickname.lower(), set())

    def are_related(self, name1: str, name2: str) -> bool:
        """Check if one name is an hypocorism of the other
        (or if both names are equals)
        """
        return (
            name1.lower() == name2.lower()
            or name2.lower() in self.get_nicknames(name1)
            or name2.lower() in self.get_possible_names(name1)
        )
import os
from typing import List, Optional, Set, Dict, Any, Literal
from renard.pipeline.ner import ner_entities
import stanza
from stanza.protobuf import CoreNLP_pb2
from stanza.server import CoreNLPClient
from stanza.resources.installation import DEFAULT_CORENLP_DIR
from renard.pipeline.core import PipelineStep, Mention
def corenlp_is_installed() -> bool:
    """Return ``True`` if something exists at stanza's default CoreNLP
    installation directory (taken as a proxy for CoreNLP being installed).
    """
    return os.path.exists(DEFAULT_CORENLP_DIR)
def corenlp_annotations_bio_tags(annotations: CoreNLP_pb2.Document) -> List[str]:
    """Returns an array of bio tags extracted from stanford corenlp annotations

    .. note::

        only PERSON, LOCATION, ORGANIZATION and MISC entities are considered.
        Other types of entities are discarded.
        (see https://stanfordnlp.github.io/CoreNLP/ner.html#description) for
        a list of usual coreNLP types.

    .. note::

        Weirdly, CoreNLP will annotate pronouns as entities. Only tokens having
        a NNP POS are kept by this function.

    :param annotations: stanford coreNLP text annotations

    :return: an array of bio tags.
    """
    # flatten the tokens of every sentence into one document-level list
    corenlp_tokens = [
        token for sentence in annotations.sentence for token in sentence.token  # type: ignore
    ]
    # default tag: outside any entity
    bio_tags = ["O"] * len(corenlp_tokens)
    # CoreNLP entity types -> CoNLL-2003 style tags
    stanford_to_bio = {
        "PERSON": "PER",
        "LOCATION": "LOC",
        "ORGANIZATION": "ORG",
        "MISC": "MISC",
    }
    for mention in annotations.mentions:  # type: ignore
        # ignore tags not in conll 2003 format
        if not mention.ner in stanford_to_bio:
            continue
        # NOTE(review): the protobuf field names suggest sentence-local
        # indices, yet they are used below to index the document-level
        # token list — confirm against the CoreNLP protobuf semantics
        token_start_idx = mention.tokenStartInSentenceInclusive
        token_end_idx = mention.tokenEndInSentenceExclusive
        # ignore entities having a pos different than NNP
        if corenlp_tokens[token_start_idx].pos != "NNP":
            continue
        # first token of the mention gets B-, following tokens get I-
        bio_tag = f"B-{stanford_to_bio[mention.ner]}"
        bio_tags[token_start_idx] = bio_tag
        for i in range(token_start_idx + 1, token_end_idx):
            bio_tag = f"I-{stanford_to_bio[mention.ner]}"
            bio_tags[i] = bio_tag
    return bio_tags
class StanfordCoreNLPPipeline(PipelineStep):
    """a full NLP pipeline using stanford CoreNLP

    .. note::

        The Stanford CoreNLP pipeline requires the ``stanza`` library.
        You can install it with poetry using ``poetry install -E stanza``.

    .. warning::

        RAM usage might be high for coreference resolutions as it uses
        the entire novel ! If CoreNLP terminates with an out of memory
        error, you can try allocating more memory for the server by
        using ``server_kwargs`` (example : ``{"memory": "8G"}``).
    """
    def __init__(
        self,
        annotate_corefs: bool = False,
        corefs_algorithm: Literal[
            "deterministic", "statistical", "neural"
        ] = "statistical",
        corenlp_custom_properties: Optional[Dict[str, Any]] = None,
        server_timeout: int = 9999999,
        **server_kwargs,
    ) -> None:
        """
        :param annotate_corefs: ``True`` if coreferences must be
            annotated, ``False`` otherwise. This parameter is not
            yet implemented.
        :param corefs_algorithm: one of ``{"deterministic", "statistical", "neural"}``
        :param corenlp_custom_properties: custom properties dictionary to pass to the
            CoreNLP server. Note that some properties are already set when calling the
            server, so not all properties are supported : it is intended as a last
            resort escape hatch. In particular, do not set ``'ner.applyFineGrained'``.
            If you need to set the coreference algorithm used, see ``corefs_algorithm``.
        :param server_timeout: CoreNLP server timeout in ms
        :param server_kwargs: extra args for stanford CoreNLP server. `be_quiet`
            and `max_char_length` are *not* supported.
            See here for a list of possible args :
            https://stanfordnlp.github.io/stanza/client_properties.html#corenlp-server-start-options-server
        """
        assert corefs_algorithm in {"deterministic", "statistical", "neural"}
        self.annotate_corefs = annotate_corefs
        self.corefs_algorithm = corefs_algorithm
        self.server_timeout = server_timeout
        self.server_kwargs = server_kwargs
        # default to an empty dict so the merge in __call__ is unconditional
        self.corenlp_custom_properties = (
            corenlp_custom_properties if not corenlp_custom_properties is None else {}
        )
        super().__init__()
    def __call__(self, text: str, **kwargs) -> Dict[str, Any]:
        # downloads the CoreNLP distribution on first use
        if not corenlp_is_installed():
            stanza.install_corenlp()
        # define corenlp annotators and properties
        corenlp_annotators = ["tokenize", "ssplit", "pos", "lemma", "ner"]
        # user-supplied properties are merged first, so 'ner.applyFineGrained'
        # is always forced to False even if the user tried to set it
        corenlp_properties = {
            **self.corenlp_custom_properties,
            **{"ner.applyFineGrained": False},
        }
        ## coreference annotation settings
        if self.annotate_corefs:
            if self.corefs_algorithm == "deterministic":
                corenlp_annotators += ["parse", "dcoref"]
            elif self.corefs_algorithm == "statistical":
                corenlp_annotators += ["depparse", "coref"]
                corenlp_properties = {
                    **corenlp_properties,
                    **{"coref.algorithm": "statistical"},
                }
            elif self.corefs_algorithm == "neural":
                corenlp_annotators += ["depparse", "coref"]
                corenlp_properties = {
                    **corenlp_properties,
                    **{"coref.algorithm": "neural"},
                }
            else:
                # defensive: unreachable given the assert in __init__
                raise RuntimeError(
                    f"unknown coref algorithm : {self.corefs_algorithm}."
                )
        with CoreNLPClient(
            annotators=corenlp_annotators,
            # sized to the input so the whole text is accepted at once
            max_char_length=len(text),
            timeout=self.server_timeout,
            be_quiet=True,
            properties=corenlp_properties,
            **self.server_kwargs,
        ) as client:
            # compute annotation
            annotations: CoreNLP_pb2.Document = client.annotate(text)  # type: ignore
        # parse tokens
        tokens = [
            token.word
            for sentence in annotations.sentence  # type: ignore
            for token in sentence.token
        ]
        # parse NER bio tags
        bio_tags = corenlp_annotations_bio_tags(annotations)
        # parse corefs if enabled
        if self.annotate_corefs:
            coref_chains = []
            for coref_chain in annotations.corefChain:  # type: ignore
                chain = []
                for mention in coref_chain.mention:  # type: ignore
                    mention_sent = annotations.sentence[mention.sentenceIndex]  # type: ignore
                    # document-level index of the sentence's first token, used
                    # below to convert sentence-local mention indices
                    sent_start_idx = mention_sent.token[0].tokenBeginIndex
                    mention_words = []
                    # rebuild the mention surface form: each token is followed
                    # by its trailing whitespace ('after'), except the last one
                    for token in mention_sent.token[
                        mention.beginIndex : mention.endIndex - 1
                    ]:
                        mention_words.append(token.word)
                        mention_words.append(token.after)
                    mention_words.append(
                        mention_sent.token[mention.endIndex - 1].word
                    )
                    chain.append(
                        Mention(
                            mention_words,
                            sent_start_idx + mention.beginIndex,
                            sent_start_idx + mention.endIndex,
                        )
                    )
                coref_chains.append(chain)
        out_dict = {"tokens": tokens, "entities": ner_entities(tokens, bio_tags)}
        if self.annotate_corefs:
            out_dict["corefs"] = coref_chains  # type: ignore
        return out_dict
    def needs(self) -> Set[str]:
        """This step reads the raw text directly: no prior pipeline state needed."""
        return set()
    def production(self) -> Set[str]:
        """Tokens and entities are always produced; 'corefs' only when enabled."""
        production = {"tokens", "entities"}
        if self.annotate_corefs:
            production.add("corefs")
        return production
import itertools
from typing import Dict, Any, List, Set, Optional, Tuple, Literal, Union
import operator
from itertools import accumulate
import networkx as nx
import numpy as np
from more_itertools import windowed
from renard.pipeline.ner import NEREntity
from renard.pipeline.core import PipelineStep
from renard.pipeline.characters_extraction import Character
def sent_index_for_token_index(token_index: int, sentences: List[List[str]]) -> int:
    """Compute the index of the sentence containing the token at ``token_index``"""
    # running totals of token counts: the first sentence whose cumulative
    # length exceeds token_index contains the token
    cumulative_lengths = accumulate(len(sent) for sent in sentences)
    return next(
        sent_i
        for sent_i, cum_len in enumerate(cumulative_lengths)
        if cum_len > token_index
    )
def sent_indices_for_chapter(
    chapters: List[List[str]], chapter_idx: int, sentences: List[List[str]]
) -> Tuple[int, int]:
    """Return the indices of the first and the last sentence of a
    chapter

    :param chapters: all chapters
    :param chapter_idx: index of the chapter for which sentence
        indices are returned
    :param sentences: all sentences
    :return: ``(first sentence index, last sentence index)``
    """
    # token-level boundaries of the chapter: [first_token, last_token)
    first_token = sum(len(chap) for chap in chapters[:chapter_idx])
    last_token = first_token + len(chapters[chapter_idx])
    first_sent = None
    last_sent = None
    cursor = 0
    for sent_i, sent in enumerate(sentences):
        sent_start, cursor = cursor, cursor + len(sent)
        # first sentence starting at or after the chapter start
        if first_sent is None and sent_start >= first_token:
            first_sent = sent_i
        # first sentence whose end reaches the chapter end
        if last_sent is None and cursor >= last_token:
            last_sent = sent_i
            break
    assert not first_sent is None and not last_sent is None
    return (first_sent, last_sent)
def mentions_for_chapters(
    chapters: List[List[str]],
    mentions: List[Tuple[Character, NEREntity]],
) -> List[List[Tuple[Character, NEREntity]]]:
    """Return each chapter mentions

    :param chapters:
    :param mentions:
    :return: a list of mentions per chapters. This list has len
        ``len(chapters)``.
    """
    chapters_mentions = [[] for _ in chapters]
    # token-level [start, end) boundaries of each chapter
    boundaries = []
    offset = 0
    for chapter in chapters:
        boundaries.append((offset, offset + len(chapter)))
        offset += len(chapter)
    for mention in mentions:
        entity = mention[1]
        for chapter_i, (start_i, end_i) in enumerate(boundaries):
            # a mention belongs to the first chapter fully containing it
            if entity.start_idx >= start_i and entity.end_idx < end_i:
                chapters_mentions[chapter_i].append(mention)
                break
    return chapters_mentions
class CoOccurrencesGraphExtractor(PipelineStep):
    """A simple character graph extractor using co-occurences"""
    def __init__(
        self,
        co_occurences_dist: Union[int, Tuple[int, Literal["tokens", "sentences"]]],
        dynamic: bool = False,
        dynamic_window: Optional[int] = None,
        dynamic_overlap: int = 0,
    ) -> None:
        """
        :param co_occurences_dist: max accepted distance between two
            character appearances to form a co-occurence interaction.

            - if an ``int`` is given, the distance is in number of
              tokens
            - if a ``tuple`` is given, the first element of the
              tuple is a distance while the second is an unit.
              Examples : ``(1, "sentences")``, ``(3, "tokens")``.

        :param dynamic:

            - if ``False`` (the default), a static ``nx.graph`` is
              extracted
            - if ``True``, several ``nx.graph`` are extracted. In
              that case, ``dynamic_window`` and
              ``dynamic_overlap`` *can* be specified. If
              ``dynamic_window`` is not specified, this step is
              expecting the text to be cut into 'chapters', and a graph
              will be extracted for each 'chapter'. In that case,
              ``chapters`` must be passed to the pipeline as a
              ``List[str]`` at runtime.

        :param dynamic_window: dynamic window, in number of
            interactions. a dynamic window of `n` means that each
            returned graph will be formed by `n` interactions.

        :param dynamic_overlap: overlap, in number of interactions.
        """
        # normalize the int shorthand to the (distance, unit) form
        if isinstance(co_occurences_dist, int):
            co_occurences_dist = (co_occurences_dist, "tokens")
        self.co_occurences_dist = co_occurences_dist
        if dynamic:
            if not dynamic_window is None:
                assert dynamic_window > 0
                assert dynamic_window > dynamic_overlap
        self.dynamic = dynamic
        self.dynamic_window = dynamic_window
        self.dynamic_overlap = dynamic_overlap
        # BUGFIX: was ``dynamic == "nx" and dynamic_window is None``, which
        # is always False since ``dynamic`` is a bool. Because of that,
        # ``needs()`` never declared 'chapter_tokens' even in per-chapter
        # dynamic mode, and the missing input was only caught by an assert
        # deep inside _extract_dynamic_graph.
        self.dynamic_needs_chapter = dynamic and dynamic_window is None
        super().__init__()
    def __call__(
        self,
        text: str,
        characters: Set[Character],
        sentences: List[List[str]],
        chapter_tokens: Optional[List[List[str]]] = None,
        sentences_polarities: Optional[List[float]] = None,
        **kwargs,
    ) -> Dict[str, Any]:
        """Extract a characters graph

        :param characters:

        :return: a ``dict`` with key ``'characters_graph'`` and a
            :class:`nx.Graph` or a list of :class:`nx.Graph` as
            value.
        """
        # gather all mentions of all characters, sorted by their position
        # of appearance in the text
        mentions = []
        for character in characters:
            for mention in character.mentions:
                mentions.append((character, mention))
        mentions = sorted(mentions, key=lambda cm: cm[1].start_idx)
        if self.dynamic:
            return {
                "characters_graph": self._extract_dynamic_graph(
                    mentions,
                    self.dynamic_window,
                    self.dynamic_overlap,
                    chapter_tokens,
                    sentences,
                    sentences_polarities,
                )
            }
        return {
            "characters_graph": self._extract_graph(
                mentions, sentences, sentences_polarities
            )
        }
    def _mentions_interact(
        self,
        mention_1: NEREntity,
        mention_2: NEREntity,
        sentences: Optional[List[List[str]]] = None,
    ) -> bool:
        """Check if two mentions are close enough to be in interactions.

        .. note::

            the attribute ``self.co_occurences_dist`` is used to know
            wether mentions are in co_occurences

        :param mention_1:
        :param mention_2:
        :param sentences: required when the co-occurrence unit is 'sentences'
        :return: a boolean indicating wether the two mentions are co-occuring
        """
        if self.co_occurences_dist[1] == "tokens":
            return (
                abs(mention_2.start_idx - mention_1.start_idx)
                <= self.co_occurences_dist[0]
            )
        elif self.co_occurences_dist[1] == "sentences":
            assert not sentences is None
            mention_1_sent = sent_index_for_token_index(mention_1.start_idx, sentences)
            mention_2_sent = sent_index_for_token_index(
                mention_2.end_idx - 1, sentences
            )
            return abs(mention_2_sent - mention_1_sent) <= self.co_occurences_dist[0]
        else:
            raise NotImplementedError
    def _extract_graph(
        self,
        mentions: List[Tuple[Character, NEREntity]],
        sentences: List[List[str]],
        sentences_polarities: Optional[List[float]],
    ):
        """
        :param mentions: A list of character mentions, ordered by
            appearance
        :param sentences: if specified, ``sentences_polarities`` must
            be specified as well.
        :param sentences_polarities: if specified, ``sentences`` must
            be specified as well. In that case, edges are annotated
            with the ``'polarity'`` attribute, indicating the polarity
            of the relationship between two characters. Polarity
            between two interactions is computed as the strongest
            sentence polarity between those two mentions.
        """
        compute_polarity = not sentences_polarities is None
        # co-occurence matrix, where C[i][j] is 1 when appearance
        # i co-occur with j if i < j, or 0 when it doesn't
        C = np.zeros((len(mentions), len(mentions)))
        for i, (char1, mention_1) in enumerate(mentions):
            # check ahead for co-occurences
            for j, (char2, mention_2) in enumerate(mentions[i + 1 :]):
                if not self._mentions_interact(mention_1, mention_2, sentences):
                    # dist between current token and future token is
                    # too great : we finished co-occurences search for
                    # the current token (mentions are sorted by position)
                    break
                # ignore co-occurences with self
                if char1 == char2:
                    continue
                # record co_occurence
                C[i][i + 1 + j] = 1
        # * Construct graph from co-occurence matrix
        G = nx.Graph()
        for character, _ in mentions:
            G.add_node(character)
        for i, (char1, mention1) in enumerate(mentions):
            for j, (char2, mention2) in enumerate(mentions):
                # no co-occurences for these two mentions: we are out
                if C[i][j] == 0:
                    continue
                if not G.has_edge(char1, char2):
                    G.add_edge(char1, char2, weight=0)
                # each recorded co-occurrence adds 1 to the edge weight
                G.edges[char1, char2]["weight"] += 1
                if compute_polarity:
                    assert not sentences is None
                    assert not sentences_polarities is None
                    # TODO: optim
                    first_sent_idx = sent_index_for_token_index(
                        mention1.start_idx, sentences
                    )
                    last_sent_idx = sent_index_for_token_index(
                        mention2.start_idx, sentences
                    )
                    sents_polarities_between_mentions = sentences_polarities[
                        first_sent_idx : last_sent_idx + 1
                    ]
                    # strongest polarity (in absolute value) between mentions
                    polarity = max(sents_polarities_between_mentions, key=abs)
                    G.edges[char1, char2]["polarity"] = (
                        G.edges[char1, char2].get("polarity", 0) + polarity
                    )
        return G
    def _extract_dynamic_graph(
        self,
        mentions: List[Tuple[Character, NEREntity]],
        window: Optional[int],
        overlap: int,
        chapter_tokens: Optional[List[List[str]]],
        sentences: List[List[str]],
        sentences_polarities: Optional[List[float]],
    ) -> List[nx.Graph]:
        """
        .. note::

            only one of ``window`` or ``chapter_tokens`` should be specified

        :param mentions: A list of character mentions, ordered by appearance
        :param window: dynamic window, in number of interactions.
        :param overlap: window overlap
        :param chapter_tokens: list of tokens for each chapter. If
            given, one graph will be extracted per chapter.
        """
        assert window is None or chapter_tokens is None
        compute_polarity = not sentences is None and not sentences_polarities is None
        if not window is None:
            # ``windowed`` pads its last window with None fill values:
            # those are filtered out before extraction
            return [
                self._extract_graph(
                    [elt for elt in ct if not elt is None],
                    sentences,
                    sentences_polarities,
                )
                for ct in windowed(mentions, window, step=window - overlap)
            ]
        assert not chapter_tokens is None
        graphs = []
        chapters_mentions = mentions_for_chapters(chapter_tokens, mentions)
        for chapter_i, (_, chapter_mentions) in enumerate(
            zip(chapter_tokens, chapters_mentions)
        ):
            chapter_start_idx = sum(
                [len(c) for i, c in enumerate(chapter_tokens) if i < chapter_i]
            )
            # make mentions coordinates chapter local
            chapter_mentions = [
                (c, m.shifted(-chapter_start_idx)) for c, m in chapter_mentions
            ]
            sent_start_idx, sent_end_idx = sent_indices_for_chapter(
                chapter_tokens, chapter_i, sentences
            )
            chapter_sentences = sentences[sent_start_idx : sent_end_idx + 1]
            chapter_sentences_polarities = None
            if compute_polarity:
                assert not sentences_polarities is None
                chapter_sentences_polarities = sentences_polarities[
                    sent_start_idx : sent_end_idx + 1
                ]
            graphs.append(
                self._extract_graph(
                    chapter_mentions,
                    chapter_sentences,
                    chapter_sentences_polarities,
                )
            )
        return graphs
    def _extract_gephi_dynamic_graph(
        self, mentions: List[Tuple[Character, NEREntity]], sentences: List[List[str]]
    ) -> nx.Graph:
        """Extract a graph with Gephi-style dynamic 'dweight' edge series.

        NOTE: not called from ``__call__``.

        :param mentions: A list of character mentions, ordered by appearance
        :param sentences:
        """
        # keep only longest name in graph node : possible only if it is unique
        # TODO: might want to try and get shorter names if longest names aren't
        # unique
        characters = set([e[0] for e in mentions])
        G = nx.Graph()
        character_to_last_appearance: Dict[Character, Optional[NEREntity]] = {
            character: None for character in characters
        }
        for i, (character, mention) in enumerate(mentions):
            if not character in characters:
                continue
            character_to_last_appearance[character] = mention
            # characters whose last appearance is close enough to interact
            close_characters = [
                c
                for c, last_appearance in character_to_last_appearance.items()
                if not last_appearance is None
                and self._mentions_interact(mention, last_appearance, sentences)
                and not c == character
            ]
            for close_character in close_characters:
                if not G.has_edge(character, close_character):
                    G.add_edge(character, close_character)
                    G.edges[character, close_character]["start"] = i
                    G.edges[character, close_character]["dweight"] = []
                # add a new entry to the weight series according to networkx
                # source code, each entry must be of the form
                # [value, start, end]
                weights = G.edges[character, close_character]["dweight"]
                if len(weights) != 0:
                    # end of last weight attribute
                    weights[-1][-1] = i
                # value, start and end of current weight attribute
                last_weight_value = weights[-1][0] if len(weights) > 0 else 0
                G.edges[character, close_character]["dweight"].append(
                    [float(last_weight_value) + 1, i, len(mentions)]
                )
        return G
    def supported_langs(self) -> Union[Set[str], Literal["any"]]:
        # co-occurrence extraction is language agnostic
        return "any"
    def needs(self) -> Set[str]:
        needs = {"characters", "sentences"}
        # per-chapter dynamic extraction additionally requires the text
        # to be cut into chapters
        if self.dynamic_needs_chapter:
            needs.add("chapter_tokens")
        return needs
    def production(self) -> Set[str]:
        return {"characters_graph"}
    def optional_needs(self) -> Set[str]:
        return {"sentences_polarities"}
from typing import Optional
from renard.pipeline.core import Pipeline
from renard.pipeline.characters_extraction import GraphRulesCharactersExtractor
from renard.pipeline.graph_extraction import CoOccurrencesGraphExtractor
def nltk_pipeline(
    tokenizer_kwargs: Optional[dict] = None,
    ner_kwargs: Optional[dict] = None,
    characters_extractor_kwargs: Optional[dict] = None,
    graph_extractor_kwargs: Optional[dict] = None,
    **pipeline_kwargs
) -> Pipeline:
    """A pre-configured NLTK-based pipeline

    :param tokenizer_kwargs: kwargs for :class:`.NLTKTokenizer`
    :param ner_kwargs: kwargs for :class:`.NLTKNamedEntityRecognizer`
    :param characters_extractor_kwargs: kwargs for :class:`.GraphRulesCharactersExtractor`
    :param graph_extractor_kwargs: kwargs for :class:`.CoOccurrencesGraphExtractor`.
        ``co_occurences_dist`` defaults to ``(1, "sentences")`` when not given.
    :param pipeline_kwargs: kwargs for :class:`.Pipeline`
    """
    # imported lazily so NLTK is only loaded when this pipeline is built
    from renard.pipeline.tokenization import NLTKTokenizer
    from renard.pipeline.ner import NLTKNamedEntityRecognizer

    tokenizer_kwargs = tokenizer_kwargs or {}
    ner_kwargs = ner_kwargs or {}
    characters_extractor_kwargs = characters_extractor_kwargs or {}
    # copy before inserting the default so the caller's dict is not mutated
    graph_extractor_kwargs = dict(graph_extractor_kwargs or {})
    graph_extractor_kwargs.setdefault("co_occurences_dist", (1, "sentences"))

    return Pipeline(
        [
            NLTKTokenizer(**tokenizer_kwargs),
            NLTKNamedEntityRecognizer(**ner_kwargs),
            GraphRulesCharactersExtractor(**characters_extractor_kwargs),
            CoOccurrencesGraphExtractor(**graph_extractor_kwargs),
        ],
        **pipeline_kwargs
    )
def bert_pipeline(
    tokenizer_kwargs: Optional[dict] = None,
    ner_kwargs: Optional[dict] = None,
    characters_extractor_kwargs: Optional[dict] = None,
    graph_extractor_kwargs: Optional[dict] = None,
    **pipeline_kwargs
) -> Pipeline:
    """A pre-configured BERT-based pipeline

    :param tokenizer_kwargs: kwargs for :class:`.NLTKTokenizer`
    :param ner_kwargs: kwargs for :class:`.BertNamedEntityRecognizer`
    :param characters_extractor_kwargs: kwargs for :class:`.GraphRulesCharactersExtractor`
    :param graph_extractor_kwargs: kwargs for :class:`.CoOccurrencesGraphExtractor`.
        ``co_occurences_dist`` defaults to ``(1, "sentences")`` when not given.
    :param pipeline_kwargs: kwargs for :class:`.Pipeline`
    """
    from renard.pipeline.tokenization import NLTKTokenizer
    from renard.pipeline.ner import BertNamedEntityRecognizer

    tokenizer_kwargs = tokenizer_kwargs or {}
    ner_kwargs = ner_kwargs or {}
    characters_extractor_kwargs = characters_extractor_kwargs or {}
    # BUG FIX: these kwargs used to be computed but never forwarded to the
    # step constructors below.  Copy before inserting the default so the
    # caller's dict is not mutated (mirrors nltk_pipeline).
    graph_extractor_kwargs = dict(graph_extractor_kwargs or {})
    graph_extractor_kwargs.setdefault("co_occurences_dist", (1, "sentences"))

    return Pipeline(
        [
            NLTKTokenizer(**tokenizer_kwargs),
            BertNamedEntityRecognizer(**ner_kwargs),
            GraphRulesCharactersExtractor(**characters_extractor_kwargs),
            CoOccurrencesGraphExtractor(**graph_extractor_kwargs),
        ],
        **pipeline_kwargs
    )
from __future__ import annotations
from typing import List, Dict, Any, Set, Tuple, Optional, Union, Literal
from dataclasses import dataclass
import torch
from transformers.tokenization_utils_base import BatchEncoding
from seqeval.metrics import precision_score, recall_score, f1_score
from renard.nltk_utils import nltk_fix_bio_tags
from renard.pipeline.core import PipelineStep, Mention
from renard.pipeline.progress import ProgressReporter
@dataclass
class NEREntity(Mention):
    """A mention carrying its named-entity class."""

    #: NER class (without BIO prefix as in ``PER`` and not ``B-PER``)
    tag: str

    def shifted(self, shift: int) -> NEREntity:
        """Return this entity with its span shifted by ``shift``.

        .. note::

            This method is implemented here to avoid type issues. Since
            :meth:`.Mention.shifted` cannot be annotated as returning
            ``Self``, this method annotates the correct return type when
            using :meth:`.NEREntity.shifted`.
        """
        return super().shifted(shift)  # type: ignore
def ner_entities(
    tokens: List[str], bio_tags: List[str], resolve_inconsistencies: bool = True
) -> List[NEREntity]:
    """Extract NER entities from a list of BIO tags

    :param tokens: a list of tokens
    :param bio_tags: a list of BIO tags. In particular, BIO tags
        should be in the CoNLL-2002 form (such as 'B-PER I-PER')
    :param resolve_inconsistencies: when ``True``, a dangling ``I-``
        tag (one with no preceding ``B-``) starts a new entity instead
        of being ignored.

    :return: A list of ner entities, in apparition order
    """
    assert len(tokens) == len(bio_tags)

    entities = []

    current_tag: Optional[str] = None
    current_tag_start_idx: Optional[int] = None

    for i, tag in enumerate(bio_tags):
        # close the entity in progress unless this tag continues it
        if current_tag is not None and not tag.startswith("I-"):
            assert current_tag_start_idx is not None
            entities.append(
                NEREntity(
                    tokens[current_tag_start_idx:i],
                    current_tag_start_idx,
                    i,
                    current_tag,
                )
            )
            current_tag = None
            current_tag_start_idx = None

        if tag.startswith("B-"):
            current_tag = tag[2:]
            current_tag_start_idx = i
        elif tag.startswith("I-"):
            if current_tag is None and resolve_inconsistencies:
                # dangling I- tag: treat it as the start of a new entity
                current_tag = tag[2:]
                current_tag_start_idx = i

    # flush the entity still open at the end of the sequence
    if current_tag is not None:
        assert current_tag_start_idx is not None
        entities.append(
            NEREntity(
                tokens[current_tag_start_idx:],
                current_tag_start_idx,
                len(bio_tags),
                current_tag,
            )
        )

    return entities
def score_ner(
    pred_bio_tags: List[str], ref_bio_tags: List[str]
) -> Tuple[float, float, float]:
    """Score NER as in CoNLL-2003 shared task using ``seqeval``

    Precision is the fraction of predicted entities that are found in
    ``ref_bio_tags``.  Recall is the fraction of reference entities
    that are found in ``pred_bio_tags``.  F1 is the harmonic mean of
    both.

    :param pred_bio_tags: predicted BIO tag sequence
    :param ref_bio_tags: gold/reference BIO tag sequence
    :return: ``(precision, recall, F1 score)``
    """
    assert len(pred_bio_tags) == len(ref_bio_tags)
    # seqeval expects (y_true, y_pred), each as a list of sequences
    return (
        precision_score([ref_bio_tags], [pred_bio_tags]),
        recall_score([ref_bio_tags], [pred_bio_tags]),
        f1_score([ref_bio_tags], [pred_bio_tags]),
    )
class NLTKNamedEntityRecognizer(PipelineStep):
    """An entity recognizer based on NLTK"""

    def __init__(self) -> None:
        """Download the NLTK resources needed for POS tagging and NE chunking."""
        import nltk

        nltk.download("averaged_perceptron_tagger", quiet=True)
        nltk.download("maxent_ne_chunker", quiet=True)
        nltk.download("words", quiet=True)
        super().__init__()

    def __call__(self, text: str, tokens: List[str], **kwargs) -> Dict[str, Any]:
        """
        :param text: full document text (unused here, kept for the step interface)
        :param tokens: tokens to tag and chunk
        """
        import nltk
        from nltk.chunk import tree2conlltags

        # POS-tag, NE-chunk, then flatten the tree back to (word, pos, iob).
        # NOTE(review): relies on ``self.lang`` having been set externally
        # (presumably by the pipeline init) — confirm against PipelineStep.
        word_tag_iobtags = tree2conlltags(
            nltk.ne_chunk(nltk.pos_tag(tokens, lang=self.lang))
        )
        # keep only the IOB tag column and repair malformed sequences
        bio_tags = nltk_fix_bio_tags([wti[2] for wti in word_tag_iobtags])
        return {"entities": ner_entities(tokens, bio_tags)}

    def supported_langs(self) -> Union[Set[str], Literal["any"]]:
        # POS Tagging only supports english and russian
        return {"eng", "rus"}

    def needs(self) -> Set[str]:
        return {"tokens"}

    def production(self) -> Set[str]:
        return {"entities"}
class BertNamedEntityRecognizer(PipelineStep):
    """An entity recognizer based on BERT"""

    # default huggingface model per ISO 639-3 language code
    LANG_TO_MODELS = {
        "fra": "Davlan/bert-base-multilingual-cased-ner-hrl",
        "eng": "compnet-renard/bert-base-cased-literary-NER",
    }

    def __init__(
        self,
        huggingface_model_id: Optional[str] = None,
        batch_size: int = 4,
        device: Literal["cpu", "cuda", "auto"] = "auto",
    ):
        """
        :param huggingface_model_id: a custom huggingface model id.
            This allows to bypass the ``lang`` pipeline parameter
            which normally choose a huggingface model automatically.
        :param batch_size: batch size at inference
        :param device: computation device
        """
        self.huggingface_model_id = huggingface_model_id
        self.batch_size = batch_size
        if device == "auto":
            # prefer GPU when available
            self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        else:
            self.device = torch.device(device)
        super().__init__()

    def _pipeline_init_(self, lang: str, progress_reporter: ProgressReporter):
        """Load the model/tokenizer for ``lang`` (or the custom model id)."""
        from transformers import AutoModelForTokenClassification, AutoTokenizer  # type: ignore

        super()._pipeline_init_(lang, progress_reporter)
        if not self.huggingface_model_id is None:
            # a custom model bypasses the language -> model mapping
            self.model = AutoModelForTokenClassification.from_pretrained(
                self.huggingface_model_id
            )
            self.lang = "unknown"
        else:
            model_str = BertNamedEntityRecognizer.LANG_TO_MODELS.get(lang)
            if model_str is None:
                raise ValueError(
                    f"BertNamedEntityRecognizer does not support language {lang}"
                )
            self.model = AutoModelForTokenClassification.from_pretrained(model_str)
        # NOTE(review): when huggingface_model_id is set, ``model_str`` is
        # unbound here and this line would raise — confirm the tokenizer is
        # meant to be loaded from the custom model id in that branch.
        self.tokenizer = AutoTokenizer.from_pretrained(model_str)

    def __call__(
        self,
        text: str,
        tokens: List[str],
        sentences: List[List[str]],
        **kwargs,
    ) -> Dict[str, Any]:
        """
        :param text: full document text (unused here)
        :param tokens: flat token list for the whole document
        :param sentences: tokens grouped into sentences (model input)
        """
        import torch

        self.model = self.model.to(self.device)

        batchs = self.encode(sentences)

        # TODO: iteration could be done using a torch dataloader
        # instead of doing it by hand
        batches_nb = len(batchs["input_ids"]) // self.batch_size + 1

        with torch.no_grad():

            wp_labels = []

            for batch_i in self._progress_(range(batches_nb)):

                batch_start = batch_i * self.batch_size
                batch_end = batch_start + self.batch_size

                out = self.model(
                    batchs["input_ids"][batch_start:batch_end].to(self.device),
                    attention_mask=batchs["attention_mask"][batch_start:batch_end].to(
                        self.device
                    ),
                )

                # (batch_size, sentence_size): argmax over the classes axis
                batch_classes_tens = torch.max(out.logits, dim=2).indices

                # flatten per-wordpiece predicted labels across the batch
                wp_labels += [
                    self.model.config.id2label[tens.item()]
                    for classes_tens in batch_classes_tens
                    for tens in classes_tens
                ]

            # NOTE(review): ``wp_labels`` accumulates wordpieces from *all*
            # rows of the encoding, but batch_labels is called with
            # batch_i=0 — the wordpiece->token mapping looks valid only for
            # the first sentence; verify against BatchEncoding.token_to_word.
            labels = self.batch_labels(batchs, 0, wp_labels, tokens)

        return {"entities": ner_entities(tokens, labels)}

    def encode(self, sentences: List[List[str]]) -> BatchEncoding:
        """Tokenize pre-split sentences into a padded wordpiece batch."""
        return self.tokenizer(
            sentences,
            return_tensors="pt",
            padding=True,
            truncation=True,
            is_split_into_words=True,
        )

    def batch_labels(
        self,
        batchs: BatchEncoding,
        batch_i: int,
        wp_labels: List[str],
        tokens: List[str],
    ) -> List[str]:
        """Align labels to tokens rather than wordpiece tokens.

        :param batchs: huggingface batch
        :param batch_i: batch index
        :param wp_labels: wordpiece aligned labels
        :param tokens: original tokens
        """
        batch_labels = ["O"] * len(tokens)

        for wplabel_j, wp_label in enumerate(wp_labels):
            token_i = batchs.token_to_word(batch_i, wplabel_j)
            # special tokens ([CLS], [SEP], padding) map to no word
            if token_i is None:
                continue
            # later wordpieces of a word overwrite earlier ones
            batch_labels[token_i] = wp_label

        return batch_labels

    def supported_langs(self) -> Union[Set[str], Literal["any"]]:
        return {"eng", "fra"}

    def needs(self) -> Set[str]:
        return {"tokens", "sentences"}

    def production(self) -> Set[str]:
        return {"entities"}
# In FoxDot, all two-character variable names are reserved for player objects, such as 'p1'
# Creating a Player Object with no arguments will play a single note on middle C, by default, repeatedly until stopped.
# Use >> to give one of these to a player object like so:
p1 >> pluck()
# To stop an individual player object, simply execute
p1.stop()
# Besides the 2-character variables that are pre-reserved, you can create your
# own with your own names
foo = Player()
foo >> pluck()
# The >> in Python is usually reserved for a type of operation, like + or -, but this is not the case in FoxDot.
# If a user re-executes the code, FoxDot will update p1 instead of creating a new PlayerObject,
# which means you can make changes to your music using just one line of code.
# If you now give your player object some arguments, you can change the notes being played back.
# The first argument should be the degree of the note to be played
# (default is the lowest note of octave 5 of the major scale) and does not need to be specified by name.
# Python, like most programming languages, uses zero-indexing when accessing values in an array,
# which means that 0 refers to the first note of the scale.
# Give your player object instructions to make music with their Synth.
# The first argument is the note of the scale to play. The following code
# plays the first three notes of the default scale (major) on repeat.
# For a single note
p1 >> pluck(0)
# Or a list of notes
p1 >> pluck([0,1,2])
# But you’ll need to specify whatever else you want to change...
# Such as note durations, or the length of each note
p1 >> pluck([0,0,0], dur=[1,2,3])
# Or amplitude, the "volume" of each note
p1 >> pluck([0,0,0], amp=[1,2,3])
# If the second list, the amp in this example, is too long, then the first list (the degree) just loops, and its elements are matched with the remaining elements from the second list (the amplitude).
p1 >> pluck([0,2,4], amp=[1,2,3,1,5])
# More generally, all the lists are traversed regardless of their length.
p1 >> pluck([0,2,4], dur=[1,2], amp=[1,2,3,1,5])
# Arguments can be integers, floating points, fractions, lists,
# tuples, or a mix
p1 >> pluck([0,0,0], dur=2)
p1 >> pluck([0,0,0], dur=1.743)
p1 >> pluck([0,0,0], dur=[0.25,0.5,0.75])
p1 >> pluck([0,0,0], dur=[1/4,1/2,3/4])
p1 >> pluck([0,0,0], dur=[1/4,0.25,3])
# Lists of values are iterated over as the Player plays notes
# The following duration equates to: 1,2,3,1,4,3
# If you don't understand this yet, don't worry, more about patterns in the pattern tutorial
p1 >> pluck([0,0,0], dur=[1,[2,4],3])
# Values in tuples are used simultaneously i.e. p1 will play 3 individual notes, then a chord of 3 together at the same time.
p1 >> pluck([0,2,4,(0,2,4)])
# You can also assign values to the attributes of player objects directly
p1.oct = 5
# To see all the names of player attributes, just execute
print(Player.get_attributes())
# More about those later in the player attributes tutorial
# You could store several player instances and assign them at different times
proxy_1 = pads([0,1,2,3], dur=1/2)
proxy_2 = pads([4,5,6,7], dur=1)
p1 >> proxy_1 # Assign the first to p1
p1 >> proxy_2 # This replaces the instructions being followed by p1
# To play multiple sequences at once, just do the same things with another
# Player object:
p1 >> pluck([0, 2, 3, 4], dur=1/2)
p2 >> pads([(0, 2, 4), (3, 5, 7)], dur=8)
# Play only this player, muting others
p1.solo() # default value is 1 (solo on)
# And turn the solo off
p1.solo(0)
# Stop (not just mute) the other players
p1.only()
# Use Ctrl+. to clear everything for the scheduling clock or run
Clock.clear()
# FoxDot can also be used to sequence and manipulate audio samples.
# To do this all you need to do is use the special play SynthDef.
# The first argument of the play SynthDef should be a string of characters
# instead of a list of numbers as you would do for any other SynthDef.
# Each character represents a different audio file, which is stored in a buffer in SuperCollider.
# To view which character relates to which audio file, execute
print(Samples)
# You can play audio samples in the FoxDot/snd/ sub-directories by using the
# 'play' Synth and using a string of characters instead of list of notes.
bd >> play("x")
# A character refers to a sound and whitespace is used for silence, so
# you can spread sounds out in time:
bd >> play("x x x ")
hh >> play(" -")
# You can lace patterns using round brackets
# Which plays like: "x o xo "
d1 >> play("(x )( x)o ")
# The following is the same as "-------="
hh >> play("---(-=)")
# Putting characters in square brackets will play them all in the space of one beat
# And will be played like one character, not simultaneously, but in quick succession
d1 >> play("x-o[-o]")
d1 >> play("x-o[---]")
d1 >> play("x-o[-----]")
d1 >> play("x-o[--------------]")
# and can be put in round brackets as if they were one character themselves.
d1 >> play("x[--]o(=[-o])")
# You can combine the brackets however you like: the following patterns are identical
d1 >> play("x-o(-[-o])")
d1 >> play("x-o[-(o )]")
# Curly braces select a sample sound at random if you want more variety
d1 >> play("x-o{-=[--][-o]}")
# Angle brackets combine patterns to be played simultaneously
d1 >> play("<X ><- ><# ><V >")
d1 >> play("<X >< - >< # >< V>")
# Each character is mapped to a folder of sound files and you can select different
# samples by using the "sample" keyword argument
d1 >> play("(x[--])xu[--]")
d1 >> play("(x[--])xu[--]", sample=1)
d1 >> play("(x[--])xu[--]", sample=2)
# Change the sample for each beat
d1 >> play("(x[--])xu[--]", sample=[1,2,3])
# You can layer two patterns together - note the "P", look at tutorial 4 for more information.
d1 >> play(P["x-o-"] & P[" **"])
# And change effects applied to all the layered patterns at the same time
d1 >> play(P["x-o-"] & P[" **"], room=0.5)
# Example from the player tutorial, but with samples instead
# Conditionals...
d1 >> play("x[--]xu[--]x", sample=(d1.degree=="x"))
# Or change it to sample bank 2 by multiplying
d1 >> play("x[--]xu[--]x", sample=(d1.degree=="x")*2)
# Chain multiple conditionals
d1 >> play("x[--]xu[--]x", sample=(d1.degree=="x")*2 + (d1.degree=="-")*5)
# Which is the same as
d1 >> play("x[--]xu[--]x", sample=d1.degree.map({"x":2, "-":5}))
import functools
import logging
import time
def enablePerfLogging():
    """Convenience helper: set the perf logger to DEBUG so Timing output shows."""
    perf_logger = logging.getLogger('FoxDot.perf')
    perf_logger.setLevel(logging.DEBUG)
class Timing(object):
    """
    Utility for profiling events

    :param str event: Unique identifier for the perf event
    :param str logger: Logger path (default 'FoxDot.perf')
    :param bool logargs: If true when used as a decorator, log the arguments to
        the decorated function

    This can be used in multiple ways. As a decorator::

        @Timing('fibonacci')
        def fib(num):
            ...

    As a context manager::

        with Timing('pi'):
            # calculate pi...

    Or directly as an object::

        timer = Timing('crank')
        # do crank
        timer.finish()

    Note that it will log the perf data to the specified logger (FoxDot.perf by
    default) at the DEBUG level. That means that in order for the information
    to be visible, you will need to configure either that logger or a parent
    logger to use the DEBUG level. For example::

        # Set root logger to DEBUG
        logging.root.setLevel(logging.DEBUG)
        # Set just the perf logger to DEBUG
        logging.getLogger('FoxDot.perf').setLevel(logging.DEBUG)
    """

    def __init__(self, event, logger='FoxDot.perf', logargs=False):
        self._event = event
        self._logger = logger
        self._log = logging.getLogger(logger)
        self._messages = []
        self._start = None
        self._logargs = logargs

    def __str__(self):
        return "Timing(%s)" % self._event

    def addMessage(self, message):
        """Attach an extra message to include in the timing log line."""
        self._messages.append(message)

    def start(self):
        """Record the start time of the event."""
        if self._start is not None:
            # Logger.warn is a deprecated alias; use warning()
            self._log.warning("Entering %s twice!", self)
        self._start = time.time()

    def finish(self):
        """Log the elapsed time (in ms) since start()."""
        if self._start is None:
            self._log.warning("Finishing %s before start!", self)
            return
        diff = 1000 * (time.time() - self._start)
        formatted_messages = ''
        if self._messages:
            formatted_messages = ', '.join(self._messages) + ': '
        self._log.debug("%s: %s%.02fms", self._event, formatted_messages, diff)

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, type, value, traceback):
        self.finish()

    def __call__(self, fxn):
        # decorator form: time every call of the wrapped function
        @functools.wraps(fxn)
        def wrapper(*args, **kwargs):
            with Timing(self._event, self._logger) as timer:
                if self._logargs:
                    if args:
                        timer.addMessage("args:%s" % list(args))
                    if kwargs:
                        timer.addMessage("kwargs:%s" % kwargs)
                return fxn(*args, **kwargs)
        return wrapper
from renardo_lib.lib.Patterns import Pattern, PGroup
# Constants and constant values
class const:
    """A number-like wrapper whose value cannot be changed.

    Arithmetic with a ``const`` always yields the wrapped value — the
    other operand is ignored — which is how "constant" values stay
    fixed under pattern arithmetic.  Wrapping a list / tuple / PGroup
    re-classes the instance as the matching pattern type with const
    leaves.
    """

    def __init__(self, value):
        # Containers are converted by re-classing self and recursing
        # into their elements.
        if isinstance(value, (list, Pattern)):
            self.__class__ = Pattern
            self.__init__([const(x) for x in value])
        elif isinstance(value, tuple):
            self.__class__ = PGroup
            self.__init__([const(x) for x in value])
        elif isinstance(value, PGroup):
            self.__class__ = value.__class__
            self.__init__([const(x) for x in value])
        else:
            self.value = value

    def __int__(self):
        return int(self.value)

    def __float__(self):
        return float(self.value)

    def __repr__(self):
        return str(self.value)

    def __hash__(self):
        # __eq__ is defined below; without an explicit __hash__ the
        # class would be unhashable in Python 3.
        return hash(self.value)

    # -- arithmetic: the other operand is deliberately ignored --------
    def __add__(self, other):
        return self.value

    def __radd__(self, other):
        return self.value

    def __sub__(self, other):
        return self.value

    def __rsub__(self, other):
        return self.value

    def __mul__(self, other):
        return self.value

    def __rmul__(self, other):
        return self.value

    # Python 3 uses __truediv__ for '/'; __div__ kept for compatibility
    def __truediv__(self, other):
        return self.value

    def __rtruediv__(self, other):
        return self.value

    def __div__(self, other):
        return self.value

    def __rdiv__(self, other):
        return self.value

    # -- comparisons delegate to the wrapped value --------------------
    def __gt__(self, other):
        return self.value > other

    def __ge__(self, other):
        return self.value >= other

    def __lt__(self, other):
        return self.value < other

    def __le__(self, other):
        return self.value <= other

    def __eq__(self, other):
        return self.value == other

    def __ne__(self, other):
        return self.value != other
class _inf(const):
    """Infinity-like const: arithmetic keeps it infinite, comparisons
    treat it as greater than anything that is not itself an ``_inf``."""

    def __repr__(self):
        return "inf"

    # -- arithmetic: results stay _inf (the wrapped value still tracks
    # the numeric operations, but the type is what matters) -----------
    def __add__(self, other):
        return _inf(self.value + other)

    def __radd__(self, other):
        return _inf(other + self.value)

    def __sub__(self, other):
        return _inf(self.value - other)

    def __rsub__(self, other):
        return _inf(other - self.value)

    def __mul__(self, other):
        return _inf(self.value * other)

    def __rmul__(self, other):
        return _inf(other * self.value)

    def __div__(self, other):
        return _inf(self.value / other)

    def __rdiv__(self, other):
        return _inf(other / self.value)

    def __truediv__(self, other):
        return _inf(self.value / other)

    def __rtruediv__(self, other):
        return _inf(other / self.value)

    # -- comparisons are by type, not by the wrapped value ------------
    def __eq__(self, other):
        # only equal to another _inf
        return isinstance(other, _inf)

    def __gt__(self, other):
        # strictly greater than everything except another _inf
        return not isinstance(other, _inf)

    def __ge__(self, other):
        return True

    def __lt__(self, other):
        return False

    def __le__(self, other):
        # only <= another _inf
        return isinstance(other, _inf)
# Shared infinity singleton built on the const machinery
inf = _inf(0)
class NoneConst(const):
    """A const wrapping ``None``; bypasses const.__init__'s type dispatch."""

    def __init__(self):
        self.value = None
import fnmatch
import os
import wave
from contextlib import closing
from itertools import chain
from os.path import abspath, join, isabs, isfile, isdir, splitext
from renardo_lib.lib.Code import WarningMsg
from renardo_lib.lib.Logging import Timing
from renardo_lib.lib.SCLang import SampleSynthDef
from renardo_lib.lib.ServerManager import Server
from renardo_lib.lib.Settings import FOXDOT_SND, FOXDOT_LOOP
# Letters that map directly to sample folders (one folder per letter,
# with 'upper'/'lower' subfolders for case)
alpha = "abcdefghijklmnopqrstuvwxyz"

# Non-letter pattern symbols mapped to the long folder names they live
# under (inside the '_' directory of the sample root)
nonalpha = {"&" : "ampersand",
            "*" : "asterix",
            "@" : "at",
            "^" : "caret",
            ":" : "colon",
            "$" : "dollar",
            "=" : "equals",
            "!" : "exclamation",
            "/" : "forwardslash",
            "#" : "hash",
            "-" : "hyphen",
            "%" : "percent",
            "+" : "plus",
            "?" : "question",
            "~" : "tilde",
            "\\" :"backslash",
            "1" : "1",
            "2" : "2",
            "3" : "3",
            "4" : "4" }
# Human-readable description of the sound behind each pattern symbol;
# printed by ``print(Samples)``
DESCRIPTIONS = { 'a' : "Gameboy hihat",      'A' : "Gameboy kick drum",
                 'b' : "Noisy beep",         'B' : "Short saw",
                 'c' : "Voice/string",       'C' : "Choral",
                 'd' : "Woodblock",          'D' : "Dirty snare",
                 'e' : "Electronic Cowbell", 'E' : "Ringing percussion",
                 'f' : "Pops",               'F' : "Trumpet stabs",
                 'g' : "Ominous",            'G' : "Ambient stabs",
                 'h' : "Finger snaps",       'H' : "Clap",
                 'i' : "Jungle snare",       'I' : "Rock snare",
                 'j' : "Whines",             'J' : "Ambient stabs",
                 'k' : "Wood shaker",        'K' : "Percussive hits",
                 'l' : "Robot noise",        'L' : "Noisy percussive hits",
                 'm' : "808 toms",           'M' : "Acoustic toms",
                 'n' : "Noise",              'N' : "Gameboy SFX",
                 'o' : "Snare drum",         'O' : "Heavy snare",
                 'p' : "Tabla",              'P' : "Tabla long",
                 'q' : "Ambient stabs",      'Q' : "Electronic stabs",
                 'r' : "Metal",              'R' : "Metallic",
                 's' : "Shaker",             'S' : "Tamborine",
                 't' : "Rimshot",            'T' : "Cowbell",
                 'u' : "Soft snare",         'U' : "Misc. Fx",
                 'v' : "Soft kick",          'V' : "Hard kick",
                 'w' : "Dub hits",           'W' : "Distorted",
                 'x' : "Bass drum",          'X' : "Heavy kick",
                 'y' : "Percussive hits",    'Y' : "High buzz",
                 'z' : "Scratch",            "Z" : "Loud stabs",
                 '-' : "Hi hat closed",      "|" : "Hangdrum",
                 '=' : "Hi hat open",        "/" : "Reverse sounds",
                 '*' : "Clap",               "\\" : "Lazer",
                 '~' : "Ride cymbal",        "%" : "Noise bursts",
                 '^' : "'Donk'",             "$" : "Beatbox",
                 '#' : "Crash",              "!" : "Yeah!",
                 '+' : "Clicks",             "&" : "Chime",
                 '@' : "Gameboy noise",      ":" : "Hi-hats",
                 '1' : "Vocals (One)",
                 '2' : 'Vocals (Two)',
                 '3' : 'Vocals (Three)',
                 '4' : 'Vocals (Four)'}
# Function-like class for searching directory for sample based on symbol
class _symbolToDir:
def __init__(self, root):
self.set_root(root)
def set_root(self, root):
""" Check if root is a valid directory then points FoxDot to that
folder for searching for samples. Raises an OSError if 'root'
is not a valid directory """
if os.path.isdir(root):
self.root = os.path.realpath(root)
else:
raise OSError("{!r} is not a valid directory".format(root))
return
def __call__(self, symbol):
""" Return the sample search directory for a symbol """
if symbol.isalpha():
return join(
self.root,
symbol.lower(),
'upper' if symbol.isupper() else 'lower'
)
elif symbol in nonalpha:
longname = nonalpha[symbol]
return join(self.root, '_', longname)
else:
return None
# Module-level singleton pointing at the default FoxDot sample directory
symbolToDir = _symbolToDir(FOXDOT_SND) # singleton
class Buffer(object):
    """Associates a sound file with a SuperCollider buffer number."""

    def __init__(self, fn, number, channels=1):
        self.fn = fn
        self.bufnum = int(number)
        self.channels = channels

    def __repr__(self):
        return "<Buffer num {}>".format(self.bufnum)

    def __int__(self):
        return self.bufnum

    @classmethod
    def fromFile(cls, filename, number):
        """Build a Buffer from ``filename``, probing its channel count.

        Files the ``wave`` module cannot parse are assumed to be mono.
        """
        try:
            with closing(wave.open(filename)) as snd:
                channel_count = snd.getnchannels()
        except wave.Error:
            channel_count = 1
        return cls(filename, number, channel_count)
# Sentinel "no sample" buffer: buffer number 0 is deliberately never allocated
nil = Buffer('', 0)
class BufferManager(object):
def __init__(self, server=Server, paths=()):
self._server = server
self._max_buffers = server.max_buffers
# Keep buffer 0 unallocated because we use it as the "nil" buffer
self._nextbuf = 1
self._buffers = [None for _ in range(self._max_buffers)]
self._fn_to_buf = {}
self._paths = [FOXDOT_LOOP] + list(paths)
self._ext = ['wav', 'wave', 'aif', 'aiff', 'flac']
self.loops = [fn.rsplit(".",1)[0] for fn in os.listdir(FOXDOT_LOOP)]
def __str__(self):
return "\n".join(["%r: %s" % (k, v) for k, v in sorted(DESCRIPTIONS.items())])
def __repr__(self):
return '<BufferManager>'
def __getitem__(self, key):
""" Short-hand access for getBufferFromSymbol() i.e. Samples['x'] """
if isinstance(key, tuple):
return self.getBufferFromSymbol(*key)
return self.getBufferFromSymbol(key)
def _reset_buffers(self):
""" Clears the cache of loaded buffers """
files = list(self._fn_to_buf.keys())
self._fn_to_buf = {}
for fn in files:
self.loadBuffer(fn)
return
def reset(self):
return self._reset_buffers()
def _incr_nextbuf(self):
self._nextbuf += 1
if self._nextbuf >= self._max_buffers:
self._nextbuf = 1
def _getNextBufnum(self):
""" Get the next free buffer number """
start = self._nextbuf
while self._buffers[self._nextbuf] is not None:
self._incr_nextbuf()
if self._nextbuf == start:
raise RuntimeError("Buffers full! Cannot allocate additional buffers.")
freebuf = self._nextbuf
self._incr_nextbuf()
return freebuf
def addPath(self, path):
""" Add a path to the search paths for samples """
self._paths.append(abspath(path))
def free(self, filenameOrBuf):
""" Free a buffer. Accepts a filename or buffer number """
if isinstance(filenameOrBuf, int):
buf = self._buffers[filenameOrBuf]
else:
buf = self._fn_to_buf[filenameOrBuf]
del self._fn_to_buf[buf.fn]
self._buffers[buf.bufnum] = None
self._server.bufferFree(buf.bufnum)
def freeAll(self):
""" Free all buffers """
buffers = list(self._fn_to_buf.values())
for buf in buffers:
self.free(buf.bufnum)
def setMaxBuffers(self, max_buffers):
""" Set the max buffers on the SC server """
if max_buffers < self._max_buffers:
if any(self._buffers[max_buffers:]):
raise RuntimeError(
"Cannot shrink buffer size. Buffers already allocated."
)
self._buffers = self._buffers[:max_buffers]
elif max_buffers > self._max_buffers:
while len(self._buffers) < max_buffers:
self._buffers.append(None)
self._max_buffers = max_buffers
self._nextbuf = self._nextbuf % max_buffers
def getBufferFromSymbol(self, symbol, index=0):
""" Get buffer information from a symbol """
if symbol.isspace():
return nil
dirname = symbolToDir(symbol)
if dirname is None:
return nil
samplepath = self._findSample(dirname, index)
if samplepath is None:
return nil
return self._allocateAndLoad(samplepath)
def getBuffer(self, bufnum):
""" Get buffer information from the buffer number """
return self._buffers[bufnum]
def _allocateAndLoad(self, filename, force=False):
""" Allocates and loads a buffer from a filename, with caching """
if filename not in self._fn_to_buf:
bufnum = self._getNextBufnum()
buf = Buffer.fromFile(filename, bufnum)
self._server.bufferRead(filename, bufnum)
self._fn_to_buf[filename] = buf
self._buffers[bufnum] = buf
elif force:
buf = self._fn_to_buf[filename]
self._server.bufferRead(filename, buf.bufnum)
# self._fn_to_buf[filename] = bufnum
# self._buffers[bufnum] = buf
return self._fn_to_buf[filename]
def reload(self, filename):
# symbol = self.getBufferFrom
return self.loadBuffer(filename, force=True)
def _getSoundFile(self, filename):
""" Look for a file with all possible extensions """
base, cur_ext = splitext(filename)
if cur_ext:
# If the filename already has an extensions, keep it
if isfile(filename):
return filename
else:
# Otherwise, look for all possible extensions
for ext in self._ext:
# Look for .wav and .WAV
for tryext in [ext, ext.upper()]:
extpath = filename + '.' + tryext
if isfile(extpath):
return extpath
return None
def _getSoundFileOrDir(self, filename):
""" Get a matching sound file or directory """
if isdir(filename):
return abspath(filename)
foundfile = self._getSoundFile(filename)
if foundfile:
return abspath(foundfile)
return None
def _searchPaths(self, filename):
""" Search our search paths for an audio file or directory """
if isabs(filename):
return self._getSoundFileOrDir(filename)
else:
for root in self._paths:
fullpath = join(root, filename)
foundfile = self._getSoundFileOrDir(fullpath)
if foundfile:
return foundfile
return None
def _getFileInDir(self, dirname, index):
""" Return nth sample in a directory """
candidates = []
for filename in sorted(os.listdir(dirname)):
name, ext = splitext(filename)
if ext.lower()[1:] in self._ext:
fullpath = join(dirname, filename)
if len(candidates) == index:
return fullpath
candidates.append(fullpath)
if candidates:
return candidates[int(index) % len(candidates)]
return None
def _patternSearch(self, filename, index):
"""
Return nth sample that matches a path pattern
Path pattern is a relative path that can contain wildcards such as *
and ? (see fnmatch for more details). Some example paths:
samp*
**/voices/*
perc*/bass*
"""
def _findNextSubpaths(path, pattern):
""" For a path pattern, find all subpaths that match """
# ** is a special case meaning "all recursive directories"
if pattern == '**':
for dirpath, _, _ in os.walk(path):
yield dirpath
else:
children = os.listdir(path)
for c in fnmatch.filter(children, pattern):
yield join(path, c)
candidates = []
queue = self._paths[:]
subpaths = filename.split(os.sep)
filepat = subpaths.pop()
while subpaths:
subpath = subpaths.pop(0)
queue = list(chain.from_iterable(
(_findNextSubpaths(p, subpath) for p in queue)
))
# If the filepat (ex. 'foo*.wav') has an extension, we want to match
# the full filename. If not, we just match against the basename.
match_base = not hasext(filepat)
for path in queue:
for subpath, _, filenames in os.walk(path):
for filename in sorted(filenames):
basename, ext = splitext(filename)
if ext[1:].lower() not in self._ext:
continue
if match_base:
ismatch = fnmatch.fnmatch(basename, filepat)
else:
ismatch = fnmatch.fnmatch(filename, filepat)
if ismatch:
fullpath = join(subpath, filename)
if len(candidates) == index:
return fullpath
candidates.append(fullpath)
if candidates:
return candidates[index % len(candidates)]
return None
@Timing('bufferSearch', logargs=True)
def _findSample(self, filename, index=0):
"""
Find a sample from a filename or pattern
Will first attempt to find an exact match (by abspath or relative to
the search paths). Then will attempt to pattern match in search paths.
"""
path = self._searchPaths(filename)
if path:
# If it's a file, use that sample
if isfile(path):
return path
# If it's a dir, use one of the samples in that dir
elif isdir(path):
foundfile = self._getFileInDir(path, index)
if foundfile:
return foundfile
else:
WarningMsg("No sound files in %r" % path)
return None
else:
WarningMsg("File %r is neither a file nor a directory" % path)
return None
else:
# If we couldn't find a dir or file with this name, then we use it
# as a pattern and recursively walk our paths
foundfile = self._patternSearch(filename, index)
if foundfile:
return foundfile
WarningMsg("Could not find any sample matching %r" % filename)
return None
def loadBuffer(self, filename, index=0, force=False):
    """ Load a sample and return the number of a buffer """
    samplepath = self._findSample(filename, index)
    if samplepath is None:
        # No sample found: buffer 0 is the silent/empty buffer
        return 0
    return self._allocateAndLoad(samplepath, force=force).bufnum
def hasext(filename):
    """Return True when `filename` carries a file extension."""
    _, ext = splitext(filename)
    return ext != ""
Samples = BufferManager()
class LoopSynthDef(SampleSynthDef):
    """SynthDef that plays an audio buffer with looping (the "loop" player).

    Adds `pos` (start position in seconds), `sample` (sample bank number)
    and `beat_stretch` attributes on top of SampleSynthDef.
    """

    def __init__(self):
        SampleSynthDef.__init__(self, "loop")
        self.pos = self.new_attr_instance("pos")
        self.sample = self.new_attr_instance("sample")
        self.beat_stretch = self.new_attr_instance("beat_stretch")
        self.defaults['pos'] = 0
        self.defaults['sample'] = 0
        self.defaults['beat_stretch'] = 0
        # When beat_stretch > 0, override rate so the whole buffer fits the note duration
        self.base.append("rate = (rate * (1-(beat_stretch>0))) + ((BufDur.kr(buf) / sus) * (beat_stretch>0));")
        self.base.append("osc = PlayBuf.ar(2, buf, BufRateScale.kr(buf) * rate, startPos: BufSampleRate.kr(buf) * pos, loop: 1.0);")
        # Short fade in/out envelope to avoid clicks
        self.base.append("osc = osc * EnvGen.ar(Env([0,1,1,0],[0.05, sus-0.05, 0.05]));")
        self.osc = self.osc * self.amp
        self.add()

    def __call__(self, filename, pos=0, sample=0, **kwargs):
        # Resolve the filename to a loaded buffer number before triggering
        kwargs["buf"] = Samples.loadBuffer(filename, sample)
        proxy = SampleSynthDef.__call__(self, pos, **kwargs)
        # Remember the filename on the proxy so it can be re-triggered
        proxy.kwargs["filename"] = filename
        return proxy
class StretchSynthDef(SampleSynthDef):
    """SynthDef that time-stretches a buffer over the note duration
    using Warp1 (the "stretch" player)."""

    def __init__(self):
        SampleSynthDef.__init__(self, "stretch")
        # Warp1 sweeps through the buffer from 0 to 1 over `sus` seconds
        self.base.append("osc = Warp1.ar(2, buf, Line.kr(0,1,sus), rate, windowSize: 0.2, overlaps: 4, interp:2);")
        # Short fade in/out envelope to avoid clicks
        self.base.append("osc = osc * EnvGen.ar(Env([0,1,1,0],[0.05, sus-0.05, 0.05]));")
        self.osc = self.osc * self.amp
        self.add()

    def __call__(self, filename, pos=0, sample=0, **kwargs):
        # Resolve the filename to a loaded buffer number before triggering
        kwargs["buf"] = Samples.loadBuffer(filename, sample)
        proxy = SampleSynthDef.__call__(self, pos, **kwargs)
        proxy.kwargs["filename"] = filename
        return proxy
class GranularSynthDef(SampleSynthDef):
    """One-shot buffer playback SynthDef ("gsynth"); like `loop` but
    without looping or beat-stretching. Currently not instantiated
    (see the commented-out line at module level)."""

    def __init__(self):
        SampleSynthDef.__init__(self, "gsynth")
        self.pos = self.new_attr_instance("pos")
        self.sample = self.new_attr_instance("sample")
        self.defaults['pos'] = 0
        self.defaults['sample'] = 0
        self.base.append("osc = PlayBuf.ar(2, buf, BufRateScale.kr(buf) * rate, startPos: BufSampleRate.kr(buf) * pos);")
        # Short fade in/out envelope to avoid clicks
        self.base.append("osc = osc * EnvGen.ar(Env([0,1,1,0],[0.05, sus-0.05, 0.05]));")
        self.osc = self.osc * self.amp
        self.add()

    def __call__(self, filename, pos=0, sample=0, **kwargs):
        # Resolve the filename to a loaded buffer number before triggering
        kwargs["buf"] = Samples.loadBuffer(filename, sample)
        return SampleSynthDef.__call__(self, pos, **kwargs)
# Instantiate the sample-based SynthDefs used by players
loop = LoopSynthDef()
stretch = StretchSynthDef()
# gsynth = GranularSynthDef()
from time import time, sleep
from threading import Thread
from renardo_lib.lib.Code import execute
class Bang:
    """Visual "bang" for a player: briefly highlights the player's line
    of code in the editor when it triggers, or calls a user-supplied
    function instead.

    Args:
        player: the Player object that triggered.
        kwargs: dict of options. ``func`` (optional callable run on the
            bang) and ``args`` (arguments for ``func``, default
            ``(player,)``) are consumed here; any remaining keys are
            passed to the Tk ``tag_config`` call that styles the
            highlighted text.
    """

    # Fallback highlight duration in seconds; the actual duration used
    # below is derived from the player's note duration.
    duration = 0.1

    def __init__(self, player, kwargs):
        self.widget = execute.namespace['FoxDot']
        self.func = kwargs.get("func", None)
        # Argument is by default, the player
        self.args = kwargs.get("args", (player,))
        # User can supply a function to call on bang
        if self.func:
            try:
                self.func.__call__(*self.args)
            except Exception as e:
                print(e)
        else:
            # Get visible portion of the text window
            try:
                a = self.widget.text.index("@0,0")
                b = self.widget.text.index("@0,%d" % self.widget.text.winfo_height())
                a, b = (int(s.split(".")[0]) for s in (a, b))
            except:
                # Widget not ready: impossible range, so nothing is drawn
                a, b = 9999, 0
            # Only update visuals if the line is visible
            if player.line_number is None:
                return
            if a <= player.line_number <= b:
                row = player.line_number
                col = player.whitespace
                env = player.envelope  # NOTE(review): unused - confirm safe to drop
                clock = player.metro
                duration = clock.beat_dur(player.dur / 2)
                # message_time = player.queue_block.time
                message_time = player.get_timestamp(player.queue_block.beat)  # clock.latency
                self.id = "{}_bang".format(player.id)
                start = "%d.%d" % (row, col)
                end = "%d.end" % row

                def bang():
                    # wait until the time osc messages are sent
                    while time() < message_time:
                        sleep(0.001)
                    self.widget.addTask(target=self.widget.text.tag_add, args=(self.id, start, end))
                    self.widget.addTask(target=self.widget.text.tag_config, args=(self.id,), kwargs=kwargs)
                    # Schedule the highlight removal on the Tk main loop
                    self.widget.root.after(int(1000 * duration), self.remove)
                    return

                Thread(target=bang).start()
        return

    def remove(self):
        """Delete the highlight tag from the editor text widget."""
        self.widget.addTask(target=self.widget.text.tag_delete, args=(self.id,))
        return
from renardo_lib.lib.Utils import modi
from renardo_lib.lib.TimeVar import TimeVar
CHROMATIC_NOTES = ["C"," ","D"," ","E","F"," ","G"," ","A"," ","B"]
class Note:
    """A musical note that can be set from a letter name ("C", "Eb"),
    an integer or float pitch number, or a TimeVar.

    Arithmetic delegates to the numeric value, so a Note can be used
    directly in pitch calculations.
    """

    def __init__(self, index):
        self.char = None  # display name, e.g. "C#" (or a placeholder)
        self.num = None   # numeric pitch value
        self.set(index)

    def __str__(self):
        return str(self.num)

    def __repr__(self):
        return str(self.num)

    def __float__(self):
        return float(self.num)

    def __int__(self):
        return int(self.num)

    def set(self, index):
        """Assign this note from a string name, number, or TimeVar.

        Raises:
            TypeError: when a string is not a letter optionally followed
                by '#' or 'b'.
        """
        if type(index) is str:
            char = index.title()
            if len(char) == 1:
                mod = 0
            elif len(char) == 2 and char[1] == "#":
                mod = 1
            elif len(char) == 2 and char[1] == "b":
                mod = -1
            else:
                raise TypeError("Could not convert string '%s' to Note" % index)
            self.char = char
            self.num = (CHROMATIC_NOTES.index(char[0]) + mod) % len(CHROMATIC_NOTES)
        if type(index) is int:
            self.num = index
            self.char = modi(CHROMATIC_NOTES, index)
        if type(index) is float:
            # Non-integer pitches have no letter name
            self.num = index
            self.char = "<Micro-Tuned>"
        if isinstance(index, TimeVar):
            self.num = index
            self.char = "<Time-Varying>"

    def __iadd__(self, other):
        # BUG FIX: augmented assignment rebinds the name to the return
        # value; returning None (as before) destroyed the Note on `+=`.
        self.num += other
        return self

    def __isub__(self, other):
        # BUG FIX: must return self (see __iadd__)
        self.num -= other
        return self

    def __add__(self, other):
        return self.num + other

    def __sub__(self, other):
        return self.num - other

    def __radd__(self, other):
        return other + self.num

    def __rsub__(self, other):
        return other - self.num

    def __call__(self, *args):
        """Optionally re-set the note, then return self (fluent style)."""
        if len(args) > 0:
            self.set(args[0])
        return self
class __root__:
    """Holds the global default root note.

    Assigning to `default` after construction updates the existing Note
    in place (via Note.set) instead of replacing the object, so
    references to the Note elsewhere stay current.
    """

    def __init__(self):
        self.default = Note("C")

    def __setattr__(self, key, value):
        if key != "default" or "default" not in vars(self):
            # First assignment of `default` (or any other attribute):
            # store normally
            self.__dict__[key] = value
        else:
            # Subsequent assignments mutate the existing Note in place
            self.default.set(value)
        return

    def reset(self):
        """ Sets the root to 0 """
        self.default = 0
Root = __root__() | /renardo_lib-0.9.0-py3-none-any.whl/renardo_lib/lib/Root.py | 0.455441 | 0.293215 | Root.py | pypi |
import os.path
import logging
from renardo_lib.lib.Code import *
FoxDotCode.namespace = globals()
from renardo_lib.lib.TempoClock import *
from renardo_lib.lib.Buffers import *
from renardo_lib.lib.Players import *
from renardo_lib.lib.Patterns import *
from renardo_lib.lib.Effects import *
from renardo_lib.lib.TimeVar import *
from renardo_lib.lib.Constants import *
from renardo_lib.lib.Midi import *
from renardo_lib.lib.Settings import *
from renardo_lib.lib.SCLang._SynthDefs import *
from renardo_lib.lib.ServerManager import *
from renardo_lib.lib.SCLang import SynthDefs, Env, SynthDef, CompiledSynthDef
from renardo_lib.lib.Root import Root
from renardo_lib.lib.Scale import Scale, Tuning
@PatternMethod
def __getitem__(self, key):
    """ Overrides the Pattern.__getitem__ to allow indexing
        by TimeVar and PlayerKey instances. """
    if isinstance(key, PlayerKey):
        # Create a player key whose calculation is get_item
        return key.index(self)
    if isinstance(key, TimeVar):
        # Create a TimeVar of a PGroup that can then be indexed by the key
        var_item = TimeVar(tuple(self.data))
        var_item.dependency = key
        var_item.evaluate = fetch(Get)
        return var_item
    return self.getitem(key)
def player_method(f):
    """ Decorator for assigning functions as Player methods.
    >>> @player_method
    ... def test(self):
    ...    print(self.degree)
    >>> p1.test()
    """
    name = f.__name__
    setattr(Player, name, f)
    # Return the attribute as stored on Player so the module-level name
    # refers to the same object players will call
    return getattr(Player, name)

PlayerMethod = player_method # Temporary alias
def _futureBarDecorator(n, multiplier=1):
    """Return a decorator that schedules a function on a future bar.

    If `n` is itself callable it is scheduled for the start of the next
    bar (wrapped so it runs with `Clock.now_flag` set); otherwise a
    decorator is returned that schedules its target `n * multiplier`
    beats after the next bar line.
    """
    if not callable(n):
        def wrapper(f):
            Clock.schedule(f, Clock.next_bar() + (n * multiplier))
            return f
        return wrapper

    def switch(*args, **kwargs):
        # Run the function with the "now" flag raised so scheduled
        # players update immediately
        Clock.now_flag = True
        output = n()
        Clock.now_flag = False
        return output

    Clock.schedule(switch, Clock.next_bar())
    return switch
def next_bar(n=0):
    ''' Schedule functions when you define them with @nextBar
        Functions will run n beats into the next bar.
        >>> nextBar(v1.solo)
        or
        >>> @nextBar
        ... def dostuff():
        ...     v1.solo()
    '''
    return _futureBarDecorator(n)

nextBar = next_bar # temporary alias
def futureBar(n=0):
    ''' Schedule functions when you define them with @futureBar
        Functions will run n bars in the future (0 is the next bar)
        >>> futureBar(v1.solo)
        or
        >>> @futureBar(4)
        ... def dostuff():
        ...     v1.solo()
    '''
    # Bars are converted to beats by multiplying with the bar length
    return _futureBarDecorator(n, Clock.bar_length())
def update_foxdot_clock(clock):
    """ Tells the TimeVar, Player, and MidiIn classes to use
        a new instance of TempoClock. """
    assert isinstance(clock, TempoClock)
    TimeVar.set_clock(clock)
    Player.set_clock(clock)
    MidiIn.set_clock(clock)
    # Allow TimeVars arriving over the network (JSON) to be rebuilt
    clock.add_method(_convert_json_bpm)
def update_foxdot_server(serv):
    """ Tells the `Effect` and`TempoClock`classes to send OSC messages to
        a new ServerManager instance.
    """
    assert isinstance(serv, ServerManager)
    for target in (TempoClock, SynthDefs):
        target.set_server(serv)
def instantiate_player_objects():
    """ Instantiates all two-character variable Player Objects """
    letters = 'abcdefghijklmnopqrstuvwxyz'
    suffixes = letters + '0123456789'
    for char1 in letters:
        # Create every two-character player, e.g. p1, ab, zz
        for char2 in suffixes:
            name = char1 + char2
            FoxDotCode.namespace[name] = EmptyPlayer(name)
        # Group of the ten numbered players sharing the first letter
        members = [FoxDotCode.namespace[char1 + str(n)] for n in range(10)]
        FoxDotCode.namespace[char1 + "_all"] = Group(*members)
    return
def _reload_synths():
    """ Resends all the synth / sample info to SuperCollider. Useful for times
        when starting FoxDot before running `FoxDot.start` in SuperCollider. """
    # Local imports avoid a circular import at module load time
    from renardo_lib.lib import SCLang
    from renardo_lib.lib import Effects
    reload(SCLang._SynthDefs)
    reload(Effects)
    Samples._reset_buffers()
    return
def foxdot_reload():
    """Reset the SC server then re-send synths, effects and samples."""
    Server.reset()
    SynthDefs.reload()
    FxList.reload()
    Samples.reset()
    return
def _convert_json_bpm(clock, data):
""" Returns a TimeVar object that has been sent across a network using JSON """
if isinstance(data, list):
cls = data[0]
val = data[1]
dur = data[2]
return FoxDotCode.namespace[cls](val, dur)
else:
return data
def Master():
    """ Returns a `Group` containing all the players currently active in the Clock """
    return Group(*Clock.playing)
def Ramp(t=32, ramp_time=4):
    """ Returns a `linvar` that goes from 0 to 1 over the course of the last
        `ramp_time` bars of every `t` length cycle. """
    # Stays at 0 for (t - ramp_time) beats, then rises linearly to 1
    return linvar([0,0,1,0],[t-ramp_time, ramp_time, 0, 0])
def allow_connections(valid = True, *args, **kwargs):
    """ Starts a new instance of ServerManager.TempoServer and connects it with the clock. Default port is 57999 """
    if not valid:
        Clock.kill_tempo_server()
        print("Closed connections")
        return
    Clock.start_tempo_server(TempoServer, **kwargs)
    print("Listening for connections on {}".format(Clock.tempo_server))
    return
# Util class
class _util:
def __repr__(self):
return "Renardo ver. 0.9.0"
def reload(self):
Server.reset()
SynthDefs.reload()
FxList.reload()
Samples.reset()
return
def reassign_clock(self):
FoxDotCode.namespace['Clock'] = _Clock
return
FoxDot = _util()

# Create a clock and define functions
_ = None

logging.basicConfig(level=logging.ERROR)

when.set_namespace(FoxDotCode) # experimental

# Default tempo clock, wired into the server and library classes below
_Clock = Clock = TempoClock()

update_foxdot_server(Server)
update_foxdot_clock(Clock)

# Create the two-character player objects (p1, ab, ...)
instantiate_player_objects()

# Create a "now" time variable
now = var([0]).transform(lambda a: Clock.now())
nextbar = var([0]).transform(lambda a: Clock.next_bar())

Attributes = Player.get_attributes()
PatternMethods = Pattern.get_methods()
PatternTypes = functions(Patterns.Sequences)

# Start (extraction residue fused onto this line has been removed)
Clock.start()
def format_args(args=[], kwargs={}, delim=': '):
    """Render positional and keyword arguments as one comma-separated
    string, e.g. ``format_args([1], {"freq": 440})`` -> ``"1, freq: 440"``.
    The defaults are never mutated; they only provide empty fallbacks.
    """
    parts = [str(a) for a in args]
    parts.extend("%s%s%s" % (key, delim, value) for key, value in kwargs.items())
    return ", ".join(parts)
class cls:
    """Represents a SuperCollider class name. Calling it (or its
    .ar/.kr/.ir rate methods) renders an `instance` holding the
    generated SC code string."""

    def __init__(self, name, **kwargs):
        self.name = name
        # Optional prefix placed before the argument list, e.g. the
        # backtick used by Klank spec arrays
        self.ref = kwargs.get("ref", "")

    def __str__(self):
        return str(self.name)

    def __repr__(self):
        return str(self.name)

    def _render(self, method, args, kwargs):
        # `method` is "" for a plain call, or ".ar"/".kr"/".ir"
        code = "{}{}({}{})".format(self.name, method, self.ref, format_args(args, kwargs))
        return instance(code)

    def __call__(self, *args, **kwargs):
        return self._render("", args, kwargs)

    def ar(self, *args, **kwargs):
        return self._render(".ar", args, kwargs)

    def kr(self, *args, **kwargs):
        return self._render(".kr", args, kwargs)

    def ir(self, *args, **kwargs):
        return self._render(".ir", args, kwargs)
class instance:
    """Wraps a string of generated SuperCollider code. Arithmetic,
    indexing and attribute access build up larger code strings, e.g.
    ``instance("a") + 1`` -> ``instance("(a + 1)")``.
    """

    # Default keyword arguments injected by __call__ (set by subclasses)
    defaults = {}
    # Maps full argument names to accepted short names, e.g. {"freq": "f"}
    shortarg = {}

    def __init__(self, string):
        self.value = str(string)

    def __repr__(self):
        return str(self.value)

    def __str__(self):
        return str(self.value)

    # -- Binary operators: each returns a new instance wrapping the
    # -- parenthesised SC expression

    def __add__(self, other):
        return instance("(%s)" % (str(self) + " + " + str(other)))

    def __sub__(self, other):
        return instance("(%s)" % (str(self) + " - " + str(other)))

    def __mul__(self, other):
        return instance("(%s)" % (str(self) + " * " + str(other)))

    def __div__(self, other):
        return instance("(%s)" % (str(self) + " / " + str(other)))

    def __pow__(self, other):
        return instance("(%s)" % (str(self) + " ** " + str(other)))

    def __xor__(self, other):
        # `^` is used as an alias for exponentiation in SC code strings
        return instance("(%s)" % (str(self) + " ** " + str(other)))

    def __truediv__(self, other):
        return self.__div__(other)

    def __getitem__(self, other):
        return instance("(%s)" % (str(self) + "[" + str(other) + "]"))

    def __radd__(self, other):
        return instance("(%s)" % (str(other) + " + " + str(self)))

    def __rsub__(self, other):
        return instance("(%s)" % (str(other) + " - " + str(self)))

    def __rmul__(self, other):
        return instance("(%s)" % (str(other) + " * " + str(self)))

    def __rdiv__(self, other):
        return instance("(%s)" % (str(other) + " / " + str(self)))

    def __rpow__(self, other):
        return instance("(%s)" % (str(other) + " ** " + str(self)))

    def __rxor__(self, other):
        return instance("(%s)" % (str(other) + " ** " + str(self)))

    def __rtruediv__(self, other):
        return self.__rdiv__(other)

    def __mod__(self, other):
        # Acts as %-style string interpolation when the code contains "%",
        # otherwise it is a no-op returning self
        return instance(str(self.value) % str(other)) if "%" in self.value else self

    def __coerce__(self, other):
        # Python 2 relic; never invoked under Python 3. Kept for
        # backward compatibility.
        try:
            self = instance(str(self))
            other = instance(str(other))
            return (self, other)
        except:
            return

    def __getattr__(self, name, *args, **kwargs):
        # Unknown attribute access appends ".name" to the code string
        return self.custom('.' + name)

    def string(self):
        # Template with a placeholder for appended text
        return str(self.value) + "{}"

    def custom(self, name):
        return self.__class__(self.string().format(name))

    def __call__(self, *args, **kwargs):
        """Render a call expression, filling in declared defaults and
        resolving short argument names (``shortarg``) to their full form."""
        for arg in set(list(self.defaults.keys()) + list(self.shortarg.keys())):
            if arg in self.shortarg:
                if self.shortarg[arg] in kwargs:
                    # BUG FIX: was `self.default[arg]` - the attribute is
                    # `defaults`, so using a short argument name raised
                    # AttributeError
                    kwargs[arg] = kwargs.get(self.shortarg[arg], self.defaults[arg])
                    del kwargs[self.shortarg[arg]]
                    continue
            if arg in self.defaults:
                kwargs[arg] = kwargs.get(arg, self.defaults[arg])
        value = self.string().format("({})".format(format_args(args, kwargs)))
        return self.__class__(value)
# UGens: one `cls` wrapper per SuperCollider unit generator name
SinOsc = cls("SinOsc")
SinOscFB = cls("SinOscFB")
Saw = cls("Saw")
LFSaw = cls("LFSaw")
VarSaw = cls("VarSaw")
LFTri = cls("LFTri")
LFPar = cls("LFPar")
PlayBuf = cls("PlayBuf")
LFNoise0 = cls("LFNoise0")
LFNoise1 = cls("LFNoise1")
LFNoise2 = cls("LFNoise2")
Gendy1 = cls("Gendy1")
Gendy2 = cls("Gendy2")
Gendy3 = cls("Gendy3")
Gendy4 = cls("Gendy4")
Gendy5 = cls("Gendy5")
Formant = cls("Formant")
Pulse = cls("Pulse")
LFPulse = cls("LFPulse")
PMOsc = cls("PMOsc")
Crackle = cls("Crackle")
LFCub = cls("LFCub")
PinkNoise = cls("PinkNoise")
Impulse = cls("Impulse")
Blip = cls("Blip")
Klank = cls("Klank", ref="`")  # Klank takes a backtick-prefixed spec array
Resonz = cls("Resonz")
# Other: converters, envelopes, filters, delays and utility UGens
K2A = cls("K2A")
Out = cls("Out")
AudioIn = cls("AudioIn")
Lag = cls("Lag")
Vibrato = cls("Vibrato")
Line = cls("Line")
XLine = cls("XLine")
FreeVerb = cls("FreeVerb")
GVerb = cls("GVerb")
Pan2 = cls("Pan2")
LPF = cls("LPF")
RLPF = cls("RLPF")
BPF = cls("BPF")
HPF = cls("HPF")
RHPF = cls("RHPF")
DelayC = cls("DelayC")
DelayN = cls("DelayN")
DelayL = cls("DelayL")
CombN = cls("CombN")
CombL = cls("CombL")
CombC = cls("CombC")
Crackle = cls("Crackle")  # NOTE(review): duplicate of the definition above
Limiter = cls("Limiter")
Ringz = cls("Ringz")
Dust = cls("Dust")
Formlet = cls("Formlet")
ClipNoise = cls("ClipNoise")
BufRateScale = cls("BufRateScale")
BufSampleRate = cls("BufSampleRate")
BufFrames = cls("BufFrames")
BufChannels = cls("BufChannels")
BufFrames = cls("BufFrames")  # NOTE(review): duplicate of the definition above
BufDur = cls("BufDur")
# sc3 Plugins (require the sc3-plugins extension pack on the server)
BufGrain = cls("BufGrain")
Decimator = cls("Decimator")
SmoothDecimator = cls("SmoothDecimator")
CrossoverDistortion = cls("CrossoverDistortion")
Disintegrator = cls("Disintegrator")
MdaPiano = cls("MdaPiano")
# Array manipulation emulator functions
def stutter(array, n):
    """Repeat each item of `array` n times: stutter([1, 2], 2) -> [1, 1, 2, 2]."""
    return [item for item in array for _ in range(n)]

def dup(x):
    """Return a two-element list containing `x` twice."""
    return [x, x]
from renardo_lib.lib.Code.foxdot_func_cmp import *
from threading import Thread
from time import sleep
class _whenStatement:
    """A conditional trigger: its test expression is evaluated
    repeatedly, the `then` action runs once each time the expression
    becomes True and the `elsedo` action once each time it becomes
    False (edge-triggered, not level-triggered)."""

    # Shared namespace in which actions are executed
    namespace = {}

    def __init__(self, func=lambda: True):
        self.expr = func
        self.reset()
        # Set to True to have the owning _whenLibrary discard this statement
        self.remove_me = False

    def __repr__(self):
        return func_str(self.expr)

    def __enter__(self):
        # Register as the statement currently being edited (with-block support)
        when.editing = self
        return self

    def __exit__(self, *args):
        when.editing = None
        return self

    @classmethod
    def set_namespace(cls, ns):
        ''' Define the namespace to execute the actions. Should be a `dict` '''
        cls.namespace = ns

    def reset(self):
        ''' Sets the `when` and `else` actions to nothing '''
        self.action = lambda: None
        self.notaction = lambda: None
        # Edge flags ensuring each action fires once per True/False transition
        self.do_switch = False
        self.elsedo_switch = False

    def evaluate(self):
        ''' Calls the test expression, and if it has changed then
            run the appropriate response code '''
        if self.expr():
            if not self.do_switch:
                self.action()
                self.toggle_live_functions(True)
                self.do_switch = True
                self.elsedo_switch = False
        else:
            if not self.elsedo_switch:
                self.notaction()
                self.toggle_live_functions(False)
                self.do_switch = False
                self.elsedo_switch = True

    def toggle_live_functions(self, switch):
        """ If the action functions are @livefunctions, turn them on/off """
        # Setting the attribute on a plain function/lambda is harmless;
        # the bare except is a deliberate best-effort guard
        try:
            self.action.live = switch
        except:
            pass
        try:
            self.notaction.live = (not switch)
        except:
            pass
        return

    def when(self, func):
        # Replace the test expression; returns self for chaining
        self.expr = func
        return self

    def then(self, func):
        ''' Set the instructions for when the test expression is True. Should
            be a list of strings. '''
        self.action = func
        return self

    def elsedo(self, func):
        ''' Set the instructions for when the test expression is False. Should
            be a list of strings. '''
        self.notaction = func
        return self

    def stop(self):
        # Clear both actions but keep the statement registered
        self.reset()
        return self

    def remove(self):
        # Clear actions and flag for deletion by the library thread
        self.reset()
        self.remove_me = True
        return self
class _whenLibrary:
""" Used to store 'when statements'. Is accessed through the `__when__` object.
"""
def __init__(self):
self.library = {}
self.editing = None
def start_thread(self):
self.thread = Thread(target=self.run)
self.thread.daemon = True
self.thread.start()
@staticmethod
def set_namespace(env):
_whenStatement.set_namespace(env.namespace)
def __len__(self):
return len(self.library)
def __repr__(self):
return repr(self.library)
def run(self):
""" Continual loop evaluating when_statements
"""
while len(self.library) > 0:
for name, expression in self.library.items():
if expression.remove_me == True:
del self.library[name]
else:
expression.evaluate()
sleep(0.01)
return
def __call__(self, name, **kwargs):
""" Calling when() with no arguments will evaluate all expressions
stored in self.library. Calling with func as a valid function
will see if the function is in self.library and add it if not,
or update do / elsedo
"""
if name in self.library:
return self.library[name]
else:
# Make a new statement
self.library[name] = _whenStatement()
# If that is the first statement, start the thread
if len(self.library) == 1:
self.start_thread()
# Return the last added expression
return self.library[name]
# what do these do?
def a(self, expr):
if self.editing is not None:
self.editing.when(expr)
return None
def b(self, expr):
if self.editing is not None:
self.editing.do(expr)
return None
def c(self, expr):
if self.editing is not None:
self.editing.elsedo(expr)
return None
def reset(self):
""" Clears the library and stop scheduling """
self.library = {}
return self
when = _whenLibrary() | /renardo_lib-0.9.0-py3-none-any.whl/renardo_lib/lib/Code/foxdot_when_statement.py | 0.61173 | 0.332961 | foxdot_when_statement.py | pypi |
from renardo_lib.lib.Patterns.Main import GeneratorPattern, Pattern, asStream, PatternInput
import random
class RandomGenerator(GeneratorPattern):
    """Base class for the seedable random generator patterns below.

    Each instance carries its own `random` source so that a `seed`
    keyword (or a class-wide override seed) can make the stream
    reproducible without touching the global `random` module state.
    """

    # Class-wide override seed applied to new generators when set
    __seed = None

    def __init__(self, *args, **kwargs):
        GeneratorPattern.__init__(self, *args, **kwargs)
        # Defaults to the global random module; replaced by a private
        # random.Random instance when a seed is supplied
        self.random = random

    def init_random(self, *args, **kwargs):
        """ To be called at the end of the __init__ """
        if "seed" in kwargs:
            self.random = self.random.Random()
            self.random.seed(kwargs["seed"])
        elif RandomGenerator.__seed is not None:
            self.random = self.random.Random()
            self.random.seed(RandomGenerator.__seed)
            # NOTE(review): with an override seed the generator is "baked"
            # into a concrete Pattern of its first 5000 values by mutating
            # __class__ -- original indentation was ambiguous; confirm this
            # belongs inside the elif branch only
            pattern = self[:5000]
            self.__class__ = Pattern
            self.data = pattern.data
        return self

    @classmethod
    def set_override_seed(cls, seed):
        cls.__seed = seed
        return

    # Pseudo-inheritance: delegate to whichever random source is active
    def choice(self, *args, **kwargs):
        return self.random.choice(*args, **kwargs)

    def randint(self, *args, **kwargs):
        return self.random.randint(*args, **kwargs)

    def triangular(self, *args, **kwargs):
        return self.random.triangular(*args, **kwargs)
class PRand(RandomGenerator):
    ''' Returns a random integer between start and stop. If start is a container-type it returns
        a random item for that container. '''

    def __init__(self, start, stop=None, **kwargs):
        # If we're given a list, choose from that list -- TODO always use a list and use range
        RandomGenerator.__init__(self, **kwargs)
        # Stored so the generator can be reconstructed / represented
        self.args = (start, stop)
        self.kwargs = kwargs
        # Choosing from a list
        if hasattr(start, "__iter__"):
            self.data = Pattern(start)
            try:
                assert(len(self.data)>0)
            except AssertionError:
                raise AssertionError("{}: Argument size must be greater than 0".format(self.name))
            self.choosing = True
            self.low = self.high = None
        else:
            # Choosing from a range
            self.choosing = False
            self.low = start if stop is not None else 0
            self.high = stop if stop is not None else start
            try:
                assert((self.high - self.low)>=1)
            except AssertionError:
                raise AssertionError("{}: Range size must be greater than 1".format(self.name))
            self.data = "{}, {}".format(self.low, self.high)
        self.init_random(**kwargs)

    def choose(self):
        # Picks a random index below MAX_SIZE; presumably Pattern
        # indexing wraps modulo its length -- confirm in Patterns.Main
        return self.data[self.choice(range(self.MAX_SIZE))]

    def func(self, index):
        if self.choosing:
            # value = self.choice(self.data)
            value = self.choose()
        else:
            value = self.randint(self.low, self.high)
        return value

    def string(self):
        """ Used in PlayString to show a PRand in curly braces """
        return "{" + self.data.string() + "}"
class PWhite(RandomGenerator):
    ''' Returns random floating point values between 'lo' and 'hi' '''

    def __init__(self, lo=0, hi=1, **kwargs):
        RandomGenerator.__init__(self, **kwargs)
        self.args = (lo, hi)
        self.low = float(lo)
        self.high = float(hi)
        # Midpoint is used as the mode of the triangular distribution
        self.mid = (lo + hi) / 2.0
        self.data = "{}, {}".format(self.low, self.high)
        self.init_random(**kwargs)

    def func(self, index):
        # NOTE(review): despite the name/docstring this draws from a
        # triangular distribution centred on `mid`, not a uniform one
        return self.triangular(self.low, self.high, self.mid)
class PxRand(PRand):
    """PRand variant that never returns the same value twice in a row."""

    def func(self, index):
        # assumes `self.last_value` is initialised by a parent class
        # before the first call -- TODO confirm in GeneratorPattern
        value = PRand.func(self, index)
        while value == self.last_value:
            # Redraw until the value differs from the previous one
            value = PRand.func(self, index)
        self.last_value = value
        return self.last_value
class PwRand(RandomGenerator):
    """Weighted random choice: each value is repeated according to its
    integer weight, then a uniform pick over the expanded list honours
    the weights."""

    def __init__(self, values, weights, **kwargs):
        RandomGenerator.__init__(self, **kwargs)
        self.args = (values, weights)
        try:
            assert(all(type(x) == int for x in weights))
        except AssertionError:
            e = "{}: Weights must be integers".format(self.name)
            raise AssertionError(e)
        self.data = Pattern(values)
        # Match the weight list length to the values, then duplicate each
        # value `weight` times
        self.weights = Pattern(weights).stretch(len(self.data))
        self.values = self.data.stutter(self.weights)
        self.init_random(**kwargs)

    def choose(self):
        # Random index below MAX_SIZE into the weight-expanded values
        return self.values[self.choice(range(self.MAX_SIZE))]

    def func(self, index):
        return self.choose()
class PChain(RandomGenerator):
    """ An example of a Markov Chain generator pattern. The mapping argument
        should be a dictionary of keys whose values are a list/pattern of possible
        destinations. """

    def __init__(self, mapping, **kwargs):
        assert isinstance(mapping, dict)
        RandomGenerator.__init__(self, **kwargs)
        self.args = (mapping,)
        self.last_value = 0
        # Normalise every destination to a plain list
        self.mapping = {key: self._convert_to_list(value)
                        for key, value in mapping.items()}
        # Start the chain from the mapping's first key, if there is one
        for key in mapping:
            self.last_value = key
            break
        self.init_random(**kwargs)

    def func(self, *args, **kwargs):
        state = self.last_value
        if isinstance(state, GeneratorPattern):
            # Use the generator's most recent cached value as the state
            state = state.CACHE_HEAD
        if state in self.mapping:
            self.last_value = self.choice(self.mapping[state])
        return self.last_value

    def _convert_to_list(self, value):
        # Lists pass through, Patterns expose their data, anything else
        # becomes a single-element list
        if isinstance(value, list):
            return value
        if isinstance(value, Pattern):
            return value.data
        return [value]
class PZ12(GeneratorPattern):
    """ Implementation of the PZ12 algorithm for predetermined random numbers. Using
        an irrational value for p, however, results in a non-determined order of values.
        Experimental, only works with 2 values.
    """

    def __init__(self, tokens=(1, 0), p=(1, 0.5)):
        # Tuple defaults fix the shared-mutable-default pitfall of the
        # original list defaults; callers may still pass lists.
        GeneratorPattern.__init__(self)
        self.data = list(tokens)
        # Normalise probabilities so the largest becomes 1
        peak = max(p)
        self.probs = [value / peak for value in p]
        # History of emitted values
        self._prev = []
        # Per-token "dearth": how far each token lags its target frequency
        self.dearth = [0 for _ in self.data]

    def _count_values(self, token):
        # Number of times `token` has been emitted so far
        return self._prev.count(token)

    def func(self, index):
        # Emit whichever token is currently furthest below its target
        # frequency (largest dearth)
        index = len(self._prev)
        for i, token in enumerate(self.data):
            expected = self.probs[i] * (index + 1)
            actual = self._count_values(token)
            self.dearth[i] = expected - actual
        i = self.dearth.index(max(self.dearth))
        value = self.data[i]
        self._prev.append(value)
        return value
class PTree(RandomGenerator):
    """ Takes a starting value and two functions as arguments. The first function, f, must
        take one value and return a container-type of values and the second function, choose,
        must take a container-type and return a single value. In essence you are creating a
        tree based on the f(n) where n is the last value chosen by choose.
    """

    def __init__(self, n=0, f=lambda x: (x + 1, x - 1), choose=lambda x: random.choice(x), **kwargs):
        RandomGenerator.__init__(self, **kwargs)
        self.args=(n, f, choose)
        self.f = f
        self.choose = choose
        # History of chosen values; each step branches from the last one
        self.values = [n]
        self.init_random(**kwargs)

    def func(self, index):
        # Expand the last value into candidates with f, pick one with choose
        self.values.append( self.choose(self.f( self.values[-1] )) )
        return self.values[-1]
class PWalk(RandomGenerator):
    """Random walk bounded to [-max, max], starting at `start`, moving
    by `step` (a pattern of step sizes) in a random direction each call."""

    def __init__(self, max=7, step=1, start=0, **kwargs):
        RandomGenerator.__init__(self, **kwargs)
        self.args = (max, step, start)
        self.max = abs(max)
        self.min = self.max * -1
        # Step sizes are forced positive; direction is chosen separately
        self.step = PatternInput(step).transform(abs)
        self.start = start
        self.data = [self.start, self.step, self.max]
        # Candidate moves: add or subtract the step
        self.directions = [lambda x, y: x + y, lambda x, y: x - y]
        self.last_value = None
        self.init_random(**kwargs)

    def func(self, index):
        if self.last_value is None:
            # First call returns the starting point unchanged
            self.last_value = self.start
        else:
            if self.last_value >= self.max: # force subtraction
                f = self.directions[1]
            elif self.last_value <= self.min: # force addition
                f = self.directions[0]
            else:
                f = self.choice(self.directions)
            self.last_value = f(self.last_value, self.step[index])
        return self.last_value
class PDelta(GeneratorPattern):
    """Cumulative-sum generator: starts at `start` and advances by the
    (cycled) `deltas` values on each subsequent index."""

    def __init__(self, deltas, start=0):
        GeneratorPattern.__init__(self)
        self.deltas = asStream(deltas)
        self.start = start
        # Running total; index 0 returns `start` unchanged
        self.value = start

    def func(self, index):
        if index == 0:
            return self.start
        # NOTE(review): assumes sequential access; random access would
        # desynchronise `self.value` from `index`
        self.value += float(self.deltas[index - 1])
        return self.value
class PSquare(GeneratorPattern):
    ''' Returns the square of the index being accessed '''
    def func(self, index):
        return index ** 2
class PIndex(GeneratorPattern):
    ''' Returns the index being accessed '''
    def func(self, index):
        # Identity generator: value n at position n
        return index
class PFibMod(GeneratorPattern):
    """ Returns the fibonacci sequence -- maybe a bad idea"""

    def func(self, index):
        # First two Fibonacci numbers are the indices themselves
        if index < 2:
            return index
        # Reuse previously computed values from the generator cache.
        # NOTE(review): dict.get evaluates the fallback eagerly, so
        # getitem() runs even on a cache hit -- confirm that is intended.
        # (Extraction residue fused onto the final line has been removed.)
        a = self.cache.get(index - 1, self.getitem(index - 1))
        b = self.cache.get(index - 2, self.getitem(index - 2))
        return a + b
import re
from renardo_lib.lib.Patterns.PlayString import PlayString, ParseError
from renardo_lib.lib.Patterns.Generators import PRand
from renardo_lib.lib.Patterns.PGroups import PGroupOr, PGroupPlus
from renardo_lib.lib.Patterns.Main import Pattern, metaPattern, PatternMethod, PGroup, GeneratorPattern
from renardo_lib.lib.Utils import modi, LCM
# Regular expressions for the bracket types found in play strings
re_nests = r"\((.*?)\)"
re_square = r"\[.*?\]"
re_curly = r"\{.*?\}"
re_arrow = r"<.*?>"
# PGroup/Pattern classes each bracket type parses into
square_type = PGroupPlus
braces_type = PRand
bar_type = PGroupOr
def ParsePlayString(string, flat=False):
    """ Returns the parsed play string used by sample player """
    parsed, _ = feed(string)
    return parsed
def convert_to_int(data):
    """ Recursively calls until all nested data contains only integers """
    if isinstance(data, (int, float, str)):
        return int(data)
    if isinstance(data, (list, tuple)):
        # Recurse element-wise, preserving the container class
        converted = [convert_to_int(item) for item in data]
        return data.__class__(converted)
    if isinstance(data, GeneratorPattern):
        return data.transform(convert_to_int)
    if isinstance(data, metaPattern):
        return data.convert_data(convert_to_int)
    # Fall back to direct conversion for any other type
    return int(data)
def arrow_zip(pat1, pat2):
    """ Zips two patterns together. If one item is a tuple, it extends the tuple / PGroup
        i.e. arrow_zip([(0,1),3], [2]) -> [(0,1,2),(3,2)]
    """
    output = Pattern()
    # Walk both patterns over the least common multiple of their lengths
    # so every pairing is visited exactly once
    for i in range(LCM(len(pat1), len(pat2))):
        item1 = pat1.getitem(i, get_generator=True)
        item2 = pat2.getitem(i, get_generator=True)
        # Merge PGroups (flatten) rather than nesting them
        if all([x.__class__== PGroup for x in (item1, item2)]):
            new_item = PGroup(item1.data + item2.data)
        elif item1.__class__ == PGroup:
            new_item = PGroup(item1.data + [item2])
        elif item2.__class__ == PGroup:
            new_item = PGroup([item1] + item2.data)
        else:
            # Two scalars pair into a tuple
            new_item = (item1, item2)
        output.append(new_item)
    return output
def feed(string):
    """ Used to recursively parse nested strings, returns a list object (not Pattern),
        and a boolean denoting if the list contains a nested list """
    string = PlayString(string)
    items = [] # The actual pattern
    # True while successive "<>" groups should be zipped onto the last item
    layer_pattern = False
    contains_nest = False
    i = 0
    while i < len(string):
        char = string[i]
        # look for a '<>'
        if char == "<":
            # Parse the contents of the brackets if found
            j = string.index(">", start=i+1)
            s = string[i+1:j]
            i = j
            chars, _ = feed(s)
            if len(chars) == 0:
                e = "Empty '<>' brackets in string"
                raise ParseError(e)
            # If we know we are layering, zip the last item
            if layer_pattern:
                items[-1] = items[-1].zip( Pattern(chars) )
            else:
                items.append(Pattern(chars))
            layer_pattern = True
            contains_nest = True
        # Look for || for specifying sample numbers
        elif char == "|":
            # Parse the contents of the brackets if found
            # (next_char_index, not index: "|" opens and closes the pair)
            j = string.next_char_index("|", start=i+1)
            s = string[i+1:j]
            i = j
            chars, _ = feed(s)
            if len(chars) == 0:
                e = "Empty '||' delimeters in string"
                raise ParseError(e)
            try:
                assert(len(chars) == 2)
            except AssertionError:
                e = "'||' delimeters must contain exactly 2 elements"
                raise ParseError(e)
            # First is our list of sample chars
            samp_chr = chars[0]
            # Next is a list of integers for sample kw
            samp_num = convert_to_int(chars[1])
            # print(samp_chr, samp_num)
            items.append(bar_type((samp_chr, samp_num)))
        # Look for a '()'
        elif char == "(":
            # Parse the contents of the brackets if found
            j = string.index(")", start=i+1)
            s = string[i+1:j]
            i = j
            chars, _ = feed(s)
            if len(chars) == 0:
                e = "Empty '()' brackets in string"
                raise ParseError(e)
            items.append( chars ) # add the nested list
            layer_pattern = False
            contains_nest = True
        # Look for a '{}'
        elif char == "{":
            # Parse the contents of the brackets if found
            j = string.index("}", start=i+1)
            s = string[i+1:j]
            i = j
            chars, _ = feed(s)
            if len(chars) == 0:
                e = "Empty '{}' brackets in string"
                raise ParseError(e)
            # Curly braces become a random choice between their contents
            items.append( braces_type(chars) )
            layer_pattern = False
        # Look for a '[]'
        elif char == "[":
            j = string.index("]", start=i+1)
            s = string[i+1:j]
            i = j
            chars, contains_nest = feed(s)
            if len(chars) == 0:
                e = "Empty '[]' brackets in string"
                raise ParseError(e)
            # Un-nest
            if contains_nest:
                # May contain sub-nests, so re-parse with calculated duration
                new_chars = []
                largest_item = max([len(ch) for ch in chars])
                for num in range(largest_item):
                    new_chars.append(square_type([modi(ch, num) for ch in chars]))
                items.append( new_chars )
                layer_pattern = False
            else:
                new_chars = []
                for char in chars:
                    new_chars.append(char)
                items.append( square_type(new_chars) )
                layer_pattern = False
        # Add single character to list
        elif char not in ")]}>|":
            items.append( char )
            layer_pattern = False
        # Increase iterator
        i += 1
    return items, contains_nest
@PatternMethod
def fromString(self, string):
    """Replace this pattern's data with the result of parsing *string*.

    The raw pattern string is run through ``ParsePlayString`` and the
    pattern is then rebuilt via ``self.make()``.  Returns ``self`` so
    calls can be chained.
    """
    self.data = ParsePlayString(string)
    self.make()
    return self
from renardo_lib.lib.Utils import LCM
import itertools
"""
Module for key operations on Python lists or FoxDot Patterns
"""
def DominantPattern(*patterns):
    """Return the pattern with the smallest ``WEIGHT`` attribute.

    Arguments that do not define ``WEIGHT`` are ignored.  Raises
    ``ValueError`` (from ``min``) when no argument carries a ``WEIGHT``.
    """
    weighted = [p for p in patterns if hasattr(p, "WEIGHT")]
    return min(weighted, key=lambda pattern: pattern.WEIGHT)
class POperand:
    """Applies a two-argument scalar function element-wise across two
    (possibly different-length) patterns."""

    def __init__(self, func):
        # Scalar operation applied pairwise to pattern elements.
        self.operate = func

    def __call__(self, A, B):
        """A is always a Pattern or PGroup."""
        # Empty left-hand pattern short-circuits: wrap B in A's class.
        if len(A) == 0:
            return A.__class__(B)
        # Work in the class of the dominant (lowest-WEIGHT) pattern.
        key = DominantPattern(A, B)
        cls = key.__class__
        # Instead of converting the dominant to its own class, make a true_copy?
        A = cls(A)
        B = cls(B)
        # Cycle both datasets over the least common multiple of their lengths
        # so every pairing of elements is produced exactly once.
        total = LCM(len(A.get_data()), len(B.get_data()))
        gen_a = itertools.cycle(A.get_data())
        gen_b = itertools.cycle(B.get_data())
        values = []
        for _ in range(total):
            try:
                try:
                    result = self.operate(next(gen_a), next(gen_b))
                except TypeError:
                    raise TypeError("Cannot operate on {!r} and {!r}".format(A, B))
            except ZeroDivisionError:
                # Division by zero yields 0 rather than aborting the pattern.
                result = 0
            values.append(result)
        # Copy the dominant pattern and install the freshly computed data.
        return key.true_copy(values)
# General operations
def Nil(a, b):
    """Ignore *b* and return *a* unchanged."""
    return a

def Add(a, b):
    """Return ``a + b``."""
    return a + b

def Sub(a, b):
    """Return ``a - b``."""
    return a - b

def Mul(a, b):
    """Return ``a * b``."""
    return a * b

def Div(a, b):
    """Return ``a / b``."""
    return a / b

def Mod(a, b):
    """Return ``a % b``."""
    return a % b

def Pow(a, b):
    """Return ``a ** b``."""
    return a ** b

def Get(a, b):
    """Return ``a[b]``; if *a* is not subscriptable, return *a*."""
    try:
        return a[b]
    except TypeError:
        return a

def FloorDiv(a, b):
    """Return ``a // b``."""
    return a // b

def Xor(a, b):
    """Return ``a ^ b``."""
    return a ^ b

def Or(a, b):
    """Return ``a | b``."""
    return a | b

def rAdd(a, b):
    """Reversed add: ``b + a``."""
    return b + a

def rGet(a, b):
    """Reversed Get: ``b[a]``; if *b* is not subscriptable, return *b*."""
    try:
        return b[a]
    except TypeError:
        return b

def rSub(a, b):
    """Reversed subtract: ``b - a``."""
    return b - a

def rMul(a, b):
    """Reversed multiply: ``b * a``."""
    return b * a

def rDiv(a, b):
    """Reversed divide: ``b / a``."""
    return b / a

def rMod(a, b):
    """Reversed modulo: ``b % a``."""
    return b % a

def rPow(a, b):
    """Reversed power: ``b ** a``."""
    return b ** a

def rFloorDiv(a, b):
    """Reversed floor divide: ``b // a``."""
    return b // a

def rXor(a, b):
    """Reversed xor: ``b ^ a``."""
    return b ^ a

def rOr(a, b):
    """Reversed or: ``b | a``."""
    return b | a
# Pattern operations: element-wise appliers built from the scalar ops
# above.  The plain names wrap the forward operation (A op B); the
# ``2``-suffixed variants wrap the r-prefixed (operand-swapped) ops,
# i.e. they compute (B op A).
PAdd = POperand(Add)
PSub = POperand(Sub)
PSub2 = POperand(rSub)
PMul = POperand(Mul)
PDiv = POperand(Div)
PDiv2 = POperand(rDiv)
PFloor = POperand(FloorDiv)
PFloor2 = POperand(rFloorDiv)
PMod = POperand(Mod)
PMod2 = POperand(rMod)
PPow = POperand(Pow)
PPow2 = POperand(rPow)
PGet = POperand(Get)
# Pattern comparisons.  Rewritten from single-line lambda assignments
# (PEP 8 E731) into defs with guard clauses; semantics are unchanged.
def PEq(a, b):
    """Pattern equality.

    True only when *a* and *b* are the same class, the same length, and
    every pair of elements compares equal.
    """
    if a.__class__ != b.__class__:
        return False
    if len(a) != len(b):
        return False
    # int() collapses rich comparison results (pattern elements may
    # override __eq__ to return non-bool objects) to plain truth values.
    return all(int(a[i] == b[i]) for i in range(len(a)))

def PNe(a, b):
    """Pattern inequality.

    True when the classes differ, the lengths differ, or any pair of
    elements compares unequal.
    """
    if a.__class__ != b.__class__:
        return True
    if len(a) != len(b):
        return True
    return any(int(a[i] != b[i]) for i in range(len(a)))
import logging
from json import dumps as json_dumps
from typing import Any
from typing import cast
from typing import Dict
from typing import List
from typing import Optional
from warnings import warn
import aiohttp
from marshmallow.schema import Schema
from . import models
from . import schemas
from .exceptions import KamereonResponseException
_LOGGER = logging.getLogger(__name__)
# GET endpoints exposed via the "kca" car adapter, mapped to the API
# version used when building the request URL (see get_vehicle_data).
# The empty-string key addresses the adapter root itself.
_KCA_GET_ENDPOINTS: Dict[str, Any] = {
    "": {"version": 2},
    "battery-status": {"version": 2},
    "charge-history": {"version": 1},
    "charge-mode": {"version": 1},
    "charges": {"version": 1},
    "charging-settings": {"version": 1},
    "cockpit": {"version": 2},
    "hvac-history": {"version": 1},
    "hvac-sessions": {"version": 1},
    "hvac-status": {"version": 1},
    "hvac-settings": {"version": 1},
    "location": {"version": 1},
    "lock-status": {"version": 1},
    "notification-settings": {"version": 1},
    "res-state": {"version": 1},
}
# POST (action) endpoints for the "kca" adapter; "type" becomes the JSON
# payload's data.type discriminator (see set_vehicle_action).
_KCA_POST_ENDPOINTS: Dict[str, Any] = {
    "actions/charge-mode": {"version": 1, "type": "ChargeMode"},
    "actions/charge-schedule": {"version": 2, "type": "ChargeSchedule"},
    "actions/charging-start": {"version": 1, "type": "ChargingStart"},
    "actions/hvac-schedule": {"version": 2, "type": "HvacSchedule"},
    "actions/hvac-start": {"version": 1, "type": "HvacStart"},
}
# POST endpoints routed through the "kcm" adapter instead of "kca".
_KCM_POST_ENDPOINTS: Dict[str, Any] = {
    "charge/pause-resume": {"version": 1, "type": "ChargePauseResume"},
}
# Deprecated from 0.1.8 - kept for compatibility
DATA_ENDPOINTS = _KCA_GET_ENDPOINTS
ACTION_ENDPOINTS = _KCA_POST_ENDPOINTS
def get_commerce_url(root_url: str) -> str:
    """Get the Kamereon base commerce url."""
    return root_url + "/commerce/v1"

def get_person_url(root_url: str, person_id: str) -> str:
    """Get the url to the person."""
    return "/".join((get_commerce_url(root_url), "persons", person_id))

def get_account_url(root_url: str, account_id: str) -> str:
    """Get the url to the account."""
    return "/".join((get_commerce_url(root_url), "accounts", account_id))

def get_car_adapter_url(
    root_url: str, account_id: str, version: int, vin: str, *, adapter_type: str = "kca"
) -> str:
    """Get the url to the car adapter (kca by default, kcm on request)."""
    if adapter_type == "kcm":
        template = "{base}/kamereon/kcm/v{version}/vehicles/{vin}"
    else:
        template = "{base}/kamereon/kca/car-adapter/v{version}/cars/{vin}"
    return template.format(
        base=get_account_url(root_url, account_id), version=version, vin=vin
    )

def get_contracts_url(root_url: str, account_id: str, vin: str) -> str:
    """Get the url to the car contracts."""
    return get_account_url(root_url, account_id) + f"/vehicles/{vin}/contracts"
def get_required_contracts(endpoint: str) -> str:  # pragma: no cover
    """Get the required contracts for the specified endpoint.

    Deprecated in 0.1.3: contract codes are country-specific and can't
    be used to guess requirements, so an empty string is always returned.
    """
    warn("This method is deprecated.", DeprecationWarning, stacklevel=2)
    return ""

def has_required_contracts(
    contracts: List[models.KamereonVehicleContract], endpoint: str
) -> bool:
    """Check if vehicle has contract for endpoint.

    Deprecated in 0.1.3 for the same reason as get_required_contracts;
    always reports True.
    """
    warn("This method is deprecated.", DeprecationWarning, stacklevel=2)
    return True  # pragma: no cover
async def request(
    websession: aiohttp.ClientSession,
    method: str,
    url: str,
    api_key: str,
    gigya_jwt: str,
    params: Dict[str, str],
    json: Optional[Dict[str, Any]] = None,
    schema: Optional[Schema] = None,
    *,
    wrap_array_in: Optional[str] = None,
) -> models.KamereonResponse:
    """Process Kamereon HTTP request.

    Args:
        websession: aiohttp session used to perform the request.
        method: HTTP method ("GET", "POST", ...).
        url: full request url.
        api_key: Kamereon api key, sent as the ``apikey`` header.
        gigya_jwt: Gigya JWT, sent as the ``x-gigya-id_token`` header.
        params: query-string parameters.
        json: optional JSON body (for POST requests).
        schema: marshmallow schema used to parse the response body;
            defaults to ``KamereonResponseSchema``.
        wrap_array_in: key under which a top-level JSON array response is
            wrapped before parsing (defaults to "data").

    Returns:
        The response parsed through *schema*.

    Raises:
        KamereonResponseException: when the body is not JSON, or the
            parsed response carries a Kamereon error code.
    """
    schema = schema or schemas.KamereonResponseSchema
    headers = {
        "Content-type": "application/vnd.api+json",
        "apikey": api_key,
        "x-gigya-id_token": gigya_jwt,
    }
    async with websession.request(
        method,
        url,
        headers=headers,
        params=params,
        json=json,
    ) as http_response:
        response_text = await http_response.text()
        if json:
            _LOGGER.debug(
                "Send Kamereon %s request to %s with body: %s",
                method,
                http_response.url,
                json_dumps(json),
            )
        _LOGGER.debug(
            "Received Kamereon response %s on %s to %s: %s",
            http_response.status,
            method,
            http_response.url,
            response_text,
        )
        # Some endpoints return arrays instead of objects.
        # These need to be wrapped in an object before schema parsing.
        if response_text.startswith("["):
            response_text = (
                f'{{"{wrap_array_in or "data"}": {response_text}}}'  # noqa: B907
            )
        if not response_text.startswith("{"):
            # Not JSON at all: surface an HTTP error if there is one,
            # otherwise report the unparseable payload.
            http_response.raise_for_status()
            raise KamereonResponseException("Invalid JSON", response_text)
        kamereon_response: models.KamereonResponse = schema.loads(response_text)
        # Check for Kamereon error (raises on the first error found)
        kamereon_response.raise_for_error_code()
        # Check for HTTP error
        http_response.raise_for_status()
        return kamereon_response
async def get_person(
    websession: aiohttp.ClientSession,
    root_url: str,
    api_key: str,
    gigya_jwt: str,
    country: str,
    person_id: str,
) -> models.KamereonPersonResponse:
    """GET to /persons/{person_id}."""
    response = await request(
        websession,
        "GET",
        get_person_url(root_url, person_id),
        api_key,
        gigya_jwt,
        params={"country": country},
        schema=schemas.KamereonPersonResponseSchema,
    )
    return cast(models.KamereonPersonResponse, response)
async def get_vehicle_contracts(
    websession: aiohttp.ClientSession,
    root_url: str,
    api_key: str,
    gigya_jwt: str,
    country: str,
    locale: str,
    account_id: str,
    vin: str,
) -> models.KamereonVehicleContractsResponse:
    """GET to /accounts/{accountId}/vehicles/{vin}/contracts."""
    query = {
        "country": country,
        "locale": locale,
        "brand": "RENAULT",
        "connectedServicesContracts": "true",
        "warranty": "true",
        "warrantyMaintenanceContracts": "true",
    }
    # This endpoint answers with a bare JSON array; request() wraps it
    # under "contractList" so the schema can parse it.
    response = await request(
        websession,
        "GET",
        get_contracts_url(root_url, account_id, vin),
        api_key,
        gigya_jwt,
        params=query,
        schema=schemas.KamereonVehicleContractsResponseSchema,
        wrap_array_in="contractList",
    )
    return cast(models.KamereonVehicleContractsResponse, response)

async def get_account_vehicles(
    websession: aiohttp.ClientSession,
    root_url: str,
    api_key: str,
    gigya_jwt: str,
    country: str,
    account_id: str,
) -> models.KamereonVehiclesResponse:
    """GET to /accounts/{account_id}/vehicles."""
    response = await request(
        websession,
        "GET",
        f"{get_account_url(root_url, account_id)}/vehicles",
        api_key,
        gigya_jwt,
        params={"country": country},
        schema=schemas.KamereonVehiclesResponseSchema,
    )
    return cast(models.KamereonVehiclesResponse, response)

async def get_vehicle_details(
    websession: aiohttp.ClientSession,
    root_url: str,
    api_key: str,
    gigya_jwt: str,
    country: str,
    account_id: str,
    vin: str,
) -> models.KamereonVehicleDetailsResponse:
    """GET to /accounts/{account_id}/vehicles/{vin}/details."""
    response = await request(
        websession,
        "GET",
        f"{get_account_url(root_url, account_id)}/vehicles/{vin}/details",
        api_key,
        gigya_jwt,
        params={"country": country},
        schema=schemas.KamereonVehicleDetailsResponseSchema,
    )
    return cast(models.KamereonVehicleDetailsResponse, response)
async def get_vehicle_data(
    websession: aiohttp.ClientSession,
    root_url: str,
    api_key: str,
    gigya_jwt: str,
    country: str,
    account_id: str,
    vin: str,
    endpoint: str,
    endpoint_version: Optional[int] = None,
    params: Optional[Dict[str, str]] = None,
    *,
    adapter_type: str = "kca",
) -> models.KamereonVehicleDataResponse:
    """GET to /v{endpoint_version}/cars/{vin}/{endpoint}."""
    # NOTE(review): endpoint metadata always comes from the KCA GET
    # table, even when adapter_type is "kcm" -- confirm no KCM GET
    # endpoints are ever expected here.
    endpoint_details = _KCA_GET_ENDPOINTS[endpoint]
    version = endpoint_version or int(endpoint_details["version"])
    base_url = get_car_adapter_url(
        root_url=root_url,
        account_id=account_id,
        version=version,
        vin=vin,
        adapter_type=adapter_type,
    )
    url = f"{base_url}/{endpoint}" if endpoint else base_url
    # Mutates a caller-supplied params dict in place (shared reference).
    query = params or {}
    query["country"] = country
    response = await request(
        websession,
        "GET",
        url,
        api_key,
        gigya_jwt,
        params=query,
        schema=schemas.KamereonVehicleDataResponseSchema,
    )
    return cast(models.KamereonVehicleDataResponse, response)
async def set_vehicle_action(
    websession: aiohttp.ClientSession,
    root_url: str,
    api_key: str,
    gigya_jwt: str,
    country: str,
    account_id: str,
    vin: str,
    endpoint: str,
    attributes: Dict[str, Any],
    endpoint_version: Optional[int] = None,
    data_type: Optional[Dict[str, Any]] = None,
    *,
    adapter_type: str = "kca",
) -> models.KamereonVehicleDataResponse:
    """POST to /v{endpoint_version}/cars/{vin}/{endpoint}."""
    if "/" not in endpoint:
        # Deprecated in 0.1.8: callers should pass the full endpoint path.
        warn(
            f"You should use the full endpoint: actions/{endpoint}.",
            DeprecationWarning,
            stacklevel=2,
        )
        endpoint = f"actions/{endpoint}"
    # KCM actions live in a separate endpoint table from KCA actions.
    endpoint_table = (
        _KCM_POST_ENDPOINTS if adapter_type == "kcm" else _KCA_POST_ENDPOINTS
    )
    endpoint_details = endpoint_table[endpoint]
    car_adapter_url = get_car_adapter_url(
        root_url=root_url,
        account_id=account_id,
        version=endpoint_version or int(endpoint_details["version"]),
        vin=vin,
        adapter_type=adapter_type,
    )
    payload = {
        "data": {
            "type": data_type or endpoint_details["type"],
            "attributes": attributes,
        }
    }
    response = await request(
        websession,
        "POST",
        f"{car_adapter_url}/{endpoint}",
        api_key,
        gigya_jwt,
        {"country": country},
        payload,
        schemas.KamereonVehicleDataResponseSchema,
    )
    return cast(models.KamereonVehicleDataResponse, response)
import json
from dataclasses import dataclass
from typing import Any
from typing import cast
from typing import Dict
from typing import List
from typing import Optional
from marshmallow.schema import Schema
from . import enums
from . import exceptions
from . import helpers
from .enums import AssetPictureSize
from renault_api.models import BaseModel
# Known Kamereon error codes mapped to the dedicated exception class
# raised by KamereonResponseError.raise_for_error_code.  (The name is
# spelled "ERRRORS" upstream; kept unchanged as it may be imported
# elsewhere -- TODO confirm before renaming.)
COMMON_ERRRORS: List[Dict[str, Any]] = [
    {
        "errorCode": "err.func.400",
        "error_type": exceptions.InvalidInputException,
    },
    {
        "errorCode": "err.func.403",
        "error_type": exceptions.AccessDeniedException,
    },
    {
        "errorCode": "err.tech.500",
        "error_type": exceptions.InvalidUpstreamException,
    },
    {
        "errorCode": "err.tech.501",
        "error_type": exceptions.NotSupportedException,
    },
    {
        "errorCode": "err.func.wired.notFound",
        "error_type": exceptions.ResourceNotFoundException,
    },
    {
        "errorCode": "err.tech.wired.kamereon-proxy",
        "error_type": exceptions.FailedForwardException,
    },
    {
        "errorCode": "err.func.wired.overloaded",
        "error_type": exceptions.QuotaLimitException,
    },
]
# Per-model quirk flags, keyed by Renault model code.  Models not listed
# fall back to the defaults hard-coded in the accessor methods
# (e.g. supports_endpoint defaults to True, reports-in-watts to False).
VEHICLE_SPECIFICATIONS: Dict[str, Dict[str, Any]] = {
    "X101VE": {  # ZOE phase 1
        "reports-charge-session-durations-in-minutes": True,
        "reports-in-watts": True,
        "support-endpoint-location": False,
        "support-endpoint-lock-status": False,
    },
    "X102VE": {  # ZOE phase 2
        "warns-on-method-set_ac_stop": "Action `cancel` on endpoint `hvac-start` may not be supported on this model.",  # noqa
    },
    "XJA1VP": {  # CLIO V
        "support-endpoint-hvac-status": False,
    },
    "XJB1SU": {  # CAPTUR II
        "support-endpoint-hvac-status": False,
    },
    "XBG1VE": {  # DACIA SPRING
        "control-charge-via-kcm": True,
    },
    "XCB1VE": {  # MEGANE E-TECH
        "support-endpoint-lock-status": False,
    },
}
# Same quirk flags, but keyed by car gateway code instead of model code
# (used by KamereonVehicleCarAdapterData).
GATEWAY_SPECIFICATIONS: Dict[str, Dict[str, Any]] = {
    "GDC": {  # ZOE phase 1
        "reports-charge-session-durations-in-minutes": True,
        "reports-in-watts": True,
        "support-endpoint-location": False,
        "support-endpoint-lock-status": False,
    },
}
@dataclass
class KamereonResponseError(BaseModel):
    """Kamereon response error (code + optional message)."""

    # Machine-readable code, e.g. "err.func.403".
    errorCode: Optional[str]
    # Human-readable message; sometimes itself a JSON document with a
    # nested "errors" array (see get_error_details).
    errorMessage: Optional[str]

    def raise_for_error_code(self) -> None:
        """Raise the exception matching this error's code.

        Codes listed in COMMON_ERRRORS raise their dedicated exception
        type; anything else falls back to KamereonResponseException.
        """
        error_details = self.get_error_details()
        for common_error in COMMON_ERRRORS:
            if self.errorCode == common_error["errorCode"]:
                error_type = common_error["error_type"]
                raise error_type(self.errorCode, error_details)
        raise exceptions.KamereonResponseException(
            self.errorCode, error_details
        )  # pragma: no cover

    def get_error_details(self) -> Optional[str]:
        """Extract the error details sometimes hidden inside nested JSON."""
        try:
            error_details = json.loads(self.errorMessage or "{}")
        except json.JSONDecodeError:
            # Not JSON: the message itself is the detail.
            return self.errorMessage
        error_descriptions = []
        for inner_error in error_details.get("errors", []):
            # Join title / source pointer / detail, skipping absent parts.
            error_description = " ".join(
                filter(
                    None,
                    [
                        inner_error.get("title"),
                        inner_error.get("source", {}).get("pointer"),
                        inner_error.get("detail"),
                    ],
                )
            )
            error_descriptions.append(error_description)
        # Fall back to the raw message when no descriptions were built.
        return ", ".join(error_descriptions) or self.errorMessage
@dataclass
class KamereonResponse(BaseModel):
    """Base Kamereon response: carries the optional error list."""

    errors: Optional[List[KamereonResponseError]]

    def raise_for_error_code(self) -> None:
        """Raise exception if errors found in the response."""
        for error in self.errors or []:
            # Until we have a sample for multiple errors, just raise on first one
            error.raise_for_error_code()

@dataclass
class KamereonPersonAccount(BaseModel):
    """Kamereon person account data."""

    accountId: Optional[str]
    # Account type/status strings as returned by the API (value sets not
    # shown in this module).
    accountType: Optional[str]
    accountStatus: Optional[str]

@dataclass
class KamereonPersonResponse(KamereonResponse):
    """Kamereon response to GET on /persons/{gigya_person_id}."""

    accounts: Optional[List[KamereonPersonAccount]]

@dataclass
class KamereonVehicleDetailsGroup(BaseModel):
    """Kamereon vehicle details group data (code/label/group triple)."""

    code: Optional[str]
    label: Optional[str]
    group: Optional[str]
@dataclass
class KamereonVehicleDetails(BaseModel):
    """Kamereon vehicle details.

    Static description of a vehicle (identity, model/energy groups,
    picture assets) plus per-model capability lookups backed by
    VEHICLE_SPECIFICATIONS.
    """

    vin: Optional[str]
    registrationNumber: Optional[str]
    radioCode: Optional[str]
    brand: Optional[KamereonVehicleDetailsGroup]
    model: Optional[KamereonVehicleDetailsGroup]
    energy: Optional[KamereonVehicleDetailsGroup]
    engineEnergyType: Optional[str]
    assets: Optional[List[Dict[str, Any]]]

    def get_energy_code(self) -> Optional[str]:
        """Return vehicle energy code."""
        return self.energy.code if self.energy else None

    def get_brand_label(self) -> Optional[str]:
        """Return vehicle brand label."""
        return self.brand.label if self.brand else None

    def get_model_code(self) -> Optional[str]:
        """Return vehicle model code."""
        return self.model.code if self.model else None

    def get_model_label(self) -> Optional[str]:
        """Return vehicle model label."""
        return self.model.label if self.model else None

    def get_asset(self, asset_type: str) -> Optional[Dict[str, Any]]:
        """Return the first asset with the given assetType, or None.

        Fix: the previous implementation called next() without a default
        and raised StopIteration when no asset matched, contradicting
        the Optional return annotation.
        """
        return next(
            (
                asset
                for asset in self.assets or []
                if asset.get("assetType") == asset_type
            ),
            None,
        )

    def get_picture(
        self, size: AssetPictureSize = AssetPictureSize.LARGE
    ) -> Optional[str]:
        """Return the vehicle picture url for *size*, or None if absent."""
        asset: Dict[str, Any] = self.get_asset("PICTURE") or {}
        resolution_type = f"ONE_MYRENAULT_{size.name}"
        # Same StopIteration fix as get_asset: a missing renditions list
        # or an unmatched resolution now yields None instead of raising.
        rendition: Optional[Dict[str, str]] = next(
            (
                rendition
                for rendition in asset.get("renditions", [])
                if rendition.get("resolutionType") == resolution_type
            ),
            None,
        )
        return rendition.get("url") if rendition else None

    def uses_electricity(self) -> bool:
        """Return True if model uses electricity."""
        energy_type = self.engineEnergyType or self.get_energy_code()
        return energy_type in ("ELEC", "ELECX", "PHEV")

    def uses_fuel(self) -> bool:
        """Return True if model uses fuel."""
        energy_type = self.engineEnergyType or self.get_energy_code()
        return energy_type in ("OTHER", "PHEV", "HEV")

    def _model_spec(self, key: str, default: Any) -> Any:
        """Look up a per-model quirk flag, falling back to *default*."""
        if self.model and self.model.code:
            return VEHICLE_SPECIFICATIONS.get(self.model.code, {}).get(key, default)
        return default  # pragma: no cover

    def reports_charge_session_durations_in_minutes(self) -> bool:
        """Return True if model reports history durations in minutes.

        Defaults to False (= seconds) for unknown vehicles.
        """
        return self._model_spec("reports-charge-session-durations-in-minutes", False)

    def reports_charging_power_in_watts(self) -> bool:
        """Return True if model reports chargingInstantaneousPower in watts.

        Defaults to False for unknown vehicles.
        """
        return self._model_spec("reports-in-watts", False)

    def supports_endpoint(self, endpoint: str) -> bool:
        """Return True if model supports the specified endpoint.

        Defaults to True for unknown vehicles.
        """
        return self._model_spec(f"support-endpoint-{endpoint}", True)

    def warns_on_method(self, method: str) -> Optional[str]:
        """Return warning message if model triggers a warning on the call.

        Defaults to None for unknown vehicles.
        """
        return self._model_spec(f"warns-on-method-{method}", None)

    def controls_action_via_kcm(self, action: str) -> bool:
        """Return True if model routes the action through the KCM adapter.

        Defaults to False for unknown vehicles.
        """
        return self._model_spec(f"control-{action}-via-kcm", False)
@dataclass
class KamereonVehiclesLink(BaseModel):
    """Kamereon vehicles link data: pairs a VIN with its details."""

    vin: Optional[str]
    vehicleDetails: Optional[KamereonVehicleDetails]

@dataclass
class KamereonVehiclesResponse(KamereonResponse):
    """Kamereon response to GET on /accounts/{account_id}/vehicles."""

    accountId: Optional[str]
    country: Optional[str]
    vehicleLinks: Optional[List[KamereonVehiclesLink]]

@dataclass
class KamereonVehicleDetailsResponse(KamereonResponse, KamereonVehicleDetails):
    """Kamereon response to GET on /accounts/{account_id}/vehicles/{vin}/details."""

@dataclass
class KamereonVehicleDataAttributes(BaseModel):
    """Base class for typed Kamereon vehicle data attributes."""

@dataclass
class KamereonVehicleContract(BaseModel):
    """Kamereon vehicle contract."""

    type: Optional[str]
    contractId: Optional[str]
    code: Optional[str]
    group: Optional[str]
    durationMonths: Optional[int]
    # Date strings as returned by the API (format not validated here).
    startDate: Optional[str]
    endDate: Optional[str]
    status: Optional[str]
    statusLabel: Optional[str]
    description: Optional[str]

@dataclass
class KamereonVehicleContractsResponse(KamereonResponse):
    """Kamereon response to GET on /accounts/{accountId}/vehicles/{vin}/contracts."""

    contractList: Optional[List[KamereonVehicleContract]]

@dataclass
class KamereonVehicleData(BaseModel):
    """Kamereon vehicle data envelope: type, id and raw attributes."""

    type: Optional[str]
    id: Optional[str]
    attributes: Optional[Dict[str, Any]]

@dataclass
class KamereonVehicleDataResponse(KamereonResponse):
    """Kamereon response to GET/POST on .../cars/{vin}/{type}."""

    data: Optional[KamereonVehicleData]

    def get_attributes(self, schema: Schema) -> Optional[KamereonVehicleDataAttributes]:
        """Parse the raw attribute dict with *schema* into a typed object.

        Returns None when the response carries no attributes.
        """
        return (
            cast(KamereonVehicleDataAttributes, schema.load(self.data.attributes))
            if self.data and self.data.attributes is not None
            else None
        )
@dataclass
class KamereonVehicleBatteryStatusData(KamereonVehicleDataAttributes):
    """Kamereon vehicle battery-status data."""

    timestamp: Optional[str]
    batteryLevel: Optional[int]
    batteryTemperature: Optional[int]
    batteryAutonomy: Optional[int]
    batteryCapacity: Optional[int]
    batteryAvailableEnergy: Optional[int]
    # Raw integer code; decode via get_plug_status().
    plugStatus: Optional[int]
    # Raw code (float per upstream schema); decode via get_charging_status().
    chargingStatus: Optional[float]
    chargingRemainingTime: Optional[int]
    # Unit is watts or kilowatts depending on the model -- see
    # reports_charging_power_in_watts() on the details classes.
    chargingInstantaneousPower: Optional[float]

    def get_plug_status(self) -> Optional[enums.PlugState]:
        """Return plug status decoded as a PlugState enum (or None)."""
        try:
            return (
                enums.PlugState(self.plugStatus)
                if self.plugStatus is not None
                else None
            )
        except ValueError as err:  # pragma: no cover
            # should we return PlugState.NOT_AVAILABLE?
            raise exceptions.KamereonException(
                f"Unable to convert `{self.plugStatus}` to PlugState."
            ) from err

    def get_charging_status(self) -> Optional[enums.ChargeState]:
        """Return charging status decoded as a ChargeState enum (or None)."""
        try:
            return (
                enums.ChargeState(self.chargingStatus)
                if self.chargingStatus is not None
                else None
            )
        except ValueError as err:  # pragma: no cover
            # should we return ChargeState.NOT_AVAILABLE?
            raise exceptions.KamereonException(
                f"Unable to convert `{self.chargingStatus}` to ChargeState."
            ) from err
@dataclass
class KamereonVehicleLocationData(KamereonVehicleDataAttributes):
    """Kamereon vehicle data location attributes."""

    lastUpdateTime: Optional[str]
    gpsLatitude: Optional[float]
    gpsLongitude: Optional[float]

@dataclass
class KamereonVehicleHvacStatusData(KamereonVehicleDataAttributes):
    """Kamereon vehicle data hvac-status attributes."""

    lastUpdateTime: Optional[str]
    externalTemperature: Optional[float]
    hvacStatus: Optional[str]
    nextHvacStartDate: Optional[str]
    socThreshold: Optional[float]

@dataclass
class KamereonVehicleChargeModeData(KamereonVehicleDataAttributes):
    """Kamereon vehicle data charge-mode attributes."""

    chargeMode: Optional[str]

@dataclass
class KamereonVehicleCockpitData(KamereonVehicleDataAttributes):
    """Kamereon vehicle data cockpit attributes."""

    fuelAutonomy: Optional[float]
    fuelQuantity: Optional[float]
    totalMileage: Optional[float]

@dataclass
class KamereonVehicleLockStatusData(KamereonVehicleDataAttributes):
    """Kamereon vehicle data lock-status attributes."""

    lockStatus: Optional[str]
    doorStatusRearLeft: Optional[str]
    doorStatusRearRight: Optional[str]
    doorStatusDriver: Optional[str]
    doorStatusPassenger: Optional[str]
    hatchStatus: Optional[str]
    lastUpdateTime: Optional[str]

@dataclass
class KamereonVehicleResStateData(KamereonVehicleDataAttributes):
    """Kamereon vehicle data res-state attributes."""

    details: Optional[str]
    code: Optional[str]
@dataclass
class KamereonVehicleCarAdapterData(KamereonVehicleDataAttributes):
    """Kamereon vehicle car-adapter attributes.

    Gateway-level description of the vehicle; capability lookups here
    are keyed by carGateway (GATEWAY_SPECIFICATIONS) or modelCodeDetail
    (VEHICLE_SPECIFICATIONS).
    """

    vin: Optional[str]
    vehicleId: Optional[int]
    batteryCode: Optional[str]
    brand: Optional[str]
    canGeneration: Optional[str]
    carGateway: Optional[str]
    deliveryCountry: Optional[str]
    deliveryDate: Optional[str]
    energy: Optional[str]
    engineType: Optional[str]
    familyCode: Optional[str]
    firstRegistrationDate: Optional[str]
    gearbox: Optional[str]
    modelCode: Optional[str]
    modelCodeDetail: Optional[str]
    modelName: Optional[str]
    radioType: Optional[str]
    region: Optional[str]
    registrationCountry: Optional[str]
    registrationNumber: Optional[str]
    tcuCode: Optional[str]
    versionCode: Optional[str]
    privacyMode: Optional[str]
    privacyModeUpdateDate: Optional[str]
    svtFlag: Optional[bool]
    svtBlockFlag: Optional[bool]

    def uses_electricity(self) -> bool:
        """Return True if model uses electricity (energy == "electric")."""
        if self.energy in [
            "electric",
        ]:
            return True
        return False

    def uses_fuel(self) -> bool:
        """Return True if model uses fuel (energy == "gasoline")."""
        if self.energy in [
            "gasoline",
        ]:
            return True
        return False

    def reports_charging_power_in_watts(self) -> bool:
        """Return True if model reports chargingInstantaneousPower in watts.

        Defaults to False for unknown gateways.
        """
        if self.carGateway:
            return GATEWAY_SPECIFICATIONS.get(self.carGateway, {}).get(
                "reports-in-watts", False
            )
        return False  # pragma: no cover

    def supports_endpoint(self, endpoint: str) -> bool:
        """Return True if model supports the specified endpoint.

        Defaults to True for unknown gateways.
        """
        if self.carGateway:
            return GATEWAY_SPECIFICATIONS.get(self.carGateway, {}).get(
                f"support-endpoint-{endpoint}", True
            )
        return True  # pragma: no cover

    def controls_action_via_kcm(self, action: str) -> bool:
        """Return True if model routes the action through the KCM adapter.

        Defaults to False for unknown models.
        """
        if self.modelCodeDetail:
            return VEHICLE_SPECIFICATIONS.get(self.modelCodeDetail, {}).get(
                f"control-{action}-via-kcm", False
            )
        return False  # pragma: no cover
@dataclass
class ChargeDaySchedule(BaseModel):
    """Kamereon vehicle charge schedule for one day."""

    # Start-time string as supplied by the API.
    startTime: Optional[str]
    # Duration value passed through to helpers.get_end_time -- presumably
    # minutes; confirm against that helper.
    duration: Optional[int]

    def for_json(self) -> Dict[str, Any]:
        """Serialise to the dict shape expected by the API."""
        return {
            "startTime": self.startTime,
            "duration": self.duration,
        }

    def get_end_time(self) -> Optional[str]:
        """Return the end time (start + duration) via helpers.get_end_time."""
        if self.startTime is None:  # pragma: no cover
            return None
        return helpers.get_end_time(self.startTime, self.duration)

@dataclass
class ChargeSchedule(BaseModel):
    """Kamereon vehicle charge schedule for a week (one entry per day)."""

    id: Optional[int]
    activated: Optional[bool]
    monday: Optional[ChargeDaySchedule]
    tuesday: Optional[ChargeDaySchedule]
    wednesday: Optional[ChargeDaySchedule]
    thursday: Optional[ChargeDaySchedule]
    friday: Optional[ChargeDaySchedule]
    saturday: Optional[ChargeDaySchedule]
    sunday: Optional[ChargeDaySchedule]

    def for_json(self) -> Dict[str, Any]:
        """Serialise to the dict shape expected by the API.

        Days with no schedule are serialised as None.
        """
        result: Dict[str, Any] = {
            "id": self.id,
            "activated": self.activated,
        }
        for day in helpers.DAYS_OF_WEEK:
            day_spec: Optional[ChargeDaySchedule] = getattr(self, day, None)
            if day_spec is None:
                result[day] = day_spec
            else:
                result[day] = day_spec.for_json()
        return result

@dataclass
class HvacDaySchedule(BaseModel):
    """Kamereon vehicle hvac schedule for one day."""

    readyAtTime: Optional[str]

    def for_json(self) -> Dict[str, Optional[str]]:
        """Serialise to the dict shape expected by the API."""
        return {
            "readyAtTime": self.readyAtTime,
        }

@dataclass
class HvacSchedule(BaseModel):
    """Kamereon vehicle hvac schedule for a week (one entry per day)."""

    id: Optional[int]
    activated: Optional[bool]
    monday: Optional[HvacDaySchedule]
    tuesday: Optional[HvacDaySchedule]
    wednesday: Optional[HvacDaySchedule]
    thursday: Optional[HvacDaySchedule]
    friday: Optional[HvacDaySchedule]
    saturday: Optional[HvacDaySchedule]
    sunday: Optional[HvacDaySchedule]

    def for_json(self) -> Dict[str, Any]:
        """Serialise to the dict shape expected by the API.

        Days with no schedule are serialised as None.
        """
        result: Dict[str, Any] = {
            "id": self.id,
            "activated": self.activated,
        }
        for day in helpers.DAYS_OF_WEEK:
            day_spec: Optional[HvacDaySchedule] = getattr(self, day, None)
            if day_spec is None:
                result[day] = day_spec
            else:
                result[day] = day_spec.for_json()
        return result
@dataclass
class KamereonVehicleChargingSettingsData(KamereonVehicleDataAttributes):
    """Kamereon vehicle data charging-settings attributes."""

    mode: Optional[str]
    schedules: Optional[List[ChargeSchedule]]

    def update(self, args: Dict[str, Any]) -> None:
        """Update the schedule whose id matches ``args["id"]``.

        The matching schedule is modified in place via
        helpers.update_schedule; if none matches, a new schedule is
        created and appended.  Raises ValueError when no id is given.
        """
        if "id" not in args:  # pragma: no cover
            raise ValueError("id not provided for update.")
        if self.schedules is None:  # pragma: no cover
            self.schedules = []
        for schedule in self.schedules:
            if schedule.id == args["id"]:  # pragma: no branch
                helpers.update_schedule(schedule, args)
                return
        self.schedules.append(helpers.create_schedule(args))  # pragma: no cover

@dataclass
class KamereonVehicleHvacSettingsData(KamereonVehicleDataAttributes):
    """Kamereon vehicle data hvac-settings (mode+schedules) attributes."""

    mode: Optional[str]
    schedules: Optional[List[HvacSchedule]]

# The classes below are typed markers only: no fields are declared, so
# the raw attributes remain in KamereonVehicleData.attributes.
@dataclass
class KamereonVehicleNotificationSettingsData(KamereonVehicleDataAttributes):
    """Kamereon vehicle data notification-settings attributes."""

@dataclass
class KamereonVehicleChargeHistoryData(KamereonVehicleDataAttributes):
    """Kamereon vehicle data charge-history attributes."""

@dataclass
class KamereonVehicleChargesData(KamereonVehicleDataAttributes):
    """Kamereon vehicle data charges attributes."""

@dataclass
class KamereonVehicleHvacHistoryData(KamereonVehicleDataAttributes):
    """Kamereon vehicle data hvac-history attributes."""

@dataclass
class KamereonVehicleHvacSessionsData(KamereonVehicleDataAttributes):
    """Kamereon vehicle data hvac-sessions attributes."""

@dataclass
class KamereonVehicleHvacStartActionData(KamereonVehicleDataAttributes):
    """Kamereon vehicle action data hvac-start attributes."""

@dataclass
class KamereonVehicleHvacScheduleActionData(KamereonVehicleDataAttributes):
    """Kamereon vehicle action data hvac-schedule attributes."""

@dataclass
class KamereonVehicleChargeScheduleActionData(KamereonVehicleDataAttributes):
    """Kamereon vehicle action data charge-schedule attributes."""

@dataclass
class KamereonVehicleChargeModeActionData(KamereonVehicleDataAttributes):
    """Kamereon vehicle action data charge-mode attributes."""

@dataclass
class KamereonVehicleHvacModeActionData(KamereonVehicleDataAttributes):
    """Kamereon vehicle action data hvac-mode attributes."""

@dataclass
class KamereonVehicleChargingStartActionData(KamereonVehicleDataAttributes):
    """Kamereon vehicle action data charging-start attributes."""
import asyncio
import functools
from datetime import datetime
from datetime import timedelta
from typing import Any
from typing import Callable
from typing import Optional
from typing import Tuple
import aiohttp
import click
import dateparser
import tzlocal
from renault_api.exceptions import RenaultException
from renault_api.kamereon.helpers import DAYS_OF_WEEK
# strftime-style timestamp format; its consumers are outside this chunk.
_DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
def coro_with_websession(func: Callable[..., Any]) -> Callable[..., Any]:
    """Ensure the routine runs on an event loop with a websession.

    Wraps an async CLI command so it is executed via ``asyncio.run``
    with a fresh ``aiohttp.ClientSession`` injected as the
    ``websession`` keyword argument.
    """
    async def run_command(func: Callable[..., Any], *args: Any, **kwargs: Any) -> None:
        async with aiohttp.ClientSession() as websession:
            try:
                kwargs["websession"] = websession
                await func(*args, **kwargs)
            except RenaultException as exc:  # pragma: no cover
                # Surface API errors as clean CLI errors, not tracebacks.
                raise click.ClickException(str(exc)) from exc
            finally:
                # Wait for all transports to really close before the loop
                # shuts down (see create_aiohttp_closed_event).
                closed_event = create_aiohttp_closed_event(websession)
                await websession.close()
                await closed_event.wait()
    def wrapper(*args: Any, **kwargs: Any) -> None:
        asyncio.run(run_command(func, *args, **kwargs))
    # Preserve the wrapped command's name/docstring for click.
    return functools.update_wrapper(wrapper, func)
def days_of_week_option(helptext: str) -> Callable[..., Any]:
    """Add day of week string options.

    Returns a decorator that adds one ``--monday`` .. ``--sunday``
    option to the command; *helptext* is formatted with the capitalized
    day name.
    """
    def decorator(func: Callable[..., Any]) -> Callable[..., Any]:
        # Applied in reverse -- presumably so the options end up in
        # natural week order in --help (decorators stack bottom-up).
        for day in reversed(DAYS_OF_WEEK):
            func = click.option(
                f"--{day}",
                help=helptext.format(day.capitalize()),
            )(func)
        return func
    return decorator
def start_end_option(add_period: bool) -> Callable[..., Any]:
    """Add start/end options.

    Returns a decorator adding required ``--from``/``--to`` options
    (bound to the ``start``/``end`` parameters) and, when *add_period*
    is True, an optional ``--period`` choice of "day" or "month".
    """
    def decorator(func: Callable[..., Any]) -> Callable[..., Any]:
        func = click.option(
            "--from", "start", help="Date to start showing history from", required=True
        )(func)
        func = click.option(
            "--to",
            "end",
            help="Date to finish showing history at (cannot be in the future)",
            required=True,
        )(func)
        if add_period:
            func = click.option(
                "--period",
                default="month",
                help="Period over which to aggregate.",
                type=click.Choice(["day", "month"], case_sensitive=False),
            )(func)
        return func
    return decorator
def create_aiohttp_closed_event(
    websession: aiohttp.ClientSession,
) -> asyncio.Event: # pragma: no cover
    """Work around aiohttp issue that doesn't properly close transports on exit.
    See https://github.com/aio-libs/aiohttp/issues/1925#issuecomment-639080209
    Args:
        websession (aiohttp.ClientSession): session for which to generate the event.
    Returns:
        An event that will be set once all transports have been properly closed.
    """
    # Count of still-open SSL transports; the event fires when it reaches 0.
    transports = 0
    all_is_lost = asyncio.Event()
    # Wraps each transport's connection_lost so we can count down as they close.
    def connection_lost(exc, orig_lost): # type: ignore
        nonlocal transports
        try:
            orig_lost(exc)
        finally:
            transports -= 1
            if transports == 0:
                all_is_lost.set()
    def eof_received(orig_eof_received): # type: ignore
        try:
            orig_eof_received()
        except AttributeError:
            # It may happen that eof_received() is called after
            # _app_protocol and _transport are set to None.
            pass
    # NOTE(review): reaches into aiohttp private internals (connector._conns,
    # _ssl_protocol) — may break across aiohttp versions; verify on upgrade.
    for conn in websession.connector._conns.values(): # type: ignore
        for handler, _ in conn:
            proto = getattr(handler.transport, "_ssl_protocol", None)
            if proto is None:
                continue
            transports += 1
            # Monkey-patch the protocol callbacks, preserving the originals.
            orig_lost = proto.connection_lost
            orig_eof_received = proto.eof_received
            proto.connection_lost = functools.partial(
                connection_lost, orig_lost=orig_lost
            )
            proto.eof_received = functools.partial(
                eof_received, orig_eof_received=orig_eof_received
            )
    # No SSL transports at all: nothing to wait for, fire immediately.
    if transports == 0:
        all_is_lost.set()
    return all_is_lost
def parse_dates(start: str, end: str) -> Tuple[datetime, datetime]:
    """Parse the ``--from``/``--to`` string arguments into datetimes.

    Raises:
        ValueError: if either string cannot be interpreted as a date.
    """
    parsed = []
    for label, raw in (("start", start), ("end", end)):
        value = dateparser.parse(raw)
        if not value:  # pragma: no cover
            raise ValueError(f"Unable to parse `{raw}` into {label} datetime.")
        parsed.append(value)
    return (parsed[0], parsed[1])
def _timezone_offset() -> int:
    """Return the local timezone's UTC offset in minutes (0 if unknown)."""
    offset = tzlocal.get_localzone().utcoffset(datetime.now())
    if not offset:  # pragma: no cover
        return 0
    return int(offset.total_seconds() / 60)
def _format_tzdatetime(date_string: str) -> str:
    """Convert an ISO-8601 UTC timestamp into a local-time display string."""
    # ``fromisoformat`` does not accept the ``Z`` suffix, so normalise it first.
    parsed = datetime.fromisoformat(date_string.replace("Z", "+00:00"))
    local = parsed.astimezone(tzlocal.get_localzone())
    return str(local.strftime(_DATETIME_FORMAT))
def _format_tztime(time: str) -> str:
    """Convert a ``Thh:mmZ`` UTC time string into a local ``hh:mm`` display."""
    utc_minutes = int(time[1:3]) * 60 + int(time[4:6])
    hours, minutes = divmod(utc_minutes + _timezone_offset(), 60)
    hours %= 24  # Wrap around midnight so hours stay in 00-23.
    return f"{hours:02g}:{minutes:02g}"
def convert_minutes_to_tztime(minutes: int) -> str:
    """Convert minutes since local midnight to the ``Thh:mmZ`` UTC format."""
    hours, minutes = divmod(minutes - _timezone_offset(), 60)
    hours %= 24  # Wrap around midnight so hours stay in 00-23.
    return f"T{hours:02g}:{minutes:02g}Z"
def _format_seconds(secs: float) -> str:
d = timedelta(seconds=secs)
return str(d)
def get_display_value(
    value: Optional[Any] = None,
    unit: Optional[str] = None,
) -> str:
    """Format *value* for display, converting it according to *unit*.

    Known units ("tzdatetime", "tztime", "minutes", "seconds", "kW") trigger
    timezone/duration/power conversions; any other unit is appended verbatim.
    """
    if value is None:  # pragma: no cover
        return ""
    if unit is None:
        return str(value)
    if unit == "tzdatetime":
        return _format_tzdatetime(value)
    if unit == "tztime":
        return _format_tztime(value)
    if unit in ("minutes", "seconds"):
        seconds = value * 60 if unit == "minutes" else value
        return _format_seconds(seconds)
    if unit == "kW":
        # Raw power values arrive in watts.
        return f"{value / 1000:.2f} {unit}"
    return f"{value} {unit}"
import re
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
import aiohttp
import click
from tabulate import tabulate
from renault_api.cli import helpers
from renault_api.cli import renault_vehicle
from renault_api.kamereon.helpers import DAYS_OF_WEEK
from renault_api.kamereon.models import ChargeDaySchedule
from renault_api.kamereon.models import ChargeSchedule
from renault_api.renault_vehicle import RenaultVehicle
_DAY_SCHEDULE_REGEX = re.compile(
"(?P<prefix>T?)"
"(?P<hours>[0-2][0-9])"
":"
"(?P<minutes>[0-5][0-9])"
"(?P<suffix>Z?)"
","
"(?P<duration>[0-9]+)"
)
# Click command group; subcommands (show/set/activate/deactivate) attach to it.
@click.group()
def schedule() -> None:
    """Display or update charge schedules."""
    pass
@schedule.command()
@click.pass_obj
@helpers.coro_with_websession
async def show(
    ctx_data: Dict[str, Any],
    *,
    websession: aiohttp.ClientSession,
) -> None:
    """Display charge schedules."""
    vehicle = await renault_vehicle.get_vehicle(
        websession=websession, ctx_data=ctx_data
    )
    response = await vehicle.get_charging_settings()

    # Charging mode comes first, then one table per schedule.
    click.echo(f"Mode: {response.mode}")

    if not response.schedules:  # pragma: no cover
        click.echo("\nNo schedules found.")
        return

    table_headers = ["Day", "Start time", "End time", "Duration"]
    for item in response.schedules:
        active_marker = " [Active]" if item.activated else ""
        click.echo(f"\nSchedule ID: {item.id}{active_marker}")
        rows = [_format_charge_schedule(item, day) for day in DAYS_OF_WEEK]
        click.echo(tabulate(rows, headers=table_headers))
def _format_charge_schedule(schedule: ChargeSchedule, key: str) -> List[str]:
    """Build one table row (day, start, end, duration) for *key*'s day schedule."""
    day_schedule: Optional[ChargeDaySchedule] = getattr(schedule, key)
    if not day_schedule:  # pragma: no cover
        return [key.capitalize(), "-", "-", "-"]
    return [
        key.capitalize(),
        helpers.get_display_value(day_schedule.startTime, "tztime"),
        helpers.get_display_value(day_schedule.get_end_time(), "tztime"),
        helpers.get_display_value(day_schedule.duration, "minutes"),
    ]
async def _get_schedule(
    ctx_data: Dict[str, Any],
    websession: aiohttp.ClientSession,
    id: int,
) -> Tuple[RenaultVehicle, List[ChargeSchedule], ChargeSchedule]:
    """Fetch the vehicle's schedules and return the one matching *id*.

    Returns:
        The vehicle, the full schedule list, and the matching schedule.

    Raises:
        ValueError: if the vehicle reports no schedules at all.
        IndexError: if no schedule with the given id exists.
    """
    vehicle = await renault_vehicle.get_vehicle(
        websession=websession, ctx_data=ctx_data
    )
    response = await vehicle.get_charging_settings()

    if not response.schedules:  # pragma: no cover
        raise ValueError("No schedules found.")

    for item in response.schedules:  # pragma: no branch
        if item.id == id:
            return (vehicle, response.schedules, item)
    raise IndexError(f"Schedule id {id} not found.")  # pragma: no cover
@schedule.command()
@click.argument("id", type=int)
@helpers.days_of_week_option(
    helptext="{} schedule in format `hh:mm,duration` (for local timezone) "
    "or `Thh:mmZ,duration` (for utc) or `clear` to unset."
)
@click.pass_obj
@helpers.coro_with_websession
async def set(  # noqa: A001 - Click command name must stay `set`.
    ctx_data: Dict[str, Any],
    *,
    id: int,
    websession: aiohttp.ClientSession,
    **kwargs: Any,
) -> None:
    """Update charging schedule {ID}."""
    vehicle, all_schedules, target = await _get_schedule(
        websession=websession, ctx_data=ctx_data, id=id
    )
    # Mutate the selected schedule in place, then push the full list back.
    update_settings(target, **kwargs)
    result = await vehicle.set_charge_schedules(all_schedules)
    click.echo(result.raw_data)
@schedule.command()
@click.argument("id", type=int)
@click.pass_obj
@helpers.coro_with_websession
async def activate(
    ctx_data: Dict[str, Any],
    *,
    id: int,
    websession: aiohttp.ClientSession,
) -> None:
    """Activate charging schedule {ID}."""
    vehicle, all_schedules, target = await _get_schedule(
        websession=websession, ctx_data=ctx_data, id=id
    )
    # Flip the flag locally, then push the full schedule list back.
    target.activated = True
    result = await vehicle.set_charge_schedules(all_schedules)
    click.echo(result.raw_data)
@schedule.command()
@click.argument("id", type=int)
@click.pass_obj
@helpers.coro_with_websession
async def deactivate(
    ctx_data: Dict[str, Any],
    *,
    id: int,
    websession: aiohttp.ClientSession,
) -> None:
    """Deactivate charging schedule {ID}."""
    vehicle, all_schedules, target = await _get_schedule(
        websession=websession, ctx_data=ctx_data, id=id
    )
    # Flip the flag locally, then push the full schedule list back.
    target.activated = False
    result = await vehicle.set_charge_schedules(all_schedules)
    click.echo(result.raw_data)
def update_settings(
    schedule: ChargeSchedule,
    **kwargs: Any,
) -> None:
    """Apply per-day option values from the CLI onto *schedule* in place.

    ``clear`` removes the day's entry; any other non-empty value is parsed as
    a ``start,duration`` specification; missing or empty values are ignored.
    """
    for weekday in DAYS_OF_WEEK:
        if weekday not in kwargs:  # pragma: no cover
            continue
        raw_value = kwargs.pop(weekday)
        if raw_value == "clear":
            setattr(schedule, weekday, None)
        elif raw_value:
            start_time, duration = _parse_day_schedule(str(raw_value))
            setattr(
                schedule,
                weekday,
                ChargeDaySchedule(
                    raw_data={}, startTime=start_time, duration=duration
                ),
            )
def _parse_day_schedule(raw: str) -> Tuple[str, int]:
    """Parse ``hh:mm,duration`` / ``Thh:mmZ,duration`` into (start, duration).

    Local times (no T/Z markers) are converted to UTC; the T prefix and Z
    suffix must be given as a pair. Minutes and duration must lie on the
    15-minute grid the Renault API expects.

    Raises:
        ValueError: on malformed input or values outside the allowed grid.
    """
    match = _DAY_SCHEDULE_REGEX.match(raw)
    if not match:  # pragma: no cover
        raise ValueError(
            f"Invalid specification for charge schedule: `{raw}`. "
            "Should be of the form HH:MM,DURATION or THH:MMZ,DURATION"
        )

    hours = int(match.group("hours"))
    minutes = int(match.group("minutes"))
    duration = int(match.group("duration"))

    if hours > 23:  # pragma: no cover
        raise ValueError(
            f"Invalid specification for charge schedule: `{raw}`. "
            "Hours should be less than 24."
        )
    if minutes % 15:  # pragma: no cover
        raise ValueError(
            f"Invalid specification for charge schedule: `{raw}`. "
            "Minutes should be a multiple of 15."
        )
    if duration % 15:  # pragma: no cover
        raise ValueError(
            f"Invalid specification for charge schedule: `{raw}`. "
            "Duration should be a multiple of 15."
        )

    has_prefix = bool(match.group("prefix"))
    has_suffix = bool(match.group("suffix"))
    if has_prefix and has_suffix:
        # Already UTC: just normalise the formatting.
        formatted_start_time = f"T{hours:02g}:{minutes:02g}Z"
    elif not has_prefix and not has_suffix:
        # Local time: convert to UTC.
        formatted_start_time = helpers.convert_minutes_to_tztime(hours * 60 + minutes)
    else:  # pragma: no cover
        raise ValueError(
            f"Invalid specification for charge schedule: `{raw}`. "
            "If provided, both T and Z must be set."
        )
    return (formatted_start_time, duration)
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
import aiohttp
import click
from tabulate import tabulate
from renault_api.cli import helpers
from renault_api.cli import renault_vehicle
from renault_api.kamereon.models import KamereonVehicleDetails
@click.command()
@helpers.start_end_option(False)
@click.pass_obj
@helpers.coro_with_websession
async def sessions(
    ctx_data: Dict[str, Any],
    *,
    start: str,
    end: str,
    websession: aiohttp.ClientSession,
) -> None:
    """Display charge sessions."""
    parsed_start, parsed_end = helpers.parse_dates(start, end)
    vehicle = await renault_vehicle.get_vehicle(
        websession=websession, ctx_data=ctx_data
    )
    # Vehicle details are needed to know the duration unit for this model.
    details = await vehicle.get_details()
    response = await vehicle.get_charges(start=parsed_start, end=parsed_end)
    charges: List[Dict[str, Any]] = response.raw_data["charges"]

    if not charges:  # pragma: no cover
        click.echo("No data available.")
        return

    table_headers = [
        "Charge start",
        "Charge end",
        "Duration",
        "Power (kW)",
        "Started at",
        "Finished at",
        "Charge gained",
        "Energy gained",
        "Power level",
        "Status",
    ]
    rows = [_format_charges_item(charge, details) for charge in charges]
    click.echo(tabulate(rows, headers=table_headers))
def _format_charges_item(
    item: Dict[str, Any], details: KamereonVehicleDetails
) -> List[str]:
    """Build one table row for a single charge-session record."""
    # Some models report session durations in minutes rather than seconds.
    if details.reports_charge_session_durations_in_minutes():
        duration_unit = "minutes"
    else:
        duration_unit = "seconds"
    fields = [
        ("chargeStartDate", "tzdatetime"),
        ("chargeEndDate", "tzdatetime"),
        ("chargeDuration", duration_unit),
        ("chargeStartInstantaneousPower", "kW"),
        ("chargeStartBatteryLevel", "%"),
        ("chargeEndBatteryLevel", "%"),
        ("chargeBatteryLevelRecovered", "%"),
        ("chargeEnergyRecovered", "kWh"),
        ("chargePower", None),
        ("chargeEndStatus", None),
    ]
    return [helpers.get_display_value(item.get(key), unit) for key, unit in fields]
@click.command()
@helpers.start_end_option(True)
@click.pass_obj
@helpers.coro_with_websession
async def history(
    ctx_data: Dict[str, Any],
    *,
    start: str,
    end: str,
    period: Optional[str],
    websession: aiohttp.ClientSession,
) -> None:
    """Display charge history."""
    parsed_start, parsed_end = helpers.parse_dates(start, end)
    period = period or "month"
    vehicle = await renault_vehicle.get_vehicle(
        websession=websession, ctx_data=ctx_data
    )
    response = await vehicle.get_charge_history(
        start=parsed_start, end=parsed_end, period=period
    )
    summaries: List[Dict[str, Any]] = response.raw_data["chargeSummaries"]

    if not summaries:  # pragma: no cover
        click.echo("No data available.")
        return

    table_headers = [
        period.capitalize(),
        "Number of charges",
        "Total time charging",
        "Errors",
    ]
    rows = [_format_charge_history_item(entry, period) for entry in summaries]
    click.echo(tabulate(rows, headers=table_headers))
def _format_charge_history_item(item: Dict[str, Any], period: str) -> List[str]:
    """Build one table row for an aggregated charge-history summary."""
    keys_and_units = [
        (period, None),
        ("totalChargesNumber", None),
        ("totalChargesDuration", "minutes"),
        ("totalChargesErrors", None),
    ]
    return [helpers.get_display_value(item.get(k), u) for k, u in keys_and_units]
# renConstruct
[](https://circleci.com/gh/kobaltcore/renconstruct)
[](https://pepy.tech/project/renconstruct)
A utility script to automatically build Ren'Py applications for multiple platforms.
renConstruct can build distributions for Windows, Linux, macOS and Android, including extra processing steps pre- and post-build.
By default it supports notarization for macOS distributions, a memory limit increase for Windows distributions (using `LARGEADDRESSAWARE`) and cleanup of temporary build artifacts. In addition, a `patch` task is available which enables pre-build patching of Python Ren'Py source files.
Custom pre- and post-build steps can easily be added.
## Installation
renConstruct can be installed via pip:
```bash
$ pip install renconstruct
```
Please note that renConstruct requires Python 3 and will not provide backwards compatibility for Python 2 for the foreseeable future.
## Usage
renConstruct operates based on the following process flow:
- Ensure dependencies are installed
- Validate configuration file
- Install specific version of Ren'Py if necessary
- Run the `pre-build` stage of all active tasks
- Build the Android distribution if enabled
- Build the macOS and Windows/Linux distributions if enabled
- Run the `post-build` stage of all active tasks
In the default configuration, the following tasks are executed at the respective build stage:
- `pre-build`:
+ `None`
- `post-build`:
+ `set_extended_memory_limit`
+ `clean`
### Configuration
renConstruct requires a configuration file to be supplied containing the information required to complete the build process for the various platforms. An empty template is provided in this repository under the name `config.empty.yml`
It consists of the following sections:
#### `tasks`
- `path`: An optional path to a directory containing Python files with custom tasks
- `set_extended_memory_limit`: A value of `true` or `false` determining whether to run this task or not
- `notarize`: A value of `true` or `false` determining whether to run this task or not
- `clean`: A value of `true` or `false` determining whether to run this task or not
- `patch`: A value of `true` or `false` determining whether to run this task or not
#### `build`
- `win`: A value of `true` or `false` determining whether to build the Windows/Linux distribution or not
- `mac`: A value of `true` or `false` determining whether to build the macOS distribution or not
- `android`: A value of `true` or `false` determining whether to build the Android distribution or not
#### `renutil`
- `version`: The version of Ren'Py to use while building the distributions
- `registry`: A path where `renutil` data is stored. Mostly useful for CI environments
#### `renotize`
- `apple_id`: The e-Mail address belonging to the Apple ID you want to use for signing applications.
- `password`: An app-specific password generated through the [management portal](https://appleid.apple.com/account/manage) of your Apple ID.
- `identity`: The identity associated with your Developer Certificate which can be found in `Keychain Access` under the category "My Certificates". It starts with `Developer ID Application:`, however it suffices to provide the 10-character code in the title of the certificate.
- `bundle`: The internal name for your app. This is typically the reverse domain notation of your website plus your application name, i.e. `com.example.mygame`.
- `altool_extra`: An optional string that will be passed on to all `altool` runs in all commands. Useful for selecting an organization when your Apple ID belongs to multiple, for example. Typically you will not have to touch this and you can leave it empty.
### Default Tasks
renConstruct ships with several built-in tasks that enable project-independent functionality.
#### `Clean`
This task runs as the last task in the `post-build` stage. Its purpose is to remove unnecessary artifacts and temporary build data. It achieves this by running `renutil clean`, which will remove all temporary build artifacts from the Ren'Py instance used to build the project. In addition it removes non-universal APK artifacts from the output folder.
Given that the universal APK's are only a few Megabytes larger and more widely compatible, the author opines that shipping the universal versions is preferable in all cases.
#### `Notarize`
This task runs in the `post-build` stage and utilizes the `renotize` utility to automatically notarize the macOS build of the project, if it was generated. The process is entirely automatic, though it may take a few minutes depending on the size of the game and the load on Apple's notarization servers.
#### `Set Extended Memory Limit` (**Deprecated**)
This task runs in the `post-build` stage and operates on the Windows build of the project, if it was generated. Specifically, it modifies the executable file by setting the `LARGEADDRESSAWARE` flag in the header of the file, which enables the final build to access 4GB of RAM on Windows, instead of the typical 2GB when running in regular 32-Bit mode.
Given that Ren'Py 7.4+ now supports 64-Bit operation for all major desktop operating systems, including Windows, this task is now deprecated and only retained for legacy operation. If you're building with Ren'Py 7.4+ you do not need this task.
#### `Patch`
This task runs early in the `pre-build` stage and is capable of applying patch files (code diffs) to Python files of the Ren'Py instance that will be used to build the project. This allows for automatic application of game-specific patches before building, enabling customization at the engine level.
It is important to note that renConstruct does *not* build or rebuild Ren'Py after these patches are applied. As such it is only possible to patch runtime files, which effectively boils down to the pure-Python parts of Ren'Py. Patching the compiled parts is not supported and actively discouraged.
The task works by looking at a directory of patch files (specified in the configuration file) and applying them to equally-named files in the directory of the Ren'Py instance. For this to work, the structure of the patch directory must exactly mirror that of the paths to the actual file to patch, relative to the instance directory.
For example, if you wanted to patch the file `renpy/display/presplash.py` you would generate a patch file and name it `presplash.py`. After that you would put it into the directory `patches/renpy/display/presplash.py`.
Patch files are expected to match the `diff-match-patch` format for diffs. These types of files can be easily generated using the [`diffusor`](https://github.com/kobaltcore/diffusor) command-line utility.
renConstruct will automatically figure out the actual file to apply it to, as well as create a backup of the original file. If any part of the patching process fails, all changes are rolled back. It is also guaranteed that a file will only ever be patched once. Even if a file has been patched before and the patch file has changed, the new patch will be reliably applied to the original, unmodified file and the currently modified version will be replaced by the new version.
#### `Overwrite Keystore`
This task runs early in the `pre-build` stage and is capable of overwriting the `android.keystore` file in Ren'Py's Android packager with a custom one. This is useful because you typically want to sign all your APK's with the same key, as otherwise Google will prevent you from updating your game, since the signature will have changed.
This task works by reading the keystore file either from the environment variable `RC_KEYSTORE` or from a config option like this:
```yaml
overwrite_keystore:
keystore: "<my keystore>"
```
Since the keystore is a binary file, it can't just be pasted into a text document. To get around this issue, this task expects the keystore to be base64-encoded and for that representation to be stored either in the environment variable or in the config file, depending on which method you choose.
The value in the config file takes precedence if both options are given. If the task is active but neither option is given, it will error out.
### Custom Tasks
Custom tasks can easily be added using the `path` value. It should point to a directory containing Python files.
Each file can contain one or more task, which will be picked up by renConstruct.
A task is an object that looks like this:
```python
class DoSomethingTask():
PRIORITY = -100
AFFECTED_FILES = ["rapt/hash.txt"]
def __init__(self, name, config):
self.name = name
self.config = config
@staticmethod
def validate_config(config):
pass
def pre_build(self):
pass
def post_build(self):
pass
```
The name of the class must end with `Task` for it to be picked up by renConstruct.
Every custom task will automatically receive a setting in the config file based on the class name split on uppercase letters, converted to lowercase and joined by underscores.
The example task above would receive the config variable `do_something`.
A task can have a custom section in the config file. To support this, each task class can have an optional static method `validate_config`, which is passed the subsection of the config object pertaining to that task. It can then validate this config object in any way it sees fit, raising an Exception when an issue occurs.
The parsed config subsection is then integrated into the full config object, which is then passed back to the task upon initialization.
A task can have two methods `pre_build` and `post_build` (either or both is possible).
They will be called with the validated config object at the specified build stage.
At that point they can do whatever they want. As an example, a custom task could be built to automatically optimize image assets in the game directory before every build.
Each task also has a `PRIORITY` class attribute which has a default value of `0` and determines the order in which to run the tasks. A higher priority means that task will be executed earlier than others with a lower value. Both positive and negative values are possible.
As an example, the built-in `clean` task runs at `PRIORITY = -1000` to ensure it's always the last task to be run.
Optionally, a task can specify a list of `AFFECTED_FILES`. This is a list of paths to files relative to the SDK directory which this task modifies. Any files listed here will be backed up by `renconstruct` upon its first run and will be restored to their original state on every subsequent run, ensuring that the task always has the same base to work off of. This is largely meant to combat state corruption, for example by running the patch task and disabling it in a subsequent run.
### Example
```bash
renconstruct -c config.yml -i path/to/my-game/ -o out/
```
## Using `renConstruct` with Gitlab CI
A common use case for headless building of distributions is Continuous Integration.
Find below an example of a GitLab CI configuration file which should work for most projects.
```yaml
# Recent experience shows that using "python:latest" can cause issues
# because its definition may vary per runner. Always specify the exact
# version you intend to use to avoid issues.
image: python:3.8
variables:
PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip"
cache:
paths:
- .cache/pip
- venv/
before_script:
# Downloads renconstruct through pip
- pip install renconstruct
run:
script:
# Runs renconstruct in the project directory, saving the build outputs to a new folder called "artifacts"
# inside the project directory and utilises the file config.yml to specify renconstruct options.
- renconstruct -d -i $CI_PROJECT_DIR -o $CI_PROJECT_DIR/artifacts -c $CI_PROJECT_DIR/config.yml
artifacts:
paths:
# Saves the artifacts located in the "artifacts" folder to GitLab
- $CI_PROJECT_DIR/artifacts/**.*
```
### Command Line Interface
```
Usage: renconstruct.py [OPTIONS]
A utility script to automatically build Ren'Py applications for multiple
platforms.
Options:
-i, --input TEXT The path to the Ren'Py project to build [required]
-o, --output TEXT The directory to output build artifacts to [required]
-c, --config TEXT The configuration file for this run [required]
-d, --debug If given, shows debug information if
--help Show this message and exit.
```
# Disclaimer
renConstruct is a hobby project and not in any way affiliated with Ren'Py. This means that there is no way I can guarantee that it will work at all, or continue to work once it does.
| /renconstruct-1.7.2.tar.gz/renconstruct-1.7.2/README.md | 0.597608 | 0.937383 | README.md | pypi |
import os
from typing import Any, Optional
import requests
from requests import HTTPError
RENDER_API_BASE_URL: str = "https://api.render.com/v1/services"
APPLICATION_JSON: str = "application/json"
def get_bearer_token() -> Optional[str]:
    """Return the Render API token from the environment.

    Returns:
        The token stored in the ``RENDER_TOKEN`` environment variable,
        or None when it is not set.
    """
    return os.environ.get("RENDER_TOKEN")
def create_headers(is_post: bool = False) -> dict[str, str]:
    """Build the standard headers for a Render API call.

    Args:
        is_post: when True, also set a JSON Content-Type for write requests.

    Returns:
        Header mapping including the bearer-token Authorization entry.
    """
    headers = {
        "Accept": APPLICATION_JSON,
        "Authorization": f"Bearer {get_bearer_token()}",
    }
    if is_post:
        headers["Content-Type"] = APPLICATION_JSON
    return headers
def retrieve_env_from_render(service_id: str, limit: int = 20) -> Any:
    """Fetch all environment variables for a Render service, paginating.

    Args:
        service_id: id of the service to fetch the environment variables for.
        limit: page size for each request. Defaults to 20.

    Returns:
        A list of environment-variable records, or an error dict on failure.
    """
    base_url = f"{RENDER_API_BASE_URL}/{service_id}/env-vars?limit={limit}"
    collected = []
    next_url = base_url
    while next_url:
        with requests.get(next_url, headers=create_headers()) as response:
            try:
                response.raise_for_status()
            except HTTPError as exc:
                return handle_errors(exc.response.status_code)
            page = response.json()
            collected.extend(page)
            if len(page) == limit:
                # A full page means more results may follow; chase the cursor.
                next_url = f"{base_url}&cursor={page[-1]['cursor']}"
            else:
                next_url = ""
    return collected
def set_env_variables_for_service(service_id: str, env_vars: list[dict]) -> Any:
    """Replace the environment variables of a Render service.

    Args:
        service_id: id of the service to set vars for.
        env_vars: the full list of environment variables to store.

    Returns:
        The parsed API response, or an error dict on failure.
    """
    url = f"{RENDER_API_BASE_URL}/{service_id}/env-vars"
    with requests.put(url, headers=create_headers(True), json=env_vars) as response:
        try:
            response.raise_for_status()
        except HTTPError as exc:
            return handle_errors(exc.response.status_code)
        return response.json()
def fetch_services(limit=20, cursor=None) -> Any:
    """Fetch the services associated with the Render account, paginating.

    Args:
        limit: page size for each request. Defaults to 20.
        cursor: optional cursor passed to Render for the first page.

    Returns:
        A list of service records, or an error dict on failure.
    """
    first_cursor = f"&cursor={cursor}" if cursor is not None else ""
    next_url = f"{RENDER_API_BASE_URL}?limit={limit}{first_cursor}"
    collected = []
    while next_url:
        with requests.get(next_url, headers=create_headers()) as response:
            try:
                response.raise_for_status()
            except HTTPError as exc:
                return handle_errors(exc.response.status_code)
            page = response.json()
            collected.extend(page)
            if len(page) == limit:
                # A full page means more results may follow; chase the cursor.
                last_cursor = page[-1]["cursor"]
                next_url = f"{RENDER_API_BASE_URL}?limit={limit}&cursor={last_cursor}"
            else:
                next_url = ""
    return collected
def find_service_by_name(service_name: str) -> Any:
    """Find a service by name among the Render account's services.

    Fetches all services from Render and returns the listing whose service
    name matches *service_name*.

    Args:
        service_name: name of the service to search for.

    Returns:
        The matching service listing, or None if no service has that name.
    """
    data = fetch_services()
    # BUG FIX: the previous implementation looped ``while True`` over the same
    # list and never terminated when no service matched. A single pass is
    # sufficient because fetch_services() already returns every page.
    for svc_listing in data:
        if svc_listing["service"]["name"] == service_name:
            return svc_listing
    return None
def handle_errors(status_code) -> dict[str, str]:
    """Map an HTTP status code from the Render API to an error dict."""
    known_messages = {
        401: "401 - Unauthorized",
        406: "406 - request error",
        429: "429 - Exceeded service limit",
        500: "500 - Render service unavailable",
        503: "503 - Render service unavailable",
    }
    message = known_messages.get(status_code, f"{status_code} - unexpected error")
    return {"error": message}
from __future__ import print_function
from __future__ import absolute_import
import os
import glob
import json
import time
from collections import OrderedDict
from .sample_data import sample_nodes, sample_links
def rst(s, *repls):
    """Really stupid templates.

    Substitute each ``${name}`` placeholder in *s* with its value.

    Args:
        s: template string containing ``${name}`` placeholders.
        *repls: (name, value) pairs; values are converted with str().
    """
    out = s
    for key, val in repls:
        out = out.replace("${" + key + "}", str(val))
    return out
def _resolve_file(filename, search_dir=None):
"""Resolve a path
For absolute paths, do nothing, otherwise search in "search_dir"
and then in this file's directory
Raise an error if the file cannot be resolved"""
package_dir = os.path.dirname(os.path.abspath(__file__))
for d in ["", search_dir, package_dir]:
if d is None:
continue
f = os.path.join(d, filename)
if os.path.exists(f):
return f
raise IOError("File {} cannot be located".format(filename))
def rst_file_basic(filename, *repls):
    """Load *filename* (without include expansion) and run rst() on it."""
    with open(_resolve_file(filename)) as handle:
        template = handle.read()
    return rst(template, *repls)
def include_rst_files(filename, search_dir=None):
    """Recursively expand ``${file=...}`` inclusion directives.

    Uses traditional textual inclusion (like the C pre-processor), not
    template inheritance. Each ``${file=name}`` marker is replaced with the
    (recursively expanded) contents of that file, resolved relative to the
    including file's directory.
    """
    resolved = _resolve_file(filename, search_dir)
    base_dir = os.path.dirname(resolved)
    with open(resolved) as handle:
        text = handle.read()
    pieces = text.split("${file=")
    expanded = pieces[:1]
    for piece in pieces[1:]:
        # Each piece starts with "<included-name>}<rest-of-text>".
        included_name, remainder = piece.split("}", 1)
        expanded.append(include_rst_files(included_name, base_dir))
        expanded.append(remainder)
    return "".join(expanded)
def rst_file(filename, *repls):
    """Expand includes in *filename*, then apply rst() replacements."""
    expanded = include_rst_files(filename)
    return rst(expanded, *repls)
def render_d3_fdg(
    dat,
    title="A Force-Directed Graph",
    scale=1,
    force_scale=1,
    default_size=5,
    expand_scale=3,
    neighbor_scale=1.5,
    shrink_scale=1,
    show_labels=False,
    canvas_wh=(800, 800),
    slider_init_x=0.4,
    save_freq="null",
    move_new_nodes_to_centroid=True,
    click_function="click_function_focus_node",
    connections_html_function="indented_list",
    zooming_code="enable_zoom()",
    zoom_in=0.1,
    zoom_out=10,
    html_filename="fdg_base.html.template",
    custom_repls=(),
):
    """Render a d3 force-directed graph to /tmp/index.html and open it.

    Fills the HTML template's ``${...}`` placeholders with the given options
    plus the JSON-serialized graph, writes the result to /tmp/index.html and
    opens it with ``xdg-open`` (side effects: file write + external process).

    Args:
        dat: JSON-serializable graph data (dict with "nodes" and "links").
        html_filename: template resolved via _resolve_file / rst_file.
        custom_repls: extra (name, value) pairs appended to the replacements.
        (Remaining keyword arguments map 1:1 onto template placeholders.)
    """
    # JS expects lowercase boolean literals, not Python's True/False.
    move_new_nodes_to_centroid = "true" if move_new_nodes_to_centroid else "false"
    f = "/tmp/index.html"
    w, h = canvas_wh
    # NOTE: replacements are applied in order; "zooming_code" must be replaced
    # before "zoom_in"/"zoom_out" (its expansion may contain those names).
    s = rst_file(
        html_filename,
        ("title", title),
        ("scale", scale),
        ("force_scale", force_scale),
        ("default_size", default_size),
        ("expand_scale", expand_scale),
        ("neighbor_scale", neighbor_scale),
        ("shrink_scale", shrink_scale),
        ("show_labels", "true" if show_labels else "false"),
        ("canvasw", w),
        ("canvash", h),
        ("slider_init_x", slider_init_x),
        ("save_freq", save_freq),
        ("move_new_nodes_to_centroid", move_new_nodes_to_centroid),
        ("click_function", click_function),
        ("connections_html_function", connections_html_function),
        ("zooming_code", zooming_code),  # Make sure this comes first...
        ("zoom_in", zoom_in),
        ("zoom_out", zoom_out),
        ("graph", json.dumps(dat)),
        *custom_repls
    )
    with open(f, "w") as fid:
        fid.write(s)
    os.system("xdg-open " + f)
def fdg(nodes, links, **kwds):
    """High-level wrapper around render_d3_fdg
    nodes is a list of 2-tuples of strings like: (id, group)
    links is a list of 3-tuples of strings like: (source, target, value)
    source and target should be id's found in nodes
    all kwds are passed to render_d3_fdg
    """
    node_dicts = [
        OrderedDict([("id", _id), ("group", group)])
        for _id, group in nodes
    ]
    link_dicts = [
        OrderedDict([("source", src), ("target", tgt), ("value", val)])
        for src, tgt, val in links
    ]
    graph = OrderedDict([("nodes", node_dicts), ("links", link_dicts)])
    return render_d3_fdg(graph, **kwds)
def do_cmd(cmd):
    """Echo *cmd* to stdout, run it through the shell, and return its exit status."""
    print(cmd)
    status = os.system(cmd)
    return status
def file_stem(filename):
    """Return *filename* without its directory or final extension."""
    base = os.path.split(filename)[-1]
    return os.path.splitext(base)[0]
def string_between(s, before, after):
    """Return the text between the first *before* marker and the *after* marker."""
    following = s.split(before)[1]
    return following.split(after)[0]
def _generate_pngs(svg_base, dout, out_base, png_wh):
    """Convert each svg matching *svg_base* to a numbered png in *dout*
    at the given (width, height) using the inkscape command line."""
    w, h = png_wh
    for fin in glob.glob(svg_base):
        f = file_stem(fin)
        # The first download is saved without a number ("newesttree"), later
        # ones as "newesttree (N)"; normalize so every stem carries an index.
        f = "newesttree (0)" if f == "newesttree" else f  # For consistency
        i = int(string_between(f, "(", ")"))
        fout = os.path.join(dout, "{}_{:03d}.png".format(out_base, i))
        do_cmd(
            'inkscape -z -e "{fout}" -w {w} -h {h} "{fin}" -y=1'.format(
                fin=fin, fout=fout, w=w, h=h
            )
        )
def _generate_gif(dout, out_base, animation_delay=20, display=True):
    """Assemble the numbered pngs in *dout* into animation.gif via
    ImageMagick's convert, optionally opening it with eog."""
    png_pattern = os.path.join(dout, out_base + "_*.png")
    gif_path = os.path.join(dout, "animation.gif")
    do_cmd(
        "convert -delay {delay} -loop 0 {pngs} {gif}".format(
            delay=animation_delay, pngs=png_pattern, gif=gif_path
        )
    )
    if display:
        do_cmd("eog {}".format(gif_path))
def _handle_save_freq_options(save_freq, total_steps=300):
"""Handle multiple string options for save_freq (see docs for fdg_plus_images)
total_steps is the number of time steps d3 seems to takes in the sim (always 300?)
Returns:
save_freq: JS-friendly format (integer or 'null')
ignore_first: boolean flag that controls which svgs get processed to images"""
ignore_first = save_freq == "last"
sf_dict = {
None: "null",
"first_last": total_steps - 1,
"last": total_steps - 1,
"first": 10000000,
"all": 1,
-1: total_steps - 1, # ??
}
save_freq = sf_dict[save_freq] if save_freq in sf_dict else save_freq
return save_freq, ignore_first
def fdg_plus_images(
    nodes,
    links,
    save_freq="last",
    png_wh=(1200, 1200),
    sleep_time=10,
    out_base="out",
    dout="/tmp/",
    clean_downloads=True,
    clean_tmp=True,
    animate=True,
    animation_delay=20,
    display=True,
    **kwds
):
    """Render a D3 graph, save svg's at various points, and then use
    inkscape and ImageMagick (convert) to create pngs and then an
    animated gif
    Input kwd args:
        save_freq:  Control the number of svg's saved from the simulation
                    one of: None or 'null', 'last' (default), 'first', 'first_last', 'all'
                    or any integer
        png_wh: Canvas size of output pngs, default (1200, 1200)
        sleep_time: Time to wait before starting the png conversion, default 10s
        out_base: name of the output png files default 'out'
        dout: output directory, default '/tmp/'
        clean_downloads: When True (default), clear the Downloads folder of names like newesttree*.svg
        clean_tmp: When True (default), clear the output directory of names matching the output pattern
        animate: When True (default), create an animated gif from the generated pngs
        display: When True (default), open the gif with eog
    All other **kwds get passed to render_d3_fdg thru fdg"""
    # The browser saves svg snapshots to ~/Downloads as newesttree*.svg.
    svg_base = os.path.expanduser("~/Downloads/newesttree*.svg")
    save_freq, ignore_first = _handle_save_freq_options(save_freq)
    if clean_downloads:
        do_cmd("rm " + svg_base)
    if clean_tmp:
        do_cmd("rm {}_*.png".format(os.path.join(dout, out_base)))
    if ignore_first:
        # Only process the numbered saves ("newesttree (N).svg"), skipping
        # the initial un-numbered "newesttree.svg".
        svg_base = svg_base.replace("*", " (*)")
    fdg(nodes, links, save_freq=save_freq, **kwds)
    if save_freq != "null":
        # Give the browser time to finish writing the svg downloads.
        time.sleep(sleep_time)
        _generate_pngs(svg_base, dout, out_base, png_wh)
        if animate:
            _generate_gif(
                dout, out_base, animation_delay=animation_delay, display=display
            )
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall
community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or advances of
any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address,
without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at kjaymiller@gmail.com.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series of
actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or permanent
ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within the
community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.1, available at
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
For answers to common questions about this code of conduct, see the FAQ at
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
[https://www.contributor-covenant.org/translations][translations].
[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations | /render_engine_microblog-2023.6.1.tar.gz/render_engine_microblog-2023.6.1/CODE_OF_CONDUCT.md | 0.585338 | 0.712482 | CODE_OF_CONDUCT.md | pypi |
import pathlib
import typing
import git
from more_itertools import batched, flatten
from slugify import slugify
from ._base_object import BaseObject
from .archive import Archive
from .feeds import RSSFeed
from .hookspecs import register_plugins
from .page import Page
from .parsers import BasePageParser
from .parsers.markdown import MarkdownPageParser
class Collection(BaseObject):
    """
    Collection objects serve as a way to quickly process pages that have a
    portion of content that is similar or file driven.

    Example:

    ```python
    from render_engine import Site, Collection

    site = Site()

    @site.collection
    class BasicCollection(Collection):
        content_path = "content/pages"
    ```

    Collection pages **MUST** come from a `content_path` and all be the same
    content type. `content_path` can be a string representing a path or URL,
    depending on the parser used.

    Attributes:
        archive_template: template used for the collection's Archive pages.
        content_path: the path to iterate over to generate pages.
        content_type: Page subclass used for each entry (default `Page`).
        Feed: feed class (e.g. `RSSFeed`); its presence enables feed output.
        feed_title: title of the generated feed (defaults to the title).
        include_suffixes: glob patterns matched inside `content_path`.
        items_per_page: page size for paginated archives (optional).
        PageParser: parser used for each page's content.
        parser_extras: extra values passed through to the parser.
        routes: routes each page is rendered under.
        sort_by: attribute used to order pages.
        sort_reverse: whether to reverse the sort order.
        template: template applied to each page (optional).
    """

    archive_template: str | None
    content_path: pathlib.Path | str
    content_type: Page = Page
    Feed: RSSFeed
    feed_title: str
    include_suffixes: list[str] = ["*.md", "*.html"]
    items_per_page: int | None
    PageParser: BasePageParser = MarkdownPageParser
    parser_extras: dict[str, typing.Any]
    routes: list[str] = ["./"]
    sort_by: str = "title"
    sort_reverse: bool = False
    template: str | None
    plugins: list[typing.Callable] | None

    def __init__(self) -> None:
        # A collection only gets archive pages when it declares an archive
        # template or asks for pagination.
        self.has_archive = any(
            [
                hasattr(self, "archive_template"),
                getattr(self, "items_per_page", None),
            ]
        )
        self.title = self._title

    def iter_content_path(self):
        """Iterate through in the collection's content path."""
        return flatten(
            [
                pathlib.Path(self.content_path).glob(suffix)
                for suffix in self.include_suffixes
            ]
        )

    def _generate_content_from_modified_pages(self) -> typing.Generator[Page, None, None]:
        """
        Check git status for newly created and modified files.
        Returns the Page objects for the files in the content path
        """
        repo = git.Repo()
        changed_files = [
            *repo.untracked_files,  # new files not yet in git's index
            # Diff the index against the working tree (``diff(None)``); each
            # entry is a Diff object whose ``a_path`` holds the file path.
            # (``pathlib.Path`` cannot accept the Diff objects directly.)
            *(diff.a_path for diff in repo.index.diff(None)),
        ]
        return (
            self.get_page(pathlib.Path(changed_path))
            for changed_path in changed_files
            if pathlib.Path(changed_path).parent == pathlib.Path(self.content_path)
        )

    def get_page(
        self,
        content_path: str | None = None,
    ) -> Page:
        """Returns the page Object for the specified Content Path"""
        _page = self.content_type(
            content_path=content_path,
            Parser=self.PageParser,
        )
        # Only propagate plugins once this collection has a plugin manager.
        if getattr(self, "_pm", None):
            _page.register_plugins(self.plugins)
        _page.parser_extras = getattr(self, "parser_extras", {})
        _page.routes = self.routes
        _page.template = getattr(self, "template", None)
        _page.collection_vars = self.to_dict()
        return _page

    @property
    def sorted_pages(self):
        """Pages ordered by ``sort_by`` (falling back to the title)."""
        return sorted(
            (page for page in self.__iter__()),
            key=lambda page: getattr(page, self.sort_by, self._title),
            reverse=self.sort_reverse,
        )

    @property
    def archives(self) -> typing.Generator[Archive, None, None]:
        """
        Returns Archive objects containing the pages from the `content_path`.

        Archives are an iterable and the individual pages are built shortly
        after the collection pages are built (when `Site.render` is called).
        """
        if not self.has_archive:
            # Nothing to generate; previously this fell through and
            # produced archives even when has_archive was False.
            return
        sorted_pages = list(self.sorted_pages)
        items_per_page = getattr(self, "items_per_page", len(sorted_pages))
        # The first archive always holds every page; paginated chunks follow.
        archives = [sorted_pages]
        if items_per_page != len(sorted_pages):
            archives.extend(list(batched(sorted_pages, items_per_page)))
        num_archive_pages = len(archives)
        for index, pages in enumerate(archives):
            yield Archive(
                pages=pages,
                template=getattr(self, "archive_template", None),
                title=self._title,
                routes=self.routes,
                archive_index=index,
                num_archive_pages=num_archive_pages,
            )

    @property
    def feed(self):
        """Build the feed object for this collection (requires ``Feed``)."""
        feed = self.Feed()
        feed.pages = [page for page in self]
        feed.title = getattr(self, "feed_title", self._title)
        feed.slug = self._slug
        feed.Parser = self.PageParser
        return feed

    @property
    def slug(self):
        return slugify(self.title)

    def __repr__(self):
        return f"{self}: {__class__.__name__}"

    def __str__(self):
        return f"{__class__.__name__}"

    def __iter__(self):
        # Prefer an explicit ``pages`` attribute; otherwise build pages
        # lazily from the content path.
        if not hasattr(self, "pages"):
            for page in self.iter_content_path():
                yield self.get_page(page)
        else:
            for page in self.pages:
                yield page
def render_archives(archive, **kwargs) -> list[Archive]:
    """Render every archive page in *archive* and return the rendered results."""
    rendered = []
    for archive_page in archive:
        rendered.append(archive_page.render(pages=archive_page.pages, **kwargs))
    return rendered
from slugify import slugify
from .hookspecs import register_plugins
class BaseObject:
    """
    Shared properties for render_engine objects.

    This ensures that the behavior around the title, slug, and path_name are
    consistent. This is not intended to be used directly.
    """

    title: str
    template_vars: dict
    plugins: list

    @property
    def _title(self) -> str:
        """
        The title of the Page

        If no title is provided, use the class name.
        """
        return getattr(self, "title", self.__class__.__name__)

    @property
    def _slug(self) -> str:
        """The slugified path of the page"""
        return slugify(getattr(self, 'slug', self._title))

    @property
    def extension(self) -> str:
        """The extension of the page (always dot-prefixed; defaults to .html)"""
        return getattr(self, "_extension", ".html")

    @extension.setter
    def extension(self, extension: str) -> None:
        """Ensures consistency on extension: a missing leading dot is added.

        (Previously the dot-prefixed value was unconditionally overwritten by
        the raw value, so the prefix was never applied.)
        """
        if not extension.startswith("."):
            self._extension = f".{extension}"
        else:
            self._extension = extension

    @property
    def path_name(self) -> str:
        """
        Returns the [`url_for`][src.render_engine.page.Page.url_for] for the page including the first route.
        """
        return f"{self._slug}{self.extension}"

    def url_for(self):
        # Subclasses provide the URL; the base object has none.
        pass

    def to_dict(self):
        """
        Returns a dict of the page's attributes.

        This is often used to pass attributes into the page's `template`.
        """
        base_dict = {
            **vars(self),
            "title": self._title,
            "slug": self._slug,
            "url": self.url_for(),
            "path_name": self.path_name,
        }

        # Pull out template_vars
        if hasattr(self, "template_vars"):
            for key, value in self.template_vars.items():
                base_dict[key] = value

        return base_dict

    def register_plugins(self, plugins):
        """Creates the plugin manager and registers plugins"""
        # (Previously this called getattr on the *string* 'self', which never
        # has a ``plugins`` attribute, so existing plugins were always
        # replaced instead of extended.)
        if getattr(self, 'plugins', None):
            self.plugins.extend(plugins)
        else:
            self.plugins = plugins
        # Resolves to the module-level helper imported from .hookspecs
        # (the method name shadows it only as a class attribute).
        self._pm = register_plugins(self.plugins)
from typing import Any, Callable
import jinja2
from ._base_object import BaseObject
from .parsers.base_parsers import BasePageParser
class BasePage(BaseObject):
    """
    This is the Base Page object.

    It was created to allow for the creation of custom page objects.
    This is not intended to be used directly.

    Attributes:
        slug: The slug of the page. Defaults to the `title` slugified.
        content: The content to be rendered by the page
        parser:
            The Parser used to parse the page's content. Defaults to `BasePageParser`.
        reference:
            The attribute to use as the reference for the page in the site's route list.
            Defaults to `slug`.
    """

    extension: str = ".html"
    routes: list[str] = ["./"]
    template: str | type[jinja2.Template] | None
    _reference: str = "_slug"

    @property
    def _content(self):
        """Returns the content of the page."""
        return getattr(self, "content", None)

    def url_for(self) -> str:
        """
        Returns the URL for the page including the first route.

        This gets the relative URL for a page.

        !!! note

            Pages don't have access to the `Site` attrs.
            You cannot get an abolute URL from a Page object.
            Use {{SITE_URL}} in your templates to get the absolute URL.

            This is the preferred way to reference a page inside of a template.
        """
        # Pages on the default route live at the site root.
        if (route := self.routes[0]) == "./":
            return f"/{self.path_name}"
        else:
            return f"/{route}/{self.path_name}"

    def _render_from_template(self, template: jinja2.Template, **kwargs) -> str:
        """Renders the page from a template."""
        # Page attributes are exposed to the template; explicit kwargs win.
        return template.render(
            **{
                **self.to_dict(),
                **{"content": self._content},
                **kwargs,
            },
        )

    def _render_content(self, engine: jinja2.Environment | None = None, **kwargs) -> str:
        """Renders the content of the page."""
        # A page-level engine attribute (if set) takes precedence over the
        # engine passed in by the Site.
        engine = getattr(self, "engine", engine)
        template = getattr(self, "template", None)

        # Parsing with a template
        if template and engine:
            template = engine.get_template(template)
            return self._render_from_template(template, **kwargs)

        # Parsing without a template: the content itself must be a string.
        try:
            if isinstance(self._content, str):
                return self._content
            else:
                raise ValueError("The returned content attribute must be a string.")
        except AttributeError:
            raise AttributeError(
                f"{self} does not have a content attribute. "
                "You must either provide a template or content."
            )

    def __str__(self):
        return self._slug

    def __repr__(self) -> str:
        return f"<Page: {self._title}>"
class Page(BasePage):
    """
    The general BasePage object used to make web pages.

    Pages can be rendered directly from a template or generated from a file.

    !!! note

        Not all attributes are defined by default (those that are marked *optional*) but
        will be checked for in other areas of the code.

    When you create a page, you specify variables passed into rendering template.

    Attributes:
        content_path:
            The path to the file that will be used to generate the Page's `content`.
        extension: The suffix to use for the page. Defaults to `.html`.
        engine:
            If present, the engine to use for rendering the page.

            !!! note

                **This is normally not set and the `Site`'s engine will be used.**
        reference:
            Used to determine how to reference the page in the `Site`'s route_list.
            Defaults to `slug`.
        routes: The routes to use for the page. Defaults to `["./"]`.
        template:
            The template used to render the page.
            If not provided, the `Site`'s `content` will be used.
        Parser:
            The parser to generate the page's `raw_content`.
            Defaults to `BasePageParser`.
        title: The title of the page. Defaults to the class name.
    """

    content: Any
    content_path: str | None
    Parser: type[BasePageParser] = BasePageParser
    inherit_plugins: bool
    parser_extras: dict[str, Any] | None
    title: str

    def __init__(
        self,
        content_path: str | None = None,
        content: Any | None = None,
        Parser: type[BasePageParser] | None = None,
    ) -> None:
        # An explicitly passed Parser overrides the class-level default.
        if Parser:
            self.Parser = Parser

        # Parse Content from the Content Path or the Content.
        # An explicit argument wins over a class-level attribute.
        if content_path := (content_path or getattr(self, "content_path", None)):
            attrs, self.content = self.Parser.parse_content_path(content_path)
        elif content := (content or getattr(self, "content", None)):
            attrs, self.content = self.Parser.parse_content(content)
        else:
            attrs = {}
            self.content = None

        # Set the attributes: parsed (frontmatter) keys become lowercased
        # instance attributes.
        for key, val in attrs.items():
            setattr(self, key.lower(), val)

    @property
    def _content(self):
        """Returns the content of the page."""
        return self.Parser.parse(self.content, page=self)
import sys
import typing
import logging
import pathlib
import shutil
from collections import defaultdict
from jinja2 import Environment
from rich.progress import Progress
from .collection import Collection
from .engine import engine
from .page import Page
import pluggy
from .hookspecs import _PROJECT_NAME, SiteSpecs
class Site:
    """
    The site stores your pages and collections to be rendered.

    Attributes:
        engine: Jinja2 Environment used to render pages
        output_path:
            path to write rendered content
        partial:
            if True, only render pages that have been modified. Uses gitPython to check for changes.
        plugins:
            list of plugins that will be loaded and passed into each object
        static_path:
            path for the static folder. This will get copied to the output folder.
        site_vars:
            dictionary that will be passed into page template
        site_settings:
            settings that will be passed into pages and collections but not into templates
    """

    _pm: pluggy.PluginManager
    output_path: str = "output"
    static_path: str = "static"
    partial: bool = False
    # NOTE: site_settings and site_vars are class-level mutable defaults
    # shared by all Site instances; subclasses typically override them.
    site_settings: dict = {
        "plugins": {}
    }
    site_vars: dict = {
        "SITE_TITLE": "Untitled Site",
        "SITE_URL": "http://localhost:8000/",
        "DATETIME_FORMAT": "%d %b %Y %H:%M %Z"
    }
    engine: Environment = engine

    def __init__(self) -> None:
        self.route_list = dict()
        self.subcollections = defaultdict(lambda: {"pages": []})
        self.engine.globals.update(self.site_vars)

        # Manage Plugins
        self._pm = pluggy.PluginManager(project_name=_PROJECT_NAME)
        self._pm.add_hookspecs(SiteSpecs)

    def register_plugins(self, *plugins, **settings: dict[str, typing.Any]) -> None:
        """Register plugins with the site

        parameters:
            plugins: list of plugins to register
            settings: settings to pass into the plugins.
                settings keys are the plugin names as strings.
        """
        for plugin in plugins:
            self._pm.register(plugin)
            self.site_settings['plugins'][plugin.__name__] = plugin.default_settings

        self._pm.hook.add_default_settings(
            site=self,
            custom_settings=settings,
        )
        # Explicit settings override each plugin's defaults.
        self.site_settings['plugins'].update(**settings)

    @property
    def plugins(self):
        """The plugin objects currently registered with the site."""
        return self._pm.get_plugins()

    def collection(self, Collection: type[Collection]) -> Collection:
        """
        Add the collection to the route list to be rendered later.

        This is the primary way to add a collection to the site and
        can either be called on an uninstantiated class or on the class definition as a decorator.

        In most cases. You should use the decorator method.

        ```python
        from render_engine import Site, Collection

        site = Site()

        @site.collection # works
        class Pages(Collection):
            pass


        class Posts(Collection):
            pass

        site.collection(Posts) # also works
        ```
        """
        _Collection = Collection()
        # Site-level plugins plus the collection's own, minus any it ignores.
        plugins = [*self.plugins, *getattr(_Collection, "plugins", [])]

        for plugin in getattr(_Collection, 'ignore_plugins', []):
            plugins.remove(plugin)
        _Collection.register_plugins(plugins)

        self._pm.hook.pre_build_collection(
            collection=_Collection,
            settings=self.site_settings.get('plugins', {}),
        )  # type: ignore
        self.route_list[_Collection._slug] = _Collection
        return _Collection

    def page(self, Page: Page) -> Page:
        """
        Add the page to the route list to be rendered later.
        Also remaps `title` in case the user wants to use it in the template rendering.

        This is the primary way to add a page to the site and can either be called
        on an uninstantiated class or on the class definition as a decorator.

        In most cases. You should use the decorator method.

        ```python
        from render_engine import Site, Page

        site = Site()

        @site.page # works
        class Home(Page):
            pass


        class About(Page):
            pass

        site.page(About) # also works
        ```
        """
        page = Page()
        page.title = page._title  # Expose _title to the user through `title`

        # NOTE(review): this shares (does not copy) the site's plugin
        # manager, so unregistering a page's ignored plugins mutates the
        # site-wide manager -- confirm intended.
        page._pm = self._pm
        for plugin in getattr(page, 'ignore_plugins', []):
            page._pm.unregister(plugin)
        self.route_list[getattr(page, page._reference)] = page

    def _render_static(self) -> None:
        """Copies a Static Directory to the output folder"""
        shutil.copytree(
            self.static_path,
            pathlib.Path(self.output_path) / pathlib.Path(self.static_path).name,
            dirs_exist_ok=True
        )

    def _render_output(self, route: str, page: Page):
        """writes the page object to disk"""
        path = (
            pathlib.Path(self.output_path)
            / pathlib.Path(route)
            / pathlib.Path(page.path_name)
        )
        path.parent.mkdir(parents=True, exist_ok=True)
        page.rendered_content = page._render_content(engine=self.engine)
        self._pm.hook.post_render_content(page=page)
        return path.write_text(page.rendered_content)

    def _render_partial_collection(self, collection: Collection) -> None:
        """Iterate through the Changed Pages and Check for Collections and Feeds"""
        for entry in collection._generate_content_from_modified_pages():
            for route in collection.routes:
                self._render_output(route, entry)

        if collection.has_archive:
            for archive in collection.archives:
                logging.debug("Adding Archive: %s", archive.__class__.__name__)
                self._render_output(collection.routes[0], archive)

        if hasattr(collection, "Feed"):
            self._render_output("./", collection.feed)

    def _render_full_collection(self, collection: Collection) -> None:
        """Iterate through Pages and Check for Collections and Feeds"""
        for entry in collection:
            self._pm.hook.render_content(page=entry)
            for route in collection.routes:
                self._render_output(route, entry)

        if collection.has_archive:
            for archive in collection.archives:
                logging.debug("Adding Archive: %s", archive.__class__.__name__)
                for route in collection.routes:
                    # Render the archive under each declared route.
                    # (Previously ``routes[0]`` was rendered on every
                    # iteration, duplicating work and skipping other routes.)
                    self._render_output(route, archive)

        if hasattr(collection, "Feed"):
            self._render_output("./", collection.feed)

    def render(self) -> None:
        """
        Render all pages and collections.

        These are pages and collections that have been added to the site using
        the [`Site.page`][src.render_engine.Site.page]
        and [`Site.collection`][src.render_engine.Site.collection] methods.

        Render should be called after all pages and collections have been added to the site.

        You can choose to call it manually in your file or use the CLI command [`render-engine build`][src.render_engine.cli.build]
        """
        with Progress() as progress:
            pre_build_task = progress.add_task("Loading Pre-Build Plugins", total=1)
            self._pm.hook.pre_build_site(
                site=self,
                settings=self.site_settings.get('plugins', {})
            )  # type: ignore

            # Parse Route List
            task_add_route = progress.add_task(
                "[blue]Adding Routes", total=len(self.route_list)
            )

            if pathlib.Path(self.static_path).exists():
                self._render_static()

            self.engine.globals["site"] = self
            self.engine.globals["routes"] = self.route_list

            for slug, entry in self.route_list.items():
                progress.update(
                    task_add_route, description=f"[blue]Adding[gold]Route: [blue]{slug}"
                )

                if isinstance(entry, Page):
                    if getattr(entry, "collection", None):
                        # NOTE(review): passes ``Page=`` here but ``page=`` in
                        # _render_full_collection -- confirm against the hookspec.
                        self._pm.hook.render_content(Page=entry, settings=self.site_settings.get('plugins', None))
                    for route in entry.routes:
                        progress.update(
                            task_add_route,
                            description=f"[blue]Adding[gold]Route: [blue]{entry._slug}",
                        )
                        self._render_output(route, entry)

                if isinstance(entry, Collection):
                    if self.partial:
                        self._render_partial_collection(entry)
                    else:
                        self._render_full_collection(entry)
            progress.add_task("Loading Post-Build Plugins", total=1)
            self._pm.hook.post_build_site(
                site=self,
                settings=self.site_settings.get('plugins', {}),
            )
            progress.update(pre_build_task, advance=1)
import urllib.parse
from datetime import datetime
from email import utils
from jinja2 import (
ChoiceLoader,
Environment,
FileSystemLoader,
PackageLoader,
pass_environment,
select_autoescape,
)
from .collection import Collection
# Look for templates in the project-local "templates" directory first,
# falling back to the defaults bundled with the render_engine package.
render_engine_templates_loader = ChoiceLoader(
    [
        FileSystemLoader("templates"),
        PackageLoader("render_engine", "render_engine_templates"),
    ]
)
# Shared Jinja2 environment used to render all pages and feeds.
engine = Environment(
    loader=render_engine_templates_loader,
    autoescape=select_autoescape(["xml"]),  # only xml (feed) templates autoescape
    lstrip_blocks=True,
    trim_blocks=True,
)
def to_pub_date(value: datetime):
    """Format *value* as an RFC 2822 date string (for RSS <pubDate> fields)."""
    formatted = utils.format_datetime(value)
    return formatted
# Expose as the Jinja filter {{ value | to_pub_date }}.
engine.filters["to_pub_date"] = to_pub_date
@pass_environment
def format_datetime(env: Environment, value: datetime) -> str:
    """
    Format *value* using the site-wide DATETIME_FORMAT global
    (default "%d %b %Y %H:%M %Z").
    """
    return datetime.strftime(value, env.globals.get("DATETIME_FORMAT", "%d %b %Y %H:%M %Z"))
engine.filters["format_datetime"] = format_datetime
@pass_environment
def to_absolute(env: Environment, url:str) -> str:
    """Join *url* onto the SITE_URL global, producing an absolute URL."""
    return urllib.parse.urljoin(env.globals.get('SITE_URL'), url)
engine.filters["to_absolute"] = to_absolute
@pass_environment
def feed_url(env: Environment, value: str) -> str:
    """Returns the URL for the collections feed"""
    routes = env.globals.get("routes")
    # ``value`` is the collection's slug in the site's route list.
    return routes[value].feed.url_for()
engine.filters["feed_url"] = feed_url
@pass_environment
def url_for(env: Environment, value: str, page: int = 0) -> str:
    """Look for the route in the route_list and return the url for the page.

    ``value`` is either a plain route name or ``"collection.page-reference"``.
    For a Collection route, *page* selects which archive page to link to.

    Raises:
        ValueError: when the route (or the referenced page) cannot be found.
    """
    routes = env.globals.get("routes")
    parts = value.split(".", maxsplit=1)
    if len(parts) == 2:
        # "collection.page" form: find the matching page inside the collection.
        # (Loop variable renamed so it no longer shadows the ``page`` parameter.)
        collection_name, page_ref = parts
        if (collection := routes.get(collection_name, None)) is not None:
            for entry in collection:
                # Each page declares which attribute identifies it (_reference).
                if getattr(entry, entry._reference) == page_ref:
                    return entry.url_for()
    else:
        route = routes.get(value)
        if route is None:
            # Unknown name: report it instead of raising AttributeError below.
            raise ValueError(f"{value} is not a valid route.")
        if isinstance(route, Collection):
            # Collections resolve to one of their archive pages.
            return list(route.archives)[page].url_for()
        return route.url_for()
    raise ValueError(f"{value} is not a valid route.")
engine.filters["url_for"] = url_for
import importlib
import pathlib
import sys
import typing
from http.server import HTTPServer, SimpleHTTPRequestHandler
import dtyper
import typer
from rich.console import Console
from rich.progress import Progress
from render_engine.engine import engine
from render_engine.site import Site
app = typer.Typer()
def get_app(module_site: str) -> "Site":
    """Import and return the Site object named by a "module:attribute" string.

    e.g. ``get_app("app:site")`` imports ``app`` (resolvable from the current
    directory) and returns its ``site`` attribute.
    """
    sys.path.insert(0, ".")  # allow importing the user's project module
    import_path, app_name = module_site.split(":", 1)
    # Use the module object returned by import_module directly instead of
    # re-fetching it from sys.modules.
    module = importlib.import_module(import_path)
    return getattr(module, app_name)
def _create_folder(*, folder: pathlib.Path, overwrite: bool) -> pathlib.Path:
    """Create *folder* (including parents) and return it.

    When ``overwrite`` is False, an existing folder raises FileExistsError;
    when True, an existing folder is reused as-is (mkdir never deletes
    contents).
    """
    folder.mkdir(parents=True, exist_ok=overwrite)
    return folder
# Template used by `init` to generate the project's app.py file.
CREATE_APP_PY_TEMPLATE = engine.get_template("create_app_py.txt")
def _create_templates_folder(
    *templates,
    project_folder: pathlib.Path,
    templates_folder_name: pathlib.Path,
    exists_ok: bool,
) -> None:
    """Create the project's templates folder and seed it with rendered copies
    of the named package templates.

    Note: ``mkdir`` is called without ``parents=True``, so *project_folder*
    must already exist.
    """
    path = project_folder.joinpath(templates_folder_name)
    path.mkdir(
        exist_ok=exists_ok,
    )
    for template in templates:
        path.joinpath(template).write_text(engine.get_template(template).render())
def _create_site_with_vars(
    *,
    site_title: typing.Optional[str] = None,
    site_url: typing.Optional[str] = None,
    site_description: typing.Optional[str] = None,
    site_author: typing.Optional[str] = None,
    collection_path: typing.Optional[str] = None,
) -> Site:
    """Create a new Site whose site_vars contain only the values provided."""
    site = Site()
    potential_site_vars = {
        "site_title": site_title,
        "site_url": site_url,
        "site_author": site_author,
        "site_description": site_description,
        # str(None) would be the truthy string "None"; keep None so the
        # filter below can drop an unset collection path.
        "collections_path": str(collection_path) if collection_path is not None else None,
    }
    # Only keep the vars the caller actually supplied.
    site_vars = {key: value for key, value in potential_site_vars.items() if value}
    site.site_vars.update(site_vars)
    return site
@dtyper.function
@app.command()
def init(
    collection_path: pathlib.Path = typer.Option(
        pathlib.Path("pages"),
        help="create your content folder in a custom location",
        rich_help_panel="Path Options",
    ),
    force: bool = typer.Option(
        False,
        "--force",
        "-f",
        help="Force overwrite of existing files",
        rich_help_panel="Flags",
    ),
    output_path: pathlib.Path = typer.Option(
        "output",
        help="custom output folder location.",
        rich_help_panel="Path Attributes",
    ),
    project_path_name: pathlib.Path = typer.Option(
        "app.py",
        help="name of render_engine app name",
        rich_help_panel="Path Attributes",
    ),
    project_folder: pathlib.Path = typer.Option(
        pathlib.Path("./"),
        help="path to create the project in",
        rich_help_panel="Path Attributes",
    ),
    site_author: typing.Optional[str] = typer.Option(
        None,
        help="(Optional): Author of the site",
        rich_help_panel="Site Vars",
    ),
    site_description: typing.Optional[str] = typer.Option(
        None,
        help="(Optional): Site Description",
        rich_help_panel="Site Vars",
    ),
    site_title: typing.Optional[str] = typer.Option(
        None,
        "--title",
        "-t",
        help="title of the site",
        rich_help_panel="Site Vars",
        show_default=False,
    ),
    site_url: typing.Optional[str] = typer.Option(
        None,
        "--url",
        "-u",
        help="URL for the site",
        rich_help_panel="Site Vars",
        show_default=False,
    ),
    skip_collection: bool = typer.Option(
        False,
        "--skip-collection",
        "-C",
        help="Skip creating the content folder and a collection",
        rich_help_panel="Flags",
    ),
    skip_static: bool = typer.Option(
        False,
        "--skip-static",
        "-S",
        help="Skip copying static files",
        rich_help_panel="Flags",
    ),
    static_path: pathlib.Path = typer.Option(
        pathlib.Path("static"),
        help="custom static folder",
        rich_help_panel="Path Attributes",
    ),
    templates_path: pathlib.Path = typer.Option(
        pathlib.Path("templates"),
        "--templates-path",
        help="custom templates folder",
    ),
):
    """
    CLI for creating a new site configuration.
    Params:
        collection_path: create your content folder in a custom location
        force: Force overwrite of existing files
        output_path: custom output folder location
        project_path_name: name of render_engine app name
        project_folder: path to create the project
        site_author: Author of the site
        site_description: Site Description
        site_title: title of the site
        site_url: URL for the site
        skip_collection: Skip creating the content folder and a collection
        skip_static: Skip copying static files
        static_path: custom static folder
        templates_path: custom templates folder
    """
    # creating the site object and site_vars
    project_folder_path = pathlib.Path(project_folder)
    # the overall "Creating Site" task has 5 stages: site object, static
    # folder, app file, templates folder, collection
    with Progress() as progress:
        progress.console.rule("[green][bold]Creating Project")
        task_project = progress.add_task("Creating Site", total=5)
        site = _create_site_with_vars(
            site_title=site_title,
            site_url=site_url,
            site_description=site_description,
            site_author=site_author,
            collection_path=collection_path,
        )
        progress.update(task_project, advance=1)
        # add output path
        if output_path:
            site.output_path = str(output_path)
        # creating folders unless skipped
        if not skip_static:
            task_static_folder = progress.add_task(
                f"Creating Static Folder: [blue]{static_path}",
                total=1,
            )
            static = project_folder_path.joinpath(static_path)
            # mkdir without parents=True: assumes project_folder already
            # exists -- TODO(review) confirm that is intentional
            static.mkdir(exist_ok=force)
            site.static_path = str(static_path)
            progress.update(task_static_folder, advance=1)
        progress.update(task_project, advance=1)
        # creating the app.py file from the template
        project_config_path = (
            pathlib.Path(project_folder).joinpath(project_path_name).with_suffix(".py")
        )
        task_generate_project_path = progress.add_task(
            f"Generating App File: [blue]{project_config_path}", total=1
        )
        # has_attrs tells the template whether any site var was provided
        has_attrs = any((site_title, site_url, site_description, site_author))
        project_config_path.write_text(
            CREATE_APP_PY_TEMPLATE.render(
                has_attrs=has_attrs,
                site_title=site_title,
                site_url=site_url,
                site_description=site_description,
                site_author=site_author,
                output_path=output_path,
                static_path=static_path,
                collection_path=collection_path,
            )
        )
        progress.update(task_generate_project_path, advance=1)
        progress.update(task_project, advance=1)
        # Create the templates folder and the index.html file
        task_templates = progress.add_task(
            f"Creating Templates Folder: [blue]{templates_path}", total=1
        )
        templates = ["index.html", "base.html", "content.html"]
        _create_templates_folder(
            *templates,
            project_folder=project_folder,
            templates_folder_name=templates_path,
            exists_ok=force,
        )
        progress.update(task_templates, advance=1)
        progress.update(task_project, advance=1)
        # Create the collection
        if not skip_collection:
            task_create_collection = progress.add_task(
                f"Creating Collection: [blue]{collection_path}", total=1
            )
            _collection_path = pathlib.Path(project_folder).joinpath(collection_path)
            _collection_path.mkdir(exist_ok=force)
            _collection_path.joinpath("sample_pages.md").write_text(
                engine.get_template("base_collection_path.md").render()
            )
            progress.update(task_create_collection, advance=1)
        progress.update(task_project, advance=1)
@app.command()
def build(site_module: str):
    """
    Build (render) the site named by ``site_module``.
    Params:
        site_module: module and class name of the site (``module:Site``)
    """
    # renamed local so it no longer shadows the module-level typer app
    site = get_app(site_module)
    site.render()
@app.command()
def serve(
    module_site: typing.Optional[str] = typer.Option(
        None,
        "--build",
        "-b",
        help="module:site for Build the site prior to serving",
    ),
    directory: typing.Optional[str] = typer.Option(
        None,
        "--directory",
        "-d",
        help="Directory to serve",
        show_default=False,
    ),
    port: int = typer.Option(
        8000,
        "--port",
        "-p",
        help="Port to serve on",
        show_default=False,
    ),
):
    """
    Create an HTTP server to serve the site at `localhost`.
    !!! warning
        this is only for development purposes and should not be used in production.
    Params:
        module_site: Python module and initialize Site class
        directory: Directory to serve. If `module_site` is provided, this will be the `output_path` of the site.
        port: Port to serve on
    """
    # optionally build first; a freshly-built site supplies the directory
    site = None
    if module_site:
        site = get_app(module_site)
        site.render()
    if not directory:
        directory = site.output_path if site is not None else 'output'

    class _RequestHandler(SimpleHTTPRequestHandler):
        # pin the handler to the chosen directory
        def __init__(self, *args, **kwargs):
            super().__init__(*args, directory=directory, **kwargs)

    server_address = ("localhost", port)
    httpd = HTTPServer(server_address, _RequestHandler)
    console = Console()
    console.print(f"Serving [blue]{directory} on http://{server_address[0]}:{server_address[1]}")
    console.print("Press [bold red]CTRL+C[/bold red] to stop serving")
    return httpd.serve_forever()
def cli():
    """Entry point for the render-engine command line interface."""
    app()
from typing import Sequence
import numpy as np
from PIL import Image
import pyrender
import trimesh
from trimesh.transformations import rotation_matrix, concatenate_matrices
def render_frames(mesh: trimesh.Trimesh, w: int, h: int, num_frames: int) -> Sequence[Image.Image]:
    """Render ``num_frames`` turntable views of *mesh* at ``w`` x ``h`` pixels."""
    return _render_trimesh_frames(mesh, w, h, num_frames)
def _render_trimesh_frames(mesh: trimesh.Trimesh, w: int, h: int, num_frames: int) -> Sequence[Image.Image]:
    """Render a turntable animation of *mesh* with pyrender.

    The mesh is scaled to a ~0.33-unit bounding box, recentered, and
    converted from Z-up (blender style) to the scene's Y-up convention.
    NOTE: *mesh* is transformed in place.

    Returns a list of ``num_frames`` RGB images taken by a camera
    orbiting the Z axis.
    """
    # normalize size + swap axes (mesh is Z-up blender style, scene is Y-up)
    s = 0.33 / mesh.scale
    scale = np.array([
        [s, 0, 0, 0],
        [0, 0, s, 0],
        [0, s, 0, 0],
        [0, 0, 0, 1],
    ])
    bound = mesh.bounding_box
    recenter = np.linalg.inv(bound.primitive.transform)
    mesh.apply_transform(concatenate_matrices(scale, recenter))
    scene = pyrender.Scene()
    scene.add(pyrender.Mesh.from_trimesh(mesh))
    imgs = []
    r = pyrender.OffscreenRenderer(w, h)
    # camera and light are identical every frame except for the per-frame
    # Z rotation of the pose, so build them once outside the loop
    camera = pyrender.PerspectiveCamera(yfov=np.pi / 3.0, aspectRatio=1.0)
    half_sqrt2 = np.sqrt(2) / 2
    base_pose = np.array([
        [0.0, -half_sqrt2, half_sqrt2, 0.3],
        [1.0, 0.0, 0.0, 0.0],
        [0.0, half_sqrt2, half_sqrt2, 0.35],
        [0.0, 0.0, 0.0, 1.0],
    ])
    # TODO https://pyrender.readthedocs.io/en/latest/generated/pyrender.light.DirectionalLight.html#pyrender.light.DirectionalLight
    light = pyrender.SpotLight(color=np.ones(3), intensity=3.0,
                               innerConeAngle=np.pi/16.0,
                               outerConeAngle=np.pi/6.0)
    for rot_z in np.linspace(0, np.pi * 2, num_frames, endpoint=False):
        camera_pose = concatenate_matrices(
            rotation_matrix(rot_z, [0, 0, 1]), base_pose)
        cam_node = scene.add(camera, pose=camera_pose)
        light_node = scene.add(light, pose=camera_pose)
        color, _depth = r.render(scene, flags=pyrender.RenderFlags.SKIP_CULL_FACES)
        scene.remove_node(cam_node)
        scene.remove_node(light_node)
        # bug fix: Image.frombytes was passed color.shape[:2], which is
        # (height, width); PIL expects (width, height), so non-square
        # frames came out scrambled. fromarray handles layout correctly.
        imgs.append(Image.fromarray(color))
    return imgs
import requests
import logging
from .render import format_baseurl, renderaccess
from .utils import NullHandler, get_json, put_json, rest_delete
logger = logging.getLogger(__name__)
logger.addHandler(NullHandler())
def copy_match_explicit(match):
    """create an independent copy of a match dictionary

    Only the nested "matches" lists are duplicated (they are the only
    mutable containers involved), which is significantly faster than
    e.g. copy.deepcopy or json round-tripping.

    Parameters
    ----------
    match : dict
        match dictionary

    Returns
    -------
    new_match : dict
        match dictionary equivalent to input match
    """
    inner = match["matches"]
    copied_inner = {
        "p": [row[:] for row in inner["p"]],
        "q": [row[:] for row in inner["q"]],
        "w": inner["w"][:],
    }
    new_match = dict(match)
    new_match["matches"] = copied_inner
    return new_match
def copy_matches_explicit(matches):
    """create independent copies of every match dictionary in matches

    significantly faster than e.g. copy.deepcopy or json serialization

    Parameters
    ----------
    matches : list[dict]
        list of match dictionaries to copy

    Returns
    -------
    new_matches : list[dict]
        list of match dictionaries equivalent to input matches
    """
    return list(map(copy_match_explicit, matches))
def swap_matchpair(match, copy=True):
    """exchange the p and q roles of a match dictionary

    Parameters
    ----------
    match : dict
        match dictionary to swap p->q, q->p
    copy : bool
        when True (default) return an independent copy; when False the
        returned dict shares its point lists with *match*

    Returns
    -------
    new_match : dict
        match dictionary with "p" and "q" swapped
    """
    new_match = dict(match)
    new_match.update({
        "pId": match["qId"],
        "qId": match["pId"],
        "pGroupId": match["qGroupId"],
        "qGroupId": match["pGroupId"],
        "matches": {
            "p": match["matches"]["q"],
            "q": match["matches"]["p"],
            "w": match["matches"]["w"],  # weight unchanged by the swap
        },
    })
    return copy_match_explicit(new_match) if copy else new_match
@renderaccess
def get_matchcollection_owners(host=None, port=None,
                               session=requests.session(),
                               render=None, **kwargs):
    """list every owner that has at least one matchCollection

    :func:`renderapi.render.renderaccess` decorated function

    Parameters
    ----------
    render : renderapi.render.Render
        Render connection object
    session : requests.session.Session
        requests session

    Returns
    -------
    :obj:`list` of :obj:`unicode`
        matchCollection owners

    Raises
    ------
    RenderError
        if a valid response cannot be obtained from the server
    """
    baseurl = format_baseurl(host, port)
    return get_json(session, baseurl + "/matchCollectionOwners")
@renderaccess
def get_matchcollections(owner=None, host=None, port=None,
                         session=requests.session(), render=None, **kwargs):
    """list the matchCollections belonging to an owner

    :func:`renderapi.render.renderaccess` decorated function

    Parameters
    ----------
    owner : unicode
        matchCollection owner (fallback to render.DEFAULT_OWNER;
        note the match owner is not always the stack owner)
    render : Render
        Render connection object
    session : requests.session.Session
        requests session

    Returns
    -------
    :obj:`list` of :obj:`unicode`
        matchcollections owned by owner

    Raises
    ------
    RenderError
        if a valid response cannot be obtained from the server
    """
    path = "/owner/%s/matchCollections" % owner
    return get_json(session, format_baseurl(host, port) + path)
@renderaccess
def get_match_groupIds(matchCollection, owner=None, host=None,
                       port=None, session=requests.session(),
                       render=None, **kwargs):
    """list every groupId present in a matchCollection

    :func:`renderapi.render.renderaccess` decorated function

    Parameters
    ----------
    matchCollection : str
        matchCollection name
    owner : str
        matchCollection owner (fallback to render.DEFAULT_OWNER;
        note the match owner is not always the stack owner)
    render : Render
        Render connection object
    session : requests.session.Session
        requests session

    Returns
    -------
    :obj:`list` of :obj:`str`
        groupIds in matchCollection

    Raises
    ------
    RenderError
        if a valid response cannot be obtained from the server
    """
    path = "/owner/%s/matchCollection/%s/groupIds" % (owner, matchCollection)
    return get_json(session, format_baseurl(host, port) + path)
@renderaccess
def get_matches_outside_group(matchCollection, groupId, mergeCollections=None,
                              stream=True,
                              owner=None, host=None,
                              port=None, session=requests.session(),
                              render=None, **kwargs):
    """fetch matches crossing out of a group

    returns all matches where pGroupId == groupId and qGroupId != groupId

    :func:`renderapi.render.renderaccess` decorated function

    Parameters
    ----------
    matchCollection : str
        matchCollection name
    groupId : str
        groupId to query
    mergeCollections : :obj:`list` of :obj:`str`
        other matchCollections to aggregate into answer
    stream : bool
        whether to invoke streaming on get (default True)
    owner : unicode
        matchCollection owner (fallback to render.DEFAULT_OWNER;
        note the match owner is not always the stack owner)
    render : Render
        Render connection object
    session : requests.session.Session
        requests session

    Returns
    -------
    :obj:`list` of :obj:`dict`
        list of matches (see matches definition)

    Raises
    ------
    RenderError
        if a valid response cannot be obtained from the server
    """
    path = "/owner/%s/matchCollection/%s/group/%s/matchesOutsideGroup" % (
        owner, matchCollection, groupId)
    url = add_merge_collections(
        format_baseurl(host, port) + path, mergeCollections)
    return get_json(session, url, stream=stream)
@renderaccess
def get_matches_within_group(matchCollection, groupId, mergeCollections=None,
                             stream=True,
                             owner=None, host=None, port=None,
                             session=requests.session(),
                             render=None, **kwargs):
    """fetch matches internal to a group

    returns all matches where pGroupId == groupId and qGroupId == groupId

    :func:`renderapi.render.renderaccess` decorated function

    Parameters
    ----------
    matchCollection : str
        matchCollection name
    groupId : str
        groupId to query
    mergeCollections : :obj:`list` of :obj:`str` or None
        other matchCollections to aggregate into answer
    stream : bool
        whether to invoke streaming on get (default True)
    owner : unicode
        matchCollection owner (fallback to render.DEFAULT_OWNER;
        note the match owner is not always the stack owner)
    render : RenderClient
        RenderClient connection object
    session : requests.session.Session
        requests session

    Returns
    -------
    :obj:`list` of :obj:`dict`
        list of matches (see matches definition)

    Raises
    ------
    RenderError
        if a valid response cannot be obtained from the server
    """
    path = "/owner/%s/matchCollection/%s/group/%s/matchesWithinGroup" % (
        owner, matchCollection, groupId)
    url = add_merge_collections(
        format_baseurl(host, port) + path, mergeCollections)
    return get_json(session, url, stream=stream)
@renderaccess
def get_matches_from_group_to_group(matchCollection, pgroup, qgroup,
                                    mergeCollections=None, stream=True,
                                    render=None, owner=None, host=None,
                                    port=None,
                                    session=requests.session(), **kwargs):
    """fetch matches between two specific groups

    returns all matches where pgroup == pGroupId and qgroup == qGroupId
    OR pgroup == qGroupId and qgroup == pGroupId

    :func:`renderapi.render.renderaccess` decorated function

    Parameters
    ----------
    matchCollection : str
        matchCollection name
    pgroup : str
        first group
    qgroup : str
        second group
    mergeCollections : :obj:`list` of :obj:`str` or None
        other matchCollections to aggregate into answer
    stream : bool
        whether to invoke streaming on get (default True)
    owner : unicode
        matchCollection owner (fallback to render.DEFAULT_OWNER;
        note the match owner is not always the stack owner)
    render : RenderClient
        RenderClient connection object
    session : requests.session.Session
        requests session

    Returns
    -------
    :obj:`list` of :obj:`dict`
        list of matches (see matches definition)

    Raises
    ------
    RenderError
        if a valid response cannot be obtained from the server
    """
    path = "/owner/%s/matchCollection/%s/group/%s/matchesWith/%s" % (
        owner, matchCollection, pgroup, qgroup)
    url = add_merge_collections(
        format_baseurl(host, port) + path, mergeCollections)
    return get_json(session, url, stream=stream)
def add_merge_collections(request_url, mcs):
    """utility function to add mergeCollections to request_url

    Parameters
    ----------
    request_url : str
        request url
    mcs : :obj:`list` of :obj:`str` or None
        mergeCollections to add; None (or any non-list/tuple value) is
        ignored, preserving historical behavior

    Returns
    -------
    str
        request_url with ?mergeCollection=mc[0]&mergeCollection=mc[1]...
        appended (unchanged when there is nothing to add)
    """
    # isinstance instead of `type(mcs) is list` so tuples and list
    # subclasses are honored; an empty sequence used to append a
    # dangling "?" -- skip it instead
    if mcs is not None and isinstance(mcs, (list, tuple)) and mcs:
        request_url += "?" + "&".join(
            'mergeCollection=%s' % mc for mc in mcs)
    return request_url
@renderaccess
def get_matches_from_tile_to_tile(matchCollection, pgroup, pid,
                                  qgroup, qid, mergeCollections=None,
                                  render=None, owner=None,
                                  host=None, port=None,
                                  session=requests.session(), **kwargs):
    """fetch matches between two specific tiles

    returns all matches where
    pgroup == pGroupId and pid == pId and qgroup == qGroupId and qid == qId
    OR the same with p and q roles reversed

    :func:`renderapi.render.renderaccess` decorated function

    Parameters
    ----------
    matchCollection : str
        matchCollection name
    pgroup : str
        first group
    pid : str
        first id
    qgroup : str
        second group
    qid : str
        second id
    mergeCollections : :obj:`list` of :obj:`str` or None
        other matchCollections to aggregate into answer
    owner : unicode
        matchCollection owner (fallback to render.DEFAULT_OWNER;
        note the match owner is not always the stack owner)
    render : RenderClient
        RenderClient connection object
    session : requests.session.Session
        requests session

    Returns
    -------
    :obj:`list` of :obj:`dict`
        list of matches (see matches definition)

    Raises
    ------
    RenderError
        if a valid response cannot be obtained from the server
    """
    path = ("/owner/%s/matchCollection/%s/group/%s/id/%s/"
            "matchesWith/%s/id/%s" % (
                owner, matchCollection, pgroup, pid, qgroup, qid))
    url = add_merge_collections(
        format_baseurl(host, port) + path, mergeCollections)
    return get_json(session, url)
@renderaccess
def get_matches_with_group(matchCollection, pgroup, mergeCollections=None,
                           stream=True,
                           render=None, owner=None,
                           host=None, port=None,
                           session=requests.session(), **kwargs):
    """fetch all matches originating from a specific group

    returns all matches where pgroup == pGroupId

    :func:`renderapi.render.renderaccess` decorated function

    Parameters
    ----------
    matchCollection : str
        matchCollection name
    pgroup : str
        source group to query
    mergeCollections : :obj:`list` of :obj:`str` or None
        other matchCollections to aggregate into answer
    stream : bool
        whether to invoke streaming (default=True)
    owner : unicode
        matchCollection owner (fallback to render.DEFAULT_OWNER;
        note the match owner is not always the stack owner)
    render : Render
        Render connection object
    session : requests.session.Session
        requests session

    Returns
    -------
    :obj:`list` of :obj:`dict`
        list of matches (see matches definition)

    Raises
    ------
    RenderError
        if a valid response cannot be obtained from the server
    """
    path = "/owner/%s/matchCollection/%s/pGroup/%s/matches/" % (
        owner, matchCollection, pgroup)
    url = add_merge_collections(
        format_baseurl(host, port) + path, mergeCollections)
    return get_json(session, url, stream=stream)
@renderaccess
def get_match_groupIds_from_only(matchCollection, mergeCollections=None,
                                 render=None, owner=None,
                                 host=None, port=None,
                                 session=requests.session(), **kwargs):
    """list the source pGroupIds present in a matchCollection

    :func:`renderapi.render.renderaccess` decorated function

    Parameters
    ----------
    matchCollection : str
        matchCollection name
    mergeCollections : :obj:`list` of :obj:`str` or None
        other matchCollections to aggregate into answer
    owner : unicode
        matchCollection owner (fallback to render.DEFAULT_OWNER;
        note the match owner is not always the stack owner)
    render : RenderClient
        RenderClient connection object
    session : requests.session.Session
        requests session

    Returns
    -------
    :obj:`list` of :obj:`str`
        pGroupIds in matchCollection

    Raises
    ------
    RenderError
        if a valid response cannot be obtained from the server
    """
    path = "/owner/%s/matchCollection/%s/pGroupIds" % (owner, matchCollection)
    url = add_merge_collections(
        format_baseurl(host, port) + path, mergeCollections)
    return get_json(session, url)
@renderaccess
def get_match_groupIds_to_only(matchCollection, mergeCollections=None,
                               render=None, owner=None,
                               host=None, port=None,
                               session=requests.session(), **kwargs):
    """list the destination qGroupIds present in a matchCollection

    :func:`renderapi.render.renderaccess` decorated function

    Parameters
    ----------
    matchCollection : str
        matchCollection name
    mergeCollections : :obj:`list` of :obj:`str` or None
        other matchCollections to aggregate into answer
    owner : unicode
        matchCollection owner (fallback to render.DEFAULT_OWNER;
        note the match owner is not always the stack owner)
    render : Render
        Render connection object
    session : requests.session.Session
        requests session

    Returns
    -------
    :obj:`list` of :obj:`str`
        qGroupIds in matchCollection

    Raises
    ------
    RenderError
        if a valid response cannot be obtained from the server
    """
    path = "/owner/%s/matchCollection/%s/qGroupIds" % (owner, matchCollection)
    url = add_merge_collections(
        format_baseurl(host, port) + path, mergeCollections)
    return get_json(session, url)
@renderaccess
def get_matches_involving_tile(matchCollection, groupId, id,
                               mergeCollections=None, stream=True,
                               owner=None, host=None, port=None,
                               session=requests.session(), **kwargs):
    """fetch all matches touching a specific tile

    returns all matches where groupId == pGroupId and id == pId
    OR groupId == qGroupId and id == qId

    :func:`renderapi.render.renderaccess` decorated function

    Parameters
    ----------
    matchCollection : str
        matchCollection name
    groupId : str
        groupId to query
    id : str
        id to query
    mergeCollections : :obj:`list` of :obj:`str`, optional
        other matchCollections to aggregate into answer
    stream : bool
        whether to invoke streaming on get (default True)
    owner : unicode
        matchCollection owner (fallback to render.DEFAULT_OWNER;
        note the match owner is not always the stack owner)
    render : Render
        Render connection object
    session : requests.session.Session
        requests session

    Returns
    -------
    :obj:`list` of :obj:`dict`
        list of matches (see matches definition)

    Raises
    ------
    RenderError
        if a valid response cannot be obtained from the server
    """
    path = "/owner/{}/matchCollection/{}/group/{}/id/{}/".format(
        owner, matchCollection, groupId, id)
    url = add_merge_collections(
        format_baseurl(host, port) + path, mergeCollections)
    return get_json(session, url, stream=stream)
@renderaccess
def delete_point_matches_between_groups(matchCollection, pGroupId, qGroupId,
                                        render=None, owner=None, host=None,
                                        port=None, session=requests.session(),
                                        **kwargs):
    """delete all the matches between two specific groups

    deletes all matches where (pGroupId and qGroupId match in either order)

    :func:`renderapi.render.renderaccess` decorated function

    Parameters
    ----------
    matchCollection : str
        matchCollection name
    pGroupId : str
        first group
    qGroupId : str
        second group
    owner : unicode
        matchCollection owner (fallback to render.DEFAULT_OWNER)
        (note match owner != stack owner always)
    render : Render
        Render connection object
    session : requests.session.Session
        requests session

    Returns
    -------
    requests.models.Response
        server response (previously discarded; returned so callers can
        inspect the status of the delete)
    """
    # docstring fix: the old docs described nonexistent pgroup/qgroup/
    # mergeCollections parameters and claimed a list of matches was
    # returned; the endpoint is a DELETE with no match payload
    request_url = format_baseurl(host, port) + \
        "/owner/{}/matchCollection/{}/group/{}/matchesWith/{}".format(
            owner, matchCollection, pGroupId, qGroupId)
    return rest_delete(session, request_url)
@renderaccess
def import_matches(matchCollection, data, owner=None, host=None, port=None,
                   session=requests.session(), render=None, **kwargs):
    """upload matches into the render match database

    :func:`renderapi.render.renderaccess` decorated function

    Parameters
    ----------
    matchCollection : str
        matchCollection name
    data : :obj:`list` of :obj:`dict`
        list of matches to import (see matches definition)
    owner : unicode
        matchCollection owner (fallback to render.DEFAULT_OWNER;
        note the match owner is not always the stack owner)
    render : Render
        Render connection object
    session : requests.session.Session
        requests session

    Returns
    -------
    requests.response.Reponse
        server response
    """
    url = format_baseurl(host, port) + (
        "/owner/%s/matchCollection/%s/matches" % (owner, matchCollection))
    logger.debug(url)
    return put_json(session, url, data)
@renderaccess
def delete_collection(matchCollection, owner=None, host=None, port=None,
                      session=requests.session(), render=None, **kwargs):
    """delete match collection from render database

    :func:`renderapi.render.renderaccess` decorated function

    Parameters
    ----------
    matchCollection : str
        matchCollection name to delete
    owner : unicode
        matchCollection owner (fallback to render.DEFAULT_OWNER)
        (note match owner != stack owner always)
    render : Render
        Render connection object
    session : requests.session.Session
        requests session

    Returns
    -------
    requests.models.Response
        server response

    Raises
    ------
    RenderError
        if cannot get a proper response from server
    """
    request_url = format_baseurl(host, port) + \
        "/owner/%s/matchCollection/%s" % (owner, matchCollection)
    logger.debug(request_url)
    # bug fix: the docstring always promised the server response, but the
    # response was assigned to a throwaway local and None was returned
    return rest_delete(session, request_url)
import io
import requests
from PIL import Image
import numpy as np
import logging
from .render import format_preamble, format_baseurl, renderaccess
from .errors import RenderError
from .utils import NullHandler, jbool, get_json, put_json
logger = logging.getLogger(__name__)
logger.addHandler(NullHandler())
# define acceptable image formats -- currently render generates png, jpeg, tiff
# maps user-supplied extension spellings (with or without a leading dot) to
# the render service's image-format URL path component; the None key makes
# png the default when no format is requested
IMAGE_FORMATS = {'png': 'png-image',
                 '.png': 'png-image',
                 'jpg': 'jpeg-image',
                 'jpeg': 'jpeg-image',
                 '.jpg': 'jpeg-image',
                 'tif': 'tiff-image',
                 '.tif': 'tiff-image',
                 'tiff': 'tiff-image',
                 'tiff16': 'tiff16-image',
                 None: 'png-image'}  # Default to png
def _strip_None_value_dictitems(d, exclude_keys=()):
    """filter a dictionary of query parameters

    Parameters
    ----------
    d : dict
        dictionary to filter
    exclude_keys : collections.abc.Container
        keys to drop regardless of value; the default is an immutable
        tuple (was a mutable ``[]`` default, the classic shared
        mutable-default-argument pitfall)

    Returns
    -------
    dict
        copy of d without None-valued items and without excluded keys
    """
    return {k: v for k, v in d.items()
            if v is not None and k not in exclude_keys}
@renderaccess
def get_bb_renderparams(stack, z, x, y, width, height, scale=1.0,
                        channel=None, minIntensity=None, maxIntensity=None,
                        binaryMask=None, filter=None, filterListName=None,
                        convertToGray=None, excludeMask=None,
                        host=None, port=None, owner=None,
                        project=None, session=requests.session(),
                        render=None, **kwargs):
    """fetch the render-parameters JSON for a bounding box at a z plane

    :func:`renderapi.render.renderaccess` decorated function; only
    options that are explicitly set are forwarded as query parameters.
    """
    request_url = format_preamble(
        host, port, owner, project, stack) + \
        "/z/%d/box/%d,%d,%d,%d,%f/render-parameters" % (
            z, x, y, width, height, scale)
    candidate_params = {
        "minIntensity": minIntensity,
        "maxIntensity": maxIntensity,
        "binaryMask": binaryMask,
        "filter": filter,
        "filterListName": filterListName,
        "convertToGray": convertToGray,
        "excludeMask": excludeMask,
        "channels": channel,
    }
    qparams = _strip_None_value_dictitems(candidate_params)
    return get_json(session, request_url, params=qparams)
@renderaccess
def get_bb_image(stack, z, x, y, width, height, scale=1.0,
                 channel=None,
                 minIntensity=None, maxIntensity=None, binaryMask=None,
                 filter=None, maxTileSpecsToRender=None,
                 host=None, port=None, owner=None, project=None,
                 img_format=None, session=requests.session(),
                 render=None, **kwargs):
    """render image from a bounding box defined in xy and return numpy array:

    :func:`renderapi.render.renderaccess` decorated function

    Parameters
    ----------
    stack : str
        name of render stack to get image from
    z : float
        z value to render
    x : int
        leftmost point of bounding rectangle
    y : int
        topmost pont of bounding rectangle
    width : int
        number of units @scale=1.0 to right (+x) of bounding box to render
    height : int
        number of units @scale=1.0 down (+y) of bounding box to render
    scale : float
        scale to render image at (default 1.0)
    channel : str
        channel name to render, (e.g. 'DAPI') or a weighted average of channels of the format
        e.g 'DAPI___.8___GFP___.2'
    binaryMask : bool
        whether to treat maskimage as binary
    maxTileSpecsToRender : int
        max number of tilespecs to render
    filter : bool
        whether to use server side filtering
    render : :class:`renderapi.render.Render`
        render connect object
    session : :class:`requests.sessions.Session`
        sessions object to connect with

    Returns
    -------
    numpy.array
        [N,M,:] array of image data from render

    Raises
    ------
    ValueError
        if img_format is not a recognized format
    RenderError
        if the server response cannot be decoded as an image
    """  # noqa: E501
    try:
        image_ext = IMAGE_FORMATS[img_format]
    except KeyError as e:  # pragma: no cover
        raise ValueError('{} is not a valid render image format!'.format(e))
    request_url = format_preamble(
        host, port, owner, project, stack) + \
        "/z/%d/box/%d,%d,%d,%d,%f/%s" % (
            z, x, y, width, height, scale, image_ext)
    # only forward explicitly-set options so server defaults apply otherwise
    qparams = {}
    if minIntensity is not None:
        qparams['minIntensity'] = minIntensity
    if maxIntensity is not None:
        qparams['maxIntensity'] = maxIntensity
    if binaryMask is not None:
        qparams['binaryMask'] = jbool(binaryMask)
    if filter is not None:
        qparams['filter'] = jbool(filter)
    if maxTileSpecsToRender is not None:
        qparams['maxTileSpecsToRender'] = maxTileSpecsToRender
    if channel is not None:
        qparams.update({'channels': channel})
    r = session.get(request_url, params=qparams)
    try:
        image = np.asarray(Image.open(io.BytesIO(r.content)))
        return image
    except Exception as e:
        logger.error(e)
        logger.error(r.text)
        # bug fix: the RenderError used to be *returned* rather than
        # raised, so callers received an exception instance as image data
        raise RenderError(r.text)
@renderaccess
def get_tile_renderparams(
        stack, tileId, channel=None, normalizeForMatching=None,
        excludeAllTransforms=None, excludeTransformsAfterLast=None,
        excludeFirstTransformAndAllAfter=None, scale=None,
        width=None, height=None, minIntensity=None, maxIntensity=None,
        filter=None, filterListName=None, excludeMask=None, convertToGray=None,
        binaryMask=None, host=None, port=None, owner=None,
        project=None, img_format=None,
        session=requests.session(), render=None, **kwargs):
    """fetch the render-parameters JSON for a single tile

    :func:`renderapi.render.renderaccess` decorated function; only
    options that are explicitly set are forwarded as query parameters.
    """
    request_url = format_preamble(
        host, port, owner, project, stack) + \
        "/tile/%s/render-parameters" % (
            tileId)
    candidate_params = {
        "normalizeForMatching": normalizeForMatching,
        "excludeAllTransforms": excludeAllTransforms,
        "excludeTransformsAfterLast": excludeTransformsAfterLast,
        "excludeFirstTransformAndAllAfter": excludeFirstTransformAndAllAfter,
        "scale": scale,
        "width": width,
        "height": height,
        "minIntensity": minIntensity,
        "maxIntensity": maxIntensity,
        "binaryMask": binaryMask,
        "filter": filter,
        "filterListName": filterListName,
        "convertToGray": convertToGray,
        "excludeMask": excludeMask,
        "channels": channel,
    }
    qparams = _strip_None_value_dictitems(candidate_params)
    return get_json(session, request_url, params=qparams)
@renderaccess
def get_tile_image_data(stack, tileId, channel=None, normalizeForMatching=True,
                        excludeAllTransforms=False, scale=None,
                        minIntensity=None, maxIntensity=None,
                        filter=None, host=None, port=None, owner=None,
                        project=None, img_format=None,
                        session=requests.session(), render=None, **kwargs):
    """render a single tile with all transforms applied and return
    the result as a numpy array
    :func:`renderapi.render.renderaccess` decorated function
    Parameters
    ----------
    stack : str
        name of render stack to get tile from
    tileId : str
        tileId of tile to render
    channel : str
        channel name to render, (e.g. 'DAPI') or a weighted average of channels of the format e.g 'DAPI___.8___GFP___.2'
    normalizeForMatching : bool
        whether to render the tile with transformations
        removed ('local' coordinates)
    excludeAllTransforms : bool
        whether to strip all transforms when normalizing -- some versions
        of render only remove the last transform from the list
    scale : float
        force scale of image
    minIntensity : int
        Minimum pixel value to rescale image
    maxIntensity : int
        Maximum pixel value to rescale image
    filter : bool
        whether to apply server side filtering to image
    img_format : str
        image format: one of IMAGE_FORMATS = 'png','.png','jpg',
        'jpeg','.jpg','tif','.tif','tiff'
    render : :obj:`renderapi.render.Render`
        render connect object
    session : :obj:`requests.sessions.Session`
        sessions object to connect with
    Returns
    -------
    numpy.array
        [N,M,:] array of image data from render
    Raises
    ------
    RenderError
    """  # noqa: E501
    try:
        image_ext = IMAGE_FORMATS[img_format]
    except KeyError as e:  # pragma: no cover
        raise ValueError('{} is not a valid render image format!'.format(e))
    request_url = '{}/tile/{}/{}'.format(
        format_preamble(host, port, owner, project, stack), tileId, image_ext)
    qparams = {}
    # truthiness check (not "is not None") so an explicit False falls back
    # to the server default rather than being forwarded
    if normalizeForMatching:
        qparams['normalizeForMatching'] = jbool(normalizeForMatching)
    if scale is not None:
        qparams['scale'] = scale
    if filter is not None:
        qparams['filter'] = jbool(filter)
    if excludeAllTransforms is not None:
        qparams['excludeAllTransforms'] = jbool(excludeAllTransforms)
    if channel is not None:
        qparams['channels'] = channel
    if minIntensity is not None:
        qparams['minIntensity'] = minIntensity
    if maxIntensity is not None:
        qparams['maxIntensity'] = maxIntensity
    logger.debug(request_url)
    response = session.get(request_url, params=qparams)
    try:
        return np.asarray(Image.open(io.BytesIO(response.content)))
    except Exception as e:
        logger.error(e)
        logger.error(response.text)
        return RenderError(response.text)
@renderaccess
def get_section_renderparams(stack, z, binaryMask=None, channel=None,
                             convertToGray=None, excludeMask=None, filter=None,
                             filterListName=None, minIntensity=None,
                             maxIntensity=None, scale=None,
                             host=None, port=None, owner=None, project=None,
                             session=requests.session(),
                             render=None, **kwargs):
    """fetch the render-parameters json describing how a section at z
    would be rendered
    :func:`renderapi.render.renderaccess` decorated function
    Parameters
    ----------
    stack : str
        name of render stack
    z : float
        layer Z
    Returns
    -------
    dict
        render-parameters json from the render server
    """
    request_url = '{}/z/{}/render-parameters'.format(
        format_preamble(host, port, owner, project, stack), z)
    # None-valued options are stripped so the server applies its defaults
    query_options = {
        "scale": scale,
        "minIntensity": minIntensity,
        "maxIntensity": maxIntensity,
        "binaryMask": binaryMask,
        "filter": filter,
        "filterListName": filterListName,
        "convertToGray": convertToGray,
        "excludeMask": excludeMask,
        "channels": channel}
    return get_json(session, request_url,
                    params=_strip_None_value_dictitems(query_options))
@renderaccess
def get_section_image(stack, z, scale=1.0, channel=None,
                      minIntensity=None, maxIntensity=None,
                      filter=False,
                      maxTileSpecsToRender=None, img_format=None,
                      host=None, port=None, owner=None, project=None,
                      session=requests.session(),
                      render=None, **kwargs):
    """render a whole section at z and return it as a numpy array
    :func:`renderapi.render.renderaccess` decorated function
    Parameters
    ----------
    stack : str
        name of render stack to render image from
    z : float
        layer Z
    scale : float
        linear scale at which to render image (e.g. 0.5)
    channel: str
        channel name to render, (e.g. 'DAPI') or a weighted average of channels of the format e.g 'DAPI___.8___GFP___.2'
    minIntensity : int
        Minimum pixel value to rescale image
    maxIntensity : int
        Maximum pixel value to rescale image
    filter : bool
        whether or not to apply server side filtering
    maxTileSpecsToRender : int
        maximum number of tile specs in rendering
    img_format : str
        one of IMAGE_FORMATS 'png','.png','jpg','jpeg',
        '.jpg','tif','.tif','tiff'
    render : :obj:`renderapi.render.Render`
        render connect object
    session : requests.sessions.Session
        sessions object to connect with
    Returns
    -------
    numpy.array
        [N,M,:] array of image data of section from render
    Examples
    --------
    ::
        >>> import renderapi
        >>> render = renderapi.render.connect('server',8080,'me','myproject')
        >>> img = render.run(renderapi.stack.get_section_image,'mystack',3.0)
    """  # noqa: E501
    try:
        image_ext = IMAGE_FORMATS[img_format]
    except KeyError as e:  # pragma: no cover
        raise ValueError('{} is not a valid render image format!'.format(e))
    request_url = '{}/z/{}/{}'.format(
        format_preamble(host, port, owner, project, stack), z, image_ext)
    qparams = {'scale': scale, 'filter': jbool(filter)}
    # optional parameters are only forwarded when explicitly set
    optional = (('maxTileSpecsToRender', maxTileSpecsToRender),
                ('channels', channel),
                ('minIntensity', minIntensity),
                ('maxIntensity', maxIntensity))
    qparams.update({k: v for k, v in optional if v is not None})
    response = session.get(request_url, params=qparams)
    return np.asarray(Image.open(io.BytesIO(response.content)))
@renderaccess
def get_renderparameters_image(renderparams, img_format=None,
                               host=None, port=None, owner=None,
                               session=requests.session(),
                               render=None, **kwargs):
    """render an image described by an arbitrary render-parameters json
    and return it as a numpy array
    :func:`renderapi.render.renderaccess` decorated function
    Parameters
    ----------
    renderparams : dict
        render-parameters json describing the image to render
    img_format : str
        one of IMAGE_FORMATS
    Returns
    -------
    numpy.array
        array of image data rendered from the parameters
    """
    try:
        image_ext = IMAGE_FORMATS[img_format]
    except KeyError as e:  # pragma: no cover
        raise ValueError('{} is not a valid render image format!'.format(e))
    request_url = '{}/owner/{}/{}'.format(
        format_baseurl(host, port), owner, image_ext)
    response = put_json(session, request_url, renderparams)
    return np.array(Image.open(io.BytesIO(response.content)))
from collections.abc import MutableMapping
from .errors import RenderError
import logging
from .utils import NullHandler
import warnings
logger = logging.getLogger(__name__)
logger.addHandler(NullHandler())
class MipMap:
    """container pairing an image uri with its (optional) mask uri
    Attributes
    ----------
    imageUrl : str or None
        uri corresponding to image
    maskUrl : str or None
        uri corresponding to mask
    """
    # only these two keys are addressable through the mapping interface
    _valid_keys = ('imageUrl', 'maskUrl')

    def __init__(self, imageUrl=None, maskUrl=None):
        self.imageUrl = imageUrl
        self.maskUrl = maskUrl

    def to_dict(self):
        """
        Returns
        -------
        dict
            json compatible dictionary representaton
        """
        return dict(self.__iter__())

    def _formatUrls(self):
        # build the serializable dict, omitting unset (None) urls
        urls = {}
        for attr in self._valid_keys:
            value = getattr(self, attr)
            if value is not None:
                urls[attr] = value
        return urls

    def __setitem__(self, key, value):
        if key not in self._valid_keys:
            raise KeyError('{} not a valid mipmap attribute'.format(key))
        setattr(self, key, value)

    def __getitem__(self, key):
        if key not in self._valid_keys:
            raise KeyError(
                '{} is not a valid attribute of a mipmapLevel'.format(key))
        return getattr(self, key)

    def __iter__(self):
        return iter(self._formatUrls().items())

    def __eq__(self, b):
        # compare against another MipMap, falling back to dict-like access
        try:
            other = (b.imageUrl, b.maskUrl)
        except AttributeError:
            other = (b.get('imageUrl'), b.get('maskUrl'))
        return (self.imageUrl, self.maskUrl) == other
class MipMapLevel:
    """deprecated representation of one level of an image pyramid --
    use :class:`MipMap` and :class:`ImagePyramid` instead.
    Can be put in dictionary formatting using dict(mML)
    Attributes
    ----------
    level : int
        level of 2x downsampling represented by mipmaplevel
    mipmap : MipMap
        image/mask pair for this level
    """
    def __init__(self, level, imageUrl=None, maskUrl=None):
        warnings.warn(
            "use of mipmaplevels deprecated, use MipMap and ImagePyramid",
            DeprecationWarning)
        self.level = level
        self.mipmap = MipMap(imageUrl, maskUrl)

    def to_dict(self):
        """
        Returns
        -------
        dict
            json compatible dictionary representaton
        """
        return dict(self.mipmap)

    def __getitem__(self, key):
        # only the wrapped MipMap's attributes are addressable
        if key in ('imageUrl', 'maskUrl'):
            return getattr(self.mipmap, key)
        raise KeyError(
            '{} is not a valid attribute of a mipmapLevel'.format(key))

    def __iter__(self):
        return iter(self.to_dict().items())

    def __eq__(self, b):
        return self.mipmap == b.mipmap
class TransformedDict(MutableMapping):
    """A dictionary that applies an arbitrary key-altering
    function (``__keytransform__``) before accessing the keys.
    Subclasses override ``__keytransform__`` to normalize keys;
    the base implementation is the identity."""

    def __init__(self, *args, **kwargs):
        self.store = {}
        # MutableMapping.update routes through __setitem__, so every
        # initial key passes through __keytransform__
        self.update(dict(*args, **kwargs))

    def __getitem__(self, key):
        return self.store[self.__keytransform__(key)]

    def __setitem__(self, key, value):
        self.store[self.__keytransform__(key)] = value

    def __delitem__(self, key):
        del self.store[self.__keytransform__(key)]

    def __iter__(self):
        return iter(self.store)

    def __len__(self):
        return len(self.store)

    def __keytransform__(self, key):
        # identity by default; subclasses normalize keys here
        return key
class ImagePyramid(TransformedDict):
    '''Image Pyramid class representing a set of MipMapLevels which correspond
    to mipmapped (continuously downsmapled by 2x) representations
    of an image at level 0
    Can be put into dictionary formatting using dict(ip) or OrderedDict(ip)

    Keys are normalized to their string integer form (e.g. 0 and "0"
    address the same level); invalid keys raise RenderError.
    '''
    def __keytransform__(self, key):
        """normalize a mipmap level key to its canonical string form

        Raises
        ------
        RenderError
            if key is not interpretable as a non-negative integer
        """
        try:
            level = int(key)
        except (ValueError, TypeError):
            # TypeError covers non-numeric objects such as None, which
            # int() rejects with TypeError rather than ValueError
            raise RenderError("{} is not a valid mipmap level".format(key))
        if level < 0:
            raise RenderError(
                "{} is not a valid mipmap level (less than 0)".format(key))
        return "{}".format(level)

    def to_dict(self):
        """json compatible dictionary representation of the pyramid"""
        return {k: v.to_dict() for k, v in self.items()}

    @classmethod
    def from_dict(cls, d):
        """build an ImagePyramid from a level->{imageUrl, maskUrl} dict"""
        return cls({lvl: MipMap(v.get('imageUrl', None),
                                v.get('maskUrl', None))
                    for lvl, v in d.items()})

    def __iter__(self):
        # NOTE(review): keys are strings, so this sorts lexicographically
        # ("10" < "2"); verify whether numeric ordering is expected for
        # pyramids with more than 10 levels
        return iter(sorted(self.store))

    @property
    def levels(self):
        """list of MipMapLevels in this ImagePyramid"""
        return self.store.keys()
class Layout:
    """Layout class to describe acquisition settings
    Attributes
    ----------
    sectionId : str
        sectionId this tile was taken from
    scopeId : str
        what microscope this came from (serialized as 'temca')
    cameraId : str
        camera this was taken with (serialized as 'camera')
    imageRow : int
        what row from a row,col layout this was taken
    imageCol : int
        column from a row,col layout this was taken
    stageX : float
        X stage coordinates for where this was taken
    stageY : float
        Y stage coordinates for where this taken
    rotation : float
        angle of camera when this was taken
    pixelsize : float
        effective size of pixels (in units of choice)
    distanceZ : float
        distance (in units of choice) from prior layer
    """
    # mapping from attribute name to its render json key
    _json_keys = (('sectionId', 'sectionId'),
                  ('scopeId', 'temca'),
                  ('cameraId', 'camera'),
                  ('imageRow', 'imageRow'),
                  ('imageCol', 'imageCol'),
                  ('stageX', 'stageX'),
                  ('stageY', 'stageY'),
                  ('rotation', 'rotation'),
                  ('pixelsize', 'pixelsize'),
                  ('distanceZ', 'distanceZ'))

    def __init__(self, sectionId=None, scopeId=None, cameraId=None,
                 imageRow=None, imageCol=None, stageX=None, stageY=None,
                 rotation=None, pixelsize=None,
                 force_pixelsize=True, distanceZ=None, **kwargs):
        """Initialize Layout
        Parameters
        ----------
        sectionId : str
            sectionId this tile was taken from
        scopeId : str
            what microscope this came from
        cameraId : str
            camera this was taken with
        imageRow : int
            what row from a row,col layout this was taken
        imageCol : int
            column from a row,col layout this was taken
        stageX : float
            X stage coordinates for where this was taken
        stageY : float
            Y stage coordinates for where this taken
        rotation : float
            angle of camera when this was taken
        pixelsize : float
            effective size of pixels (in units of choice)
        force_pixelsize : bool
            whether to default pixelsize to 0.1
        distanceZ : float
            distance (in units of choice) from prior layer
        """
        self.sectionId = sectionId
        self.scopeId = scopeId
        self.cameraId = cameraId
        self.imageRow = imageRow
        self.imageCol = imageCol
        self.stageX = stageX
        self.stageY = stageY
        self.rotation = rotation
        # default unset pixelsize to 0.1 unless the caller opts out
        if force_pixelsize and pixelsize is None:
            pixelsize = 0.100
        self.pixelsize = pixelsize
        self.distanceZ = distanceZ

    def to_dict(self):
        """return a dictionary representation of this object
        Returns
        -------
        dict
            json compatible dictionary of this object
            (None-valued fields are omitted)
        """
        d = {json_key: getattr(self, attr)
             for attr, json_key in self._json_keys}
        return {k: v for k, v in d.items() if v is not None}

    def from_dict(self, d):
        """set this object equal to the fields found in dictionary
        Parameters
        ----------
        d : dict
            dictionary to use to update (ignored if None)
        """
        if d is None:
            return
        for attr, json_key in self._json_keys:
            setattr(self, attr, d.get(json_key))
import json
from renderapi.errors import RenderError
from renderapi.transform.leaf import load_leaf_json
__all__ = [
'TransformList',
'ReferenceTransform',
'InterpolatedTransform',
'load_transform_json']
class TransformList:
    """An ordered collection of Transforms
    Attributes
    ----------
    tforms : :obj:`list` of :class:`Transform`
        transforms to apply
    transformId : str, optional
        uniqueId for this TransformList
    """
    def __init__(self, tforms=None, transformId=None, json=None):
        """Initialize TransformList
        Parameters
        ----------
        tforms : :obj:`list` of :class:`Transform`
            transforms to apply
        transformId : str, optional
            uniqueId for this TransformList
        json : dict, optional
            json compatible dictionary to create
            :class:`TransformList` via :method:`from_dict`
            (will supersede tforms and transformId if not None)
        """
        if json is not None:
            self.from_dict(json)
            return
        if tforms is None:
            tforms = []
        elif not isinstance(tforms, list):
            raise RenderError(
                'unexpected type {} for transforms!'.format(type(tforms)))
        self.tforms = tforms
        self.transformId = transformId

    def to_dict(self):
        """serialization function
        Returns
        -------
        dict
            json & render compatible representation of this TransformList
        """
        d = {'type': 'list',
             'specList': [tform.to_dict() for tform in self.tforms]}
        if self.transformId is not None:
            d['id'] = self.transformId
        return d

    def to_json(self):
        """serialization function
        Returns
        -------
        str
            string representation of the json & render
            representation of this TransformList
        """
        return json.dumps(self.to_dict())

    def from_dict(self, d):
        """deserialization function
        Parameters
        ----------
        d : dict
            json compatible dictionary representation of this TransformList
        """
        self.tforms = []
        if d is not None:
            self.transformId = d.get('id')
            self.tforms = [load_transform_json(td) for td in d['specList']]
        return self.tforms
class InterpolatedTransform:
    """Transform spec defined by linear interpolation of
    two other transform specs
    Attributes
    ----------
    a : :class:`Transform` or :class:`TransformList` or :class:`InterpolatedTransform`
        transform at minimum weight
    b : :class:`Transform` or :class:`TransformList` or :class:`InterpolatedTransform`
        transform at maximum weight
    lambda_ : float
        value in interval [0.,1.] which defines evaluation of the
        linear interpolation between a (at 0) and b (at 1)
    """  # noqa: E501
    def __init__(self, a=None, b=None, lambda_=None, json=None):
        """Initialize InterpolatedTransform
        Parameters
        ----------
        a : :class:`Transform` or :class:`TransformList`
            or :class:`InterpolatedTransform`
            transform at minimum weight
        b : :class:`Transform` or :class:`TransformList`
            or :class:`InterpolatedTransform`
            transform at maximum weight
        lambda_ : float
            value in interval [0.,1.] which defines evaluation of the
            linear interpolation between a (at 0) and b (at 1)
        json : dict
            json compatible representation of this transform to
            initialize via :method:`self.from_dict`
            (will supersede a, b, and lambda_ if not None)
        """
        if json is None:
            self.a = a
            self.b = b
            self.lambda_ = lambda_
        else:
            self.from_dict(json)

    def to_dict(self):
        """serialization routine
        Returns
        -------
        dict
            json compatible representation
        """
        return dict(self)

    def from_dict(self, d):
        """deserialization routine
        Parameters
        ----------
        d : dict
            json compatible representation
        """
        self.a = load_transform_json(d['a'])
        self.b = load_transform_json(d['b'])
        self.lambda_ = d['lambda']

    def __iter__(self):
        # yields render-compatible (key, value) pairs; dict(self) works
        yield 'type', 'interpolated'
        yield 'a', self.a.to_dict()
        yield 'b', self.b.to_dict()
        yield 'lambda', self.lambda_
class ReferenceTransform:
    """Transform which is simply a reference to a transform stored elsewhere
    Attributes
    ----------
    refId : str
        transformId of the referenced transform
    """
    def __init__(self, refId=None, json=None):
        """Initialize ReferenceTransform
        Parameters
        ----------
        refId : str
            transformId of the referenced transform
        json : dict
            json compatible representation of this transform
            (will supersede refId if not None)
        """
        if json is None:
            self.refId = refId
        else:
            self.from_dict(json)

    def to_dict(self):
        """serialization routine
        Returns
        -------
        dict
            json compatible representation of this transform
        """
        return {'type': 'ref', 'refId': self.refId}

    def from_dict(self, d):
        """deserialization routine
        Parameters
        ----------
        d : dict
            json compatible representation of this transform
        """
        self.refId = d['refId']

    def __str__(self):
        return 'ReferenceTransform(%s)' % self.refId

    def __repr__(self):
        return self.__str__()

    def __iter__(self):
        yield 'type', 'ref'
        yield 'refId', self.refId
def load_transform_json(d, default_type='leaf'):
    """function to get the proper deserialization function
    Parameters
    ----------
    d : dict
        json compatible representation of Transform
    default_type : str
        what kind of transform should we assume this
        if it is not specified in 'type' ('leaf','list','ref','interpolated')
    Returns
    -------
    renderapi.transform.Transform
        deserialized transformation using the most appropriate class
    Raises
    ------
    RenderError
        if d['type'] isn't one of ('leaf','list','ref','interpolated')
    """
    handle_load_tform = {'leaf': load_leaf_json,
                         'list': lambda x: TransformList(json=x),
                         'ref': lambda x: ReferenceTransform(json=x),
                         'interpolated':
                             lambda x: InterpolatedTransform(json=x)}
    tform_type = d.get('type', default_type)
    try:
        loader = handle_load_tform[tform_type]
    except KeyError as e:
        raise RenderError('Unknown Transform Type {}'.format(e))
    # call outside the try block: a KeyError raised while deserializing a
    # known type (e.g. a 'list' spec missing 'specList') must propagate,
    # not be misreported as an unknown transform type
    return loader(d)
from collections.abc import Iterable
from renderapi.errors import RenderError
from .leaf import AffineModel, Polynomial2DTransform
from .transform import TransformList, ReferenceTransform
__all__ = ['estimate_dstpts',
'estimate_transformsum']
def _dereference_tform(ref, reference_tforms):
    """resolve a ReferenceTransform against the supplied lookup list,
    raising RenderError when the list is missing or lacks the id"""
    try:
        return next(tf for tf in reference_tforms
                    if tf.transformId == ref.refId)
    except TypeError:
        # reference_tforms was None (or otherwise not iterable)
        raise RenderError(
            "you supplied a set of tranforms that includes a "
            "reference transform, but didn't supply a set of "
            "reference transforms to enable dereferencing")
    except StopIteration:
        raise RenderError(
            "the list of transforms you provided references "
            "transorm {} but that transform could not be found "
            "in the list of reference transforms".format(ref.refId))


def estimate_dstpts(transformlist, src=None, reference_tforms=None):
    """estimate destination points for list of transforms. Recurses
    through lists.
    Parameters
    ----------
    transformlist : :obj:list of :obj:Transform
        transforms that have a tform method implemented
    src : numpy.array
        a Nx2 array of source points
    reference_tforms : :obj:list of :obj:Transform, optional
        transforms used to dereference any ReferenceTransform entries
    Returns
    -------
    numpy.array
        Nx2 array of destination points
    """
    pts = src
    for tform in transformlist:
        if isinstance(tform, list):
            pts = estimate_dstpts(tform, pts, reference_tforms)
        elif isinstance(tform, TransformList):
            pts = estimate_dstpts(tform.tforms, pts, reference_tforms)
        elif isinstance(tform, ReferenceTransform):
            resolved = _dereference_tform(tform, reference_tforms)
            pts = estimate_dstpts([resolved], pts, reference_tforms)
        else:
            pts = tform.tform(pts)
    return pts
def estimate_transformsum(transformlist, src=None, order=2):
    """pseudo-composition of transforms in list of transforms
    using source point transformation and a single estimation.
    Will produce an Affine Model if all input transforms are Affine,
    otherwise will produce a Polynomial of specified order
    Parameters
    ----------
    transformlist : :obj:`list` of :obj:`Transform`
        list of transform objects that implement tform
    src : numpy.array
        Nx2 array of source points for estimation
    order : int
        order of Polynomial output if transformlist
        inputs are non-Affine
    Returns
    -------
    :class:`AffineModel` or :class:`Polynomial2DTransform`
        best estimate of transformlist in a single transform of this order
    """
    def flatten(nested):
        """generator-iterator yielding individual transforms from
        arbitrarily nested lists/TransformLists"""
        for item in nested:
            if isinstance(item, TransformList):
                # descend into TransformList contents, consistent with
                # how estimate_dstpts traverses them
                for sub in flatten(item.tforms):
                    yield sub
            elif isinstance(item, Iterable) and not isinstance(item, str):
                # bugfix: the original inverted this test, recursing into
                # strings and silently dropping non-iterable items (i.e.
                # the transforms themselves), so all() saw an empty
                # sequence and the affine branch was always taken
                for sub in flatten(item):
                    yield sub
            else:
                yield item

    dstpts = estimate_dstpts(transformlist, src)
    tforms = flatten(transformlist)
    if all(tform.className == AffineModel.className for tform in tforms):
        am = AffineModel()
        am.estimate(A=src, B=dstpts, return_params=False)
        return am
    return Polynomial2DTransform(src=src, dst=dstpts, order=order)
from .transform import Transform, logger
from .common import calc_first_order_properties
import numpy as np
from renderapi.errors import ConversionError, EstimationError
try:
from scipy.linalg import svd, LinAlgError
except ImportError as e:
logger.info(e)
logger.info('scipy-based linalg may or may not lead '
'to better parameter fitting')
from numpy.linalg import svd
from numpy.linalg.linalg import LinAlgError
__all__ = [
'AffineModel', 'TranslationModel',
'SimilarityModel', 'RigidModel']
class AffineModel(Transform):
    """Linear 2d Transformation
    mpicbg classname: mpicbg.trakem2.transform.AffineModel2D
    implements this simple math
    x'=M00*x + M01*y + B0
    y'=M10*x + M11*y + B1
    Attributes
    ----------
    M00 : float
        x'+=M00*x
    M01 : float
        x'+=M01*y
    M10 : float
        y'+=M10*x
    M11 : float
        y'+=M11*y
    B0 : float
        x'+=B0
    B1 : float
        y'+=B1
    transformId : str, optional
        unique transformId for this transform
    labels : list of str
        list of labels to give this transform
    M : numpy.array
        3x3 numpy array representing 2d Affine with homogeneous coordinates
        populates with values from M00, M01, M10, M11, B0, B1 with load_M()
    """
    className = 'mpicbg.trakem2.transform.AffineModel2D'

    def __init__(self, M00=1.0, M01=0.0, M10=0.0, M11=1.0, B0=0.0, B1=0.0,
                 transformId=None, labels=None, json=None, force_shear='x'):
        """Initialize AffineModel, defaulting to identity
        Parameters
        ----------
        M00 : float
            x'+=M00*x
        M01 : float
            x'+=M01*y
        M10 : float
            y'+=M10*x
        M11 : float
            y'+=M11*y
        B0 : float
            x'+=B0
        B1 : float
            y'+=B1
        transformId : str
            unique transformId for this transform (optional)
        labels : list of str
            list of labels to give this transform
        json : dict
            json compatible representation of this transform
            (will supersede all other parameters if not None)
        force_shear : str
            'x' or 'y' -- which axis absorbs the shear term when
            decomposing the matrix in calc_properties()
        """
        self.force_shear = force_shear
        if json is not None:
            self.from_dict(json)
        else:
            self.M00 = M00
            self.M01 = M01
            self.M10 = M10
            self.M11 = M11
            self.B0 = B0
            self.B1 = B1
            self.className = 'mpicbg.trakem2.transform.AffineModel2D'
            self.labels = labels
            self.load_M()
            self.transformId = transformId

    @property
    def dataString(self):
        """dataString string for this transform
        (render order: M00 M10 M01 M11 B0 B1)"""
        return "%.10f %.10f %.10f %.10f %.10f %.10f" % (
            self.M[0, 0], self.M[1, 0], self.M[0, 1],
            self.M[1, 1], self.M[0, 2], self.M[1, 2])

    def _process_dataString(self, datastring):
        """generate datastring and param attributes from datastring"""
        dsList = datastring.split()
        self.M00 = float(dsList[0])
        self.M10 = float(dsList[1])
        self.M01 = float(dsList[2])
        self.M11 = float(dsList[3])
        self.B0 = float(dsList[4])
        self.B1 = float(dsList[5])
        self.load_M()

    def load_M(self):
        """method to take the attribute of self and fill in self.M"""
        self.M = np.identity(3, np.double)
        self.M[0, 0] = self.M00
        self.M[0, 1] = self.M01
        self.M[1, 0] = self.M10
        self.M[1, 1] = self.M11
        self.M[0, 2] = self.B0
        self.M[1, 2] = self.B1

    @staticmethod
    def fit(A, B, return_all=False):
        """function to fit this transform given the corresponding sets of points A & B
        Parameters
        ----------
        A : numpy.array
            a Nx2 matrix of source points
        B : numpy.array
            a Nx2 matrix of destination points
        return_all : bool
            whether to also return lstsq residuals, rank and
            singular values
        Returns
        -------
        numpy.array
            a 6x1 matrix with the best fit parameters
            ordered M00,M01,M10,M11,B0,B1
        """
        if not all([A.shape[0] == B.shape[0], A.shape[1] == B.shape[1] == 2]):
            raise EstimationError(
                'shape mismatch! A shape: {}, B shape {}'.format(
                    A.shape, B.shape))
        N = A.shape[0]  # total points
        # build the design matrix: row 2i constrains x', row 2i+1
        # constrains y'
        M = np.zeros((2 * N, 6))
        Y = np.zeros((2 * N, 1))
        for i in range(N):
            M[2 * i, :] = [A[i, 0], A[i, 1], 0, 0, 1, 0]
            M[2 * i + 1, :] = [0, 0, A[i, 0], A[i, 1], 0, 1]
            Y[2 * i] = B[i, 0]
            Y[2 * i + 1] = B[i, 1]
        # rcond=None pins lstsq to the modern cutoff behavior and avoids
        # the numpy FutureWarning emitted when rcond is left unspecified
        (Tvec, residuals, rank, s) = np.linalg.lstsq(M, Y, rcond=None)
        if return_all:
            return Tvec, residuals, rank, s
        return Tvec

    def estimate(self, A, B, return_params=True, **kwargs):
        """method for setting this transformation with the best fit
        given the corresponding points A,B
        Parameters
        ----------
        A : numpy.array
            a Nx2 matrix of source points
        B : numpy.array
            a Nx2 matrix of destination points
        return_params : boolean
            whether to return the parameter matrix
        **kwargs
            keyword arguments to pass to self.fit
        Returns
        -------
        numpy.array
            self.M, the 3x3 homogeneous transformation matrix
            (or None if return_params=False)
        """
        Tvec = self.fit(A, B, **kwargs)
        self.M00 = Tvec[0, 0]
        self.M10 = Tvec[2, 0]
        self.M01 = Tvec[1, 0]
        self.M11 = Tvec[3, 0]
        self.B0 = Tvec[4, 0]
        self.B1 = Tvec[5, 0]
        self.load_M()
        if return_params:
            return self.M

    def concatenate(self, model):
        """concatenate a model to this model -- ported from trakEM2 below:
        ::
            final double a00 = m00 * model.m00 + m01 * model.m10;
            final double a01 = m00 * model.m01 + m01 * model.m11;
            final double a02 = m00 * model.m02 + m01 * model.m12 + m02;
            final double a10 = m10 * model.m00 + m11 * model.m10;
            final double a11 = m10 * model.m01 + m11 * model.m11;
            final double a12 = m10 * model.m02 + m11 * model.m12 + m12;
        Parameters
        ----------
        model : AffineModel
            model to concatenate to this one
        Returns
        -------
        AffineModel
            model after concatenating model with this model
        """
        # matrix product of homogeneous matrices implements the trakEM2
        # arithmetic above
        A = self.M.dot(model.M)
        newmodel = AffineModel(
            A[0, 0], A[0, 1], A[1, 0],
            A[1, 1], A[0, 2], A[1, 2])
        return newmodel

    def invert(self):
        """return an inverted version of this transformation
        Returns
        -------
        AffineModel
            an inverted version of this transformation
        """
        inv_M = np.linalg.inv(self.M)
        Ai = AffineModel(inv_M[0, 0], inv_M[0, 1], inv_M[1, 0],
                         inv_M[1, 1], inv_M[0, 2], inv_M[1, 2])
        return Ai

    @staticmethod
    def convert_to_point_vector(points):
        """method to help reshape x,y points to x,y,1 vectors
        Parameters
        ----------
        points : numpy.array
            a Nx2 array of x,y points
        Returns
        -------
        numpy.array
            a Nx3 array of x,y,1 points used for transformations
        """
        Np = points.shape[0]
        onevec = np.ones((Np, 1), np.double)
        if points.shape[1] != 2:
            raise ConversionError('Points must be of shape (:, 2) '
                                  '-- got {}'.format(points.shape))
        Nd = 2
        points = np.concatenate((points, onevec), axis=1)
        return points, Nd

    @staticmethod
    def convert_points_vector_to_array(points, Nd=2):
        """method for convertion x,y,K points to x,y vectors
        Parameters
        ----------
        points : numpy.array
            a Nx3 vector of points after transformation
        Nd : int
            the number of dimensions to cutoff (should be 2)
        Returns
        -------
        numpy.array: a Nx2 array of x,y points
        """
        # divide through by the homogeneous coordinate
        points = points[:, 0:Nd] / np.tile(points[:, 2], (Nd, 1)).T
        return points

    def tform(self, points):
        """transform a set of points through this transformation
        Parameters
        ----------
        points : numpy.array
            a Nx2 array of x,y points
        Returns
        -------
        numpy.array
            a Nx2 array of x,y points after transformation
        """
        points, Nd = self.convert_to_point_vector(points)
        pt = np.dot(self.M, points.T).T
        return self.convert_points_vector_to_array(pt, Nd)

    def inverse_tform(self, points):
        """transform a set of points through the inverse of this transformation
        Parameters
        ----------
        points : numpy.array
            a Nx2 array of x,y points
        Returns
        -------
        numpy.array
            a Nx2 array of x,y points after inverse transformation
        """
        points, Nd = self.convert_to_point_vector(points)
        pt = np.dot(np.linalg.inv(self.M), points.T).T
        return self.convert_points_vector_to_array(pt, Nd)

    def calc_properties(self):
        """decompose the 2x2 linear part into
        (scale_x, scale_y, shear_x, shear_y, rotation)
        via calc_first_order_properties"""
        return calc_first_order_properties(
            self.M[0:2, 0:2],
            force_shear=self.force_shear)

    @property
    def scale(self):
        """tuple of scale for x, y"""
        sx, sy, cx, cy, theta = self.calc_properties()
        return (sx, sy)

    @property
    def shear(self):
        """shear (along the axis selected by force_shear)"""
        sx, sy, cx, cy, theta = self.calc_properties()
        if self.force_shear == 'x':
            return cx
        else:
            return cy

    @property
    def translation(self):
        """tuple of translation in x, y"""
        return tuple(self.M[:2, 2])

    @property
    def rotation(self):
        """counter-clockwise rotation"""
        sx, sy, cx, cy, theta = self.calc_properties()
        return theta

    def __str__(self):
        return "M=[[%f,%f],[%f,%f]] B=[%f,%f]" % (
            self.M[0, 0], self.M[0, 1], self.M[1, 0],
            self.M[1, 1], self.M[0, 2], self.M[1, 2])
class TranslationModel(AffineModel):
    """Translation-only transform, implemented as an :class:`AffineModel`
    with the linear part fixed to identity
    mpicbg classname: mpicbg.trakem2.transform.TranslationModel2D
    implements
    x'=x + B0
    y'=y + B1
    Attributes
    ----------
    B0 : float
        x'+=B0
    B1 : float
        y'+=B1
    transformId : str, optional
        unique transformId for this transform
    labels : list of str
        list of labels to give this transform
    M : numpy.array
        3x3 numpy array representing 2d Affine with homogeneous coordinates
    """
    className = 'mpicbg.trakem2.transform.TranslationModel2D'
    def __init__(self, *args, **kwargs):
        # delegate entirely to AffineModel (defaults to identity)
        super(TranslationModel, self).__init__(*args, **kwargs)
    def _process_dataString(self, dataString):
        """set parameters from a render datastring of the form 'tx ty'"""
        tx, ty = map(float, dataString.split(' '))
        self.B0 = tx
        self.B1 = ty
        # linear part is pinned to the identity for a pure translation
        self.M00 = 1
        self.M10 = 0
        self.M01 = 0
        self.M11 = 1
        self.load_M()
    @staticmethod
    def fit(src, dst):
        """function to fit Translation transform given
        the corresponding sets of points src & dst
        (translation is the difference of the point-set centroids)
        Parameters
        ----------
        src : numpy.array
            a Nx2 matrix of source points
        dst : numpy.array
            a Nx2 matrix of destination points
        Returns
        -------
        numpy.array
            a 3x3 homogeneous transformation matrix with identity
            linear part and the best-fit translation in the last column
        """
        t = dst.mean(axis=0) - src.mean(axis=0)
        T = np.eye(3)
        T[:2, 2] = t
        return T
    def estimate(self, src, dst, return_params=True):
        """method for setting this transformation with the best fit
        given the corresponding points src,dst
        NOTE(review): this assigns self.M directly without refreshing
        M00..B1, so those scalar attributes can go stale after a fit --
        dataString serializes from self.M, so render output is unaffected;
        verify nothing reads the scalars after estimate().
        Parameters
        ----------
        src : numpy.array
            a Nx2 matrix of source points
        dst : numpy.array
            a Nx2 matrix of destination points
        return_params : bool
            whether to return the parameter matrix
        Returns
        -------
        numpy.array
            self.M, the 3x3 homogeneous transformation matrix
            (or None if return_params=False)
        """
        self.M = self.fit(src, dst)
        if return_params:
            return self.M
class RigidModel(AffineModel):
"""model for fitting Rigid only transformations
(rotation+translation)
or
(determinate=1, orthonormal eigenvectors)
implemented as an :class:`AffineModel`
Attributes
----------
M00 : float
x'+=M00*x
M01 : float
x'+=M01*y
M10 : float
y'+=M10*x
M11 : float
y'+=M11*y
B0 : float
x'+=B0
B1 : float
y'+=B1
transformId : str, optional
unique transformId for this transform
labels : list of str
list of labels to give this transform
M : numpy.array
3x3 numpy array representing 2d Affine with homogeneous coordinates
populates with values from M00, M01, M10, M11, B0, B1 with load_M()
"""
className = 'mpicbg.trakem2.transform.RigidModel2D'
    def __init__(self, *args, **kwargs):
        # delegate entirely to AffineModel.__init__ (defaults to identity);
        # RigidModel only specializes datastring parsing and fitting
        super(RigidModel, self).__init__(*args, **kwargs)
def _process_dataString(self, dataString):
"""expected datastring is 'theta tx ty'"""
theta, tx, ty = map(float, dataString.split(' '))
self.M00 = np.cos(theta)
self.M01 = -np.sin(theta)
self.M10 = np.sin(theta)
self.M11 = np.sin(theta)
self.B0 = tx
self.B1 = ty
self.load_M()
@staticmethod
def fit(src, dst, rigid=True, **kwargs):
"""function to fit this transform given the corresponding
sets of points src & dst
Umeyama estimation of similarity transformation
Parameters
----------
src : numpy.array
a Nx2 matrix of source points
dst : numpy.array
a Nx2 matrix of destination points
rigid : bool
whether to constrain this transform to be rigid
Returns
-------
numpy.array
a 6x1 matrix with the best fit parameters
ordered M00,M01,M10,M11,B0,B1
"""
# TODO shape assertion
num, dim = src.shape
src_cld = src - src.mean(axis=0)
dst_cld = dst - dst.mean(axis=0)
A = np.dot(dst_cld.T, src_cld) / num
d = np.ones((dim, ), dtype=np.double)
if np.linalg.det(A) < 0:
d[dim - 1] = -1
T = np.eye(dim + 1, dtype=np.double)
rank = np.linalg.matrix_rank(A)
if rank == 0:
raise EstimationError('zero rank matrix A unacceptable -- '
'likely poorly conditioned')
U, S, V = svd(A)
if rank == dim - 1:
if np.linalg.det(U) * np.linalg.det(V) > 0:
T[:dim, :dim] = np.dot(U, V)
else:
s = d[dim - 1]
d[dim - 1] = -1
T[:dim, :dim] = np.dot(U, np.dot(np.diag(d), V))
d[dim - 1] = s
else:
T[:dim, :dim] = np.dot(U, np.dot(np.diag(d), V.T))
fit_scale = (1.0 if rigid else
1.0 / src_cld.var(axis=0).sum() * np.dot(S, d))
T[:dim, dim] = dst.mean(axis=0) - fit_scale * np.dot(
T[:dim, :dim], src.mean(axis=0).T)
T[:dim, :dim] *= fit_scale
return T
def estimate(self, A, B, return_params=True, **kwargs):
"""method for setting this transformation with the
best fit given the corresponding points src,dst
Parameters
----------
A : numpy.array
a Nx2 matrix of source points
B : numpy.array
a Nx2 matrix of destination points
return_params : bool
whether to return the parameter matrix
Returns
-------
numpy.array
a 2x3 matrix of parameters for this matrix,
laid out (x,y) x (x,y,offset)
(or None if return_params=False)
"""
self.M = self.fit(A, B, **kwargs)
if return_params:
return self.M
class SimilarityModel(RigidModel):
    """class for fitting Similarity transformations
    (translation+rotation+scaling)
    or
    (orthogonal eigen vectors with equal eigenvalues)
    implemented as an :class:`AffineModel`

    Attributes
    ----------
    M00 : float
        x'+=M00*x
    M01 : float
        x'+=M01*y
    M10 : float
        y'+=M10*x
    M11 : float
        y'+=M11*y
    B0 : float
        x'+=B0
    B1 : float
        y'+=B1
    transformId : str, optional
        unique transformId for this transform
    labels : list of str
        list of labels to give this transform
    M : numpy.array
        3x3 numpy array representing 2d Affine with homogeneous coordinates
        populates with values from M00, M01, M10, M11, B0, B1 with load_M()
    """
    className = 'mpicbg.trakem2.transform.SimilarityModel2D'

    def __init__(self, *args, **kwargs):
        super(SimilarityModel, self).__init__(*args, **kwargs)

    def _process_dataString(self, dataString):
        """expected datastring is 's theta tx ty'"""
        s, theta, tx, ty = map(float, dataString.split(' '))
        # scaled rotation matrix is s * [[cos, -sin], [sin, cos]]
        self.M00 = s * np.cos(theta)
        self.M01 = -s * np.sin(theta)
        self.M10 = s * np.sin(theta)
        # FIX: was s * np.sin(theta); a scaled rotation requires s*cos(theta)
        self.M11 = s * np.cos(theta)
        self.B0 = tx
        self.B1 = ty
        self.load_M()

    @staticmethod
    def fit(src, dst, rigid=False, **kwargs):
        """function to fit this transform given the corresponding
        sets of points src & dst

        Umeyama estimation of similarity transformation
        (delegates to RigidModel.fit with scale estimation enabled)

        Parameters
        ----------
        src : numpy.array
            a Nx2 matrix of source points
        dst : numpy.array
            a Nx2 matrix of destination points
        rigid : bool
            whether to constrain this transform to be rigid

        Returns
        -------
        numpy.array
            a 3x3 homogeneous transformation matrix
        """
        return RigidModel.fit(src, dst, rigid=rigid)
from .transform import Transform, logger
from renderapi.errors import RenderError
from .affine_models import (
AffineModel,
TranslationModel,
RigidModel,
SimilarityModel)
from .polynomial_models import (
Polynomial2DTransform,
NonLinearTransform,
NonLinearCoordinateTransform,
LensCorrection)
from .thin_plate_spline import (
ThinPlateSplineTransform)
# Public API of this module.
__all__ = ['load_leaf_json']
def load_leaf_json(d):
    """function to get the proper deserialization function for leaf transforms

    Parameters
    ----------
    d : dict
        json compatible representation of leaf transform to deserialize

    Returns
    -------
    renderapi.transform.Transform
        deserialized transformation

    Raises
    ------
    RenderError
        if d['type'] != leaf or is omitted
    """
    tform_type = d.get('type', 'leaf')
    if tform_type != 'leaf':
        raise RenderError(
            'Unexpected or unknown Transform Type {}'.format(tform_type))
    # map the TrakEM2 className onto the concrete python class
    constructors = {
        AffineModel.className: AffineModel,
        Polynomial2DTransform.className: Polynomial2DTransform,
        TranslationModel.className: TranslationModel,
        RigidModel.className: RigidModel,
        SimilarityModel.className: SimilarityModel,
        NonLinearTransform.className: NonLinearTransform,
        LensCorrection.className: LensCorrection,
        ThinPlateSplineTransform.className: ThinPlateSplineTransform,
        NonLinearCoordinateTransform.className: NonLinearCoordinateTransform,
    }
    tform_class = d['className']
    try:
        return constructors[tform_class](json=d)
    except KeyError as e:
        # unknown className: fall back to the generic Transform container
        logger.info('Leaf transform class {} not defined in '
                    'transform module, using generic'.format(e))
        return Transform(json=d)
import numpy as np
from renderapi.errors import RenderError, EstimationError
from renderapi.utils import encodeBase64, decodeBase64
from .transform import Transform
import scipy.spatial
import logging
import sys
# Public API of this module.
__all__ = ['ThinPlateSplineTransform']

# Module-level logger that also echoes messages to stdout.
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(sys.stdout))
class ThinPlateSplineTransform(Transform):
    """
    render-python class that can hold a dataString for
    mpicbg.trakem2.transform.ThinPlateSplineTransform class.

    Parameters
    ----------
    dataString: str or None
        data string of transformation
    labels : list of str
        list of labels to give this transform
    json: dict or None
        json compatible dictionary representation of the transformation

    Returns
    -------
    :class:`ThinPlateSplineTransform`
        a transform instance
    """
    className = 'mpicbg.trakem2.transform.ThinPlateSplineTransform'

    def __init__(self, dataString=None, json=None, transformId=None,
                 labels=None):
        if json is not None:
            # NOTE: when json is given, the dataString/transformId/labels
            # arguments are ignored and all state comes from the dict
            self.from_dict(json)
        else:
            if dataString is not None:
                self._process_dataString(dataString)
            self.labels = labels
            self.transformId = transformId
            self.className = (
                'mpicbg.trakem2.transform.ThinPlateSplineTransform')
def _process_dataString(self, dataString):
    """Populate spline state from a serialized TrakEM2 dataString.

    Expected layout: '<tag> <ndims> <nLm> <affine-or-null> <points>',
    where the last two fields are base64-encoded double arrays.
    """
    fields = dataString.split(" ")
    self.ndims = int(fields[1])
    self.nLm = int(fields[2])
    if fields[3] != "null":
        try:
            values = decodeBase64(fields[3])
            # first ndims*ndims values form the affine matrix, the
            # remainder is the translation vector
            self.aMtx = values[0:self.ndims*self.ndims].reshape(
                self.ndims, self.ndims)
            self.bVec = values[self.ndims*self.ndims:]
        except ValueError:
            raise RenderError(
                "inconsistent sizes and array lengths, \
                in ThinPlateSplineTransform dataString")
    else:
        # no affine component was serialized
        self.aMtx = None
        self.bVec = None
    try:
        values = decodeBase64(fields[4])
        # source landmarks are stored column-major, kernel weights row-major
        self.srcPts = values[0:self.ndims*self.nLm].reshape(
            self.ndims, self.nLm, order='F')
        self.dMtxDat = values[self.ndims*self.nLm:].reshape(
            self.ndims, self.nLm, order='C')
    except ValueError:
        raise RenderError(
            "inconsistent sizes and array lengths, \
            in ThinPlateSplineTransform dataString")
def tform(self, points):
    """transform a set of points through this transformation

    Parameters
    ----------
    points : numpy.array
        a Nx2 array of x,y points

    Returns
    -------
    numpy.array
        a Nx2 array of x,y points after transformation
    """
    # alias kept for interface compatibility with other transforms
    return self.apply(points)
def apply(self, points):
    """Apply the spline: identity + kernel deformation + optional affine."""
    # an un-estimated transform acts as the identity
    if not hasattr(self, 'dMtxDat'):
        return points
    out = points + self.computeDeformationContribution(points)
    if self.aMtx is not None:
        out = out + self.aMtx.dot(points.transpose()).transpose()
    if self.bVec is not None:
        out = out + self.bVec
    return out
def computeDeformationContribution(self, points):
    """Evaluate the r^2*log(r) radial basis deformation at each point."""
    sq_dists = scipy.spatial.distance.cdist(
        points,
        self.srcPts.transpose(),
        metric='sqeuclidean')
    # r^2 * log(r) written as d * log(sqrt(d)) with d = r^2; the masked
    # log keeps the kernel zero at coincident points (r == 0)
    kernel = sq_dists * np.ma.log(np.sqrt(sq_dists)).filled(0.0)
    return kernel.dot(self.dMtxDat.transpose())
def gradient_descent(
        self,
        pts,
        gamma=1.0,
        precision=0.0001,
        max_iters=1000):
    """based on https://en.wikipedia.org/wiki/Gradient_descent#Python

    Parameters
    ----------
    pts : numpy array
        a Nx2 array of x,y points
    gamma : float
        step size is gamma fraction of current gradient
    precision : float
        criteria for stopping for differences between steps
    max_iters : int
        limit for iterations, error if reached

    Returns
    -------
    numpy array
        a Nx2 array of x,y points, estimated inverse of pts
    """
    estimate = np.copy(pts)
    previous = np.copy(pts)
    step = 1
    n_iter = 0
    while (step > precision) and (n_iter < max_iters):
        previous[:, :] = estimate[:, :]
        # move against the residual of the forward transform
        estimate -= gamma * (self.apply(previous) - pts)
        step = np.linalg.norm(estimate - previous, axis=1).max()
        n_iter += 1
    if n_iter == max_iters:
        raise EstimationError(
            'gradient descent for inversion of ThinPlateSpline '
            'reached maximum iterations: %d' % max_iters)
    return estimate
def inverse_tform(
        self,
        points,
        gamma=1.0,
        precision=0.0001,
        max_iters=1000):
    """transform a set of points through the inverse of this transformation

    Parameters
    ----------
    points : numpy.array
        a Nx2 array of x,y points
    gamma : float
        step size is gamma fraction of current gradient
    precision : float
        criteria for stopping for differences between steps
    max_iters : int
        limit for iterations, error if reached

    Returns
    -------
    numpy.array
        a Nx2 array of x,y points after inverse transformation
    """
    # the spline has no closed-form inverse; invert numerically
    return self.gradient_descent(
        points,
        gamma=gamma,
        precision=precision,
        max_iters=max_iters)
@staticmethod
def fit(A, B, computeAffine=True):
"""function to fit this transform given the corresponding sets of points A & B
Parameters
----------
A : numpy.array
a Nx2 matrix of source points
B : numpy.array
a Nx2 matrix of destination points
Returns
-------
dMatrix : numpy.array
ndims x nLm
aMatrix : numpy.array
ndims x ndims, affine matrix
bVector : numpy.array
ndims x 1, translation vector
"""
if not all([A.shape[0] == B.shape[0], A.shape[1] == B.shape[1] == 2]):
raise EstimationError(
'shape mismatch! A shape: {}, B shape {}'.format(
A.shape, B.shape))
# build displacements
ndims = B.shape[1]
nLm = B.shape[0]
y = (B - A).flatten()
# compute K
# tempting to matricize this, but, nLm x nLm can get big
# settle for vectorize
kMatrix = np.zeros((ndims * nLm, ndims * nLm))
for i in range(nLm):
r = np.linalg.norm(A[i, :] - A, axis=1)
nrm = np.zeros_like(r)
ind = np.argwhere(r > 1e-8)
nrm[ind] = r[ind] * r[ind] * np.log(r[ind])
kMatrix[i * ndims, 0::2] = nrm
kMatrix[(i * ndims + 1)::2, 1::2] = nrm
# compute L
lMatrix = kMatrix
if computeAffine:
pMatrix = np.tile(np.eye(ndims), (nLm, ndims + 1))
for d in range(ndims):
pMatrix[0::2, d*ndims] = A[:, d]
pMatrix[1::2, d*ndims + 1] = A[:, d]
lMatrix = np.zeros(
(ndims * (nLm + ndims + 1), ndims * (nLm + ndims + 1)))
lMatrix[
0: pMatrix.shape[0],
kMatrix.shape[1]: kMatrix.shape[1] + pMatrix.shape[1]] = \
pMatrix
pMatrix = np.transpose(pMatrix)
lMatrix[
kMatrix.shape[0]: kMatrix.shape[0] + pMatrix.shape[0],
0: pMatrix.shape[1]] = pMatrix
lMatrix[0: ndims * nLm, 0: ndims * nLm] = kMatrix
y = np.append(y, np.zeros(ndims * (ndims + 1)))
wMatrix = np.linalg.solve(lMatrix, y)
dMatrix = np.reshape(wMatrix[0: ndims * nLm], (ndims, nLm), order='F')
aMatrix = None
bVector = None
if computeAffine:
aMatrix = np.reshape(
wMatrix[ndims * nLm: ndims * nLm + ndims * ndims],
(ndims, ndims),
order='F')
bVector = wMatrix[ndims * nLm + ndims * ndims:]
return dMatrix, aMatrix, bVector
def estimate(self, A, B, computeAffine=True):
    """method for setting this transformation with the best fit
    given the corresponding points A,B

    Fits the spline coefficients and stores them, along with the
    source landmarks, on this instance.

    Parameters
    ----------
    A : numpy.array
        a Nx2 matrix of source points
    B : numpy.array
        a Nx2 matrix of destination points
    computeAffine: boolean
        whether to include an affine computation
    """
    self.dMtxDat, self.aMtx, self.bVec = self.fit(
        A, B, computeAffine=computeAffine)
    self.nLm, self.ndims = B.shape
    # landmarks are stored ndims x nLm (column per point)
    self.srcPts = A.transpose()
@property
def dataString(self):
    """Serialize this spline back to the TrakEM2 dataString format."""
    header = 'ThinPlateSplineR2LogR {} {}'.format(self.ndims, self.nLm)
    if self.aMtx is None:
        affine_block = "null"
    else:
        affine_block = encodeBase64(
            np.concatenate((self.aMtx.flatten(), self.bVec)))
    # landmarks column-major, kernel weights row-major (matches parsing)
    point_block = encodeBase64(np.concatenate((
        self.srcPts.flatten(order='F'),
        self.dMtxDat.flatten(order='C'))))
    return '{} {} {}'.format(header, affine_block, point_block)
@staticmethod
def mesh_refine(
        new_src,
        old_src,
        old_dst,
        old_tf=None,
        computeAffine=True,
        tol=1.0,
        max_iter=50,
        nworst=10,
        niter=0):
    """recursive kernel for adaptive_mesh_estimate()

    Parameters
    ----------
    new_src : numpy.array
        Nx2 array of new control source points. Adapts during recursion.
        Seeded by adaptive_mesh_estimate.
    old_src : numpy.array
        Nx2 array of orignal control source points.
    old_dst : numpy.array
        Nx2 array of orignal control destination points.
    old_tf : ThinPlateSplineTransform
        transform constructed from old_src and old_dst, passed through
        recursion iterations. Created if None.
    computeAffine : boolean
        whether returned transform will have aMtx
    tol : float
        in units of pixels, how close should the points match
    max_iter: int
        some limit on how many recursive attempts
    nworst : int
        per iteration, the nworst matching srcPts will be added
    niter : int
        passed through the recursion for stopping criteria

    Returns
    -------
    ThinPlateSplineTransform
    """
    if old_tf is None:
        # build the reference transform from the original control points
        old_tf = ThinPlateSplineTransform()
        old_tf.estimate(old_src, old_dst, computeAffine=computeAffine)
    # candidate transform from the smaller new_src control set, targeting
    # the reference transform's output at those points
    new_tf = ThinPlateSplineTransform()
    new_tf.estimate(
        new_src,
        old_tf.tform(new_src),
        computeAffine=computeAffine)
    # residual of the candidate against the original mapping
    new_dst = new_tf.tform(old_src)
    delta = np.linalg.norm(new_dst - old_dst, axis=1)
    ind = np.argwhere(delta > tol).flatten()
    if ind.size == 0:
        return new_tf
    if niter == max_iter:
        raise EstimationError(
            "Max number of iterations ({}) reached in"
            " ThinPlateSplineTransform.mesh_refine()".format(
                max_iter))
    # NOTE(review): np.argsort is ascending, so this adds the nworst
    # *smallest* of the out-of-tolerance residuals, while the docstring
    # says the worst matches are added -- confirm intended behavior
    sortind = np.argsort(delta[ind])
    new_src = np.vstack((new_src, old_src[ind[sortind[0: nworst]]]))
    return ThinPlateSplineTransform.mesh_refine(
        new_src,
        old_src,
        old_dst,
        old_tf=old_tf,
        computeAffine=computeAffine,
        tol=tol,
        max_iter=max_iter,
        nworst=nworst,
        niter=(niter + 1))
def adaptive_mesh_estimate(
        self,
        starting_grid=7,
        computeAffine=True,
        tol=1.0,
        max_iter=50,
        nworst=10):
    """method for creating a transform with fewer control points
    that matches the original transfom within some tolerance.

    Parameters
    ----------
    starting_grid : int
        estimate will start with an n x n grid
    computeAffine : boolean
        whether returned transform will have aMtx
    tol : float
        in units of pixels, how close should the points match
    max_iter: int
        some limit on how many recursive attempts
    nworst : int
        per iteration, the nworst matching srcPts will be added

    Returns
    -------
    ThinPlateSplineTransform
    """
    # seed the refinement with a regular grid over the landmark bounds
    lower = self.srcPts.min(axis=1)
    upper = self.srcPts.max(axis=1)
    seed_src = self.src_array(
        lower[0], lower[1], upper[0], upper[1],
        starting_grid, starting_grid)
    control_src = self.srcPts.transpose()
    control_dst = self.tform(control_src)
    return ThinPlateSplineTransform.mesh_refine(
        seed_src,
        control_src,
        control_dst,
        old_tf=self,
        computeAffine=computeAffine,
        tol=tol,
        max_iter=max_iter,
        nworst=nworst,
        niter=0)
@staticmethod
def src_array(xmin, ymin, xmax, ymax, nx, ny):
"""create N x 2 array of regularly spaced points
Parameters
----------
xmin : float
minimum of x grid
ymin : float
minimum of y grid
xmax : float
maximum of x grid
ymax : float
maximum of y grid
nx : int
number of points in x axis
ny : int
number of points in y axis
Returns
-------
src : :class:`numpy.ndarray`
(nx * ny) x 2 array of coordinated.
"""
src = np.mgrid[xmin:xmax:nx*1j, ymin:ymax:ny*1j].reshape(2, -1).T
return src
def scale_coordinates(
        self,
        factor,
        ngrid=20,
        preserve_srcPts=False):
    """estimates a new ThinPlateSplineTransform from the current one
    in a scaled transform space.

    Parameters
    ----------
    factor : float
        the factor by which to scale the space
    ngrid : int
        number of points per axis for the estimation grid
    preserve_srcPts : bool
        one might want to keep the original scaled srcPts
        for example, if pts were made specially for a mask
        or a crack or fold

    Returns
    -------
    new_tform : :class:`renderapi.transform.ThinPlateSplineTransform`
        the new transform in the scaled space
    """
    scaled = ThinPlateSplineTransform()
    # keep an affine part only if the current transform has one
    computeAffine = self.aMtx is not None
    lower = self.srcPts.min(axis=1)
    upper = self.srcPts.max(axis=1)
    src = self.src_array(
        lower[0], lower[1], upper[0], upper[1], ngrid, ngrid)
    if preserve_srcPts:
        # do not repeat close points
        dist = scipy.spatial.distance.cdist(
            src,
            self.srcPts.transpose(),
            metric='euclidean')
        keep = np.invert(np.any(dist < 1e-3, axis=0))
        src = np.vstack((src, self.srcPts.transpose()[keep]))
    scaled.estimate(
        src * factor,
        self.tform(src) * factor,
        computeAffine=computeAffine)
    return scaled
`render-svg-icons` is a python script for rendering SVG icons to PNG icons in various sizes. While it runs on both Linux and macOS, it currently uses a specific SVG template and a folder structure based on [the XDG Icon Theme Specification](https://specifications.freedesktop.org/icon-theme-spec/latest/).
# Provenance
I've come across the progenitor of this script in multiple places. It seems to have originated with GNOME, but it's not entirely clear.
# Development Status
`render-svg-icons` _should_ basically work for its original purpose of rendering XDG icon themes. I plan to continue adapting the code to make it easier to use and to allow for more flexible use cases in the future.
While I plan to leave the command-line interface more or less intact, it and the implementation thereof shouldn't be considered stable at the moment. If you plan to use `render-svg-icons` as a dependency, you should pin an exact version you know to work for your purposes.
# Installation
For day-to-day use, you should install `render-svg-icons` from PyPI:
```bash
$ pip install render-svg-icons
```
For development purposes, you can build and install `render-svg-icons` from the cloned repository using [Poetry](https://python-poetry.org/).
To install Poetry, run:
```bash
$ curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python -
```
Then, from the `render-svg-icons` cloned repository, run:
```bash
$ poetry build
$ pip install --force-reinstall dist/*.whl
```
This will install the repository code in the same manner as the PyPI version. (The `--force-reinstall` causes it to overwrite any other installed version.)
To uninstall either version of `render-svg-icons`, run:
```bash
$ pip uninstall render-svg-icons
```
# Dependencies
`render-svg-icons` requires both [Inkscape](https://inkscape.org/) and [OptiPNG](http://optipng.sourceforge.net/).
## Linux
To install Inkscape and OptiPNG on, e.g., Debian, Ubuntu, etc.:
```bash
$ sudo apt install inkscape optipng
```
(For Fedora, Arch, etc., I'm sure you know what you're doing.)
## macOS
To install Inkscape and OptiPNG on macOS, first install [Homebrew](https://brew.sh/) if you haven't already:
```bash
% /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
```
Then use Homebrew to install Inkscape and OptiPNG:
```
% brew install inkscape optipng
```
You can probably use other installation methods, but you may need to specify the executable path if it's different from the one used by Homebrew.
## Other Platforms
`render-svg-icons` isn't currently compatible with platforms other than Linux and macOS. If you'd like to use this on another platform, please [open an issue](https://github.com/elsiehupp/render-svg-icons/issues/new) or [a pull request](https://github.com/elsiehupp/render-svg-icons/compare).
# Usage
After installation:
```bash
$ render_svg_icons --help
┌──────────────────────────────────────────────────┐
│ Render icons from SVG to PNG │
└──────────────────────────────────────────────────┘
┌──────────────────────────────────────────────────┐
│ Usage │
├──────────────────────────────────────────────────┤
│ $ render_svg_icons │
│ [--help] │
│ [--base_dpi BASE_DPI] │
│ [--categories [CATEGORIES]] │
│ [--filter FILTER] │
│ [--inkscape_path INKSCAPE_PATH] │
│ [--individual_icons [INDIVIDUAL_ICONS]] │
│ [--optipng_path OPTIPNG_PATH] │
│ [--output_path OUTPUT_PATH] │
│ [--scaling_factors [SCALING_FACTORS]] │
│ [--verbose] │
│ │
└──────────────────────────────────────────────────┘
┌──────────────────────────────────────────────────┐
│ Optional Arguments │
├──────────────────────────────────────────────────┤
│ --help │
│ │
│ Show this help message and exit. │
│ │
│ --base_dpi BASE_DPI │
│ │
│ dpi to use for rendering (by default 96) │
│ │
│ --categories [CATEGORIES] │
│ │
│ categories of icons to render (by default all) │
│ │
│ --filter FILTER │
│ │
│ Inkscape filter to apply while rendering │
│ (by default none) │
│ │
│ --inkscape_path INKSCAPE_PATH │
│ │
│ path of Inkscape executable │
│ (if the script can't find it) │
│ │
│ --individual_icons [INDIVIDUAL_ICONS] │
│ │
│ individual icon names (without extensions) │
│ to render (by default all) │
│ │
│ --optipng_path OPTIPNG_PATH │
│ │
│ path of OptiPNG executable │
│ (if the script can't find it) │
│ │
│ --output_path OUTPUT_PATH │
│ │
│ output directory (by default '.') │
│ │
│ --scaling_factors [SCALING_FACTORS] │
│ │
│ scaling factors to render at │
│ (by default [1, 2], e.g. 100% & 200%) │
│ │
│ --verbose │
│ │
│ print verbose output to the terminal │
│ │
└──────────────────────────────────────────────────┘
```
# License
`render-svg-icons` is published under the GPLv3 or later (i.e. you can use it for proprietary purposes via the command line, but you can only link to it from other GPL code). | /render-svg-icons-0.1.0.tar.gz/render-svg-icons-0.1.0/README.md | 0.591369 | 0.919787 | README.md | pypi |
import argparse
import json
import os
import sys
import yaml
from jinja2 import Environment, FileSystemLoader
def error(msg, *args, **kwargs):
    """Print a formatted error message to stderr and exit with status 1."""
    print(msg.format(*args, **kwargs), file=sys.stderr)
    sys.exit(1)
def get_vars(values):
    """Parse a list of 'name=value' strings into a dictionary.

    Later entries override earlier ones; exits with an error for
    entries without an '='.
    """
    parsed = {}
    for item in (values or []):
        if '=' not in item:
            error("var '{}' is invalid", item)
        name, _, val = item.partition('=')
        parsed[name] = val
    return parsed
def get_file_context(paths, loader):
    """Parse a list files using the provided parser. Return their combined context as a
    dictionary.

    Files are merged in order, so later files override earlier ones.
    Exits with an error message if a file cannot be read.
    """
    context = {}
    for path in paths or ():
        try:
            with open(path) as fd:
                data = loader(fd)
            context.update(data)
        except IOError as e:
            # FIX: the message has two placeholders; previously only the
            # exception was passed, which raised IndexError during .format()
            error("file '{}' not readable: {}", path, e)
    return context
def get_yaml(paths):
    """Parse a list of YAML files and return their combined contents as a dictionary."""
    def loader(fd):
        # FIX: safe_load avoids executing arbitrary python tags and works on
        # PyYAML >= 6, where yaml.load requires an explicit Loader argument
        data = yaml.safe_load(fd)
        if not isinstance(data, dict):
            # FIX: drop the unused '{}' placeholder, which raised IndexError
            error("file does not contain a YAML object")
        return data
    return get_file_context(paths, loader)
def get_json(paths):
    """Parse a list of JSON files and return their combined contents as a dictionary."""
    def loader(fd):
        data = json.load(fd)
        if not isinstance(data, dict):
            # FIX: drop the unused '{}' placeholder, which raised IndexError
            error("file does not contain a JSON object")
        return data
    return get_file_context(paths, loader)
def get_envs(names):
    """Retrieve the named environment values and return them as a dictionary.

    Non-existent environment variables map to an empty string.
    """
    return {name: os.getenv(name, "") for name in (names or [])}
def get_context(options):
    """Return the template context from the provided options.

    Later sources override earlier ones: YAML < JSON < environment < --var.
    """
    context = {}
    context.update(get_yaml(options.yaml))
    context.update(get_json(options.json))
    # FIX: os.environ was previously applied twice when --all-env was set
    # (a redundant "if options.allenv" update preceded this line)
    context.update(os.environ if options.allenv else get_envs(options.envs))
    context.update(get_vars(options.vars))
    return context
def render(out, tpl, context):
    """Render the template at ``tpl`` to the file-like object ``out`` using the provided
    context."""
    tpl = os.path.abspath(tpl)
    # Rooting the loader at '/' and using the absolute path as the template
    # name lets Jinja load a template from anywhere on the filesystem.
    # NOTE(review): this assumes POSIX-style paths -- confirm before relying
    # on it on Windows.
    env = Environment(loader=FileSystemLoader(searchpath='/'))
    template = env.get_template(tpl)
    template.stream(context).dump(out)
    # Jinja strips the end-of-file newline. Let's add it back.
    out.write('\n')
def run():
    """Entry point: parse command-line arguments and render to stdout."""
    parser = argparse.ArgumentParser(
        description="Render a template file to stdout. All options may be given multiple times.")
    parser.add_argument('-v', '--var', metavar='name=value', dest='vars',
                        action='append', help="Add the provided value.")
    parser.add_argument('-e', '--env', metavar='name', dest='envs',
                        action='append', help="Add an environment variable.")
    parser.add_argument('-E', '--all-env', dest='allenv',
                        action='store_true',
                        help="Add all environment variables.")
    parser.add_argument('-y', '--yaml', metavar='file', dest='yaml',
                        action='append', help="Add contents of a YAML file.")
    parser.add_argument('-j', '--json', metavar='file', dest='json',
                        action='append', help="Add contents of a JSON file.")
    parser.add_argument('template', action='store',
                        help="The template to render.")
    opts = parser.parse_args()
    render(sys.stdout, opts.template, get_context(opts))
# Allow the module to be executed directly as a script.
if __name__ == '__main__':
    run()
# Render Configuration Files
Configure your application with rendered configuration trees. I.e., configuration trees that contain leaves with
references to other leaves and computations.
## Features
`renderconftree` is a module that allows you to write configuration files containing python expressions and variable references.
These can be used to do simple variable substitution (have one parameter in your configuration file be automatically
set to the value of another parameter) or more complicated computations (have the value of a parameter
be automatically calculated from one or more other parameters). It has the following features:
- Recursive parameter substitution. Configuration data is stored in a
tree-like object (i.e. nested dict/list) and parameter substitution occurs at all levels.
- Parameter calculation. Configuration data can be computed using Python expressions.
- It is file format agnostic. `renderconftree` does not parse configuration files. It relies on a "loader".
If you have a function that can take a string containing the text of your configuration file and return
a configuration tree (nested dict/list) then you can use `renderconftree`.
## Installation
`renderconftree` is available on PyPi
```
pip install renderconftree
```
## Examples
YAML is a great language for writing configuration files. It is simple to write, configuration options
can be stored in a logical hierarchy, and it is easy to get into your Python code. `renderconftree` simply
adds the power of Python to your YAML file so you can do something like:
#! /usr/bin/python
from renderconftree.read import *
text = '''
var1 : 1
var2 : some string
var3 : 3
var4 : $(${var3} + math.pi + 2)
var5 : $(${var4} + 2.0)
nest1 :
var1 : 11
var2 : $(${var3} + 12)
var3 : $(${var1} + 12)
var4 : $(${var3} + 12)
var5 : $(${/nest1/var3} + 12)
'''
config = readConfig( text )
print(yaml.dump(config, default_flow_style=False))
The YAML configuration file is loaded into a nested dictionary/list. Each value in
the tree is then parsed for expressions (text inside of $()).
If an
expression is found it is evaluated (using Python's `eval()` function) and the
parameter value is replaced with the result.
If the expression contains a variable reference
(text inside of ${}) the variables value is inserted into the expression before it is evaluated.
The tree itself is passed into the
evaluation context, so parameters in the dictionary can be accessed
within the expression.
This is extremely useful if you write code that does numerical calculations, like a physics simulation.
Consider the following configuration for a physics simulation that solves the 2D heat equation using a Finite-Difference method. You might have a
configuration file that looks like this.
# heat solver configuration
grid:
x:
min : 0
max : 10
N : 100
y:
min : 0
max : 20
N : 200
time:
start : 0
stop : 10
dt : 0.001
Now suppose you want to be able to set the grid size (N) based on a desired resolution. You could either 1) modify your code to accept a dx and dy
configuration parameter, or 2) make your configuration file dynamic with `renderconftree`.
# heat solver configuration
grid:
x:
min : 0
max : 10
N : $( (${max} - ${min})/0.1 )
y:
min : 0
max : 20
N : $( (${max} - ${min})/0.1 )
time:
start : 0
stop : 10
dt : 0.001
If you chose to modify your code to a accept a resolution parameter, you would have to write logic to check which parameter was specified, N or dx. But what
if both where given? This can be especially tedious if your simulation is not written in a scripting language like Python, but in C or C++.
By using `renderconftree`, you keep your configuration logic in your application simple while having power to create configurations that auto-compute
parameter values. What if you want the x and y resolution to be the same, but you would like to be able to easily change it?
# heat solver configuration
grid:
res : 0.001
x:
min : 0
max : 10
N : $( (${max} - ${min})/${../res} )
y:
min : 0
max : 20
N : $( (${max} - ${min})/${../res} )
time:
start : 0
stop : 10
dt : 0.001
Note that the `res` parameter is accessed using a filesystem-style path. This is provided by the [`fspathtree` class](https://github.com/CD3/fspathtree), which is a lightweight
wrapper around Python's `dict` and `list` objects that `renderconftree` uses.
Don't like YAML? No problem, just
provide the `readConfig` function with a parser that reads your preferred format from a string and
returns a nested dict. So, to read JSON,
from renderconftree.read import *
import json
with open('myConfig.json', 'r') as f:
text = f.read()
config = readConfig( text, parser=json.loads )
Don't want to learn YAML or JSON? Just use INI,
from renderconftree.read import *
from renderconftree.parsers import ini
import json
with open('myConfig.ini', 'r') as f:
text = f.read()
config = readConfig( text, parser=ini.load )
## Command line utility
If your application isn't using Python, you can still use `renderconftree`. A command-line utility
named `render-config-file` is provided that can read a configuration file,
render the configuration tree, and write it back out. A variety of formats are supported, and
the output format can be different than the input format, so you can even use this script
to translate configuration file formats.
| /renderconftree-0.3.tar.gz/renderconftree-0.3/README.md | 0.835114 | 0.846831 | README.md | pypi |
Handling templates
==================
.. program:: renderspec
.. highlight:: bash
Templates are based on `Jinja2`_ and usually end with .spec.j2 .
.. note:: There are a lot of examples available in the `openstack/rpm-packaging`_ project.
Rendering a template called `example.spec.j2` can be done with::
renderspec example.spec.j2
This will output the rendered spec to stdout.
Different styles
****************
Different distributions have different spec file styles (i.e. different naming
policies and different epoch handling policies). :program:`renderspec` automatically
detects which distribution is used and uses that style. Forcing a specific style can
be done with::
renderspec --spec-style suse example.spec.j2
Different pyver variants
************************
For singlespec variant spec.j2 templates (i.e. templates that can build for
multiple python flavors in parallel) it might be undesirable to expand requirements
for a particular python flavor. In that case the option `--skip-pyversion` can
be used to skip expansion for those dependencies:
renderspec --skip-pyversion py3 example.spec.j2
For CentOS 7.x hosts :program:`renderspec` defaults to skipping the py3 expansion.
Different template formats
**************************
The only supported input template format is currently called `spec.j2` (which is
the default).
Handling epochs
***************
Different distributions may have different epochs for different packages. This
is handled with an extra epoch file which must be in yaml format. Here's an example
of a epoch file called `epochs.yaml`::
---
epochs:
python-dateutil: 3
oslo.config: 2
Rendering the `example.spec.j2` file and also use the epochs can be done with::
renderspec --epochs epochs.yaml example.spec.j2
The ```Epoch:``` field in the spec.j2 file itself can be handled with the ```epoch()```
context function like this::
Epoch: {{ epoch('oslo.config') }}
This will add the epoch number from the yaml file or `0` in case there is no epoch file
or the given name is not available in the yaml file.
.. note:: if no epoch file is available, no epochs are added to the version numbers.
The epoch file is optional. If a package name is not in the epochs file,
epoch for that package is not used.
Handling requirements
*********************
Updating versions for `Requires` and `BuildRequires` takes a lot of time.
:program:`renderspec` has the ability to insert versions from a given
`global-requirements.txt` file. The file must contain lines following `PEP0508`_
.. note:: For OpenStack, the `global-requirements.txt`_ can be used.
To render a `example.spec.j2` file with a given requirements file, do::
renderspec --requirements global-requirements.txt example.spec.j2
It's also possible to use multiple requirements file. The last mentioned file
has the highest priority in case both files contain requirements for the same
package name. Using multiple files looks like this::
renderspec --requirements global-requirements.txt \
--requirements custom-requirements.txt \
example.spec.j2
.. _PEP0508: https://www.python.org/dev/peps/pep-0508/
.. _global-requirements.txt: https://git.openstack.org/cgit/openstack/requirements/tree/global-requirements.txt
Handling the package version
****************************
Distributions handle versions, especially pre-release versions differently.
SUSE for example allows using RPM's tilde ('~') while Fedora doesn't allow that
and uses a combination of RPM `Version` and `Release` tag to express pre-releases.
To support both styles with renderspec, the upstream version and a release
must be available in the context::
{% set upstream_version = upstream_version('1.2.3.0rc1') %}
{% set rpm_release = '1' %}
This should be done on the first lines in the spec.j2 template. The `rpm_release` is
only used in the fedora style.
Then for the RPM version and release, use::
Version: {{ py2rpmversion() }}
Release: {{ py2rpmrelease() }}
For suse-style, this renders to::
Version: 1.2.3.0~rc1
Release: 0
For fedora-style, this renders to::
Version: 1.2.3
Release: 0.1.0rc1%{?dist}
Note that in case of pre-releases you may need to adjust the version that is used
in the `Source` tag and the `%prep` sections `%setup`. So use e.g. ::
{% set upstream_version = upstream_version('1.2.3.0rc1') %}
{% set rpm_release = '1' %}
{% set source = url_pypi() %}
%name oslo.config
Version: {{ py2rpmversion() }}
Release: {{ py2rpmrelease() }}
Source0: {{ source }}
%prep
%setup -q -n %{sname}-{{upstream_version}}
which would render (with suse-style) to::
%name oslo.config
Version: 1.2.3.0~rc1
Release: 0
Source0: https://files.pythonhosted.org/packages/source/o/%{sname}/%{sname}-1.2.3rc1.tar.gz
%prep
%setup -q -n %{sname}-1.2.3.0rc1
The `upstream_version` can also be automatically detected from archive files
(like sdist archives available from pypi) which contain a valid `PKG-INFO`_ file.
For automatic version detection, the context need to know the `pypi_name` and a
archive file must be available and the context variable `upstream_version` needs to
be set to the value of the context function `upstream_version()`. The difference
here is that the version in `upstream_version()` is not explicit given. The archive
can be fetched with the `fetch_source()` function::
{% set source = fetch_source('http://tarballs.openstack.org/oslo.config/oslo.config-master.tar.gz') %}
{% set pypi_name = 'oslo.config' %}
{% set upstream_version = upstream_version() %}
{% set rpm_release = '1' %}
Version: {{ py2rpmversion() }}
Release: {{ py2rpmrelease() }}
Source0: {{ source }}
.. _PKG-INFO: https://www.python.org/dev/peps/pep-0314/
Template features
=================
Templates are just plain `Jinja2`_ templates. So all magic (i.e. filters) from
Jinja can be used in the templates. Beside the Jinja provided features, there are
some extra features renderspec adds to the template context.
context function `py2name`
**************************
`py2name` is used to translate a given pypi name to a package name following the
different distribution specific guidelines.
.. note:: For translating pypi names (the name a python package has on `pypi.python.org`_
to distro specific names, internally a module called `pymod2pkg`_ is used.
The preferred way to use `py2name` is to set the context variable `pypi_name` and
then call `py2name()` without any parameters. In that case, the context variable
is used::
{% set pypi_name = 'oslo.config' %}
Name: {{ py2name() }}
Rendering this template :program:`renderspec` with the `suse` style would result in::
Name: python-oslo.config
It is also possible to pass the pypi name directly to the `py2name` context function::
Name: {{ py2name('oslo.config') }}
That would create the same rendering result.
If the context env var `pypi_name` is set **and** `py2name` is called with a parameter,
the parameter is used instead of the context var.
Since `pymod2pkg 0.10.0`, there is the possibility to get a name for a specific python
version. Currently there are 3 values
* `py`: this is the unversioned name
* `py2`: this is the python2 name
* `py3`: this is the python3 name
This can also be used with `py2name()`::
Name: {{ py2name('oslo.config', py_versions='py3') }}
Rendering this template :program:`renderspec` with the `suse` style would result in::
Name: python3-oslo.config
context function `py2pkg`
*************************
`py2pkg` is used to
* translate the given pypi name to a distro specific name
* handle epochs and version
For example, a BuildRequires in a spec.j2 template for the package `oslo.config` in
version `>= 3.4.0` would be defined as::
BuildRequires: {{ py2pkg('oslo.config', ('>=', '3.4.0')) }}
Rendering this template with :program:`renderspec` with the `suse` style would result in::
BuildRequires: python-oslo.config >= 3.4.0
Rendering it with the `fedora` style would be::
BuildRequires: python-oslo-config >= 3.4.0
With an epoch file and an entry for `oslo.config` set to i.e. `2`, this would be
rendered on Fedora to::
BuildRequires: python-oslo-config >= 2:3.4.0
It's also possible to skip adding required versions and handle that with a
`global-requirements.txt` file. Given that this file contains `oslo.config>=4.3.0` and
rendering with `--requirements`, the rendered spec would contain::
BuildRequires: python-oslo-config >= 4.3.0
The translation for a specific python version can be done with the `py_versions` parameter
similar to `py2name()`::
BuildRequires: {{ py2pkg('oslo.config', ('>=', '3.4.0'), py_versions='py3') }}
renders to::
BuildRequires: python3-oslo-config >= 2:3.4.0
Multiple versions are also possible::
BuildRequires: {{ py2pkg('oslo.config', ('>=', '3.4.0'), py_versions=['py2', 'py3']) }}
renders to::
BuildRequires: python2-oslo-config >= 2:3.4.0 python3-oslo-config >= 2:3.4.0
context function `py2`
**********************
Similar to `py2pkg` but uses a `py_versions` set to `py2`
context function `py3`
**********************
Similar to `py2pkg` but uses a `py_versions` set to `py3`.
context function `epoch`
************************
The epochs are stored in a yaml file. Using the `epoch` context function can be done with::
Epoch: {{ epoch('oslo.config') }}
Without an yaml file, this would be rendered to::
Epoch: 0
With an existing yaml (and `oslo.config` epoch set to 2), this would be rendered to::
Epoch: 2
context function `license`
**************************
The templates use `SPDX`_ license names and these names are translated for different distros.
For example, a project uses the `Apache-2.0` license::
License: {{ license('Apache-2.0') }}
With the `fedora` spec-style, this would be rendered to::
License: ASL 2.0
With the `suse` spec-style::
License: Apache-2.0
context function `upstream_version`
***********************************
This function can be used to assign a static version to the variable `upstream_version`
or to dynamically detect the version from an archive (eg. an sdist tarball).
Static assignment looks like::
{% set upstream_version = upstream_version('1.1.0a3') %}
which is basically the same as::
{% set upstream_version = '1.1.0a3' %}
So static assignment is not that useful. Dynamic assignment looks like::
{% set pypi_name = 'oslo.config' %}
{% set upstream_version = upstream_version() %}
Note that for dynamic version detection, the variable `pypi_name` needs to be set
before calling `upstream_version()`. `upstream_version()` tries to find an archive
in:
1. the output directory where the rendered .spec file ends
2. the directory where the .spec.j2 template comes from
3. the current working directory
context function `py2rpmversion`
********************************
Python has a semantic version schema (see `PEP0440`_) and converting Python versions
to RPM compatible versions is needed in some cases. For example, in the Python world
the version "1.1.0a3" is lower than "1.1.0" but for RPM the version is higher.
To transform a Python version to a RPM compatible version, use::
{% set upstream_version = '1.1.0a3' %}
{% set rpm_release = '1' %}
Version: {{ py2rpmversion() }}
With the `suse` spec-style it will be translated to::
Version: 1.1.0~xalpha3
Note that you need to set 2 context variables (`upstream_version` and `rpm_release`)
to be able to use the `py2rpmversion()` function.
context function `py2rpmrelease`
********************************
Fedora doesn't allow the usage of `~` (tilde) in the `Version` tag. So for pre-releases
the `Release` tag is used (see `Fedora Packaging Versioning`_)
For the fedora-style::
{% set upstream_version = '1.1.0a3' %}
{% set rpm_release = '1' %}
Version: {{ py2rpmversion() }}
Release: {{ py2rpmrelease() }}
this would render to::
Version: 1.1.0
Release: 0.1a3%{?dist}
Note that you need to set 2 context variables (`upstream_version` and `rpm_release`)
to be able to use the `py2rpmrelease()` function.
context function `fetch_source`
*******************************
The function `fetch_source` downloads the given url and puts the file into the
`output_dir` (that's the directory where the rendered .spec file will be in).
If `output_dir` is not available (that's the case when `renderspec` writes the
rendered spec to stdout) the download is skipped. But in any case the function
returns the same url that it got as parameter::
{% set source = fetch_source('http://tarballs.openstack.org/oslo.log/oslo.log-master.tar.gz') %}
Source0: {{ source }}
context function `url_pypi`
***************************
The function `url_pypi` return a full url to a sdist tar.gz tarball on pypi. The function
requires the context variables `upstream_version` and `pypi_name`.
For example::
{% set pypi_name = 'oslo.concurrency' %}
{% set upstream_version = upstream_version('3.20.0') %}
{% set source = fetch_source(url_pypi()) %}
context filter `basename`
*************************
This is a filter which just returns ```os.path.basename()```::
{% set source = fetch_source('http://tarballs.openstack.org/oslo.log/oslo.log-master.tar.gz') %}
Source0: {{ source|basename }}
which then renders to::
Source0: oslo.log-master.tar.gz
distribution specific blocks & child templates
**********************************************
To properly handle differences between individual .spec styles, renderspec
contains child templates in `renderspec/dist-templates` which are
automatically used with corresponding `--spec-style`. These allow different
output for each spec style (distro) using jinja `{% block %}` syntax.
For example consider simple `renderspec/dist-templates/fedora.spec.j2`::
{% extends ".spec" %}
{% block build_requires %}
BuildRequires: {{ py2pkg('setuptools') }}
{% endblock %}
allows following in a spec template::
{% block build_requires %}{% endblock %}
to render into::
BuildRequires: python-setuptools
with `fedora` spec style, while `renderspec/dist-templates/suse.spec.j2` might
define other result for `suse` spec style.
For more information, see current `renderspec/dist-templates` and usage in
`openstack/rpm-packaging`_ project.
Available context variables
===========================
There are some variables that need to be set in the spec.j2 template. Preferable
at the beginning before any context function is used.
pypi_name
*********
This variable defines the name that is used on pypi. Set with::
{% set pypi_name = 'oslo.messaging' %}
where 'oslo.messaging' is the name that is set. The variable can later be used::
Source: {{ pypi_name }}.tar.gz
upstream_version
****************
The variable defines the upstream version that is used::
{% set upstream_version = '1.2.3.0rc1' %}
rpm_release
***********
The variable defines the rpm release. It is used together with 'upstream_version'
and only needed with the fedora spec style::
{% set rpm_release = '1' %}
.. _Jinja2: http://jinja.pocoo.org/docs/dev/
.. _openstack/rpm-packaging: https://git.openstack.org/cgit/openstack/rpm-packaging/
.. _pymod2pkg: https://git.openstack.org/cgit/openstack/pymod2pkg
.. _pypi.python.org: https://pypi.python.org/pypi
.. _SPDX: https://spdx.org/licenses/
.. _PEP0440: https://www.python.org/dev/peps/pep-0440/
.. _Fedora Packaging Versioning: https://fedoraproject.org/wiki/Packaging:Versioning#Pre-Release_packages
| /renderspec-2.1.0.tar.gz/renderspec-2.1.0/doc/source/usage.rst | 0.958177 | 0.865281 | usage.rst | pypi |
# Python Rendertron
> Rendertron middleware for Python applications.
[](https://travis-ci.com/frontendr/python-rendertron.svg)
[](https://coveralls.io/github/frontendr/python-rendertron?branch=develop)
[](https://github.com/ambv/black)
[Rendertron](https://github.com/GoogleChrome/rendertron) is a headless Chrome
rendering solution designed to render & serialise web pages on the fly. The
goal of this package is to provide middleware to render a request using a
Rendertron service and make the result available.
This makes it possible to for example render Progressive Web Apps (PWA), wait
for it to fully render (completes initial data loading etc.) and use that
fully built markup as a response.
Besides the fact that your user will see a fully rendered application faster it
also allows search engines to properly index the markup.
## Installing
Install a Rendertron service by following the steps in
[the documentation](https://github.com/GoogleChrome/rendertron#installing--deploying).
Install this package using `pip`:
```bash
pip install rendertron
```
You can also install the latest development version using `pip`'s `-e` flag:
```bash
pip install -e git://git@github.com:frontendr/python-rendertron.git@develop#egg=rendertron
```
This will install the `develop` branch.
### Django
First, add `'rendertron'` to the `INSTALLED_APPS` list in settings.
Then you have 2 choices:
- Enable the **middleware** and render everything that matches either
`RENDERTRON_INCLUDE_PATTERNS` or does not matches anything in
`RENDERTRON_EXCLUDE_PATTERNS`. See the Configuration section for more information about
those.
- Decorate specific views with the `@rendertron_render` decorator to only let render
those views with the Rendertron service.
#### Middleware
1. Add `'rendertron.middleware.DjangoRendertronMiddleware'` to the `MIDDLEWARE`
list in the settings.
2. Make sure to specify either `RENDERTRON_INCLUDE_PATTERNS` to specify path patterns
which are to be rendered by the Rendertron service or `RENDERTRON_EXCLUDE_PATTERNS_EXTRA`
to only specify what to exclude.
#### Decorate specific views
Instead of relying on the middleware and settings it is also possible to decorate
specific views with the `@rendertron_render` decorator.
```python
from rendertron.decorators.django import rendertron_render
@rendertron_render
def my_view(request):
...
```
For class based views use Django's [`method_decorator`](https://docs.djangoproject.com/en/dev/topics/class-based-views/intro/#decorating-the-class).
```python
from django.utils.decorators import method_decorator
from django.views.generic import TemplateView
from rendertron.decorators.django import rendertron_render
@method_decorator(rendertron_render, name="dispatch")
class MyView(TemplateView):
...
```
## Configuration
Most configuration is done by specifying specific variables. For Django users
that's done in your site's settings.
The following settings are available:
| Setting | Default | Description |
|---------|---------|-------------|
| `RENDERTRON_BASE_URL` | `'http://localhost:3000/'` | The url the Rendertron service is listening on. |
| `RENDERTRON_RENDER_QUERY_PARAM` | `'rendertron_render'` | The query parameter added to the request url passed to Rendertron. This is used to differentiate normal requests with requests from Rendertron. |
| `RENDERTRON_STORAGE` | See Storage | An object literal specifying and configuring the storage class to be used. See the Storage section for more information. |
| `RENDERTRON_INCLUDE_PATTERNS` | `[]` | A list of regular expression patterns to include. Once a pattern in this list matches the request no further checking will be done. |
| `RENDERTRON_EXCLUDE_PATTERNS` | List of common extensions. | By default this is a list of common static file type extensions used on the web. If Django is detected it's `STATIC_URL` and `MEDIA_URL` paths are added to the list. Note that if you override this setting all defaults are gone. If you want to keep these defaults *and* add your own patterns use `RENDERTRON_EXCLUDE_PATTERNS_EXTRA`.
| `RENDERTRON_EXCLUDE_PATTERNS_EXTRA` | `[]` | Like `RENDERTRON_EXCLUDE_PATTERNS` but will be appended to that list. |
## Storage
Storage classes are handling the logic of storing the results coming from the
Rendertron service for a period of time. They handle if, how, where and how
long a result is stored. There are some core storage classes available, and the
system is built so that it is very easy to build your own.
The choice of one of the built in storage classes depends on your framework.
### Any framework: `DummyStorage`
A storage class that doesn't do anything. It doesn't store and will never return
a stored result.
To use it simply set `RENDERTRON_STORAGE['CLASS']` to
`'rendertron.storage.DummyStorage'`. It has no options.
### Django: `DjangoCacheStorage`
A storage class that utilizes Django's cache framework to store the results.
To use it simply set `RENDERTRON_STORAGE['CLASS']` to
`'rendertron.storage.DjangoCacheStorage'`. It has the following options:
| Setting | Default | Description |
|---------|---------|-------------|
| `TIMEOUT` | Django's `DEFAULT_TIMEOUT` cache setting which is `300` (5 minutes) | The number of seconds the result should be stored in the cache. It's the `timeout` argument for Django's [`cache.set`](https://docs.djangoproject.com/en/dev/topics/cache/#django.core.caches.cache.set) method. |
| `VERSION` | `None` | The `version` argument which is passed to Django's [`cache.set`](https://docs.djangoproject.com/en/dev/topics/cache/#django.core.caches.cache.set) method. |
Example config:
```python
RENDERTRON_STORAGE = {
'CLASS': 'rendertron.storage.DjangoCacheStorage',
'OPTIONS': {
'TIMEOUT': 300,
}
}
```
## Running tests
First install Django to be able to test Django related things.
```bash
pip install django
```
Then run the tests via `django-admin` using the provided minimal settings file.
```bash
django-admin test --pythonpath . --settings tests.django.settings
```
## License
MIT
| /rendertron-0.2.0.tar.gz/rendertron-0.2.0/README.md | 0.581065 | 0.948106 | README.md | pypi |
from rengine.exercises import (
Exercise,
ExerciseFromTypePreset,
StrengthExercise,
pick_random_exercise,
)
import pandas as pd
from rengine.config import (
TIME_BASED_CONDITIONS,
EquipmentAvailable,
ExerciseLoadValues,
ExerciseType,
ExerciseLoad,
ExerciseTypeValues,
MuscleGroup,
EXERCISE_DF,
)
from typing import List, Tuple
import dataframe_image as dfi
import random
DF = EXERCISE_DF
def dictionary_addition(dictionaries):
    """Sum the values of matching keys across several dictionaries.

    Every key appearing in at least one input is present in the result;
    keys missing from a given dictionary simply contribute nothing.
    """
    totals = {}
    for mapping in dictionaries:
        for key, value in mapping.items():
            totals[key] = totals.get(key, 0) + value
    return totals
def tbc_set_reduction(tbc: dict, exercise: ExerciseFromTypePreset):
    """Apply any time-based-condition set reduction to *exercise* in place.

    Looks up ``tbc["set_reductions"][exercise_type][exercise_load]`` and,
    when such an entry exists, subtracts it from ``exercise.sets``.
    """
    reductions_for_type = tbc["set_reductions"].get(exercise.exercise_type, {})
    if exercise.exercise_load in reductions_for_type:
        exercise.sets -= reductions_for_type[exercise.exercise_load]
class BaseWorkout:
    """Container for a workout: an ordered list of Exercise objects plus
    helpers to build, sort and summarise it.

    Subclasses implement ``_create`` (and optionally ``_post_create``) to
    populate ``self.workout``; callers invoke ``create()``.
    """

    def __init__(self, exercises: List[Exercise] = None):
        # ``exercises or []`` builds a fresh list per instance, so a shared
        # default can never be mutated across workouts.
        self.workout = exercises or []

    def create(self):
        """Build the workout, then run the post-processing hook."""
        self._create()
        self._post_create()

    def _create(self):
        """This is where all the specifics are implemented. Magic happens here!"""
        pass

    def _post_create(self):
        """Runs after _create is called."""
        pass

    def add_exercise(self, exercise: Exercise):
        """Adds specific exercise to workout"""
        self.workout.append(exercise)

    def add_exercises(self, exercises: List[Exercise]):
        """Append several exercises to the workout, preserving their order."""
        self.workout.extend(exercises)

    def sort_workout_by_intensity(self):
        """
        Sorts workouts by:

        1. First priority is ExerciseType
        2. Second priority is ExerciseLoad
        """
        # In-place descending sort; key tuples compare type value first,
        # then load value.
        self.workout.sort(
            key=lambda exercise: (
                ExerciseTypeValues[exercise.exercise_type],
                ExerciseLoadValues[exercise.exercise_load],
            ),
            reverse=True,
        )

    def pick_random_exercise(
        self,
        muscle_groups_targeted: List[MuscleGroup],
        exercise_type: ExerciseType,
        allowed_loads: List[ExerciseLoad] = None,
    ) -> Exercise:
        """Picks single random exercise within rules determined by parameters
        and time dependencies."""
        if allowed_loads is None:
            # Resolved per call instead of using a shared mutable default list.
            allowed_loads = [ExerciseLoad.HEAVY, ExerciseLoad.MEDIUM, ExerciseLoad.LIGHT]
        return pick_random_exercise(
            muscle_groups_targeted, exercise_type, allowed_loads
        )

    @property
    def total_time(self):
        """Returns total time in minutes of workout"""
        # sum() already yields 0 for an empty workout; no sentinel needed.
        return sum(exercise.length for exercise in self.workout)

    @property
    def df(self):
        # Tabular view of the workout, one row per exercise.
        return pd.DataFrame(self.workout_dict)

    @property
    def workout_dict(self):
        # NOTE: despite the name this returns a *list* of per-exercise dicts;
        # the name is kept for backward compatibility with callers.
        return [
            {
                "exercise_name": exercise.exercise_name,
                "muscle_group": exercise.muscle_group,
                "sets": exercise.sets,
                "rep_range": exercise.rep_range,
                "rest_time_range": exercise.rest_time_range,
            }
            for exercise in self.workout
        ]

    @property
    def load_per_muscle_group(self):
        """Number of exercises in this workout per muscle group (zeros included)."""
        load_dict = {muscle_group: 0 for muscle_group in MuscleGroup.ALL}
        for exercise in self.workout:
            load_dict[exercise.muscle_group] += 1
        return load_dict

    @property
    def exercise_names(self):
        """Names of all exercises currently in the workout."""
        return [exercise.exercise_name for exercise in self.workout]

    def __str__(self) -> str:
        return str(self.df) + f"\nTotal Time: {str(int(self.total_time))} minutes"
class AutoGeneratedWorkout(BaseWorkout):
    """Auto generates a workout based on time based conditions, previous
    workouts and an optional strength exercise."""

    def __init__(
        self,
        target_time,
        muscle_groups: List[MuscleGroup],
        strength_exercise_name: str = None,
        equipment_available=EquipmentAvailable.ALL,
        excluded_exercise_names: List[str] = None,
        prior_cummative_loads_per_muscle_group: dict = None,
        exercises: List[Exercise] = None,
    ):
        self.target_time = target_time
        self.strength_exercise_name = strength_exercise_name
        self.muscle_groups = muscle_groups
        self.equipment_available = equipment_available
        # Per-muscle-group exercise caps configured for this workout length.
        self.caps = TIME_BASED_CONDITIONS[target_time]["caps"]
        # Load accumulated earlier in the week; defaults to zero for every
        # muscle group when no history is supplied.
        self.prior_cummative_loads_per_muscle_group = (
            prior_cummative_loads_per_muscle_group
            or dict((muscle_group, 0) for muscle_group in MuscleGroup.ALL)
        )
        self.excluded_exercise_names = excluded_exercise_names or []
        super().__init__(exercises=exercises or [])

    def _create(self):
        self._auto_generate()

    def _post_create(self):
        self.sort_workout_by_intensity()

    def _auto_generate(self):
        """Automatically generates workout based on conditions."""
        tbc = TIME_BASED_CONDITIONS[self.target_time]
        # 1) Optional strength exercise, when a name was given and the time
        #    budget allows at least one strength load.
        if (
            len(tbc["allowed_strength_loads"]) > 0
            and self.strength_exercise_name is not None
        ):
            strength_exercise = StrengthExercise(
                self.strength_exercise_name, allowed_loads=tbc["allowed_strength_loads"]
            )
            tbc_set_reduction(tbc, strength_exercise)
            self.add_exercise(strength_exercise)
        # 2) Endurance exercises, each added with its configured probability.
        for endurance_probability in tbc["endurance_exercises_probabilities"]:
            if random.uniform(0, 1) <= endurance_probability:
                muscle_to_work = self._find_next_muscle_group_to_work()
                endurance_exercise = pick_random_exercise(
                    [muscle_to_work],
                    ExerciseType.ENDURANCE,
                    excluded_exercise_names=self.excluded_exercise_names,
                    equipment_available=self.equipment_available,
                )
                self.excluded_exercise_names.append(endurance_exercise.exercise_name)
                tbc_set_reduction(tbc, endurance_exercise)
                self.add_exercise(endurance_exercise)
        # 3) Fill the remaining time with hypertrophy work, leaving a buffer
        #    of 2.5 minutes per 15 minutes of target time, capped at 7.5.
        while self.total_time < self.target_time - min(
            self.target_time // 15 * 2.5, 7.5
        ):
            muscle_to_work = self._find_next_muscle_group_to_work()
            hypertrophy_exercise = pick_random_exercise(
                [muscle_to_work],
                ExerciseType.HYPERTROPHY,
                excluded_exercise_names=self.excluded_exercise_names,
                equipment_available=self.equipment_available,
            )
            self.excluded_exercise_names.append(hypertrophy_exercise.exercise_name)
            tbc_set_reduction(tbc, hypertrophy_exercise)
            self.add_exercise(hypertrophy_exercise)

    def _find_next_muscle_group_to_work(self):
        """Finds the targeted muscle group with the least cumulative load
        whose per-workout cap has not yet been reached."""
        current_loads = self.load_per_muscle_group
        trainable_loads = {}
        for muscle_group, load in self.cummulative_load_per_muscle_group.items():
            if muscle_group not in self.muscle_groups:
                continue
            cap_reached = (
                muscle_group in self.caps
                and self.caps[muscle_group] <= current_loads[muscle_group]
            )
            if not cap_reached:
                trainable_loads[muscle_group] = load
        # Raises ValueError when every targeted group is capped (unchanged).
        return min(trainable_loads, key=trainable_loads.get)

    def regenerate_exercise(self, exercise_ind: int):
        """Regenerates one exercise of choice. Ensures that the same muscle
        group is worked and that exercises use the equipment available.

        Not implemented yet. (Fixed: the method previously lacked ``self``
        and was uncallable on instances.)
        """
        pass

    @property
    def cummulative_load_per_muscle_group(self):
        """Total workout load per muscle group for entire week at current
        state of workout and previous workouts."""
        return dictionary_addition(
            (self.prior_cummative_loads_per_muscle_group, self.load_per_muscle_group)
        )
class UpperBodyWorkout(AutoGeneratedWorkout):
    """Auto-generated workout restricted to upper-body muscle groups."""

    def __init__(
        self,
        target_time,
        strength_exercise_name: str = None,
        equipment_available: List[EquipmentAvailable] = EquipmentAvailable.ALL,
        excluded_exercise_names: List[str] = None,
        prior_cummative_loads_per_muscle_group: dict = None,
        exercises: List[Exercise] = None
    ):
        muscle_groups = [
            MuscleGroup.BACK,
            MuscleGroup.CHEST,
            MuscleGroup.BICEPS,
            MuscleGroup.TRICEPS,
            MuscleGroup.DELTOIDS,
        ]
        # BUG FIX: pass by keyword. The previous positional call swapped
        # `exercises` and `prior_cummative_loads_per_muscle_group`, feeding
        # the exercise list into the load history and vice versa. The parent
        # constructor already normalises None defaults.
        super().__init__(
            target_time,
            muscle_groups,
            strength_exercise_name=strength_exercise_name,
            equipment_available=equipment_available,
            excluded_exercise_names=excluded_exercise_names,
            prior_cummative_loads_per_muscle_group=prior_cummative_loads_per_muscle_group,
            exercises=exercises,
        )
class LowerBodyWorkout(AutoGeneratedWorkout):
    """Auto-generated workout restricted to lower-body muscle groups."""

    def __init__(
        self,
        target_time,
        strength_exercise_name: str = None,
        equipment_available: List[EquipmentAvailable] = EquipmentAvailable.ALL,
        excluded_exercise_names: List[str] = None,
        prior_cummative_loads_per_muscle_group: dict = None,
        exercises: List[Exercise] = None
    ):
        muscle_groups = [MuscleGroup.QUAD, MuscleGroup.HAMSTRINGS, MuscleGroup.CALVES]
        # BUG FIX: pass by keyword. The previous positional call swapped
        # `exercises` and `prior_cummative_loads_per_muscle_group`. The parent
        # constructor already normalises None defaults.
        super().__init__(
            target_time,
            muscle_groups,
            strength_exercise_name=strength_exercise_name,
            equipment_available=equipment_available,
            excluded_exercise_names=excluded_exercise_names,
            prior_cummative_loads_per_muscle_group=prior_cummative_loads_per_muscle_group,
            exercises=exercises,
        )
import random
from statistics import mean
from copy import deepcopy
from typing import List, Tuple
import numpy as np
from rengine.config import EXERCISE_CATEGORY_DATA, EquipmentAvailable, MuscleGroup
from rengine.config import ExerciseLoad, ExerciseType, EXERCISE_DF
from rengine.config import ExperienceLevel
def pick_random_exercise(
    muscle_groups_targeted: List[str],
    exercise_type: ExerciseType,
    allowed_loads: List[ExerciseLoad] = None,
    experience_levels=None,
    equipment_available=EquipmentAvailable.ALL,
    excluded_exercise_names: List[str] = None
):
    """Picks random exercise based on many parameters.

    Filters the exercise catalogue by muscle group, exercise type, experience
    level, available equipment and an exclusion list, then draws one row
    uniformly at random. Returns an ``ExerciseFromTypePreset``, or ``None``
    when no exercise satisfies every filter.
    """
    # Resolve defaults per call: module-level mutable default lists are
    # shared between calls and easy to corrupt accidentally.
    if allowed_loads is None:
        allowed_loads = [ExerciseLoad.HEAVY, ExerciseLoad.MEDIUM, ExerciseLoad.LIGHT]
    if experience_levels is None:
        experience_levels = [
            ExperienceLevel.BEGINNER,
            ExperienceLevel.INTERMEDIATE,
            ExperienceLevel.EXPERIENCED,
        ]
    if excluded_exercise_names is None:
        excluded_exercise_names = []
    # EXERCISE_DF is only read, so no `global` declaration is needed.
    df = EXERCISE_DF.copy()
    if equipment_available != EquipmentAvailable.ALL:
        df = df[df["Equipment"].isin(equipment_available)]
    df = df[
        (~df["EXERCISE"].isin(excluded_exercise_names))
        & (df["Muscle Group"].isin(muscle_groups_targeted))
        & (df[exercise_type] == 1)
        # At least one of the requested experience levels must apply.
        & (df.loc[:, experience_levels].sum(axis=1) > 0)
    ]
    if len(df) == 0:
        return None
    # Positional indexing makes re-indexing the filtered frame unnecessary.
    exercise_chose = df.iloc[random.randrange(len(df))]
    return ExerciseFromTypePreset(exercise_chose["EXERCISE"], exercise_type, allowed_loads)
def listify_if_non_iterable(obj):
    """Return a deep copy of *obj*, wrapped in a list unless it is already
    a list or tuple.

    Uses ``isinstance`` rather than an exact ``type`` check so that list and
    tuple subclasses are passed through instead of being wrapped again.
    """
    copied = deepcopy(obj)
    if isinstance(copied, (tuple, list)):
        return copied
    return [copied]
def get_variables_based_on_exercise_type_and_load(exercise_type: ExerciseType, exercise_load: ExerciseLoad):
    """Look up the sets/rep-range/rest-range preset for a type/load pair."""
    preset = EXERCISE_CATEGORY_DATA[exercise_type][exercise_load]
    # Only the three keys an Exercise constructor expects are forwarded.
    return {key: preset[key] for key in ("sets", "rep_range", "rest_time_range")}
def get_muscle_group(exercise_name):
    """Finds muscle group based on exercise name. If it does not exist, returns 'UNKNOWN'."""
    matches = EXERCISE_DF.loc[EXERCISE_DF["EXERCISE"] == exercise_name, "Muscle Group"]
    if len(matches) == 0:
        # The docstring always promised 'UNKNOWN', but the previous code
        # raised IndexError for names missing from the catalogue.
        return "UNKNOWN"
    return matches.values[0]
class Exercise:
    """Basic implementation of an exercise"""

    def __init__(self, exercise_name: str, sets, rep_range: Tuple[int], rest_time_range: Tuple[float], muscle_group: MuscleGroup = None):
        # Plain data holder: name, set count, rep/rest ranges, muscle group.
        self.exercise_name = exercise_name
        self.sets = sets
        self.rep_range = rep_range
        self.rest_time_range = rest_time_range
        self.muscle_group = muscle_group

    @property
    def length(self):
        """Length in minutes. Currently with assumption that each set takes 1 minute"""
        rest_times = listify_if_non_iterable(self.rest_time_range)
        # Each set costs one working minute plus the average rest time.
        return self.sets * (1 + mean(rest_times))

    def __str__(self) -> str:
        fields = ", ".join(
            [
                f"exercise_name: {self.exercise_name}",
                f"muscle_group: {self.muscle_group}",
                f"sets: {self.sets}",
                f"rep_range: {self.rep_range}",
                f"rest_time_range: {self.rest_time_range}",
            ]
        )
        return "{" + fields + "}"
class ExerciseFromTypePreset(Exercise):
    """Similar to Exercise class but sets, rep_range and rest_time determined by ExerciseType"""

    def __init__(self, exercise_name: str, exercise_type: ExerciseType, allowed_loads: List[ExerciseLoad] = None, exercise_load: ExerciseLoad = None):
        # ``None`` replaces the previous shared mutable list default.
        if allowed_loads is None:
            allowed_loads = [ExerciseLoad.HEAVY, ExerciseLoad.MEDIUM, ExerciseLoad.LIGHT]
        self.exercise_type = exercise_type
        # Honour an explicitly requested load; otherwise draw one weighted
        # by the per-type "chance" values of the presets.
        self.exercise_load = exercise_load or self.pick_random_load(allowed_loads)
        super().__init__(
            exercise_name=exercise_name,
            muscle_group=get_muscle_group(exercise_name),
            **get_variables_based_on_exercise_type_and_load(self.exercise_type, self.exercise_load),
        )

    def pick_random_load(self, allowed_loads):
        """Picks randomly the load based on ExerciseType and valid ExerciseLoad"""
        chances = [EXERCISE_CATEGORY_DATA[self.exercise_type][load]["chance"] for load in allowed_loads]
        # Normalize once (the previous code re-summed per element).
        total = sum(chances)
        normalized_probabilities = [chance / total for chance in chances]
        return np.random.choice(allowed_loads, p=normalized_probabilities)

    def __str__(self):
        return Exercise.__str__(self).rstrip("}") + f", exercise_type: {self.exercise_type}, exercise_load: {self.exercise_load}}}"
class StrengthExercise(ExerciseFromTypePreset):
    """Exercise whose training variables follow the STRENGTH presets."""

    def __init__(self, exercise_name: str, allowed_loads: List[ExerciseLoad] = None, exercise_load: ExerciseLoad = None):
        # ``None`` replaces the previous shared mutable list default.
        if allowed_loads is None:
            allowed_loads = [ExerciseLoad.HEAVY, ExerciseLoad.MEDIUM, ExerciseLoad.LIGHT]
        super().__init__(exercise_name=exercise_name, exercise_type=ExerciseType.STRENGTH, allowed_loads=allowed_loads, exercise_load=exercise_load)
class EnduranceExercise(ExerciseFromTypePreset):
    """Exercise whose training variables follow the ENDURANCE presets."""

    def __init__(self, exercise_name: str, allowed_loads: List[ExerciseLoad] = None, exercise_load: ExerciseLoad = None):
        # ``None`` replaces the previous shared mutable list default.
        if allowed_loads is None:
            allowed_loads = [ExerciseLoad.HEAVY, ExerciseLoad.MEDIUM, ExerciseLoad.LIGHT]
        super().__init__(exercise_name=exercise_name, exercise_type=ExerciseType.ENDURANCE, allowed_loads=allowed_loads, exercise_load=exercise_load)
class HypertExercise(ExerciseFromTypePreset):
    """Exercise whose training variables follow the HYPERTROPHY presets."""

    def __init__(self, exercise_name: str, allowed_loads: List[ExerciseLoad] = None, exercise_load: ExerciseLoad = None):
        # ``None`` replaces the previous shared mutable list default.
        if allowed_loads is None:
            allowed_loads = [ExerciseLoad.HEAVY, ExerciseLoad.MEDIUM, ExerciseLoad.LIGHT]
        super().__init__(exercise_name=exercise_name, exercise_type=ExerciseType.HYPERTROPHY, allowed_loads=allowed_loads, exercise_load=exercise_load)
from random import shuffle
class StrengthExerciseQueueNode:
    """Doubly-linked node pairing an exercise name with its queue priority."""

    def __init__(self, exercise_name: str, priority: float) -> None:
        self.exercise_name = exercise_name
        self.priority = priority
        # Link pointers are wired up later by StrengthExerciseQueue.add().
        self.next = None
        self.previous = None

    def __str__(self):
        return f"({self.exercise_name}-{self.priority})"
class StrengthExerciseQueue:
    """Queue that generates next strength exercise should do in particular muscle group which goes along with priorities.

    Implemented as a doubly linked list.  ``head`` is the front (next item
    to be served), ``tail`` the back; ``next`` pointers run toward the
    head and ``previous`` pointers toward the tail, so ``head.next`` and
    ``tail.previous`` are always ``None``.  Smaller priority values are
    served first; ``get()`` rotates the served node back to the tail.
    """
    def __init__(self, exercises: list = None, randomly_order_equal_priorities = True) -> None:
        self.head = None
        self.tail = None
        if(exercises):
            if(randomly_order_equal_priorities):
                # Shuffling before insertion randomises ties, since add()
                # keeps equal-priority nodes in insertion order.
                # NOTE(review): this shuffles the caller's list in place —
                # confirm callers do not rely on its order afterwards.
                shuffle(exercises)
            for exercise in exercises:
                # Each entry is an (exercise_name, priority) pair.
                self.add(*exercise)
    def add(self, exercise_name: str, priority: float):
        """Similar performance to a priority where smaller numbers have precedence in queue. When exercise is added it moves forward."""
        node = StrengthExerciseQueueNode(exercise_name, priority)
        # Empty queue: the new node is both head and tail.
        if(self.head == None):
            self.head = node
            self.tail = node
            return
        # Walk from the tail toward the head, past nodes whose priority
        # value is strictly larger (i.e. lower precedence) than the new one.
        n = self.tail
        while(n and n.priority>node.priority):
            n = n.next
        #inserted node becomes head of queue as it has the smallest priority value
        if(n == None):
            self.head.next = node
            prev_head = self.head
            self.head = node
            self.head.previous = prev_head
            return
        #Inserted node has largest priority value and becomes tail
        if(n == self.tail):
            node.next = self.tail
            self.tail = node
            # ``self.tail`` is ``node`` at this point, so this links the
            # old tail back to the newly inserted node.
            n.previous = self.tail
            return
        #Inserted node is in middle of queue somewhere
        next_node = n
        previous_node = n.previous
        next_node.previous = node
        previous_node.next = node
        node.next = next_node
        node.previous = previous_node
    def get(self):
        """Gets next element in queue then sends to tail."""
        # Zero (head is None) or one element: nothing to rotate.
        if(self.head == self.tail):
            return self.head
        # Detach the head, promote its predecessor, and re-attach the
        # served node behind the current tail.
        element = self.head
        self.head = element.previous
        self.head.next = None
        element.next = self.tail
        self.tail.previous = element
        element.previous = None
        self.tail = element
        return element
    def __str__(self):
        # Render from tail to head, e.g. "TAIL -- (a-2) (b-1) -- HEAD".
        node_str = "TAIL --"
        n = self.tail
        while(n):
            node_str += " " + str(n)
            n = n.next
        node_str += " -- HEAD"
        return node_str
if __name__ == "__main__":
    # Demo: rotate through a three-exercise queue for six "days" to show how
    # the equal-priority lifts alternate before the lower-precedence one.
    queue = StrengthExerciseQueue([
        ("Barbell Deadlift", 1),
        ("Barbell Squat", 1),
        ("Sumo Deadlift", 2)
    ])
    for i in range(6):
        print(f"Day {i+1}: {queue.get().exercise_name}")
# Relative Entropy Experiments
```
!pip install faker pyarrow
import pyarrow as pa
from pyarrow import compute as pc
from faker import Faker
from rich.jupyter import print
Faker.seed(0)
fake = Faker()
def gini_index(x: pa.Array):
value_counts = pc.value_counts(x)
value_probs = pc.divide(value_counts.field('counts'), float(len(x)))
return pc.subtract(1, pc.sum(pc.multiply(value_probs, value_probs)))
def simpson_index(x: pa.Array):
value_counts = pc.value_counts(x)
value_probs = pc.divide(value_counts.field('counts'), float(len(x)))
return pc.subtract(1, pc.sum(pc.multiply(value_probs, value_probs)))
# Generate some data...
LEN_DATA = 1000
Faker.seed(0)
countries = pa.array([fake.country() for _ in range(LEN_DATA)])
mostly_usa = pa.array(["United States" for _ in range(LEN_DATA - 1)] + ["Canada"])
half_usa = pa.array(["United States" for _ in range(LEN_DATA // 2)] + ["Canada" for _ in range(LEN_DATA // 2)])
len(countries), len(pc.unique(countries))
len(mostly_usa), len(pc.unique(mostly_usa))
len(half_usa), len(pc.unique(half_usa))
def pc_entropy(probs):
return pc.negate(pc.sum(pc.multiply(probs, pc.log2(probs))))
def relative_entropy(x: pa.Array):
value_counts = pc.value_counts(x)
value_probs = pc.divide(value_counts.field('counts'), float(len(x)))
entropy = pc_entropy(value_probs)
uniform_prob = pc.divide(1, pc.cast(len(value_counts), pa.float64()))
uniform_entropy = pc_entropy(uniform_prob)
score = pc.divide(pc.subtract(uniform_entropy, entropy), uniform_entropy)
return score.as_py()
relative_entropy(countries)
relative_entropy(mostly_usa)
relative_entropy(half_usa)
pc.divide(pa.array([10, 20, 30]), 5.5)
relative_entropy(pa.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))
```
| /renkon-0.0.2.tar.gz/renkon-0.0.2/notebooks/RelativeEntropy.ipynb | 0.654895 | 0.714342 | RelativeEntropy.ipynb | pypi |
# renku-aqs
## `params`
```bash
$ (cd renku-aqs-test-case/; renku aqs params)
+--------------------------------------+-------------------+--------------+
| Run ID | AstroQuery Module | Astro Object |
+--------------------------------------+-------------------+--------------+
| 228555e4-151a-4440-919d-f1269132a0fb | SimbadClass | Mrk 421 |
| 76943a72-30bf-4e9e-ad92-3dd068424968 | SimbadClass | Mrk 421 |
| 0320ea0d-eee2-4b50-aa3e-b64c557b9bf2 | SimbadClass | Mrk 421 |
+--------------------------------------+-------------------+--------------+
@prefix local-renku: <file:///home/savchenk/work/oda/renku/renku-aqs/renku-aqs-test-case/.renku/> .
@prefix oda: <http://odahub.io/ontology#> .
@prefix odas: <https://odahub.io/ontology#> .
local-renku:4ab60eb4-d5e7-11eb-a2dc-b5ff8b3b1162 a oda:Run ;
oda:isRequestingAstroObject odas:AstroObjectMrk_421 ;
oda:isUsing odas:AQModuleSimbadClass .
local-renku:53e67e80-d5ea-11eb-a2dc-b5ff8b3b1162 a oda:Run ;
oda:isRequestingAstroObject odas:AstroObjectMrk_421 ;
oda:isUsing odas:AQModuleSimbadClass .
local-renku:dd481450-d5e4-11eb-a2dc-b5ff8b3b1162 a oda:Run ;
oda:isRequestingAstroObject odas:AstroObjectMrk_421 ;
oda:isUsing odas:AQModuleSimbadClass .
```

# Graphical visualization of the graph
Starting from the knowledge graph generated and enriched during the various executions of the notebooks present within
the repository, this is queried to retrieve the needed information, perform some inferring and generate a graphical
representation.
In particular, two commands are provided:
* `display` to generate a representation of the graph over an output image
* `show-graph` to start an interactive visualization of the graph over the browser
## `display` command
CLI command to generate a graphical representation of the graph over an image.
In particular, the following information are elaborated:
* inputs/arguments/outputs of the notebook execution;
* [astroquery](https://github.com/oda-hub/astroquery/) modules used and the main query methods called ([astroquery api](https://github.com/astropy/astroquery/blob/main/docs/api.rst)).
#### Parameters
* `--filename` The filename of the output file image, until now, only png images are supported (eg `--filename graph.png`), default is `graph.png`
* `--input-notebook` Input notebook to process, if not specified, will query for all the executions from all notebooks
* `--no-oda-info` Exclude oda related information in the output graph, an output much closer to the lineage graph provided in the renkulab will be generated
```bash
$ renku aqs display
```

#### Specify executed notebook
```bash
$ renku aqs display --input-notebook final-an.ipynb
```

#### Do not display oda-related information
```bash
$ renku aqs display --input-notebook final-an.ipynb --no-oda-info
```

## `show-graph` command
CLI command to generate an interactive graphical representation of the graph.
```bash
$ renku aqs show-graph
```

The user can interact with the graph via a single click on one of its nodes: upon clicking,
a `SPARQL` query is dynamically built, and this will retrieve all the nodes and edges directly connected to the clicked
node, as shown in the animation below. Once the node has been expanded, the newly added nodes, along
with the other leaf nodes that are connected to the previously clicked node, can be re-absorbed
by clicking again on the very same node. This is also visible in the animation below.
<div align="center">
<img align="center" width="65%" src="readme_imgs/animation_expansion_retraction.gif">
</div>
<br clear="left"/>
The interface provides the user with a number of adjustable options:
* **Change graph layout**: Two possible layouts are at the moment supported
* _random_: where nodes and edges are displayed in a random fashion over the dedicated frame
(this layout is used in the above picture)
* _hierarchical_: where a hierarchical visualization is applied to the nodes and the edges, an example of
this is displayed in the image below
<div align="center">
<img src="readme_imgs/hierarchical_view.png" width="65%" />
</div>
* **Enable/disable selection of subsets of nodes**: filtering of certain subset of nodes (eg astroquery-related nodes).
This is configurable via a dedicated json file.
* **Apply reductions on the graph**: allows to enable the display of certain nodes with the relative child ones
absorbed within, and viceversa (eg in the example images, the `Plan` node has a number of child nodes, those
can be visualized inside the `Plan`, left image, as well as regular ones, right image)
This is configurable via a dedicated json file.
<div align="center">
<img align="center" width="37%" src="readme_imgs/reduced_plan.png">
<img align="center" width="38%" src="readme_imgs/expanded_plan.png">
</div>
<br clear="left"/>
* **Enable/disable graphical configurations for the graph**: enable/disable a set of graphical configurations for
the graph (nodes and edges), where each configuration is loaded from a dedicated json file. In the image displayed below,
two json files have been loaded, and the related checkbox will enable/disable that configuration.
<div align="center">
<img width="70%" src="readme_imgs/enable_disable_configuration-graph.png">
</div>
The functionalities for the graph drawing and its behavior are developed in JavaScript and are available
at the following [repository](https://github.com/oda-hub/renku-aqs-graph-library/).
# Installation of the plugin
Currently, the plugin is developed using the version `2.2.0` of [renku-python](https://github.com/SwissDataScienceCenter/renku-python). Please make sure such version is installed by running:
```bash
renku --version
```
The versioning of the dependency is guaranteed during the installation of the plugin, and in addition a dedicated check will be performed during the import.
For the installation of the plugin via `pip`:
<!---
TODO installation from the github repo, will be distributed ?
--->
```bash
pip install renku_aqs
```
| /renku-aqs-1.0.7.tar.gz/renku-aqs-1.0.7/README.md | 0.725649 | 0.873808 | README.md | pypi |
import requests
from urllib.parse import urlsplit
from typing import Optional, List, Dict
from dataclasses import dataclass, field
from calamus import fields
from calamus.schema import JsonLDSchema
from renkubio import logger
schema = fields.Namespace("http://schema.org/")
bio = fields.Namespace("http://bioschemas.org/")
@dataclass
class Entity:
    """Base class for domain entities identified by an id and a name."""
    _id: str  # unique identifier, typically a URI
    name: str  # human-readable label, also used for API lookups
@dataclass
class EnrichableEntity(Entity):
    """An entity whose properties can be enriched by querying an API with its name."""

    # Template URL with a single ``%s`` placeholder for the entity name.
    _api_url: Optional[str] = field(default=None)

    def query(self) -> Optional[List[Dict[str, str]]]:
        """Query the API using the name attribute and return the list of
        matching records, or ``None`` when the API yields no match."""
        base_url = urlsplit(self.api_url).netloc
        logger.debug(f"Querying {base_url} API for {self.name}...")
        # A finite timeout keeps the CLI from hanging on an unresponsive API.
        resp = requests.get(self.api_url, timeout=30).json()
        if len(resp):
            records = resp
        else:
            logger.warning(
                f"No match for {self.name} in {base_url}: "
                f"{self.__class__.__name__} metadata will be left empty."
            )
            records = None
        return records

    @property
    def api_url(self):
        """The query URL: the template with the entity name substituted in."""
        try:
            return self._api_url % self.name
        except TypeError:
            # ``_api_url`` is None: subclasses must define a template.
            logger.error(
                f"No API URL available in class {self.__class__.__name__}. Cannot run query()"
            )
            raise

    def enrich(self, _: Dict[str, str]):
        """Update object with a JSON record from an API. The dictionary values
        are used to fill-in the instance's attributes"""
        logger.error("enrich must be implemented by the specific entity")
        raise NotImplementedError
@dataclass
class Place(EnrichableEntity):
    """A geographical place, enrichable via the OpenStreetMap Nominatim API."""

    _id: str
    _api_url: str = field(
        default="https://nominatim.openstreetmap.org/search?q=%s&format=json"
    )
    name: str
    latitude: Optional[float] = None
    longitude: Optional[float] = None

    def enrich(self, geo_data: Dict[str, str]):
        """Update object with a geo record from Nominatim"""
        self.name = geo_data.get("display_name", self.name)
        # Only convert when the key is present: the previous
        # ``float(geo_data.get("lat", self.latitude))`` raised TypeError
        # when the record lacked coordinates and ours were still None.
        lat = geo_data.get("lat")
        if lat is not None:
            self.latitude = float(lat)
        lon = geo_data.get("lon")
        if lon is not None:
            self.longitude = float(lon)
class PlaceSchema(JsonLDSchema):
    """calamus JSON-LD schema serialising :class:`Place` as schema:Place."""
    _id = fields.Id()
    name = fields.String(schema.name)
    latitude = fields.Float(schema.latitude)
    longitude = fields.Float(schema.longitude)
    class Meta:
        rdf_type = schema.Place
        model = Place
@dataclass
class Taxon(EnrichableEntity):
    """A biological taxon, enrichable via the EBI ENA taxonomy API."""

    _id: str
    _api_url: str = "https://www.ebi.ac.uk/ena/taxonomy/rest/any-name/%s"
    name: str
    parent_taxon: Optional[str] = None
    scientific_name: Optional[str] = None
    taxon_rank: Optional[str] = None

    def enrich(self, ebi_taxon: Dict[str, str]):
        """Update object with a taxonomy record from EBI"""
        self.taxon_rank = ebi_taxon.get("rank", self.taxon_rank)
        try:
            # The lineage is a ";"-separated path from root to leaf; the
            # entry two levels above the end is taken as the parent taxon.
            self.parent_taxon = ebi_taxon["lineage"].split(";")[-3].strip()
        except (KeyError, IndexError):
            # The record may lack a lineage, or have one too short to
            # index; previously the IndexError case went uncaught.
            pass
        self.scientific_name = ebi_taxon.get("scientificName", self.scientific_name)
        self.name = ebi_taxon.get("commonName", self.name)
class TaxonSchema(JsonLDSchema):
    """calamus JSON-LD schema serialising :class:`Taxon` as bio:Taxon."""
    _id = fields.Id()
    name = fields.String(schema.name)
    scientific_name = fields.String(bio.scientificName)
    taxon_rank = fields.String(bio.taxonRank)
    parent_taxon = fields.String(bio.parentTaxon)
    class Meta:
        rdf_type = bio.Taxon
        model = Taxon
@dataclass
class Organization(EnrichableEntity):
    """A research organization, enrichable via the ROR affiliation API."""

    _id: str
    _api_url: str = "https://api.ror.org/organizations?affiliation=%s"
    name: str

    def query(self) -> Optional[List[Dict[str, str]]]:
        """Query the ROR API using the name attribute and return the list of
        matching organizations, or ``None`` when there is no match."""
        base_url = urlsplit(self.api_url).netloc
        logger.debug(f"Querying {base_url} API for {self.name}...")
        # A finite timeout keeps the CLI from hanging on an unresponsive API.
        resp = requests.get(self.api_url, timeout=30).json()
        if resp["number_of_results"] > 0:
            # ROR wraps each hit in an "organization" envelope; unwrap it.
            records = [rec["organization"] for rec in resp["items"]]
        else:
            logger.warning(
                f"No match for {self.name} in {base_url}: "
                f"{self.__class__.__name__} metadata will be left empty."
            )
            records = None
        return records

    def enrich(self, ror_record: Dict[str, str]):
        """Update object with a record from ROR"""
        self.name = ror_record.get("name", self.name)
        self._id = ror_record.get("id", self._id)
class OrganizationSchema(JsonLDSchema):
    """calamus JSON-LD schema serialising :class:`Organization` as schema:Organization."""
    _id = fields.Id()
    name = fields.String(schema.name)
    class Meta:
        rdf_type = schema.Organization
        model = Organization
Renku metadata"""
import json
from typing import Any, Dict, Optional
from rdflib import Graph, URIRef
from rdflib.namespace import Namespace
from renku.command.graph import export_graph_command
from renkubio.utils import nest_dict_items
schema = Namespace("http://schema.org/")
bio = Namespace("http://bioschemas.org/")
# Namespace prefix -> IRI bindings, shared by every SPARQL query in this
# module and bound onto the graph in load_graph() for readable serialisation.
NS_DICT = {
    "prov": "http://www.w3.org/ns/prov#",
    "oa": "http://www.w3.org/ns/oa#",
    "schema": "http://schema.org/",
    "renku": "https://swissdatasciencecenter.github.io/renku-ontology#",
    "foaf": "http://xmlns.com/foaf/0.1/",
    "bio": "http://bioschemas.org/",
}
def load_graph() -> Graph:
    """Load the Renku project RDF graph of the current working directory.

    Exports the project's full provenance graph through renku's graph
    export command, then binds the NS_DICT prefixes on the resulting
    rdflib graph.
    """
    # Ideally use RDFGraph(), but unable due to client init bug
    result = export_graph_command().build().execute()
    graph = result.output.as_rdflib_graph()
    for k, v in NS_DICT.items():
        graph.bind(k, v)
    return graph
def get_sample_uri(sample_name: str, graph: Graph) -> Optional[URIRef]:
    """Return the URI corresponding to input biosample name in the renku
    project graph, or ``None`` when no BioSample has that name.
    """
    # NOTE(review): sample_name is interpolated verbatim into the SPARQL
    # string; a name containing '"' would break the query — consider
    # escaping it or passing it via initBindings.
    res = graph.query(
        f"""
        SELECT *
        WHERE {{
            ?sample schema:name "{sample_name}" ;
                a bio:BioSample .
        }}""",
        initNs=NS_DICT,
    )
    try:
        # CSV serialisation: line 0 is the header, line 1 the first (and
        # expected only) matching URI.
        sample_id = res.serialize(format="csv").splitlines()[1].decode()
        sample_id = URIRef(sample_id)
    except IndexError:
        # Only the header line was emitted: no matching sample.
        sample_id = None
    return sample_id
def extract_biosample_meta(sample_uri: URIRef, graph: Graph) -> Dict[str, Any]:
    """Extract user-relevant biosample metadata into a human-readable dictionary.

    Queries the graph for the sample's own properties and those of its
    nested taxon / collector / location objects, then regroups the taxon
    and location fields into sub-dictionaries.
    """
    # Also fixes the SELECT variable order and display order of the output.
    ordered_fields = [
        "name",
        "description",
        "common_name",
        "scientific_name",
        "parent_taxon",
        "taxon_rank",
        "sex",
        "age",
        "control",
        "collector",
        "location_name",
        "latitude",
        "longitude",
        "url",
    ]
    # Get the URI of each nested object in the biosample
    taxon_uri = graph.value(sample_uri, bio.taxonomicRange)
    org_uri = graph.value(sample_uri, bio.collector)
    loc_uri = graph.value(sample_uri, bio.locationCreated)
    # Get the sample name (mandatory), and all other available
    # properties (optional)
    query = f"""
    SELECT ?{' ?'.join(ordered_fields)}
    WHERE {{
        <{sample_uri}> schema:name ?name .
        OPTIONAL {{ <{sample_uri}> schema:gender ?sex ; }}
        OPTIONAL {{ <{sample_uri}> schema:description ?description ; }}
        OPTIONAL {{ <{sample_uri}> schema:url ?url ; }}
        OPTIONAL {{ <{sample_uri}> bio:isControl ?control . }}
        OPTIONAL {{ <{sample_uri}> bio:samplingAge ?age . }}
        OPTIONAL {{ <{loc_uri}> schema:name ?location_name . }}
        OPTIONAL {{ <{loc_uri}> schema:latitude ?latitude ;
                    schema:longitude ?longitude .
        }}
        OPTIONAL {{ <{org_uri}> schema:name ?collector . }}
        OPTIONAL {{ <{taxon_uri}> schema:name ?common_name . }}
        OPTIONAL {{ <{taxon_uri}> bio:scientificName ?scientific_name ;
                    bio:parentTaxon ?parent_taxon ;
                    bio:taxonRank ?taxon_rank .
        }}
    }}
    """
    # A single row is expected: the query is anchored on one sample URI.
    res = graph.query(query, initNs=NS_DICT).serialize(format="json").decode()
    # Only keep field: value pairs, discarding datatypes and other information
    meta_dict = json.loads(res)["results"]["bindings"][0]
    values_dict = {
        field: meta_dict[field]["value"]
        for field in ordered_fields
        if field in meta_dict
    }
    # Nest properties belonging to objects in biosample
    taxon_properties = set(
        ("scientific_name", "common_name", "parent_taxon", "taxon_rank")
    )
    if set(meta_dict) & taxon_properties:
        nest_dict_items(
            values_dict,
            taxon_properties,
            "taxon",
        )
    location_properties = set(("location_name", "latitude", "longitude"))
    if set(meta_dict) & location_properties:
        nest_dict_items(values_dict, location_properties, "location_created")
    return values_dict
def get_sample_table(graph: Graph) -> str:
    """Return a CSV table (name, taxon) of every BioSample in *graph*.

    The taxon column is empty for samples without a taxonomicRange.
    """
    res = graph.query(
        """
        SELECT ?name ?taxon
        WHERE {
            ?sampleid a bio:BioSample ;
                schema:name ?name .
            OPTIONAL {
                ?sampleid bio:taxonomicRange ?taxid .
                ?taxid schema:name ?taxon .
            }
        }
        """,
        initNs=NS_DICT,
    )
    return res.serialize(format="csv").decode()
import csv
import functools
import json
from io import StringIO
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional
import click
from renku.command.command_builder.command import Command
from renku.core.constant import DATABASE_METADATA_PATH
from renku.core.dataset.dataset import edit_dataset, show_dataset
from renku.core.project import edit_project, show_project
from renku.core.util.util import NO_VALUE
from renku.domain_model.project_context import project_context
from renku.domain_model.provenance.annotation import Annotation
from tabulate import tabulate
# Shared click styling helpers: keys render in bold magenta, values in bold.
style_key = functools.partial(click.style, bold=True, fg="magenta")
style_value = functools.partial(click.style, bold=True)
def nest_dict_items(dic: Dict, src_keys: Iterable[Any], to_key: Any):
    """Move the entries of ``dic`` listed in ``src_keys`` into a nested
    dictionary stored under ``to_key``.  ``dic`` is modified in place;
    keys absent from ``dic`` are silently skipped.

    (The previous docstring referred to a nonexistent ``arr_key`` parameter.)

    Examples
    --------
    >>> d = {'a': 1, 'b': 2, 'c': 3}
    >>> nest_dict_items(d, src_keys=['b', 'c'], to_key='consonants')
    >>> d
    {'a': 1, 'consonants': {'b': 2, 'c': 3}}
    """
    # pop() removes the relocated keys from the top level as we collect them.
    dic[to_key] = {k: dic.pop(k) for k in src_keys if k in dic}
def prettify_csv(csv_str: str, has_headers=True, **kwargs) -> str:
    r"""Render an input CSV string as a prettified text table.

    Extra keyword arguments are forwarded to :func:`tabulate.tabulate`.

    Examples
    --------
    >>> prettify_csv("a,b,c\nd,e,f", has_headers=False)
    '- - -\na b c\nd e f\n- - -'
    """
    rows = list(csv.reader(StringIO(csv_str)))
    if has_headers:
        # The first CSV row becomes the table header.
        kwargs["headers"] = rows.pop(0)
    return tabulate(rows, **kwargs)
def print_key_value(key, value, print_empty: bool = True):
    """Echo a styled ``key: value`` line; empty values are skipped unless
    ``print_empty`` is set."""
    if value or print_empty:
        click.echo(style_key(key) + style_value(value))
def prettyprint_dict(dic: Dict, prefix=""):
    """Colored and capitalized printing of input dictionary."""
    for key, value in dic.items():
        label = f"{prefix}{key.capitalize()}: "
        if not isinstance(value, dict):
            print_key_value(label, value)
        else:
            # Nested dictionary: print the bare key, then recurse with a
            # deeper indentation level.
            print_key_value(label, "")
            prettyprint_dict(value, prefix=prefix + "  ")
def get_project_url():
    """Use localclient to build the full Renku project URL."""
    with project_context.with_path(Path.cwd()):
        remote = project_context.remote
        # NOTE(review): iterating the remote appears to yield its parts in
        # (name, owner, host) order — confirm against renku's remote
        # object before relying on this unpacking.
        name, owner, host = [key for key in remote]
        return f"https://{host}/{owner}/{name}"
def get_renku_project() -> Dict:
    """Gets the metadata of the renku project in the current working directory."""
    with project_context.with_path(Path.cwd()):
        # Current annotations
        # Run renku's "project show" through the command builder so the
        # project lock, metadata database and any pending migrations are
        # handled for us; expose the result object's attributes as a dict.
        project = (
            Command()
            .command(show_project)
            .lock_project()
            .with_database()
            .require_migration()
            .build()
            .execute()
            .output.__dict__
        )
        return project
def get_renku_dataset(name: str) -> Dict:
    """Get the metadata of dataset *name* in the current renku project."""
    with project_context.with_path(Path.cwd()):
        # Current annotations
        # Same command-builder pattern as get_renku_project(), but showing
        # a single named dataset.
        ds = (
            Command()
            .command(show_dataset)
            .lock_project()
            .with_database()
            .require_migration()
            .build()
            .execute(name)
            .output
        )
        return ds
def load_annotations(entity: Dict) -> List[Dict]:
    """Loads custom annotations from project or dataset metadata into a dictionary."""
    raw = entity["annotations"]
    # No annotations yet: start a fresh, empty renku annotation container.
    if raw in ([], None):
        return [dict(id=Annotation.generate_id(), body=[], source="renku")]
    # Existing annotations are stored as a JSON-encoded string.
    return json.loads(raw)
def find_sample_in_annot(annot: List[Dict], name: str) -> int:
    """Returns the index of the annotation body corresponding to input sample
    name. Returns -1 if sample is not found.

    Annotation entries that are not biosamples, or that lack the
    schema.org/name or @type keys entirely, are skipped instead of
    raising KeyError as before.
    """
    body = annot[0]["body"]
    # For each biosample annotation in the body, check if it has the input name.
    for sample_idx, sample in enumerate(body):
        # Malformed samples are wrapped in an extra list; unwrap them.
        if isinstance(sample, list):
            sample = sample[0]
        names = sample.get("http://schema.org/name", [])
        if (
            "http://bioschemas.org/BioSample" in sample.get("@type", [])
            and names
            and name in names[0].values()
        ):
            return sample_idx
    return -1
def edit_annotations(annotations: Dict, dataset: Optional[str] = None):
    """Replace annotations for target dataset. If no dataset name is
    specified, edit project annotations instead. The keyword 'renku-bio'
    is also added automatically if not present."""
    with project_context.with_path(Path.cwd()):
        # Dataset and project edits take different renku commands and
        # argument sets; NO_VALUE marks fields we do not want to touch.
        if dataset:
            keywords = get_renku_dataset(dataset)["keywords"]
            edit_cmd = edit_dataset
            edit_args = dict(
                name=dataset,
                creators=NO_VALUE,
                images=NO_VALUE,
                title=NO_VALUE,
                custom_metadata=annotations[0][
                    "body"
                ],  # TODO: rm last [0] for multisample
            )
        else:
            keywords = get_renku_project()["keywords"]
            edit_cmd = edit_project
            edit_args = dict(
                creator=NO_VALUE,
                custom_metadata=annotations[0]["body"],
            )
        # add common args (dict |= requires Python >= 3.9)
        edit_args |= dict(
            keywords=NO_VALUE,
            description=NO_VALUE,
            custom_metadata_source="renku-bio",
        )
        # Tag the entity so renku-bio annotated projects are discoverable.
        if "renku-bio" not in keywords:
            edit_args["keywords"] = keywords + ["renku-bio"]
        # Build the edit command with locking, a writable metadata
        # database, migrations, and an automatic commit of the metadata.
        command = (
            Command()
            .command(edit_cmd)
            .lock_project()
            .with_database(write=True)
            .require_migration()
            .with_commit(commit_only=DATABASE_METADATA_PATH)
        )
        command.build().execute(**edit_args)
import json
from datetime import datetime
from typing import Optional
import click
import pyld
import questionary
from renkubio import __version__
from renkubio.biosample import BioSample, BioSampleSchema
from renkubio.common_entities import EnrichableEntity
from renkubio.rdf import (
extract_biosample_meta,
get_sample_table,
get_sample_uri,
load_graph,
)
from renkubio.utils import (
edit_annotations,
find_sample_in_annot,
get_project_url,
get_renku_dataset,
get_renku_project,
load_annotations,
prettify_csv,
prettyprint_dict,
)
def prompt_enrich_object(obj: EnrichableEntity, msg: str, display_field: str):
    """Interactive prompt the user to select the API record matching object property."""
    # Send a request with object name to an API
    records = obj.query()
    # Interactive selection among matching records
    try:
        choices = [rec[display_field] for rec in records] + ["None"]
    except TypeError:
        # query() returned None: no match found, nothing to offer.
        return
    choice = questionary.select(
        msg,
        choices=choices,
    ).ask()
    # ``choice`` is None when the prompt is aborted and the literal string
    # "None" when the user declines every record; both mean "don't enrich".
    # Previously picking "None" crashed with IndexError on the empty filter.
    if choice and choice != "None":
        # Recover full metadata for chosen record and enrich biosample with it
        chosen_record = next(
            rec for rec in records if rec.get(display_field) == choice
        )
        obj.enrich(chosen_record)
def prompt_biosample(
    bs: BioSample,
    taxon: Optional[str] = None,
    collector: Optional[str] = None,
    location: Optional[str] = None,
):
    """Sequentially prompts the user to enrich each object property of the input biosample."""
    # (enable flag, biosample attribute, prompt message, record field shown)
    steps = (
        (taxon, "taxonomic_range", "What is the matching taxon record ?", "scientificName"),
        (collector, "collector", "What is the organization responsible for sample collection ?", "name"),
        (location, "location_created", "What is the correct location where the sample was collected ?", "display_name"),
    )
    for enabled, attr, msg, display_field in steps:
        if enabled:
            prompt_enrich_object(getattr(bs, attr), msg=msg, display_field=display_field)
def get_iso_date(date: str) -> datetime:
    """Parse *date* as an ISO-8601 string and return the resulting datetime.

    Raises:
        ValueError: if *date* is not valid ISO-8601 (YYYY-MM-DD).
    """
    try:
        return datetime.fromisoformat(date)
    except ValueError:
        # Re-raise with a message naming the field actually being parsed
        # (the original text wrongly referred to a "deadline"); the old
        # docstring also claimed truncation to Y-M-D that never happened.
        raise ValueError("Date must be in ISO-8601 (YYYY-MM-DD) format.")
# Root command group: subcommands (add-sample, ls-samples, show-sample,
# rm-sample) are registered on it via @bio.command().
@click.group()
@click.version_option(version=__version__)
def bio():
    """
    renku-bio: Biological annotations in Renku metadata.
    """
@bio.command()
@click.argument("name")
@click.option("--description", "-d", help="Free text description of the sample.")
@click.option(
    "--url", "-u", help="If applicable, a link to a ressource describing the sample."
)
@click.option(
    "--taxon", "-t", help="The taxon of the sample (Scientific or common name)."
)
@click.option(
    "--age", "-a", type=int, help="The biological age of the sample, in years."
)
@click.option(
    "--collector",
    "-c",
    help="The organization responsible for sample collection.",
)
@click.option(
    "--control",
    "-C",
    is_flag=True,
    help="Whether this sample represents the control group.",
)
@click.option(
    "--gender",
    "-g",
    help="Can be 'male', 'female', or another value.",
)
@click.option(
    "--date", help="Date when the sample was collected, in YYYY-MM-DD format."
)
@click.option(
    "--location",
    "-l",
    help="Where the sample was collected. Ideally a geographical name.",
)
@click.option(
    "--dataset",
    help="Target dataset in which to add the sample. If not specified, sample"
    " is added to the project's metadata.",
)
@click.option(
    "--dry-run", is_flag=True, help="Only print biosample, do not modify metadata."
)
@click.option(
    "--no-prompt", is_flag=True, help="Do not prompt to enrich biosample properties."
)
def add_sample(
    name: str,
    description: str,
    url: Optional[str],
    taxon: Optional[str],
    age: Optional[int],
    gender: Optional[str],
    collector: Optional[str],
    control: Optional[bool],
    date: Optional[str],
    location: Optional[str],
    dry_run: bool = False,
    no_prompt: bool = False,
    dataset: Optional[str] = None,
):
    """Add a biological sample to the project's metadata.
    name is the identifier used to refer to the sample. If
    a dataset is specified, adds the sample to the dataset
    metadata instead."""
    # Load the metadata container (dataset or project) we will annotate.
    if dataset:
        meta = get_renku_dataset(dataset)
    else:
        meta = get_renku_project()
    annotations = load_annotations(meta)
    # Sample names act as identifiers here, so duplicates are refused.
    if find_sample_in_annot(annotations, name) >= 0:
        raise ValueError(
            f"A sample with name {name} already exists in "
            "this project. Please use another name."
        )
    # Validate/parse the collection date before building the sample.
    if date:
        date = get_iso_date(date)
    # Generate a unique ID for the biosample
    sample_id = get_project_url()
    if dataset:
        sample_id += f"/datasets/{dataset}"
    sample_id += f"/{name}"
    bs = BioSample(
        _id=sample_id,
        name=name,
        url=url,
        description=description,
        taxonomic_range=taxon,
        collector=collector,
        sampling_age=age,
        gender=gender,
        location_created=location,
        date_created=date,
        is_control=control,
    )
    # Optionally let the user enrich taxon/collector/location interactively
    # from the public APIs behind each EnrichableEntity.
    if not no_prompt:
        prompt_biosample(bs, taxon, collector, location)
    bs_dict = BioSampleSchema().dump(bs)
    # Drop unset fields, then flatten to JSON-LD for storage as annotations.
    bs_json = pyld.jsonld.flatten({k: v for k, v in bs_dict.items() if v is not None})
    if dry_run:
        print(json.dumps(bs_json, indent=2))
    else:
        # Add sample to current annotations
        annotations[0]["body"] += bs_json
        edit_annotations(annotations, dataset)
@bio.command()
def ls_samples():
    """List available biosamples"""
    # Render the (name, taxon) CSV extracted from the project graph as a
    # framed table on stdout.
    table = prettify_csv(
        get_sample_table(load_graph()),
        has_headers=True,
        tablefmt="fancy_grid",
    )
    print(table)
@bio.command()
@click.argument("name")
def show_sample(name: str):
    """Display information on a specific biosample"""
    graph = load_graph()
    sample_uri = get_sample_uri(name, graph)
    # Guard clause: unknown sample names get a short message.
    if not sample_uri:
        print(f"No sample named '{name}'.")
        return
    prettyprint_dict(extract_biosample_meta(sample_uri, graph))
@bio.command()
@click.argument("name")
@click.option(
    "--dataset",
    help="Target dataset in which to add the sample. If not specified, sample"
    " is added to the project's metadata.",
)
def rm_sample(name: str, dataset: Optional[str] = None):
    """Remove a biosample from the project's metadata.
    name refers to the biosample to remove."""
    # Resolve the metadata container (dataset or project), then locate the
    # sample inside its annotations.
    meta = get_renku_dataset(dataset) if dataset else get_renku_project()
    annotations = load_annotations(meta)
    rm_idx = find_sample_in_annot(annotations, name)
    if rm_idx < 0:
        print(f"No sample named {name}.")
        return
    # Drop the sample from the annotation body and persist the result.
    annotations[0]["body"].pop(rm_idx)
    edit_annotations(annotations, dataset)
import json
import re
from typing import List
import click
import pyld
import rdflib
from deepdiff import DeepDiff
from mlsconverters.io import COMMON_DIR, MLS_DIR
from prettytable import PrettyTable
from renku.command.command_builder.command import Command
from renku.command.graph import get_graph_for_all_objects, update_nested_node_host
from renku.core import errors
from renku.core.plugin import hookimpl
from renku.core.util.urls import get_host
from renku.domain_model.project_context import project_context
from renku.domain_model.provenance.annotation import Annotation
class MLS(object):
    """Collects MLSchema annotations emitted on disk by a tracked activity."""

    def __init__(self, activity):
        # Activity the annotations will be attached to.
        self._activity = activity

    @property
    def renku_mls_path(self):
        """Return a ``Path`` instance of Renku MLS metadata folder."""
        return project_context.metadata_path / MLS_DIR / COMMON_DIR

    def _load_model(self, path):
        """Load an MLS reference file.

        Returns an empty dict when ``path`` is falsy or does not exist.
        """
        if path and path.exists():
            # Fixed: use a context manager so the file handle is always
            # closed (``json.load(path.open())`` leaked the handle).
            with path.open() as fp:
                return json.load(fp)
        return {}

    @property
    def annotations(self) -> List[Annotation]:
        """Return the annotations found on disk, consuming the files.

        Each metadata file is deleted (``unlink``) after being read, so a
        second access returns an empty list.
        """
        _annotations = []
        if not self.renku_mls_path.exists():
            return _annotations
        for p in self.renku_mls_path.iterdir():
            mls_annotation = self._load_model(p)
            model_id = mls_annotation["@id"]
            annotation_id = "{activity}/annotations/mls/{id}".format(
                activity=self._activity.id, id=model_id
            )
            p.unlink()
            _annotations.append(
                Annotation(id=annotation_id, source="MLS plugin", body=mls_annotation)
            )
        return _annotations
@hookimpl
def activity_annotations(activity):
    """``activity_annotations`` hook implementation."""
    # Delegate directly to the MLS collector for this activity.
    return MLS(activity).annotations
def _run_id(activity_id):
return str(activity_id).split("/")[-1]
def _export_graph():
    """Export the full Renku metadata graph with node ids rewritten.

    Node hosts are rewritten to the current environment's host so the
    exported identifiers resolve locally.
    """
    graph = get_graph_for_all_objects()
    current_host = get_host()
    for node in graph:
        update_nested_node_host(node, current_host)
    return graph
def _conjunctive_graph(graph):
    """Convert a renku ``Graph`` to an rdflib ``ConjunctiveGraph``."""
    # Expand the JSON-LD document, serialize it, and let rdflib re-parse it.
    expanded = pyld.jsonld.expand(graph)
    serialized = json.dumps(expanded, indent=2)
    return rdflib.ConjunctiveGraph().parse(data=serialized, format="json-ld")
def _graph(revision, paths):
    """Build an rdflib graph of the project with common namespace prefixes.

    ``revision`` and ``paths`` are accepted for CLI symmetry but are not
    used by the export itself.
    """
    cmd_result = (
        Command()
        .command(_export_graph)
        .with_database(write=False)
        .require_migration()
        .build()
        .execute()
    )
    if cmd_result.status == cmd_result.FAILURE:
        raise errors.OperationError("Cannot export Renku graph.")
    graph = _conjunctive_graph(cmd_result.output)
    # Bind the prefixes used by the plugin's SPARQL queries and serializers.
    namespaces = {
        "prov": "http://www.w3.org/ns/prov#",
        "foaf": "http://xmlns.com/foaf/0.1/",
        "schema": "http://schema.org/",
        "renku": "https://swissdatasciencecenter.github.io/renku-ontology/",
        "mls": "http://www.w3.org/ns/mls#",
        "oa": "http://www.w3.org/ns/oa#",
        "xsd": "http://www.w3.org/2001/XMLSchema#",
    }
    for prefix, uri in namespaces.items():
        graph.bind(prefix, uri)
    return graph
def _create_leaderboard(data, metric, format=None):
    """Render run metrics as a PrettyTable sorted (descending) by ``metric``.

    Runs without a value for ``metric`` are skipped. ``format`` is accepted
    for interface compatibility but unused.
    """
    board = PrettyTable()
    board.field_names = ["Run ID", "Model", "Inputs", metric]
    board.align["Model"] = "l"
    board.align["Inputs"] = "l"
    board.align[metric] = "r"
    for run_id, info in data.items():
        if metric not in info:
            continue
        info["inputs"].sort()
        board.add_row([run_id, info["model"], info["inputs"], info[metric]])
    board.sortby = metric
    board.reversesort = True
    return board
@click.group()
def mls():
    """MLSchema plugin commands."""
    # Click group entry point; subcommands attach via @mls.command().
    pass
@mls.command()
@click.option(
    "--revision",
    default="HEAD",
    help="The git revision to generate the log for, default: HEAD",
)
@click.option("--format", default="ascii", help="Choose an output format.")
@click.option("--metric", default="accuracy", help="Choose metric for the leaderboard")
@click.argument("paths", type=click.Path(exists=False), nargs=-1)
def leaderboard(revision, format, metric, paths):
    """Leaderboard based on evaluation metrics of machine learning models"""
    graph = _graph(revision, paths)
    board = dict()
    # Collect one entry per run: its model, inputs and evaluation metrics.
    query = """SELECT DISTINCT ?type ?value ?run ?runId ?dsPath where {{
        ?em a mls:ModelEvaluation ;
        mls:hasValue ?value ;
        mls:specifiedBy ?type ;
        ^mls:hasOutput/mls:implements/rdfs:label ?run ;
        ^mls:hasOutput/^oa:hasBody/oa:hasTarget ?runId ;
        ^mls:hasOutput/^oa:hasBody/oa:hasTarget/prov:qualifiedUsage/prov:entity/prov:atLocation ?dsPath
        }}"""
    for row in graph.query(query):
        run_id = _run_id(row.runId)
        metric_type = row.type.split("#")[1]
        if run_id in board:
            # Run already seen: only record the additional input path.
            board[run_id]["inputs"].append(str(row.dsPath))
        else:
            board[run_id] = {
                metric_type: row.value.value,
                "model": row.run,
                "inputs": [str(row.dsPath)],
            }
    if paths:
        # Keep only runs whose inputs mention at least one requested path.
        filtered_board = dict()
        for path in paths:
            filtered_board.update(
                {k: v for k, v in board.items() if path in v["inputs"]}
            )
        print(_create_leaderboard(filtered_board, metric))
    else:
        print(_create_leaderboard(board, metric))
@mls.command()
@click.option(
    "--revision",
    default="HEAD",
    help="The git revision to generate the log for, default: HEAD",
)
@click.option("--format", default="ascii", help="Choose an output format.")
@click.option("--diff", nargs=2, help="Print the difference between two model revisions")
@click.argument("paths", type=click.Path(exists=False), nargs=-1)
def params(revision, format, paths, diff):
    """List the hyper-parameter settings of machine learning models"""

    def _param_value(rdf_literal):
        """Convert an RDF literal to a displayable Python value."""
        # Fixed: the original check was ``not type(x) != Literal`` — a
        # double negative that returned Literals unconverted and sent
        # non-Literal values into ``.toPython()``. Non-Literals pass
        # through untouched; Literals are converted below.
        if not isinstance(rdf_literal, rdflib.term.Literal):
            return rdf_literal
        # NOTE(review): numeric literals are kept as strings while other
        # literals become native Python values — presumably for stable
        # display/diffing; confirm before changing.
        if rdf_literal.isnumeric():
            return rdf_literal.__str__()
        else:
            return rdf_literal.toPython()

    graph = _graph(revision, paths)
    model_params = dict()
    # One row per (run, hyper-parameter) pair.
    for r in graph.query(
        """SELECT ?runId ?algo ?hp ?value where {{
        ?run a mls:Run ;
        mls:hasInput ?in .
        ?in a mls:HyperParameterSetting .
        ?in mls:specifiedBy/rdfs:label ?hp .
        ?in mls:hasValue ?value .
        ?run mls:implements/rdfs:label ?algo ;
        ^oa:hasBody/oa:hasTarget ?runId
        }}"""
    ):
        run_id = _run_id(r.runId)
        if run_id in model_params:
            model_params[run_id]["hp"][str(r.hp)] = _param_value(r.value)
        else:
            model_params[run_id] = dict(
                {"algorithm": str(r.algo), "hp": {str(r.hp): _param_value(r.value)}}
            )
    if diff:
        # Both revisions must exist before diffing.
        for rev in diff:
            if rev not in model_params:
                print("Unknown revision provided for diff parameter: {}".format(rev))
                return
        if model_params[diff[0]]["algorithm"] != model_params[diff[1]]["algorithm"]:
            # Different algorithms: a parameter-level diff is meaningless.
            print("Model:")
            print("\t- {}".format(model_params[diff[0]]["algorithm"]))
            print("\t+ {}".format(model_params[diff[1]]["algorithm"]))
        else:
            params_diff = DeepDiff(
                model_params[diff[0]], model_params[diff[1]], ignore_order=True
            )
            output = PrettyTable()
            output.field_names = ["Hyper-Parameter", "Old", "New"]
            output.align["Hyper-Parameter"] = "l"
            if "values_changed" not in params_diff:
                print(output)
                return
            for k, v in params_diff["values_changed"].items():
                parameter_name = re.search(r"\['(\w+)'\]$", k).group(1)
                # Fixed: the old value goes in the "Old" column and the new
                # value in "New" (they were previously swapped).
                output.add_row(
                    [
                        parameter_name,
                        _param_value(v["old_value"]),
                        _param_value(v["new_value"]),
                    ]
                )
            print(output)
    else:
        output = PrettyTable()
        output.field_names = ["Run ID", "Model", "Hyper-Parameters"]
        output.align["Run ID"] = "l"
        output.align["Model"] = "l"
        output.align["Hyper-Parameters"] = "l"
        for runid, v in model_params.items():
            output.add_row([runid, v["algorithm"], json.dumps(v["hp"])])
        print(output)
import re
from renoir._shortcuts import to_unicode
class Element(object):
    """Parse a single HAML element line into tag/id/class/attribute parts."""

    # Tags rendered as self-closing (no separate closing tag).
    self_closing_tags = (
        'meta', 'img', 'link', 'br', 'hr', 'input', 'source', 'track')

    ELEMENT = '%'
    ID = '#'
    CLASS = '.'

    HAML_REGEX = re.compile(r"""
    (?P<tag>%\w+(\:\w+)?)?
    (?P<id>\#[\w-]*)?
    (?P<class>\.[\w\.-]*)*
    (?P<attributes>\{.*\})?
    (?P<nuke_outer_whitespace>\>)?
    (?P<nuke_inner_whitespace>\<)?
    (?P<selfclose>/)?
    (?P<inline>[^\w\.#\{].*)?
    """, re.X | re.MULTILINE | re.DOTALL | re.UNICODE)

    _ATTRIBUTE_KEY_REGEX = r'(?P<key>[a-zA-Z_][a-zA-Z0-9_-]*)'
    # Single and double quote regexes from:
    # http://stackoverflow.com/a/5453821/281469
    _SINGLE_QUOTE_STRING_LITERAL_REGEX = r"'([^'\\]*(?:\\.[^'\\]*)*)'"
    _DOUBLE_QUOTE_STRING_LITERAL_REGEX = r'"([^"\\]*(?:\\.[^"\\]*)*)"'
    _ATTRIBUTE_VALUE_REGEX = r'(?P<val>\d+|None(?!\w)|%s|%s)' % (
        _SINGLE_QUOTE_STRING_LITERAL_REGEX, _DOUBLE_QUOTE_STRING_LITERAL_REGEX)

    RUBY_HAML_REGEX = re.compile(r'(:|\")%s(\"|) =>' % (_ATTRIBUTE_KEY_REGEX))
    ATTRIBUTE_REGEX = re.compile(r'(?P<pre>\{\s*|,\s*)%s\s*:\s*%s' % (
        _ATTRIBUTE_KEY_REGEX, _ATTRIBUTE_VALUE_REGEX), re.UNICODE)

    def __init__(self, haml, attr_wrapper="'"):
        """Parse ``haml`` immediately; ``attr_wrapper`` is the quote used
        around rendered attribute values."""
        self.haml = haml
        self.attr_wrapper = attr_wrapper
        self.tag = None
        self.id = None
        self.classes = None
        self.attributes = ''
        self.self_close = False
        self.nuke_inner_whitespace = False
        self.nuke_outer_whitespace = False
        self.inline_content = ''
        self._parse_haml()

    def attr_wrap(self, value):
        """Wrap ``value`` in the configured attribute quote character."""
        return '%s%s%s' % (self.attr_wrapper, value, self.attr_wrapper)

    def _parse_haml(self):
        """Split the raw HAML into tag, id, classes, attributes and flags."""
        split_tags = self.HAML_REGEX.search(self.haml).groupdict('')
        self.attributes_dict = self._parse_attribute_dictionary(
            split_tags.get('attributes'))
        # No explicit tag defaults to a <div>.
        self.tag = split_tags.get('tag').strip(self.ELEMENT) or 'div'
        self.id = self._parse_id(split_tags.get('id'))
        # Merge classes given with the "." shorthand and the attribute dict.
        self.classes = (
            '%s %s' % (
                split_tags.get('class').lstrip(self.CLASS).replace('.', ' '),
                self._parse_class_from_attributes_dict())
        ).strip()
        self.self_close = (
            split_tags.get('selfclose') or self.tag in self.self_closing_tags)
        self.nuke_inner_whitespace = \
            split_tags.get('nuke_inner_whitespace') != ''
        self.nuke_outer_whitespace = \
            split_tags.get('nuke_outer_whitespace') != ''
        self.inline_content = split_tags.get('inline').strip()

    def _parse_class_from_attributes_dict(self):
        """Return the ``class`` entry of the attribute dict as a string."""
        cls = self.attributes_dict.get('class', '')
        if not isinstance(cls, str):
            # A sequence of class names: join them with spaces.
            cls = ' '.join(cls)
        return cls.strip()

    def _parse_id(self, id_haml):
        """Combine the ``#`` shorthand id with any ``id`` attribute value."""
        id_text = id_haml.strip(self.ID)
        if 'id' in self.attributes_dict:
            id_text += self._parse_id_dict(self.attributes_dict['id'])
        id_text = id_text.lstrip('_')
        return id_text

    def _parse_id_dict(self, id_dict):
        """Build an ``_``-joined suffix from the ``id`` attribute value.

        ``id_dict`` may be a single string or a sequence of id parts.
        """
        # Fixed: use the argument directly (it used to be shadowed by a
        # redundant re-fetch of self.attributes_dict['id']).
        if isinstance(id_dict, str):
            return '_' + id_dict
        text = ''
        for one_id in id_dict:
            text += '_' + one_id
        return text

    def _escape_attribute_quotes(self, v):
        '''
        Escapes quotes with a backslash, except those inside a Django tag
        (``{{ ... }}``), which must be emitted verbatim.
        '''
        escaped = []
        inside_tag = False
        for i, _ in enumerate(v):
            if v[i:i + 2] == '{{':
                inside_tag = True
            elif v[i:i + 2] == '}}':
                inside_tag = False
            if v[i] == self.attr_wrapper and not inside_tag:
                escaped.append('\\')
            escaped.append(v[i])
        return ''.join(escaped)

    def _parse_attribute_dictionary(self, attribute_dict_string):
        """Evaluate the ``{...}`` attribute chunk into a dict.

        Also fills ``self.attributes`` with the rendered HTML attribute
        string, excluding ``id`` and ``class`` which are handled separately.
        """
        attributes_dict = {}
        if attribute_dict_string:
            attribute_dict_string = attribute_dict_string.replace('\n', ' ')
            try:
                # Converting all allowed attributes to python dictionary
                # style. Replace Ruby-style HAML with Python style.
                # (Replacement templates are raw strings: '\g' is an invalid
                # escape sequence in a plain string literal.)
                attribute_dict_string = re.sub(
                    self.RUBY_HAML_REGEX, r'"\g<key>":', attribute_dict_string)
                # Put double quotes around bare keys.
                attribute_dict_string = re.sub(
                    self.ATTRIBUTE_REGEX, r'\g<pre>"\g<key>":\g<val>',
                    attribute_dict_string)
                # SECURITY: eval() executes arbitrary code from the template.
                # Acceptable only because templates are trusted, first-party
                # input; never feed user-supplied HAML through this parser.
                attributes_dict = eval(attribute_dict_string)
                for k, v in attributes_dict.items():
                    if k != 'id' and k != 'class':
                        if v is None:
                            # Valueless (boolean) HTML attribute.
                            self.attributes += "%s " % (k,)
                        elif isinstance(v, int) or isinstance(v, float):
                            self.attributes += "%s=%s " % (
                                k, self.attr_wrap(v))
                        else:
                            attributes_dict[k] = v
                            v = to_unicode(v)
                            self.attributes += "%s=%s " % (
                                k, self.attr_wrap(
                                    self._escape_attribute_quotes(v)))
                self.attributes = self.attributes.strip()
            except Exception:
                raise Exception('failed to decode: %s' % attribute_dict_string)
        return attributes_dict
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.