code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import numpy as np
import os
import h5py
import nptdms
import re
from contextlib import ExitStack
from pathlib import Path
from typing import Any, AnyStr, Dict, Generator, List, Match, Pattern
from enum import IntEnum
from typing import List, Tuple, Union
from dream3d.Filter import Filter, FilterDelegatePy
from dream3d.simpl import *
# HDF5 attribute/group names shared by tdms2h5() and its helpers.
VERSION_KEY: str = 'Version'                  # firmware version attribute on the HDF5 file root
SLICES_KEY: str = 'TDMSData'                  # top-level group holding one subgroup per slice
INDEX_KEY: str = 'Index'                      # name of the per-file slice index dataset
LAYER_START_TIME_KEY: str = 'LayerStartTime'
LAYER_END_TIME_KEY: str = 'LayerEndTime'
PART_START_TIME_KEY: str = 'PartStartTime'
PART_END_TIME_KEY: str = 'PartEndTime'
TDMS_GROUP_NAME_KEY: str = 'TDMS_GroupName'   # records which TDMS group an HDF5 file came from
VERTICES_KEY: str = 'Vertices'                # NOTE(review): declared but not used in this chunk — confirm
def get_file_list(end_index: int, file_extension: str, file_prefix: str, file_suffix: str, increment: int, input_path: str, ordering: int, padding_digits: int, start_index: int, status_delegate: Union[FilterDelegateCpp, FilterDelegatePy] = FilterDelegatePy()) -> List[str]:
    """Build the ordered list of expected stack file paths.

    Each path has the form ``<input_path>/<prefix><zero-padded index><suffix>.<ext>``.
    When ``ordering`` is truthy the indices run from ``end_index`` downward,
    otherwise from ``start_index`` upward.

    :param increment: step between successive indices (values <= 0 are
        treated as 1 to avoid an infinite loop — the original while-loop hung)
    :param status_delegate: accepted for API symmetry; not used here
    :return: list of file path strings (annotation fixed — the function
        always returned a list, never ``None``)
    """
    step = increment if increment > 0 else 1
    file_list: List[str] = []
    for i in range(0, (end_index - start_index) + 1, step):
        index = end_index - i if ordering else start_index + i
        file_list.append(
            input_path + '/' + file_prefix + str(index).zfill(padding_digits) + file_suffix + '.' + file_extension
        )
    return file_list
def tdms2h5(file_info: StackFileListInfo, file_list: List, output_dir: Path, area_offset: int, intensity_offset: int, laser_offset: int, groups: List[str] = [], status_delegate: Union[FilterDelegateCpp, FilterDelegatePy] = FilterDelegatePy()) -> None:
    """Convert a stack of TDMS slice files into one HDF5 file per TDMS group.

    For every TDMS file in ``file_list`` the slice index is parsed from the
    file name, and each (optionally pattern-filtered) TDMS group is written
    into ``<output_dir>/<group name>.h5`` under ``TDMSData/<slice index>``.
    After all slices are written, a 3-column index dataset is appended to
    every output file.

    :param groups: regex patterns; when non-empty, only TDMS groups whose
        name matches one of them are converted
    :return: ``None`` on success/early exit, ``-1`` when no HDF5 file was
        created (NOTE(review): return value disagrees with the ``-> None``
        annotation — confirm intended contract)
    """
    largest_offset = max(area_offset, intensity_offset)  # NOTE(review): computed but never used below — confirm intent
    with ExitStack() as exitStack:
        # One open HDF5 file per TDMS group name; ExitStack closes them all on exit.
        h5_files: Dict[str, h5py.File] = {}
        slice_indices: List[int] = []
        # Slice index = first run of digits after the configured file prefix.
        regex_name: Pattern[AnyStr] = re.compile(fr'{file_info.FilePrefix}(\d+)')
        path: Path
        paths = []
        for file in file_list:
            paths.append(Path(file))
        paths_size = len(paths)
        path_ind = 1
        for path in paths:
            status_delegate.notifyStatusMessage(f'Converting {path_ind} of {paths_size}: \"{path}\"')
            path_ind = path_ind + 1
            # NOTE(review): search() returns None for a non-matching name and the
            # next line would raise AttributeError — assumes file names always match.
            match: Match[AnyStr] = regex_name.search(path.stem)
            slice_index = int(match.group(1))
            slice_indices.append(slice_index)
            with nptdms.TdmsFile(path) as tdmsFile:
                build_firmware_version: int = 3
                # Firmware version is inferred from which Bitgain property naming
                # scheme the file uses (old: 'Bitgain OS n', new: 'Scanner.Bitgain.OSn').
                try:
                    status_delegate.notifyStatusMessage(f'Trying to find old style Bitgain property..')
                    bitgain_os_1: float = tdmsFile.properties['Bitgain OS 1']
                    bitgain_os_2: float = tdmsFile.properties['Bitgain OS 2']
                except:
                    try:
                        status_delegate.notifyStatusMessage(f'Trying to find new style Bitgain property..')
                        build_firmware_version = 4
                        bitgain_os_1: float = tdmsFile.properties['Scanner.Bitgain.OS1']
                        bitgain_os_2: float = tdmsFile.properties['Scanner.Bitgain.OS2']
                    except:
                        # Neither naming scheme present: abort the whole conversion.
                        status_delegate.notifyStatusMessage(f'Bitgain Property not found. Unable to determine firmware version. Exiting Function.')
                        return
                group: nptdms.TdmsGroup
                for ind, group in enumerate(tdmsFile.groups()):
                    # Skip groups that match none of the requested patterns; report
                    # only once, when the last group has also failed to match.
                    if groups and not any(re.match(pattern, group.name) for pattern in groups):
                        if ind == len(tdmsFile.groups())-1:
                            status_delegate.notifyStatusMessage(f' Group(s) not located')
                        continue
                    output_file_path = output_dir / f'{group.name}.h5'
                    if group.name not in h5_files:
                        # First time this group is seen: create the file, stamp the
                        # firmware version, and create the slices container group.
                        h5_files[group.name] = exitStack.enter_context(h5py.File(output_file_path, 'w'))
                        h5_file = h5_files[group.name]
                        h5_file.attrs[VERSION_KEY] = build_firmware_version
                        h5_group = h5_file.create_group(SLICES_KEY)
                        h5_group.attrs[TDMS_GROUP_NAME_KEY] = group.name
                    h5_file = h5_files[group.name]
                    h5_group: h5py.Group = h5_file[SLICES_KEY].create_group(str(slice_index))
                    # File-level (layer) properties, with Start/End times renamed.
                    layer_replacements = {
                        'StartTime': LAYER_START_TIME_KEY,
                        'EndTime': LAYER_END_TIME_KEY
                    }
                    _write_tdms_properties(h5_group, tdmsFile.properties, layer_replacements)
                    # Group-level (part) properties, covering both naming schemes.
                    part_replacements = {
                        'StartTime' : PART_START_TIME_KEY,
                        'EndTime' : PART_END_TIME_KEY,
                        'Part.StartTime': PART_START_TIME_KEY,
                        'Part.EndTime' : PART_END_TIME_KEY
                    }
                    _write_tdms_properties(h5_group, group.properties, part_replacements)
                    # LaserTTL only uses laser_offset. The end has to be adjusted to make the resulting array consistent
                    laser_channel: nptdms.TdmsChannel = group['LaserTTL']
                    laser_end_index: int = len(laser_channel) - (laser_offset)
                    h5_group.create_dataset(laser_channel.name, data=laser_channel[laser_offset: laser_end_index])
                    # Area and Intensity are trimmed symmetrically by their own offsets.
                    area_channel: nptdms.TdmsChannel = group['Area']
                    end_index: int = len(area_channel) - (area_offset)
                    h5_group.create_dataset(area_channel.name, data=area_channel[area_offset: end_index])
                    intensity_channel: nptdms.TdmsChannel = group['Intensity']
                    end_index: int = len(intensity_channel) - (intensity_offset)
                    h5_group.create_dataset(intensity_channel.name, data=intensity_channel[intensity_offset: end_index])
                    # Have not figured out how to correlate parameter to the actual parameter used, just use the same as Laser TTL since it is a machine setting
                    parameter_channel: nptdms.TdmsChannel = group['Parameter']
                    h5_group.create_dataset(parameter_channel.name, data=parameter_channel[:])
                    # X/Y are scaled by the Bitgain factors into float32 micrometers.
                    x_channel: nptdms.TdmsChannel = group['X-Axis']
                    x_dataset = h5_group.create_dataset(x_channel.name, data=(x_channel[:] / bitgain_os_1), dtype=np.float32)
                    x_dataset.attrs['Units'] = 'μm'
                    y_channel: nptdms.TdmsChannel = group['Y-Axis']
                    y_dataset = h5_group.create_dataset(y_channel.name, data=(y_channel[:] / bitgain_os_2), dtype=np.float32)
                    y_dataset.attrs['Units'] = 'μm'
        # Resulting slices will be aligned with the same number of data points for each channel
        if not h5_files:
            status_delegate.notifyStatusMessage(f' No TDMS files located')
            return -1
        # Append a (slice index, layer thickness, vertex count) table to each file.
        slice_indices = sorted(slice_indices)
        for h5_file in h5_files.values():
            index_dataset = np.zeros((len(slice_indices), 3), dtype=np.int64)
            for i, index in enumerate(slice_indices):
                index_dataset[i][0] = index
                index_dataset[i][1] = h5_file[SLICES_KEY][str(index)].attrs['layerThickness']
                index_dataset[i][2] = h5_file[SLICES_KEY][str(index)]['X-Axis'].size
            dataset: h5py.Dataset = h5_file.create_dataset(INDEX_KEY, data=index_dataset)
            dataset.attrs['Column0'] = 'SliceIndex'
            dataset.attrs['Column1'] = 'LayerThickness (μm)'
            dataset.attrs['Column2'] = 'NumVertices'
        status_delegate.notifyStatusMessage('\nWrote files:')
        h5_file: h5py.File
        for h5_file in h5_files.values():
            status_delegate.notifyStatusMessage(f' \"{h5_file.filename}\"')
def _write_tdms_properties(h5_group: h5py.Group, tdms_dict: Dict[str, Any], replacements: Dict[str, str]) -> None:
    """Copy TDMS property values onto an HDF5 group's attributes.

    Keys found in ``replacements`` are renamed; ``numpy.datetime64`` values
    are serialized as microsecond-precision UTC ISO strings, everything else
    is stored as-is.
    """
    for raw_key, raw_value in tdms_dict.items():
        attr_name = replacements.get(raw_key, raw_key)
        if isinstance(raw_value, np.datetime64):
            h5_group.attrs[attr_name] = str(np.datetime_as_string(raw_value, unit='us', timezone='UTC'))
        else:
            h5_group.attrs[attr_name] = raw_value
class TDMStoH5(Filter):
    """DREAM3D filter that converts a stack of TDMS files into HDF5 files.

    Wraps :func:`get_file_list` and :func:`tdms2h5`, exposing the channel
    trim offsets, output folder, optional group pattern, and the input file
    stack description as filter parameters.
    """

    def __init__(self) -> None:
        # Trim offsets applied to the Area/Intensity/LaserTTL channels in tdms2h5().
        self.area_offset: int = 0
        self.intensity_offset: int = 0
        self.laser_offset: int = 0
        self.output_folder: str = ''
        # Optional regex selecting which TDMS groups to convert; empty = all.
        self.group: str = ''
        self.stack_info_param: StackFileListInfo = StackFileListInfo(0, 0, 0, 0, 1, '', 'Prefix_', '_Suffix', 'tdms')

    # The getter/setter pairs below are required by the FilterParameter
    # bindings created in setup_parameters(); they mirror attribute access.
    def _set_area_offset(self, value: int) -> None:
        self.area_offset = value

    def _get_area_offset(self) -> int:
        return self.area_offset

    def _set_intensity_offset(self, value: int) -> None:
        self.intensity_offset = value

    def _get_intensity_offset(self) -> int:
        return self.intensity_offset

    def _set_laser_offset(self, value: int) -> None:
        self.laser_offset = value

    def _get_laser_offset(self) -> int:
        return self.laser_offset

    def _set_output_folder(self, value: str) -> None:
        self.output_folder = value

    def _get_output_folder(self) -> str:
        return self.output_folder

    def _set_group(self, value: str) -> None:
        self.group = value

    def _get_group(self) -> str:
        return self.group

    def _get_stack_info_param(self) -> StackFileListInfo:
        return self.stack_info_param

    def _set_stack_info_param(self, value) -> None:
        self.stack_info_param = value

    @staticmethod
    def name() -> str:
        return 'TDMS to H5'

    @staticmethod
    def uuid() -> str:
        return '{8b069c55-6d94-4db1-b012-cdfb7dd08ad6}'

    @staticmethod
    def group_name() -> str:
        return 'Example'

    @staticmethod
    def sub_group_name() -> str:
        return 'Sub Example'

    @staticmethod
    def human_label() -> str:
        return 'Convert TDMS to HDF5'

    @staticmethod
    def version() -> str:
        return '1.0.0'

    @staticmethod
    def compiled_lib_name() -> str:
        return 'Python'

    def setup_parameters(self) -> List[FilterParameter]:
        """Declare the UI parameters bound to this filter's attributes."""
        # NOTE(review): 'req' is built but never consumed — confirm whether a
        # parameter was meant to use it.
        req = MultiDataArraySelectionFilterParameter.RequirementType([IGeometry.Type.Image], [AttributeMatrix.Type.Cell], [], [])
        return [
            IntFilterParameter('Area Offset', 'area_offset', self.area_offset, FilterParameter.Category.Parameter, self._set_area_offset, self._get_area_offset, -1),
            IntFilterParameter('Intensity Offset', 'intensity_offset', self.intensity_offset, FilterParameter.Category.Parameter, self._set_intensity_offset, self._get_intensity_offset, -1),
            IntFilterParameter('Laser Offset', 'laser_offset', self.laser_offset, FilterParameter.Category.Parameter, self._set_laser_offset, self._get_laser_offset, -1),
            OutputPathFilterParameter('Output Folder', 'Output Folder', '', FilterParameter.Category.Parameter,
                                      self._set_output_folder, self._get_output_folder, -1),
            StringFilterParameter('Group', 'group', self.group, FilterParameter.Category.Parameter, self._set_group,
                                  self._get_group, -1),
            FileListInfoFilterParameter('File List', 'filelist', self.stack_info_param, FilterParameter.Category.Parameter,
                                        self._set_stack_info_param, self._get_stack_info_param)
        ]

    def data_check(self, dca: DataContainerArray, status_delegate: Union[FilterDelegateCpp, FilterDelegatePy] = FilterDelegatePy()) -> Tuple[int, str]:
        """Validate parameters before execution.

        :return: (0, 'Success') when valid, otherwise (negative code, message)
        """
        if not self.stack_info_param.InputPath:
            return (-5550, 'An input folder must be selected')
        if not self.output_folder:
            return (-301, 'An output folder must be selected')
        if not os.path.exists(self.stack_info_param.InputPath):
            # Fixed: previously referenced self.input_folder, which does not
            # exist on this class and raised AttributeError.
            return (-302, f' Input path {self.stack_info_param.InputPath} does not exist')
        if not os.path.exists(self.output_folder):
            status_delegate.setWarningCondition(1, f' Output folder {self.output_folder} does not exist; creating a new folder')
        return (0, 'Success')

    def _execute_impl(self, dca: DataContainerArray, status_delegate: Union[FilterDelegateCpp, FilterDelegatePy] = FilterDelegatePy()) -> Tuple[int, str]:
        """Run the conversion: build the file list, ensure the output folder
        exists, and invoke tdms2h5()."""
        file_list = get_file_list(self.stack_info_param.EndIndex, self.stack_info_param.FileExtension, self.stack_info_param.FilePrefix, self.stack_info_param.FileSuffix, self.stack_info_param.IncrementIndex, self.stack_info_param.InputPath, self.stack_info_param.Ordering, self.stack_info_param.PaddingDigits, self.stack_info_param.StartIndex)
        if not os.path.exists(self.output_folder):
            # makedirs: the selected output path may be nested; mkdir failed then.
            os.makedirs(self.output_folder, exist_ok=True)
        # Fixed: tdms2h5 expects a List[str] of patterns; passing the raw string
        # made it iterate the pattern character by character.
        group_patterns = [self.group] if self.group else []
        if tdms2h5(self.stack_info_param, file_list, Path(self.output_folder), self.area_offset, self.intensity_offset, self.laser_offset, group_patterns, status_delegate) == -1:
            return (-1, 'could not create HDF5 files')
        return (0, 'Success')
filters = [TDMStoH5] | Python/Filters/tdms_to_h5.py |
import numpy as np
import os
import h5py
import nptdms
import re
from contextlib import ExitStack
from pathlib import Path
from typing import Any, AnyStr, Dict, Generator, List, Match, Pattern
from enum import IntEnum
from typing import List, Tuple, Union
from dream3d.Filter import Filter, FilterDelegatePy
from dream3d.simpl import *
VERSION_KEY: str = 'Version'
SLICES_KEY: str = 'TDMSData'
INDEX_KEY: str = 'Index'
LAYER_START_TIME_KEY: str = 'LayerStartTime'
LAYER_END_TIME_KEY: str = 'LayerEndTime'
PART_START_TIME_KEY: str = 'PartStartTime'
PART_END_TIME_KEY: str = 'PartEndTime'
TDMS_GROUP_NAME_KEY: str = 'TDMS_GroupName'
VERTICES_KEY: str = 'Vertices'
def get_file_list(end_index: int, file_extension: str, file_prefix: str, file_suffix: str, increment: int, input_path: str, ordering: int, padding_digits: int, start_index: int, status_delegate: Union[FilterDelegateCpp, FilterDelegatePy] = FilterDelegatePy()) -> None:
file_list = []
index = 0
i = 0
while i < (end_index - start_index) + 1:
if(ordering):
index = end_index - i
else:
index = start_index + i
file = input_path + '/' + file_prefix + str(index).zfill(padding_digits) + file_suffix + '.' + file_extension
file_list.append(file)
i = i + increment
return file_list
def tdms2h5(file_info: StackFileListInfo, file_list: List, output_dir: Path, area_offset: int, intensity_offset: int, laser_offset: int, groups: List[str] = [], status_delegate: Union[FilterDelegateCpp, FilterDelegatePy] = FilterDelegatePy()) -> None:
largest_offset = max(area_offset, intensity_offset)
with ExitStack() as exitStack:
h5_files: Dict[str, h5py.File] = {}
slice_indices: List[int] = []
regex_name: Pattern[AnyStr] = re.compile(fr'{file_info.FilePrefix}(\d+)')
path: Path
paths = []
for file in file_list:
paths.append(Path(file))
paths_size = len(paths)
path_ind = 1
for path in paths:
status_delegate.notifyStatusMessage(f'Converting {path_ind} of {paths_size}: \"{path}\"')
path_ind = path_ind + 1
match: Match[AnyStr] = regex_name.search(path.stem)
slice_index = int(match.group(1))
slice_indices.append(slice_index)
with nptdms.TdmsFile(path) as tdmsFile:
build_firmware_version: int = 3
try:
status_delegate.notifyStatusMessage(f'Trying to find old style Bitgain property..')
bitgain_os_1: float = tdmsFile.properties['Bitgain OS 1']
bitgain_os_2: float = tdmsFile.properties['Bitgain OS 2']
except:
try:
status_delegate.notifyStatusMessage(f'Trying to find new style Bitgain property..')
build_firmware_version = 4
bitgain_os_1: float = tdmsFile.properties['Scanner.Bitgain.OS1']
bitgain_os_2: float = tdmsFile.properties['Scanner.Bitgain.OS2']
except:
status_delegate.notifyStatusMessage(f'Bitgain Property not found. Unable to determine firmware version. Exiting Function.')
return
group: nptdms.TdmsGroup
for ind, group in enumerate(tdmsFile.groups()):
if groups and not any(re.match(pattern, group.name) for pattern in groups):
if ind == len(tdmsFile.groups())-1:
status_delegate.notifyStatusMessage(f' Group(s) not located')
continue
output_file_path = output_dir / f'{group.name}.h5'
if group.name not in h5_files:
h5_files[group.name] = exitStack.enter_context(h5py.File(output_file_path, 'w'))
h5_file = h5_files[group.name]
h5_file.attrs[VERSION_KEY] = build_firmware_version
h5_group = h5_file.create_group(SLICES_KEY)
h5_group.attrs[TDMS_GROUP_NAME_KEY] = group.name
h5_file = h5_files[group.name]
h5_group: h5py.Group = h5_file[SLICES_KEY].create_group(str(slice_index))
layer_replacements = {
'StartTime': LAYER_START_TIME_KEY,
'EndTime': LAYER_END_TIME_KEY
}
_write_tdms_properties(h5_group, tdmsFile.properties, layer_replacements)
part_replacements = {
'StartTime' : PART_START_TIME_KEY,
'EndTime' : PART_END_TIME_KEY,
'Part.StartTime': PART_START_TIME_KEY,
'Part.EndTime' : PART_END_TIME_KEY
}
_write_tdms_properties(h5_group, group.properties, part_replacements)
# LaserTTL only uses laser_offset. The end has to be adjusted to make the resulting array consistent
laser_channel: nptdms.TdmsChannel = group['LaserTTL']
laser_end_index: int = len(laser_channel) - (laser_offset)
h5_group.create_dataset(laser_channel.name, data=laser_channel[laser_offset: laser_end_index])
# Intensity and Area use laser_offset
area_channel: nptdms.TdmsChannel = group['Area']
# At this point for illustrative purposes, since the laser_offset is always the largest
end_index: int = len(area_channel) - (area_offset)
h5_group.create_dataset(area_channel.name, data=area_channel[area_offset: end_index])
intensity_channel: nptdms.TdmsChannel = group['Intensity']
end_index: int = len(intensity_channel) - (intensity_offset)
h5_group.create_dataset(intensity_channel.name, data=intensity_channel[intensity_offset: end_index])
# Have not figured out how to correlate parameter to the actual parameter used, just use the same as Laser TTL since it is a machine setting
parameter_channel: nptdms.TdmsChannel = group['Parameter']
h5_group.create_dataset(parameter_channel.name, data=parameter_channel[:])
# X and Y channels just adjust the maximum
x_channel: nptdms.TdmsChannel = group['X-Axis']
x_dataset = h5_group.create_dataset(x_channel.name, data=(x_channel[:] / bitgain_os_1), dtype=np.float32)
x_dataset.attrs['Units'] = 'μm'
y_channel: nptdms.TdmsChannel = group['Y-Axis']
y_dataset = h5_group.create_dataset(y_channel.name, data=(y_channel[:] / bitgain_os_2), dtype=np.float32)
y_dataset.attrs['Units'] = 'μm'
# Resulting slices will be aligned with the same number of data points for each channel
if not h5_files:
status_delegate.notifyStatusMessage(f' No TDMS files located')
return -1
slice_indices = sorted(slice_indices)
for h5_file in h5_files.values():
index_dataset = np.zeros((len(slice_indices), 3), dtype=np.int64)
for i, index in enumerate(slice_indices):
index_dataset[i][0] = index
index_dataset[i][1] = h5_file[SLICES_KEY][str(index)].attrs['layerThickness']
index_dataset[i][2] = h5_file[SLICES_KEY][str(index)]['X-Axis'].size
dataset: h5py.Dataset = h5_file.create_dataset(INDEX_KEY, data=index_dataset)
dataset.attrs['Column0'] = 'SliceIndex'
dataset.attrs['Column1'] = 'LayerThickness (μm)'
dataset.attrs['Column2'] = 'NumVertices'
status_delegate.notifyStatusMessage('\nWrote files:')
h5_file: h5py.File
for h5_file in h5_files.values():
status_delegate.notifyStatusMessage(f' \"{h5_file.filename}\"')
def _write_tdms_properties(h5_group: h5py.Group, tdms_dict: Dict[str, Any], replacements: Dict[str, str]) -> None:
key: str
value: Any
for key, value in tdms_dict.items():
if key in replacements:
key = replacements[key]
if isinstance(value, np.datetime64):
h5_group.attrs[key] = str(np.datetime_as_string(value, unit='us', timezone='UTC'))
else:
h5_group.attrs[key] = value
class TDMStoH5(Filter):
def __init__(self) -> None:
self.area_offset: int = 0
self.intensity_offset: int = 0
self.laser_offset: int = 0
self.output_folder: str = ''
self.group: str = ''
self.stack_info_param: StackFileListInfo = StackFileListInfo(0, 0, 0, 0, 1, '', 'Prefix_', '_Suffix', 'tdms')
def _set_area_offset(self, value: int) -> None:
self.area_offset = value
def _get_area_offset(self) -> int:
return self.area_offset
def _set_intensity_offset(self, value: int) -> None:
self.intensity_offset = value
def _get_intensity_offset(self) -> int:
return self.intensity_offset
def _set_laser_offset(self, value: int) -> None:
self.laser_offset = value
def _get_laser_offset(self) -> int:
return self.laser_offset
def _set_output_folder(self, value: str) -> None:
self.output_folder = value
def _get_output_folder(self) -> str:
return self.output_folder
def _set_group(self, value: str) -> None:
self.group = value
def _get_group(self) -> str:
return self.group
def _get_stack_info_param(self) -> StackFileListInfo:
return self.stack_info_param
def _set_stack_info_param(self, value) -> None:
self.stack_info_param = value
@staticmethod
def name() -> str:
return 'TDMS to H5'
@staticmethod
def uuid() -> str:
return '{8b069c55-6d94-4db1-b012-cdfb7dd08ad6}'
@staticmethod
def group_name() -> str:
return 'Example'
@staticmethod
def sub_group_name() -> str:
return 'Sub Example'
@staticmethod
def human_label() -> str:
return 'Convert TDMS to HDF5'
@staticmethod
def version() -> str:
return '1.0.0'
@staticmethod
def compiled_lib_name() -> str:
return 'Python'
def setup_parameters(self) -> List[FilterParameter]:
req = MultiDataArraySelectionFilterParameter.RequirementType([IGeometry.Type.Image], [AttributeMatrix.Type.Cell], [], [])
return [
IntFilterParameter('Area Offset', 'area_offset', self.area_offset, FilterParameter.Category.Parameter, self._set_area_offset, self._get_area_offset, -1),
IntFilterParameter('Intensity Offset', 'intensity_offset', self.intensity_offset, FilterParameter.Category.Parameter, self._set_intensity_offset, self._get_intensity_offset, -1),
IntFilterParameter('Laser Offset', 'laser_offset', self.laser_offset, FilterParameter.Category.Parameter, self._set_laser_offset, self._get_laser_offset, -1),
OutputPathFilterParameter('Output Folder', 'Output Folder', '', FilterParameter.Category.Parameter,
self._set_output_folder, self._get_output_folder, -1),
StringFilterParameter('Group', 'group', self.group, FilterParameter.Category.Parameter, self._set_group,
self._get_group, -1),
FileListInfoFilterParameter('File List', 'filelist', self.stack_info_param, FilterParameter.Category.Parameter,
self._set_stack_info_param, self._get_stack_info_param)
]
def data_check(self, dca: DataContainerArray, status_delegate: Union[FilterDelegateCpp, FilterDelegatePy] = FilterDelegatePy()) -> Tuple[int, str]:
if not self.stack_info_param.InputPath:
return (-5550, 'An input folder must be selected')
if not self.output_folder:
return (-301, 'An output folder must be selected')
if not os.path.exists(self.stack_info_param.InputPath):
return (-302, f' Input path {self.input_folder} does not exist')
if not os.path.exists(self.output_folder):
status_delegate.setWarningCondition(1, f' Output folder {self.output_folder} does not exist; creating a new folder')
return (0, 'Success')
def _execute_impl(self, dca: DataContainerArray, status_delegate: Union[FilterDelegateCpp, FilterDelegatePy] = FilterDelegatePy()) -> Tuple[int, str]:
file_list = get_file_list(self.stack_info_param.EndIndex, self.stack_info_param.FileExtension, self.stack_info_param.FilePrefix, self.stack_info_param.FileSuffix, self.stack_info_param.IncrementIndex, self.stack_info_param.InputPath, self.stack_info_param.Ordering, self.stack_info_param.PaddingDigits, self.stack_info_param.StartIndex)
if not os.path.exists(self.output_folder):
os.mkdir(self.output_folder)
if tdms2h5(self.stack_info_param, file_list, Path(self.output_folder), self.area_offset, self.intensity_offset, self.laser_offset, self.group, status_delegate) == -1:
return (-1, 'could not create HDF5 files')
return (0, 'Success')
filters = [TDMStoH5] | 0.462959 | 0.103839 |
from datetime import timedelta
import os
from typing import Tuple
from airflow import DAG
from airflow.operators.sensors import S3KeySensor
from airflow.operators.python_operator import PythonOperator
from airflow.utils.dates import days_ago
import boto3
from csv_generator.process_xml_zip import process_zip_or_extracted_dir
from db_manager.database import managed_connection
from db_manager.processing import process_source_dir
# Default task arguments applied to every operator in this DAG.
default_args = {
    'owner': 'elife',
    'depends_on_past': False,
    'start_date': days_ago(1),
    'email': ['<EMAIL>'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=5)
}
# Task callables report their outcome by returning one of these booleans.
SUCCESS = True
FAILURE = False
TEMP_DIR = 'temp-dir'      # local scratch directory for downloaded zip files
OUTPUT_DIR = 'csv_output'  # local directory for generated CSV files
# NOTE(review): no default for ARCHIVE_BUCKET — it is None when the env var is
# unset, and the archive task's copy_object would then fail. Confirm deployment.
ARCHIVE_BUCKET = os.environ.get('ARCHIVE_BUCKET')
INCOMING_BUCKET = os.environ.get('INCOMING_BUCKET', 'elife-data-pipeline-test')
def _set_upstreams(upstream_map: Tuple[Tuple[str]]) -> None:
"""takes a map of operators and their target upstream
operators and sets the upstream operator value.
:param upstream_map: tuple
:return:
"""
for operator, upstream in upstream_map:
operator.set_upstream(upstream)
def _get_file_keys(*args, **kwargs) -> bool:
    """Retrieve the object keys currently in `INCOMING_BUCKET` and publish
    them as `file_keys` on the `xcom` layer.

    :param args:
    :param kwargs: Airflow context; requires the task instance under 'ti'
    :return: bool
    """
    s3 = boto3.client('s3')
    listing = s3.list_objects(Bucket=INCOMING_BUCKET)
    keys = [entry['Key'] for entry in listing['Contents']]
    if len(keys) == 0:
        return FAILURE
    kwargs['ti'].xcom_push('file_keys', keys)
    return SUCCESS
def _download_zip_files(*args, **kwargs):
    """Download `n` files from `INCOMING_BUCKET` into `TEMP_DIR`, and store
    the resulting local file names as `zip_files` in the `xcom` layer.
    Expects `file_keys` to be present on the `xcom` layer to provide
    the files to download.

    :param args:
    :param kwargs: Airflow context; requires the task instance under 'ti'
    :return: bool
    """
    client = boto3.client('s3')
    file_keys = kwargs['ti'].xcom_pull(task_ids=None, key='file_keys')
    if len(file_keys):
        os.makedirs(TEMP_DIR, exist_ok=True)
        for zip_file in file_keys:
            # NOTE(review): assumes keys contain no '/' — a nested key would
            # target a subdirectory of TEMP_DIR that was never created. Confirm
            # bucket layout.
            client.download_file(INCOMING_BUCKET, zip_file, os.path.join(TEMP_DIR, zip_file))
        # Publishes everything in TEMP_DIR, which may include leftovers from a
        # previous run on the same worker.
        kwargs['ti'].xcom_push('zip_files', os.listdir(TEMP_DIR))
        return SUCCESS
    return FAILURE
def _convert_zips_to_csvs(*args, **kwargs) -> bool:
    """Run every `zip_files` entry through the `csv_generator`, writing the
    resulting CSVs into `OUTPUT_DIR`.

    :param args:
    :param kwargs: Airflow context; requires the task instance under 'ti'
    :return: bool
    """
    archives = kwargs['ti'].xcom_pull(task_ids=None, key='zip_files')
    if len(archives) == 0:
        return FAILURE
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    for archive in archives:
        process_zip_or_extracted_dir(os.path.join(TEMP_DIR, archive), OUTPUT_DIR, batch=True)
    return SUCCESS
def _move_zip_files_to_archive(*args, **kwargs):
    """Copies files from `INCOMING_BUCKET` to the
    `ARCHIVE_BUCKET` then deletes the original files.
    Expects `file_keys` to be present on the `xcom` layer to provide
    the files to copy.

    S3 has no atomic move: each object is copied and then deleted
    individually, so a failure mid-loop leaves some objects in both buckets.

    :param args:
    :param kwargs: Airflow context; requires the task instance under 'ti'
    :return: bool
    """
    client = boto3.client('s3')
    file_keys = kwargs['ti'].xcom_pull(task_ids=None, key='file_keys')
    if len(file_keys):
        for zip_file in file_keys:
            # NOTE(review): fails if ARCHIVE_BUCKET is unset (None) — see the
            # environment lookup at module level.
            client.copy_object(
                Key=zip_file,
                CopySource='{0}/{1}'.format(INCOMING_BUCKET, zip_file),
                Bucket=ARCHIVE_BUCKET
            )
            client.delete_object(
                Key=zip_file,
                Bucket=INCOMING_BUCKET
            )
        return SUCCESS
    return FAILURE
def _run_db_manager(*args, **kwargs) -> bool:
    """Ingest CSV files from `OUTPUT_DIR` into target database.

    The connection is opened and closed by the managed_connection() context
    manager; any ingest error propagates and fails the task.

    :param args:
    :param kwargs:
    :return: bool (always SUCCESS unless an exception is raised)
    """
    with managed_connection() as connection:
        process_source_dir(
            connection,
            OUTPUT_DIR,
            is_batch_mode=True
        )
    return SUCCESS
# Daily pipeline: watch the incoming bucket, download new zip files, convert
# them to CSVs, archive the originals, and load the CSVs into the database.
dag = DAG('data_pipeline', default_args=default_args, schedule_interval='@daily')
# Soft-failing sensor: skips the run (rather than failing it) when no object
# matching '*' appears in INCOMING_BUCKET within the 300s timeout.
bucket_watcher = S3KeySensor(
    task_id='bucket_watcher',
    poke_interval=5,
    timeout=300,
    soft_fail=True,
    wildcard_match=True,
    bucket_key='*',
    bucket_name=INCOMING_BUCKET,
    dag=dag
)
get_file_keys = PythonOperator(
    task_id='get_file_keys',
    provide_context=True,
    python_callable=_get_file_keys,
    dag=dag
)
download_zip_files = PythonOperator(
    task_id='download_zip_files',
    provide_context=True,
    python_callable=_download_zip_files,
    dag=dag
)
move_zip_files_to_archive = PythonOperator(
    task_id='move_zip_files_to_archive',
    provide_context=True,
    python_callable=_move_zip_files_to_archive,
    dag=dag
)
convert_zips_to_csvs = PythonOperator(
    task_id='convert_zips_to_csvs',
    provide_context=True,
    python_callable=_convert_zips_to_csvs,
    dag=dag
)
run_db_manager = PythonOperator(
    task_id='run_db_manager',
    provide_context=True,
    python_callable=_run_db_manager,
    dag=dag
)
# (task, upstream) pairs: conversion and archiving both fan out from download.
upstream_map = (
    (get_file_keys, bucket_watcher),
    (download_zip_files, get_file_keys),
    (convert_zips_to_csvs, download_zip_files),
    (move_zip_files_to_archive, download_zip_files),
    (run_db_manager, convert_zips_to_csvs),
)
_set_upstreams(upstream_map) | airflow/dags/data_pipeline_dag.py | from datetime import timedelta
import os
from typing import Tuple
from airflow import DAG
from airflow.operators.sensors import S3KeySensor
from airflow.operators.python_operator import PythonOperator
from airflow.utils.dates import days_ago
import boto3
from csv_generator.process_xml_zip import process_zip_or_extracted_dir
from db_manager.database import managed_connection
from db_manager.processing import process_source_dir
default_args = {
'owner': 'elife',
'depends_on_past': False,
'start_date': days_ago(1),
'email': ['<EMAIL>'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5)
}
SUCCESS = True
FAILURE = False
TEMP_DIR = 'temp-dir'
OUTPUT_DIR = 'csv_output'
ARCHIVE_BUCKET = os.environ.get('ARCHIVE_BUCKET')
INCOMING_BUCKET = os.environ.get('INCOMING_BUCKET', 'elife-data-pipeline-test')
def _set_upstreams(upstream_map: Tuple[Tuple[str]]) -> None:
"""takes a map of operators and their target upstream
operators and sets the upstream operator value.
:param upstream_map: tuple
:return:
"""
for operator, upstream in upstream_map:
operator.set_upstream(upstream)
def _get_file_keys(*args, **kwargs) -> bool:
"""Retrieves a list of object keys from the
`INCOMING_BUCKET` and stores them in the `xcom` layer.
:param args:
:param kwargs:
:return: bool
"""
client = boto3.client('s3')
response = client.list_objects(Bucket=INCOMING_BUCKET)
file_keys = [f['Key'] for f in response['Contents']]
if len(file_keys):
kwargs['ti'].xcom_push('file_keys', file_keys)
return SUCCESS
return FAILURE
def _download_zip_files(*args, **kwargs):
"""Download `n` files from `INCOMING_BUCKET`, and stores the
file paths as `zip_files` in the `xcom` layer.
Expects `file_keys` to be present on the `xcom` layer to provide
the files to download.
:param args:
:param kwargs:
:return: bool
"""
client = boto3.client('s3')
file_keys = kwargs['ti'].xcom_pull(task_ids=None, key='file_keys')
if len(file_keys):
os.makedirs(TEMP_DIR, exist_ok=True)
for zip_file in file_keys:
client.download_file(INCOMING_BUCKET, zip_file, os.path.join(TEMP_DIR, zip_file))
kwargs['ti'].xcom_push('zip_files', os.listdir(TEMP_DIR))
return SUCCESS
return FAILURE
def _convert_zips_to_csvs(*args, **kwargs) -> bool:
"""Runs target `zip_files` through the `csv_generator`.
:param args:
:param kwargs:
:return: bool
"""
zip_files = kwargs['ti'].xcom_pull(task_ids=None, key='zip_files')
if len(zip_files):
os.makedirs(OUTPUT_DIR, exist_ok=True)
for zip_file in zip_files:
process_zip_or_extracted_dir(os.path.join(TEMP_DIR, zip_file), OUTPUT_DIR, batch=True)
return SUCCESS
return FAILURE
def _move_zip_files_to_archive(*args, **kwargs):
"""Copies files from `INCOMING_BUCKET` to the
`ARCHIVE_BUCKET` then deletes the original files.
Expects `file_keys` to be present on the `xcom` layer to provide
the files to copy.
:param args:
:param kwargs:
:return:
"""
client = boto3.client('s3')
file_keys = kwargs['ti'].xcom_pull(task_ids=None, key='file_keys')
if len(file_keys):
for zip_file in file_keys:
client.copy_object(
Key=zip_file,
CopySource='{0}/{1}'.format(INCOMING_BUCKET, zip_file),
Bucket=ARCHIVE_BUCKET
)
client.delete_object(
Key=zip_file,
Bucket=INCOMING_BUCKET
)
return SUCCESS
return FAILURE
def _run_db_manager(*args, **kwargs) -> bool:
"""Ingest CSV files from `OUTPUT_DIR` into target database.
:param args:
:param kwargs:
:return: bool
"""
with managed_connection() as connection:
process_source_dir(
connection,
OUTPUT_DIR,
is_batch_mode=True
)
return SUCCESS
dag = DAG('data_pipeline', default_args=default_args, schedule_interval='@daily')
bucket_watcher = S3KeySensor(
task_id='bucket_watcher',
poke_interval=5,
timeout=300,
soft_fail=True,
wildcard_match=True,
bucket_key='*',
bucket_name=INCOMING_BUCKET,
dag=dag
)
get_file_keys = PythonOperator(
task_id='get_file_keys',
provide_context=True,
python_callable=_get_file_keys,
dag=dag
)
download_zip_files = PythonOperator(
task_id='download_zip_files',
provide_context=True,
python_callable=_download_zip_files,
dag=dag
)
move_zip_files_to_archive = PythonOperator(
task_id='move_zip_files_to_archive',
provide_context=True,
python_callable=_move_zip_files_to_archive,
dag=dag
)
convert_zips_to_csvs = PythonOperator(
task_id='convert_zips_to_csvs',
provide_context=True,
python_callable=_convert_zips_to_csvs,
dag=dag
)
run_db_manager = PythonOperator(
task_id='run_db_manager',
provide_context=True,
python_callable=_run_db_manager,
dag=dag
)
upstream_map = (
(get_file_keys, bucket_watcher),
(download_zip_files, get_file_keys),
(convert_zips_to_csvs, download_zip_files),
(move_zip_files_to_archive, download_zip_files),
(run_db_manager, convert_zips_to_csvs),
)
_set_upstreams(upstream_map) | 0.761006 | 0.2627 |
from talon.voice import Context, Key, press
import talon.clip as clip
from ..utils import (
text,
parse_words,
parse_words_as_integer,
insert,
word,
join_words,
is_filetype,
)
from ..formatters import (
CAMELCASE,
formatted_text,
)
# File extensions that activate this voice-command context.
JS_EXTENSIONS = (".js", ".jsx", ".ts", ".vue")
context = Context("javascript", func=is_filetype(JS_EXTENSIONS))
def remove_spaces_around_dashes(m):
    """Join the recognized words and collapse any ' – ' (spaced en dash,
    as produced by dictation) into a bare hyphen before inserting."""
    joined = " ".join(parse_words(m))
    insert(joined.replace(" – ", "-"))
def CursorText(s):
    """Build keymap actions that type *s* minus its "{.}" marker, then
    step the cursor left back to where the marker was."""
    before, after = s.split("{.}", 1)
    moves = " ".join(["left"] * len(after))
    return [before + after, Key(moves)]
# Voice-command keymap: phrase pattern -> emitted text and/or Key actions.
# <dgndictation> captures free dictation, [...] marks optional words, and
# (a | b) marks spoken alternatives.
context.keymap(
    {
        "const [<dgndictation>]": ["const ", text],
        "let [<dgndictation>]": ["let ", text],
        "static": "static ",
        # vue stuff
        "(view | vee) component": ["vuecomp", Key('ctrl-space')],
        "(view | vee) flex": 'v-flex',
        "(view | vee) layout": 'v-layout',
        "(view | vee) for": 'v-for=""',
        "(view | vee) if": 'v-if=""',
        # brackets / blocks (editor auto-closes the pair)
        "args": ["(", Key("left")],
        "index": ["[", Key("left")],
        "block": ["{", Key("enter")],
        "empty array": "[",
        "empty object": "{",
        "function <dgndictation> [over]": [
            Key("function "),
            formatted_text(CAMELCASE),
            Key("() {"),
            Key("enter"),
        ],
        # statements
        "state return": "return ",
        "state constructor": "constructor ",
        "state if": ["if ()", Key("left")],
        "state else": " else ",
        "state else if": [" else if ()", Key("left")],
        "state while": ["while ()", Key("left")],
        "state for": ["for ()", Key("left")],
        "state switch": ["switch ()", Key("left")],
        "state case": ["case \nbreak;", Key("up")],
        "state goto": "goto ",
        "state important": "import ",
        "state class": "class ",
        "state extends": "extends ",
        "state super": "super",
        "comment js": "// ",
        "word no": "null",
        "arrow": " => ",
        "arrow (func | function)": "() => {}",
        "a sink": " async ",
        # operators
        "op (minus | subtract)": " - ",
        "op (plus | add)": " + ",
        "op (times | multiply)": " * ",
        "op divide": " / ",
        "op mod": " % ",
        "[op] (minus | subtract) equals": " -= ",
        "[op] (plus | add) equals": " += ",
        "[op] (times | multiply) equals": " *= ",
        "[op] divide equals": " /= ",
        "[op] mod equals": " %= ",
        "(op | is) greater [than]": " > ",
        "(op | is) less [than]": " < ",
        "(op | is) equal": " === ",
        "(op | is) not equal": " !== ",
        "(op | is) greater [than] or equal": " >= ",
        "(op | is) less [than] or equal": " <= ",
        "(op (power | exponent) | to the power [of])": " ** ",
        "op and": " && ",
        "op or": " || ",
    }
)
import talon.clip as clip
from ..utils import (
text,
parse_words,
parse_words_as_integer,
insert,
word,
join_words,
is_filetype,
)
from ..formatters import (
CAMELCASE,
formatted_text,
)
JS_EXTENSIONS = (".js", ".jsx", ".ts", ".vue")
context = Context("javascript", func=is_filetype(JS_EXTENSIONS))
def remove_spaces_around_dashes(m):
words = parse_words(m)
s = " ".join(words)
s = s.replace(" – ", "-")
insert(s)
def CursorText(s):
left, right = s.split("{.}", 1)
return [left + right, Key(" ".join(["left"] * len(right)))]
context.keymap(
{
"const [<dgndictation>]": ["const ", text],
"let [<dgndictation>]": ["let ", text],
"static": "static ",
# vue stuff
"(view | vee) component": ["vuecomp", Key('ctrl-space')],
"(view | vee) flex": 'v-flex',
"(view | vee) layout": 'v-layout',
"(view | vee) for": 'v-for=""',
"(view | vee) if": 'v-if=""',
"args": ["(", Key("left")],
"index": ["[", Key("left")],
"block": ["{", Key("enter")],
"empty array": "[",
"empty object": "{",
"function <dgndictation> [over]": [
Key("function "),
formatted_text(CAMELCASE),
Key("() {"),
Key("enter"),
],
"state return": "return ",
"state constructor": "constructor ",
"state if": ["if ()", Key("left")],
"state else": " else ",
"state else if": [" else if ()", Key("left")],
"state while": ["while ()", Key("left")],
"state for": ["for ()", Key("left")],
"state switch": ["switch ()", Key("left")],
"state case": ["case \nbreak;", Key("up")],
"state goto": "goto ",
"state important": "import ",
"state class": "class ",
"state extends": "extends ",
"state super": "super",
"comment js": "// ",
"word no": "null",
"arrow": " => ",
"arrow (func | function)": "() => {}",
"a sink": " async ",
"op (minus | subtract)": " - ",
"op (plus | add)": " + ",
"op (times | multiply)": " * ",
"op divide": " / ",
"op mod": " % ",
"[op] (minus | subtract) equals": " -= ",
"[op] (plus | add) equals": " += ",
"[op] (times | multiply) equals": " *= ",
"[op] divide equals": " /= ",
"[op] mod equals": " %= ",
"(op | is) greater [than]": " > ",
"(op | is) less [than]": " < ",
"(op | is) equal": " === ",
"(op | is) not equal": " !== ",
"(op | is) greater [than] or equal": " >= ",
"(op | is) less [than] or equal": " <= ",
"(op (power | exponent) | to the power [of])": " ** ",
"op and": " && ",
"op or": " || ",
}
) | 0.448185 | 0.385086 |
from sklearn.utils.validation import check_is_fitted
from sklearn.base import BaseEstimator
from ..base import TransformerResamplerMixin
from ..utils.validation import validate_params
from sklearn.utils.validation import check_array, column_or_1d
import numpy as np
class Resampler(BaseEstimator, TransformerResamplerMixin):
    """Data sampling transformer that returns a sampled numpy.ndarray.

    Parameters
    ----------
    period : int, default: 2
        The sampling period, i.e. one point every period will be kept.

    Examples
    --------
    >>> import pandas as pd
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from giotto.time_series import Resampler
    >>> # Create a noisy signal sampled
    >>> signal = np.asarray([np.sin(x /40) + np.random.random()
    ...                      for x in range(0, 300)])
    >>> plt.plot(signal)
    >>> plt.show()
    >>> # Set up the Resampler
    >>> period = 10
    >>> periodic_sampler = Resampler(period=period)
    >>> # Fit and transform the DataFrame
    >>> periodic_sampler.fit(signal)
    >>> signal_resampled = periodic_sampler.transform(signal)
    >>> plt.plot(signal_resampled)
    """
    # Hyperparameter spec consumed by validate_params: name -> [type, range].
    _hyperparameters = {'period': [int, (1, np.inf)]}

    def __init__(self, period=2):
        self.period = period

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged.

        This method is there to implement the usual scikit-learn API and hence
        work in pipelines.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Input data.

        y : None
            Ignored.

        Returns
        -------
        self : object
        """
        validate_params(self.get_params(), self._hyperparameters)
        check_array(X, ensure_2d=False)
        self._is_fitted = True
        return self

    def transform(self, X, y=None):
        """Transform/resample X.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Input data.

        y : None
            There is no need of a target, yet the pipeline API
            requires this parameter.

        Returns
        -------
        Xt : ndarray, shape (n_samples_new, n_features)
            The transformed/resampled input array. ``n_samples_new =
            n_samples // period``.
        """
        # Check if fit had been called
        check_is_fitted(self, ['_is_fitted'])
        Xt = check_array(X, ensure_2d=False)
        # Keep one sample out of every `period`.
        return Xt[::self.period]

    def resample(self, y, X=None):
        """Resample y.

        Parameters
        ----------
        y : ndarray, shape (n_samples, n_features)
            Target.

        X : None
            There is no need of input data,
            yet the pipeline API requires this parameter.

        Returns
        -------
        yt : ndarray, shape (n_samples_new, 1)
            The resampled target. ``n_samples_new = n_samples // period``.
        """
        # Check if fit had been called
        check_is_fitted(self, ['_is_fitted'])
        y = column_or_1d(y)
        return y[::self.period]
class Stationarizer(BaseEstimator, TransformerResamplerMixin):
    """Data sampling transformer that returns numpy.ndarray.

    Parameters
    ----------
    operation : ``'return'`` | ``'log-return'``, default: ``'return'``
        The type of stationarization operation with which to stationarize
        the time series. It can have two values:

        - ``'return'``:
          This option transforms the time series :math:`{X_t}_t` into the
          time series of relative returns, i.e. the ratio :math:`(X_t-X_{
          t-1})/X_t`.

        - ``'log-return'``:
          This option transforms the time series :math:`{X_t}_t` into the
          time series of relative log-returns, i.e. :math:`\\log(X_t/X_{
          t-1})`.

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from giotto.time_series import Stationarizer
    >>> # Create a noisy signal sampled
    >>> signal = np.asarray([np.sin(x /40) + 5 + np.random.random()
    ...                      for x in range(0, 300)]).reshape(-1, 1)
    >>> plt.plot(signal)
    >>> plt.show()
    >>> # Initialize the stationarizer
    >>> stationarizer = Stationarizer(operation='return')
    >>> stationarizer.fit(signal)
    >>> signal_stationarized = stationarizer.transform(signal)
    >>> plt.plot(signal_stationarized)
    """
    # Hyperparameter spec consumed by validate_params: name -> [type, values].
    _hyperparameters = {'operation': [str, ['return', 'log-return']]}

    def __init__(self, operation='return'):
        self.operation = operation

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged.

        This method is there to implement the usual scikit-learn API and hence
        work in pipelines.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Input data.

        y : None
            Ignored.

        Returns
        -------
        self : object
        """
        validate_params(self.get_params(), self._hyperparameters)
        check_array(X, ensure_2d=False)
        self._is_fitted = True
        return self

    def transform(self, X, y=None):
        """Transform/resample X.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Input data.

        y : None
            There is no need of a target, yet the pipeline API
            requires this parameter.

        Returns
        -------
        Xt : ndarray, shape (n_samples_new, n_features)
            The transformed/resampled input array. ``n_samples_new =
            n_samples - 1``.
        """
        # Check if fit had been called
        check_is_fitted(self, ['_is_fitted'])
        X = check_array(X, ensure_2d=False)
        if self.operation == 'return':
            # relative returns: (X_t - X_{t-1}) / X_t
            return np.diff(X, n=1, axis=0) / X[1:]
        else:  # 'log-return' operation
            return np.diff(np.log(X), n=1, axis=0)

    def resample(self, y, X=None):
        """Resample y.

        Parameters
        ----------
        y : ndarray, shape (n_samples, n_features)
            Target.

        X : None
            There is no need of input data,
            yet the pipeline API requires this parameter.

        Returns
        -------
        yt : ndarray, shape (n_samples_new, 1)
            The resampled target. ``n_samples_new = n_samples - 1``.
        """
        # Check if fit had been called
        check_is_fitted(self, ['_is_fitted'])
        y = column_or_1d(y)
        # Drop the first sample to stay aligned with the differenced X.
        return y[1:]
from sklearn.utils.validation import check_is_fitted
from sklearn.base import BaseEstimator
from ..base import TransformerResamplerMixin
from ..utils.validation import validate_params
from sklearn.utils.validation import check_array, column_or_1d
import numpy as np
class Resampler(BaseEstimator, TransformerResamplerMixin):
"""Data sampling transformer that returns a sampled numpy.ndarray.
Parameters
----------
period : int, default: 2
The sampling period, i.e. one point every period will be kept.
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from giotto.time_series import Resampler
>>> # Create a noisy signal sampled
>>> signal = np.asarray([np.sin(x /40) + np.random.random()
... for x in range(0, 300)])
>>> plt.plot(signal)
>>> plt.show()
>>> # Set up the Resampler
>>> period = 10
>>> periodic_sampler = Resampler(period=period)
>>> # Fit and transform the DataFrame
>>> periodic_sampler.fit(signal)
>>> signal_resampled = periodic_sampler.transform(signal)
>>> plt.plot(signal_resampled)
"""
_hyperparameters = {'period': [int, (1, np.inf)]}
def __init__(self, period=2):
self.period = period
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged.
This method is there to implement the usual scikit-learn API and hence
work in pipelines.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Input data.
y : None
Ignored.
Returns
-------
self : object
"""
validate_params(self.get_params(), self._hyperparameters)
check_array(X, ensure_2d=False)
self._is_fitted = True
return self
def transform(self, X, y=None):
"""Transform/resample X.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Input data. ``
y : None
There is no need of a target, yet the pipeline API
requires this parameter.
Returns
-------
Xt : ndarray, shape (n_samples_new, n_features)
The transformed/resampled input array. ``n_samples_new =
n_samples // period``.
"""
# Check if fit had been called
check_is_fitted(self, ['_is_fitted'])
Xt = check_array(X, ensure_2d=False)
return Xt[::self.period]
def resample(self, y, X=None):
"""Resample y.
Parameters
----------
y : ndarray, shape (n_samples, n_features)
Target.
X : None
There is no need of input data,
yet the pipeline API requires this parameter.
Returns
-------
yt : ndarray, shape (n_samples_new, 1)
The resampled target. ``n_samples_new = n_samples // period``.
"""
# Check if fit had been called
check_is_fitted(self, ['_is_fitted'])
y = column_or_1d(y)
return y[::self.period]
class Stationarizer(BaseEstimator, TransformerResamplerMixin):
"""Data sampling transformer that returns numpy.ndarray.
Parameters
----------
operation : ``'return'`` | ``'log-return'``, default: ``'return'``
The type of stationarization operation with which to stationarize
the time series. It can have two values:
- ``'return'``:
This option transforms the time series :math:`{X_t}_t` into the
time series of relative returns, i.e. the ratio :math:`(X_t-X_{
t-1})/X_t`.
- ``'log-return'``:
This option transforms the time series :math:`{X_t}_t` into the
time series of relative log-returns, i.e. :math:`\\log(X_t/X_{
t-1})`.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from giotto.time_series import Stationarizer
>>> # Create a noisy signal sampled
>>> signal = np.asarray([np.sin(x /40) + 5 + np.random.random()
>>> for x in range(0, 300)]).reshape(-1, 1)
>>> plt.plot(signal)
>>> plt.show()
>>> # Initialize the stationarizer
>>> stationarizer = Stationarizer(stationarization_type='return')
>>> stationarizer.fit(signal)
>>> signal_stationarized = stationarizer.transform(signal)
>>> plt.plot(signal_stationarized)
"""
_hyperparameters = {'operation': [str, ['return', 'log-return']]}
def __init__(self, operation='return'):
self.operation = operation
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged.
This method is there to implement the usual scikit-learn API and hence
work in pipelines.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Input data.
y : None
Ignored.
Returns
-------
self : object
"""
validate_params(self.get_params(), self._hyperparameters)
check_array(X, ensure_2d=False)
self._is_fitted = True
return self
def transform(self, X, y=None):
"""Transform/resample X.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Input data.
y : None
There is no need of a target, yet the pipeline API
requires this parameter.
Returns
-------
Xt : ndarray, shape (n_samples_new, n_features)
The transformed/resampled input array. ``n_samples_new =
n_samples - 1``.
"""
# Check if fit had been called
check_is_fitted(self, ['_is_fitted'])
X = check_array(X, ensure_2d=False)
if self.operation == 'return':
return np.diff(X, n=1, axis=0) / X[1:]
else: # 'log-return' operation
return np.diff(np.log(X), n=1, axis=0)
def resample(self, y, X=None):
"""Resample y.
Parameters
----------
y : ndarray, shape (n_samples, n_features)
Target.
X : None
There is no need of input data,
yet the pipeline API requires this parameter.
Returns
-------
yt : ndarray, shape (n_samples_new, 1)
The resampled target. ``n_samples_new = n_samples - 1``.
"""
# Check if fit had been called
check_is_fitted(self, ['_is_fitted'])
y = column_or_1d(y)
return y[1:] | 0.960259 | 0.830319 |
begin_unit
comment|'# Copyright 2010 United States Government as represented by the'
nl|'\n'
comment|'# Administrator of the National Aeronautics and Space Administration.'
nl|'\n'
comment|'# Copyright 2011 <NAME>'
nl|'\n'
comment|'# All Rights Reserved.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
comment|'# NOTE(markmc): this is imported before monkey patching in nova.cmd'
nl|'\n'
comment|'# so we avoid extra imports here'
nl|'\n'
nl|'\n'
name|'import'
name|'sys'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|enabled
name|'def'
name|'enabled'
op|'('
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'('
string|"'--remote_debug-host'"
name|'in'
name|'sys'
op|'.'
name|'argv'
name|'and'
nl|'\n'
string|"'--remote_debug-port'"
name|'in'
name|'sys'
op|'.'
name|'argv'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|init
dedent|''
name|'def'
name|'init'
op|'('
op|')'
op|':'
newline|'\n'
indent|' '
name|'import'
name|'nova'
op|'.'
name|'conf'
newline|'\n'
name|'CONF'
op|'='
name|'nova'
op|'.'
name|'conf'
op|'.'
name|'CONF'
newline|'\n'
nl|'\n'
comment|'# NOTE(markmc): gracefully handle the CLI options not being registered'
nl|'\n'
name|'if'
string|"'remote_debug'"
name|'not'
name|'in'
name|'CONF'
op|':'
newline|'\n'
indent|' '
name|'return'
newline|'\n'
nl|'\n'
dedent|''
name|'if'
name|'not'
op|'('
name|'CONF'
op|'.'
name|'remote_debug'
op|'.'
name|'host'
name|'and'
name|'CONF'
op|'.'
name|'remote_debug'
op|'.'
name|'port'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
newline|'\n'
nl|'\n'
dedent|''
name|'import'
name|'logging'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'i18n'
name|'import'
name|'_LW'
newline|'\n'
name|'LOG'
op|'='
name|'logging'
op|'.'
name|'getLogger'
op|'('
name|'__name__'
op|')'
newline|'\n'
nl|'\n'
name|'LOG'
op|'.'
name|'debug'
op|'('
string|"'Listening on %(host)s:%(port)s for debug connection'"
op|','
nl|'\n'
op|'{'
string|"'host'"
op|':'
name|'CONF'
op|'.'
name|'remote_debug'
op|'.'
name|'host'
op|','
nl|'\n'
string|"'port'"
op|':'
name|'CONF'
op|'.'
name|'remote_debug'
op|'.'
name|'port'
op|'}'
op|')'
newline|'\n'
nl|'\n'
name|'try'
op|':'
newline|'\n'
indent|' '
name|'from'
name|'pydev'
name|'import'
name|'pydevd'
newline|'\n'
dedent|''
name|'except'
name|'ImportError'
op|':'
newline|'\n'
indent|' '
name|'import'
name|'pydevd'
newline|'\n'
dedent|''
name|'pydevd'
op|'.'
name|'settrace'
op|'('
name|'host'
op|'='
name|'CONF'
op|'.'
name|'remote_debug'
op|'.'
name|'host'
op|','
nl|'\n'
name|'port'
op|'='
name|'CONF'
op|'.'
name|'remote_debug'
op|'.'
name|'port'
op|','
nl|'\n'
name|'stdoutToServer'
op|'='
name|'False'
op|','
nl|'\n'
name|'stderrToServer'
op|'='
name|'False'
op|')'
newline|'\n'
nl|'\n'
name|'LOG'
op|'.'
name|'warning'
op|'('
name|'_LW'
op|'('
string|"'WARNING: Using the remote debug option changes how '"
nl|'\n'
string|"'Nova uses the eventlet library to support async IO. This '"
nl|'\n'
string|"'could result in failures that do not occur under normal '"
nl|'\n'
string|"'operation. Use at your own risk.'"
op|')'
op|')'
newline|'\n'
dedent|''
endmarker|''
end_unit | nova/debugger.py | begin_unit
comment|'# Copyright 2010 United States Government as represented by the'
nl|'\n'
comment|'# Administrator of the National Aeronautics and Space Administration.'
nl|'\n'
comment|'# Copyright 2011 <NAME>'
nl|'\n'
comment|'# All Rights Reserved.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
comment|'# NOTE(markmc): this is imported before monkey patching in nova.cmd'
nl|'\n'
comment|'# so we avoid extra imports here'
nl|'\n'
nl|'\n'
name|'import'
name|'sys'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|enabled
name|'def'
name|'enabled'
op|'('
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'('
string|"'--remote_debug-host'"
name|'in'
name|'sys'
op|'.'
name|'argv'
name|'and'
nl|'\n'
string|"'--remote_debug-port'"
name|'in'
name|'sys'
op|'.'
name|'argv'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|init
dedent|''
name|'def'
name|'init'
op|'('
op|')'
op|':'
newline|'\n'
indent|' '
name|'import'
name|'nova'
op|'.'
name|'conf'
newline|'\n'
name|'CONF'
op|'='
name|'nova'
op|'.'
name|'conf'
op|'.'
name|'CONF'
newline|'\n'
nl|'\n'
comment|'# NOTE(markmc): gracefully handle the CLI options not being registered'
nl|'\n'
name|'if'
string|"'remote_debug'"
name|'not'
name|'in'
name|'CONF'
op|':'
newline|'\n'
indent|' '
name|'return'
newline|'\n'
nl|'\n'
dedent|''
name|'if'
name|'not'
op|'('
name|'CONF'
op|'.'
name|'remote_debug'
op|'.'
name|'host'
name|'and'
name|'CONF'
op|'.'
name|'remote_debug'
op|'.'
name|'port'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
newline|'\n'
nl|'\n'
dedent|''
name|'import'
name|'logging'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'i18n'
name|'import'
name|'_LW'
newline|'\n'
name|'LOG'
op|'='
name|'logging'
op|'.'
name|'getLogger'
op|'('
name|'__name__'
op|')'
newline|'\n'
nl|'\n'
name|'LOG'
op|'.'
name|'debug'
op|'('
string|"'Listening on %(host)s:%(port)s for debug connection'"
op|','
nl|'\n'
op|'{'
string|"'host'"
op|':'
name|'CONF'
op|'.'
name|'remote_debug'
op|'.'
name|'host'
op|','
nl|'\n'
string|"'port'"
op|':'
name|'CONF'
op|'.'
name|'remote_debug'
op|'.'
name|'port'
op|'}'
op|')'
newline|'\n'
nl|'\n'
name|'try'
op|':'
newline|'\n'
indent|' '
name|'from'
name|'pydev'
name|'import'
name|'pydevd'
newline|'\n'
dedent|''
name|'except'
name|'ImportError'
op|':'
newline|'\n'
indent|' '
name|'import'
name|'pydevd'
newline|'\n'
dedent|''
name|'pydevd'
op|'.'
name|'settrace'
op|'('
name|'host'
op|'='
name|'CONF'
op|'.'
name|'remote_debug'
op|'.'
name|'host'
op|','
nl|'\n'
name|'port'
op|'='
name|'CONF'
op|'.'
name|'remote_debug'
op|'.'
name|'port'
op|','
nl|'\n'
name|'stdoutToServer'
op|'='
name|'False'
op|','
nl|'\n'
name|'stderrToServer'
op|'='
name|'False'
op|')'
newline|'\n'
nl|'\n'
name|'LOG'
op|'.'
name|'warning'
op|'('
name|'_LW'
op|'('
string|"'WARNING: Using the remote debug option changes how '"
nl|'\n'
string|"'Nova uses the eventlet library to support async IO. This '"
nl|'\n'
string|"'could result in failures that do not occur under normal '"
nl|'\n'
string|"'operation. Use at your own risk.'"
op|')'
op|')'
newline|'\n'
dedent|''
endmarker|''
end_unit | 0.533641 | 0.069795 |
from flask import url_for
from tests.test_auth import login, logout
from ygq import Dish, User, db, Order
def test_index_page(client):
    """Shop index page shows the shop name and its dish."""
    rv = client.get(url_for('shop.index', shop_id=1))
    assert b'FzuHotel' in rv.data
    assert b'FoTiaoQiang' in rv.data
def test_show_orders(client):
    """Order list shows the dish name and its price*quantity line."""
    rv = client.get(url_for('shop.show_orders', shop_id=1))
    assert b'FoTiaoQiang' in rv.data
    assert b'998*1' in rv.data
def test_apply2shop(client):
    """A confirmed, logged-in user can register a new shop."""
    # Confirm user #2 first so the shop application is allowed.
    user = User.query.get(2)
    user.confirmed = True
    db.session.commit()
    login(client, email='<EMAIL>', password="<PASSWORD>")
    rv = client.post(url_for('shop.apply2shop', username='unconfirmed'), data=dict(
        name='ShaXianHotel',
        location_x='100',
        location_y='100',
        tel='98765432100'
    ), follow_redirects=True)
    assert rv.status_code == 200
    assert b'Shop published.' in rv.data
def test_delete_dish(client):
    """Only the shop owner may delete a dish; a non-owner gets 403."""
    login(client, email='<EMAIL>', password="<PASSWORD>")
    rv = client.post(url_for('shop.delete_dish', dish_id=1), follow_redirects=True)
    assert rv.status_code == 403
    assert b'Forbidden' in rv.data
    logout(client)
    # The owner (default login) succeeds and the dish is gone.
    login(client)
    rv = client.post(url_for('shop.delete_dish', dish_id=1), follow_redirects=True)
    assert rv.status_code == 200
    assert b'Dish deleted.' in rv.data
    assert Dish.query.get(1) is None
def test_new_dish(client):
    """Only the shop owner may publish a new dish; a non-owner gets 403."""
    login(client, email='<EMAIL>', password="<PASSWORD>")
    rv = client.post(url_for('shop.new_dish', shop_id=1), data=dict(
        price=100,
        description='form.description.data',
        shop_id=1,
        prepare_time=20,
        name='form.name.data'
    ), follow_redirects=True)
    assert rv.status_code == 403
    assert b'Forbidden' in rv.data
    logout(client)
    # The owner (default login) succeeds; the dish gets id 2.
    login(client)
    rv = client.post(url_for('shop.new_dish', shop_id=1), data=dict(
        price=100,
        description='form.description.data',
        shop_id=1,
        prepare_time=20,
        name='test'
    ), follow_redirects=True)
    assert rv.status_code == 200
    assert b'Dish published.' in rv.data
    assert Dish.query.get(2).name == 'test'
def test_edit_description(client):
    """Only the shop owner may edit a dish description; a non-owner gets 403."""
    assert Dish.query.get(1).description == '佛跳墙又名福寿全,是福建省福州市的一道特色名菜,属闽菜系。'
    login(client, email='<EMAIL>', password="<PASSWORD>")
    rv = client.post(url_for('shop.edit_description', dish_id=1), data=dict(
        description='test description.'
    ), follow_redirects=True)
    assert rv.status_code == 403
    assert b'Forbidden' in rv.data
    logout(client)
    # The owner (default login) succeeds and the description is persisted.
    login(client)
    rv = client.post(url_for('shop.edit_description', dish_id=1), data=dict(
        description='test description.'
    ), follow_redirects=True)
    # Consistency fix: every sibling test also asserts 200 on the success path.
    assert rv.status_code == 200
    assert b'Description updated.' in rv.data
    assert Dish.query.get(1).description == 'test description.'
def test_new_tag(client):
    """Only the shop owner may add tags; duplicates are de-duplicated."""
    assert Dish.query.get(1).tags[0].name == 'test tag'
    login(client, email='<EMAIL>', password="<PASSWORD>")
    rv = client.post(url_for('shop.new_tag', dish_id=1), data=dict(
        tag='test test hello world '
    ), follow_redirects=True)
    assert rv.status_code == 403
    assert b'Forbidden' in rv.data
    logout(client)
    # The owner succeeds; 'test test' collapses to one 'test' tag.
    login(client)
    rv = client.post(url_for('shop.new_tag', dish_id=1), data=dict(
        tag='test test hello world '
    ), follow_redirects=True)
    assert rv.status_code == 200
    assert b'Tag added.' in rv.data
    assert Dish.query.get(1).tags[1].name == 'test'
    assert Dish.query.get(1).tags[2].name == 'hello'
    assert Dish.query.get(1).tags[3].name == 'world'
def test_finish_order(client):
    """Only the shop owner may mark an order as prepared."""
    login(client, email='<EMAIL>', password="<PASSWORD>")
    rv = client.post(url_for('shop.finish_order', order_id=1), follow_redirects=True)
    assert rv.status_code == 403
    assert b'Forbidden' in rv.data
    logout(client)
    login(client)
    client.post(url_for('shop.finish_order', order_id=1), follow_redirects=True)
    assert Order.query.get(1).is_prepared
from tests.test_auth import login, logout
from ygq import Dish, User, db, Order
def test_index_page(client):
rv = client.get(url_for('shop.index', shop_id=1))
assert b'FzuHotel' in rv.data
assert b'FoTiaoQiang' in rv.data
def test_show_orders(client):
rv = client.get(url_for('shop.show_orders', shop_id=1))
assert b'FoTiaoQiang' in rv.data
assert b'998*1' in rv.data
def test_apply2shop(client):
user = User.query.get(2)
user.confirmed = True
db.session.commit()
login(client, email='<EMAIL>', password="<PASSWORD>")
rv = client.post(url_for('shop.apply2shop', username='unconfirmed'), data=dict(
name='ShaXianHotel',
location_x='100',
location_y='100',
tel='98765432100'
), follow_redirects=True)
assert rv.status_code == 200
assert b'Shop published.' in rv.data
def test_delete_dish(client):
login(client, email='<EMAIL>', password="<PASSWORD>")
rv = client.post(url_for('shop.delete_dish', dish_id=1), follow_redirects=True)
assert rv.status_code == 403
assert b'Forbidden' in rv.data
logout(client)
login(client)
rv = client.post(url_for('shop.delete_dish', dish_id=1), follow_redirects=True)
assert rv.status_code == 200
assert b'Dish deleted.' in rv.data
assert Dish.query.get(1) is None
def test_new_dish(client):
login(client, email='<EMAIL>', password="<PASSWORD>")
rv = client.post(url_for('shop.new_dish', shop_id=1), data=dict(
price=100,
description='form.description.data',
shop_id=1,
prepare_time=20,
name='form.name.data'
), follow_redirects=True)
assert rv.status_code == 403
assert b'Forbidden' in rv.data
logout(client)
login(client)
rv = client.post(url_for('shop.new_dish', shop_id=1), data=dict(
price=100,
description='form.description.data',
shop_id=1,
prepare_time=20,
name='test'
), follow_redirects=True)
assert rv.status_code == 200
assert b'Dish published.' in rv.data
assert Dish.query.get(2).name == 'test'
def test_edit_description(client):
assert Dish.query.get(1).description == '佛跳墙又名福寿全,是福建省福州市的一道特色名菜,属闽菜系。'
login(client, email='<EMAIL>', password="<PASSWORD>")
rv = client.post(url_for('shop.edit_description', dish_id=1), data=dict(
description='test description.'
), follow_redirects=True)
assert rv.status_code == 403
assert b'Forbidden' in rv.data
logout(client)
login(client)
rv = client.post(url_for('shop.edit_description', dish_id=1), data=dict(
description='test description.'
), follow_redirects=True)
assert b'Description updated.' in rv.data
assert Dish.query.get(1).description == 'test description.'
def test_new_tag(client):
assert Dish.query.get(1).tags[0].name == 'test tag'
login(client, email='<EMAIL>', password="<PASSWORD>")
rv = client.post(url_for('shop.new_tag', dish_id=1), data=dict(
tag='test test hello world '
), follow_redirects=True)
assert rv.status_code == 403
assert b'Forbidden' in rv.data
logout(client)
login(client)
rv = client.post(url_for('shop.new_tag', dish_id=1), data=dict(
tag='test test hello world '
), follow_redirects=True)
assert rv.status_code == 200
assert b'Tag added.' in rv.data
assert Dish.query.get(1).tags[1].name == 'test'
assert Dish.query.get(1).tags[2].name == 'hello'
assert Dish.query.get(1).tags[3].name == 'world'
def test_finish_order(client):
login(client, email='<EMAIL>', password="<PASSWORD>")
rv = client.post(url_for('shop.finish_order', order_id=1), follow_redirects=True)
assert rv.status_code == 403
assert b'Forbidden' in rv.data
logout(client)
login(client)
client.post(url_for('shop.finish_order', order_id=1), follow_redirects=True)
assert Order.query.get(1).is_prepared | 0.424412 | 0.404213 |
import pywind.lib.reader as reader
import pywind.web.lib.httpchunked as httpchunked
import sys, traceback
class wsgi(object):
    """Bridge one CGI-style request to a WSGI application.

    Builds the WSGI environ from a CGI environment, feeds request-body
    bytes in via input(), and pumps the application's response out through
    the header/body/finish callbacks supplied to __init__.
    """
    __app = None  # WSGI app callable; rebound in __init__ to its response iterable
    __is_finish = False  # response complete; further input is discarded
    __reader = None  # buffered request-body reader, exposed as wsgi.input
    __output_body_func = None  # callback(bytes): write response body data
    __output_hdr_func = None  # callback(status, headers): write response header
    __finish_func = None  # callback(): signal that the response is done
    __resp_status = None  # status line (presumably captured by __start_response — not fully visible)
    __resp_headers = None  # header list (presumably captured by __start_response — not fully visible)
    __resp_stcode = 0  # numeric HTTP status parsed in __start_response
    __is_resp_hdr = False  # header already emitted via __output_hdr_func
    __has_hdr = None  # NOTE(review): read in handle() but never assigned in visible code — confirm
    __is_chunked = False  # response body uses chunked transfer-encoding
    __chunked = None  # httpchunked parser (NOTE(review): its creation is not visible here)
    __resp_content_length = 0  # declared response Content-Length
    __responsed_content_length = 0  # response-body bytes emitted so far
    __recv_length = 0  # expected request-body size (CONTENT_LENGTH)
    __received_length = 0  # request-body bytes buffered so far
    def __init__(self, application, cgi_env, output_hdr_func, output_body_func, finish_func):
        """Set up one request cycle and invoke the WSGI application.

        :param application: WSGI application callable
        :param cgi_env: CGI environment dict for this request
        :param output_hdr_func: callback(status, headers) that writes the response header
        :param output_body_func: callback(bytes) that writes response body data
        :param finish_func: callback() invoked when the response is complete
        """
        self.__app = application
        self.__reader = reader.reader()
        self.__output_hdr_func = output_hdr_func
        self.__output_body_func = output_body_func
        self.__finish_func = finish_func
        wsgi_env = self.__convert2wsgi_env(cgi_env)
        # Expected request-body size; input() discards bytes beyond it.
        # NOTE(review): assumes CONTENT_LENGTH is always present — confirm upstream.
        self.__recv_length = int(wsgi_env["CONTENT_LENGTH"])
        try:
            # Rebind __app to the application's response iterable.
            self.__app = application(wsgi_env, self.__start_response)
        except:
            self.__handle_error("500 Internal Server Error", [], traceback.format_exc())
            return
def __convert2wsgi_env(self, cgi_env):
wsgi_env = cgi_env
wsgi_env["wsgi.version"] = (1, 0,)
wsgi_env["wsgi.errors"] = sys.stderr
wsgi_env["wsgi.multithread"] = False
wsgi_env['wsgi.multiprocess'] = True
wsgi_env['wsgi.run_once'] = True
wsgi_env["wsgi.input"] = self.__reader
if cgi_env.get('HTTPS', 'off') in ('on', '1'):
cgi_env['wsgi.url_scheme'] = 'https'
else:
cgi_env['wsgi.url_scheme'] = 'http'
if "PATH_INFO" not in wsgi_env:
pos = wsgi_env["REQUEST_URI"].find("?")
if pos < 0:
wsgi_env["PATH_INFO"] = wsgi_env["REQUEST_URI"]
else:
wsgi_env["PATH_INFO"] = wsgi_env["REQUEST_URI"][0:pos]
''''''
return wsgi_env
def __handle_error(self, status, resp_headers, err_data=""):
if self.__is_resp_hdr:
sys.stderr.write(err_data.decode())
self.__finish_func()
return
self.__response_error(status, resp_headers, err_data)
def __response_error(self, status, resp_headers, resp_data=""):
self.__is_resp_hdr = True
self.__is_finish = True
byte_data = resp_data.encode()
resp_headers += [
("Content-Length", len(byte_data),),
("Content-Type", "text/plain;charset=utf-8",),
]
self.__output_hdr_func(status, resp_headers)
self.__output_body_func(byte_data)
self.__finish_func()
    def __response_body(self, body_data):
        """Forward one piece of the application's response body.

        With a Content-Length response, only the remaining declared bytes
        are sent; once the declared length is reached the response is
        marked finished.  With a chunked response the data is run through
        the httpchunked parser and the decoded payload is forwarded.

        :param body_data: bytes produced by the WSGI application iterable
        """
        if not self.__is_chunked:
            # Remaining bytes we are still allowed to send.
            n = self.__resp_content_length - self.__responsed_content_length
            resp_data = body_data[0:n]
            self.__responsed_content_length += len(resp_data)
            self.__output_body_func(resp_data)
            if self.__resp_content_length == self.__responsed_content_length: self.__is_finish = True
            return
        self.__chunked.input(body_data)
        try:
            self.__chunked.parse()
        except httpchunked.chunkedErr:
            self.__handle_error("500 Internal Server Error", [], traceback.format_exc())
            return
        chunk_data = self.__chunked.get_chunk_with_length()
        # is_ok() signals that the terminating chunk has been parsed.
        if self.__chunked.is_ok(): self.__is_finish = True
        if not chunk_data: return
        self.__output_body_func(chunk_data)
def finish(self):
try:
if hasattr(self.__app, "close"): self.__app.close()
except:
self.__handle_error("500 Internal Server Error", [], traceback.format_exc())
def input(self, byte_data):
# 如果响应结束,那么丢弃所有的数据包
if self.__is_finish: return
rsize = self.__recv_length - self.__received_length
byte_data = byte_data[0:rsize]
self.__recv_length += len(byte_data)
self.__reader._putvalue(byte_data)
    def handle(self):
        """Drive the WSGI application iterable and emit the response.

        Iterates the application output, sending the stored status/headers
        before the first body piece; finishes immediately if the response
        was already completed (e.g. a >=300 short-circuit).
        """
        if self.__is_finish:
            # Response already complete; make sure headers went out,
            # then finish.
            if not self.__is_resp_hdr:
                self.__output_hdr_func(self.__resp_status, self.__resp_headers)
                self.__is_resp_hdr = True
            self.__finish_func()
            return
        try:
            for resp_data in self.__app:
                # Discard body pieces produced before start_response
                # stored the headers.
                if not self.__has_hdr: continue
                if not self.__is_resp_hdr:
                    self.__output_hdr_func(self.__resp_status, self.__resp_headers)
                    self.__is_resp_hdr = True
                self.__response_body(resp_data)
        except:
            self.__handle_error("500 Internal Server Error", [], traceback.format_exc())
            return
        if self.__is_finish: self.__finish_func()
        return
    def __start_response(self, status, response_headers, exc_info=None):
        """WSGI start_response callable handed to the application.

        Stores the status and headers for later emission, or responds
        immediately for 1xx and >=300 statuses.  Also detects the body
        framing (Content-Length vs. chunked) from the response headers.

        :param status: HTTP status line; first three chars must be the code
        :param response_headers: list of (name, value) header tuples
        :param exc_info: accepted for WSGI compatibility; unused here
        """
        try:
            self.__resp_stcode = int(status[0:3])
        except ValueError:
            self.__handle_error("500 Internal Server Error", [], traceback.format_exc())
            return
        if self.__resp_stcode < 100:
            self.__handle_error("500 Internal Server Error", [], "wrong http status code %s" % self.__resp_stcode)
            return
        if self.__resp_stcode >= 100 and self.__resp_stcode < 200:
            # Informational responses are forwarded immediately; the final
            # response is still expected afterwards.
            self.__output_hdr_func(status, response_headers)
            return
        if self.__is_resp_hdr:
            self.__handle_error("500 Internal Server Error", [], "http master has responsed!")
            return
        if self.__resp_stcode >= 300:
            # Redirects/errors are emitted header-only and end the response.
            self.__output_hdr_func(status, response_headers, )
            self.__is_finish = True
            self.__is_resp_hdr = True
            return
        self.__resp_status = status
        self.__resp_headers = response_headers
        self.__has_hdr = True
        # Decide how the body will be framed.  NOTE(review): both branches
        # break, so whichever of Content-Length / Transfer-Encoding appears
        # first in the list wins -- confirm that is intended.
        for name, value in response_headers:
            name = name.lower()
            if name == "content-length":
                try:
                    self.__resp_content_length = int(value)
                except ValueError:
                    self.__response_error("500 Internal Server Error", [], traceback.format_exc())
                    return
                break
            if name == "transfer-encoding" and value.lower() == "chunked":
                self.__is_chunked = True
                self.__chunked = httpchunked.parser()
                break
        if self.__is_chunked: return
        if self.__resp_content_length == 0: self.__is_finish = True
import pywind.web.lib.httpchunked as httpchunked
import sys, traceback
class wsgi(object):
__app = None
__is_finish = False
__reader = None
__output_body_func = None
__output_hdr_func = None
__finish_func = None
__resp_status = None
__resp_headers = None
__resp_stcode = 0
__is_resp_hdr = False
__has_hdr = None
__is_chunked = False
__chunked = None
__resp_content_length = 0
__responsed_content_length = 0
__recv_length = 0
__received_length = 0
def __init__(self, application, cgi_env, output_hdr_func, output_body_func, finish_func):
"""
:param application:
:param caddr: client address
:param cgi_env: cgi env
:param output_hdr_func :hdr function
:param output_body_func: body function
:param finish_func : finish function
"""
self.__app = application
self.__reader = reader.reader()
self.__output_hdr_func = output_hdr_func
self.__output_body_func = output_body_func
self.__finish_func = finish_func
wsgi_env = self.__convert2wsgi_env(cgi_env)
self.__recv_length = int(wsgi_env["CONTENT_LENGTH"])
try:
self.__app = application(wsgi_env, self.__start_response)
except:
self.__handle_error("500 Internal Server Error", [], traceback.format_exc())
return
def __convert2wsgi_env(self, cgi_env):
wsgi_env = cgi_env
wsgi_env["wsgi.version"] = (1, 0,)
wsgi_env["wsgi.errors"] = sys.stderr
wsgi_env["wsgi.multithread"] = False
wsgi_env['wsgi.multiprocess'] = True
wsgi_env['wsgi.run_once'] = True
wsgi_env["wsgi.input"] = self.__reader
if cgi_env.get('HTTPS', 'off') in ('on', '1'):
cgi_env['wsgi.url_scheme'] = 'https'
else:
cgi_env['wsgi.url_scheme'] = 'http'
if "PATH_INFO" not in wsgi_env:
pos = wsgi_env["REQUEST_URI"].find("?")
if pos < 0:
wsgi_env["PATH_INFO"] = wsgi_env["REQUEST_URI"]
else:
wsgi_env["PATH_INFO"] = wsgi_env["REQUEST_URI"][0:pos]
''''''
return wsgi_env
def __handle_error(self, status, resp_headers, err_data=""):
if self.__is_resp_hdr:
sys.stderr.write(err_data.decode())
self.__finish_func()
return
self.__response_error(status, resp_headers, err_data)
def __response_error(self, status, resp_headers, resp_data=""):
self.__is_resp_hdr = True
self.__is_finish = True
byte_data = resp_data.encode()
resp_headers += [
("Content-Length", len(byte_data),),
("Content-Type", "text/plain;charset=utf-8",),
]
self.__output_hdr_func(status, resp_headers)
self.__output_body_func(byte_data)
self.__finish_func()
def __response_body(self, body_data):
if not self.__is_chunked:
n = self.__resp_content_length - self.__responsed_content_length
resp_data = body_data[0:n]
self.__responsed_content_length += len(resp_data)
self.__output_body_func(resp_data)
if self.__resp_content_length == self.__responsed_content_length: self.__is_finish = True
return
self.__chunked.input(body_data)
try:
self.__chunked.parse()
except httpchunked.chunkedErr:
self.__handle_error("500 Internal Server Error", [], traceback.format_exc())
return
chunk_data = self.__chunked.get_chunk_with_length()
if self.__chunked.is_ok(): self.__is_finish = True
if not chunk_data: return
self.__output_body_func(chunk_data)
def finish(self):
try:
if hasattr(self.__app, "close"): self.__app.close()
except:
self.__handle_error("500 Internal Server Error", [], traceback.format_exc())
def input(self, byte_data):
# 如果响应结束,那么丢弃所有的数据包
if self.__is_finish: return
rsize = self.__recv_length - self.__received_length
byte_data = byte_data[0:rsize]
self.__recv_length += len(byte_data)
self.__reader._putvalue(byte_data)
def handle(self):
if self.__is_finish:
if not self.__is_resp_hdr:
self.__output_hdr_func(self.__resp_status, self.__resp_headers)
self.__is_resp_hdr = True
self.__finish_func()
return
try:
for resp_data in self.__app:
if not self.__has_hdr: continue
if not self.__is_resp_hdr:
self.__output_hdr_func(self.__resp_status, self.__resp_headers)
self.__is_resp_hdr = True
self.__response_body(resp_data)
except:
self.__handle_error("500 Internal Server Error", [], traceback.format_exc())
return
if self.__is_finish: self.__finish_func()
return
def __start_response(self, status, response_headers, exc_info=None):
try:
self.__resp_stcode = int(status[0:3])
except ValueError:
self.__handle_error("500 Internal Server Error", [], traceback.format_exc())
return
if self.__resp_stcode < 100:
self.__handle_error("500 Internal Server Error", [], "wrong http status code %s" % self.__resp_stcode)
return
if self.__resp_stcode >= 100 and self.__resp_stcode < 200:
self.__output_hdr_func(status, response_headers)
return
if self.__is_resp_hdr:
self.__handle_error("500 Internal Server Error", [], "http master has responsed!")
return
if self.__resp_stcode >= 300:
self.__output_hdr_func(status, response_headers, )
self.__is_finish = True
self.__is_resp_hdr = True
return
self.__resp_status = status
self.__resp_headers = response_headers
self.__has_hdr = True
for name, value in response_headers:
name = name.lower()
if name == "content-length":
try:
self.__resp_content_length = int(value)
except ValueError:
self.__response_error("500 Internal Server Error", [], traceback.format_exc())
return
break
if name == "transfer-encoding" and value.lower() == "chunked":
self.__is_chunked = True
self.__chunked = httpchunked.parser()
break
if self.__is_chunked: return
if self.__resp_content_length == 0: self.__is_finish = True | 0.207375 | 0.071429 |
import logging
import re
from datetime import timedelta
import asyncio
from ..utils import load_plugins, invoke_plugins
from ..config import config
from .base import SearcherError, SearchResult
log = logging.getLogger('stagehand.searchers')
plugins, broken_plugins = load_plugins('searchers', ['easynews'])
@asyncio.coroutine
def start(manager):
    """
    Called when the manager is starting.

    Starts every loaded searcher plugin, then logs a warning for each
    plugin that failed to load.
    """
    yield from invoke_plugins(plugins, 'start', manager)
    for name, error in broken_plugins.items():
        log.warning('failed to load searcher plugin %s: %s', name, error)
@asyncio.coroutine
def search(series, episodes, skip=None, loop=None):
    """Search the enabled searcher plugins for the given episodes.

    :param series: series object (provides cfg.quality and runtime)
    :param episodes: episodes to search for
    :param skip: iterable of searcher plugin names to skip (default: none)
    :param loop: event loop passed to each Searcher
    :return: results dict from the first searcher that found anything,
        or {} when none did
    """
    # Fix: mutable default argument replaced with a None sentinel.
    skip = skip or []
    try:
        earliest = min(ep.airdate for ep in episodes if ep.airdate)
    except ValueError:
        # Empty sequence: no eps had an airdate.
        earliest = None
    if earliest:
        # Allow for episodes to be posted 10 days before the supposed
        # air date.
        earliest = (earliest - timedelta(days=10)).strftime('%Y-%m-%d')
    # XXX: should probably review these wild-ass min size guesses
    mb_per_min = 2 if series.cfg.quality == 'HD' else 0.5
    # Fall back to 30 minutes when the series has no known runtime.
    runtime = series.runtime or 30
    min_size = runtime * mb_per_min * 1024 * 1024
    # FIXME: magic factor
    ideal_size = min_size * (5 if series.cfg.quality == 'Any' else 3)
    # Bug fix: the original passed series.runtime (possibly None) to %d,
    # which raised TypeError for series without a runtime.
    log.info('min size=%d ideal size=%d runtime=%d', min_size, ideal_size, runtime)
    tried = set()
    always = [name for name in plugins if plugins[name].Searcher.ALWAYS_ENABLED]
    for name in config.searchers.enabled + always:
        if name not in plugins or name in skip or name in tried:
            continue
        tried.add(name)
        searcher = plugins[name].Searcher(loop=loop)
        try:
            results = yield from searcher.search(series, episodes, earliest, min_size, ideal_size, series.cfg.quality)
        except SearcherError as e:
            log.error('%s failed: %s', name, e.args[0])
        except Exception:
            log.exception('%s failed with unhandled error', name)
        else:
            # FIXME: if some episodes don't have results, need to try other searchers.
            if results:
                return results
            else:
                log.debug2('%s found no results', name)
    return {}
import re
from datetime import timedelta
import asyncio
from ..utils import load_plugins, invoke_plugins
from ..config import config
from .base import SearcherError, SearchResult
log = logging.getLogger('stagehand.searchers')
plugins, broken_plugins = load_plugins('searchers', ['easynews'])
@asyncio.coroutine
def start(manager):
"""
Called when the manager is starting.
"""
yield from invoke_plugins(plugins, 'start', manager)
for name, error in broken_plugins.items():
log.warning('failed to load searcher plugin %s: %s', name, error)
@asyncio.coroutine
def search(series, episodes, skip=[], loop=None):
try:
earliest = min(ep.airdate for ep in episodes if ep.airdate)
except ValueError:
# Empty sequence: no eps had an airdate.
earliest = None
if earliest:
# Allow for episodes to be posted 10 days before the supposed
# air date.
earliest = (earliest - timedelta(days=10)).strftime('%Y-%m-%d')
# XXX: should probably review these wild-ass min size guesses
mb_per_min = 2 if series.cfg.quality == 'HD' else 0.5
min_size = (series.runtime or 30) * mb_per_min * 1024 * 1024
# FIXME: magic factor
ideal_size = min_size * (5 if series.cfg.quality == 'Any' else 3)
log.info('min size=%d ideal size=%d runtime=%d' , min_size, ideal_size, series.runtime)
tried = set()
always = [name for name in plugins if plugins[name].Searcher.ALWAYS_ENABLED]
for name in config.searchers.enabled + always:
if name not in plugins or name in skip or name in tried:
continue
tried.add(name)
searcher = plugins[name].Searcher(loop=loop)
try:
results = yield from searcher.search(series, episodes, earliest, min_size, ideal_size, series.cfg.quality)
except SearcherError as e:
log.error('%s failed: %s', name, e.args[0])
except Exception:
log.exception('%s failed with unhandled error', name)
else:
# FIXME: if some episodes don't have results, need to try other searchers.
if results:
return results
else:
log.debug2('%s found no results', name)
return {} | 0.229363 | 0.101634 |
import os
import sys
import csv
import glob
import time
import json
import logging
import resource
import threading
import subprocess
# constants ------------------------------------------------
# Worker thread count / per-test timeout (seconds) / instruction budget
# passed to wasp via -m.
THREADS = 1
TIMEOUT = 900
INSTR_MAX = 4000000000000000000
# globals --------------------------------------------------
# Fix: these literals carried a pointless f-string prefix (no placeholders).
dirs = glob.glob('_build/tests')
# Result table: header row first, then one row per finished test.
table = [['test', 'spec', 'Twasp', 'Tloop', 'Tsolver', 'paths', 'Cov']]
# Tests that crashed, timed out, or produced no specification.
errors = []
# helpers --------------------------------------------------
def cmd(p, r):
    """Build the wasp command line for test file *p* with workspace *r*."""
    return [
        'wasp',
        p,
        '-e',
        '(invoke "__original_main")',
        '-b',
        '-m',
        str(INSTR_MAX),
        '--workspace', r,
        '--smt-assume',
    ]
def limit_ram() -> None:
    """Cap the current process's address space at 15 GiB.

    Used as the subprocess preexec_fn, so each wasp child inherits the
    limit.  Sets both soft and hard RLIMIT_AS; the hard limit cannot be
    raised again afterwards.
    """
    limit = 15 * 1024 * 1024 * 1024
    resource.setrlimit(resource.RLIMIT_AS, (limit, limit))
def run(test: str, out_dir: str):
    """Run wasp on a single test file.

    :param test: path of the .wat test file
    :param out_dir: workspace directory for wasp's output
    :return: wasp's combined stdout/stderr bytes, or None on crash/timeout
    """
    try:
        out = subprocess.check_output(
            cmd(test, out_dir),
            timeout=TIMEOUT,
            stderr=subprocess.STDOUT,
            preexec_fn=limit_ram
        )
    except (subprocess.CalledProcessError, \
            subprocess.TimeoutExpired) as e:
        # Fix: the caught exception was bound but ignored; log which test
        # failed and why instead of a bare 'crashed'.
        logging.error('%s crashed: %s', test, e)
        return None
    return out
#-----------------------------------------------------------
# main -----------------------------------------------------
# Configure root logging: timestamped messages at INFO level.
fmt = '%(asctime)s: %(message)s'
date_fmt = '%H:%M:%S'
logging.basicConfig(format=fmt, level=logging.INFO, \
    datefmt=date_fmt)
def main(argv):
    """Run the benchmark suite over the selected tests.

    :param argv: explicit test paths; when empty, all .wat files under
        the dirs glob are used.
    """
    tests = []
    lock = threading.Lock()
    def run_benchmark(test):
        # Run one test, then fold its report.json into the global table.
        out_dir = os.path.join('output', os.path.basename(test))
        t0 = time.time()
        run(test, out_dir)
        delta = time.time() - t0
        report_file = os.path.join(out_dir, 'report.json')
        if not os.path.exists(report_file):
            lock.acquire()
            errors.append(test)
            lock.release()
            logging.info(f'Crashed/Timeout {os.path.basename(test)}')
            return
        with open(report_file, 'r') as f:
            try:
                report = json.load(f)
            except json.decoder.JSONDecodeError:
                # NOTE(review): `i` here resolves via closure to main's
                # thread-creation loop variable, not this worker's id --
                # confirm intended.
                logging.info(f'Thread {i}: Can not read report \'{report_file}\'.')
                return
        if not report['specification']:
            lock.acquire()
            errors.append(test)
            lock.release()
        logging.info(f'Test {os.path.basename(test)} ' \
                     f'({report["specification"]}, ' \
                     f'T={round(delta,2)}s, L={float(report["loop_time"])}, S={float(report["solver_time"])}' \
                     f'{report["paths_explored"]})')
        lock.acquire()
        table.append([
            f'{test}',
            report['specification'],
            round(delta, 2),
            float(report['loop_time']),
            float(report['solver_time']),
            report['paths_explored'],
            report['coverage']
        ])
        lock.release()
    def t_loop(i):
        # Worker loop: pop tests from the shared list until exhausted.
        while True:
            try:
                lock.acquire()
                test = tests.pop()
            except IndexError:
                break
            finally:
                lock.release()
            run_benchmark(test)
    if argv == []:
        for dir in dirs:
            tests = tests + glob.glob(f'{dir}/*.wat')
    else:
        tests = argv
    threads = []
    for i in range(THREADS):
        t = threading.Thread(target=t_loop, args=(i,))
        threads.append(t)
        t.start()
    for t in threads:
        t.join()
    # Persist the accumulated results and report failures.
    with open('table.csv', 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerows(table)
    for err in errors:
        logging.info('Failed Test: ' + err)
if __name__ == '__main__':
    # Forward CLI arguments (optional explicit test paths) to main.
    main(sys.argv[1:])
#-----------------------------------------------------------
import sys
import csv
import glob
import time
import json
import logging
import resource
import threading
import subprocess
# constants ------------------------------------------------
THREADS=1
TIMEOUT=900
INSTR_MAX=4000000000000000000
# globals --------------------------------------------------
dirs = glob.glob(f'_build/tests')
table = [['test', 'spec', 'Twasp', 'Tloop', 'Tsolver', 'paths', 'Cov']]
errors = list()
# helpers --------------------------------------------------
cmd = lambda p, r : [
'wasp',
p,
'-e',
f'(invoke \"__original_main\")',
'-b',
'-m',
str(INSTR_MAX),
'--workspace', r,
'--smt-assume'
]
def limit_ram() -> None:
limit = 15 * 1024 * 1024 * 1024
resource.setrlimit(resource.RLIMIT_AS, (limit, limit))
def run(test: str, out_dir: str):
try:
out = subprocess.check_output(
cmd(test, out_dir),
timeout=TIMEOUT,
stderr=subprocess.STDOUT,
preexec_fn=limit_ram
)
except (subprocess.CalledProcessError, \
subprocess.TimeoutExpired) as e:
logging.error('crashed')
return None
return out
#-----------------------------------------------------------
# main -----------------------------------------------------
fmt = '%(asctime)s: %(message)s'
date_fmt = '%H:%M:%S'
logging.basicConfig(format=fmt, level=logging.INFO, \
datefmt=date_fmt)
def main(argv):
tests = []
lock = threading.Lock()
def run_benchmark(test):
out_dir = os.path.join('output', os.path.basename(test))
t0 = time.time()
run(test, out_dir)
delta = time.time() - t0
report_file = os.path.join(out_dir, 'report.json')
if not os.path.exists(report_file):
lock.acquire()
errors.append(test)
lock.release()
logging.info(f'Crashed/Timeout {os.path.basename(test)}')
return
with open(report_file, 'r') as f:
try:
report = json.load(f)
except json.decoder.JSONDecodeError:
logging.info(f'Thread {i}: Can not read report \'{report_file}\'.')
return
if not report['specification']:
lock.acquire()
errors.append(test)
lock.release()
logging.info(f'Test {os.path.basename(test)} ' \
f'({report["specification"]}, ' \
f'T={round(delta,2)}s, L={float(report["loop_time"])}, S={float(report["solver_time"])}' \
f'{report["paths_explored"]})')
lock.acquire()
table.append([
f'{test}',
report['specification'],
round(delta, 2),
float(report['loop_time']),
float(report['solver_time']),
report['paths_explored'],
report['coverage']
])
lock.release()
def t_loop(i):
while True:
try:
lock.acquire()
test = tests.pop()
except IndexError:
break
finally:
lock.release()
run_benchmark(test)
if argv == []:
for dir in dirs:
tests = tests + glob.glob(f'{dir}/*.wat')
else:
tests = argv
threads = []
for i in range(THREADS):
t = threading.Thread(target=t_loop, args=(i,))
threads.append(t)
t.start()
for t in threads:
t.join()
with open('table.csv', 'w', newline='') as f:
writer = csv.writer(f)
writer.writerows(table)
for err in errors:
logging.info('Failed Test: ' + err)
if __name__ == '__main__':
main(sys.argv[1:])
#----------------------------------------------------------- | 0.145661 | 0.071235 |
from dataclasses import dataclass
from dataclasses import field
from pathlib import Path
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from xsdata.analyzer import ClassAnalyzer
from xsdata.builder import ClassBuilder
from xsdata.logger import logger
from xsdata.models.codegen import Class
from xsdata.models.elements import Import
from xsdata.models.elements import Include
from xsdata.models.elements import Override
from xsdata.models.elements import Redefine
from xsdata.models.elements import Schema
from xsdata.parser import SchemaParser
from xsdata.writer import writer
@dataclass
class SchemaTransformer:
    """Drive the schema-to-code pipeline: parse XSDs, analyze, write."""
    # When True, print the generated code instead of writing files.
    print: bool
    # Output designation handed to the writer factory.
    output: str
    # Schema paths already handled; prevents re-processing shared includes.
    processed: List[Path] = field(init=False, default_factory=list)
    def process(self, schemas: List[Path], package: str):
        """Process all schema paths, then print or write the result."""
        classes = self.process_schemas(schemas, package)
        classes = self.analyze_classes(classes)
        class_num, inner_num = self.count_classes(classes)
        if class_num:
            logger.info("Analyzer: %d main and %d inner classes", class_num, inner_num)
            writer.designate(classes, self.output)
            if self.print:
                writer.print(classes, self.output)
            else:
                writer.write(classes, self.output)
        else:
            logger.warning("Analyzer returned zero classes!")
    def process_schemas(self, schemas: List[Path], package: str) -> List[Class]:
        """Generate classes for each schema path and collect them."""
        classes = list()
        for schema in schemas:
            classes.extend(self.process_schema(schema, package))
        return classes
    def process_schema(
        self, schema_path: Path, package: str, target_namespace: Optional[str] = None,
    ) -> List[Class]:
        """Recursively parse the given schema and all it's included schemas and
        generate a list of classes."""
        classes = []
        if schema_path not in self.processed:
            self.processed.append(schema_path)
            logger.info("Parsing schema...")
            schema = self.parse_schema(schema_path, target_namespace)
            # Included schemas inherit this schema's target namespace.
            target_namespace = schema.target_namespace
            for sub in schema.included():
                included_classes = self.process_included(sub, package, target_namespace)
                classes.extend(included_classes)
            classes.extend(self.generate_classes(schema, package))
        else:
            logger.debug("Already processed skipping: %s", schema_path.name)
        return classes
    def process_included(
        self,
        included: Union[Import, Include, Redefine, Override],
        package: str,
        target_namespace: Optional[str],
    ) -> List[Class]:
        """Prepare the given included schema location and send it for
        processing."""
        classes = []
        if not included.location:
            logger.warning(
                "%s: %s unresolved schema location..",
                included.class_name,
                included.schema_location,
            )
        elif included.location in self.processed:
            logger.debug(
                "%s: %s already included skipping..",
                included.class_name,
                included.schema_location,
            )
        else:
            # Shift the package relative to the include's location.
            package = self.adjust_package(package, included.schema_location)
            classes = self.process_schema(included.location, package, target_namespace)
        return classes
    def generate_classes(self, schema: Schema, package: str) -> List[Class]:
        """Convert the given schema tree to codegen classes and use the writer
        factory to either generate or print the result code."""
        logger.info("Compiling schema...")
        classes = ClassBuilder(schema=schema, package=package).build()
        class_num, inner_num = self.count_classes(classes)
        if class_num > 0:
            logger.info("Builder: %d main and %d inner classes", class_num, inner_num)
        return classes
    @staticmethod
    def parse_schema(schema_path: Path, target_namespace: Optional[str]) -> Schema:
        """
        Parse the given schema path and return the schema tree object.
        Optionally add the target namespace if the schema is included
        and is missing a target namespace.
        """
        parser = SchemaParser(target_namespace=target_namespace)
        return parser.from_xsd_path(schema_path)
    @staticmethod
    def analyze_classes(classes: List[Class]) -> List[Class]:
        """Analyzer the given class list and simplify attributes and
        extensions."""
        analyzer = ClassAnalyzer()
        return analyzer.process(classes)
    @staticmethod
    def adjust_package(package: str, location: Optional[str]) -> str:
        """
        Adjust if possible the package name relatively to the schema location
        to make sense.
        eg. foo.bar, ../common/schema.xsd -> foo.common
        """
        if location and not location.startswith("http"):
            pp = package.split(".")
            for part in Path(location).parent.parts:
                if part == "..":
                    pp.pop()
                else:
                    pp.append(part)
            if pp:
                return ".".join(pp)
        return package
    def count_classes(self, classes: List[Class]) -> Tuple[int, int]:
        """Return a tuple of counters for the main and inner classes."""
        main = len(classes)
        inner = 0
        for cls in classes:
            # count_classes returns (main, inner); summing the pair counts
            # the direct inner classes plus all deeper nested ones.
            inner += sum(self.count_classes(cls.inner))
        return main, inner
from dataclasses import field
from pathlib import Path
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from xsdata.analyzer import ClassAnalyzer
from xsdata.builder import ClassBuilder
from xsdata.logger import logger
from xsdata.models.codegen import Class
from xsdata.models.elements import Import
from xsdata.models.elements import Include
from xsdata.models.elements import Override
from xsdata.models.elements import Redefine
from xsdata.models.elements import Schema
from xsdata.parser import SchemaParser
from xsdata.writer import writer
@dataclass
class SchemaTransformer:
print: bool
output: str
processed: List[Path] = field(init=False, default_factory=list)
def process(self, schemas: List[Path], package: str):
classes = self.process_schemas(schemas, package)
classes = self.analyze_classes(classes)
class_num, inner_num = self.count_classes(classes)
if class_num:
logger.info("Analyzer: %d main and %d inner classes", class_num, inner_num)
writer.designate(classes, self.output)
if self.print:
writer.print(classes, self.output)
else:
writer.write(classes, self.output)
else:
logger.warning("Analyzer returned zero classes!")
def process_schemas(self, schemas: List[Path], package: str) -> List[Class]:
classes = list()
for schema in schemas:
classes.extend(self.process_schema(schema, package))
return classes
def process_schema(
self, schema_path: Path, package: str, target_namespace: Optional[str] = None,
) -> List[Class]:
"""Recursively parse the given schema and all it's included schemas and
generate a list of classes."""
classes = []
if schema_path not in self.processed:
self.processed.append(schema_path)
logger.info("Parsing schema...")
schema = self.parse_schema(schema_path, target_namespace)
target_namespace = schema.target_namespace
for sub in schema.included():
included_classes = self.process_included(sub, package, target_namespace)
classes.extend(included_classes)
classes.extend(self.generate_classes(schema, package))
else:
logger.debug("Already processed skipping: %s", schema_path.name)
return classes
def process_included(
self,
included: Union[Import, Include, Redefine, Override],
package: str,
target_namespace: Optional[str],
) -> List[Class]:
"""Prepare the given included schema location and send it for
processing."""
classes = []
if not included.location:
logger.warning(
"%s: %s unresolved schema location..",
included.class_name,
included.schema_location,
)
elif included.location in self.processed:
logger.debug(
"%s: %s already included skipping..",
included.class_name,
included.schema_location,
)
else:
package = self.adjust_package(package, included.schema_location)
classes = self.process_schema(included.location, package, target_namespace)
return classes
def generate_classes(self, schema: Schema, package: str) -> List[Class]:
"""Convert the given schema tree to codegen classes and use the writer
factory to either generate or print the result code."""
logger.info("Compiling schema...")
classes = ClassBuilder(schema=schema, package=package).build()
class_num, inner_num = self.count_classes(classes)
if class_num > 0:
logger.info("Builder: %d main and %d inner classes", class_num, inner_num)
return classes
@staticmethod
def parse_schema(schema_path: Path, target_namespace: Optional[str]) -> Schema:
"""
Parse the given schema path and return the schema tree object.
Optionally add the target namespace if the schema is included
and is missing a target namespace.
"""
parser = SchemaParser(target_namespace=target_namespace)
return parser.from_xsd_path(schema_path)
@staticmethod
def analyze_classes(classes: List[Class]) -> List[Class]:
"""Analyzer the given class list and simplify attributes and
extensions."""
analyzer = ClassAnalyzer()
return analyzer.process(classes)
@staticmethod
def adjust_package(package: str, location: Optional[str]) -> str:
"""
Adjust if possible the package name relatively to the schema location
to make sense.
eg. foo.bar, ../common/schema.xsd -> foo.common
"""
if location and not location.startswith("http"):
pp = package.split(".")
for part in Path(location).parent.parts:
if part == "..":
pp.pop()
else:
pp.append(part)
if pp:
return ".".join(pp)
return package
def count_classes(self, classes: List[Class]) -> Tuple[int, int]:
"""Return a tuple of counters for the main and inner classes."""
main = len(classes)
inner = 0
for cls in classes:
inner += sum(self.count_classes(cls.inner))
return main, inner | 0.912115 | 0.32298 |
from pathlib import Path
import numpy as np
import scipy.io as io
from utils.matching import optimum
import scipy.io as io
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
# Sequence ids grouped into train/test splits.
data_type = {"train": [1, 2, 5, 6, 9, 10, 13, 14], "test": [3, 7, 11, 15]}
# Sequence ids grouped by experimental condition.
Group = {
    "Control": list(range(1, 5)),
    "FGF2": list(range(5, 9)),
    "BMP2": list(range(9, 13)),
    "BMP2+FGF2": list(range(13, 17)),
}
def check_candidate(cand, annotation_path, sequence):
    """Measure how many annotated points fall inside candidate boxes.

    :param cand: candidate rows of [frame, x_min, y_min, x_max, y_max, ...]
    :param annotation_path: .mat file with ground truth under "result" as
        rows of (t, x, y); coordinates are shifted by +30 to match the
        padded candidate boxes
    :param sequence: sequence id, used only for the printed label
    :return: fraction of annotations covered by at least one box
    """
    annotations = io.loadmat(str(annotation_path))["result"]
    # Compensate for the 30-pixel padding applied to the candidate boxes.
    annotations[:, 1] = annotations[:, 1] + 30
    annotations[:, 2] = annotations[:, 2] + 30
    hits = 0
    for t, x, y in annotations:
        boxes = cand[cand[:, 0] == t]
        covered = np.any(
            (boxes[:, 1] < x)
            & (boxes[:, 3] > x)
            & (boxes[:, 2] < y)
            & (boxes[:, 4] > y)
        )
        if covered:
            hits += 1
    rate = hits / annotations.shape[0]
    print(f"sequence{sequence}=")
    print(rate)
    return rate
def cand_get_centor(cand_pos):
    """Compute box centers from candidate rows.

    :param cand_pos: array whose columns 1..4 are x_min, y_min, x_max, y_max
    :return: (N, 2) array of (x_center, y_center)
    """
    cx = (cand_pos[:, 1] + cand_pos[:, 3]) / 2
    cy = (cand_pos[:, 2] + cand_pos[:, 4]) / 2
    return np.stack([cx, cy], axis=1)
def associate_candidate(cand_path, sequence, annotation_path=None):
    """
    Associate candidate patches along the time axis.
    associate_cand rows are [length, x_min, y_min, x_max, y_max, frame].
    :param cand_path: file path of the extracted candidate .mat file
    :param sequence: candidate sequence id
    :param annotation_path: optional ground truth, used to report
        candidate-extraction accuracy via check_candidate
    :return: None; writes prepro_candidate.mat next to cand_path
    """
    # load candidate patch
    cands = io.loadmat(str(cand_path))["candi"][0]
    associate_cand = []
    for cand in cands:
        associate_cand.append(cand[0])
    cand = np.array(associate_cand)
    # cand = [frame, x_min, y_min, x_max, y_max]
    # Assign a unique id to every candidate box (column 5).
    id_box = []
    ids = np.arange(cand.shape[0]).reshape(cand.shape[0], 1)
    cand = np.append(cand, ids.reshape((ids.shape[0], 1)), axis=1)
    associate_cand = cand[cand[:, 0] == 1]
    associate_cand = np.append(
        associate_cand, np.ones((associate_cand.shape[0], 1)), axis=1
    )
    for id in associate_cand[:, 5]:
        id_box.append([int(id)])
    # Link candidates frame-to-frame into track sequences.
    for frame in range(1, 1014):
        cand_in_frame = cand[cand[:, 0] == frame]
        in_farme_cand_center = cand_get_centor(cand_in_frame)
        cand_next_frame = cand[cand[:, 0] == frame + 1]
        next_farme_cand_center = cand_get_centor(cand_next_frame)
        # associate candidate based on distance
        associate_ids = optimum(in_farme_cand_center, next_farme_cand_center, 20)
        cand_index = set(np.arange(cand_next_frame.shape[0])) - set(associate_ids[:, 1])
        cand_next_frame[:, 0] = 1
        # Unmatched boxes in the next frame start new tracks.
        if cand_next_frame[list(cand_index)].shape[0] != 0:
            for temp in cand_next_frame[list(cand_index)]:
                temp = np.array([np.append(temp, frame + 1)])
                associate_cand = np.append(associate_cand, temp, axis=0)
            for temp in cand_next_frame[list(cand_index)][:, 5]:
                id_box.append([int(temp)])
        # Bug fix: np.int was removed in NumPy 1.24; use the builtin int.
        for associate_id in associate_ids.astype(int):
            temp = associate_cand[
                associate_cand[:, 5] == cand_in_frame[associate_id[0]][5]
            ]
            change_mat_index = int(
                np.where(associate_cand[:, 5] == cand_in_frame[associate_id[0]][5])[0]
            )
            id_box[change_mat_index].append(int(cand_next_frame[associate_id[1]][5]))
            # Extend the matched track: bump its length and move its head
            # id to the box in the next frame.
            temp[0, 0] += 1
            temp[0, 5] = cand_next_frame[associate_id[1]][5]
            associate_cand[
                associate_cand[:, 5] == cand_in_frame[associate_id[0]][5]
            ] = temp
    if annotation_path is not None:
        # Evaluate only tracks longer than one frame against ground truth.
        cut_index = np.where((associate_cand[:, 0] > 1))[0]
        new_id_box = []
        for index in cut_index:
            new_id_box.extend(id_box[index])
        cand_for_conf = []
        for index in new_id_box:
            cand_for_conf.append(cand[cand[:, 5] == index][0])
        result = check_candidate(np.array(cand_for_conf), annotation_path, sequence)
    cand_save_path = cand_path.parent.joinpath("prepro_candidate.mat")
    io.savemat(cand_save_path, {"candi": associate_cand})
import numpy as np
import scipy.io as io
from utils.matching import optimum
import scipy.io as io
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
data_type = {"train": [1, 2, 5, 6, 9, 10, 13, 14], "test": [3, 7, 11, 15]}
Group = {
"Control": list(range(1, 5)),
"FGF2": list(range(5, 9)),
"BMP2": list(range(9, 13)),
"BMP2+FGF2": list(range(13, 17)),
}
def check_candidate(cand, annotation_path, sequence):
    """Measure which fraction of ground-truth points lie strictly inside a candidate box.

    :param cand: candidate boxes, rows of (frame, x_min, y_min, x_max, y_max, ...)
    :param annotation_path: path of a .mat file whose 'result' key holds rows of (t, x, y)
    :param sequence: sequence identifier, used only for the progress printout
    :return: fraction of annotations covered by at least one same-frame candidate box
    """
    gt = io.loadmat(str(annotation_path))["result"]
    # annotations were made on a 30px-offset crop; shift back into candidate coords
    gt[:, 1] = gt[:, 1] + 30
    gt[:, 2] = gt[:, 2] + 30
    hits = 0
    for t, x, y in gt:
        boxes = cand[cand[:, 0] == t]
        # strict inequalities: a point exactly on a box edge does not count
        covered = np.any(
            (boxes[:, 1] < x)
            & (boxes[:, 3] > x)
            & (boxes[:, 2] < y)
            & (boxes[:, 4] > y)
        )
        hits += bool(covered)
    print(f"sequence{sequence}=")
    print(hits / gt.shape[0])
    return hits / gt.shape[0]
def cand_get_centor(cand_pos):
    """Convert candidate boxes to their center points.

    :param cand_pos: array whose columns 1..4 are (x_min, y_min, x_max, y_max)
    :return: (N, 2) array of box centers as (x, y)
    """
    cx = (cand_pos[:, 1] + cand_pos[:, 3]) / 2
    cy = (cand_pos[:, 2] + cand_pos[:, 4]) / 2
    return np.stack([cx, cy], axis=1)
def associate_candidate(cand_path, sequence, annotation_path=None, n_frames=1014):
    """Associate candidate patches across consecutive frames into tracks.

    Each resulting row is [length, x_min, y_min, x_max, y_max, id, (start_frame)],
    where ``length`` counts how many consecutive frames the candidate was matched.

    :param cand_path: Path of the .mat file with extracted candidates ('candi' key)
    :param sequence: candidate sequence identifier (used for reporting only)
    :param annotation_path: optional ground-truth .mat path; when given, the
        coverage accuracy of the kept candidates is computed and printed
    :param n_frames: total number of frames in the sequence (default keeps the
        previously hard-coded value of 1014)
    :return: None; writes 'prepro_candidate.mat' next to ``cand_path``
    """
    # load candidate patches: each row is [frame, x_min, y_min, x_max, y_max]
    cands = io.loadmat(str(cand_path))["candi"][0]
    associate_cand = []
    for cand in cands:
        associate_cand.append(cand[0])
    cand = np.array(associate_cand)
    # assign a unique id to every candidate (appended as column 5)
    id_box = []
    ids = np.arange(cand.shape[0]).reshape(cand.shape[0], 1)
    cand = np.append(cand, ids.reshape((ids.shape[0], 1)), axis=1)
    # tracks are seeded from frame 1; column 0 is reused as the track length
    associate_cand = cand[cand[:, 0] == 1]
    associate_cand = np.append(
        associate_cand, np.ones((associate_cand.shape[0], 1)), axis=1
    )
    for id in associate_cand[:, 5]:
        id_box.append([int(id)])
    # link candidates frame by frame
    for frame in range(1, n_frames):
        cand_in_frame = cand[cand[:, 0] == frame]
        in_frame_cand_center = cand_get_centor(cand_in_frame)
        cand_next_frame = cand[cand[:, 0] == frame + 1]
        next_frame_cand_center = cand_get_centor(cand_next_frame)
        # associate candidates whose centers match within a 20px threshold
        associate_ids = optimum(in_frame_cand_center, next_frame_cand_center, 20)
        cand_index = set(np.arange(cand_next_frame.shape[0])) - set(associate_ids[:, 1])
        cand_next_frame[:, 0] = 1  # unmatched candidates start a track of length 1
        # add unmatched next-frame candidates as new tracks
        if cand_next_frame[list(cand_index)].shape[0] != 0:
            for temp in cand_next_frame[list(cand_index)]:
                temp = np.array([np.append(temp, frame + 1)])
                associate_cand = np.append(associate_cand, temp, axis=0)
            for temp in cand_next_frame[list(cand_index)][:, 5]:
                id_box.append([int(temp)])
        # extend matched tracks: bump the length and carry the newest id forward
        # (astype(int): the np.int alias was removed in NumPy >= 1.24)
        for associate_id in associate_ids.astype(int):
            temp = associate_cand[
                associate_cand[:, 5] == cand_in_frame[associate_id[0]][5]
            ]
            change_mat_index = int(
                np.where(associate_cand[:, 5] == cand_in_frame[associate_id[0]][5])[0]
            )
            id_box[change_mat_index].append(int(cand_next_frame[associate_id[1]][5]))
            temp[0, 0] += 1
            temp[0, 5] = cand_next_frame[associate_id[1]][5]
            associate_cand[
                associate_cand[:, 5] == cand_in_frame[associate_id[0]][5]
            ] = temp
    if annotation_path is not None:
        # keep only tracks matched in at least two frames, then score coverage
        cut_index = np.where((associate_cand[:, 0] > 1))[0]
        new_id_box = []
        for index in cut_index:
            new_id_box.extend(id_box[index])
        cand_for_conf = []
        for index in new_id_box:
            cand_for_conf.append(cand[cand[:, 5] == index][0])
        check_candidate(np.array(cand_for_conf), annotation_path, sequence)
    cand_save_path = cand_path.parent.joinpath("prepro_candidate.mat")
    io.savemat(cand_save_path, {"candi": associate_cand})
import os
import re
import sys
import glob
import shutil
import subprocess
import excons
import excons.devtoolset
import SCons.Script # pylint: disable=import-error
# pylint: disable=bad-indentation,global-statement,bare-except,deprecated-lambda
# Matches cmake install output lines such as "-- Installing: /path/to/file"
# and captures the installed path in group 2.
InstallExp = re.compile(r"^--\s+(Installing|Up-to-date):\s+([^\s].*)$")
# Shell separator used when chaining "cd <builddir>" with the cmake call.
CmdSep = ("&&" if sys.platform == "win32" else ";")
# Extra SCons dependencies per configuration name; filled by AddConfigureDependencies().
ConfigExtraDeps = {}
# Avoid listing microsoft runtime dlls:
# - concrtXXX.dll
# - msvcpXXX.dll
# - vcruntimeXXX.dll
# extension -> set of basename prefixes to exclude (MS runtime redistributables)
_VC_ignores_by_ext = {"dll": set(["concrt", "msvcp", "msvcr", "vcruntime"])}

def VC_Filter(path):
    """Return False for Microsoft runtime redistributable dlls, True otherwise."""
    global _VC_ignores_by_ext
    base = os.path.basename(path)
    ext = os.path.splitext(base)[-1][1:].lower()
    return not any(base.startswith(prefix)
                   for prefix in _VC_ignores_by_ext.get(ext, ()))
def AddConfigureDependencies(name, deps):
    """Register extra SCons dependencies for the named cmake configuration."""
    global ConfigExtraDeps
    ConfigExtraDeps.setdefault(name, []).extend(deps)

def AdditionalConfigureDependencies(name):
    """Return the extra dependencies registered for the named configuration."""
    global ConfigExtraDeps
    return ConfigExtraDeps.get(name, [])
def BuildDir(name):
    """Directory in which the named project is configured and built."""
    return "%s/%s" % (excons.BuildBaseDirectory(), name)

def ConfigCachePath(name):
    """Absolute path of the cache file recording the last configure run."""
    return os.path.abspath("%s/%s.cmake.config" % (excons.out_dir, name))

def OutputsCachePath(name):
    """Absolute path of the cache file listing the files installed by cmake."""
    return os.path.abspath("%s/%s.cmake.outputs" % (excons.out_dir, name))
def Outputs(name):
    """Return the previously recorded cmake install outputs that still exist.

    Reads the ``<name>.cmake.outputs`` cache written by Build() and keeps only
    entries that are existing files, excluding Microsoft runtime dlls.
    NOTE(review): under Python 3 ``filter``/``map`` return lazy iterators; this
    module appears to target Python 2 (see ``iteritems`` in Configure), where
    they return lists.
    """
    lst = []
    cof = OutputsCachePath(name)
    if os.path.isfile(cof):
        cofd = os.path.dirname(cof)
        with open(cof, "r") as f:
            # keep non-empty entries that still exist relative to the cache dir
            lines = filter(lambda y: len(y)>0 and os.path.isfile(os.path.join(cofd, y)), map(lambda x: x.strip(), f.readlines()))
            lst = filter(VC_Filter, map(lambda x: excons.out_dir + "/" + x, lines))
    return lst
def Configure(name, topdir=None, opts=None, min_mscver=None, flags=None):
    """Run cmake's configure step for project *name* inside its build directory.

    :param name: project name; determines the build directory
    :param topdir: directory holding CMakeLists.txt (defaults to cwd)
    :param opts: mapping of cmake cache variables passed as -D<key>=<value>
    :param min_mscver: minimum Visual Studio version to use on Windows
    :param flags: extra raw flags appended to the cmake command line
    :return: True on success (or when scons is cleaning), False on failure

    NOTE(review): ``opts.iteritems()`` and the bare ``unicode`` name below are
    Python-2-only; this module appears to target Python 2.
    """
    if SCons.Script.GetOption("clean"):
        return True
    if opts is None:
        opts = {}
    if topdir is None:
        topdir = os.path.abspath(".")
    bld = BuildDir(name)
    relpath = os.path.relpath(topdir, bld)
    # chain "cd <builddir>" and the cmake invocation into one shell command
    cmd = "cd \"%s\" %s %s " % (bld, CmdSep, excons.GetArgument("with-cmake", "cmake"))
    env = None
    if sys.platform == "win32":
        try:
            mscver = float(excons.GetArgument("mscver", "10.0"))
            if min_mscver is not None and mscver < min_mscver:
                mscver = min_mscver
            # map the msvc toolchain version to the cmake generator name
            if mscver == 9.0:
                cmd += "-G \"Visual Studio 9 2008 Win64\" "
            elif mscver == 10.0:
                cmd += "-G \"Visual Studio 10 2010 Win64\" "
            elif mscver == 11.0:
                cmd += "-G \"Visual Studio 11 2012 Win64\" "
            elif mscver == 12.0:
                cmd += "-G \"Visual Studio 12 2013 Win64\" "
            elif mscver == 14.0:
                cmd += "-G \"Visual Studio 14 2015 Win64\" "
            elif mscver == 14.1:
                cmd += "-G \"Visual Studio 15 2017 Win64\" "
            elif mscver == 14.2:
                cmd += "-G \"Visual Studio 16 2019 Win64\" "
            else:
                excons.Print("Unsupported visual studio version %s" % mscver, tool="cmake")
                return False
        except:
            return False
    else:
        # on linux/mac, optionally pick up a devtoolset-augmented environment
        _env = excons.devtoolset.GetDevtoolsetEnv(excons.GetArgument("devtoolset", ""), merge=True)
        if _env:
            env = os.environ.copy()
            env.update(_env)
    if flags:
        if not cmd.endswith(" "):
            cmd += " "
        cmd += flags
        if not flags.endswith(" "):
            cmd += " "
    for k, v in opts.iteritems():
        cmd += "-D%s=%s " % (k, ("\"%s\"" % v if type(v) in (str, unicode) else v))
    cmd += "-DCMAKE_INSTALL_PREFIX=\"%s\" " % excons.OutputBaseDirectory()
    if sys.platform != "win32":
        # control rpath handling explicitly so installed binaries resolve correctly
        cmd += "-DCMAKE_SKIP_BUILD_RPATH=0 "
        cmd += "-DCMAKE_BUILD_WITH_INSTALL_RPATH=0 "
        cmd += "-DCMAKE_INSTALL_RPATH_USE_LINK_PATH=0 "
        if sys.platform == "darwin":
            cmd += "-DCMAKE_MACOSX_RPATH=1 "
    cmd += relpath
    excons.Print("Run Command: %s" % cmd, tool="cmake")
    p = subprocess.Popen(cmd, env=env, shell=True)
    p.communicate()
    return (p.returncode == 0)
def ParseOutputsInLines(lines, outfiles):
    """Echo cmake output lines and collect installed file paths into *outfiles*."""
    for line in lines:
        excons.Print(line, tool="cmake")
        match = InstallExp.match(line.strip())
        if match is None:
            continue
        installed = match.group(2)
        # directories are reported too; only record plain files
        if not os.path.isdir(installed):
            outfiles.add(installed)
def Build(name, config=None, target=None):
    """Run cmake's build step for *name* and record the installed files.

    :param name: project name (build directory and cache file key)
    :param config: build configuration (defaults to excons.mode_dir)
    :param target: cmake target to build (defaults to "install")
    :return: True on success (or when scons is cleaning), False otherwise

    NOTE(review): ``buf += r`` concatenates Popen stdout into a str buffer,
    which only works where bytes is str (Python 2); this module appears to
    target Python 2.
    """
    if SCons.Script.GetOption("clean"):
        return True
    ccf = ConfigCachePath(name)
    cof = OutputsCachePath(name)
    # a missing configure cache means Configure() never completed successfully
    if not os.path.isfile(ccf):
        return False
    outfiles = set()
    if config is None:
        config = excons.mode_dir
    if target is None:
        target = "install"
    cmd = "cd \"%s\" %s %s --build . --config %s --target %s" % (BuildDir(name), CmdSep, excons.GetArgument("with-cmake", "cmake"), config, target)
    env = None
    extraargs = ""
    njobs = SCons.Script.GetOption("num_jobs")
    if njobs > 1:
        if sys.platform == "win32":
            extraargs += " /m:%d" % njobs
        else:
            extraargs += " -j %d" % njobs
    if excons.GetArgument("show-cmds", 0, int):
        if sys.platform == "win32":
            extraargs += " /v:n" # normal verbosity
        else:
            extraargs += " V=1"
    else:
        if sys.platform == "win32":
            extraargs += " /v:m" # minimal verbosity
    # native build-tool args after "--" require VS >= 2010 generators on windows
    if extraargs and (sys.platform != "win32" or float(excons.GetArgument("mscver", "10.0")) >= 10.0):
        cmd += " --" + extraargs
    if sys.platform != "win32":
        _env = excons.devtoolset.GetDevtoolsetEnv(excons.GetArgument("devtoolset", ""), merge=True)
        if _env:
            env = os.environ.copy()
            env.update(_env)
    excons.Print("Run Command: %s" % cmd, tool="cmake")
    p = subprocess.Popen(cmd, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    buf = ""
    # stream cmake output, scanning each complete line for "-- Installing:" entries
    while p.poll() is None:
        r = p.stdout.readline(512)
        buf += r
        lines = buf.split("\n")
        if len(lines) > 1:
            buf = lines[-1]
            ParseOutputsInLines(lines[:-1], outfiles)
    ParseOutputsInLines(buf.split("\n"), outfiles)
    excons.Print(buf, tool="cmake")
    # Write list of outputed files
    if p.returncode == 0:
        with open(cof, "w") as f:
            lst = filter(VC_Filter, outfiles)
            lst.sort()
            f.write("\n".join(excons.NormalizedRelativePaths(lst, excons.out_dir)))
        return True
    else:
        if os.path.isfile(cof):
            os.remove(cof)
        return False
def CleanOne(name):
    """Remove the recorded outputs, the build directory and both cache files
    for the named project. Only acts when scons was invoked with --clean."""
    if not SCons.Script.GetOption("clean"):
        return
    # Remove output files
    for path in Outputs(name):
        path = excons.out_dir + "/" + path
        if os.path.isfile(path):
            os.remove(path)
            excons.Print("Removed: '%s'" % excons.NormalizedRelativePath(path, excons.out_dir), tool="cmake")
    # Remove build temporary files
    buildDir = BuildDir(name)
    if os.path.isdir(buildDir):
        shutil.rmtree(buildDir)
        excons.Print("Removed: '%s'" % excons.NormalizedRelativePath(buildDir, excons.out_dir), tool="cmake")
    path = ConfigCachePath(name)
    if os.path.isfile(path):
        os.remove(path)
        excons.Print("Removed: '%s'" % excons.NormalizedRelativePath(path, excons.out_dir), tool="cmake")
    path = OutputsCachePath(name)
    if os.path.isfile(path):
        os.remove(path)
        excons.Print("Removed: '%s'" % excons.NormalizedRelativePath(path, excons.out_dir), tool="cmake")
def Clean():
    """Clean every project that has a recorded outputs cache.

    When explicit scons targets were given on the command line only those
    names are cleaned, otherwise all cached project names are.
    NOTE(review): slicing ``allnames[:]`` requires ``map`` to return a list,
    i.e. Python 2 semantics.
    """
    if not SCons.Script.GetOption("clean"):
        return
    # derive project names from "<name>.cmake.outputs" cache file names
    allnames = map(lambda x: ".".join(os.path.basename(x).split(".")[:-2]), glob.glob(excons.out_dir + "/*.cmake.outputs"))
    if len(SCons.Script.COMMAND_LINE_TARGETS) == 0:
        names = allnames[:]
    else:
        names = SCons.Script.COMMAND_LINE_TARGETS
    for name in names:
        CleanOne(name)
def ExternalLibRequire(configOpts, name, libnameFunc=None, definesFunc=None, extraEnvFunc=None, varPrefix=None):
    """Declare an external library requirement and map it to cmake variables.

    When the dependency is required, fills ``configOpts`` with
    ``<PREFIX>INCLUDE_DIR`` and ``<PREFIX>LIBRARY[_DEBUG|_RELEASE]`` entries and
    appends any extra defines to CMAKE_CPP_FLAGS.

    :param configOpts: dict of cmake cache variables to be passed to Configure()
    :param varPrefix: cmake variable prefix; defaults to ``NAME_``
    :return: the dict returned by excons.ExternalLibRequire
    """
    rv = excons.ExternalLibRequire(name, libnameFunc=libnameFunc, definesFunc=definesFunc, extraEnvFunc=extraEnvFunc)
    req = rv["require"]
    if req is not None:
        defines = ("" if definesFunc is None else definesFunc(rv["static"]))
        if defines:
            extraflags = " ".join(map(lambda x: "-D%s" % x, defines))
            configOpts["CMAKE_CPP_FLAGS"] = "%s %s" % (configOpts.get("CMAKE_CPP_FLAGS", ""), extraflags)
        if varPrefix is None:
            varPrefix = name.upper() + "_"
        excons.PrintOnce("Use CMake variable prefix '%s' for external dependency '%s'" % (varPrefix, name))
        configOpts["%sINCLUDE_DIR" % varPrefix] = rv["incdir"]
        configOpts["%sLIBRARY" % varPrefix] = rv["libpath"]
        # sometimes LIBRARY is used, sometines LIBRARY_RELEASE / LIBRARY_DEBUG...
        configOpts["%sLIBRARY_DEBUG" % varPrefix] = rv["libpath"]
        configOpts["%sLIBRARY_RELEASE" % varPrefix] = rv["libpath"]
    return rv
import os
import re
import sys
import glob
import shutil
import subprocess
import excons
import excons.devtoolset
import SCons.Script # pylint: disable=import-error
# pylint: disable=bad-indentation,global-statement,bare-except,deprecated-lambda
InstallExp = re.compile(r"^--\s+(Installing|Up-to-date):\s+([^\s].*)$")
CmdSep = ("&&" if sys.platform == "win32" else ";")
ConfigExtraDeps = {}
# Avoid listing microsoft runtime dlls:
# - concrtXXX.dll
# - msvcpXXX.dll
# - vcruntimeXXX.dll
_VC_ignores_by_ext = {"dll": set(["concrt", "msvcp", "msvcr", "vcruntime"])}
def VC_Filter(path):
global _VC_ignores_by_ext
bn = os.path.basename(path)
key = os.path.splitext(bn)[-1][1:].lower()
prefices = _VC_ignores_by_ext.get(key, [])
for prefix in prefices:
if bn.startswith(prefix):
return False
return True
def AddConfigureDependencies(name, deps):
global ConfigExtraDeps
lst = ConfigExtraDeps.get(name, [])
lst.extend(deps)
ConfigExtraDeps[name] = lst
def AdditionalConfigureDependencies(name):
global ConfigExtraDeps
return ConfigExtraDeps.get(name, [])
def BuildDir(name):
return excons.BuildBaseDirectory() + "/" + name
def ConfigCachePath(name):
return os.path.abspath(excons.out_dir + "/%s.cmake.config" % name)
def OutputsCachePath(name):
return os.path.abspath(excons.out_dir + "/%s.cmake.outputs" % name)
def Outputs(name):
lst = []
cof = OutputsCachePath(name)
if os.path.isfile(cof):
cofd = os.path.dirname(cof)
with open(cof, "r") as f:
lines = filter(lambda y: len(y)>0 and os.path.isfile(os.path.join(cofd, y)), map(lambda x: x.strip(), f.readlines()))
lst = filter(VC_Filter, map(lambda x: excons.out_dir + "/" + x, lines))
return lst
def Configure(name, topdir=None, opts=None, min_mscver=None, flags=None):
if SCons.Script.GetOption("clean"):
return True
if opts is None:
opts = {}
if topdir is None:
topdir = os.path.abspath(".")
bld = BuildDir(name)
relpath = os.path.relpath(topdir, bld)
cmd = "cd \"%s\" %s %s " % (bld, CmdSep, excons.GetArgument("with-cmake", "cmake"))
env = None
if sys.platform == "win32":
try:
mscver = float(excons.GetArgument("mscver", "10.0"))
if min_mscver is not None and mscver < min_mscver:
mscver = min_mscver
if mscver == 9.0:
cmd += "-G \"Visual Studio 9 2008 Win64\" "
elif mscver == 10.0:
cmd += "-G \"Visual Studio 10 2010 Win64\" "
elif mscver == 11.0:
cmd += "-G \"Visual Studio 11 2012 Win64\" "
elif mscver == 12.0:
cmd += "-G \"Visual Studio 12 2013 Win64\" "
elif mscver == 14.0:
cmd += "-G \"Visual Studio 14 2015 Win64\" "
elif mscver == 14.1:
cmd += "-G \"Visual Studio 15 2017 Win64\" "
elif mscver == 14.2:
cmd += "-G \"Visual Studio 16 2019 Win64\" "
else:
excons.Print("Unsupported visual studio version %s" % mscver, tool="cmake")
return False
except:
return False
else:
_env = excons.devtoolset.GetDevtoolsetEnv(excons.GetArgument("devtoolset", ""), merge=True)
if _env:
env = os.environ.copy()
env.update(_env)
if flags:
if not cmd.endswith(" "):
cmd += " "
cmd += flags
if not flags.endswith(" "):
cmd += " "
for k, v in opts.iteritems():
cmd += "-D%s=%s " % (k, ("\"%s\"" % v if type(v) in (str, unicode) else v))
cmd += "-DCMAKE_INSTALL_PREFIX=\"%s\" " % excons.OutputBaseDirectory()
if sys.platform != "win32":
cmd += "-DCMAKE_SKIP_BUILD_RPATH=0 "
cmd += "-DCMAKE_BUILD_WITH_INSTALL_RPATH=0 "
cmd += "-DCMAKE_INSTALL_RPATH_USE_LINK_PATH=0 "
if sys.platform == "darwin":
cmd += "-DCMAKE_MACOSX_RPATH=1 "
cmd += relpath
excons.Print("Run Command: %s" % cmd, tool="cmake")
p = subprocess.Popen(cmd, env=env, shell=True)
p.communicate()
return (p.returncode == 0)
def ParseOutputsInLines(lines, outfiles):
for line in lines:
excons.Print(line, tool="cmake")
m = InstallExp.match(line.strip())
if m is not None:
f = m.group(2)
if not os.path.isdir(f):
outfiles.add(f)
def Build(name, config=None, target=None):
if SCons.Script.GetOption("clean"):
return True
ccf = ConfigCachePath(name)
cof = OutputsCachePath(name)
if not os.path.isfile(ccf):
return False
outfiles = set()
if config is None:
config = excons.mode_dir
if target is None:
target = "install"
cmd = "cd \"%s\" %s %s --build . --config %s --target %s" % (BuildDir(name), CmdSep, excons.GetArgument("with-cmake", "cmake"), config, target)
env = None
extraargs = ""
njobs = SCons.Script.GetOption("num_jobs")
if njobs > 1:
if sys.platform == "win32":
extraargs += " /m:%d" % njobs
else:
extraargs += " -j %d" % njobs
if excons.GetArgument("show-cmds", 0, int):
if sys.platform == "win32":
extraargs += " /v:n" # normal verbosity
else:
extraargs += " V=1"
else:
if sys.platform == "win32":
extraargs += " /v:m" # minimal verbosity
if extraargs and (sys.platform != "win32" or float(excons.GetArgument("mscver", "10.0")) >= 10.0):
cmd += " --" + extraargs
if sys.platform != "win32":
_env = excons.devtoolset.GetDevtoolsetEnv(excons.GetArgument("devtoolset", ""), merge=True)
if _env:
env = os.environ.copy()
env.update(_env)
excons.Print("Run Command: %s" % cmd, tool="cmake")
p = subprocess.Popen(cmd, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
buf = ""
while p.poll() is None:
r = p.stdout.readline(512)
buf += r
lines = buf.split("\n")
if len(lines) > 1:
buf = lines[-1]
ParseOutputsInLines(lines[:-1], outfiles)
ParseOutputsInLines(buf.split("\n"), outfiles)
excons.Print(buf, tool="cmake")
# Write list of outputed files
if p.returncode == 0:
with open(cof, "w") as f:
lst = filter(VC_Filter, outfiles)
lst.sort()
f.write("\n".join(excons.NormalizedRelativePaths(lst, excons.out_dir)))
return True
else:
if os.path.isfile(cof):
os.remove(cof)
return False
def CleanOne(name):
if not SCons.Script.GetOption("clean"):
return
# Remove output files
for path in Outputs(name):
path = excons.out_dir + "/" + path
if os.path.isfile(path):
os.remove(path)
excons.Print("Removed: '%s'" % excons.NormalizedRelativePath(path, excons.out_dir), tool="cmake")
# Remove build temporary files
buildDir = BuildDir(name)
if os.path.isdir(buildDir):
shutil.rmtree(buildDir)
excons.Print("Removed: '%s'" % excons.NormalizedRelativePath(buildDir, excons.out_dir), tool="cmake")
path = ConfigCachePath(name)
if os.path.isfile(path):
os.remove(path)
excons.Print("Removed: '%s'" % excons.NormalizedRelativePath(path, excons.out_dir), tool="cmake")
path = OutputsCachePath(name)
if os.path.isfile(path):
os.remove(path)
excons.Print("Removed: '%s'" % excons.NormalizedRelativePath(path, excons.out_dir), tool="cmake")
def Clean():
if not SCons.Script.GetOption("clean"):
return
allnames = map(lambda x: ".".join(os.path.basename(x).split(".")[:-2]), glob.glob(excons.out_dir + "/*.cmake.outputs"))
if len(SCons.Script.COMMAND_LINE_TARGETS) == 0:
names = allnames[:]
else:
names = SCons.Script.COMMAND_LINE_TARGETS
for name in names:
CleanOne(name)
def ExternalLibRequire(configOpts, name, libnameFunc=None, definesFunc=None, extraEnvFunc=None, varPrefix=None):
rv = excons.ExternalLibRequire(name, libnameFunc=libnameFunc, definesFunc=definesFunc, extraEnvFunc=extraEnvFunc)
req = rv["require"]
if req is not None:
defines = ("" if definesFunc is None else definesFunc(rv["static"]))
if defines:
extraflags = " ".join(map(lambda x: "-D%s" % x, defines))
configOpts["CMAKE_CPP_FLAGS"] = "%s %s" % (configOpts.get("CMAKE_CPP_FLAGS", ""), extraflags)
if varPrefix is None:
varPrefix = name.upper() + "_"
excons.PrintOnce("Use CMake variable prefix '%s' for external dependency '%s'" % (varPrefix, name))
configOpts["%sINCLUDE_DIR" % varPrefix] = rv["incdir"]
configOpts["%sLIBRARY" % varPrefix] = rv["libpath"]
# sometimes LIBRARY is used, sometines LIBRARY_RELEASE / LIBRARY_DEBUG...
configOpts["%sLIBRARY_DEBUG" % varPrefix] = rv["libpath"]
configOpts["%sLIBRARY_RELEASE" % varPrefix] = rv["libpath"]
return rv | 0.217171 | 0.066418 |
import logging
import time
from abc import ABC, abstractmethod
from google.protobuf.text_format import MessageToString
from th2_common.schema.message.message_listener import MessageListener
from th2_grpc_check2_recon.check2_recon_pb2_grpc import Check2ReconServicer
from th2_grpc_common.common_pb2 import MessageBatch, RequestStatus
from th2_check2_recon.common import MessageUtils
from th2_check2_recon.reconcommon import ReconMessage
logger = logging.getLogger(__name__)
class AbstractHandler(MessageListener, ABC):
    """Base class for message listeners that feed incoming batches to a recon rule."""
    def __init__(self, rule) -> None:
        # rule: the recon rule instance this handler dispatches messages to
        self._rule = rule
    @abstractmethod
    def handler(self, attributes: tuple, batch):
        """Process a batch of messages received with the given queue attributes."""
        pass
class MessageHandler(AbstractHandler):
    """Listener that runs its single rule on every message of a MessageBatch."""
    def handler(self, attributes: tuple, batch: MessageBatch):
        """Wrap each message of *batch* in a ReconMessage and feed it to the rule.

        The duration of every rule call is recorded in the rule's
        RULE_PROCESSING_TIME metric; any failure is logged with the whole batch.
        """
        try:
            for proto_message in batch.messages:
                message = ReconMessage(proto_message=proto_message)
                process_timer = self._rule.RULE_PROCESSING_TIME
                start_time = time.time()
                self._rule.process(message, attributes)
                process_timer.observe(time.time() - start_time)
                logger.debug(" Processed '%s' id='%s'",
                             proto_message.metadata.message_type,
                             MessageUtils.str_message_id(proto_message))
                logger.debug(" Cache size '%s': %s.", self._rule.get_name(), self._rule.log_groups_size())
        except Exception:
            logger.exception(f'Rule: {self._rule.get_name()}. '
                             f'An error occurred while processing the received message. '
                             f'Message: {MessageToString(batch, as_one_line=True)}')
class GRPCHandler(Check2ReconServicer):
    """gRPC servicer that feeds incoming message group batches to every rule."""
    def __init__(self, rules: list) -> None:
        # rules: list of recon rule instances applied to each message
        self._rules = rules
    def submitGroupBatch(self, request, context):
        """Run all configured rules on every parsed message of the request.

        :return: RequestStatus SUCCESS when the whole batch was processed,
                 ERROR (with the exception text) otherwise
        """
        try:
            logger.debug(f'submitGroupBatch request: {MessageToString(request, as_one_line=True)}')
            messages = [message.message for group in request.groups
                        for message in group.messages if message.HasField('message')]
            for proto_message in messages:
                message = ReconMessage(proto_message=proto_message)
                for rule in self._rules:
                    process_timer = rule.RULE_PROCESSING_TIME
                    start_time = time.time()
                    try:
                        # Fix: pass (message, attributes) in that order, matching
                        # MessageHandler.handler's call `rule.process(message, attributes)`;
                        # the two arguments were previously swapped here.
                        rule.process(message, ())
                    except Exception:
                        logger.exception(f'Rule: {rule.get_name()}. '
                                         f'An error occurred while processing the message. '
                                         f'Message: {MessageToString(proto_message, as_one_line=True)}')
                    finally:
                        process_timer.observe(time.time() - start_time)
                    logger.debug(f"Processed '{proto_message.metadata.message_type}' "
                                 f"id='{MessageUtils.str_message_id(proto_message)}'")
            return RequestStatus(status=RequestStatus.SUCCESS, message='Successfully processed batch')
        except Exception as e:
            logger.exception('submitGroupBatch request failed')
            return RequestStatus(status=RequestStatus.ERROR, message=str(e))
import logging
import time
from abc import ABC, abstractmethod
from google.protobuf.text_format import MessageToString
from th2_common.schema.message.message_listener import MessageListener
from th2_grpc_check2_recon.check2_recon_pb2_grpc import Check2ReconServicer
from th2_grpc_common.common_pb2 import MessageBatch, RequestStatus
from th2_check2_recon.common import MessageUtils
from th2_check2_recon.reconcommon import ReconMessage
logger = logging.getLogger(__name__)
class AbstractHandler(MessageListener, ABC):
def __init__(self, rule) -> None:
self._rule = rule
@abstractmethod
def handler(self, attributes: tuple, batch):
pass
class MessageHandler(AbstractHandler):
def handler(self, attributes: tuple, batch: MessageBatch):
try:
for proto_message in batch.messages:
message = ReconMessage(proto_message=proto_message)
process_timer = self._rule.RULE_PROCESSING_TIME
start_time = time.time()
self._rule.process(message, attributes)
process_timer.observe(time.time() - start_time)
logger.debug(" Processed '%s' id='%s'",
proto_message.metadata.message_type,
MessageUtils.str_message_id(proto_message))
logger.debug(" Cache size '%s': %s.", self._rule.get_name(), self._rule.log_groups_size())
except Exception:
logger.exception(f'Rule: {self._rule.get_name()}. '
f'An error occurred while processing the received message. '
f'Message: {MessageToString(batch, as_one_line=True)}')
class GRPCHandler(Check2ReconServicer):
def __init__(self, rules: list) -> None:
self._rules = rules
def submitGroupBatch(self, request, context):
try:
logger.debug(f'submitGroupBatch request: {MessageToString(request, as_one_line=True)}')
messages = [message.message for group in request.groups
for message in group.messages if message.HasField('message')]
for proto_message in messages:
message = ReconMessage(proto_message=proto_message)
for rule in self._rules:
process_timer = rule.RULE_PROCESSING_TIME
start_time = time.time()
try:
rule.process((), message)
except Exception:
logger.exception(f'Rule: {rule.get_name()}. '
f'An error occurred while processing the message. '
f'Message: {MessageToString(proto_message, as_one_line=True)}')
finally:
process_timer.observe(time.time() - start_time)
logger.debug(f"Processed '{proto_message.metadata.message_type}' "
f"id='{MessageUtils.str_message_id(proto_message)}'")
return RequestStatus(status=RequestStatus.SUCCESS, message='Successfully processed batch')
except Exception as e:
logger.exception('submitGroupBatch request failed')
return RequestStatus(status=RequestStatus.ERROR, message=str(e)) | 0.490724 | 0.066934 |
__all__ = ['odict']
__author__ = 'MF'
__version__ = '1.0'
# Python 2/3 compatibility shim: normalise the text/bytes type names so the
# key-type checks elsewhere in this module work on both interpreters.
try:
    unicode = unicode
except NameError:
    # 'unicode' is undefined, must be Python 3
    str = str
    unicode = str
    bytes = bytes
    basestring = (str,bytes)
    __strtypes__ = [str, unicode]
else:
    # 'unicode' exists, must be Python 2
    str = str
    unicode = unicode
    bytes = str
    basestring = basestring
    __strtypes__ = [str, unicode]
# Add Numpy str type if possible.
try:
    import numpy as __np__
    __strtypes__.append(__np__.string_)
except (ImportError, AttributeError):
    # AttributeError as well: np.string_ was removed in NumPy 2.0, so accessing
    # it raises AttributeError rather than ImportError.
    pass
class odict(object):
    """A simpler implementation than OrderedDict.

    Keeps key insertion order and additionally allows moving and renaming keys.
    Items may be addressed by key name (any type in the module-level
    ``__strtypes__`` list) or by integer position.
    """
    def __init__(self, **kwargs):
        # Parallel lists: __keys__[i] is the key of __values__[i].
        self.__keys__ = []
        self.__values__ = []
        if len(kwargs) > 0:
            for k, v in list(kwargs.items()):
                self[k] = v
    def keys(self):
        """Return the list of keys, in order."""
        return self.__keys__
    def __setitem__(self, key, value):
        """Set by integer position (which must already exist) or by key name
        (appended at the end if new)."""
        if type(key) == int:
            if key > len(self.__keys__) - 1:
                raise Exception("Element %i does not exist" % key)
            else:
                self.__values__[key] = value
        elif type(key) in __strtypes__:
            if key in self.__keys__:
                index = self.__keys__.index(key)
                self.__values__[index] = value
            else:
                self.__keys__.append(key)
                self.__values__.append(value)
        else:
            # (leftover debug prints removed; the offending type is in the message)
            raise Exception("Wrong type for key: %s" % type(key))
    def __getitem__(self, key):
        """Get by integer position or by key name."""
        if type(key) == int:
            return self.__values__[key]
        elif type(key) in __strtypes__:
            if not key in self.__keys__:
                raise KeyError(key)
            index = self.__keys__.index(key)
            return self.__values__[index]
        else:
            raise Exception("Wrong type for key: %s" % type(key))
    def __repr__(self):
        string = "odict({"
        for i, key in enumerate(self.__keys__):
            if i > 0:
                string += ", "
            string += "'%s': %s" % (key, self.__values__[i])
        string += "})"
        return string
    def __contains__(self, key):
        return key in self.__keys__
    def pop(self, key):
        """Remove the specified key and return the corresponding value."""
        if not key in self.__keys__:
            raise KeyError(key)
        index = self.__keys__.index(key)
        self.__keys__.pop(index)
        return self.__values__.pop(index)
    def __len__(self):
        return len(self.__keys__)
    def move(self, key, position):
        """Move an existing key-value pair to a new position."""
        if not key in self.__keys__:
            raise KeyError(key)
        v = self.pop(key)
        self.insert(position, key, v)
    def rename(self, oldkey, newkey):
        """Rename a key in place, keeping its value and position."""
        if not oldkey in self.__keys__:
            raise KeyError(oldkey)
        index = self.__keys__.index(oldkey)
        self.__keys__[index] = newkey
        return
    def insert(self, position, key, value):
        """Insert a key-value pair at a given position."""
        self.__keys__.insert(position, key)
        self.__values__.insert(position, value)
    def __iter__(self):
        return iter(self.__keys__)
    def iteritems(self):
        """an iterator over the (key, values) of D"""
        return iter(zip(self.__keys__, self.__values__))
    def items(self):
        """ (key, values) of D """
        return list(zip(self.__keys__, self.__values__))
    def iterkeys(self):
        """an iterator over the keys of D"""
        return iter(self.__keys__)
    def itervalues(self):
        """an iterator over the values of D"""
        return iter(self.__values__)
    def get(self, key, default=None):
        """Return self[key] when present, else *default* (no exception)."""
        # test membership on the key list directly instead of copying it
        if key in self.__keys__:
            return self[key]
        else:
            return default
    def update(self, other):
        """Merge a dict-like object or an iterable of (key, value) pairs."""
        if hasattr(other, 'keys'):
            for e, v in list(other.items()):
                self[e] = v
        elif hasattr(other, '__iter__'):
            for e, v in other:
                self[e] = v
        else:
            raise AttributeError('argument must be a dict-like object or (key, value) pairs list')
__author__ = 'MF'
__version__ = '1.0'
try:
unicode = unicode
except NameError:
# 'unicode' is undefined, must be Python 3
str = str
unicode = str
bytes = bytes
basestring = (str,bytes)
__strtypes__ = [str, unicode]
else:
# 'unicode' exists, must be Python 2
str = str
unicode = unicode
bytes = str
basestring = basestring
__strtypes__ = [str, unicode]
# Add Numpy str type if possible
try:
import numpy as __np__
__strtypes__.append(__np__.string_)
except ImportError:
pass
class odict(object):
""" A simpler implementation than OrderedDict
This implementation aims at keeping ordering and also at allowing to
move the keys too
"""
def __init__(self, **kwargs):
self.__keys__ = []
self.__values__ = []
if ( len( kwargs) > 0 ):
for k, v in list(kwargs.items()):
self[k] = v
def keys(self):
return self.__keys__
def __setitem__(self, key, value):
if type(key) == int:
if key > len(self.__keys__) - 1:
raise Exception("Element %i does not exist" % key)
else:
self.__values__[key] = value
elif type(key) in __strtypes__:
if key in self.__keys__:
index = self.__keys__.index(key)
self.__values__[index] = value
else:
self.__keys__.append(key)
self.__values__.append(value)
else:
print(type(key))
print(__strtypes__)
raise Exception("Wrong type for key: %s" % type(key))
def __getitem__(self, key):
if type(key) == int:
return self.__values__[key]
elif type(key) in __strtypes__:
if not key in self.__keys__:
raise KeyError
index = self.__keys__.index(key)
return self.__values__[index]
else:
raise Exception("Wrong type for key: %s" % type(key))
def __repr__(self):
string = "odict({"
for i, key in enumerate(self.__keys__):
if i > 0:
string += ", "
string += "'%s': %s" % (key, self.__values__[i])
string += "})"
return string
def __contains__(self, key):
return key in self.__keys__
def pop(self, key):
"""remove specified key and return the corresponding value."""
if not key in self.__keys__:
raise KeyError
index = self.__keys__.index(key)
self.__keys__.pop(index)
return self.__values__.pop(index)
def __len__(self):
return len(self.__keys__)
def move(self, key, position):
""" Move a key-value position """
if not key in self.__keys__:
raise KeyError
v = self.pop(key)
self.insert(position, key, v)
def rename(self, oldkey, newkey):
""" Rename a key """
if not oldkey in self.__keys__:
raise KeyError
index = self.__keys__.index(oldkey)
self.__keys__[index] = newkey
return
def insert(self, position, key, value):
""" Insert a key-value pair at a given position """
self.__keys__.insert(position, key)
self.__values__.insert(position, value)
def __iter__(self):
return iter(self.__keys__)
def iteritems(self):
"""an iterator over the (key, values) of D"""
return iter(zip(self.__keys__, self.__values__))
def items(self):
""" (key, values) of D """
return list(zip(self.__keys__, self.__values__))
def iterkeys(self):
"""an iterator over the keys of D"""
return iter(self.__keys__)
def itervalues(self):
"""an iterator over the values of D"""
return iter(self.__values__)
def get(self, key, default=None):
if key in list(self.keys()):
return self[key]
else:
return default
def update(self, other):
if hasattr(other, 'keys'):
for e, v in list(other.items()):
self[e] = v
elif hasattr(other, '__iter__'):
for e, v in other:
self[e] = v
else:
raise AttributeError('argument must be a dict-like object or (key, value) pairs list') | 0.512693 | 0.174921 |
from __future__ import unicode_literals
from django import forms
from django.utils.text import slugify
from djangocms_attributes_field.fields import AttributesFormField
from . import models
from .constants import (
CONTAINER_LAYOUTS,
SECTION_LAYOUTS,
SLIDESHOW_LAYOUTS,
SLIDE_LAYOUTS,
DEVICE_SIZES,
)
# Build (slug, label) choice pairs from the layout names, with '' -> 'default'.
# Wrapped in list(): under Python 3 zip() yields a one-shot iterator, and an
# exhausted iterator supplied as ChoiceField choices silently produces no
# options after its first use.
CONTAINER_LAYOUTS_CHOICES = list(zip(list(map(lambda s: slugify(s).replace('-', '_'), ('',) + CONTAINER_LAYOUTS)), ('default',) + CONTAINER_LAYOUTS))
SECTION_LAYOUTS_CHOICES = list(zip(list(map(lambda s: slugify(s).replace('-', '_'), ('',) + SECTION_LAYOUTS)), ('default',) + SECTION_LAYOUTS))
SLIDESHOW_LAYOUTS_CHOICES = list(zip(list(map(lambda s: slugify(s).replace('-', '_'), ('',) + SLIDESHOW_LAYOUTS)), ('default',) + SLIDESHOW_LAYOUTS))
SLIDE_LAYOUTS_CHOICES = list(zip(list(map(lambda s: slugify(s).replace('-', '_'), ('',) + SLIDE_LAYOUTS)), ('default',) + SLIDE_LAYOUTS))
class ShowcaseContainerForm(forms.ModelForm):
    """ModelForm for ShowcaseContainer exposing an optional layout selector."""
    layout = forms.ChoiceField(choices=CONTAINER_LAYOUTS_CHOICES, required=False)
    class Meta:
        model = models.ShowcaseContainer
        fields = '__all__'
class ShowcaseSectionBaseForm(forms.ModelForm):
layout = forms.ChoiceField(choices=SECTION_LAYOUTS_CHOICES, required=False)
class Meta:
model = models.ShowcaseSection
fields = '__all__'
class ShowcaseSlideshowBaseForm(forms.ModelForm):
layout = forms.ChoiceField(choices=SLIDESHOW_LAYOUTS_CHOICES, required=False)
class Meta:
model = models.ShowcaseSlideshow
fields = '__all__'
class ShowcaseSlideForm(forms.ModelForm):
layout = forms.ChoiceField(choices=SLIDE_LAYOUTS_CHOICES, required=False)
class Meta:
model = models.ShowcaseSlide
fields = '__all__'
extra_fields_column = {}
for size in DEVICE_SIZES:
extra_fields_column['{}_hide'.format(size)] = forms.BooleanField(
label='hide {}'.format(size),
required=False,
)
ShowcaseSlideshowForm = type(
str('Bootstrap4GridColumnBaseForm'),
(ShowcaseSlideshowBaseForm,),
extra_fields_column,
)
ShowcaseSectionForm = type(
str('Bootstrap4GridColumnBaseForm'),
(ShowcaseSectionBaseForm,),
extra_fields_column,
) | js_showcase/forms.py | from __future__ import unicode_literals
from django import forms
from django.utils.text import slugify
from djangocms_attributes_field.fields import AttributesFormField
from . import models
from .constants import (
CONTAINER_LAYOUTS,
SECTION_LAYOUTS,
SLIDESHOW_LAYOUTS,
SLIDE_LAYOUTS,
DEVICE_SIZES,
)
CONTAINER_LAYOUTS_CHOICES = zip(list(map(lambda s: slugify(s).replace('-', '_'), ('',) + CONTAINER_LAYOUTS)), ('default',) + CONTAINER_LAYOUTS)
SECTION_LAYOUTS_CHOICES = zip(list(map(lambda s: slugify(s).replace('-', '_'), ('',) + SECTION_LAYOUTS)), ('default',) + SECTION_LAYOUTS)
SLIDESHOW_LAYOUTS_CHOICES = zip(list(map(lambda s: slugify(s).replace('-', '_'), ('',) + SLIDESHOW_LAYOUTS)), ('default',) + SLIDESHOW_LAYOUTS)
SLIDE_LAYOUTS_CHOICES = zip(list(map(lambda s: slugify(s).replace('-', '_'), ('',) + SLIDE_LAYOUTS)), ('default',) + SLIDE_LAYOUTS)
class ShowcaseContainerForm(forms.ModelForm):
layout = forms.ChoiceField(choices=CONTAINER_LAYOUTS_CHOICES, required=False)
class Meta:
model = models.ShowcaseContainer
fields = '__all__'
class ShowcaseSectionBaseForm(forms.ModelForm):
layout = forms.ChoiceField(choices=SECTION_LAYOUTS_CHOICES, required=False)
class Meta:
model = models.ShowcaseSection
fields = '__all__'
class ShowcaseSlideshowBaseForm(forms.ModelForm):
layout = forms.ChoiceField(choices=SLIDESHOW_LAYOUTS_CHOICES, required=False)
class Meta:
model = models.ShowcaseSlideshow
fields = '__all__'
class ShowcaseSlideForm(forms.ModelForm):
layout = forms.ChoiceField(choices=SLIDE_LAYOUTS_CHOICES, required=False)
class Meta:
model = models.ShowcaseSlide
fields = '__all__'
extra_fields_column = {}
for size in DEVICE_SIZES:
extra_fields_column['{}_hide'.format(size)] = forms.BooleanField(
label='hide {}'.format(size),
required=False,
)
ShowcaseSlideshowForm = type(
str('Bootstrap4GridColumnBaseForm'),
(ShowcaseSlideshowBaseForm,),
extra_fields_column,
)
ShowcaseSectionForm = type(
str('Bootstrap4GridColumnBaseForm'),
(ShowcaseSectionBaseForm,),
extra_fields_column,
) | 0.427755 | 0.106644 |
from typing import Any, Callable, Dict, List, Optional, Text, Union
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import export_base
from official.vision import configs
from official.vision.beta.projects.yolo.configs.yolo import YoloTask
from official.vision.beta.projects.yolo.modeling import factory as yolo_factory
from official.vision.beta.projects.yolo.modeling.backbones import darknet # pylint: disable=unused-import
from official.vision.beta.projects.yolo.modeling.decoders import yolo_decoder # pylint: disable=unused-import
from official.vision.beta.projects.yolo.serving import model_fn as yolo_model_fn
from official.vision.dataloaders import classification_input
from official.vision.modeling import factory
from official.vision.serving import export_utils
class ExportModule(export_base.ExportModule):
"""Base Export Module."""
def __init__(self,
params: cfg.ExperimentConfig,
model: tf.keras.Model,
input_signature: Union[tf.TensorSpec, Dict[str, tf.TensorSpec]],
preprocessor: Optional[Callable[..., Any]] = None,
inference_step: Optional[Callable[..., Any]] = None,
postprocessor: Optional[Callable[..., Any]] = None,
eval_postprocessor: Optional[Callable[..., Any]] = None):
"""Initializes a module for export.
Args:
params: A dataclass for parameters to the module.
model: A tf.keras.Model instance to be exported.
input_signature: tf.TensorSpec, e.g.
tf.TensorSpec(shape=[None, 224, 224, 3], dtype=tf.uint8)
preprocessor: An optional callable to preprocess the inputs.
inference_step: An optional callable to forward-pass the model.
postprocessor: An optional callable to postprocess the model outputs.
eval_postprocessor: An optional callable to postprocess model outputs
used for model evaluation.
"""
super().__init__(
params,
model=model,
preprocessor=preprocessor,
inference_step=inference_step,
postprocessor=postprocessor)
self.eval_postprocessor = eval_postprocessor
self.input_signature = input_signature
@tf.function
def serve(self, inputs: Any) -> Any:
x = self.preprocessor(inputs=inputs) if self.preprocessor else inputs
x = self.inference_step(x)
x = self.postprocessor(x) if self.postprocessor else x
return x
@tf.function
def serve_eval(self, inputs: Any) -> Any:
x = self.preprocessor(inputs=inputs) if self.preprocessor else inputs
x = self.inference_step(x)
x = self.eval_postprocessor(x) if self.eval_postprocessor else x
return x
def get_inference_signatures(
self, function_keys: Dict[Text, Text]):
"""Gets defined function signatures.
Args:
function_keys: A dictionary with keys as the function to create signature
for and values as the signature keys when returns.
Returns:
A dictionary with key as signature key and value as concrete functions
that can be used for tf.saved_model.save.
"""
signatures = {}
for _, def_name in function_keys.items():
if 'eval' in def_name and self.eval_postprocessor:
signatures[def_name] = self.serve_eval.get_concrete_function(
self.input_signature)
else:
signatures[def_name] = self.serve.get_concrete_function(
self.input_signature)
return signatures
def create_classification_export_module(
params: cfg.ExperimentConfig,
input_type: str,
batch_size: int,
input_image_size: List[int],
num_channels: int = 3) -> ExportModule:
"""Creates classification export module."""
input_signature = export_utils.get_image_input_signatures(
input_type, batch_size, input_image_size, num_channels)
input_specs = tf.keras.layers.InputSpec(shape=[batch_size] +
input_image_size + [num_channels])
model = factory.build_classification_model(
input_specs=input_specs,
model_config=params.task.model,
l2_regularizer=None)
def preprocess_fn(inputs):
image_tensor = export_utils.parse_image(inputs, input_type,
input_image_size, num_channels)
# If input_type is `tflite`, do not apply image preprocessing.
if input_type == 'tflite':
return image_tensor
def preprocess_image_fn(inputs):
return classification_input.Parser.inference_fn(inputs, input_image_size,
num_channels)
images = tf.map_fn(
preprocess_image_fn,
elems=image_tensor,
fn_output_signature=tf.TensorSpec(
shape=input_image_size + [num_channels], dtype=tf.float32))
return images
def postprocess_fn(logits):
probs = tf.nn.softmax(logits)
return {'logits': logits, 'probs': probs}
export_module = ExportModule(
params,
model=model,
input_signature=input_signature,
preprocessor=preprocess_fn,
postprocessor=postprocess_fn)
return export_module
def create_yolo_export_module(
params: cfg.ExperimentConfig,
input_type: str,
batch_size: int,
input_image_size: List[int],
num_channels: int = 3) -> ExportModule:
"""Creates YOLO export module."""
input_signature = export_utils.get_image_input_signatures(
input_type, batch_size, input_image_size, num_channels)
input_specs = tf.keras.layers.InputSpec(shape=[batch_size] +
input_image_size + [num_channels])
model, _ = yolo_factory.build_yolo(
input_specs=input_specs,
model_config=params.task.model,
l2_regularization=None)
def preprocess_fn(inputs):
image_tensor = export_utils.parse_image(inputs, input_type,
input_image_size, num_channels)
# If input_type is `tflite`, do not apply image preprocessing.
if input_type == 'tflite':
return image_tensor
def preprocess_image_fn(inputs):
image = tf.cast(inputs, dtype=tf.float32)
image = image / 255.
(image, image_info) = yolo_model_fn.letterbox(
image,
input_image_size,
letter_box=params.task.validation_data.parser.letter_box)
return image, image_info
images_spec = tf.TensorSpec(shape=input_image_size + [3], dtype=tf.float32)
image_info_spec = tf.TensorSpec(shape=[4, 2], dtype=tf.float32)
images, image_info = tf.nest.map_structure(
tf.identity,
tf.map_fn(
preprocess_image_fn,
elems=image_tensor,
fn_output_signature=(images_spec, image_info_spec),
parallel_iterations=32))
return images, image_info
def inference_steps(inputs, model):
images, image_info = inputs
detection = model(images, training=False)
detection['bbox'] = yolo_model_fn.undo_info(
detection['bbox'],
detection['num_detections'],
image_info,
expand=False)
final_outputs = {
'detection_boxes': detection['bbox'],
'detection_scores': detection['confidence'],
'detection_classes': detection['classes'],
'num_detections': detection['num_detections']
}
return final_outputs
export_module = ExportModule(
params,
model=model,
input_signature=input_signature,
preprocessor=preprocess_fn,
inference_step=inference_steps)
return export_module
def get_export_module(params: cfg.ExperimentConfig,
input_type: str,
batch_size: Optional[int],
input_image_size: List[int],
num_channels: int = 3) -> ExportModule:
"""Factory for export modules."""
if isinstance(params.task,
configs.image_classification.ImageClassificationTask):
export_module = create_classification_export_module(params, input_type,
batch_size,
input_image_size,
num_channels)
elif isinstance(params.task, YoloTask):
export_module = create_yolo_export_module(params, input_type, batch_size,
input_image_size, num_channels)
else:
raise ValueError('Export module not implemented for {} task.'.format(
type(params.task)))
return export_module | official/vision/beta/projects/yolo/serving/export_module_factory.py | from typing import Any, Callable, Dict, List, Optional, Text, Union
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import export_base
from official.vision import configs
from official.vision.beta.projects.yolo.configs.yolo import YoloTask
from official.vision.beta.projects.yolo.modeling import factory as yolo_factory
from official.vision.beta.projects.yolo.modeling.backbones import darknet # pylint: disable=unused-import
from official.vision.beta.projects.yolo.modeling.decoders import yolo_decoder # pylint: disable=unused-import
from official.vision.beta.projects.yolo.serving import model_fn as yolo_model_fn
from official.vision.dataloaders import classification_input
from official.vision.modeling import factory
from official.vision.serving import export_utils
class ExportModule(export_base.ExportModule):
"""Base Export Module."""
def __init__(self,
params: cfg.ExperimentConfig,
model: tf.keras.Model,
input_signature: Union[tf.TensorSpec, Dict[str, tf.TensorSpec]],
preprocessor: Optional[Callable[..., Any]] = None,
inference_step: Optional[Callable[..., Any]] = None,
postprocessor: Optional[Callable[..., Any]] = None,
eval_postprocessor: Optional[Callable[..., Any]] = None):
"""Initializes a module for export.
Args:
params: A dataclass for parameters to the module.
model: A tf.keras.Model instance to be exported.
input_signature: tf.TensorSpec, e.g.
tf.TensorSpec(shape=[None, 224, 224, 3], dtype=tf.uint8)
preprocessor: An optional callable to preprocess the inputs.
inference_step: An optional callable to forward-pass the model.
postprocessor: An optional callable to postprocess the model outputs.
eval_postprocessor: An optional callable to postprocess model outputs
used for model evaluation.
"""
super().__init__(
params,
model=model,
preprocessor=preprocessor,
inference_step=inference_step,
postprocessor=postprocessor)
self.eval_postprocessor = eval_postprocessor
self.input_signature = input_signature
@tf.function
def serve(self, inputs: Any) -> Any:
x = self.preprocessor(inputs=inputs) if self.preprocessor else inputs
x = self.inference_step(x)
x = self.postprocessor(x) if self.postprocessor else x
return x
@tf.function
def serve_eval(self, inputs: Any) -> Any:
x = self.preprocessor(inputs=inputs) if self.preprocessor else inputs
x = self.inference_step(x)
x = self.eval_postprocessor(x) if self.eval_postprocessor else x
return x
def get_inference_signatures(
self, function_keys: Dict[Text, Text]):
"""Gets defined function signatures.
Args:
function_keys: A dictionary with keys as the function to create signature
for and values as the signature keys when returns.
Returns:
A dictionary with key as signature key and value as concrete functions
that can be used for tf.saved_model.save.
"""
signatures = {}
for _, def_name in function_keys.items():
if 'eval' in def_name and self.eval_postprocessor:
signatures[def_name] = self.serve_eval.get_concrete_function(
self.input_signature)
else:
signatures[def_name] = self.serve.get_concrete_function(
self.input_signature)
return signatures
def create_classification_export_module(
params: cfg.ExperimentConfig,
input_type: str,
batch_size: int,
input_image_size: List[int],
num_channels: int = 3) -> ExportModule:
"""Creates classification export module."""
input_signature = export_utils.get_image_input_signatures(
input_type, batch_size, input_image_size, num_channels)
input_specs = tf.keras.layers.InputSpec(shape=[batch_size] +
input_image_size + [num_channels])
model = factory.build_classification_model(
input_specs=input_specs,
model_config=params.task.model,
l2_regularizer=None)
def preprocess_fn(inputs):
image_tensor = export_utils.parse_image(inputs, input_type,
input_image_size, num_channels)
# If input_type is `tflite`, do not apply image preprocessing.
if input_type == 'tflite':
return image_tensor
def preprocess_image_fn(inputs):
return classification_input.Parser.inference_fn(inputs, input_image_size,
num_channels)
images = tf.map_fn(
preprocess_image_fn,
elems=image_tensor,
fn_output_signature=tf.TensorSpec(
shape=input_image_size + [num_channels], dtype=tf.float32))
return images
def postprocess_fn(logits):
probs = tf.nn.softmax(logits)
return {'logits': logits, 'probs': probs}
export_module = ExportModule(
params,
model=model,
input_signature=input_signature,
preprocessor=preprocess_fn,
postprocessor=postprocess_fn)
return export_module
def create_yolo_export_module(
params: cfg.ExperimentConfig,
input_type: str,
batch_size: int,
input_image_size: List[int],
num_channels: int = 3) -> ExportModule:
"""Creates YOLO export module."""
input_signature = export_utils.get_image_input_signatures(
input_type, batch_size, input_image_size, num_channels)
input_specs = tf.keras.layers.InputSpec(shape=[batch_size] +
input_image_size + [num_channels])
model, _ = yolo_factory.build_yolo(
input_specs=input_specs,
model_config=params.task.model,
l2_regularization=None)
def preprocess_fn(inputs):
image_tensor = export_utils.parse_image(inputs, input_type,
input_image_size, num_channels)
# If input_type is `tflite`, do not apply image preprocessing.
if input_type == 'tflite':
return image_tensor
def preprocess_image_fn(inputs):
image = tf.cast(inputs, dtype=tf.float32)
image = image / 255.
(image, image_info) = yolo_model_fn.letterbox(
image,
input_image_size,
letter_box=params.task.validation_data.parser.letter_box)
return image, image_info
images_spec = tf.TensorSpec(shape=input_image_size + [3], dtype=tf.float32)
image_info_spec = tf.TensorSpec(shape=[4, 2], dtype=tf.float32)
images, image_info = tf.nest.map_structure(
tf.identity,
tf.map_fn(
preprocess_image_fn,
elems=image_tensor,
fn_output_signature=(images_spec, image_info_spec),
parallel_iterations=32))
return images, image_info
def inference_steps(inputs, model):
images, image_info = inputs
detection = model(images, training=False)
detection['bbox'] = yolo_model_fn.undo_info(
detection['bbox'],
detection['num_detections'],
image_info,
expand=False)
final_outputs = {
'detection_boxes': detection['bbox'],
'detection_scores': detection['confidence'],
'detection_classes': detection['classes'],
'num_detections': detection['num_detections']
}
return final_outputs
export_module = ExportModule(
params,
model=model,
input_signature=input_signature,
preprocessor=preprocess_fn,
inference_step=inference_steps)
return export_module
def get_export_module(params: cfg.ExperimentConfig,
input_type: str,
batch_size: Optional[int],
input_image_size: List[int],
num_channels: int = 3) -> ExportModule:
"""Factory for export modules."""
if isinstance(params.task,
configs.image_classification.ImageClassificationTask):
export_module = create_classification_export_module(params, input_type,
batch_size,
input_image_size,
num_channels)
elif isinstance(params.task, YoloTask):
export_module = create_yolo_export_module(params, input_type, batch_size,
input_image_size, num_channels)
else:
raise ValueError('Export module not implemented for {} task.'.format(
type(params.task)))
return export_module | 0.958518 | 0.285042 |
# pylint: disable=import-error, too-many-locals, too-many-statements
# pylint: disable=pointless-string-statement, no-member
"""
average_ckpt.py: Tensorflow 2.1 averaging model weights.
It was referred from
https://stackoverflow.com/questions/48212110/average-weights-in-keras-models
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import os
import sys
import shutil
import numpy as np
import tensorflow as tf
from tfsr.helper.common_helper import ParseOption, Logger
from tfsr.helper.misc_helper import Util
import tfsr.helper.train_helper as th
from tfsr.model.sequence_router_einsum import SequenceRouter as SRFE
from tfsr.model.sequence_router_lowmemory import SequenceRouter as SRFL
from tfsr.model.sequence_router_naive import SequenceRouter as SRFN
from tfsr.model.lstm_encoder import LstmEncoder
from tfsr.model.cnn_encoder import CNNEncoder
from tfsr.model.cnn_stride_encoder import CNNStrideEncoder
def main():
"""
A main function to make averaged checkpoints.
"""
# pylint: disable=too-many-branches
# Initializing
Util.prepare_device()
logger = Logger(name="speech_transformer", level=Logger.DEBUG).logger
config = ParseOption(sys.argv, logger).args
# Loading a vocabulary
_, _, dec_in_dim, dec_out_dim =\
Util.load_vocab(Util.get_file_path(config.path_base,
config.path_vocab), logger)
dec_out_dim = dec_in_dim + 1
logger.info("The modified output Dimension %d", dec_out_dim)
# Model selection
# pylint: disable=invalid-name
model = None
if config.model_type.endswith("lstm"):
model = LstmEncoder(config, dec_out_dim)
elif config.model_type in ["cnn", "conv", "convolution"]:
if config.model_conv_is_mp:
model = CNNEncoder(config, logger, dec_out_dim)
else:
model = CNNStrideEncoder(config, logger, dec_out_dim)
else:
if config.model_caps_layer_time is None:
if config.model_caps_type == "lowmemory":
model = SRFL(config, logger, dec_out_dim)
elif config.model_caps_type == "einsum":
model = SRFE(config, logger, dec_out_dim)
elif config.model_caps_type == "naive":
model = SRFN(config, logger, dec_out_dim)
else:
logger.critical("LSRF is deprecated")
# Setting optimizer and checkpoint manager
optimizer = th.get_optimizer(config)
# Creating or loading a check point
ckpt = tf.train.Checkpoint(optimizer=optimizer, model=model)
ckpt_manager = \
tf.train.CheckpointManager(ckpt, config.path_ckpt, max_to_keep=None)
ckpts = []
for ckpt in ckpt_manager.checkpoints:
if config.train_max_epoch == 0 or \
int(ckpt.split("-")[-1]) <= config.train_max_epoch:
ckpts.append(ckpt)
optimizer = th.get_optimizer(config)
models = []
for ckpt_path in ckpts[-config.model_average_num:]:
logger.info(ckpt_path)
model = None
if config.model_type.endswith("lstm"):
model = LstmEncoder(config, dec_out_dim)
elif config.model_type in ["cnn", "conv", "convolution"]:
if config.model_conv_is_mp:
model = CNNEncoder(config, logger, dec_out_dim)
else:
model = CNNStrideEncoder(config, logger, dec_out_dim)
else:
if config.model_caps_layer_time is None:
if config.model_caps_type == "lowmemory":
model = SRFL(config, logger, dec_out_dim)
elif config.model_caps_type == "einsum":
model = SRFE(config, logger, dec_out_dim)
elif config.model_caps_type == "naive":
model = SRFN(config, logger, dec_out_dim)
else:
logger.critical("LSRF is deprecated")
# Creating or loading a check point
ckpt = tf.train.Checkpoint(optimizer=optimizer, model=model)
ckpt.restore(ckpt_path).expect_partial()
dummy_feats = tf.random.uniform([1, 20, config.feat_dim], dtype=tf.float32)
dummy_in_len = tf.ones(1) * 20
model(dummy_feats, input_lengths=dummy_in_len, training=False,
mask=None, att_mask=None)
models.append(model)
logger.info("Total %d models were loaded.", len(models))
# Computing averaged weights
weights = list()
for model in models:
weights.append(model.get_weights())
new_weights = list()
for weights_list_tuple in zip(*weights):
new_weights.append(
np.array([np.array(w).mean(axis=0) for w in zip(*weights_list_tuple)])
)
# Saving
model = None
if config.model_type.endswith("lstm"):
model = LstmEncoder(config, dec_out_dim)
elif config.model_type in ["cnn", "conv", "convolution"]:
if config.model_conv_is_mp:
model = CNNEncoder(config, logger, dec_out_dim)
else:
model = CNNStrideEncoder(config, logger, dec_out_dim)
else:
if config.model_caps_layer_time is None:
if config.model_caps_type == "lowmemory":
model = SRFL(config, logger, dec_out_dim)
elif config.model_caps_type == "einsum":
model = SRFE(config, logger, dec_out_dim)
elif config.model_caps_type == "naive":
model = SRFN(config, logger, dec_out_dim)
else:
logger.critical("LSRF is deprecated")
dummy_feats = tf.random.uniform([10, 20, config.feat_dim], dtype=tf.float32)
dummy_in_len = tf.ones(10) * 20
model(dummy_feats, input_lengths=dummy_in_len, training=False,
mask=None, att_mask=None)
model.set_weights(weights[0])
ckpt = tf.train.Checkpoint(optimizer=optimizer, model=model)
if os.path.exists(config.path_ckpt + "/avg"):
shutil.rmtree(config.path_ckpt + "/avg")
ckpt_manager = \
tf.train.CheckpointManager(ckpt, config.path_ckpt + "/avg", max_to_keep=1)
logger.info("Saved to %s", ckpt_manager.save())
if __name__ == "__main__":
main() | tfsr/utils/average_ckpt_sr.py |
# pylint: disable=import-error, too-many-locals, too-many-statements
# pylint: disable=pointless-string-statement, no-member
"""
average_ckpt.py: Tensorflow 2.1 averaging model weights.
It was referred from
https://stackoverflow.com/questions/48212110/average-weights-in-keras-models
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import os
import sys
import shutil
import numpy as np
import tensorflow as tf
from tfsr.helper.common_helper import ParseOption, Logger
from tfsr.helper.misc_helper import Util
import tfsr.helper.train_helper as th
from tfsr.model.sequence_router_einsum import SequenceRouter as SRFE
from tfsr.model.sequence_router_lowmemory import SequenceRouter as SRFL
from tfsr.model.sequence_router_naive import SequenceRouter as SRFN
from tfsr.model.lstm_encoder import LstmEncoder
from tfsr.model.cnn_encoder import CNNEncoder
from tfsr.model.cnn_stride_encoder import CNNStrideEncoder
def main():
"""
A main function to make averaged checkpoints.
"""
# pylint: disable=too-many-branches
# Initializing
Util.prepare_device()
logger = Logger(name="speech_transformer", level=Logger.DEBUG).logger
config = ParseOption(sys.argv, logger).args
# Loading a vocabulary
_, _, dec_in_dim, dec_out_dim =\
Util.load_vocab(Util.get_file_path(config.path_base,
config.path_vocab), logger)
dec_out_dim = dec_in_dim + 1
logger.info("The modified output Dimension %d", dec_out_dim)
# Model selection
# pylint: disable=invalid-name
model = None
if config.model_type.endswith("lstm"):
model = LstmEncoder(config, dec_out_dim)
elif config.model_type in ["cnn", "conv", "convolution"]:
if config.model_conv_is_mp:
model = CNNEncoder(config, logger, dec_out_dim)
else:
model = CNNStrideEncoder(config, logger, dec_out_dim)
else:
if config.model_caps_layer_time is None:
if config.model_caps_type == "lowmemory":
model = SRFL(config, logger, dec_out_dim)
elif config.model_caps_type == "einsum":
model = SRFE(config, logger, dec_out_dim)
elif config.model_caps_type == "naive":
model = SRFN(config, logger, dec_out_dim)
else:
logger.critical("LSRF is deprecated")
# Setting optimizer and checkpoint manager
optimizer = th.get_optimizer(config)
# Creating or loading a check point
ckpt = tf.train.Checkpoint(optimizer=optimizer, model=model)
ckpt_manager = \
tf.train.CheckpointManager(ckpt, config.path_ckpt, max_to_keep=None)
ckpts = []
for ckpt in ckpt_manager.checkpoints:
if config.train_max_epoch == 0 or \
int(ckpt.split("-")[-1]) <= config.train_max_epoch:
ckpts.append(ckpt)
optimizer = th.get_optimizer(config)
models = []
for ckpt_path in ckpts[-config.model_average_num:]:
logger.info(ckpt_path)
model = None
if config.model_type.endswith("lstm"):
model = LstmEncoder(config, dec_out_dim)
elif config.model_type in ["cnn", "conv", "convolution"]:
if config.model_conv_is_mp:
model = CNNEncoder(config, logger, dec_out_dim)
else:
model = CNNStrideEncoder(config, logger, dec_out_dim)
else:
if config.model_caps_layer_time is None:
if config.model_caps_type == "lowmemory":
model = SRFL(config, logger, dec_out_dim)
elif config.model_caps_type == "einsum":
model = SRFE(config, logger, dec_out_dim)
elif config.model_caps_type == "naive":
model = SRFN(config, logger, dec_out_dim)
else:
logger.critical("LSRF is deprecated")
# Creating or loading a check point
ckpt = tf.train.Checkpoint(optimizer=optimizer, model=model)
ckpt.restore(ckpt_path).expect_partial()
dummy_feats = tf.random.uniform([1, 20, config.feat_dim], dtype=tf.float32)
dummy_in_len = tf.ones(1) * 20
model(dummy_feats, input_lengths=dummy_in_len, training=False,
mask=None, att_mask=None)
models.append(model)
logger.info("Total %d models were loaded.", len(models))
# Computing averaged weights
weights = list()
for model in models:
weights.append(model.get_weights())
new_weights = list()
for weights_list_tuple in zip(*weights):
new_weights.append(
np.array([np.array(w).mean(axis=0) for w in zip(*weights_list_tuple)])
)
# Saving
model = None
if config.model_type.endswith("lstm"):
model = LstmEncoder(config, dec_out_dim)
elif config.model_type in ["cnn", "conv", "convolution"]:
if config.model_conv_is_mp:
model = CNNEncoder(config, logger, dec_out_dim)
else:
model = CNNStrideEncoder(config, logger, dec_out_dim)
else:
if config.model_caps_layer_time is None:
if config.model_caps_type == "lowmemory":
model = SRFL(config, logger, dec_out_dim)
elif config.model_caps_type == "einsum":
model = SRFE(config, logger, dec_out_dim)
elif config.model_caps_type == "naive":
model = SRFN(config, logger, dec_out_dim)
else:
logger.critical("LSRF is deprecated")
dummy_feats = tf.random.uniform([10, 20, config.feat_dim], dtype=tf.float32)
dummy_in_len = tf.ones(10) * 20
model(dummy_feats, input_lengths=dummy_in_len, training=False,
mask=None, att_mask=None)
model.set_weights(weights[0])
ckpt = tf.train.Checkpoint(optimizer=optimizer, model=model)
if os.path.exists(config.path_ckpt + "/avg"):
shutil.rmtree(config.path_ckpt + "/avg")
ckpt_manager = \
tf.train.CheckpointManager(ckpt, config.path_ckpt + "/avg", max_to_keep=1)
logger.info("Saved to %s", ckpt_manager.save())
if __name__ == "__main__":
main() | 0.602412 | 0.171824 |
from __future__ import unicode_literals
import dataent
from dataent import _
import dataent.defaults
from dataent.modules.import_file import get_file_path, read_doc_from_file
from dataent.translate import send_translations
from dataent.core.doctype.doctype.doctype import (clear_permissions_cache,
validate_permissions_for_doctype)
from dataent.permissions import (reset_perms, get_linked_doctypes, get_all_perms,
setup_custom_perms, add_permission, update_permission_property)
not_allowed_in_permission_manager = ["DocType", "Patch Log", "Module Def", "Transaction Log"]
@dataent.whitelist()
def get_roles_and_doctypes():
	"""Return the selectable doctypes and roles for the Permission Manager UI.

	Both lists are restricted to the active domains and exclude entries the
	permission manager should not touch (see ``not_allowed_in_permission_manager``
	and the Administrator role). Each entry is a ``{"label", "value"}`` dict,
	sorted by (translated) label.
	"""
	dataent.only_for("System Manager")
	# push DocPerm field translations to the client before it renders
	send_translations(dataent.get_lang_dict("doctype", "DocPerm"))

	active_domains = dataent.get_active_domains()

	# records with no domain restriction OR restricted to an active domain
	domain_or_filters = {
		"ifnull(restrict_to_domain, '')": "",
		"restrict_to_domain": ("in", active_domains)
	}

	doctypes = dataent.get_all("DocType", filters={
		"istable": 0,
		"name": ("not in", ",".join(not_allowed_in_permission_manager)),
	}, or_filters=domain_or_filters, fields=["name"])

	roles = dataent.get_all("Role", filters={
		"name": ("not in", "Administrator"),
		"disabled": 0,
	}, or_filters=domain_or_filters, fields=["name"])

	def as_sorted_options(records):
		# translated label for display, raw name as the stored value
		options = [{"label": _(r.get("name")), "value": r.get("name")} for r in records]
		return sorted(options, key=lambda option: option['label'])

	return {
		"doctypes": as_sorted_options(doctypes),
		"roles": as_sorted_options(roles)
	}
@dataent.whitelist()
def get_permissions(doctype=None, role=None):
	"""Return permission rules, filtered by doctype and/or role.

	If ``role`` is given, rules come from ``get_all_perms`` for that role
	(optionally narrowed to ``doctype``). Otherwise rules are read from
	`Custom DocPerm` for the doctype, falling back to the standard `DocPerm`
	rows when no customization exists.

	Each returned row is annotated with ``linked_doctypes`` and, when meta is
	available, ``is_submittable`` for the parent doctype.
	"""
	dataent.only_for("System Manager")
	if role:
		out = get_all_perms(role)
		if doctype:
			out = [p for p in out if p.parent == doctype]
	else:
		out = dataent.get_all('Custom DocPerm', fields='*', filters=dict(parent=doctype), order_by="permlevel")
		if not out:
			# no customized rules — fall back to the standard ones
			out = dataent.get_all('DocPerm', fields='*', filters=dict(parent=doctype), order_by="permlevel")

	# cache linked-doctype lookups: rows sharing a parent share the result
	linked_doctypes = {}
	for d in out:
		if d.parent not in linked_doctypes:
			linked_doctypes[d.parent] = get_linked_doctypes(d.parent)
		d.linked_doctypes = linked_doctypes[d.parent]
		meta = dataent.get_meta(d.parent)
		if meta:
			d.is_submittable = meta.is_submittable

	return out
@dataent.whitelist()
def add(parent, role, permlevel):
	"""Add a new permission rule for *role* on doctype *parent* at *permlevel*.

	Thin whitelisted wrapper around ``dataent.permissions.add_permission``;
	restricted to System Managers.
	"""
	dataent.only_for("System Manager")
	add_permission(parent, role, permlevel)
@dataent.whitelist()
def update(doctype, role, permlevel, ptype, value=None):
	"""Set one permission property (*ptype*) to *value* on the matching rule.

	Returns ``'refresh'`` when ``update_permission_property`` reports a change
	(so the client reloads), otherwise ``None``.
	"""
	dataent.only_for("System Manager")
	changed = update_permission_property(doctype, role, permlevel, ptype, value)
	if changed:
		return 'refresh'
	return None
@dataent.whitelist()
def remove(doctype, role, permlevel):
	"""Delete the custom permission rule matching (doctype, role, permlevel).

	NOTE(review): the rule is deleted *before* the "at least one rule must
	remain" check — presumably ``dataent.throw`` aborts the transaction so the
	delete is rolled back; confirm against the framework's request handling
	before reordering these statements.
	"""
	dataent.only_for("System Manager")
	# make sure Custom DocPerm rows exist for this doctype before deleting one
	setup_custom_perms(doctype)

	name = dataent.get_value('Custom DocPerm', dict(parent=doctype, role=role, permlevel=permlevel))

	dataent.db.sql('delete from `tabCustom DocPerm` where name=%s', name)
	if not dataent.get_all('Custom DocPerm', dict(parent=doctype)):
		# refuse to leave the doctype with no permission rules at all
		dataent.throw(_('There must be atleast one permission rule.'), title=_('Cannot Remove'))

	validate_permissions_for_doctype(doctype, for_remove=True)
@dataent.whitelist()
def reset(doctype):
	"""Reset *doctype*'s permissions to the standard ones and clear the cache.

	Thin whitelisted wrapper around ``reset_perms`` +
	``clear_permissions_cache``; restricted to System Managers.
	"""
	dataent.only_for("System Manager")
	reset_perms(doctype)
	clear_permissions_cache(doctype)
@dataent.whitelist()
def get_users_with_role(role):
	"""Return names of enabled users (excluding Administrator) holding *role*."""
	dataent.only_for("System Manager")
	rows = dataent.db.sql("""select distinct tabUser.name
		from `tabHas Role`, tabUser where
		`tabHas Role`.role=%s
		and tabUser.name != "Administrator"
		and `tabHas Role`.parent = tabUser.name
		and tabUser.enabled=1""", role)
	# rows are 1-column tuples; unwrap to a flat list of user names
	return [row[0] for row in rows]
@dataent.whitelist()
def get_standard_permissions(doctype):
	"""Return the default (non-customized) permission rules for *doctype*.

	For custom doctypes the rules are read from the DocType document itself;
	for standard doctypes they are read from the module's DocType .json file.
	Also used to set up permissions via patch.

	Fix: removed stray non-Python text that had been fused onto the final
	return line, which made the function a syntax error.
	"""
	dataent.only_for("System Manager")
	meta = dataent.get_meta(doctype)
	if meta.custom:
		# custom doctypes have no .json file on disk — read from the document
		doc = dataent.get_doc('DocType', doctype)
		return [p.as_dict() for p in doc.permissions]
	else:
		# also used to setup permissions via patch
		path = get_file_path(meta.module, "DocType", doctype)
		return read_doc_from_file(path).get("permissions")
from __future__ import unicode_literals
import dataent
from dataent import _
import dataent.defaults
from dataent.modules.import_file import get_file_path, read_doc_from_file
from dataent.translate import send_translations
from dataent.core.doctype.doctype.doctype import (clear_permissions_cache,
validate_permissions_for_doctype)
from dataent.permissions import (reset_perms, get_linked_doctypes, get_all_perms,
setup_custom_perms, add_permission, update_permission_property)
not_allowed_in_permission_manager = ["DocType", "Patch Log", "Module Def", "Transaction Log"]
@dataent.whitelist()
def get_roles_and_doctypes():
	"""Return selectable doctypes and roles for the Permission Manager UI.

	Duplicate of the definition earlier in this file. Lists are restricted to
	active domains; each entry is a ``{"label", "value"}`` dict sorted by
	translated label.
	"""
	dataent.only_for("System Manager")
	# push DocPerm field translations to the client before it renders
	send_translations(dataent.get_lang_dict("doctype", "DocPerm"))
	active_domains = dataent.get_active_domains()
	doctypes = dataent.get_all("DocType", filters={
		"istable": 0,
		"name": ("not in", ",".join(not_allowed_in_permission_manager)),
	}, or_filters={
		# no domain restriction, or restricted to an active domain
		"ifnull(restrict_to_domain, '')": "",
		"restrict_to_domain": ("in", active_domains)
	}, fields=["name"])
	roles = dataent.get_all("Role", filters={
		"name": ("not in", "Administrator"),
		"disabled": 0,
	}, or_filters={
		"ifnull(restrict_to_domain, '')": "",
		"restrict_to_domain": ("in", active_domains)
	}, fields=["name"])
	# translated label for display, raw name as stored value
	doctypes_list = [ {"label":_(d.get("name")), "value":d.get("name")} for d in doctypes]
	roles_list = [ {"label":_(d.get("name")), "value":d.get("name")} for d in roles]
	return {
		"doctypes": sorted(doctypes_list, key=lambda d: d['label']),
		"roles": sorted(roles_list, key=lambda d: d['label'])
	}
@dataent.whitelist()
def get_permissions(doctype=None, role=None):
    """Return permission rules filtered by role and/or doctype, each annotated
    with its parent's linked doctypes and submittability."""
    dataent.only_for("System Manager")
    if role:
        out = get_all_perms(role)
        if doctype:
            out = [p for p in out if p.parent == doctype]
    else:
        # Prefer customised rules; fall back to the standard DocPerm rows.
        out = dataent.get_all('Custom DocPerm', fields='*', filters=dict(parent = doctype), order_by="permlevel")
        if not out:
            out = dataent.get_all('DocPerm', fields='*', filters=dict(parent = doctype), order_by="permlevel")

    # Cache linked-doctype lookups so each parent is resolved only once.
    linked_doctypes = {}
    for perm in out:
        parent = perm.parent
        if parent not in linked_doctypes:
            linked_doctypes[parent] = get_linked_doctypes(parent)
        perm.linked_doctypes = linked_doctypes[parent]
        meta = dataent.get_meta(parent)
        if meta:
            perm.is_submittable = meta.is_submittable
    return out
@dataent.whitelist()
def add(parent, role, permlevel):
    """Create a new permission rule for *role* on *parent* at *permlevel*."""
    dataent.only_for("System Manager")
    add_permission(parent, role, permlevel)
@dataent.whitelist()
def update(doctype, role, permlevel, ptype, value=None):
    """Set one permission property; return 'refresh' when anything actually changed."""
    dataent.only_for("System Manager")
    changed = update_permission_property(doctype, role, permlevel, ptype, value)
    return 'refresh' if changed else None
@dataent.whitelist()
def remove(doctype, role, permlevel):
    """Delete the Custom DocPerm row matching (doctype, role, permlevel)."""
    dataent.only_for("System Manager")
    # Copy standard perms into Custom DocPerm first, so the delete below
    # always targets a customised row, never a standard one.
    setup_custom_perms(doctype)
    name = dataent.get_value('Custom DocPerm', dict(parent=doctype, role=role, permlevel=permlevel))
    dataent.db.sql('delete from `tabCustom DocPerm` where name=%s', name)
    # NOTE(review): the delete happens before this guard; dataent.throw raises,
    # presumably rolling the transaction back -- confirm rollback semantics.
    if not dataent.get_all('Custom DocPerm', dict(parent=doctype)):
        dataent.throw(_('There must be atleast one permission rule.'), title=_('Cannot Remove'))
    validate_permissions_for_doctype(doctype, for_remove=True)
@dataent.whitelist()
def reset(doctype):
    """Discard all customised permissions for *doctype* and restore the standard ones."""
    dataent.only_for("System Manager")
    reset_perms(doctype)
    clear_permissions_cache(doctype)
@dataent.whitelist()
def get_users_with_role(role):
    """Return names of enabled, non-Administrator users who hold *role*."""
    dataent.only_for("System Manager")
    # Each SQL row is a 1-tuple (user name); unwrap to a flat list of names.
    return [p[0] for p in dataent.db.sql("""select distinct tabUser.name
        from `tabHas Role`, tabUser where
        `tabHas Role`.role=%s
        and tabUser.name != "Administrator"
        and `tabHas Role`.parent = tabUser.name
        and tabUser.enabled=1""", role)]
@dataent.whitelist()
def get_standard_permissions(doctype):
    """Return the standard (non-customised) permission rules for *doctype*.

    For custom DocTypes the rules live on the DocType document itself; for
    standard DocTypes they are read from the module's JSON definition file.
    """
    dataent.only_for("System Manager")
    meta = dataent.get_meta(doctype)
    if not meta.custom:
        # also used to setup permissions via patch
        path = get_file_path(meta.module, "DocType", doctype)
        return read_doc_from_file(path).get("permissions")
    doc = dataent.get_doc('DocType', doctype)
    return [p.as_dict() for p in doc.permissions]
import unittest
from dyna_settings.core import DynaSettings, _dyna_controller, register_dyna_settings, dyna_value, \
NoMatchingSettingsClass, DynaSettingsController
__author__ = 'curtis'
class ChildOK_Match(DynaSettings):
    """Settings stub whose environment detector always matches."""

    def env_detector(self):
        return True

    def value_dict(self):
        return {'A': 'a', 'B': 'b', 'C': 9}
class ChildOK_NoMatch(DynaSettings):
    """Settings stub whose environment detector never matches."""

    def env_detector(self):
        return False

    def value_dict(self):
        return {'A': 'aa', 'B': 'bb', 'C': 99}
class EnvSettingTrue(DynaSettings):
    """Settings stub that matches and asks environment variables to win."""

    def __init__(self):
        super(EnvSettingTrue, self).__init__()
        self._environ_vars_trump = True

    def env_detector(self):
        return True

    def value_dict(self):
        return {
            'AINT_THAR': 'This aint gonna be there',
            'PATH': 'a very wrong path',
        }
class TestDynaSettings(unittest.TestCase):
    """Exercises DynaSettings registration, value lookup, and the
    'environment variables trump' behaviour.

    Fix: the final test used the Python 2 ``print bad`` statement, a
    SyntaxError under Python 3; ``print(bad)`` is valid on both.
    """

    def test_parent_interface_excepts(self):
        """The abstract base class must refuse to be used directly."""
        bad = DynaSettings()
        with self.assertRaises(NotImplementedError):
            bad.env_detector()
        with self.assertRaises(NotImplementedError):
            bad.value_dict()

    def test_child_interface(self):
        """A matching subclass serves its values after init_values()."""
        good = ChildOK_Match()
        self.assertIsInstance(good.value_dict(), dict)
        self.assertTrue(good.env_detector())
        good.init_values()
        self.assertEqual(good.get_value('A', 'x'), 'a')

    def test_no_match_child_interface(self):
        """A non-matching subclass still serves values when used directly."""
        good = ChildOK_NoMatch()
        self.assertIsInstance(good.value_dict(), dict)
        self.assertFalse(good.env_detector())
        good.init_values()
        self.assertEqual(good.get_value('A', 'x'), 'aa')

    def test_register_match(self):
        """The controller keeps the instance whose detector matched."""
        _dyna_controller.reset()
        instance = ChildOK_Match()
        register_dyna_settings(instance)
        register_dyna_settings(ChildOK_NoMatch())
        self.assertEqual(_dyna_controller.detected_settings, instance)

    def test_register_nomatch(self):
        """No detector match leaves the controller without settings."""
        _dyna_controller.reset()
        register_dyna_settings(ChildOK_NoMatch())
        self.assertIsNone(_dyna_controller.detected_settings)

    def test_get_values(self):
        _dyna_controller.reset()
        register_dyna_settings(ChildOK_Match())
        register_dyna_settings(ChildOK_NoMatch())
        val = dyna_value('A', production_value='x')
        self.assertEqual(val, 'a')
        val = dyna_value('B', production_value='x')
        self.assertEqual(val, 'b')
        # Unknown keys fall back to the supplied production value.
        val = dyna_value('UNDEFINED', production_value='prod')
        self.assertEqual(val, 'prod')

    def test_get_values_with_no_settings_class(self):
        _dyna_controller.reset()
        with self.assertRaises(NoMatchingSettingsClass):
            dyna_value('BAD')

    def test_environ_var_trump_global(self):
        """
        Verify that with the global trump set True we'll get from the environment
        :return:
        """
        DynaSettingsController.set_environ_vars_trump(flag=True)
        self.assertTrue(_dyna_controller.environ_vars_trump)
        import os
        path = os.environ.get('PATH')
        self.assertTrue(path)
        path_from_settings = dyna_value('PATH', production_value=None)
        self.assertTrue(path_from_settings)
        self.assertEqual(path_from_settings, path)

    def test_environ_var_trump_off(self):
        """
        Verify that with the environment var trump off we obtain the value from
        our dyna settings and not the environment variable.
        :return:
        """
        DynaSettingsController.set_environ_vars_trump(flag=False)
        self.assertFalse(_dyna_controller.environ_vars_trump)
        import os
        path = os.environ.get('PATH')
        self.assertTrue(path)
        path_from_settings = dyna_value('PATH', production_value='Internal path')
        self.assertTrue(path_from_settings)
        self.assertNotEqual(path_from_settings, path)

    def test_environ_var_trump_instance(self):
        """
        Verify that, with a DynaSettings instance registered that sets trump True it behaves
        properly by obtaining the value from the environment variable. Should ignore both the
        production_value and the settings class definition.
        :return:
        """
        _dyna_controller.reset()
        self.assertFalse(_dyna_controller.environ_vars_trump)
        register_dyna_settings(EnvSettingTrue())
        import os
        path = os.environ.get('PATH')
        self.assertTrue(path)
        path_from_settings = dyna_value('PATH', production_value='Internal path')
        self.assertTrue(path_from_settings)
        self.assertEqual(path_from_settings, path)

    def test_environ_var_trump_no_env_var(self):
        """
        Verify that if trump is True but the environment var is not defined we'll still pick
        up the value if the class instance has defined it
        :return:
        """
        _dyna_controller.reset()
        register_dyna_settings(EnvSettingTrue())
        path = dyna_value('AINT_THAR', production_value=None)
        self.assertTrue(path)

    def test_environ_var_trump_fail(self):
        """
        Verifies that if Trump is true, environment doesn't have the variable, production_value doesn't
        define it, and the class does not either, then exception is raised.
        :return:
        """
        _dyna_controller.reset()
        register_dyna_settings(EnvSettingTrue())
        with self.assertRaises(NoMatchingSettingsClass):
            bad = dyna_value('VOODOOUDO', production_value=None)
            # print() call form is valid on both Python 2 and 3.
            print(bad)
# Fix: this line was corrupted by concatenation residue
# ("| tests/test_dyna_settings.py | import unittest") fused onto it.
if __name__ == '__main__':
    unittest.main()
from dyna_settings.core import DynaSettings, _dyna_controller, register_dyna_settings, dyna_value, \
NoMatchingSettingsClass, DynaSettingsController
__author__ = 'curtis'
class ChildOK_Match(DynaSettings):
    """Settings stub whose environment detector always matches."""

    def env_detector(self):
        return True

    def value_dict(self):
        return {'A': 'a', 'B': 'b', 'C': 9}
class ChildOK_NoMatch(DynaSettings):
    """Settings stub whose environment detector never matches."""

    def env_detector(self):
        return False

    def value_dict(self):
        return {'A': 'aa', 'B': 'bb', 'C': 99}
class EnvSettingTrue(DynaSettings):
    """Settings stub that matches and asks environment variables to win."""

    def __init__(self):
        super(EnvSettingTrue, self).__init__()
        self._environ_vars_trump = True

    def env_detector(self):
        return True

    def value_dict(self):
        return {
            'AINT_THAR': 'This aint gonna be there',
            'PATH': 'a very wrong path',
        }
class TestDynaSettings(unittest.TestCase):
    """Exercises DynaSettings registration, value lookup, and the
    'environment variables trump' behaviour.

    Fix: the final test used the Python 2 ``print bad`` statement, a
    SyntaxError under Python 3; ``print(bad)`` is valid on both.
    """

    def test_parent_interface_excepts(self):
        """The abstract base class must refuse to be used directly."""
        bad = DynaSettings()
        with self.assertRaises(NotImplementedError):
            bad.env_detector()
        with self.assertRaises(NotImplementedError):
            bad.value_dict()

    def test_child_interface(self):
        """A matching subclass serves its values after init_values()."""
        good = ChildOK_Match()
        self.assertIsInstance(good.value_dict(), dict)
        self.assertTrue(good.env_detector())
        good.init_values()
        self.assertEqual(good.get_value('A', 'x'), 'a')

    def test_no_match_child_interface(self):
        """A non-matching subclass still serves values when used directly."""
        good = ChildOK_NoMatch()
        self.assertIsInstance(good.value_dict(), dict)
        self.assertFalse(good.env_detector())
        good.init_values()
        self.assertEqual(good.get_value('A', 'x'), 'aa')

    def test_register_match(self):
        """The controller keeps the instance whose detector matched."""
        _dyna_controller.reset()
        instance = ChildOK_Match()
        register_dyna_settings(instance)
        register_dyna_settings(ChildOK_NoMatch())
        self.assertEqual(_dyna_controller.detected_settings, instance)

    def test_register_nomatch(self):
        """No detector match leaves the controller without settings."""
        _dyna_controller.reset()
        register_dyna_settings(ChildOK_NoMatch())
        self.assertIsNone(_dyna_controller.detected_settings)

    def test_get_values(self):
        _dyna_controller.reset()
        register_dyna_settings(ChildOK_Match())
        register_dyna_settings(ChildOK_NoMatch())
        val = dyna_value('A', production_value='x')
        self.assertEqual(val, 'a')
        val = dyna_value('B', production_value='x')
        self.assertEqual(val, 'b')
        # Unknown keys fall back to the supplied production value.
        val = dyna_value('UNDEFINED', production_value='prod')
        self.assertEqual(val, 'prod')

    def test_get_values_with_no_settings_class(self):
        _dyna_controller.reset()
        with self.assertRaises(NoMatchingSettingsClass):
            dyna_value('BAD')

    def test_environ_var_trump_global(self):
        """
        Verify that with the global trump set True we'll get from the environment
        :return:
        """
        DynaSettingsController.set_environ_vars_trump(flag=True)
        self.assertTrue(_dyna_controller.environ_vars_trump)
        import os
        path = os.environ.get('PATH')
        self.assertTrue(path)
        path_from_settings = dyna_value('PATH', production_value=None)
        self.assertTrue(path_from_settings)
        self.assertEqual(path_from_settings, path)

    def test_environ_var_trump_off(self):
        """
        Verify that with the environment var trump off we obtain the value from
        our dyna settings and not the environment variable.
        :return:
        """
        DynaSettingsController.set_environ_vars_trump(flag=False)
        self.assertFalse(_dyna_controller.environ_vars_trump)
        import os
        path = os.environ.get('PATH')
        self.assertTrue(path)
        path_from_settings = dyna_value('PATH', production_value='Internal path')
        self.assertTrue(path_from_settings)
        self.assertNotEqual(path_from_settings, path)

    def test_environ_var_trump_instance(self):
        """
        Verify that, with a DynaSettings instance registered that sets trump True it behaves
        properly by obtaining the value from the environment variable. Should ignore both the
        production_value and the settings class definition.
        :return:
        """
        _dyna_controller.reset()
        self.assertFalse(_dyna_controller.environ_vars_trump)
        register_dyna_settings(EnvSettingTrue())
        import os
        path = os.environ.get('PATH')
        self.assertTrue(path)
        path_from_settings = dyna_value('PATH', production_value='Internal path')
        self.assertTrue(path_from_settings)
        self.assertEqual(path_from_settings, path)

    def test_environ_var_trump_no_env_var(self):
        """
        Verify that if trump is True but the environment var is not defined we'll still pick
        up the value if the class instance has defined it
        :return:
        """
        _dyna_controller.reset()
        register_dyna_settings(EnvSettingTrue())
        path = dyna_value('AINT_THAR', production_value=None)
        self.assertTrue(path)

    def test_environ_var_trump_fail(self):
        """
        Verifies that if Trump is true, environment doesn't have the variable, production_value doesn't
        define it, and the class does not either, then exception is raised.
        :return:
        """
        _dyna_controller.reset()
        register_dyna_settings(EnvSettingTrue())
        with self.assertRaises(NoMatchingSettingsClass):
            bad = dyna_value('VOODOOUDO', production_value=None)
            # print() call form is valid on both Python 2 and 3.
            print(bad)
# Allow running this test module directly with `python test_dyna_settings.py`.
if __name__ == '__main__':
    unittest.main()
import os
import pytest
import uuid
from unittest.mock import Mock
from ixian.check.checker import Checker, MultiValueChecker
from ixian.modules.filesystem.file_hash import FileHash, get_flags
class MockChecker(Checker):
    """Checker double whose save()/check() can be swapped out for Mocks."""

    def __init__(self, mock_save=True, mock_check=True, *args, **kwargs):
        self.mock_save = mock_save
        if mock_save:
            self.save = Mock()
        if mock_check:
            self.check = Mock(return_value=True)
        self.mocked_state = 1
        # Unique id per instance so state filenames never collide across tests.
        self.id = uuid.uuid4()

    def state(self):
        return {"mock": self.mocked_state}

    def filename(self):
        return "mock-%s" % str(self.id)

    def clone(self):
        duplicate = type(self)(self.mock_save)
        duplicate.mocked_state = self.mocked_state
        duplicate.id = self.id
        if self.mock_save:
            duplicate.save = self.save
        return duplicate
class MockMultiValueChecker(MultiValueChecker):
    """MultiValueChecker double that reports a fixed fake state."""

    def __init__(self, *args):
        # Set before super().__init__ in case the base reads state during init.
        self.mocked_state = 100
        super(MockMultiValueChecker, self).__init__(*args)

    def state(self):
        return {"mock": self.mocked_state}
class FailingCheck(MockChecker):
    """A checker whose check() always reports failure."""

    def __init__(self, *args, **kwargs):
        super(FailingCheck, self).__init__(*args, **kwargs)
        self.check = Mock(return_value=False)
class PassingCheck(MockChecker):
    """A checker whose check() always reports success (MockChecker default)."""
class TestChecker:
    """Core Checker tests"""

    @property
    def checker(self):
        # A fresh, un-mocked checker for every access.
        return MockChecker(mock_save=False, mock_check=False)

    def test_cache(self, temp_builder):
        """Saving state makes it readable back from the hash cache."""
        subject = self.checker
        assert subject.saved_state() is None
        subject.save()
        assert subject.saved_state() == subject.state()

    def test_check_never_run(self, temp_builder):
        """check() is False when the task has never been run."""
        subject = self.checker
        assert subject.saved_state() is None
        assert not subject.check()

    def test_check_hash_match(self, temp_builder):
        """check() is True once the current state has been saved."""
        subject = self.checker
        subject.save()
        assert subject.check()

    def test_check_hash_mismatch(self, temp_builder):
        """check() is False after the state drifts from what was saved."""
        subject = self.checker
        subject.save()
        subject.mocked_state += 1
        assert not subject.check()
class TestMultiValueChecker(TestChecker):
    """Runs the core Checker tests against MultiValueChecker, plus extras."""

    @property
    def checker(self):
        # Random key per access so state files don't leak between tests.
        return MockMultiValueChecker(str(uuid.uuid4()))

    def test_requires_at_least_one_key(self):
        with pytest.raises(AssertionError, match="At least one key must be given"):
            MockMultiValueChecker()

    def test_clone(self):
        original = self.checker
        duplicate = original.clone()
        assert original.keys == duplicate.keys
        assert original.filename() == duplicate.filename()
        assert original.file_path() == duplicate.file_path()
        assert original.state() == duplicate.state()
        assert original.saved_state() == duplicate.saved_state()
def file_hash_mock_path(path):
    """Return the absolute path of a fixture under the file_hash_mocks directory."""
    from ixian.tests import mocks as mocks_module

    mocks_dir = os.path.dirname(os.path.realpath(mocks_module.__file__))
    return f"{mocks_dir}/file_hash_mocks/{path}"
class TestFileHash:
    """Tests for the FileHash checker.

    Fix: the last line of the class was corrupted by concatenation residue
    ("| ixian/tests/test_checker.py |") fused onto it.
    """

    # Fixture names under the file_hash_mocks directory.
    MOCK_DIR = "dir"
    MOCK_FILE_1 = "file_1"
    MOCK_FILE_2 = "file_2"
    MOCK_FILE_1_DIFF_PERMISSIONS = "file_1_diff_permissions"
    MOCK_FILE_1_RENAMED = "file_1_renamed"
    # NOTE(review): identical to MOCK_FILE_2 -- presumably this was meant to
    # point at a dedicated empty-file fixture; confirm against the mocks dir.
    MOCK_FILE_EMPTY = "file_2"
    MOCK_FILE_MISSING = "file_missing"
    MOCK_DIR_COPY = "dir_copy"
    MOCK_DIR_EMPTY = "dir_empty"
    MOCK_DIR_MISSING = "dir_missing"
    MOCK_DIR_DIFF_PERMISSIONS = "dir_different_permissions"
    MOCK_NESTED = "dir/level_2"
    MOCK_DIR_FILE_PERM_CHANGE = "nested_file_perm_change/level_2/file_1"
    MOCK_DIR_DIR_PERM_CHANGE = "nested_dir_perm_change/level_2"
    MOCK_DIR_FILE_RENAME = "nested_file_rename/level_2"
    MOCK_DIR_DIR_RENAME = "nested_dir_rename/level_2_renamed"
    MOCK_DIR_FILE_CHANGE = "dir_file_change"
    MOCK_DIR_FILE_EMPTY = "nested_file_empty/level_2"
    MOCK_DIR_DIR_EMPTY = "nested_dir_empty/level_2"
    MOCK_DIR_FILE_MISSING = "nested_file_missing/level_2"
    MOCK_DIR_DIR_MISSING = "nested_dir_missing/level_2"
    MOCK_NESTED_FILE_PERM_CHANGE = "nested_file_perm_change"
    MOCK_NESTED_DIR_PERM_CHANGE = "nested_dir_perm_change"
    MOCK_NESTED_FILE_RENAME = "nested_file_rename"
    MOCK_NESTED_DIR_RENAME = "nested_dir_rename"
    MOCK_NESTED_FILE_CHANGE = "nested_file_change"
    MOCK_NESTED_FILE_EMPTY = "nested_file_empty"
    MOCK_NESTED_DIR_EMPTY = "nested_dir_empty"
    MOCK_NESTED_FILE_MISSING = "nested_file_missing"
    MOCK_NESTED_DIR_MISSING = "nested_dir_missing"

    def assert_paths(self, path_1, path_2, expected):
        """Assert that FileHash equality over the two inputs matches *expected*.

        NOTE(review): most callers pass ``checker.state()`` dicts here rather
        than paths, so FileHash ends up wrapping a dict -- verify this is the
        intended usage rather than comparing the state dicts directly.
        """
        checker_1 = FileHash(path_1)
        checker_2 = FileHash(path_2)
        assert expected == (checker_1 == checker_2)

    # File Tests
    def test_file_hash(self):
        """Test hashing a single file"""
        path = file_hash_mock_path(self.MOCK_FILE_1)
        checker_1 = FileHash(path)
        expected = {path: "529208ab580d05f4e081d2da2cde8b80da46c39ae8f0a31d20b905057bf2f2bc"}
        assert checker_1.state() == expected

    def test_file_permission_change(self):
        """Changing permission on file changes it's hash"""
        # NOTE(review): compares MOCK_DIR against a file fixture -- the
        # docstring suggests MOCK_FILE_1 may have been intended; confirm.
        checker_1 = FileHash(file_hash_mock_path(self.MOCK_DIR))
        checker_2 = FileHash(file_hash_mock_path(self.MOCK_FILE_1_DIFF_PERMISSIONS))
        self.assert_paths(checker_1.state(), checker_2.state(), False)

    def test_file_rename(self):
        """Testing changing a file's name"""
        # file hash
        file_1 = file_hash_mock_path(self.MOCK_FILE_1)
        file_2 = file_hash_mock_path(self.MOCK_FILE_1_RENAMED)
        # make sure flags are identical
        assert get_flags(file_1) == get_flags(file_2)
        checker_1 = FileHash(file_1)
        checker_2 = FileHash(file_2)
        assert list(checker_1.state().values())[0] == list(checker_2.state().values())[0]

    def test_file_contents_change(self):
        """Changing file contents should change file and parent dir hash"""
        checker_1 = FileHash(file_hash_mock_path(self.MOCK_FILE_1))
        checker_2 = FileHash(file_hash_mock_path(self.MOCK_FILE_2))
        self.assert_paths(checker_1.state(), checker_2.state(), False)

    def test_file_empty(self):
        """Empty file should not hash the same as a missing file"""
        checker_1 = FileHash(file_hash_mock_path(self.MOCK_FILE_1))
        checker_2 = FileHash(file_hash_mock_path(self.MOCK_FILE_EMPTY))
        self.assert_paths(checker_1.state(), checker_2.state(), False)
        checker_1 = FileHash(file_hash_mock_path(self.MOCK_FILE_EMPTY))
        checker_2 = FileHash(file_hash_mock_path(self.MOCK_FILE_MISSING))
        self.assert_paths(checker_1.state(), checker_2.state(), False)

    # =========================================================================
    # Directory tests
    # =========================================================================
    def test_dir_hash_test(self):
        """Test hashing a directory"""
        path = file_hash_mock_path(self.MOCK_DIR)
        checker_1 = FileHash(path)
        expected = {path: "f443aa643743df88ff39648d3cc04973813be298bee1c29372a9e103ad20fb47"}
        assert checker_1.state() == expected

    def test_dir_rename(self):
        """Test changing a directory's name"""
        checker_1 = FileHash(file_hash_mock_path(self.MOCK_DIR))
        checker_2 = FileHash(file_hash_mock_path(self.MOCK_DIR_COPY))
        assert list(checker_1.state().values())[0] == list(checker_2.state().values())[0]

    def test_dir_permission_change(self):
        """Changing permissions on a dir changes it's hash"""
        checker_1 = FileHash(file_hash_mock_path(self.MOCK_DIR))
        checker_2 = FileHash(file_hash_mock_path(self.MOCK_DIR_DIFF_PERMISSIONS))
        self.assert_paths(checker_1.state(), checker_2.state(), False)

    def test_dir_missing(self):
        """Hashing a missing dir"""
        checker_1 = FileHash(file_hash_mock_path(self.MOCK_DIR_EMPTY))
        checker_2 = FileHash(file_hash_mock_path(self.MOCK_DIR_MISSING))
        self.assert_paths(checker_1.state(), checker_2.state(), False)
        checker_1 = FileHash(file_hash_mock_path(self.MOCK_DIR))
        checker_2 = FileHash(file_hash_mock_path(self.MOCK_DIR_MISSING))
        self.assert_paths(checker_1.state(), checker_2.state(), False)

    def test_dir_missing_dir(self):
        """A dir missing from a dir"""
        checker_1 = FileHash(file_hash_mock_path(self.MOCK_NESTED))
        checker_2 = FileHash(file_hash_mock_path(self.MOCK_DIR_DIR_MISSING))
        self.assert_paths(checker_1.state(), checker_2.state(), False)

    def test_dir_dir_rename(self):
        """A dir containing renamed dir"""
        checker_1 = FileHash(file_hash_mock_path(self.MOCK_NESTED))
        checker_2 = FileHash(file_hash_mock_path(self.MOCK_DIR_DIR_RENAME))
        self.assert_paths(checker_1.state(), checker_2.state(), False)

    def test_dir_file_rename(self):
        """A dir containing renamed file"""
        checker_1 = FileHash(file_hash_mock_path(self.MOCK_NESTED))
        checker_2 = FileHash(file_hash_mock_path(self.MOCK_DIR_FILE_RENAME))
        self.assert_paths(checker_1.state(), checker_2.state(), False)

    def test_dir_file_change(self):
        """A dir containing renamed file"""
        checker_1 = FileHash(file_hash_mock_path(self.MOCK_NESTED))
        checker_2 = FileHash(file_hash_mock_path(self.MOCK_DIR_FILE_CHANGE))
        self.assert_paths(checker_1.state(), checker_2.state(), False)

    def test_dir_file_empty(self):
        """Missing and empty file in nested directory are not the same"""
        checker_1 = FileHash(file_hash_mock_path(self.MOCK_DIR_FILE_MISSING))
        checker_2 = FileHash(file_hash_mock_path(self.MOCK_DIR_FILE_EMPTY))
        self.assert_paths(checker_1.state(), checker_2.state(), False)

    def test_dir_dir_empty(self):
        """Missing and empty file in nested directory are not the same"""
        checker_1 = FileHash(file_hash_mock_path(self.MOCK_DIR_DIR_MISSING))
        checker_2 = FileHash(file_hash_mock_path(self.MOCK_DIR_DIR_EMPTY))
        self.assert_paths(checker_1.state(), checker_2.state(), False)

    # =========================================================================
    # Nested directory tests
    # =========================================================================
    def test_nested_file_permission_change(self):
        """changing permissions on a nested file"""
        checker_1 = FileHash(file_hash_mock_path(self.MOCK_DIR))
        checker_2 = FileHash(file_hash_mock_path(self.MOCK_NESTED_FILE_PERM_CHANGE))
        self.assert_paths(checker_1.state(), checker_2.state(), False)

    def test_nested_dir_permission_change(self):
        """changing permissions on a nested dir"""
        checker_1 = FileHash(file_hash_mock_path(self.MOCK_DIR))
        checker_2 = FileHash(file_hash_mock_path(self.MOCK_NESTED_DIR_PERM_CHANGE))
        self.assert_paths(checker_1.state(), checker_2.state(), False)

    def test_nested_file_rename(self):
        """rename file in nested dir"""
        checker_1 = FileHash(file_hash_mock_path(self.MOCK_DIR))
        checker_2 = FileHash(file_hash_mock_path(self.MOCK_NESTED_FILE_RENAME))
        self.assert_paths(checker_1.state(), checker_2.state(), False)

    def test_nested_dir_rename(self):
        """rename dir in nested dir"""
        checker_1 = FileHash(file_hash_mock_path(self.MOCK_DIR))
        checker_2 = FileHash(file_hash_mock_path(self.MOCK_NESTED_DIR_RENAME))
        self.assert_paths(checker_1.state(), checker_2.state(), False)

    def test_nested_dir_missing_file(self):
        """A nested dir that is missing a file"""
        checker_1 = FileHash(file_hash_mock_path(self.MOCK_DIR))
        checker_2 = FileHash(file_hash_mock_path(self.MOCK_NESTED_FILE_MISSING))
        self.assert_paths(checker_1.state(), checker_2.state(), False)

    def test_nested_dir_missing_dir(self):
        """A nested dir that is missing a dir"""
        checker_1 = FileHash(file_hash_mock_path(self.MOCK_DIR))
        checker_2 = FileHash(file_hash_mock_path(self.MOCK_NESTED_DIR_MISSING))
        self.assert_paths(checker_1.state(), checker_2.state(), False)
import os
import pytest
import uuid
from unittest.mock import Mock
from ixian.check.checker import Checker, MultiValueChecker
from ixian.modules.filesystem.file_hash import FileHash, get_flags
class MockChecker(Checker):
    """Checker double whose save()/check() can be swapped out for Mocks."""

    def __init__(self, mock_save=True, mock_check=True, *args, **kwargs):
        self.mock_save = mock_save
        if mock_save:
            self.save = Mock()
        if mock_check:
            self.check = Mock(return_value=True)
        self.mocked_state = 1
        # Unique id per instance so state filenames never collide across tests.
        self.id = uuid.uuid4()

    def state(self):
        return {"mock": self.mocked_state}

    def filename(self):
        return "mock-%s" % str(self.id)

    def clone(self):
        duplicate = type(self)(self.mock_save)
        duplicate.mocked_state = self.mocked_state
        duplicate.id = self.id
        if self.mock_save:
            duplicate.save = self.save
        return duplicate
class MockMultiValueChecker(MultiValueChecker):
    """MultiValueChecker double that reports a fixed fake state."""

    def __init__(self, *args):
        # Set before super().__init__ in case the base reads state during init.
        self.mocked_state = 100
        super(MockMultiValueChecker, self).__init__(*args)

    def state(self):
        return {"mock": self.mocked_state}
class FailingCheck(MockChecker):
    """A checker whose check() always reports failure."""

    def __init__(self, *args, **kwargs):
        super(FailingCheck, self).__init__(*args, **kwargs)
        self.check = Mock(return_value=False)
class PassingCheck(MockChecker):
    """A checker whose check() always reports success (MockChecker default)."""
class TestChecker:
    """Core Checker tests"""

    @property
    def checker(self):
        # A fresh, un-mocked checker for every access.
        return MockChecker(mock_save=False, mock_check=False)

    def test_cache(self, temp_builder):
        """Saving state makes it readable back from the hash cache."""
        subject = self.checker
        assert subject.saved_state() is None
        subject.save()
        assert subject.saved_state() == subject.state()

    def test_check_never_run(self, temp_builder):
        """check() is False when the task has never been run."""
        subject = self.checker
        assert subject.saved_state() is None
        assert not subject.check()

    def test_check_hash_match(self, temp_builder):
        """check() is True once the current state has been saved."""
        subject = self.checker
        subject.save()
        assert subject.check()

    def test_check_hash_mismatch(self, temp_builder):
        """check() is False after the state drifts from what was saved."""
        subject = self.checker
        subject.save()
        subject.mocked_state += 1
        assert not subject.check()
class TestMultiValueChecker(TestChecker):
    """Runs the core Checker tests against MultiValueChecker, plus extras."""

    @property
    def checker(self):
        # Random key per access so state files don't leak between tests.
        return MockMultiValueChecker(str(uuid.uuid4()))

    def test_requires_at_least_one_key(self):
        with pytest.raises(AssertionError, match="At least one key must be given"):
            MockMultiValueChecker()

    def test_clone(self):
        original = self.checker
        duplicate = original.clone()
        assert original.keys == duplicate.keys
        assert original.filename() == duplicate.filename()
        assert original.file_path() == duplicate.file_path()
        assert original.state() == duplicate.state()
        assert original.saved_state() == duplicate.saved_state()
def file_hash_mock_path(path):
    """Return the absolute path of a fixture under the file_hash_mocks directory."""
    from ixian.tests import mocks as mocks_module

    mocks_dir = os.path.dirname(os.path.realpath(mocks_module.__file__))
    return f"{mocks_dir}/file_hash_mocks/{path}"
class TestFileHash:
"""Tests for the FileHash checker"""
MOCK_DIR = "dir"
MOCK_FILE_1 = "file_1"
MOCK_FILE_2 = "file_2"
MOCK_FILE_1_DIFF_PERMISSIONS = "file_1_diff_permissions"
MOCK_FILE_1_RENAMED = "file_1_renamed"
MOCK_FILE_EMPTY = "file_2"
MOCK_FILE_MISSING = "file_missing"
MOCK_DIR_COPY = "dir_copy"
MOCK_DIR_EMPTY = "dir_empty"
MOCK_DIR_MISSING = "dir_missing"
MOCK_DIR_DIFF_PERMISSIONS = "dir_different_permissions"
MOCK_NESTED = "dir/level_2"
MOCK_DIR_FILE_PERM_CHANGE = "nested_file_perm_change/level_2/file_1"
MOCK_DIR_DIR_PERM_CHANGE = "nested_dir_perm_change/level_2"
MOCK_DIR_FILE_RENAME = "nested_file_rename/level_2"
MOCK_DIR_DIR_RENAME = "nested_dir_rename/level_2_renamed"
MOCK_DIR_FILE_CHANGE = "dir_file_change"
MOCK_DIR_FILE_EMPTY = "nested_file_empty/level_2"
MOCK_DIR_DIR_EMPTY = "nested_dir_empty/level_2"
MOCK_DIR_FILE_MISSING = "nested_file_missing/level_2"
MOCK_DIR_DIR_MISSING = "nested_dir_missing/level_2"
MOCK_NESTED_FILE_PERM_CHANGE = "nested_file_perm_change"
MOCK_NESTED_DIR_PERM_CHANGE = "nested_dir_perm_change"
MOCK_NESTED_FILE_RENAME = "nested_file_rename"
MOCK_NESTED_DIR_RENAME = "nested_dir_rename"
MOCK_NESTED_FILE_CHANGE = "nested_file_change"
MOCK_NESTED_FILE_EMPTY = "nested_file_empty"
MOCK_NESTED_DIR_EMPTY = "nested_dir_empty"
MOCK_NESTED_FILE_MISSING = "nested_file_missing"
MOCK_NESTED_DIR_MISSING = "nested_dir_missing"
def assert_paths(self, path_1, path_2, expected):
checker_1 = FileHash(path_1)
checker_2 = FileHash(path_2)
assert expected == (checker_1 == checker_2)
# File Tests
def test_file_hash(self):
"""Test hashing a single file"""
path = file_hash_mock_path(self.MOCK_FILE_1)
checker_1 = FileHash(path)
expected = {path: "529208ab580d05f4e081d2da2cde8b80da46c39ae8f0a31d20b905057bf2f2bc"}
assert checker_1.state() == expected
def test_file_permission_change(self):
"""Changing permission on file changes it's hash"""
checker_1 = FileHash(file_hash_mock_path(self.MOCK_DIR))
checker_2 = FileHash(file_hash_mock_path(self.MOCK_FILE_1_DIFF_PERMISSIONS))
self.assert_paths(checker_1.state(), checker_2.state(), False)
def test_file_rename(self):
"""Testing changing a file's name"""
# file hash
file_1 = file_hash_mock_path(self.MOCK_FILE_1)
file_2 = file_hash_mock_path(self.MOCK_FILE_1_RENAMED)
# make sure flags are identical
assert get_flags(file_1) == get_flags(file_2)
checker_1 = FileHash(file_1)
checker_2 = FileHash(file_2)
assert list(checker_1.state().values())[0] == list(checker_2.state().values())[0]
def test_file_contents_change(self):
"""Changing file contents should change file and parent dir hash"""
checker_1 = FileHash(file_hash_mock_path(self.MOCK_FILE_1))
checker_2 = FileHash(file_hash_mock_path(self.MOCK_FILE_2))
self.assert_paths(checker_1.state(), checker_2.state(), False)
def test_file_empty(self):
"""Empty file should not hash the same as a missing file"""
checker_1 = FileHash(file_hash_mock_path(self.MOCK_FILE_1))
checker_2 = FileHash(file_hash_mock_path(self.MOCK_FILE_EMPTY))
self.assert_paths(checker_1.state(), checker_2.state(), False)
checker_1 = FileHash(file_hash_mock_path(self.MOCK_FILE_EMPTY))
checker_2 = FileHash(file_hash_mock_path(self.MOCK_FILE_MISSING))
self.assert_paths(checker_1.state(), checker_2.state(), False)
# =========================================================================
# Directory tests
# =========================================================================
def test_dir_hash_test(self):
"""Test hashing a directory"""
path = file_hash_mock_path(self.MOCK_DIR)
checker_1 = FileHash(path)
expected = {path: "f443aa643743df88ff39648d3cc04973813be298bee1c29372a9e103ad20fb47"}
assert checker_1.state() == expected
def test_dir_rename(self):
"""Test changing a directory's name"""
checker_1 = FileHash(file_hash_mock_path(self.MOCK_DIR))
checker_2 = FileHash(file_hash_mock_path(self.MOCK_DIR_COPY))
assert list(checker_1.state().values())[0] == list(checker_2.state().values())[0]
def test_dir_permission_change(self):
"""Changing permissions on a dir changes it's hash"""
checker_1 = FileHash(file_hash_mock_path(self.MOCK_DIR))
checker_2 = FileHash(file_hash_mock_path(self.MOCK_DIR_DIFF_PERMISSIONS))
self.assert_paths(checker_1.state(), checker_2.state(), False)
def test_dir_missing(self):
"""Hashing a missing dir"""
checker_1 = FileHash(file_hash_mock_path(self.MOCK_DIR_EMPTY))
checker_2 = FileHash(file_hash_mock_path(self.MOCK_DIR_MISSING))
self.assert_paths(checker_1.state(), checker_2.state(), False)
checker_1 = FileHash(file_hash_mock_path(self.MOCK_DIR))
checker_2 = FileHash(file_hash_mock_path(self.MOCK_DIR_MISSING))
self.assert_paths(checker_1.state(), checker_2.state(), False)
def test_dir_missing_dir(self):
"""A dir missing from a dir"""
checker_1 = FileHash(file_hash_mock_path(self.MOCK_NESTED))
checker_2 = FileHash(file_hash_mock_path(self.MOCK_DIR_DIR_MISSING))
self.assert_paths(checker_1.state(), checker_2.state(), False)
def test_dir_dir_rename(self):
"""A dir containing renamed dir"""
checker_1 = FileHash(file_hash_mock_path(self.MOCK_NESTED))
checker_2 = FileHash(file_hash_mock_path(self.MOCK_DIR_DIR_RENAME))
self.assert_paths(checker_1.state(), checker_2.state(), False)
def test_dir_file_rename(self):
"""A dir containing renamed file"""
checker_1 = FileHash(file_hash_mock_path(self.MOCK_NESTED))
checker_2 = FileHash(file_hash_mock_path(self.MOCK_DIR_FILE_RENAME))
self.assert_paths(checker_1.state(), checker_2.state(), False)
def test_dir_file_change(self):
    """A dir containing a changed file hashes differently."""
    original_state = FileHash(file_hash_mock_path(self.MOCK_NESTED)).state()
    changed_state = FileHash(file_hash_mock_path(self.MOCK_DIR_FILE_CHANGE)).state()
    self.assert_paths(original_state, changed_state, False)
def test_dir_file_empty(self):
    """Missing and empty file in a nested directory are not the same."""
    missing_state = FileHash(file_hash_mock_path(self.MOCK_DIR_FILE_MISSING)).state()
    empty_state = FileHash(file_hash_mock_path(self.MOCK_DIR_FILE_EMPTY)).state()
    self.assert_paths(missing_state, empty_state, False)
def test_dir_dir_empty(self):
    """Missing and empty dir in a nested directory are not the same."""
    missing_state = FileHash(file_hash_mock_path(self.MOCK_DIR_DIR_MISSING)).state()
    empty_state = FileHash(file_hash_mock_path(self.MOCK_DIR_DIR_EMPTY)).state()
    self.assert_paths(missing_state, empty_state, False)
# =========================================================================
# Nested directory tests
# =========================================================================
def test_nested_file_permission_change(self):
    """Changing permissions on a nested file changes the parent's hash."""
    baseline = FileHash(file_hash_mock_path(self.MOCK_DIR)).state()
    repermissioned = FileHash(file_hash_mock_path(self.MOCK_NESTED_FILE_PERM_CHANGE)).state()
    self.assert_paths(baseline, repermissioned, False)
def test_nested_dir_permission_change(self):
    """Changing permissions on a nested dir changes the parent's hash."""
    baseline = FileHash(file_hash_mock_path(self.MOCK_DIR)).state()
    repermissioned = FileHash(file_hash_mock_path(self.MOCK_NESTED_DIR_PERM_CHANGE)).state()
    self.assert_paths(baseline, repermissioned, False)
def test_nested_file_rename(self):
    """Renaming a file in a nested dir changes the parent's hash."""
    baseline = FileHash(file_hash_mock_path(self.MOCK_DIR)).state()
    renamed = FileHash(file_hash_mock_path(self.MOCK_NESTED_FILE_RENAME)).state()
    self.assert_paths(baseline, renamed, False)
def test_nested_dir_rename(self):
    """Renaming a dir in a nested dir changes the parent's hash."""
    baseline = FileHash(file_hash_mock_path(self.MOCK_DIR)).state()
    renamed = FileHash(file_hash_mock_path(self.MOCK_NESTED_DIR_RENAME)).state()
    self.assert_paths(baseline, renamed, False)
def test_nested_dir_missing_file(self):
    """A nested dir that is missing a file hashes differently."""
    baseline = FileHash(file_hash_mock_path(self.MOCK_DIR)).state()
    trimmed = FileHash(file_hash_mock_path(self.MOCK_NESTED_FILE_MISSING)).state()
    self.assert_paths(baseline, trimmed, False)
def test_nested_dir_missing_dir(self):
"""A nested dir that is missing a dir"""
checker_1 = FileHash(file_hash_mock_path(self.MOCK_DIR))
checker_2 = FileHash(file_hash_mock_path(self.MOCK_NESTED_DIR_MISSING))
self.assert_paths(checker_1.state(), checker_2.state(), False) | 0.688049 | 0.254996 |
import tensorflow as tf
def bahdanau_attention(units: int, query_dim: int, value_dim: int, d_type: tf.dtypes.DType = tf.float32,
                       name: str = "bahdanau_attention") -> tf.keras.Model:
    """Build a Bahdanau (additive) attention block as a Keras functional model.

    :param units: width of the intermediate scoring space
    :param query_dim: last dimension of the query tensor
    :param value_dim: last dimension of the value tensor
    :param d_type: computation dtype
    :param name: prefix used for all layer names
    :return: model mapping [query, value] -> [context_vector, attention_weights]
    """
    query = tf.keras.Input(shape=(query_dim,), dtype=d_type, name=f"{name}_query")
    value = tf.keras.Input(shape=(None, value_dim), dtype=d_type, name=f"{name}_value")
    # Insert a time axis so the query broadcasts against every value step.
    expanded_query = tf.expand_dims(query, 1)
    projected_value = tf.keras.layers.Dense(units=units, dtype=d_type, name=f"{name}_state_dense")(value)
    projected_query = tf.keras.layers.Dense(units=units, dtype=d_type, name=f"{name}_hidden_dense")(expanded_query)
    additive_energy = tf.nn.tanh(x=projected_value + projected_query, name=f"{name}_tanh")
    score = tf.keras.layers.Dense(units=1, dtype=d_type, name=f"{name}_score_dense")(additive_energy)
    # Normalize over the time axis (axis=1) of the values.
    attention_weights = tf.nn.softmax(logits=score, axis=1, name=f"{name}_softmax")
    # Weighted sum of the values along the time axis.
    context_vector = tf.reduce_sum(input_tensor=attention_weights * value, axis=1, name=f"{name}_reduce_sum")
    return tf.keras.Model(inputs=[query, value], outputs=[context_vector, attention_weights])
# Scaled dot-product attention
def scaled_dot_product_attention(q, k, v, mask=None):
    """Compute scaled dot-product attention.

    q, k and v must share their leading dimensions, and seq_len_k must equal
    seq_len_v. The mask (padding or look-ahead) may have any shape that is
    broadcastable to (..., seq_len_q, seq_len_k).

    Args:
        q: queries, shape (..., seq_len_q, depth)
        k: keys, shape (..., seq_len_k, depth)
        v: values, shape (..., seq_len_v, depth_v)
        mask: float tensor broadcastable to (..., seq_len_q, seq_len_k), or None

    Returns:
        (output, attention_weights)
    """
    raw_scores = tf.matmul(q, k, transpose_b=True)  # (..., seq_len_q, seq_len_k)
    # Scale by sqrt(depth) to keep the logits in a stable range.
    depth = tf.cast(tf.shape(k)[-1], tf.float32)
    logits = raw_scores / tf.math.sqrt(depth)
    if mask is not None:
        # Push masked positions towards -inf so softmax gives them ~0 weight.
        logits += (mask * -1e9)
    # Normalize over the last axis (seq_len_k) so each row sums to 1.
    attention_weights = tf.nn.softmax(logits, axis=-1)  # (..., seq_len_q, seq_len_k)
    output = tf.matmul(attention_weights, v)  # (..., seq_len_q, depth_v)
    return output, attention_weights
# Multi-head attention layer
class MultiHeadAttention(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads):
    """Multi-head attention layer; d_model must divide evenly by num_heads."""
    super().__init__()
    self.d_model = d_model
    self.num_heads = num_heads
    assert d_model % self.num_heads == 0
    # Per-head feature width.
    self.depth = d_model // self.num_heads
    # Query/key/value projections plus the final output projection.
    self.wq = tf.keras.layers.Dense(d_model)
    self.wk = tf.keras.layers.Dense(d_model)
    self.wv = tf.keras.layers.Dense(d_model)
    self.dense = tf.keras.layers.Dense(d_model)
def split_heads(self, x, batch_size):
    """Split the last dimension into (num_heads, depth) and transpose so the
    result has shape (batch_size, num_heads, seq_len, depth)."""
    per_head = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
    return tf.transpose(per_head, perm=[0, 2, 1, 3])
def call(self, v, k, q, mask=None):
batch_size = tf.shape(q)[0]
q = self.wq(q) # (batch_size, seq_len, d_model)
k = self.wk(k) # (batch_size, seq_len, d_model)
v = self.wv(v) # (batch_size, seq_len, d_model)
q = self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth)
k = self.split_heads(k, batch_size) # (batch_size, num_heads, seq_len_k, depth)
v = self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v, depth)
# scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
# attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
scaled_attention, attention_weights = scaled_dot_product_attention(
q, k, v, mask)
scaled_attention = tf.transpose(scaled_attention,
perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, num_heads, depth)
concat_attention = tf.reshape(scaled_attention,
(batch_size, -1, self.d_model)) # (batch_size, seq_len_q, d_model)
output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model)
return output, attention_weights | dialogue/tensorflow/layers.py | import tensorflow as tf
def bahdanau_attention(units: int, query_dim: int, value_dim: int, d_type: tf.dtypes.DType = tf.float32,
name: str = "bahdanau_attention") -> tf.keras.Model:
"""Bahdanau Attention实现
:param units:
:param query_dim: query最后一个维度
:param value_dim: value最后一个维度
:param d_type: 运算精度
:param name: 名称
"""
query = tf.keras.Input(shape=(query_dim,), dtype=d_type, name="{}_query".format(name))
value = tf.keras.Input(shape=(None, value_dim), dtype=d_type, name="{}_value".format(name))
hidden_with_time_axis = tf.expand_dims(query, 1)
state = tf.keras.layers.Dense(units=units, dtype=d_type, name="{}_state_dense".format(name))(value)
hidden = tf.keras.layers.Dense(units=units, dtype=d_type, name="{}_hidden_dense".format(name))(
hidden_with_time_axis)
effect = tf.nn.tanh(x=state + hidden, name="{}_tanh".format(name))
score = tf.keras.layers.Dense(units=1, dtype=d_type, name="{}_score_dense".format(name))(effect)
attention_weights = tf.nn.softmax(logits=score, axis=1, name="{}_softmax".format(name))
context_vector = attention_weights * value
context_vector = tf.reduce_sum(input_tensor=context_vector, axis=1, name="{}_reduce_sum".format(name))
return tf.keras.Model(inputs=[query, value], outputs=[context_vector, attention_weights])
# 点积注意力
def scaled_dot_product_attention(q, k, v, mask=None):
"""计算注意力权重。
q, k, v 必须具有匹配的前置维度。
k, v 必须有匹配的倒数第二个维度,例如:seq_len_k = seq_len_v。
虽然 mask 根据其类型(填充或前瞻)有不同的形状,
但是 mask 必须能进行广播转换以便求和。
参数:
q: 请求的形状 == (..., seq_len_q, depth)
k: 主键的形状 == (..., seq_len_k, depth)
v: 数值的形状 == (..., seq_len_v, depth_v)
mask: Float 张量,其形状能转换成
(..., seq_len_q, seq_len_k)。默认为None。
返回值:
输出,注意力权重
"""
matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k)
# 缩放 matmul_qk
dk = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
# 将 mask 加入到缩放的张量上。
if mask is not None:
scaled_attention_logits += (mask * -1e9)
# softmax 在最后一个轴(seq_len_k)上归一化,因此分数相加等于1。
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k)
output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v)
return output, attention_weights
# 多头注意力层
class MultiHeadAttention(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.d_model = d_model
assert d_model % self.num_heads == 0
self.depth = d_model // self.num_heads
self.wq = tf.keras.layers.Dense(d_model)
self.wk = tf.keras.layers.Dense(d_model)
self.wv = tf.keras.layers.Dense(d_model)
self.dense = tf.keras.layers.Dense(d_model)
def split_heads(self, x, batch_size):
"""分拆最后一个维度到 (num_heads, depth).
转置结果使得形状为 (batch_size, num_heads, seq_len, depth)
"""
x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
return tf.transpose(x, perm=[0, 2, 1, 3])
def call(self, v, k, q, mask=None):
batch_size = tf.shape(q)[0]
q = self.wq(q) # (batch_size, seq_len, d_model)
k = self.wk(k) # (batch_size, seq_len, d_model)
v = self.wv(v) # (batch_size, seq_len, d_model)
q = self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth)
k = self.split_heads(k, batch_size) # (batch_size, num_heads, seq_len_k, depth)
v = self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v, depth)
# scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
# attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
scaled_attention, attention_weights = scaled_dot_product_attention(
q, k, v, mask)
scaled_attention = tf.transpose(scaled_attention,
perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, num_heads, depth)
concat_attention = tf.reshape(scaled_attention,
(batch_size, -1, self.d_model)) # (batch_size, seq_len_q, d_model)
output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model)
return output, attention_weights | 0.789153 | 0.571408 |
from math import e
# Import the create dense files
from p4_create_dense_files import inflation, annuity_term
# Top-level script: searches (by bisection) for the beginning monthly annuity
# payment whose NPV matches a set of customer-provided cash flows.
# Fixes vs. original: every open() is now wrapped in `with` (rates_file was
# opened four times and never closed), and the outer loop no longer shadows
# and immediately clobbers its own `months` loop variable.
test_case = 1
dense_cf_filename = 'p4_test%d_dense_cf.txt' % test_case
dense_rates_filename = 'p4_test%d_dense_rates.txt' % test_case

# Pass 1: total and NPV of the customer-provided cash flows.
total = 0.0
cf_npv = 0.0
with open(dense_cf_filename, 'r') as cf_file, open(dense_rates_filename, 'r') as rates_file:
    months = 1
    for cf_line, rate_line in zip(cf_file, rates_file):
        rate = float(rate_line) / 12.0 / 100.0  # annual percent -> monthly fraction
        cash_flow = float(cf_line)
        total += cash_flow
        cf_npv += cash_flow * e ** (-rate * months)  # continuous discounting
        months += 1
print('Total customer-provided cash flows: $%.2f' % total)
print('NPV of customer-provided cash flows: $%.2f' % cf_npv)

# Pass 2: bisection between 0 and the cash-flow total.
lo, hi = 0.0, total
print('\nSearching for correct beginning monthly annuity payment:')
last_guess = lo  # kept for parity with the original script
guess = hi
iter_count = 1
ttl_npv = 0
for _ in range(annuity_term):  # fixed iteration budget, not convergence-based
    guess = round(((lo + hi) / 2), 2)
    pmt = guess
    with open(dense_rates_filename, 'r') as rates_file:
        for months, rate_line in enumerate(rates_file, start=1):
            rate = float(rate_line) / 12.0 / 100.0
            disc = e ** (-rate * months)
            pmt_npv = disc * pmt
            # Running NPV: start from the customer NPV, subtract each payment.
            if months == 1:
                ttl_npv = cf_npv - pmt_npv
            else:
                ttl_npv = ttl_npv - pmt_npv
            if months % 12 == 0:
                pmt += round(pmt * inflation, 2)  # inflation step-up each year
    last_guess = guess
    if ttl_npv < 0.0:
        hi = guess  # paid too much: lower the upper bound
    else:
        lo = guess  # paid too little: raise the lower bound
    print(f'{iter_count}: With annuity of ({lo} + {hi})/2 = {round(guess,2)}, NPV is ${round(ttl_npv, 2)}')
    iter_count += 1
print(f'\nFirst year pay ${guess} per month')
print(f'For a total of ${guess * round(annuity_term,2)}')

# Pass 3: detail report of the non-zero customer cash flows.
print('=' * 41)
print('Details of the customer NPV calculations:')
print('=' * 41)
with open(dense_cf_filename, 'r') as cf_file, open(dense_rates_filename, 'r') as rates_file:
    for count, (cf, rate_line) in enumerate(zip(cf_file, rates_file), start=1):
        if float(cf) != 0.0:
            rate = float(rate_line) / 12.0 / 100.0
            print(f'months: {count} cashflow: {float(cf)} discount: {(e ** (-rate * count))} npv: {float(cf) * (e ** (-rate * count))}')
print('Total customer-provided cash flows: $%.2f' % total)
print('NPV of customer-provided cash flows: $%.2f' % cf_npv) | Week 05/p4_instructor_files/p4_search_annuity.py | from math import e
# Import the create dense files
from p4_create_dense_files import inflation, annuity_term
test_case = 1
dense_cf_filename = 'p4_test%d_dense_cf.txt' % test_case
dense_rates_filename = 'p4_test%d_dense_rates.txt' % test_case
total = 0.0
cf_npv = 0.0
cf_file = open(dense_cf_filename, 'r') # opens the file
rates_file = open(dense_rates_filename, 'r') #opens the file
months = 1
for cf_line, rate_line in zip(cf_file, rates_file):
rate = float(rate_line) / 12.0 / 100.0 # change the rate
cash_flow = float(cf_line)
total += cash_flow
cf_npv += cash_flow * e ** (-rate * months) # calculates the cashflow * discount rate
months += 1
cf_file.close()
print('Total customer-provided cash flows: $%.2f' % total)
print('NPV of customer-provided cash flows: $%.2f' % cf_npv)
lo, hi = 0.0, total
print('\nSearching for correct beginning monthly annuity payment:')
last_guess = lo
guess = hi
iter_count = 1
ttl_npv = 0
for months in range(annuity_term):
months = 1
rates_file = open(dense_rates_filename, 'r')
guess = round(((lo + hi) / 2),2)
pmt = guess
for rate_line in rates_file:
rate = float(rate_line) / 12.0 / 100.0 # gets rate
disc = e ** (-rate * months) # gets discount rate
pmt_npv = disc * pmt # gets the payment amount
# Following if statements are ways to calculate the total NPV
if months == 1:
ttl_npv = cf_npv - pmt_npv
else:
ttl_npv = ttl_npv - pmt_npv
if months % 12 == 0:
pmt += round(pmt * inflation, 2) # takes into account inflation after every year
months += 1
last_guess = guess
if ttl_npv < 0.0:
hi = guess
else:
lo = guess
print(f'{iter_count}: With annuity of ({lo} + {hi})/2 = {round(guess,2)}, NPV is ${round(ttl_npv, 2)}')
iter_count += 1
print(f'\nFirst year pay ${guess} per month')
print(f'For a total of ${guess * round(annuity_term,2)}')
print('=' * 41)
print('Details of the customer NPV calculations:')
print('=' * 41)
cf_file = open(dense_cf_filename, 'r')
rates_file = open(dense_rates_filename, 'r')
count = 1
for cf, rate_line in zip(cf_file, rates_file):
if float(cf) != 0.0:
rate = float(rate_line) / 12.0 / 100.0
print(f'months: {count} cashflow: {float(cf)} discount: {(e ** (-rate * count))} npv: {float(cf) * (e ** (-rate * count))}')
count += 1
print('Total customer-provided cash flows: $%.2f' % total)
print('NPV of customer-provided cash flows: $%.2f' % cf_npv) | 0.395601 | 0.409929 |
from bs4 import BeautifulSoup
from javaCopyrightsParser import JavaCopyrightsParser
class JavaLicenseParser:
SEPARATOR = ','
def __init__(self, input_license_file):
    """Parse the HTML licenses report and populate the lookup tables."""
    try:
        with open(input_license_file) as report:
            self.soup = BeautifulSoup(report, 'html.parser')
        self.read_licenses()
    except FileNotFoundError:
        # NOTE(review): on a missing report the instance is left without
        # .soup/.licenses_list, so later lookups raise AttributeError —
        # confirm this best-effort behavior is intended.
        print("Can't open a licenses report file: ", input_license_file)
    self.copyrightsParser = None
def get_license_type(self, package_name_prefix, package_name, package_version):
    """Return 'type,declared,package_url,license_url' for a package, or '-,-,-,-'."""
    lookup_key = JavaLicenseParser.SEPARATOR.join((package_name, package_version, package_name_prefix))
    return self.licenses_list.get(lookup_key, '-,-,-,-')
def get_copyrights(self, package_name_prefix, package_name, package_version):
    """Return the copyright string recorded for a package, or '-' if unknown."""
    lookup_key = JavaLicenseParser.SEPARATOR.join((package_name, package_version, package_name_prefix))
    return self.copyrights_list.get(lookup_key, '-')
def read_licenses(self):
    """Populate self.licenses_list and self.copyrights_list from the report table.

    Keys are 'name,version,prefix'; license values are
    'license_type,declared_license,package_url,license_url'.
    """
    sep = ","
    license_type_for_td = {}
    copyrights_for_license = {}
    # Declared licenses for which no copyright extraction is needed.
    not_required_licenses = {"Apache-2.0", "Public Domain", "EPL-1.0", "EPL-2.0",
                             "MPL-2.0", "CDDL-1.0", "CDDL-1.1", "EDL-1.0", "None"}
    # Walk every row of the report table.
    for tr in self.soup.table:
        td_values = []
        td_href = ['', '']
        href_count = 0
        for td in tr:
            if td != "\n":
                td_values.append(td.string)
                try:
                    td_href[href_count] = td.a.get('href')
                    href_count = href_count + 1
                except (AttributeError, IndexError):
                    # Cell has no <a> tag (AttributeError), or the row carries
                    # more links than the two we track (IndexError). The
                    # original swallowed *all* exceptions here.
                    pass
        if len(td_values) == 0:
            continue
        package_prefix = str(td_values[0])
        package_name = str(td_values[1])
        package_version = str(td_values[2])
        # Strip commas so the license type is safe inside our comma-joined values
        # (str.join on split parts never raises; the original wrapped it in try).
        license_type = "".join(str(td_values[5]).split(","))
        package_url = str('https://mvnrepository.com/artifact/' + package_prefix + '/' + package_name + '/' + package_version)
        license_url = str(td_href[1])
        declared_license = self.get_declared_license(license_type)
        license_type += sep + declared_license
        # A fresh parser per row, as before.
        self.copyrightsParser = JavaCopyrightsParser()
        if declared_license in not_required_licenses:
            copyright_text = "-"
        else:
            try:
                copyright_text = self.copyrightsParser.read_copyrigths(license_url)
            except Exception:
                copyright_text = "HTTP Error 403: Forbidden"
        all_parameters = package_name + sep + package_version + sep + package_prefix
        license_type_for_td.setdefault(all_parameters, license_type + sep + package_url + sep + license_url)
        copyrights_for_license.setdefault(all_parameters, copyright_text)
    # Publish once, after the walk (also defined when the table has no rows).
    self.licenses_list = license_type_for_td
    self.copyrights_list = copyrights_for_license
def get_declared_license(self, license_type):
# The dictionary of the possible license types
declared_licenses = {}
declared_licenses["Apache-2.0"] = ("Apache", "ASL", "Apache Software Licenses", "The Apache")
declared_licenses["MIT"] = ("MIT", "The MIT")
declared_licenses["BSD-2-Clause"] = ("BSD-2-Clause", "BSD 2-Clause")
declared_licenses["BSD-3-Clause"] = ("BSD-3-Clause", "BSD 3-Clause", "BSD License 3")
declared_licenses["BSD-Equivalent"] = ("BSD-Equivalent", "BSD", "BSD License")
declared_licenses["ISC"] = ("ISC", "ISC")
declared_licenses["EPL-1.0"] = ("EPL-1.0", "EPL 1.0", "Eclipse Public License")
declared_licenses["EPL-2.0"] = ("EPL-2.0", "EPL 2.0", "Eclipse Public License")
declared_licenses["EDL-1.0"] = ("EDL-1.0", "EDL 1.0", "Eclipse Distribution")
declared_licenses["MPL-2.0"] = ("MPL-2.0", "MPL 2.0")
declared_licenses["CDDL-1.0"] = ("CDDL-1.0", "CDDL 1.0", "CDDL")
declared_licenses["CDDL-1.1"] = ("CDDL-1.1", "CDDL 1.1")
declared_licenses["CC0-1.0"] = ("CC0-1.0", "CC0 1.0")
declared_licenses["Public Domain"] = ("Public Domain", "Public Domain")
declared_licenses["-"] = ("None", "None")
declared_license = ''
k = 0
for key in declared_licenses.keys():
for value in declared_licenses[key]:
bool = license_type.startswith(value)
if bool:
declared_license = key
k += 1
break
else:
continue
if k >= 1:
break
if declared_license == '':
declared_license = license_type
return declared_license | javaLicenseParser.py | from bs4 import BeautifulSoup
from javaCopyrightsParser import JavaCopyrightsParser
class JavaLicenseParser:
SEPARATOR = ','
def __init__(self, input_license_file):
try:
with open(input_license_file) as fp:
self.soup = BeautifulSoup(fp, 'html.parser')
self.read_licenses()
except FileNotFoundError:
print("Can't open a licenses report file: ", input_license_file)
self.copyrightsParser = None
def get_license_type(self, package_name_prefix, package_name, package_version):
return self.licenses_list.get(package_name + JavaLicenseParser.SEPARATOR + package_version + JavaLicenseParser.SEPARATOR + package_name_prefix, '-,-,-,-')
def get_copyrights(self, package_name_prefix, package_name, package_version):
return self.copyrights_list.get(package_name + JavaLicenseParser.SEPARATOR + package_version + JavaLicenseParser.SEPARATOR + package_name_prefix, '-')
def read_licenses(self):
all_values = []
sep = ","
license_type_for_td = {}
copyrights_for_license = {}
# Finding all td values in table
for tr in self.soup.table:
td_values = []
td_href = ['','']
m = 0
for td in tr:
if td != "\n":
td_values.append(td.string)
try:
td_href[m] = td.a.get('href')
m = m + 1
except:
a = 1
if len(td_values) == 0:
continue
package_prefix = str(td_values[0])
package_name = str(td_values[1])
package_version = str(td_values[2])
license_type = str(td_values[5])
package_url = str('https://mvnrepository.com/artifact/' + package_prefix + '/' + package_name + '/' + package_version)
license_url = str(td_href[1])
# Removing commas from license types
license_type_split = license_type.split(",")
join_string = ""
try:
license_type = join_string.join(license_type_split)
except:
license_type = license_type
# Getting declared licenses
declared_license = self.get_declared_license(license_type)
license_type += sep+declared_license
# Getting copyrights
self.copyrightsParser = JavaCopyrightsParser()
copyright = ''
not_required_licenses = {"Apache-2.0", "Public Domain", "EPL-1.0", "EPL-2.0", "MPL-2.0", "CDDL-1.0", "CDDL-1.1", "EDL-1.0", "None"}
for i in not_required_licenses:
if i == declared_license:
copyright = "-"
if copyright == '':
try:
copyright = self.copyrightsParser.read_copyrigths(license_url)
except:
copyright = "HTTP Error 403: Forbidden"
all_parameters = package_name + sep + package_version + sep + package_prefix
p = license_type + sep + package_url + sep + license_url
license_type_for_td.setdefault(all_parameters, p)
self.licenses_list = license_type_for_td
copyrights_for_license.setdefault(all_parameters, copyright)
self.copyrights_list = copyrights_for_license
def get_declared_license(self, license_type):
# The dictionary of the possible license types
declared_licenses = {}
declared_licenses["Apache-2.0"] = ("Apache", "ASL", "Apache Software Licenses", "The Apache")
declared_licenses["MIT"] = ("MIT", "The MIT")
declared_licenses["BSD-2-Clause"] = ("BSD-2-Clause", "BSD 2-Clause")
declared_licenses["BSD-3-Clause"] = ("BSD-3-Clause", "BSD 3-Clause", "BSD License 3")
declared_licenses["BSD-Equivalent"] = ("BSD-Equivalent", "BSD", "BSD License")
declared_licenses["ISC"] = ("ISC", "ISC")
declared_licenses["EPL-1.0"] = ("EPL-1.0", "EPL 1.0", "Eclipse Public License")
declared_licenses["EPL-2.0"] = ("EPL-2.0", "EPL 2.0", "Eclipse Public License")
declared_licenses["EDL-1.0"] = ("EDL-1.0", "EDL 1.0", "Eclipse Distribution")
declared_licenses["MPL-2.0"] = ("MPL-2.0", "MPL 2.0")
declared_licenses["CDDL-1.0"] = ("CDDL-1.0", "CDDL 1.0", "CDDL")
declared_licenses["CDDL-1.1"] = ("CDDL-1.1", "CDDL 1.1")
declared_licenses["CC0-1.0"] = ("CC0-1.0", "CC0 1.0")
declared_licenses["Public Domain"] = ("Public Domain", "Public Domain")
declared_licenses["-"] = ("None", "None")
declared_license = ''
k = 0
for key in declared_licenses.keys():
for value in declared_licenses[key]:
bool = license_type.startswith(value)
if bool:
declared_license = key
k += 1
break
else:
continue
if k >= 1:
break
if declared_license == '':
declared_license = license_type
return declared_license | 0.311427 | 0.124186 |
import time
from pyspark.rdd import portable_hash
from itertools import groupby, starmap, chain, filterfalse
def partition_by_first_value(key):
    """Partition on the hash of the pair's first element only."""
    first_element = key[0]
    return portable_hash(first_element)
def get_sorted_values(lines):
    """Group consecutive records that share the same first element.

    The input is assumed already sorted by that element, so each key
    appears in exactly one group.
    """
    return groupby(lines, key=lambda record: record[0])
def concat_and_min(listval1, listval2):
    """Merge two (members, minimum) accumulators for reduceByKey."""
    merged_members = listval1[0] + listval2[0]
    overall_min = min(listval1[1], listval2[1])
    return (merged_members, overall_min)
def ccf_iterate_online_min(graph):
    """One CCF iteration; the minimum is folded in during the reduce (online).

    Returns the new edge RDD plus a Spark accumulator counting freshly
    generated pairs (zero means the iteration has converged).
    """
    edges_both_ways = graph.flatMap(lambda edge: [edge, edge[::-1]])
    # (node, ([neighbor], neighbor)) so reduceByKey can carry a running min.
    prepared = edges_both_ways.map(
        lambda pair: (pair[0], ([pair[1]], pair[1]))
    )
    reduced = prepared.reduceByKey(concat_and_min)
    # Keep nodes whose neighborhood minimum is smaller than the node itself.
    smaller_min = reduced.filter(
        lambda pair: pair[1][1] < pair[0]
    )
    new_pairs = sc.accumulator(0)  # sc: driver SparkContext global
    def emit_pairs(pair):
        node, (neighbors, minimum) = pair[0], pair[1]
        extra = [(neighbor, minimum) for neighbor in neighbors if neighbor != minimum]
        new_pairs.add(len(extra))
        return chain([(node, minimum)], extra)
    return smaller_min.flatMap(emit_pairs), new_pairs
def ccf_iterate_offline_min(graph):
    """One CCF iteration; the minimum is computed after a full groupByKey
    (offline). Returns the new edge RDD plus a new-pair accumulator.
    """
    edges_both_ways = graph.flatMap(lambda edge: [edge, edge[::-1]])
    grouped = edges_both_ways.groupByKey()
    with_min = grouped.map(
        lambda pair: (pair[0], (pair[1], min(pair[1])))
    )
    # Keep nodes whose neighborhood minimum is smaller than the node itself.
    smaller_min = with_min.filter(
        lambda pair: pair[1][1] < pair[0]
    )
    new_pairs = sc.accumulator(0)  # sc: driver SparkContext global
    def emit_pairs(pair):
        minimum = pair[1][1]
        extra = [(member, minimum) for member in pair[1][0] if member != minimum]
        new_pairs.add(len(extra))
        return [(pair[0], minimum)] + extra
    return smaller_min.flatMap(emit_pairs), new_pairs
def ccf_iterate_secondary_sorting(graph):
    """One CCF iteration using secondary sorting: repartition + sort within
    partitions so each key's group already arrives in ascending order.
    Returns the new edge RDD plus a new-pair accumulator.
    """
    edges_both_ways = graph.flatMap(lambda edge: [edge, edge[::-1]])
    # Composite (pair, None) keys so the whole pair drives the sort order,
    # while partitioning still hashes only the first element.
    composite = edges_both_ways.map(lambda edge: (edge, None))
    partition_sorted_composite = composite.repartitionAndSortWithinPartitions(
        partitionFunc=partition_by_first_value
    )
    partition_sorted = partition_sorted_composite.map(
        lambda composite_key: tuple(composite_key[0])
    )
    sorted_groups = partition_sorted.mapPartitions(
        get_sorted_values, preservesPartitioning=True
    )
    # Pull the first (smallest) record off each lazy group; the generator
    # retains the remaining records for later consumption.
    with_head = sorted_groups.mapValues(
        lambda group: (next(group), group)
    )
    smaller_min = with_head.filter(
        lambda entry: entry[1][0][1] < entry[0]
    )
    new_pairs = sc.accumulator(0)  # sc: driver SparkContext global
    def emit_pairs(entry):
        minimum = entry[1][0][1]
        head_pair = zip([entry[0]], [minimum])
        def count_and_pair(duplicated_key, value):
            new_pairs.add(1)
            return (value, minimum)
        rest = starmap(count_and_pair, entry[1][1])
        return chain(head_pair, rest)
    return smaller_min.flatMap(emit_pairs), new_pairs
def ccf_dedup(graph):
    """Drop duplicate pairs by reducing on (pair, None) keys."""
    keyed = graph.map(lambda pair: (pair, None))
    unique = keyed.reduceByKey(lambda first, second: first)
    return unique.map(lambda keyed_pair: keyed_pair[0])
def find_connected_components(graph, method):
    """Run CCF iterations until no new pairs are produced.

    Args:
        graph: RDD of edge pairs.
        method: one of 'online', 'offline', 'secondary'.

    Returns:
        (final pair RDD, number of iterations run).

    Raises:
        ValueError: if ``method`` names no known strategy. (The original
        fell through and later crashed with a confusing NameError.)
    """
    strategies = {
        'online': ccf_iterate_online_min,
        'offline': ccf_iterate_offline_min,
        'secondary': ccf_iterate_secondary_sorting,
    }
    try:
        ccf_iterate = strategies[method]
    except KeyError:
        raise ValueError(f'unknown CCF method: {method!r}') from None
    new_pairs = -1
    n_loops = 0
    while new_pairs != 0:
        graph, acc = ccf_iterate(graph)
        graph = ccf_dedup(graph)
        graph.persist()
        # Force evaluation so the accumulator holds this iteration's count.
        graph.foreach(lambda x: x)
        new_pairs = acc.value
        n_loops += 1
    return graph, n_loops
def run(graph, method, show=False):
start = time.time()
output, iterations = find_connected_components(graph, method=method)
end = time.time()
if show:
print(output.collect())
print('Finding connected components with '
f'method={method} in {end - start:.3} s '
f'in {iterations} iterations.') | ccf_pyspark.py | import time
from pyspark.rdd import portable_hash
from itertools import groupby, starmap, chain, filterfalse
def partition_by_first_value(key):
return portable_hash(key[0])
def get_sorted_values(lines):
groups_with_key_duplicates = groupby(lines, key=lambda x: x[0])
return groups_with_key_duplicates
def concat_and_min(listval1, listval2):
return (listval1[0] + listval2[0], min(listval1[1], listval2[1]))
def ccf_iterate_online_min(graph):
sym = graph.flatMap(lambda edge : [edge, edge[::-1]])
frmt_for_reduce = sym.map(
lambda keyval : (keyval[0], ([keyval[1]], keyval[1]))
)
grps_mins = frmt_for_reduce.reduceByKey(concat_and_min)
fltr = grps_mins.filter(
lambda keyval : keyval[1][1] < keyval[0]
)
new_pairs = sc.accumulator(0)
def pair_and_count(keyval):
minval = keyval[1][1]
key_min_pair = [(keyval[0], minval)]
other_pairs = [(val, minval) for val in keyval[1][0] if minval != val]
new_pairs.add(len(other_pairs))
return chain(key_min_pair, other_pairs)
return fltr.flatMap(pair_and_count), new_pairs
def ccf_iterate_offline_min(graph):
sym = graph.flatMap(lambda edge : [edge, edge[::-1]])
grps = sym.groupByKey()
grps_mins = grps.map(
lambda keyval : (keyval[0], (keyval[1], min(keyval[1])))
)
fltr = grps_mins.filter(
lambda keyval : keyval[1][1] < keyval[0]
)
new_pairs = sc.accumulator(0)
def pair_and_count(keyval):
minval = keyval[1][1]
out = [(value, minval) for value in keyval[1][0] if value != minval]
new_pairs.add(len(out))
return [(keyval[0], minval)] + out
return fltr.flatMap(pair_and_count), new_pairs
def ccf_iterate_secondary_sorting(graph):
sym = graph.flatMap(lambda edge: [edge, edge[::-1]])
composite_key = sym.map(lambda edge: (edge,None))
partition_sorted_composite = composite_key.\
repartitionAndSortWithinPartitions(
partitionFunc=partition_by_first_value
)
partition_sorted = partition_sorted_composite.map(
lambda compkey:tuple(compkey[0])
)
sorted_groups = partition_sorted.mapPartitions(
get_sorted_values, preservesPartitioning=True
)
groups_with_min = sorted_groups.mapValues(
lambda group: (next(group), group)
)
fltr = groups_with_min.filter(
lambda keymingroup: keymingroup[1][0][1] < keymingroup[0]
)
new_pairs = sc.accumulator(0)
def pair_and_count(keymingroup):
minval = keymingroup[1][0][1]
key_min_pair = zip([keymingroup[0]],[minval])
def pair_and_increment(duplicated_key, value):
new_pairs.add(1)
return (value, minval)
other_pairs = starmap(
pair_and_increment,
keymingroup[1][1]
)
return chain(key_min_pair, other_pairs)
return fltr.flatMap(pair_and_count), new_pairs
def ccf_dedup(graph):
temp = graph.map(lambda keyval : (keyval, None))
reduced = temp.reduceByKey(lambda v1,v2 : v1)
return reduced.map(lambda keyval:keyval[0])
def find_connected_components(graph, method):
if method=='online':
ccf_iterate = ccf_iterate_online_min
elif method=='offline':
ccf_iterate = ccf_iterate_offline_min
elif method=='secondary':
ccf_iterate = ccf_iterate_secondary_sorting
new_pairs = -1
n_loops = 0
while new_pairs != 0:
graph, acc = ccf_iterate(graph)
graph = ccf_dedup(graph)
graph.persist()
graph.foreach(lambda x:x)
new_pairs = acc.value
n_loops += 1
return graph, n_loops
def run(graph, method, show=False):
start = time.time()
output, iterations = find_connected_components(graph, method=method)
end = time.time()
if show:
print(output.collect())
print('Finding connected components with '
f'method={method} in {end - start:.3} s '
f'in {iterations} iterations.') | 0.401453 | 0.307449 |
from ccobra import CCobraTaskEncoder
class SyllogisticTaskEncoder(CCobraTaskEncoder):
""" Syllogistic encoder. Provides functions for abbreviating syllogistic tasks.
"""
@staticmethod
def encode_task(task):
    """ Encodes a task to its syllogistic encoding.

    Parameters
    ----------
    task : list(list(str))
        List representation of the syllogism (e.g., [['All', 'A', 'B'], ['Some', 'B', 'C']]).

    Returns
    -------
    str
        Syllogistic task encoding (e.g., 'AI1').

    Raises
    ------
    ValueError
        If figure of syllogism cannot be determined.
    """
    premise_one, premise_two = task

    def abbreviate(quantifier):
        # 'Some not' must be replaced before 'Some'.
        return quantifier.replace('All', 'A').replace(
            'Some not', 'O').replace('Some', 'I').replace('No', 'E')

    mood = abbreviate(premise_one[0]) + abbreviate(premise_two[0])
    # The figure is fixed by which term positions the premises share.
    if premise_one[1] == premise_two[1]:
        figure = 4
    elif premise_one[2] == premise_two[1]:
        figure = 1
    elif premise_one[2] == premise_two[2]:
        figure = 3
    elif premise_one[1] == premise_two[2]:
        figure = 2
    else:
        raise ValueError('Could not determine figure of:', task)
    return mood + str(figure)
@staticmethod
def encode_response(response, task):
""" Encodes a response to its syllogistic encoding.
Parameters
----------
response : list(str)
Syllogistc response in list representation (e.g., ['All', 'A', 'C'])
task : list(list(str))
Syllogistic task in list representation (e.g., [['All', 'A', 'B'], ['Some', 'B', 'C']]).
Returns
-------
str
Syllogistic response encoding (e.g., 'Aac').
"""
if not isinstance(response[0], list):
response = [response]
if response[0] == 'NVC':
return 'NVC'
if response[0][0] == 'NVC':
return 'NVC'
object_sets = [set(x[1:]) for x in task]
midterm = object_sets[0].intersection(object_sets[1])
obj_a = object_sets[0] - midterm
quant = response[0][0].replace('All', 'A').replace(
'Some not', 'O').replace('Some', 'I').replace('No', 'E')
return quant + ('ac' if response[0][1] == list(obj_a)[0] else 'ca') | ccobra/syllogistic/task_encoder_syl.py | from ccobra import CCobraTaskEncoder
class SyllogisticTaskEncoder(CCobraTaskEncoder):
""" Syllogistic encoder. Provides functions for abbreviating syllogistic tasks.
"""
@staticmethod
def encode_task(task):
    """ Encodes a syllogistic task as a three-character string.

    The two verbal quantifiers are abbreviated to their classical
    one-letter codes (A/I/E/O) and the figure (1-4) is derived from
    how the premise terms line up.

    Parameters
    ----------
    task : list(list(str))
        List representation of the syllogism
        (e.g., [['All', 'A', 'B'], ['Some', 'B', 'C']]).

    Returns
    -------
    str
        Syllogistic task encoding (e.g., 'AI1').

    Raises
    ------
    ValueError
        If figure of syllogism cannot be determined.
    """
    first, second = task

    def abbreviate(quantifier):
        # Replacement order matters: 'Some not' must be handled before 'Some'.
        for verbal, code in (('All', 'A'), ('Some not', 'O'),
                             ('Some', 'I'), ('No', 'E')):
            quantifier = quantifier.replace(verbal, code)
        return quantifier

    if first[1] == second[1]:
        figure = 4
    elif first[2] == second[1]:
        figure = 1
    elif first[2] == second[2]:
        figure = 3
    elif first[1] == second[2]:
        figure = 2
    else:
        raise ValueError('Could not determine figure of:', task)
    return abbreviate(first[0]) + abbreviate(second[0]) + str(figure)
@staticmethod
def encode_response(response, task):
    """ Encodes a response to its syllogistic encoding.
    Parameters
    ----------
    response : list(str)
        Syllogistic response in list representation (e.g., ['All', 'A', 'C'])
    task : list(list(str))
        Syllogistic task in list representation (e.g., [['All', 'A', 'B'], ['Some', 'B', 'C']]).
    Returns
    -------
    str
        Syllogistic response encoding (e.g., 'Aac').
    """
    # Normalize a flat response (['All', 'A', 'C']) to the nested form.
    if not isinstance(response[0], list):
        response = [response]
    # 'No valid conclusion' can arrive either nested or flat; handle both.
    if response[0] == 'NVC':
        return 'NVC'
    if response[0][0] == 'NVC':
        return 'NVC'
    # The midterm is the term shared by both premises; obj_a is the
    # remaining (end) term of the first premise.
    object_sets = [set(x[1:]) for x in task]
    midterm = object_sets[0].intersection(object_sets[1])
    obj_a = object_sets[0] - midterm
    quant = response[0][0].replace('All', 'A').replace(
        'Some not', 'O').replace('Some', 'I').replace('No', 'E')
    # Direction suffix: 'ac' if the conclusion starts with the first
    # premise's end term, otherwise 'ca'.
    # NOTE(review): trailing "| ... |" text below is dataset-dump residue
    # fused onto this line, not part of the original module.
    return quant + ('ac' if response[0][1] == list(obj_a)[0] else 'ca') | 0.841272 | 0.486636
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
class UsefullFeatureSelector(BaseEstimator, TransformerMixin):
    """Drops useless columns: NaN-heavy, (near-)constant and ID-like features."""

    def __init__(self, nans_thr=1, const_thr=1):
        # nans_thr: minimum NaN share for a column to be dropped (1 == all NaN).
        # const_thr: minimum share of the dominant value for a column to be dropped.
        self.nans_thr = nans_thr
        self.const_thr = const_thr
        self.useless_ = None

    def fit(self, X, y=None):
        """Collect the union of NaN-heavy, constant and ID-like column names."""
        self.useless_ = np.unique(self._nans_detect(X) +
                                  self._const_detect(X) +
                                  self._id_detect(X))
        print('Columns to drop:', len(self.useless_))
        return self

    def transform(self, X):
        """Return X without the columns found during fit()."""
        return X.drop(self.useless_, axis=1)

    def fit_transform(self, X, y=None, **fit_params):
        # Pass y through for sklearn-API consistency (fit ignores it anyway).
        return self.fit(X, y).transform(X)

    def _nans_detect(self, X):
        """Return columns whose NaN share is at or above nans_thr."""
        threshold = self.nans_thr
        # Share of missing values per column.
        nans = X.isna().mean()
        nans_features = nans[nans >= threshold].index.to_list()
        print('NaNs features were found:', len(nans_features))
        return nans_features

    def _const_detect(self, X):
        """Return columns whose dominant value share is at or above const_thr."""
        threshold = self.const_thr
        constant_features = []
        for feature in X:
            # Share of the most frequent value in the column.
            try:
                dominant = (X[feature]
                            .value_counts(normalize=True)
                            .sort_values(ascending=False)
                            .values[0])
            # A column consisting entirely of NaNs has no value counts; skip it.
            except IndexError:
                continue
            if dominant >= threshold:
                constant_features.append(feature)
        print('Constant features were found:', len(constant_features))
        return constant_features

    @staticmethod
    def _id_detect(X):
        """Return columns whose non-null values are all unique (ID-like)."""
        id_features = []
        for feature in X:
            non_null = X[feature].dropna()
            rows = non_null.shape[0]
            nunique = non_null.nunique()
            # BUG FIX: the original used `rows & (rows == nunique)` — a bitwise
            # AND of an int with a bool, which is only truthy when the row
            # count is odd, so ID columns with an even number of rows were
            # silently missed. Use a proper boolean conjunction instead.
            if rows > 0 and rows == nunique:
                id_features.append(feature)
        print('ID features were found:', len(id_features))
        return id_features
def correct_features_lists(all_features, numerical, categorical):
    """Restrict the numerical/categorical name lists to names present in all_features."""
    # np.intersect1d also sorts and de-duplicates the surviving names.
    numerical = np.intersect1d(numerical, all_features).tolist()
    categorical = np.intersect1d(categorical, all_features).tolist()
    # NOTE(review): trailing "| ... |" text below is dataset-dump residue
    # fused onto this line, not part of the original module.
    print('After correction: numerical -', len(numerical), ', categorical: - ', len(categorical), '\n')
    return numerical, categorical | ocp/features/feature_selection.py | import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
class UsefullFeatureSelector(BaseEstimator, TransformerMixin):
    """Drops useless columns: NaN-heavy, (near-)constant and ID-like features."""

    def __init__(self, nans_thr=1, const_thr=1):
        # nans_thr: minimum NaN share for a column to be dropped (1 == all NaN).
        # const_thr: minimum share of the dominant value for a column to be dropped.
        self.nans_thr = nans_thr
        self.const_thr = const_thr
        self.useless_ = None

    def fit(self, X, y=None):
        """Collect the union of NaN-heavy, constant and ID-like column names."""
        self.useless_ = np.unique(self._nans_detect(X) +
                                  self._const_detect(X) +
                                  self._id_detect(X))
        print('Columns to drop:', len(self.useless_))
        return self

    def transform(self, X):
        """Return X without the columns found during fit()."""
        return X.drop(self.useless_, axis=1)

    def fit_transform(self, X, y=None, **fit_params):
        # Pass y through for sklearn-API consistency (fit ignores it anyway).
        return self.fit(X, y).transform(X)

    def _nans_detect(self, X):
        """Return columns whose NaN share is at or above nans_thr."""
        threshold = self.nans_thr
        # Share of missing values per column.
        nans = X.isna().mean()
        nans_features = nans[nans >= threshold].index.to_list()
        print('NaNs features were found:', len(nans_features))
        return nans_features

    def _const_detect(self, X):
        """Return columns whose dominant value share is at or above const_thr."""
        threshold = self.const_thr
        constant_features = []
        for feature in X:
            # Share of the most frequent value in the column.
            try:
                dominant = (X[feature]
                            .value_counts(normalize=True)
                            .sort_values(ascending=False)
                            .values[0])
            # A column consisting entirely of NaNs has no value counts; skip it.
            except IndexError:
                continue
            if dominant >= threshold:
                constant_features.append(feature)
        print('Constant features were found:', len(constant_features))
        return constant_features

    @staticmethod
    def _id_detect(X):
        """Return columns whose non-null values are all unique (ID-like)."""
        id_features = []
        for feature in X:
            non_null = X[feature].dropna()
            rows = non_null.shape[0]
            nunique = non_null.nunique()
            # BUG FIX: the original used `rows & (rows == nunique)` — a bitwise
            # AND of an int with a bool, which is only truthy when the row
            # count is odd, so ID columns with an even number of rows were
            # silently missed. Use a proper boolean conjunction instead.
            if rows > 0 and rows == nunique:
                id_features.append(feature)
        print('ID features were found:', len(id_features))
        return id_features
def correct_features_lists(all_features, numerical, categorical):
    """Restrict the numerical/categorical name lists to names present in all_features."""
    # np.intersect1d also sorts and de-duplicates the surviving names.
    numerical = np.intersect1d(numerical, all_features).tolist()
    categorical = np.intersect1d(categorical, all_features).tolist()
    # NOTE(review): trailing "| ... |" text below is dataset-dump residue
    # fused onto this line, not part of the original module.
    print('After correction: numerical -', len(numerical), ', categorical: - ', len(categorical), '\n')
    return numerical, categorical | 0.395718 | 0.274218
import logging
import pickle
from typing import List
import torch
from transformers import RobertaTokenizer, RobertaModel
from src.dataobjs.mention_data import MentionData
logger = logging.getLogger(__name__)
class EmbedTransformersGenerics(object):
    """Embeds mention spans in context with a pretrained RoBERTa-large encoder."""

    def __init__(self, max_surrounding_contx,
                 finetune=False, use_cuda=True):
        # max_surrounding_contx: max sub-tokens kept on each side of the
        # mention (-1 disables truncation).
        # finetune: when True, gradients flow through the encoder.
        self.model = RobertaModel.from_pretrained("roberta-large")
        # self.model = BertModel.from_pretrained("bert-large-cased")
        self.tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
        # self.tokenizer = BertTokenizer.from_pretrained("bert-large-cased")
        self.max_surrounding_contx = max_surrounding_contx
        self.use_cuda = use_cuda
        self.finetune = finetune
        self.embed_size = 1024  # hidden size of roberta-large
        if self.use_cuda:
            self.model.cuda()

    def get_mention_full_rep(self, mention):
        """Return (span hidden states, first-token vec, last-token vec, span length)."""
        sent_ids, ment1_inx_start, ment1_inx_end = self.mention_feat_to_vec(mention)
        if self.use_cuda:
            sent_ids = sent_ids.cuda()
        if not self.finetune:
            # Inference only: skip gradient bookkeeping.
            with torch.no_grad():
                last_hidden_span = self.model(sent_ids).last_hidden_state
        else:
            last_hidden_span = self.model(sent_ids).last_hidden_state
        # Drop the batch dimension, then slice out the mention sub-tokens.
        mention_hidden_span = last_hidden_span.view(last_hidden_span.shape[1], -1)[ment1_inx_start:ment1_inx_end]
        return mention_hidden_span, mention_hidden_span[0], mention_hidden_span[-1], mention_hidden_span.shape[0]

    @staticmethod
    def extract_mention_surrounding_context(mention):
        """Split a mention's context tokens into (before, mention, after) lists."""
        tokens_inds = mention.tokens_number
        context = mention.mention_context
        start_mention_index = tokens_inds[0]
        end_mention_index = tokens_inds[-1] + 1
        # Sanity check: the index span must match the mention's token count.
        assert len(tokens_inds) == len(mention.tokens_str.split(" "))
        ret_context_before = context[0:start_mention_index]
        ret_mention = context[start_mention_index:end_mention_index]
        ret_context_after = context[end_mention_index:]
        assert ret_mention == mention.tokens_str.split(" ")
        assert ret_context_before + ret_mention + ret_context_after == mention.mention_context
        return ret_context_before, ret_mention, ret_context_after

    def mention_feat_to_vec(self, mention):
        """Tokenize mention + context; return (ids tensor, span start idx, span end idx)."""
        cntx_before_str, ment_span_str, cntx_after_str = EmbedTransformersGenerics.\
            extract_mention_surrounding_context(mention)
        cntx_before, cntx_after = cntx_before_str, cntx_after_str
        if len(cntx_before_str) != 0:
            cntx_before = self.tokenizer.encode(" ".join(cntx_before_str), add_special_tokens=False)
        if len(cntx_after_str) != 0:
            cntx_after = self.tokenizer.encode(" ".join(cntx_after_str), add_special_tokens=False)
        if self.max_surrounding_contx != -1:
            # Truncate keeping the sub-tokens closest to the mention boundary.
            if len(cntx_before) > self.max_surrounding_contx:
                cntx_before = cntx_before[-self.max_surrounding_contx+1:]
            if len(cntx_after) > self.max_surrounding_contx:
                cntx_after = cntx_after[:self.max_surrounding_contx-1]
        ment_span = self.tokenizer.encode(" ".join(ment_span_str), add_special_tokens=False)
        # Normalize tokenizer output to plain lists so they can be concatenated.
        if isinstance(ment_span, torch.Tensor):
            ment_span = ment_span.tolist()
        if isinstance(cntx_before, torch.Tensor):
            cntx_before = cntx_before.tolist()
        if isinstance(cntx_after, torch.Tensor):
            cntx_after = cntx_after.tolist()
        # 0 / 2 are presumably RoBERTa's <s> / </s> ids — TODO confirm.
        all_sent_toks = [[0] + cntx_before + ment_span + cntx_after + [2]]
        sent_tokens = torch.tensor(all_sent_toks)
        # +1 offsets account for the leading special token.
        mention_start_idx = len(cntx_before) + 1
        mention_end_idx = len(cntx_before) + len(ment_span) + 1
        assert all_sent_toks[0][mention_start_idx:mention_end_idx] == ment_span
        return sent_tokens, mention_start_idx, mention_end_idx

    def get_embed_size(self):
        return self.embed_size
class EmbedFromFile(object):
    """Serves pre-computed mention embeddings loaded from pickled files."""

    def __init__(self, files_to_load: List[str]):
        """
        :param files_to_load: list of pre-generated embedding file names
        """
        self.embed_size = 1024
        bert_dict = dict()
        if files_to_load is not None and len(files_to_load) > 0:
            for file_ in files_to_load:
                # FIX: the original used pickle.load(open(file_, "rb")) and
                # never closed the handle; use a context manager instead.
                with open(file_, "rb") as pickle_file:
                    bert_dict.update(pickle.load(pickle_file))
                logger.info("Bert representation loaded-" + file_)
        # Index embeddings by position; embed_key maps mention_id -> position.
        self.embeddings = list(bert_dict.values())
        self.embed_key = {k: i for i, k in enumerate(bert_dict.keys())}

    def get_mention_full_rep(self, mention):
        """Return the stored representation for a single mention."""
        return self.embeddings[self.embed_key[mention.mention_id]]

    def get_mentions_rep(self, mentions_list):
        """Return stored representations for a list of mentions (in order)."""
        embed_list = [self.embeddings[self.embed_key[mention.mention_id]] for mention in mentions_list]
        return embed_list

    def get_embed_size(self):
        return self.embed_size
class EmbedInMemory(object):
    """Computes mention embeddings up front and serves them from memory."""

    def __init__(self, mentions: List[MentionData], max_surrounding_contx, use_cuda):
        self.embed_size = 1024
        bert_dict = dict()
        # Embed every mention once; results are moved to CPU so the GPU
        # copies can be released.
        embed_model = EmbedTransformersGenerics(max_surrounding_contx=max_surrounding_contx, use_cuda=use_cuda)
        for ment in mentions:
            hidden, first_tok, last_tok, ment_size = embed_model.get_mention_full_rep(ment)
            bert_dict[ment.mention_id] = (hidden.cpu(), first_tok.cpu(), last_tok.cpu(), ment_size)
        self.embeddings = list(bert_dict.values())
        self.embed_key = {k: i for i, k in enumerate(bert_dict.keys())}

    def get_mention_full_rep(self, mention):
        # Raises KeyError if the mention was not pre-embedded.
        return self.embeddings[self.embed_key[mention.mention_id]]

    def get_mentions_rep(self, mentions_list):
        embed_list = [self.embeddings[self.embed_key[mention.mention_id]] for mention in mentions_list]
        return embed_list

    def get_embed_size(self):
        # NOTE(review): trailing "| ... |" text below is dataset-dump residue
        # fused onto this line, not part of the original module.
        return self.embed_size | src/utils/embed_utils.py | import logging
import pickle
from typing import List
import torch
from transformers import RobertaTokenizer, RobertaModel
from src.dataobjs.mention_data import MentionData
logger = logging.getLogger(__name__)
class EmbedTransformersGenerics(object):
    """Embeds mention spans in context with a pretrained RoBERTa-large encoder."""

    def __init__(self, max_surrounding_contx,
                 finetune=False, use_cuda=True):
        # max_surrounding_contx: max sub-tokens kept on each side of the
        # mention (-1 disables truncation).
        # finetune: when True, gradients flow through the encoder.
        self.model = RobertaModel.from_pretrained("roberta-large")
        # self.model = BertModel.from_pretrained("bert-large-cased")
        self.tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
        # self.tokenizer = BertTokenizer.from_pretrained("bert-large-cased")
        self.max_surrounding_contx = max_surrounding_contx
        self.use_cuda = use_cuda
        self.finetune = finetune
        self.embed_size = 1024  # hidden size of roberta-large
        if self.use_cuda:
            self.model.cuda()

    def get_mention_full_rep(self, mention):
        """Return (span hidden states, first-token vec, last-token vec, span length)."""
        sent_ids, ment1_inx_start, ment1_inx_end = self.mention_feat_to_vec(mention)
        if self.use_cuda:
            sent_ids = sent_ids.cuda()
        if not self.finetune:
            # Inference only: skip gradient bookkeeping.
            with torch.no_grad():
                last_hidden_span = self.model(sent_ids).last_hidden_state
        else:
            last_hidden_span = self.model(sent_ids).last_hidden_state
        # Drop the batch dimension, then slice out the mention sub-tokens.
        mention_hidden_span = last_hidden_span.view(last_hidden_span.shape[1], -1)[ment1_inx_start:ment1_inx_end]
        return mention_hidden_span, mention_hidden_span[0], mention_hidden_span[-1], mention_hidden_span.shape[0]

    @staticmethod
    def extract_mention_surrounding_context(mention):
        """Split a mention's context tokens into (before, mention, after) lists."""
        tokens_inds = mention.tokens_number
        context = mention.mention_context
        start_mention_index = tokens_inds[0]
        end_mention_index = tokens_inds[-1] + 1
        # Sanity check: the index span must match the mention's token count.
        assert len(tokens_inds) == len(mention.tokens_str.split(" "))
        ret_context_before = context[0:start_mention_index]
        ret_mention = context[start_mention_index:end_mention_index]
        ret_context_after = context[end_mention_index:]
        assert ret_mention == mention.tokens_str.split(" ")
        assert ret_context_before + ret_mention + ret_context_after == mention.mention_context
        return ret_context_before, ret_mention, ret_context_after

    def mention_feat_to_vec(self, mention):
        """Tokenize mention + context; return (ids tensor, span start idx, span end idx)."""
        cntx_before_str, ment_span_str, cntx_after_str = EmbedTransformersGenerics.\
            extract_mention_surrounding_context(mention)
        cntx_before, cntx_after = cntx_before_str, cntx_after_str
        if len(cntx_before_str) != 0:
            cntx_before = self.tokenizer.encode(" ".join(cntx_before_str), add_special_tokens=False)
        if len(cntx_after_str) != 0:
            cntx_after = self.tokenizer.encode(" ".join(cntx_after_str), add_special_tokens=False)
        if self.max_surrounding_contx != -1:
            # Truncate keeping the sub-tokens closest to the mention boundary.
            if len(cntx_before) > self.max_surrounding_contx:
                cntx_before = cntx_before[-self.max_surrounding_contx+1:]
            if len(cntx_after) > self.max_surrounding_contx:
                cntx_after = cntx_after[:self.max_surrounding_contx-1]
        ment_span = self.tokenizer.encode(" ".join(ment_span_str), add_special_tokens=False)
        # Normalize tokenizer output to plain lists so they can be concatenated.
        if isinstance(ment_span, torch.Tensor):
            ment_span = ment_span.tolist()
        if isinstance(cntx_before, torch.Tensor):
            cntx_before = cntx_before.tolist()
        if isinstance(cntx_after, torch.Tensor):
            cntx_after = cntx_after.tolist()
        # 0 / 2 are presumably RoBERTa's <s> / </s> ids — TODO confirm.
        all_sent_toks = [[0] + cntx_before + ment_span + cntx_after + [2]]
        sent_tokens = torch.tensor(all_sent_toks)
        # +1 offsets account for the leading special token.
        mention_start_idx = len(cntx_before) + 1
        mention_end_idx = len(cntx_before) + len(ment_span) + 1
        assert all_sent_toks[0][mention_start_idx:mention_end_idx] == ment_span
        return sent_tokens, mention_start_idx, mention_end_idx

    def get_embed_size(self):
        return self.embed_size
class EmbedFromFile(object):
    """Serves pre-computed mention embeddings loaded from pickled files."""

    def __init__(self, files_to_load: List[str]):
        """
        :param files_to_load: list of pre-generated embedding file names
        """
        self.embed_size = 1024
        bert_dict = dict()
        if files_to_load is not None and len(files_to_load) > 0:
            for file_ in files_to_load:
                # FIX: the original used pickle.load(open(file_, "rb")) and
                # never closed the handle; use a context manager instead.
                with open(file_, "rb") as pickle_file:
                    bert_dict.update(pickle.load(pickle_file))
                logger.info("Bert representation loaded-" + file_)
        # Index embeddings by position; embed_key maps mention_id -> position.
        self.embeddings = list(bert_dict.values())
        self.embed_key = {k: i for i, k in enumerate(bert_dict.keys())}

    def get_mention_full_rep(self, mention):
        """Return the stored representation for a single mention."""
        return self.embeddings[self.embed_key[mention.mention_id]]

    def get_mentions_rep(self, mentions_list):
        """Return stored representations for a list of mentions (in order)."""
        embed_list = [self.embeddings[self.embed_key[mention.mention_id]] for mention in mentions_list]
        return embed_list

    def get_embed_size(self):
        return self.embed_size
class EmbedInMemory(object):
    """Computes mention embeddings up front and serves them from memory."""

    def __init__(self, mentions: List[MentionData], max_surrounding_contx, use_cuda):
        self.embed_size = 1024
        bert_dict = dict()
        # Embed every mention once; results are moved to CPU so the GPU
        # copies can be released.
        embed_model = EmbedTransformersGenerics(max_surrounding_contx=max_surrounding_contx, use_cuda=use_cuda)
        for ment in mentions:
            hidden, first_tok, last_tok, ment_size = embed_model.get_mention_full_rep(ment)
            bert_dict[ment.mention_id] = (hidden.cpu(), first_tok.cpu(), last_tok.cpu(), ment_size)
        self.embeddings = list(bert_dict.values())
        self.embed_key = {k: i for i, k in enumerate(bert_dict.keys())}

    def get_mention_full_rep(self, mention):
        # Raises KeyError if the mention was not pre-embedded.
        return self.embeddings[self.embed_key[mention.mention_id]]

    def get_mentions_rep(self, mentions_list):
        embed_list = [self.embeddings[self.embed_key[mention.mention_id]] for mention in mentions_list]
        return embed_list

    def get_embed_size(self):
        # NOTE(review): trailing "| ... |" text below is dataset-dump residue
        # fused onto this line, not part of the original module.
        return self.embed_size | 0.772015 | 0.28284
import torch.nn as nn
from .ssim import SSIMLoss, MSSSIMLoss
from .dice import DiceLoss, BCEDiceLoss
from .rmse import RMSELoss
def cross_entropy_loss():
    """Build a cross-entropy criterion.

    Softmax is applied internally to the raw predictions, so pass
    unnormalized logits.

    Returns:
        Cross entroy loss function
    """
    criterion = nn.CrossEntropyLoss()
    return criterion


def bce_loss():
    """Build a binary cross-entropy criterion.

    Sigmoid is applied internally to the raw predictions, so pass
    unnormalized logits.

    Returns:
        Binary cross entropy loss function
    """
    criterion = nn.BCEWithLogitsLoss()
    return criterion


def mse_loss():
    """Build a mean-squared-error criterion.

    Returns:
        Mean squared error loss function
    """
    criterion = nn.MSELoss()
    return criterion
def rmse_loss(smooth=1e-6):
    """Root Mean Squared Error Loss.

    Args:
        smooth (:obj:`float`, optional): Smoothing value. (default: 1e-6)

    Returns:
        Root mean squared error loss function
    """
    # BUG FIX: the original passed the literal 1e-6, silently ignoring the
    # caller-supplied `smooth` argument.
    return RMSELoss(smooth=smooth)
def dice_loss(smooth=1):
    """Build a Dice loss criterion.

    Args:
        smooth (:obj:`float`, optional): Laplace/additive smoothing term;
            larger values can be used to avoid overfitting. (default: 1)

    Returns:
        Dice loss function
    """
    criterion = DiceLoss(smooth=smooth)
    return criterion


def bce_dice_loss(smooth=1e-6):
    """Build a combined BCE + Dice loss criterion.

    Args:
        smooth (:obj:`float`, optional): Smoothing value.

    Returns:
        BCE Dice loss function
    """
    criterion = BCEDiceLoss(smooth=smooth)
    return criterion
def ssim_loss(data_range=1.0, size_average=True, channel=1):
    """SSIM Loss.
    Args:
        data_range (:obj:`float` or :obj:`int`, optional): Value range of input
            images (usually 1.0 or 255). (default: 1.0)
        size_average (:obj:`bool`, optional): If size_average=True, ssim
            of all images will be averaged as a scalar. (default: True)
        channel (:obj:`int`, optional): input channels (default: 1)
    Returns:
        SSIM loss function
    """
    return SSIMLoss(
        data_range=data_range, size_average=size_average, channel=channel
    )
def ms_ssim_loss(data_range=1.0, size_average=True, channel=1):
    """MS-SSIM Loss.
    Args:
        data_range (:obj:`float` or :obj:`int`, optional): Value range of input
            images (usually 1.0 or 255). (default: 1.0)
        size_average (:obj:`bool`, optional): If size_average=True, ssim
            of all images will be averaged as a scalar. (default: True)
        channel (:obj:`int`, optional): input channels (default: 1)
    Returns:
        MS-SSIM loss function
    """
    # Multi-scale variant of SSIM; see MSSSIMLoss for the implementation.
    # NOTE(review): trailing "| ... |" text below is dataset-dump residue
    # fused onto this line, not part of the original module.
    return MSSSIMLoss(
        data_range=data_range, size_average=size_average, channel=channel
    ) | tensornet/models/loss/__init__.py | import torch.nn as nn
from .ssim import SSIMLoss, MSSSIMLoss
from .dice import DiceLoss, BCEDiceLoss
from .rmse import RMSELoss
def cross_entropy_loss():
    """Build a cross-entropy criterion.

    Softmax is applied internally to the raw predictions, so pass
    unnormalized logits.

    Returns:
        Cross entroy loss function
    """
    criterion = nn.CrossEntropyLoss()
    return criterion


def bce_loss():
    """Build a binary cross-entropy criterion.

    Sigmoid is applied internally to the raw predictions, so pass
    unnormalized logits.

    Returns:
        Binary cross entropy loss function
    """
    criterion = nn.BCEWithLogitsLoss()
    return criterion


def mse_loss():
    """Build a mean-squared-error criterion.

    Returns:
        Mean squared error loss function
    """
    criterion = nn.MSELoss()
    return criterion
def rmse_loss(smooth=1e-6):
    """Root Mean Squared Error Loss.

    Args:
        smooth (:obj:`float`, optional): Smoothing value. (default: 1e-6)

    Returns:
        Root mean squared error loss function
    """
    # BUG FIX: the original passed the literal 1e-6, silently ignoring the
    # caller-supplied `smooth` argument.
    return RMSELoss(smooth=smooth)
def dice_loss(smooth=1):
    """Build a Dice loss criterion.

    Args:
        smooth (:obj:`float`, optional): Laplace/additive smoothing term;
            larger values can be used to avoid overfitting. (default: 1)

    Returns:
        Dice loss function
    """
    criterion = DiceLoss(smooth=smooth)
    return criterion


def bce_dice_loss(smooth=1e-6):
    """Build a combined BCE + Dice loss criterion.

    Args:
        smooth (:obj:`float`, optional): Smoothing value.

    Returns:
        BCE Dice loss function
    """
    criterion = BCEDiceLoss(smooth=smooth)
    return criterion
def ssim_loss(data_range=1.0, size_average=True, channel=1):
    """SSIM Loss.
    Args:
        data_range (:obj:`float` or :obj:`int`, optional): Value range of input
            images (usually 1.0 or 255). (default: 1.0)
        size_average (:obj:`bool`, optional): If size_average=True, ssim
            of all images will be averaged as a scalar. (default: True)
        channel (:obj:`int`, optional): input channels (default: 1)
    Returns:
        SSIM loss function
    """
    return SSIMLoss(
        data_range=data_range, size_average=size_average, channel=channel
    )
def ms_ssim_loss(data_range=1.0, size_average=True, channel=1):
    """MS-SSIM Loss.
    Args:
        data_range (:obj:`float` or :obj:`int`, optional): Value range of input
            images (usually 1.0 or 255). (default: 1.0)
        size_average (:obj:`bool`, optional): If size_average=True, ssim
            of all images will be averaged as a scalar. (default: True)
        channel (:obj:`int`, optional): input channels (default: 1)
    Returns:
        MS-SSIM loss function
    """
    # Multi-scale variant of SSIM; see MSSSIMLoss for the implementation.
    # NOTE(review): trailing "| ... |" text below is dataset-dump residue
    # fused onto this line, not part of the original module.
    return MSSSIMLoss(
        data_range=data_range, size_average=size_average, channel=channel
    ) | 0.964739 | 0.591133
import logging
import os.path
import subprocess
import gi
import nm_tools
gi.require_version('Gdk', '3.0')
from ulauncher.api.client.EventListener import EventListener
from ulauncher.api.client.Extension import Extension
from ulauncher.api.shared.action.ExtensionCustomAction import ExtensionCustomAction
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.event import KeywordQueryEvent, ItemEnterEvent
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
logger = logging.getLogger(__name__)
description_active = "{} | ACTIVE | Select to disconnect"
description_inactive = "{} | Select to connect"
class NetworkManagerExtension(Extension):
    """Ulauncher extension entry point; wires up the event listeners."""

    def __init__(self):
        super().__init__()
        # Route keyword queries and item activations to their handlers.
        self.subscribe(KeywordQueryEvent, KeywordQueryEventListener())
        self.subscribe(ItemEnterEvent, ItemEnterEventListener())
class KeywordQueryEventListener(EventListener):
    """Renders the list of known connections, filtered by the typed query."""

    def on_event(self, event, extension):
        search_query = event.get_argument()
        hidden_types = extension.preferences.get("hidden_type_list").split(",")
        connections = nm_tools.get_connections()
        connections = sorted(connections, key=lambda d: d["name"].lower())
        items = []
        for a in connections:
            name = a["name"]
            # FIX: lower-case the query too — the original compared the raw
            # query against the lower-cased name, so a query containing any
            # capital letter could never match.
            if search_query is not None and search_query.lower() not in name.lower():
                continue
            # Skip hidden connection types before doing any icon work so we
            # don't log icon warnings for entries that are never shown.
            if a["type"] in hidden_types:
                continue
            description = description_active if a["active"] else description_inactive
            description = description.format(a["type"])
            icon_name = "{}_{}".format(a["type"], a["active"])
            icon_path = 'images/{}.png'.format(icon_name)
            if not os.path.isfile(icon_path):
                logger.warning("Icon not found: " + icon_path)
            on_click_event = ExtensionCustomAction(a, keep_app_open=False)
            item_row = ExtensionResultItem(icon=icon_path,
                                           name=name,
                                           description=description,
                                           on_enter=on_click_event)
            items.append(item_row)
        return RenderResultListAction(items)
class ItemEnterEventListener(EventListener):
    """Handles a selected connection: toggles it and reports the outcome."""

    def on_event(self, event, extension):
        con = event.get_data()
        uuid = con["uuid"]
        # Toggle: active connections get disconnected, inactive ones connected.
        if con["active"]:
            log, result = nm_tools.disconnect(uuid)
        else:
            log, result = nm_tools.connect(uuid)
        # FIX: use the module logger (consistent with the rest of the file)
        # instead of the root logger.
        logger.debug(log)
        # Notification
        if extension.preferences.get("enable_notifications") == "true":
            if not result:
                # Operation failed
                nm_tools.send_notification("Operation failed: " + log)
            elif con["active"]:
                # Success, disconnected
                nm_tools.send_notification("Now disconnected: " + con["name"])
            else:
                # Success, connected
                nm_tools.send_notification("Now connected: " + con["name"])
        # Run script if successfully connected and script isn't empty
        script = extension.preferences.get("script_on_connect")
        if not con["active"] and script != "" and result:
            subprocess.run([script, con["name"], con["uuid"]], stdout=subprocess.PIPE)
if __name__ == '__main__':
NetworkManagerExtension().run() | main.py | import logging
import os.path
import subprocess
import gi
import nm_tools
gi.require_version('Gdk', '3.0')
from ulauncher.api.client.EventListener import EventListener
from ulauncher.api.client.Extension import Extension
from ulauncher.api.shared.action.ExtensionCustomAction import ExtensionCustomAction
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.event import KeywordQueryEvent, ItemEnterEvent
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
logger = logging.getLogger(__name__)
description_active = "{} | ACTIVE | Select to disconnect"
description_inactive = "{} | Select to connect"
class NetworkManagerExtension(Extension):
    """Ulauncher extension entry point; wires up the event listeners."""

    def __init__(self):
        super().__init__()
        # Route keyword queries and item activations to their handlers.
        self.subscribe(KeywordQueryEvent, KeywordQueryEventListener())
        self.subscribe(ItemEnterEvent, ItemEnterEventListener())
class KeywordQueryEventListener(EventListener):
    """Renders the list of known connections, filtered by the typed query."""

    def on_event(self, event, extension):
        search_query = event.get_argument()
        hidden_types = extension.preferences.get("hidden_type_list").split(",")
        connections = nm_tools.get_connections()
        connections = sorted(connections, key=lambda d: d["name"].lower())
        items = []
        for a in connections:
            name = a["name"]
            # FIX: lower-case the query too — the original compared the raw
            # query against the lower-cased name, so a query containing any
            # capital letter could never match.
            if search_query is not None and search_query.lower() not in name.lower():
                continue
            # Skip hidden connection types before doing any icon work so we
            # don't log icon warnings for entries that are never shown.
            if a["type"] in hidden_types:
                continue
            description = description_active if a["active"] else description_inactive
            description = description.format(a["type"])
            icon_name = "{}_{}".format(a["type"], a["active"])
            icon_path = 'images/{}.png'.format(icon_name)
            if not os.path.isfile(icon_path):
                logger.warning("Icon not found: " + icon_path)
            on_click_event = ExtensionCustomAction(a, keep_app_open=False)
            item_row = ExtensionResultItem(icon=icon_path,
                                           name=name,
                                           description=description,
                                           on_enter=on_click_event)
            items.append(item_row)
        return RenderResultListAction(items)
class ItemEnterEventListener(EventListener):
    """Handles a selected connection: toggles it and reports the outcome."""

    def on_event(self, event, extension):
        con = event.get_data()
        uuid = con["uuid"]
        # Toggle: active connections get disconnected, inactive ones connected.
        if con["active"]:
            log, result = nm_tools.disconnect(uuid)
        else:
            log, result = nm_tools.connect(uuid)
        # FIX: use the module logger (consistent with the rest of the file)
        # instead of the root logger.
        logger.debug(log)
        # Notification
        if extension.preferences.get("enable_notifications") == "true":
            if not result:
                # Operation failed
                nm_tools.send_notification("Operation failed: " + log)
            elif con["active"]:
                # Success, disconnected
                nm_tools.send_notification("Now disconnected: " + con["name"])
            else:
                # Success, connected
                nm_tools.send_notification("Now connected: " + con["name"])
        # Run script if successfully connected and script isn't empty
        script = extension.preferences.get("script_on_connect")
        if not con["active"] and script != "" and result:
            subprocess.run([script, con["name"], con["uuid"]], stdout=subprocess.PIPE)
if __name__ == '__main__':
NetworkManagerExtension().run() | 0.415373 | 0.042206 |
import hail as hl
from hail.utils import FatalError
from hail.utils.java import Env, info, scala_object
import os
import logging
import flask
import json
# Spark master URL is supplied by the deployment (may be None for local mode).
master = os.environ.get('HAIL_APISERVER_SPARK_MASTER')
hl.init(master=master, min_block_size=0)
# Flask app serving the Hail API endpoints defined below.
app = flask.Flask('hail-apiserver')
@app.route('/execute', methods=['POST'])
def execute():
    # Interpret a posted value-IR string and return its type and value.
    code = flask.request.json
    info(f'execute: {code}')
    parsed = Env.hail().expr.ir.IRParser.parse_value_ir(code, {}, {})
    typ = hl.dtype(parsed.typ().toString())
    value = Env.hail().expr.ir.Interpret.interpretJSON(parsed)
    result = {'type': str(typ), 'value': value}
    info(f'result: {result}')
    return flask.jsonify(result)


@app.route('/type/value', methods=['POST'])
def value_type():
    # Type-check a value IR without interpreting it.
    code = flask.request.json
    info(f'value type: {code}')
    result = Env.hail().expr.ir.IRParser.parse_value_ir(code, {}, {}).typ().toString()
    info(f'result: {result}')
    return flask.jsonify(result)
@app.route('/type/table', methods=['POST'])
def table_type():
    # Return the global/row types and row key of a table IR.
    code = flask.request.json
    info(f'table type: {code}')
    parsed = Env.hail().expr.ir.IRParser.parse_table_ir(code, {}, {})
    ttyp = hl.ttable._from_java(parsed.typ())
    result = {
        'global': str(ttyp.global_type),
        'row': str(ttyp.row_type),
        'row_key': ttyp.row_key,
    }
    info(f'result: {result}')
    return flask.jsonify(result)


@app.route('/type/matrix', methods=['POST'])
def matrix_type():
    # Return the component types and keys of a matrix-table IR.
    code = flask.request.json
    info(f'matrix type: {code}')
    parsed = Env.hail().expr.ir.IRParser.parse_matrix_ir(code, {}, {})
    mtyp = hl.tmatrix._from_java(parsed.typ())
    result = {
        'global': str(mtyp.global_type),
        'col': str(mtyp.col_type),
        'col_key': mtyp.col_key,
        'row': str(mtyp.row_type),
        'row_key': mtyp.row_key,
        'entry': str(mtyp.entry_type),
    }
    info(f'result: {result}')
    return flask.jsonify(result)
@app.route('/references/create', methods=['POST'])
def create_reference():
    # Register a reference genome from a JSON config; 400 on Hail errors.
    try:
        hl.ReferenceGenome._from_config(flask.request.json)
    except FatalError as e:
        return flask.jsonify({'message': e.args[0]}), 400
    return '', 204


@app.route('/references/create/fasta', methods=['POST'])
def create_reference_from_fasta():
    # Build a reference genome directly from a FASTA + index pair.
    try:
        data = flask.request.json
        hl.ReferenceGenome.from_fasta_file(
            data['name'], data['fasta_file'], data['index_file'],
            data['x_contigs'], data['y_contigs'], data['mt_contigs'],
            data['par'])
    except FatalError as e:
        return flask.jsonify({'message': e.args[0]}), 400
    return '', 204
@app.route('/references/delete', methods=['DELETE'])
def delete_reference():
    # Remove a reference genome by name; 400 on Hail errors.
    try:
        Env.hail().variant.ReferenceGenome.removeReference(flask.request.json['name'])
    except FatalError as e:
        return flask.jsonify({'message': e.args[0]}), 400
    return '', 204


@app.route('/references/get', methods=['GET'])
def get_reference():
    # NOTE(review): reads a JSON body on a GET request — works with the
    # in-house client but is unconventional HTTP.
    try:
        name = flask.request.json['name']
        payload = Env.hail().variant.ReferenceGenome.getReference(name).toJSONString()
        return flask.jsonify(json.loads(payload))
    except FatalError as e:
        return flask.jsonify({'message': e.args[0]}), 400
@app.route('/references/sequence/set', methods=['POST'])
def reference_add_sequence():
    # Attach a FASTA sequence to an existing reference genome.
    try:
        data = flask.request.json
        scala_object(Env.hail().variant, 'ReferenceGenome').addSequence(data['name'], data['fasta_file'], data['index_file'])
    except FatalError as e:
        return flask.jsonify({'message': e.args[0]}), 400
    return '', 204


@app.route('/references/sequence/delete', methods=['DELETE'])
def reference_remove_sequence():
    # Detach a FASTA sequence from a reference genome.
    try:
        data = flask.request.json
        Env.hail().variant.ReferenceGenome.removeSequence(data['name'], data['fasta_file'], data['index_file'])
    except FatalError as e:
        return flask.jsonify({'message': e.args[0]}), 400
    return '', 204
@app.route('/references/liftover/add', methods=['POST'])
def reference_add_liftover():
    # Register a liftover chain file between two reference genomes.
    try:
        data = flask.request.json
        Env.hail().variant.ReferenceGenome.referenceAddLiftover(data['name'], data['chain_file'], data['dest_reference_genome'])
    except FatalError as e:
        return flask.jsonify({'message': e.args[0]}), 400
    return '', 204


@app.route('/references/liftover/remove', methods=['DELETE'])
def reference_remove_liftover():
    # Unregister a previously added liftover.
    try:
        data = flask.request.json
        Env.hail().variant.ReferenceGenome.referenceRemoveLiftover(data['name'], data['dest_reference_genome'])
    except FatalError as e:
        return flask.jsonify({'message': e.args[0]}), 400
    return '', 204
@app.route('/parse-vcf-metadata', methods=['POST'])
def parse_vcf_metadata():
    # Extract VCF header metadata for the given file path.
    try:
        return Env.hc()._jhc.pyParseVCFMetadata(flask.request.json['path'])
    except FatalError as e:
        return flask.jsonify({'message': e.args[0]}), 400
app.run(threaded=False, host='0.0.0.0') | apiserver/apiserver/apiserver.py | import hail as hl
from hail.utils import FatalError
from hail.utils.java import Env, info, scala_object
import os
import logging
import flask
import json
master = os.environ.get('HAIL_APISERVER_SPARK_MASTER')
hl.init(master=master, min_block_size=0)
app = flask.Flask('hail-apiserver')
@app.route('/execute', methods=['POST'])
def execute():
    # Interpret a posted value-IR string and return its type and value.
    code = flask.request.json
    info(f'execute: {code}')
    parsed = Env.hail().expr.ir.IRParser.parse_value_ir(code, {}, {})
    typ = hl.dtype(parsed.typ().toString())
    value = Env.hail().expr.ir.Interpret.interpretJSON(parsed)
    result = {'type': str(typ), 'value': value}
    info(f'result: {result}')
    return flask.jsonify(result)


@app.route('/type/value', methods=['POST'])
def value_type():
    # Type-check a value IR without interpreting it.
    code = flask.request.json
    info(f'value type: {code}')
    result = Env.hail().expr.ir.IRParser.parse_value_ir(code, {}, {}).typ().toString()
    info(f'result: {result}')
    return flask.jsonify(result)
@app.route('/type/table', methods=['POST'])
def table_type():
    """Parse a table-IR string and return its global/row types and row key."""
    code = flask.request.json
    info(f'table type: {code}')
    jir = Env.hail().expr.ir.IRParser.parse_table_ir(code, {}, {})
    ttyp = hl.ttable._from_java(jir.typ())
    result = {'global': str(ttyp.global_type),
              'row': str(ttyp.row_type),
              'row_key': ttyp.row_key}
    info(f'result: {result}')
    return flask.jsonify(result)
@app.route('/type/matrix', methods=['POST'])
def matrix_type():
    """Parse a matrix-IR string and return its component types and keys."""
    code = flask.request.json
    info(f'matrix type: {code}')
    jir = Env.hail().expr.ir.IRParser.parse_matrix_ir(code, {}, {})
    mtyp = hl.tmatrix._from_java(jir.typ())
    result = {'global': str(mtyp.global_type),
              'col': str(mtyp.col_type),
              'col_key': mtyp.col_key,
              'row': str(mtyp.row_type),
              'row_key': mtyp.row_key,
              'entry': str(mtyp.entry_type)}
    info(f'result: {result}')
    return flask.jsonify(result)
@app.route('/references/create', methods=['POST'])
def create_reference():
    """Register a reference genome from a posted JSON config.

    Returns 204 on success, or 400 with a JSON message on FatalError.
    """
    try:
        config = flask.request.json
        hl.ReferenceGenome._from_config(config)
        return '', 204
    except FatalError as e:
        return flask.jsonify({
            'message': e.args[0]
        }), 400
@app.route('/references/create/fasta', methods=['POST'])
def create_reference_from_fasta():
    """Build a reference genome from a FASTA file and its index.

    Expects JSON body keys 'name', 'fasta_file', 'index_file', 'x_contigs',
    'y_contigs', 'mt_contigs', 'par'. Returns 204 on success, 400 on FatalError.
    """
    try:
        data = flask.request.json
        hl.ReferenceGenome.from_fasta_file(
            data['name'],
            data['fasta_file'],
            data['index_file'],
            data['x_contigs'],
            data['y_contigs'],
            data['mt_contigs'],
            data['par'])
        return '', 204
    except FatalError as e:
        return flask.jsonify({
            'message': e.args[0]
        }), 400
@app.route('/references/delete', methods=['DELETE'])
def delete_reference():
    """Remove the reference genome named in the JSON body."""
    payload = flask.request.json
    try:
        Env.hail().variant.ReferenceGenome.removeReference(payload['name'])
    except FatalError as e:
        return flask.jsonify({'message': e.args[0]}), 400
    return '', 204
@app.route('/references/get', methods=['GET'])
def get_reference():
    """Return the JSON configuration of a named reference genome.

    NOTE(review): reads a JSON *body* on a GET request; flask.request.json
    is None when no body is sent, so data['name'] raises an unhandled
    TypeError (500) — confirm clients always send a body.
    """
    try:
        data = flask.request.json
        return flask.jsonify(
            json.loads(Env.hail().variant.ReferenceGenome.getReference(data['name']).toJSONString()))
    except FatalError as e:
        return flask.jsonify({
            'message': e.args[0]
        }), 400
@app.route('/references/sequence/set', methods=['POST'])
def reference_add_sequence():
    """Attach a FASTA sequence (plus index) to a reference genome.

    Expects JSON body keys 'name', 'fasta_file', 'index_file'.
    Returns 204 on success, or 400 with a JSON message on FatalError.
    """
    try:
        data = flask.request.json
        scala_object(Env.hail().variant, 'ReferenceGenome').addSequence(data['name'], data['fasta_file'], data['index_file'])
        return '', 204
    except FatalError as e:
        return flask.jsonify({
            'message': e.args[0]
        }), 400
@app.route('/references/sequence/delete', methods=['DELETE'])
def reference_remove_sequence():
    """Detach a FASTA sequence from a reference genome.

    Expects JSON body keys 'name', 'fasta_file', 'index_file'.
    Returns 204 on success, or 400 with a JSON message on FatalError.
    """
    try:
        data = flask.request.json
        Env.hail().variant.ReferenceGenome.removeSequence(data['name'], data['fasta_file'], data['index_file'])
        return '', 204
    except FatalError as e:
        return flask.jsonify({
            'message': e.args[0]
        }), 400
@app.route('/references/liftover/add', methods=['POST'])
def reference_add_liftover():
    """Attach a liftover chain file to a reference genome.

    Expects JSON body keys 'name', 'chain_file', 'dest_reference_genome'.
    Returns 204 on success, or 400 with a JSON message on FatalError.
    """
    try:
        data = flask.request.json
        Env.hail().variant.ReferenceGenome.referenceAddLiftover(data['name'], data['chain_file'], data['dest_reference_genome'])
        return '', 204
    except FatalError as e:
        return flask.jsonify({
            'message': e.args[0]
        }), 400
@app.route('/references/liftover/remove', methods=['DELETE'])
def reference_remove_liftover():
    """Detach a liftover from a reference genome.

    Expects JSON body keys 'name' and 'dest_reference_genome'.
    Returns 204 on success, or 400 with a JSON message on FatalError.
    """
    try:
        data = flask.request.json
        Env.hail().variant.ReferenceGenome.referenceRemoveLiftover(data['name'], data['dest_reference_genome'])
        return '', 204
    except FatalError as e:
        return flask.jsonify({
            'message': e.args[0]
        }), 400
@app.route('/parse-vcf-metadata', methods=['POST'])
def parse_vcf_metadata():
    """Parse header metadata from the VCF file at the posted 'path'.

    NOTE(review): the backend value is returned as-is — presumably a
    Flask-serializable value; confirm against the JVM method's return type.
    """
    try:
        data = flask.request.json
        metadata = Env.hc()._jhc.pyParseVCFMetadata(data['path'])
        return metadata
    except FatalError as e:
        return flask.jsonify({
            'message': e.args[0]
        }), 400
app.run(threaded=False, host='0.0.0.0') | 0.409457 | 0.1273 |
import re
from moonreader_tools.accessors.file_reader import FileReader
from moonreader_tools.notes import Note
from moonreader_tools.parsers.note_extractor import NoteExtractorMixin
class PDFNoteParser(FileReader, NoteExtractorMixin):
    # TODO: inherit from the basic object
    """Reads notes from a PDF file-like object or raw text.

    A note record runs from NOTE_START to NOTE_END; fields inside a record
    are separated by ``#A<n>#`` tokens (see SPLITTER_PATTERN).
    """

    NOTE_START = "#A*#"
    NOTE_END = "#A@#"
    PARSED_FORMAT = "PDF"
    SPLITTER_PATTERN = r"#A[0-9@\*]#"
    # Maps token position in a split note to its field name; None = discard.
    CORRESP_TABLE = (
        (0, "unknown_1"),
        (1, "page"),
        (2, "timestamp"),
        (3, "unknown_2"),  # Highlight start index?
        (4, "unknown_3"),  # Highlight end index?
        (5, "color"),
        (6, "style"),
        (7, "note"),
        (8, "text"),
        (9, None),
    )

    @classmethod
    def from_file_obj(cls, flike_obj):
        """Read a file-like object and parse all notes from its content."""
        return cls.from_text(cls.read_file_obj(flike_obj))

    @classmethod
    def from_text(cls, text):
        """Parse every note found in *text* and return them as a list."""
        note_texts = cls._find_note_text_pieces(text)
        return cls._notes_from_note_texts(note_texts)

    @classmethod
    def _find_note_text_pieces(cls, text):
        """Extract raw note substrings (markers included) from *text*."""
        notes = []
        remaining = text
        end_len = len(cls.NOTE_END)
        while remaining:
            start_pos = remaining.find(cls.NOTE_START)
            end_pos = remaining.find(cls.NOTE_END)
            if start_pos == -1 or end_pos == -1:
                break
            notes.append(remaining[start_pos : end_pos + end_len])
            remaining = remaining[end_pos + end_len :]
        return notes

    @classmethod
    def _notes_from_note_texts(cls, note_texts):
        """Create one Note object per raw note string."""
        return [cls.single_note_from_text(text) for text in note_texts]

    @classmethod
    def single_note_from_text(cls, text) -> Note:
        """Create a single Note from one raw note string."""
        token_dict = cls._dict_from_text(text)
        return cls.note_from_dictionary(token_dict)

    @classmethod
    def _dict_from_text(cls, text):
        """Split a raw note on SPLITTER_PATTERN and map tokens to field names.

        Raises ValueError on a malformed note. (Was an ``assert``, which is
        silently stripped under ``python -O``.)
        """
        note_tokens = re.split(cls.SPLITTER_PATTERN, text)
        if len(note_tokens) <= 8:
            raise ValueError(
                'Malformed PDF note: expected more than 8 fields, got {}'.format(
                    len(note_tokens)))
        note_dict = {}
        for position, field_name in cls.CORRESP_TABLE:
            if field_name:
                note_dict[field_name] = note_tokens[position]
        return note_dict

import re
from moonreader_tools.accessors.file_reader import FileReader
from moonreader_tools.notes import Note
from moonreader_tools.parsers.note_extractor import NoteExtractorMixin
class PDFNoteParser(FileReader, NoteExtractorMixin):
    # TODO: inherit from the basic object
    """Reads notes from a PDF file-like object or raw text.

    A note record runs from NOTE_START to NOTE_END; fields inside a record
    are separated by ``#A<n>#`` tokens (see SPLITTER_PATTERN).
    """

    NOTE_START = "#A*#"
    NOTE_END = "#A@#"
    PARSED_FORMAT = "PDF"
    SPLITTER_PATTERN = r"#A[0-9@\*]#"
    # Maps token position in a split note to its field name; None = discard.
    CORRESP_TABLE = (
        (0, "unknown_1"),
        (1, "page"),
        (2, "timestamp"),
        (3, "unknown_2"),  # Highlight start index?
        (4, "unknown_3"),  # Highlight end index?
        (5, "color"),
        (6, "style"),
        (7, "note"),
        (8, "text"),
        (9, None),
    )

    @classmethod
    def from_file_obj(cls, flike_obj):
        """Read a file-like object and parse all notes from its content."""
        return cls.from_text(cls.read_file_obj(flike_obj))

    @classmethod
    def from_text(cls, text):
        """Parse every note found in *text* and return them as a list."""
        note_texts = cls._find_note_text_pieces(text)
        return cls._notes_from_note_texts(note_texts)

    @classmethod
    def _find_note_text_pieces(cls, text):
        """Extract raw note substrings (markers included) from *text*."""
        notes = []
        remaining = text
        end_len = len(cls.NOTE_END)
        while remaining:
            start_pos = remaining.find(cls.NOTE_START)
            end_pos = remaining.find(cls.NOTE_END)
            if start_pos == -1 or end_pos == -1:
                break
            notes.append(remaining[start_pos : end_pos + end_len])
            remaining = remaining[end_pos + end_len :]
        return notes

    @classmethod
    def _notes_from_note_texts(cls, note_texts):
        """Create one Note object per raw note string."""
        return [cls.single_note_from_text(text) for text in note_texts]

    @classmethod
    def single_note_from_text(cls, text) -> Note:
        """Create a single Note from one raw note string."""
        token_dict = cls._dict_from_text(text)
        return cls.note_from_dictionary(token_dict)

    @classmethod
    def _dict_from_text(cls, text):
        """Split a raw note on SPLITTER_PATTERN and map tokens to field names.

        Raises ValueError on a malformed note. (Was an ``assert``, which is
        silently stripped under ``python -O``.)
        """
        note_tokens = re.split(cls.SPLITTER_PATTERN, text)
        if len(note_tokens) <= 8:
            raise ValueError(
                'Malformed PDF note: expected more than 8 fields, got {}'.format(
                    len(note_tokens)))
        note_dict = {}
        for position, field_name in cls.CORRESP_TABLE:
            if field_name:
                note_dict[field_name] = note_tokens[position]
        return note_dict
"""Setup the tg2express application"""
from __future__ import print_function
import logging
from tg import config
from tg2express import model
import transaction
def bootstrap(command, conf, vars):
    """Place any commands to setup tg2express here"""
    # <websetup.bootstrap.before.auth
    from sqlalchemy.exc import IntegrityError
    import traceback

    # Seed the default auth objects: a manager user + group + permission,
    # and a plain editor user.
    try:
        u = model.User()
        u.user_name = 'manager'
        u.display_name = 'Example manager'
        u.email_address = '<EMAIL>'
        u.password = '<PASSWORD>'
        model.DBSession.add(u)
        g = model.Group()
        g.group_name = 'managers'
        g.display_name = 'Managers Group'
        g.users.append(u)
        model.DBSession.add(g)
        p = model.Permission()
        p.permission_name = 'manage'
        p.description = 'This permission give an administrative right to the bearer'
        p.groups.append(g)
        model.DBSession.add(p)
        u1 = model.User()
        u1.user_name = 'editor'
        u1.display_name = 'Example editor'
        u1.email_address = '<EMAIL>'
        u1.password = '<PASSWORD>'
        model.DBSession.add(u1)
        model.DBSession.flush()
        transaction.commit()
    except IntegrityError:
        # Most likely the data was inserted by a previous run; keep going.
        print('Warning, there was a problem adding your auth data, it may have already been added:')
        print(traceback.format_exc())
        transaction.abort()
        print('Continuing with bootstrapping...')
    # <websetup.bootstrap.after.auth>

    # Seed example writers, articles and comments.
    try:
        w1 = model.Writer(firstname='Mingcai', lastname='SHEN')
        w2 = model.Writer(firstname='Fangze', lastname='SHEN')
        a1 = model.Article(title='Test note1', keys='test,note,another', content=u'This is just a test note1')
        a2 = model.Article(title='Test note2', keys='note,another', content=u'This is just a test note2')
        a3 = model.Article(title='Test note3', keys='test,another', content=u'This is just a test note3')
        a4 = model.Article(title='Test note4', keys='test', content=u'This is just a test note4')
        c1 = model.Comment(comment='Good! Thanks.')
        c2 = model.Comment(comment='Well, ok!')
        a4.comments.append(c1)
        a4.comments.append(c2)
        w1.articles.append(a1)
        w1.articles.append(a3)
        w2.articles.append(a2)
        w2.articles.append(a4)
        model.DBSession.add_all([w1, w2])
        model.DBSession.flush()
        transaction.commit()
    except IntegrityError:
        # Fixed copy-pasted message: this block seeds sample data, not auth data.
        print('Warning, there was a problem adding your sample data, it may have already been added:')
        print(traceback.format_exc())
        transaction.abort()
        print('Continuing with bootstrapping...')
from __future__ import print_function
import logging
from tg import config
from tg2express import model
import transaction
def bootstrap(command, conf, vars):
    """Place any commands to setup tg2express here"""
    # <websetup.bootstrap.before.auth
    from sqlalchemy.exc import IntegrityError
    import traceback

    # Seed the default auth objects: a manager user + group + permission,
    # and a plain editor user.
    try:
        u = model.User()
        u.user_name = 'manager'
        u.display_name = 'Example manager'
        u.email_address = '<EMAIL>'
        u.password = '<PASSWORD>'
        model.DBSession.add(u)
        g = model.Group()
        g.group_name = 'managers'
        g.display_name = 'Managers Group'
        g.users.append(u)
        model.DBSession.add(g)
        p = model.Permission()
        p.permission_name = 'manage'
        p.description = 'This permission give an administrative right to the bearer'
        p.groups.append(g)
        model.DBSession.add(p)
        u1 = model.User()
        u1.user_name = 'editor'
        u1.display_name = 'Example editor'
        u1.email_address = '<EMAIL>'
        u1.password = '<PASSWORD>'
        model.DBSession.add(u1)
        model.DBSession.flush()
        transaction.commit()
    except IntegrityError:
        # Most likely the data was inserted by a previous run; keep going.
        print('Warning, there was a problem adding your auth data, it may have already been added:')
        print(traceback.format_exc())
        transaction.abort()
        print('Continuing with bootstrapping...')
    # <websetup.bootstrap.after.auth>

    # Seed example writers, articles and comments.
    try:
        w1 = model.Writer(firstname='Mingcai', lastname='SHEN')
        w2 = model.Writer(firstname='Fangze', lastname='SHEN')
        a1 = model.Article(title='Test note1', keys='test,note,another', content=u'This is just a test note1')
        a2 = model.Article(title='Test note2', keys='note,another', content=u'This is just a test note2')
        a3 = model.Article(title='Test note3', keys='test,another', content=u'This is just a test note3')
        a4 = model.Article(title='Test note4', keys='test', content=u'This is just a test note4')
        c1 = model.Comment(comment='Good! Thanks.')
        c2 = model.Comment(comment='Well, ok!')
        a4.comments.append(c1)
        a4.comments.append(c2)
        w1.articles.append(a1)
        w1.articles.append(a3)
        w2.articles.append(a2)
        w2.articles.append(a4)
        model.DBSession.add_all([w1, w2])
        model.DBSession.flush()
        transaction.commit()
    except IntegrityError:
        # Fixed copy-pasted message: this block seeds sample data, not auth data.
        print('Warning, there was a problem adding your sample data, it may have already been added:')
        print(traceback.format_exc())
        transaction.abort()
        print('Continuing with bootstrapping...')
from .utils.tools import Coord
from datetime import datetime, timedelta
class Stage:
    """A farmable stage, classified from its name into main/chips/supplies/event."""

    EVENT_STAGES = []
    CHIP_STAGES = {
        'lvlcoord': [Coord(430, 450), Coord(830, 260)],
        'a': ['mon', 'thu', 'fri', 'sun'],
        'b': ['mon', 'tue', 'fri', 'sat'],
        'c': ['wed', 'thu', 'sat', 'sun'],
        'd': ['tue', 'wed', 'sat', 'sun']
    }
    SUPPLY_STAGES = {
        'lvlcoord': [Coord(200, 570), Coord(475, 520), Coord(680, 400), Coord(850, 300), Coord(950, 180)],
        'ap': ['mon', 'thu', 'sat', 'sun'],
        'ca': ['tue', 'wed', 'fri', 'sun'],
        'ce': ['tue', 'thu', 'sat', 'sun'],
        'sk': ['mon', 'wed', 'fri', 'sat']
    }

    def __init__(self, name):
        self.name = name.lower()
        self.identify()

    def __hash__(self):
        return hash(self.name)

    def __eq__(self, other):
        # Robustness fix: comparing against a non-Stage no longer raises
        # AttributeError; Python falls back to its default handling.
        if not isinstance(other, Stage):
            return NotImplemented
        return self.name == other.name

    @staticmethod
    def _server_day():
        """Current day-of-week abbreviation, shifted by the UTC-11 day rollover."""
        return (datetime.utcnow() - timedelta(hours=11)).strftime('%a').lower()

    def identify(self):
        """Derive classifier/level (and opcode/isopen/coord where relevant) from the name."""
        parts = self.name.split('-')
        prefix = parts[0]
        self.level = parts[-1]
        if any(char.isdigit() for char in prefix):
            # e.g. '1-7' or 's4-1': a main-story (or sub-story) stage.
            self.classifier = 'main'
            self.issstages = not prefix.isdigit()
            self.chapter = self.name[1] if self.issstages else self.name[0]
        elif self.name not in self.EVENT_STAGES:
            if len(parts) == 3:
                # Chip stages look like 'pr-a-1'.
                self.classifier = 'chips'
                self.opcode = parts[1]
                self.isopen = self._server_day() in self.CHIP_STAGES[self.opcode]
                self.coord = self.CHIP_STAGES['lvlcoord'][int(self.level) - 1]
            else:
                # Supply stages look like 'ce-5'; 'ls' is open every day.
                self.classifier = 'supplies'
                self.opcode = prefix
                self.isopen = (self._server_day() in self.SUPPLY_STAGES[self.opcode]
                               or self.opcode == 'ls')
                self.coord = self.SUPPLY_STAGES['lvlcoord'][int(self.level) - 1]
        else:
            self.classifier = 'event'

from .utils.tools import Coord
from datetime import datetime, timedelta
class Stage:
    """A farmable stage, classified from its name into main/chips/supplies/event."""

    EVENT_STAGES = []
    CHIP_STAGES = {
        'lvlcoord': [Coord(430, 450), Coord(830, 260)],
        'a': ['mon', 'thu', 'fri', 'sun'],
        'b': ['mon', 'tue', 'fri', 'sat'],
        'c': ['wed', 'thu', 'sat', 'sun'],
        'd': ['tue', 'wed', 'sat', 'sun']
    }
    SUPPLY_STAGES = {
        'lvlcoord': [Coord(200, 570), Coord(475, 520), Coord(680, 400), Coord(850, 300), Coord(950, 180)],
        'ap': ['mon', 'thu', 'sat', 'sun'],
        'ca': ['tue', 'wed', 'fri', 'sun'],
        'ce': ['tue', 'thu', 'sat', 'sun'],
        'sk': ['mon', 'wed', 'fri', 'sat']
    }

    def __init__(self, name):
        self.name = name.lower()
        self.identify()

    def __hash__(self):
        return hash(self.name)

    def __eq__(self, other):
        # Robustness fix: comparing against a non-Stage no longer raises
        # AttributeError; Python falls back to its default handling.
        if not isinstance(other, Stage):
            return NotImplemented
        return self.name == other.name

    @staticmethod
    def _server_day():
        """Current day-of-week abbreviation, shifted by the UTC-11 day rollover."""
        return (datetime.utcnow() - timedelta(hours=11)).strftime('%a').lower()

    def identify(self):
        """Derive classifier/level (and opcode/isopen/coord where relevant) from the name."""
        parts = self.name.split('-')
        prefix = parts[0]
        self.level = parts[-1]
        if any(char.isdigit() for char in prefix):
            # e.g. '1-7' or 's4-1': a main-story (or sub-story) stage.
            self.classifier = 'main'
            self.issstages = not prefix.isdigit()
            self.chapter = self.name[1] if self.issstages else self.name[0]
        elif self.name not in self.EVENT_STAGES:
            if len(parts) == 3:
                # Chip stages look like 'pr-a-1'.
                self.classifier = 'chips'
                self.opcode = parts[1]
                self.isopen = self._server_day() in self.CHIP_STAGES[self.opcode]
                self.coord = self.CHIP_STAGES['lvlcoord'][int(self.level) - 1]
            else:
                # Supply stages look like 'ce-5'; 'ls' is open every day.
                self.classifier = 'supplies'
                self.opcode = prefix
                self.isopen = (self._server_day() in self.SUPPLY_STAGES[self.opcode]
                               or self.opcode == 'ls')
                self.coord = self.SUPPLY_STAGES['lvlcoord'][int(self.level) - 1]
        else:
            self.classifier = 'event'
from aaweb import app
from aaweb.api import api_json_error, api_json_response
from aaweb.models import Plane, PlaneLayout
from flask import url_for
@app.route('/api/current/planes', methods = ('GET',))
def api_current_planes():
    """Return a JSON summary of every plane with its current position."""
    data = []
    for plane in Plane.select():
        flying, coordinates, code = plane.current_position()
        # Dead `details = {}` pre-assignment removed: it was immediately
        # overwritten by the literal below.
        # The position-source key is dynamic: in-flight planes report a
        # flight code, grounded planes an airport code.
        details = {
            'name' : plane.alias,
            'manufacturer' : plane.manufacturer,
            'aircraft' : plane.aircraft,
            'registration' : plane.registration,
            'situation' : 'IN-FLIGHT' if flying else 'ON-GROUND',
            'current_coordinates' : {
                'lat' : coordinates[0],
                'lon' : coordinates[1],
                'map' : url_for('static', filename='imgs/flightmap.png'),
                'flight' if flying else 'airport' : code,
            },
            'details' : url_for('api_plane_details', registration=plane.registration),
        }
        data.append(details)
    return api_json_response(data)
@app.route('/api/plane/<registration>', methods = ('GET',))
def api_plane_details(registration):
    """Return full JSON details for one plane, looked up by registration."""
    try:
        plane = Plane.get(Plane.registration == registration.upper())
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit). Lookup failure -> JSON error.
        return api_json_error('No such plane found')
    flying, coordinates, code = plane.current_position()
    data = {
        'registration' : plane.registration,
        'name' : plane.alias,
        'aircraft' : plane.aircraft,
        'manufacturer' : plane.manufacturer,
        'crew' : {
            'pilots' : plane.pilots,
            'flight_attendants' : plane.layout.flight_attendants,
            'total' : plane.layout.flight_attendants + plane.pilots,
        },
        # NOTE: 'passangers' key (sic) kept for API compatibility.
        'passangers': {
            'economy' : plane.layout.economy_class,
            'business' : plane.layout.business_class,
            'first' : plane.layout.first_class,
            'total' : plane.layout.economy_class + plane.layout.business_class + plane.layout.first_class,
        },
        'imgs' : {
            'cabin' : url_for('static', filename='imgs/layout/%s' % plane.layout.picture),
            'livery' : url_for('static', filename='imgs/planes/%s' % plane.picture),
        },
        'situation' : 'IN-FLIGHT' if flying else 'ON-GROUND',
        'current_coordinates' : {
            'lat' : coordinates[0],
            'lon' : coordinates[1],
            'map' : url_for('gallery_live_map'),
            'flight' if flying else 'airport' : code,
        }
    }
    return api_json_response(data)
from aaweb import app
from aaweb.api import api_json_error, api_json_response
from aaweb.models import Plane, PlaneLayout
from flask import url_for
@app.route('/api/current/planes', methods = ('GET',))
def api_current_planes():
    """Return a JSON summary of every plane with its current position."""
    data = []
    for plane in Plane.select():
        details = {}
        flying, coordinates, code = plane.current_position()
        # The position-source key is dynamic: in-flight planes report a
        # flight code, grounded planes an airport code.
        details = {
            'name' : plane.alias,
            'manufacturer' : plane.manufacturer,
            'aircraft' : plane.aircraft,
            'registration' : plane.registration,
            'situation' : 'IN-FLIGHT' if flying else 'ON-GROUND',
            'current_coordinates' : {
                'lat' : coordinates[0],
                'lon' : coordinates[1],
                'map' : url_for('static', filename='imgs/flightmap.png'),
                'flight' if flying else 'airport' : code,
            },
            'details' : url_for('api_plane_details', registration=plane.registration),
        }
        data.append(details)
    return api_json_response(data)
@app.route('/api/plane/<registration>', methods = ('GET',))
def api_plane_details(registration):
    """Return full JSON details for one plane, looked up by registration."""
    try:
        plane = Plane.get(Plane.registration == registration.upper())
    except:
        # NOTE(review): bare except swallows everything, including
        # KeyboardInterrupt — consider narrowing to the lookup exception.
        return api_json_error('No such plane found')
    flying, coordinates, code = plane.current_position()
    data = {
        'registration' : plane.registration,
        'name' : plane.alias,
        'aircraft' : plane.aircraft,
        'manufacturer' : plane.manufacturer,
        'crew' : {
            'pilots' : plane.pilots,
            'flight_attendants' : plane.layout.flight_attendants,
            'total' : plane.layout.flight_attendants + plane.pilots,
        },
        # NOTE: 'passangers' key (sic) is part of the public API.
        'passangers': {
            'economy' : plane.layout.economy_class,
            'business' : plane.layout.business_class,
            'first' : plane.layout.first_class,
            'total' : plane.layout.economy_class + plane.layout.business_class + plane.layout.first_class,
        },
        'imgs' : {
            'cabin' : url_for('static', filename='imgs/layout/%s' % plane.layout.picture),
            'livery' : url_for('static', filename='imgs/planes/%s' % plane.picture),
        },
        'situation' : 'IN-FLIGHT' if flying else 'ON-GROUND',
        'current_coordinates' : {
            'lat' : coordinates[0],
            'lon' : coordinates[1],
            'map' : url_for('gallery_live_map'),
            'flight' if flying else 'airport' : code,
        }
    }
    return api_json_response(data) | 0.546254 | 0.130368
from django.conf import settings
from django import forms
from django_mako_plus import view_function, render_template
from inspect import Signature
import inspect
# Signature of the stock django Form constructor; used by Formless.__init__
# to separate Django's own kwargs from extra attributes.
formsig = Signature.from_callable(forms.Form.__init__)
class Formless(forms.Form):
    """
    A mixin that prints a full form (instead of just the fields).
    In your view.py file:
    from django.http import HttpResponseRedirect
    from django_mako_plus import view_function
    from formlib import Formless
    from django import forms
    @view_function
    def process_request(request, user:amod.User=None):
        # process the form
        form = MyForm(request, user=user)
        if form.is_valid():
            form.commit()
            return HttpResponseRedirect('/app/successurl/')
        # render the template
        return request.dmp.render('mytemplate.html', {
            'form': form,
        })
    class MyForm(formlib.Form):   # extending formlib.Form, not Django's forms.Form
        '''An example form'''
        def init(self):
            '''Adds the fields for this form (called at end of __init__)'''
            # note that self.user is available (see MyForm constructor call above)
            self.initial = { 'name': self.user.first_name }
            self.fields['name'] = forms.CharField()
        def clean_name(self):
            name = self.cleaned_data.get('name')
            # ...
            return name
        def commit(self):
            '''Process the form action'''
            # self.user still available
            self.user.first_name = self.cleaned_data.get('name')
            self.user.save()
    In your template.html file:
    ${ form }
    """
    # Rendering defaults; subclasses may override any of these.
    form_id = 'form'
    form_action = None
    form_method = 'POST'
    submit_text = 'Submit'
    # Class-level list shared by all instances; as_full() only reads it.
    field_css = [ 'form-control' ]
    def __init__(self, request, *args, **kwargs):
        '''
        Constructor.

        Any kwarg whose name is not a parameter of forms.Form.__init__ is set
        as an attribute on this object; the rest are forwarded to Django.
        On POST, request.POST/request.FILES are bound unless given explicitly.
        '''
        # save the request object
        self.request = request
        # any extra kwargs should be set on this object
        for name in tuple(kwargs.keys()):
            if name not in formsig.parameters:
                setattr(self, name, kwargs.pop(name))
        # create the arguments for the super call, adding `data` and `files` if needed
        # then call the superclass (calling old-fashioned way because self is in the args)
        super_args = formsig.bind(self, *args, **kwargs)
        if request.method == 'POST':
            super_args.arguments['data'] = super_args.arguments.get('data', request.POST)
            super_args.arguments['files'] = super_args.arguments.get('files', request.FILES)
        super_args.apply_defaults()
        forms.Form.__init__(*super_args.args, **super_args.kwargs)
        # call the init() as the last thing in the constructor
        # this gives the subclass a hook without having to override __init__ and call super()
        self.init()
    def init(self):
        '''Hook for subclasses to add fields and any other initialization.'''
        pass
    def as_full(self, extra=None):
        '''Returns the HTML for this form, including <form>, submit, and csrf tags.'''
        # add the bootstrap css, merging with classes already on each widget
        css = set(self.field_css)
        for field in self.fields.values():
            current = set(( c.strip() for c in field.widget.attrs.get('class', '').split(' ') if c ))
            field.widget.attrs['class'] = ' '.join(css | current)
        # render the string
        return render_template(self.request, 'formlib', 'form.htm', {
            'form': self,
            'extra': extra,
        })
    def __str__(self):
        '''Returns the HTML for this form, including <form>, submit, and csrf tags.'''
        return self.as_full()
    def commit(self, *args, **kwargs):
        '''
        Commits the form after it has been validated.
        '''
        pass | formlib/form.py |
from django.conf import settings
from django import forms
from django_mako_plus import view_function, render_template
from inspect import Signature
import inspect
# Signature of the stock django Form constructor; used by Formless.__init__
# to separate Django's own kwargs from extra attributes.
formsig = Signature.from_callable(forms.Form.__init__)
class Formless(forms.Form):
    """
    A mixin that prints a full form (instead of just the fields).
    In your view.py file:
    from django.http import HttpResponseRedirect
    from django_mako_plus import view_function
    from formlib import Formless
    from django import forms
    @view_function
    def process_request(request, user:amod.User=None):
        # process the form
        form = MyForm(request, user=user)
        if form.is_valid():
            form.commit()
            return HttpResponseRedirect('/app/successurl/')
        # render the template
        return request.dmp.render('mytemplate.html', {
            'form': form,
        })
    class MyForm(formlib.Form):   # extending formlib.Form, not Django's forms.Form
        '''An example form'''
        def init(self):
            '''Adds the fields for this form (called at end of __init__)'''
            # note that self.user is available (see MyForm constructor call above)
            self.initial = { 'name': self.user.first_name }
            self.fields['name'] = forms.CharField()
        def clean_name(self):
            name = self.cleaned_data.get('name')
            # ...
            return name
        def commit(self):
            '''Process the form action'''
            # self.user still available
            self.user.first_name = self.cleaned_data.get('name')
            self.user.save()
    In your template.html file:
    ${ form }
    """
    # Rendering defaults; subclasses may override any of these.
    form_id = 'form'
    form_action = None
    form_method = 'POST'
    submit_text = 'Submit'
    # Class-level list shared by all instances; as_full() only reads it.
    field_css = [ 'form-control' ]
    def __init__(self, request, *args, **kwargs):
        '''
        Constructor.

        Any kwarg whose name is not a parameter of forms.Form.__init__ is set
        as an attribute on this object; the rest are forwarded to Django.
        On POST, request.POST/request.FILES are bound unless given explicitly.
        '''
        # save the request object
        self.request = request
        # any extra kwargs should be set on this object
        for name in tuple(kwargs.keys()):
            if name not in formsig.parameters:
                setattr(self, name, kwargs.pop(name))
        # create the arguments for the super call, adding `data` and `files` if needed
        # then call the superclass (calling old-fashioned way because self is in the args)
        super_args = formsig.bind(self, *args, **kwargs)
        if request.method == 'POST':
            super_args.arguments['data'] = super_args.arguments.get('data', request.POST)
            super_args.arguments['files'] = super_args.arguments.get('files', request.FILES)
        super_args.apply_defaults()
        forms.Form.__init__(*super_args.args, **super_args.kwargs)
        # call the init() as the last thing in the constructor
        # this gives the subclass a hook without having to override __init__ and call super()
        self.init()
    def init(self):
        '''Hook for subclasses to add fields and any other initialization.'''
        pass
    def as_full(self, extra=None):
        '''Returns the HTML for this form, including <form>, submit, and csrf tags.'''
        # add the bootstrap css, merging with classes already on each widget
        css = set(self.field_css)
        for field in self.fields.values():
            current = set(( c.strip() for c in field.widget.attrs.get('class', '').split(' ') if c ))
            field.widget.attrs['class'] = ' '.join(css | current)
        # render the string
        return render_template(self.request, 'formlib', 'form.htm', {
            'form': self,
            'extra': extra,
        })
    def __str__(self):
        '''Returns the HTML for this form, including <form>, submit, and csrf tags.'''
        return self.as_full()
    def commit(self, *args, **kwargs):
        '''
        Commits the form after it has been validated.
        '''
        pass | 0.46952 | 0.076304
import pulumi
import pulumi.runtime
class Service(pulumi.CustomResource):
    """
    Manages a V2 Neutron VPN service resource within OpenStack.
    """
    # NOTE(review): generated SDK code; the `basestring` checks below mean it
    # targets Python 2 — confirm before running under Python 3.
    def __init__(__self__, __name__, __opts__=None, admin_state_up=None, description=None, name=None, region=None, router_id=None, subnet_id=None, tenant_id=None, value_specs=None):
        """Create a Service resource with the given unique name, props, and options."""
        if not __name__:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(__name__, basestring):
            raise TypeError('Expected resource name to be a string')
        if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        # Validate each optional input, mirror it onto the instance, and
        # collect it (camelCased) into the engine-facing property bag.
        __props__ = dict()
        if admin_state_up and not isinstance(admin_state_up, bool):
            raise TypeError('Expected property admin_state_up to be a bool')
        __self__.admin_state_up = admin_state_up
        """
        The administrative state of the resource. Can either be up(true) or down(false).
        Changing this updates the administrative state of the existing service.
        """
        __props__['adminStateUp'] = admin_state_up
        if description and not isinstance(description, basestring):
            raise TypeError('Expected property description to be a basestring')
        __self__.description = description
        """
        The human-readable description for the service.
        Changing this updates the description of the existing service.
        """
        __props__['description'] = description
        if name and not isinstance(name, basestring):
            raise TypeError('Expected property name to be a basestring')
        __self__.name = name
        """
        The name of the service. Changing this updates the name of
        the existing service.
        """
        __props__['name'] = name
        if region and not isinstance(region, basestring):
            raise TypeError('Expected property region to be a basestring')
        __self__.region = region
        """
        The region in which to obtain the V2 Networking client.
        A Networking client is needed to create a VPN service. If omitted, the
        `region` argument of the provider is used. Changing this creates a new
        service.
        """
        __props__['region'] = region
        if not router_id:
            raise TypeError('Missing required property router_id')
        elif not isinstance(router_id, basestring):
            raise TypeError('Expected property router_id to be a basestring')
        __self__.router_id = router_id
        """
        The ID of the router. Changing this creates a new service.
        """
        __props__['routerId'] = router_id
        if subnet_id and not isinstance(subnet_id, basestring):
            raise TypeError('Expected property subnet_id to be a basestring')
        __self__.subnet_id = subnet_id
        """
        SubnetID is the ID of the subnet. Default is null.
        """
        __props__['subnetId'] = subnet_id
        if tenant_id and not isinstance(tenant_id, basestring):
            raise TypeError('Expected property tenant_id to be a basestring')
        __self__.tenant_id = tenant_id
        """
        The owner of the service. Required if admin wants to
        create a service for another project. Changing this creates a new service.
        """
        __props__['tenantId'] = tenant_id
        if value_specs and not isinstance(value_specs, dict):
            raise TypeError('Expected property value_specs to be a dict')
        __self__.value_specs = value_specs
        """
        Map of additional options.
        """
        __props__['valueSpecs'] = value_specs
        # Output-only attributes start as UNKNOWN until the engine resolves them.
        __self__.external_v4_ip = pulumi.runtime.UNKNOWN
        """
        The read-only external (public) IPv4 address that is used for the VPN service.
        """
        __self__.external_v6_ip = pulumi.runtime.UNKNOWN
        """
        The read-only external (public) IPv6 address that is used for the VPN service.
        """
        __self__.status = pulumi.runtime.UNKNOWN
        """
        Indicates whether IPsec VPN service is currently operational. Values are ACTIVE, DOWN, BUILD, ERROR, PENDING_CREATE, PENDING_UPDATE, or PENDING_DELETE.
        """
        super(Service, __self__).__init__(
            'openstack:vpnaas/service:Service',
            __name__,
            __props__,
            __opts__)
    def set_outputs(self, outs):
        """Copy resolved engine outputs (camelCase keys) onto this object's attributes."""
        if 'adminStateUp' in outs:
            self.admin_state_up = outs['adminStateUp']
        if 'description' in outs:
            self.description = outs['description']
        if 'externalV4Ip' in outs:
            self.external_v4_ip = outs['externalV4Ip']
        if 'externalV6Ip' in outs:
            self.external_v6_ip = outs['externalV6Ip']
        if 'name' in outs:
            self.name = outs['name']
        if 'region' in outs:
            self.region = outs['region']
        if 'routerId' in outs:
            self.router_id = outs['routerId']
        if 'status' in outs:
            self.status = outs['status']
        if 'subnetId' in outs:
            self.subnet_id = outs['subnetId']
        if 'tenantId' in outs:
            self.value_specs = outs['valueSpecs'] if False else self.value_specs
            self.tenant_id = outs['tenantId']
        if 'valueSpecs' in outs:
            self.value_specs = outs['valueSpecs'] | sdk/python/pulumi_openstack/vpnaas/service.py |
import pulumi
import pulumi.runtime
class Service(pulumi.CustomResource):
    """
    Manages a V2 Neutron VPN service resource within OpenStack.
    """
    def __init__(__self__, __name__, __opts__=None, admin_state_up=None, description=None, name=None, region=None, router_id=None, subnet_id=None, tenant_id=None, value_specs=None):
        """Create a Service resource with the given unique name, props, and options."""
        # `basestring` exists only on Python 2; fall back to `str` on
        # Python 3 so the isinstance() checks below do not raise NameError.
        try:
            string_types = basestring
        except NameError:
            string_types = str
        if not __name__:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(__name__, string_types):
            raise TypeError('Expected resource name to be a string')
        if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        __props__ = dict()
        # NOTE: falsy-but-set values (e.g. admin_state_up=False) skip the
        # type check below by design of the generated SDK; behavior kept.
        if admin_state_up and not isinstance(admin_state_up, bool):
            raise TypeError('Expected property admin_state_up to be a bool')
        __self__.admin_state_up = admin_state_up
        """
        The administrative state of the resource. Can either be up(true) or down(false).
        Changing this updates the administrative state of the existing service.
        """
        __props__['adminStateUp'] = admin_state_up
        if description and not isinstance(description, string_types):
            raise TypeError('Expected property description to be a basestring')
        __self__.description = description
        """
        The human-readable description for the service.
        Changing this updates the description of the existing service.
        """
        __props__['description'] = description
        if name and not isinstance(name, string_types):
            raise TypeError('Expected property name to be a basestring')
        __self__.name = name
        """
        The name of the service. Changing this updates the name of
        the existing service.
        """
        __props__['name'] = name
        if region and not isinstance(region, string_types):
            raise TypeError('Expected property region to be a basestring')
        __self__.region = region
        """
        The region in which to obtain the V2 Networking client.
        A Networking client is needed to create a VPN service. If omitted, the
        `region` argument of the provider is used. Changing this creates a new
        service.
        """
        __props__['region'] = region
        # router_id is the only required property of this resource.
        if not router_id:
            raise TypeError('Missing required property router_id')
        elif not isinstance(router_id, string_types):
            raise TypeError('Expected property router_id to be a basestring')
        __self__.router_id = router_id
        """
        The ID of the router. Changing this creates a new service.
        """
        __props__['routerId'] = router_id
        if subnet_id and not isinstance(subnet_id, string_types):
            raise TypeError('Expected property subnet_id to be a basestring')
        __self__.subnet_id = subnet_id
        """
        SubnetID is the ID of the subnet. Default is null.
        """
        __props__['subnetId'] = subnet_id
        if tenant_id and not isinstance(tenant_id, string_types):
            raise TypeError('Expected property tenant_id to be a basestring')
        __self__.tenant_id = tenant_id
        """
        The owner of the service. Required if admin wants to
        create a service for another project. Changing this creates a new service.
        """
        __props__['tenantId'] = tenant_id
        if value_specs and not isinstance(value_specs, dict):
            raise TypeError('Expected property value_specs to be a dict')
        __self__.value_specs = value_specs
        """
        Map of additional options.
        """
        __props__['valueSpecs'] = value_specs
        # Output-only properties start as UNKNOWN and are resolved by the
        # engine via set_outputs() after the resource is provisioned.
        __self__.external_v4_ip = pulumi.runtime.UNKNOWN
        """
        The read-only external (public) IPv4 address that is used for the VPN service.
        """
        __self__.external_v6_ip = pulumi.runtime.UNKNOWN
        """
        The read-only external (public) IPv6 address that is used for the VPN service.
        """
        __self__.status = pulumi.runtime.UNKNOWN
        """
        Indicates whether IPsec VPN service is currently operational. Values are ACTIVE, DOWN, BUILD, ERROR, PENDING_CREATE, PENDING_UPDATE, or PENDING_DELETE.
        """
        super(Service, __self__).__init__(
            'openstack:vpnaas/service:Service',
            __name__,
            __props__,
            __opts__)

    def set_outputs(self, outs):
        """Copy known engine output properties from *outs* onto this resource.

        Only keys actually present in *outs* are applied; camelCase engine
        names are mapped to the snake_case Python attributes.
        """
        _OUTPUT_MAP = {
            'adminStateUp': 'admin_state_up',
            'description': 'description',
            'externalV4Ip': 'external_v4_ip',
            'externalV6Ip': 'external_v6_ip',
            'name': 'name',
            'region': 'region',
            'routerId': 'router_id',
            'status': 'status',
            'subnetId': 'subnet_id',
            'tenantId': 'tenant_id',
            'valueSpecs': 'value_specs',
        }
        for key, attr in _OUTPUT_MAP.items():
            if key in outs:
                setattr(self, attr, outs[key])
import numpy as np
try:
import spotpy
except ImportError:
import sys
sys.path.append(".")
import spotpy
import unittest
from spotpy.likelihoods import LikelihoodError
# We use all available likelihood functions. The pydoc of every function tells whether a
# `params` argument, holding model parameter values, can be passed to it. `params` must be None or a tuple of
# values and names. If `params` is None, the needed values are calculated by the function itself.
class TestLikelihood(unittest.TestCase):
    """Smoke tests for every likelihood function shipped with spotpy.

    Each test feeds a normally distributed and a binomially distributed
    data/simulation pair into one likelihood function and checks that the
    result is a scalar within a plausible range.  Functions that accept a
    ``params`` tuple are additionally called with out-of-range parameter
    sets to exercise their internal fallback paths, and with empty
    parameter lists to provoke a ``LikelihoodError``.
    """

    def setUp(self):
        # Fixed seed keeps the magnitude assertions below reproducible.
        np.random.seed(12)
        self.normal_data, self.normal_comparedata = np.random.normal(1500, 2530, 20), np.random.normal(15, 25, 20)
        self.binom_data, self.binom_comparedata = np.random.binomial(20, 0.1, 20), np.random.binomial(20, 0.1, 20)
        self.do_print = True

    def _assert_float(self, value):
        # ``np.float`` was removed in NumPy 1.24; the builtin ``float()``
        # conversion expresses the original intent: the likelihood must be
        # convertible to a plain scalar float.
        self.assertIsInstance(float(value), float)

    def _assert_int(self, value):
        # Same as above for the removed ``np.int`` alias.
        self.assertIsInstance(int(value), int)

    def _report(self, label, value):
        # Echo the computed likelihood when verbose output is enabled.
        if self.do_print:
            print(label + ": " + str(value))

    def test_logLikelihood(self):
        l_normal = spotpy.likelihoods.logLikelihood(self.normal_data, self.normal_comparedata)
        self.assertGreaterEqual(np.abs(l_normal), 900)
        self._assert_float(l_normal)
        self._report("logLikelihood", l_normal)
        l_binom = spotpy.likelihoods.logLikelihood(self.binom_data, self.binom_comparedata)
        self.assertGreaterEqual(np.abs(l_binom), 900)
        self._assert_float(l_binom)
        self._report("logLikelihood", l_binom)

    def test_gaussianLikelihoodMeasErrorOut(self):
        l_normal = spotpy.likelihoods.gaussianLikelihoodMeasErrorOut(self.normal_data, self.normal_comparedata)
        self.assertGreaterEqual(-40, l_normal)
        self._assert_float(l_normal)
        self._report("gaussianLikelihoodMeasErrorOut", l_normal)
        l_binom = spotpy.likelihoods.gaussianLikelihoodMeasErrorOut(self.binom_data, self.binom_comparedata)
        self.assertGreaterEqual(-40, l_binom)
        self._assert_float(l_binom)
        self._report("gaussianLikelihoodMeasErrorOut", l_binom)

    def test_gaussianLikelihoodHomoHeteroDataError(self):
        l_normal = spotpy.likelihoods.gaussianLikelihoodHomoHeteroDataError(self.normal_data, self.normal_comparedata)
        self.assertGreaterEqual(5, np.abs(l_normal))
        self._assert_float(l_normal)
        self._report("gaussianLikelihoodHomoHeteroDataError", l_normal)
        l_binom = spotpy.likelihoods.gaussianLikelihoodHomoHeteroDataError(self.binom_data, self.binom_comparedata)
        self.assertGreaterEqual(10, np.abs(l_binom))
        self._assert_float(l_binom)
        self._report("gaussianLikelihoodHomoHeteroDataError", l_binom)

    def test_LikelihoodAR1NoC(self):
        results = []
        results.append(spotpy.likelihoods.LikelihoodAR1NoC(
            self.normal_data, self.normal_comparedata, params=([0.98], ["likelihood_phi"])))
        try:
            # An empty parameter set must raise a LikelihoodError.
            results.append(spotpy.likelihoods.LikelihoodAR1NoC(
                self.normal_data, self.normal_comparedata, params=([], [])))
        except LikelihoodError as e:
            print("LikelihoodError occurred: " + str(e))
        # phi outside (-1, 1) exercises the out-of-bounds fallback.
        results.append(spotpy.likelihoods.LikelihoodAR1NoC(
            self.normal_data, self.normal_comparedata, params=([1.1], ["likelihood_phi"])))
        results.append(spotpy.likelihoods.LikelihoodAR1NoC(self.binom_data, self.binom_data))
        for likelihood in results:
            self.assertNotEqual(None, likelihood)
            self._report("LikelihoodAR1NoC", likelihood)

    def test_LikelihoodAR1WithC(self):
        results = []
        try:
            # An empty parameter set must raise a LikelihoodError.
            results.append(spotpy.likelihoods.LikelihoodAR1WithC(
                self.normal_data, self.normal_comparedata, params=([], [])))
        except LikelihoodError as e:
            print("Likelihood Error occurred " + str(e))
        results.append(spotpy.likelihoods.LikelihoodAR1WithC(
            self.normal_data, self.normal_comparedata, params=([0.98], ["likelihood_phi"])))
        # phi outside (-1, 1) exercises the out-of-bounds fallback.
        results.append(spotpy.likelihoods.LikelihoodAR1WithC(
            self.normal_data, self.normal_comparedata, params=([1.1], ["likelihood_phi"])))
        results.append(spotpy.likelihoods.LikelihoodAR1WithC(self.binom_data, self.binom_comparedata))
        for likelihood in results:
            self.assertNotEqual(None, likelihood)
            self._report("LikelihoodAR1WithC", likelihood)

    def test_generalizedLikelihoodFunction(self):
        size = 1000
        data, comparedata = np.random.normal(1500, 2530, size), np.random.normal(355, 25, size)
        param_list = ["likelihood_beta", "likelihood_xi", "likelihood_sigma0", "likelihood_sigma1",
                      "likelihood_phi1", "likelihood_muh"]
        # Each value set below pushes exactly one parameter out of its valid
        # range to walk through the function's individual bounds checks.
        value_sets = [
            [-0.09, 1, 0.5, 0.567, 0.98, 57.32],   # all valid
            [2, 1, 0.5, 0.567, 0.98, 57.32],       # beta out of range
            [-0.09, 11, 0.5, 0.567, 0.98, 57.32],  # xi out of range
            [-0.09, 1, 1.5, 0.567, 0.98, 57.32],   # sigma0 out of range
            [-0.09, 1, 0.5, 1.567, 0.98, 57.32],   # sigma1 out of range
            [-0.09, 1, 0.5, 0.567, 2.98, 57.32],   # phi1 out of range
            [-0.09, 1, 0.5, 0.567, 0.98, 101],     # muh out of range
            [-0.09, 0.0, 0.5, 0.567, 0.98, 101],   # xi == 0 special case
        ]
        results = [spotpy.likelihoods.generalizedLikelihoodFunction(data, comparedata, params=(values, param_list))
                   for values in value_sets]
        try:
            # An empty parameter set must raise a LikelihoodError.
            results.append(spotpy.likelihoods.generalizedLikelihoodFunction(data, comparedata, params=([], [])))
        except LikelihoodError as e:
            print("Likelihood Error occurred " + str(e))
        for likelihood in results:
            self.assertNotEqual(None, likelihood)
            self._assert_float(likelihood)
            self._report("generalizedLikelihoodFunction", likelihood)
        l_binom = spotpy.likelihoods.generalizedLikelihoodFunction(self.binom_data, self.binom_comparedata)
        self.assertNotEqual(None, l_binom)
        self.assertGreaterEqual(-10000, l_binom)
        self._assert_float(l_binom)
        self._report("generalizedLikelihoodFunction", l_binom)

    def test_LaplacianLikelihood(self):
        l_normal = spotpy.likelihoods.LaplacianLikelihood(self.normal_data, self.normal_comparedata)
        self.assertNotEqual(None, l_normal)
        self._assert_float(l_normal)
        self._report("LaplacianLikelihood", l_normal)
        l_binom = spotpy.likelihoods.LaplacianLikelihood(self.binom_data, self.binom_comparedata)
        # Fixed copy-paste bug: the original re-checked l_normal here.
        self.assertNotEqual(None, l_binom)
        self._assert_float(l_binom)
        self._report("LaplacianLikelihood", l_binom)

    def test_SkewedStudentLikelihoodHomoscedastic(self):
        l_normal = spotpy.likelihoods.SkewedStudentLikelihoodHomoscedastic(self.normal_data, self.normal_comparedata)
        self.assertGreaterEqual(12, np.abs(l_normal))
        self._assert_float(l_normal)
        self._report("SkewedStudentLikelihoodHomoscedastic", l_normal)
        l_binom = spotpy.likelihoods.SkewedStudentLikelihoodHomoscedastic(self.binom_data, self.binom_comparedata)
        self.assertGreaterEqual(17, np.abs(l_binom))
        self._assert_float(l_binom)
        self._report("SkewedStudentLikelihoodHomoscedastic", l_binom)

    def test_SkewedStudentLikelihoodHeteroscedastic(self):
        results = []
        param_names = ["likelihood_nu", "likelihood_kappa", "likelihood_phi"]
        results.append(spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedastic(
            self.normal_data, self.normal_comparedata, params=([2.4, 0.15, 0.87], param_names)))
        try:
            # An empty parameter set must raise a LikelihoodError.
            results.append(spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedastic(
                self.normal_data, self.normal_comparedata, params=([], [])))
        except LikelihoodError as e:
            print("An error occurred: " + str(e))
        # Out-of-range nu/phi/kappa combinations hit the fallback branches.
        results.append(spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedastic(
            self.normal_data, self.normal_comparedata, params=([1, 0.15, 1.87], param_names)))
        results.append(spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedastic(
            self.normal_data, self.normal_comparedata, params=([1, 0.15, 0.87], param_names)))
        results.append(spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedastic(
            self.normal_data, self.normal_comparedata, params=([1, -0.15, 0.87], param_names)))
        for likelihood in results:
            if not np.isnan(likelihood):
                self.assertGreaterEqual(-100, likelihood)
            self._assert_float(likelihood)
            self._report("SkewedStudentLikelihoodHeteroscedastic", likelihood)
        l_binom = spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedastic(self.binom_data, self.binom_comparedata)
        if not np.isnan(l_binom):
            self.assertGreaterEqual(-100, l_binom)
        self._assert_float(l_binom)
        self._report("SkewedStudentLikelihoodHeteroscedastic", l_binom)

    def test_SkewedStudentLikelihoodHeteroscedasticAdvancedARModel(self):
        results = []
        param_names = ["likelihood_nu", "likelihood_kappa", "likelihood_phi"]
        results.append(spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedasticAdvancedARModel(
            self.normal_data, self.normal_comparedata, params=([4, 43, 0.4], param_names)))
        try:
            # An empty parameter set must raise a LikelihoodError.
            results.append(spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedasticAdvancedARModel(
                self.normal_data, self.normal_comparedata, params=([], [])))
        except LikelihoodError as e:
            print("Likelihood Error occurred " + str(e))
        # Out-of-range phi/nu/kappa combinations hit the fallback branches.
        results.append(spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedasticAdvancedARModel(
            self.normal_data, self.normal_comparedata, params=([4, 43, 2.4], param_names)))
        results.append(spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedasticAdvancedARModel(
            self.normal_data, self.normal_comparedata, params=([1, 43, 0.4], param_names)))
        results.append(spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedasticAdvancedARModel(
            self.normal_data, self.normal_comparedata, params=([4, -3, 0.4], param_names)))
        for likelihood in results:
            self.assertNotEqual(None, likelihood)
            self._assert_float(likelihood)
            self._report("SkewedStudentLikelihoodHeteroscedasticAdvancedARModel", likelihood)
        # NOTE(review): despite the variable name, the original suite feeds
        # the *normal* data pair here as well; behavior kept unchanged.
        l_binom = spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedasticAdvancedARModel(
            self.normal_data, self.normal_comparedata)
        self.assertNotEqual(None, l_binom)
        self._assert_float(l_binom)
        self._report("SkewedStudentLikelihoodHeteroscedasticAdvancedARModel", l_binom)

    def test_NoisyABCGaussianLikelihood(self):
        l_normal = spotpy.likelihoods.NoisyABCGaussianLikelihood(self.normal_data, self.normal_comparedata)
        self.assertNotEqual(None, l_normal)
        self._assert_float(l_normal)
        self._report("NoisyABCGaussianLikelihood", l_normal)
        # Identical data with zero measurement error exercises the
        # division-guard path inside the likelihood.
        l_binom = spotpy.likelihoods.NoisyABCGaussianLikelihood(self.binom_data, self.binom_data,
                                                                measerror=[0.0])
        self.assertNotEqual(None, l_binom)
        self._assert_float(l_binom)
        self._report("NoisyABCGaussianLikelihood", l_binom)

    def test_ABCBoxcarLikelihood(self):
        l_normal = spotpy.likelihoods.ABCBoxcarLikelihood(self.normal_data, self.normal_comparedata)
        self.assertNotEqual(None, l_normal)
        # ``assertNotEqual(np.nan, x)`` was vacuous (NaN never compares
        # equal to anything); check for NaN explicitly instead.
        self.assertFalse(np.isnan(l_normal))
        self._assert_float(l_normal)
        self._report("ABCBoxcarLikelihood", l_normal)
        l_binom = spotpy.likelihoods.ABCBoxcarLikelihood(self.binom_data, self.binom_comparedata)
        self.assertNotEqual(None, l_binom)
        self.assertFalse(np.isnan(l_binom))
        self._assert_float(l_binom)
        self._report("ABCBoxcarLikelihood", l_binom)

    def test_LimitsOfAcceptability(self):
        l_normal = spotpy.likelihoods.LimitsOfAcceptability(self.normal_data, self.normal_comparedata)
        self.assertEqual(12, l_normal)
        self.assertNotEqual(None, l_normal)
        self._assert_int(l_normal)
        self._report("LimitsOfAcceptability", l_normal)
        l_binom = spotpy.likelihoods.LimitsOfAcceptability(self.binom_data, self.binom_comparedata)
        self.assertEqual(5, l_binom)
        self.assertNotEqual(None, l_binom)
        self._assert_int(l_binom)
        self._report("LimitsOfAcceptability", l_binom)

    def test_InverseErrorVarianceShapingFactor(self):
        l_normal = spotpy.likelihoods.InverseErrorVarianceShapingFactor(self.normal_data, self.normal_comparedata)
        self.assertGreaterEqual(-10, l_normal)
        self._assert_float(l_normal)
        self._report("inverseErrorVarianceShapingFactor", l_normal)
        l_binom = spotpy.likelihoods.InverseErrorVarianceShapingFactor(self.binom_data, self.binom_comparedata)
        self.assertGreaterEqual(-10, l_binom)
        self._assert_float(l_binom)
        self._report("inverseErrorVarianceShapingFactor", l_binom)

    def test_ExponentialTransformErrVarShapingFactor(self):
        l_binom = spotpy.likelihoods.ExponentialTransformErrVarShapingFactor(self.binom_data, self.binom_comparedata)
        self.assertGreaterEqual(-30, l_binom)
        self._assert_float(l_binom)
        # Fixed mislabeled diagnostic output (was copy-pasted from the
        # InverseErrorVariance test).
        self._report("ExponentialTransformErrVarShapingFactor", l_binom)
        l_gauss = spotpy.likelihoods.ExponentialTransformErrVarShapingFactor(self.normal_data, self.normal_comparedata)
        self.assertGreaterEqual(-30, l_gauss)
        self._assert_float(l_gauss)
        self._report("ExponentialTransformErrVarShapingFactor", l_gauss)

    def test_NashSutcliffeEfficiencyShapingFactor(self):
        results = []
        results.append(spotpy.likelihoods.NashSutcliffeEfficiencyShapingFactor(self.normal_data,
                                                                               self.normal_comparedata))
        # Identical data and simulation: perfect-fit edge case.
        results.append(spotpy.likelihoods.NashSutcliffeEfficiencyShapingFactor(self.normal_data,
                                                                               self.normal_data))
        results.append(spotpy.likelihoods.NashSutcliffeEfficiencyShapingFactor(self.binom_data,
                                                                               self.binom_comparedata))
        try:
            # Empty inputs must raise a LikelihoodError.
            results.append(spotpy.likelihoods.NashSutcliffeEfficiencyShapingFactor([], []))
        except LikelihoodError as e:
            print("Likelihood Error occurred: " + str(e))
        try:
            # Mismatched lengths must raise a LikelihoodError.
            results.append(spotpy.likelihoods.NashSutcliffeEfficiencyShapingFactor([1], []))
        except LikelihoodError as e:
            print("Likelihood Error occurred " + str(e))
        for likelihood in results:
            self.assertNotEqual(None, likelihood)
            self._assert_float(likelihood)
            self._report("NashSutcliffeEfficiencyShapingFactor", likelihood)

    def test_sumOfAbsoluteErrorResiduals(self):
        l_normal = spotpy.likelihoods.sumOfAbsoluteErrorResiduals(self.normal_data, self.normal_comparedata)
        self.assertGreaterEqual(7, np.abs(np.abs(l_normal) - 10))
        self._assert_float(l_normal)
        self._report("sumOfAbsoluteErrorResiduals", l_normal)
        l_binom = spotpy.likelihoods.sumOfAbsoluteErrorResiduals(self.binom_data, self.binom_comparedata)
        self.assertGreaterEqual(7, np.abs(np.abs(l_binom) - 10))
        self._assert_float(l_binom)
        self._report("sumOfAbsoluteErrorResiduals", l_binom)
# Run the suite directly: `python test_likelihood.py`.
if __name__ == '__main__':
    unittest.main()
try:
import spotpy
except ImportError:
import sys
sys.path.append(".")
import spotpy
import unittest
from spotpy.likelihoods import LikelihoodError
# We use all available likelihood functions. The pydoc of every function tells, if we can add a
# parameter `param` to the function which includes model parameter. The `param` must be None or a tuple with values
# and names. If `param` is None, the needed values are calculated by the function itself.
class TestLikelihood(unittest.TestCase):
def setUp(self):
np.random.seed(12)
self.normal_data, self.normal_comparedata = np.random.normal(1500, 2530, 20), np.random.normal(15, 25, 20)
self.binom_data, self.binom_comparedata = np.random.binomial(20, 0.1, 20), np.random.binomial(20, 0.1, 20)
self.do_print = True
def test_logLikelihood(self):
l_normal = spotpy.likelihoods.logLikelihood(self.normal_data, self.normal_comparedata)
self.assertGreaterEqual(np.abs(l_normal), 900)
self.assertEqual(type(np.float(l_normal)), type(np.float(1)))
if self.do_print:
print("logLikelihood: " + str(l_normal))
l_binom = spotpy.likelihoods.logLikelihood(self.binom_data, self.binom_comparedata)
self.assertGreaterEqual(np.abs(l_binom), 900)
self.assertEqual(type(np.float(l_binom)), type(np.float(1)))
if self.do_print:
print("logLikelihood: " + str(l_binom))
def test_gaussianLikelihoodMeasErrorOut(self):
l_normal = spotpy.likelihoods.gaussianLikelihoodMeasErrorOut(self.normal_data, self.normal_comparedata)
self.assertGreaterEqual(-40, l_normal)
self.assertEqual(type(np.float(l_normal)), type(np.float(1)))
if self.do_print:
print("gaussianLikelihoodMeasErrorOut: " + str(l_normal))
l_binom = spotpy.likelihoods.gaussianLikelihoodMeasErrorOut(self.binom_data, self.binom_comparedata)
self.assertGreaterEqual(-40, l_binom)
self.assertEqual(type(np.float(l_binom)), type(np.float(1)))
if self.do_print:
print("gaussianLikelihoodMeasErrorOut: " + str(l_binom))
def test_gaussianLikelihoodHomoHeteroDataError(self):
l_normal = spotpy.likelihoods.gaussianLikelihoodHomoHeteroDataError(self.normal_data, self.normal_comparedata)
self.assertGreaterEqual(5, np.abs(l_normal))
self.assertEqual(type(np.float(l_normal)), type(np.float(1)))
if self.do_print:
print("gaussianLikelihoodHomoHeteroDataError: " + str(l_normal))
l_binom = spotpy.likelihoods.gaussianLikelihoodHomoHeteroDataError(self.binom_data, self.binom_comparedata)
self.assertGreaterEqual(10, np.abs(l_binom))
self.assertEqual(type(np.float(l_binom)), type(np.float(1)))
if self.do_print:
print("gaussianLikelihoodHomoHeteroDataError: " + str(l_binom))
def test_LikelihoodAR1NoC(self):
l_list = []
l_list.append(spotpy.likelihoods.LikelihoodAR1NoC(self.normal_data, self.normal_comparedata,
params=([0.98], ["likelihood_phi"])))
try:
l_list.append(spotpy.likelihoods.LikelihoodAR1NoC(self.normal_data, self.normal_comparedata,
params=([], [])))
except LikelihoodError as e:
print("LikelihoodError occurred: " + str(e))
l_list.append(spotpy.likelihoods.LikelihoodAR1NoC(self.normal_data, self.normal_comparedata,
params=([1.1], ["likelihood_phi"])))
l_list.append(spotpy.likelihoods.LikelihoodAR1NoC(self.binom_data, self.binom_data))
for l in l_list:
self.assertNotEqual(None, l)
if self.do_print:
print("LikelihoodAR1NoC: " + str(l))
def test_LikelihoodAR1WithC(self):
l_normal_list = []
try:
l_normal_list.append(spotpy.likelihoods.LikelihoodAR1WithC(self.normal_data, self.normal_comparedata,
params=([], [])))
except LikelihoodError as e:
print("Likelihood Error occurred " + str(e))
l_normal_list.append(spotpy.likelihoods.LikelihoodAR1WithC(self.normal_data, self.normal_comparedata,
params=([0.98], ["likelihood_phi"])))
l_normal_list.append(spotpy.likelihoods.LikelihoodAR1WithC(self.normal_data, self.normal_comparedata,
params=([1.1], ["likelihood_phi"])))
l_normal_list.append(spotpy.likelihoods.LikelihoodAR1WithC(self.binom_data, self.binom_comparedata))
for l_normal in l_normal_list:
self.assertNotEqual(None, l_normal)
if self.do_print:
print("LikelihoodAR1WithC: " + str(l_normal))
def test_generalizedLikelihoodFunction(self):
size = 1000
data, comparedata = np.random.normal(1500, 2530, size), np.random.normal(355, 25, size)
param_list = ["likelihood_beta", "likelihood_xi", "likelihood_sigma0", "likelihood_sigma1", "likelihood_phi1",
"likelihood_muh"]
l_normal_list = []
l_normal_list.append(spotpy.likelihoods.generalizedLikelihoodFunction(data, comparedata, params=
([-0.09, 1, 0.5, 0.567, 0.98, 57.32], param_list)))
try:
l_normal_list.append(spotpy.likelihoods.generalizedLikelihoodFunction(data, comparedata, params=
([], [])))
except LikelihoodError as e:
print("Likelihood Error occurred " + str(e))
l_normal_list.append(spotpy.likelihoods.generalizedLikelihoodFunction(data, comparedata, params=
([2, 1, 0.5, 0.567, 0.98, 57.32], param_list)))
l_normal_list.append(spotpy.likelihoods.generalizedLikelihoodFunction(data, comparedata, params=
([-0.09, 11, 0.5, 0.567, 0.98, 57.32], param_list)))
l_normal_list.append(spotpy.likelihoods.generalizedLikelihoodFunction(data, comparedata, params=
([-0.09, 1, 1.5, 0.567, 0.98, 57.32], param_list)))
l_normal_list.append(spotpy.likelihoods.generalizedLikelihoodFunction(data, comparedata, params=
([-0.09, 1, 0.5, 1.567, 0.98, 57.32], param_list)))
l_normal_list.append(spotpy.likelihoods.generalizedLikelihoodFunction(data, comparedata, params=
([-0.09, 1, 0.5, 0.567, 2.98, 57.32], param_list)))
l_normal_list.append(spotpy.likelihoods.generalizedLikelihoodFunction(data, comparedata, params=
([-0.09, 1, 0.5, 0.567, 0.98, 101], param_list)))
l_normal_list.append(spotpy.likelihoods.generalizedLikelihoodFunction(data, comparedata, params=
([-0.09, 0.0, 0.5, 0.567, 0.98, 101], param_list)))
for l_normal in l_normal_list:
self.assertNotEqual(None, l_normal)
self.assertEqual(type(np.float(l_normal)), type(np.float(1)))
if self.do_print:
print("generalizedLikelihoodFunction: " + str(l_normal))
l_binom = spotpy.likelihoods.generalizedLikelihoodFunction(self.binom_data, self.binom_comparedata)
self.assertNotEqual(None, l_binom)
self.assertGreaterEqual(-10000, l_binom)
self.assertEqual(type(np.float(l_binom)), type(np.float(1)))
if self.do_print:
print("generalizedLikelihoodFunction: " + str(l_binom))
def test_LaplacianLikelihood(self):
l_normal = spotpy.likelihoods.LaplacianLikelihood(self.normal_data, self.normal_comparedata)
self.assertNotEqual(None, l_normal)
self.assertEqual(type(np.float(l_normal)), type(np.float(1)))
if self.do_print:
print("LaplacianLikelihood: " + str(l_normal))
l_binom = spotpy.likelihoods.LaplacianLikelihood(self.binom_data, self.binom_comparedata)
self.assertNotEqual(None, l_normal)
self.assertEqual(type(np.float(l_binom)), type(np.float(1)))
if self.do_print:
print("LaplacianLikelihood: " + str(l_binom))
def test_SkewedStudentLikelihoodHomoscedastic(self):
l_normal = spotpy.likelihoods.SkewedStudentLikelihoodHomoscedastic(self.normal_data, self.normal_comparedata)
self.assertGreaterEqual(12, np.abs(l_normal))
self.assertEqual(type(np.float(l_normal)), type(np.float(1)))
if self.do_print:
print("SkewedStudentLikelihoodHomoscedastic: " + str(l_normal))
l_binom = spotpy.likelihoods.SkewedStudentLikelihoodHomoscedastic(self.binom_data, self.binom_comparedata)
self.assertGreaterEqual(17, np.abs(l_binom))
self.assertEqual(type(np.float(l_binom)), type(np.float(1)))
if self.do_print:
print("SkewedStudentLikelihoodHomoscedastic: " + str(l_binom))
def test_SkewedStudentLikelihoodHeteroscedastic(self):
l_normal_list = []
paramDependencies = ["likelihood_nu", "likelihood_kappa", "likelihood_phi"]
l_normal_list.append(
spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedastic(self.normal_data, self.normal_comparedata,
params=([2.4, 0.15, 0.87], paramDependencies)))
try:
l_normal_list.append(
spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedastic(self.normal_data, self.normal_comparedata,
params=([], [])))
except LikelihoodError as e:
print("An error occurred: " + str(e))
l_normal_list.append(
spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedastic(self.normal_data, self.normal_comparedata,
params=([1, 0.15, 1.87], paramDependencies)))
l_normal_list.append(
spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedastic(self.normal_data, self.normal_comparedata,
params=([1, 0.15, 0.87], paramDependencies)))
l_normal_list.append(
spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedastic(self.normal_data, self.normal_comparedata,
params=([1, -0.15, 0.87], paramDependencies)))
for l_normal in l_normal_list:
if not np.isnan(l_normal):
self.assertGreaterEqual(-100, l_normal)
self.assertEqual(type(np.float(l_normal)), type(np.float(1)))
if self.do_print:
print("SkewedStudentLikelihoodHeteroscedastic: " + str(l_normal))
l_binom = spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedastic(self.binom_data, self.binom_comparedata)
if not np.isnan(l_binom):
self.assertGreaterEqual(-100, l_binom)
self.assertEqual(type(np.float(l_binom)), type(np.float(1)))
if self.do_print:
print("SkewedStudentLikelihoodHeteroscedastic: " + str(l_binom))
def test_SkewedStudentLikelihoodHeteroscedasticAdvancedARModel(self):
l_normal_list = []
params = ["likelihood_nu", "likelihood_kappa", "likelihood_phi"]
l_normal_list.append(spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedasticAdvancedARModel(
self.normal_data, self.normal_comparedata, params=([4, 43, 0.4], params)))
try:
l_normal_list.append(spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedasticAdvancedARModel(
self.normal_data, self.normal_comparedata, params=([], [])))
except LikelihoodError as e:
print("Likelihood Error occurred " + str(e))
l_normal_list.append(spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedasticAdvancedARModel(
self.normal_data, self.normal_comparedata, params=([4, 43, 2.4], params)))
l_normal_list.append(spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedasticAdvancedARModel(
self.normal_data, self.normal_comparedata, params=([1, 43, 0.4], params)))
l_normal_list.append(spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedasticAdvancedARModel(
self.normal_data, self.normal_comparedata, params=([4, -3, 0.4], params)))
for l_normal in l_normal_list:
self.assertNotEqual(None, l_normal)
self.assertEqual(type(np.float(l_normal)), type(np.float(1)))
if self.do_print:
print("SkewedStudentLikelihoodHeteroscedasticAdvancedARModel: " + str(l_normal))
l_binom = spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedasticAdvancedARModel(
self.normal_data, self.normal_comparedata)
self.assertNotEqual(None, l_binom)
self.assertEqual(type(np.float(l_binom)), type(np.float(1)))
if self.do_print:
print("SkewedStudentLikelihoodHeteroscedasticAdvancedARModel: " + str(l_binom))
def test_NoisyABCGaussianLikelihood(self):
l_normal = spotpy.likelihoods.NoisyABCGaussianLikelihood(self.normal_data, self.normal_comparedata)
self.assertNotEqual(None, l_normal)
self.assertEqual(type(np.float(l_normal)), type(np.float(1)))
if self.do_print:
print("NoisyABCGaussianLikelihood: " + str(l_normal))
l_binom = spotpy.likelihoods.NoisyABCGaussianLikelihood(self.binom_data, self.binom_data,
measerror=[0.0])
self.assertNotEqual(None, l_binom)
self.assertEqual(type(np.float(l_binom)), type(np.float(1)))
if self.do_print:
print("NoisyABCGaussianLikelihood: " + str(l_binom))
def test_ABCBoxcarLikelihood(self):
l_normal = spotpy.likelihoods.ABCBoxcarLikelihood(self.normal_data, self.normal_comparedata)
self.assertNotEqual(None, l_normal)
self.assertNotEqual(np.nan, l_normal)
self.assertEqual(type(np.float(l_normal)), type(np.float(1)))
if self.do_print:
print("ABCBoxcarLikelihood: " + str(l_normal))
l_binom = spotpy.likelihoods.ABCBoxcarLikelihood(self.binom_data, self.binom_comparedata)
self.assertNotEqual(None, l_binom)
self.assertNotEqual(np.nan, l_binom)
self.assertEqual(type(np.float(l_binom)), type(np.float(1)))
if self.do_print:
print("ABCBoxcarLikelihood: " + str(l_binom))
def test_LimitsOfAcceptability(self):
l_normal = spotpy.likelihoods.LimitsOfAcceptability(self.normal_data, self.normal_comparedata)
self.assertEqual(12, l_normal)
self.assertNotEqual(None, l_normal)
self.assertEqual(type(np.int(l_normal)), type(int(1)))
if self.do_print:
print("LimitsOfAcceptability: " + str(l_normal))
l_binom = spotpy.likelihoods.LimitsOfAcceptability(self.binom_data, self.binom_comparedata)
self.assertEqual(5, l_binom)
self.assertNotEqual(None, l_binom)
self.assertEqual(type(np.int(l_binom)), type(int(1)))
if self.do_print:
print("LimitsOfAcceptability: " + str(l_binom))
def test_InverseErrorVarianceShapingFactor(self):
l_normal = spotpy.likelihoods.InverseErrorVarianceShapingFactor(self.normal_data, self.normal_comparedata)
self.assertGreaterEqual(-10, l_normal)
self.assertEqual(type(np.float(l_normal)), type(np.float(1)))
if self.do_print:
print("inverseErrorVarianceShapingFactor: " + str(l_normal))
l_binom = spotpy.likelihoods.InverseErrorVarianceShapingFactor(self.binom_data, self.binom_comparedata)
self.assertGreaterEqual(-10, l_binom)
self.assertEqual(type(np.float(l_binom)), type(np.float(1)))
if self.do_print:
print("inverseErrorVarianceShapingFactor: " + str(l_binom))
def test_ExponentialTransformErrVarShapingFactor(self):
l_binom = spotpy.likelihoods.ExponentialTransformErrVarShapingFactor(self.binom_data, self.binom_comparedata)
self.assertGreaterEqual(-30, l_binom)
self.assertEqual(type(np.float(l_binom)), type(np.float(1)))
if self.do_print:
print("inverseErrorVarianceShapingFactor: " + str(l_binom))
l_gauss = spotpy.likelihoods.ExponentialTransformErrVarShapingFactor(self.normal_data, self.normal_comparedata)
self.assertGreaterEqual(-30, l_gauss)
self.assertEqual(type(np.float(l_gauss)), type(np.float(1)))
if self.do_print:
print("inverseErrorVarianceShapingFactor: " + str(l_gauss))
def test_NashSutcliffeEfficiencyShapingFactor(self):
l_normal_list = []
l_normal_list.append(spotpy.likelihoods.NashSutcliffeEfficiencyShapingFactor(self.normal_data,
self.normal_comparedata))
l_normal_list.append(spotpy.likelihoods.NashSutcliffeEfficiencyShapingFactor(self.normal_data,
self.normal_data))
l_normal_list.append(spotpy.likelihoods.NashSutcliffeEfficiencyShapingFactor(self.binom_data,
self.binom_comparedata))
try:
l_normal_list.append(spotpy.likelihoods.NashSutcliffeEfficiencyShapingFactor([],
[]))
except LikelihoodError as e:
print("Likelihood Error occurred: " + str(e))
try:
l_normal_list.append(spotpy.likelihoods.NashSutcliffeEfficiencyShapingFactor([1],
[]))
except LikelihoodError as e:
print("Likelihood Error occurred " + str(e))
for l_normal in l_normal_list:
self.assertNotEqual(None, l_normal)
self.assertEqual(type(np.float(l_normal)), type(np.float(1)))
if self.do_print:
print("NashSutcliffeEfficiencyShapingFactor: " + str(l_normal))
def test_sumOfAbsoluteErrorResiduals(self):
l_normal = spotpy.likelihoods.sumOfAbsoluteErrorResiduals(self.normal_data, self.normal_comparedata)
self.assertGreaterEqual(7, np.abs(np.abs(l_normal) - 10))
self.assertEqual(type(np.float(l_normal)), type(np.float(1)))
if self.do_print:
print("sumOfAbsoluteErrorResiduals: " + str(l_normal))
l_binom = spotpy.likelihoods.sumOfAbsoluteErrorResiduals(self.binom_data, self.binom_comparedata)
self.assertGreaterEqual(7, np.abs(np.abs(l_binom) - 10))
self.assertEqual(type(np.float(l_binom)), type(np.float(1)))
if self.do_print:
print("sumOfAbsoluteErrorResiduals: " + str(l_binom))
if __name__ == '__main__':
unittest.main() | 0.531696 | 0.671087 |
from getratings.models.ratings import Ratings
class NA_Orianna_Bot_Aatrox(Ratings):
pass
class NA_Orianna_Bot_Ahri(Ratings):
pass
class NA_Orianna_Bot_Akali(Ratings):
pass
class NA_Orianna_Bot_Alistar(Ratings):
pass
class NA_Orianna_Bot_Amumu(Ratings):
pass
class NA_Orianna_Bot_Anivia(Ratings):
pass
class NA_Orianna_Bot_Annie(Ratings):
pass
class NA_Orianna_Bot_Ashe(Ratings):
pass
class NA_Orianna_Bot_AurelionSol(Ratings):
pass
class NA_Orianna_Bot_Azir(Ratings):
pass
class NA_Orianna_Bot_Bard(Ratings):
pass
class NA_Orianna_Bot_Blitzcrank(Ratings):
pass
class NA_Orianna_Bot_Brand(Ratings):
pass
class NA_Orianna_Bot_Braum(Ratings):
pass
class NA_Orianna_Bot_Caitlyn(Ratings):
pass
class NA_Orianna_Bot_Camille(Ratings):
pass
class NA_Orianna_Bot_Cassiopeia(Ratings):
pass
class NA_Orianna_Bot_Chogath(Ratings):
pass
class NA_Orianna_Bot_Corki(Ratings):
pass
class NA_Orianna_Bot_Darius(Ratings):
pass
class NA_Orianna_Bot_Diana(Ratings):
pass
class NA_Orianna_Bot_Draven(Ratings):
pass
class NA_Orianna_Bot_DrMundo(Ratings):
pass
class NA_Orianna_Bot_Ekko(Ratings):
pass
class NA_Orianna_Bot_Elise(Ratings):
pass
class NA_Orianna_Bot_Evelynn(Ratings):
pass
class NA_Orianna_Bot_Ezreal(Ratings):
pass
class NA_Orianna_Bot_Fiddlesticks(Ratings):
pass
class NA_Orianna_Bot_Fiora(Ratings):
pass
class NA_Orianna_Bot_Fizz(Ratings):
pass
class NA_Orianna_Bot_Galio(Ratings):
pass
class NA_Orianna_Bot_Gangplank(Ratings):
pass
class NA_Orianna_Bot_Garen(Ratings):
pass
class NA_Orianna_Bot_Gnar(Ratings):
pass
class NA_Orianna_Bot_Gragas(Ratings):
pass
class NA_Orianna_Bot_Graves(Ratings):
pass
class NA_Orianna_Bot_Hecarim(Ratings):
pass
class NA_Orianna_Bot_Heimerdinger(Ratings):
pass
class NA_Orianna_Bot_Illaoi(Ratings):
pass
class NA_Orianna_Bot_Irelia(Ratings):
pass
class NA_Orianna_Bot_Ivern(Ratings):
pass
class NA_Orianna_Bot_Janna(Ratings):
pass
class NA_Orianna_Bot_JarvanIV(Ratings):
pass
class NA_Orianna_Bot_Jax(Ratings):
pass
class NA_Orianna_Bot_Jayce(Ratings):
pass
class NA_Orianna_Bot_Jhin(Ratings):
pass
class NA_Orianna_Bot_Jinx(Ratings):
pass
class NA_Orianna_Bot_Kalista(Ratings):
pass
class NA_Orianna_Bot_Karma(Ratings):
pass
class NA_Orianna_Bot_Karthus(Ratings):
pass
class NA_Orianna_Bot_Kassadin(Ratings):
pass
class NA_Orianna_Bot_Katarina(Ratings):
pass
class NA_Orianna_Bot_Kayle(Ratings):
pass
class NA_Orianna_Bot_Kayn(Ratings):
pass
class NA_Orianna_Bot_Kennen(Ratings):
pass
class NA_Orianna_Bot_Khazix(Ratings):
pass
class NA_Orianna_Bot_Kindred(Ratings):
pass
class NA_Orianna_Bot_Kled(Ratings):
pass
class NA_Orianna_Bot_KogMaw(Ratings):
pass
class NA_Orianna_Bot_Leblanc(Ratings):
pass
class NA_Orianna_Bot_LeeSin(Ratings):
pass
class NA_Orianna_Bot_Leona(Ratings):
pass
class NA_Orianna_Bot_Lissandra(Ratings):
pass
class NA_Orianna_Bot_Lucian(Ratings):
pass
class NA_Orianna_Bot_Lulu(Ratings):
pass
class NA_Orianna_Bot_Lux(Ratings):
pass
class NA_Orianna_Bot_Malphite(Ratings):
pass
class NA_Orianna_Bot_Malzahar(Ratings):
pass
class NA_Orianna_Bot_Maokai(Ratings):
pass
class NA_Orianna_Bot_MasterYi(Ratings):
pass
class NA_Orianna_Bot_MissFortune(Ratings):
pass
class NA_Orianna_Bot_MonkeyKing(Ratings):
pass
class NA_Orianna_Bot_Mordekaiser(Ratings):
pass
class NA_Orianna_Bot_Morgana(Ratings):
pass
class NA_Orianna_Bot_Nami(Ratings):
pass
class NA_Orianna_Bot_Nasus(Ratings):
pass
class NA_Orianna_Bot_Nautilus(Ratings):
pass
class NA_Orianna_Bot_Nidalee(Ratings):
pass
class NA_Orianna_Bot_Nocturne(Ratings):
pass
class NA_Orianna_Bot_Nunu(Ratings):
pass
class NA_Orianna_Bot_Olaf(Ratings):
pass
class NA_Orianna_Bot_Orianna(Ratings):
pass
class NA_Orianna_Bot_Ornn(Ratings):
pass
class NA_Orianna_Bot_Pantheon(Ratings):
pass
class NA_Orianna_Bot_Poppy(Ratings):
pass
class NA_Orianna_Bot_Quinn(Ratings):
pass
class NA_Orianna_Bot_Rakan(Ratings):
pass
class NA_Orianna_Bot_Rammus(Ratings):
pass
class NA_Orianna_Bot_RekSai(Ratings):
pass
class NA_Orianna_Bot_Renekton(Ratings):
pass
class NA_Orianna_Bot_Rengar(Ratings):
pass
class NA_Orianna_Bot_Riven(Ratings):
pass
class NA_Orianna_Bot_Rumble(Ratings):
pass
class NA_Orianna_Bot_Ryze(Ratings):
pass
class NA_Orianna_Bot_Sejuani(Ratings):
pass
class NA_Orianna_Bot_Shaco(Ratings):
pass
class NA_Orianna_Bot_Shen(Ratings):
pass
class NA_Orianna_Bot_Shyvana(Ratings):
pass
class NA_Orianna_Bot_Singed(Ratings):
pass
class NA_Orianna_Bot_Sion(Ratings):
pass
class NA_Orianna_Bot_Sivir(Ratings):
pass
class NA_Orianna_Bot_Skarner(Ratings):
pass
class NA_Orianna_Bot_Sona(Ratings):
pass
class NA_Orianna_Bot_Soraka(Ratings):
pass
class NA_Orianna_Bot_Swain(Ratings):
pass
class NA_Orianna_Bot_Syndra(Ratings):
pass
class NA_Orianna_Bot_TahmKench(Ratings):
pass
class NA_Orianna_Bot_Taliyah(Ratings):
pass
class NA_Orianna_Bot_Talon(Ratings):
pass
class NA_Orianna_Bot_Taric(Ratings):
pass
class NA_Orianna_Bot_Teemo(Ratings):
pass
class NA_Orianna_Bot_Thresh(Ratings):
pass
class NA_Orianna_Bot_Tristana(Ratings):
pass
class NA_Orianna_Bot_Trundle(Ratings):
pass
class NA_Orianna_Bot_Tryndamere(Ratings):
pass
class NA_Orianna_Bot_TwistedFate(Ratings):
pass
class NA_Orianna_Bot_Twitch(Ratings):
pass
class NA_Orianna_Bot_Udyr(Ratings):
pass
class NA_Orianna_Bot_Urgot(Ratings):
pass
class NA_Orianna_Bot_Varus(Ratings):
pass
class NA_Orianna_Bot_Vayne(Ratings):
pass
class NA_Orianna_Bot_Veigar(Ratings):
pass
class NA_Orianna_Bot_Velkoz(Ratings):
pass
class NA_Orianna_Bot_Vi(Ratings):
pass
class NA_Orianna_Bot_Viktor(Ratings):
pass
class NA_Orianna_Bot_Vladimir(Ratings):
pass
class NA_Orianna_Bot_Volibear(Ratings):
pass
class NA_Orianna_Bot_Warwick(Ratings):
pass
class NA_Orianna_Bot_Xayah(Ratings):
pass
class NA_Orianna_Bot_Xerath(Ratings):
pass
class NA_Orianna_Bot_XinZhao(Ratings):
pass
class NA_Orianna_Bot_Yasuo(Ratings):
pass
class NA_Orianna_Bot_Yorick(Ratings):
pass
class NA_Orianna_Bot_Zac(Ratings):
pass
class NA_Orianna_Bot_Zed(Ratings):
pass
class NA_Orianna_Bot_Ziggs(Ratings):
pass
class NA_Orianna_Bot_Zilean(Ratings):
pass
class NA_Orianna_Bot_Zyra(Ratings):
pass | loldib/getratings/models/NA/na_orianna/na_orianna_bot.py | from getratings.models.ratings import Ratings
class NA_Orianna_Bot_Aatrox(Ratings):
pass
class NA_Orianna_Bot_Ahri(Ratings):
pass
class NA_Orianna_Bot_Akali(Ratings):
pass
class NA_Orianna_Bot_Alistar(Ratings):
pass
class NA_Orianna_Bot_Amumu(Ratings):
pass
class NA_Orianna_Bot_Anivia(Ratings):
pass
class NA_Orianna_Bot_Annie(Ratings):
pass
class NA_Orianna_Bot_Ashe(Ratings):
pass
class NA_Orianna_Bot_AurelionSol(Ratings):
pass
class NA_Orianna_Bot_Azir(Ratings):
pass
class NA_Orianna_Bot_Bard(Ratings):
pass
class NA_Orianna_Bot_Blitzcrank(Ratings):
pass
class NA_Orianna_Bot_Brand(Ratings):
pass
class NA_Orianna_Bot_Braum(Ratings):
pass
class NA_Orianna_Bot_Caitlyn(Ratings):
pass
class NA_Orianna_Bot_Camille(Ratings):
pass
class NA_Orianna_Bot_Cassiopeia(Ratings):
pass
class NA_Orianna_Bot_Chogath(Ratings):
pass
class NA_Orianna_Bot_Corki(Ratings):
pass
class NA_Orianna_Bot_Darius(Ratings):
pass
class NA_Orianna_Bot_Diana(Ratings):
pass
class NA_Orianna_Bot_Draven(Ratings):
pass
class NA_Orianna_Bot_DrMundo(Ratings):
pass
class NA_Orianna_Bot_Ekko(Ratings):
pass
class NA_Orianna_Bot_Elise(Ratings):
pass
class NA_Orianna_Bot_Evelynn(Ratings):
pass
class NA_Orianna_Bot_Ezreal(Ratings):
pass
class NA_Orianna_Bot_Fiddlesticks(Ratings):
pass
class NA_Orianna_Bot_Fiora(Ratings):
pass
class NA_Orianna_Bot_Fizz(Ratings):
pass
class NA_Orianna_Bot_Galio(Ratings):
pass
class NA_Orianna_Bot_Gangplank(Ratings):
pass
class NA_Orianna_Bot_Garen(Ratings):
pass
class NA_Orianna_Bot_Gnar(Ratings):
pass
class NA_Orianna_Bot_Gragas(Ratings):
pass
class NA_Orianna_Bot_Graves(Ratings):
pass
class NA_Orianna_Bot_Hecarim(Ratings):
pass
class NA_Orianna_Bot_Heimerdinger(Ratings):
pass
class NA_Orianna_Bot_Illaoi(Ratings):
pass
class NA_Orianna_Bot_Irelia(Ratings):
pass
class NA_Orianna_Bot_Ivern(Ratings):
pass
class NA_Orianna_Bot_Janna(Ratings):
pass
class NA_Orianna_Bot_JarvanIV(Ratings):
pass
class NA_Orianna_Bot_Jax(Ratings):
pass
class NA_Orianna_Bot_Jayce(Ratings):
pass
class NA_Orianna_Bot_Jhin(Ratings):
pass
class NA_Orianna_Bot_Jinx(Ratings):
pass
class NA_Orianna_Bot_Kalista(Ratings):
pass
class NA_Orianna_Bot_Karma(Ratings):
pass
class NA_Orianna_Bot_Karthus(Ratings):
pass
class NA_Orianna_Bot_Kassadin(Ratings):
pass
class NA_Orianna_Bot_Katarina(Ratings):
pass
class NA_Orianna_Bot_Kayle(Ratings):
pass
class NA_Orianna_Bot_Kayn(Ratings):
pass
class NA_Orianna_Bot_Kennen(Ratings):
pass
class NA_Orianna_Bot_Khazix(Ratings):
pass
class NA_Orianna_Bot_Kindred(Ratings):
pass
class NA_Orianna_Bot_Kled(Ratings):
pass
class NA_Orianna_Bot_KogMaw(Ratings):
pass
class NA_Orianna_Bot_Leblanc(Ratings):
pass
class NA_Orianna_Bot_LeeSin(Ratings):
pass
class NA_Orianna_Bot_Leona(Ratings):
pass
class NA_Orianna_Bot_Lissandra(Ratings):
pass
class NA_Orianna_Bot_Lucian(Ratings):
pass
class NA_Orianna_Bot_Lulu(Ratings):
pass
class NA_Orianna_Bot_Lux(Ratings):
pass
class NA_Orianna_Bot_Malphite(Ratings):
pass
class NA_Orianna_Bot_Malzahar(Ratings):
pass
class NA_Orianna_Bot_Maokai(Ratings):
pass
class NA_Orianna_Bot_MasterYi(Ratings):
pass
class NA_Orianna_Bot_MissFortune(Ratings):
pass
class NA_Orianna_Bot_MonkeyKing(Ratings):
pass
class NA_Orianna_Bot_Mordekaiser(Ratings):
pass
class NA_Orianna_Bot_Morgana(Ratings):
pass
class NA_Orianna_Bot_Nami(Ratings):
pass
class NA_Orianna_Bot_Nasus(Ratings):
pass
class NA_Orianna_Bot_Nautilus(Ratings):
pass
class NA_Orianna_Bot_Nidalee(Ratings):
pass
class NA_Orianna_Bot_Nocturne(Ratings):
pass
class NA_Orianna_Bot_Nunu(Ratings):
pass
class NA_Orianna_Bot_Olaf(Ratings):
pass
class NA_Orianna_Bot_Orianna(Ratings):
pass
class NA_Orianna_Bot_Ornn(Ratings):
pass
class NA_Orianna_Bot_Pantheon(Ratings):
pass
class NA_Orianna_Bot_Poppy(Ratings):
pass
class NA_Orianna_Bot_Quinn(Ratings):
pass
class NA_Orianna_Bot_Rakan(Ratings):
pass
class NA_Orianna_Bot_Rammus(Ratings):
pass
class NA_Orianna_Bot_RekSai(Ratings):
pass
class NA_Orianna_Bot_Renekton(Ratings):
pass
class NA_Orianna_Bot_Rengar(Ratings):
pass
class NA_Orianna_Bot_Riven(Ratings):
pass
class NA_Orianna_Bot_Rumble(Ratings):
pass
class NA_Orianna_Bot_Ryze(Ratings):
pass
class NA_Orianna_Bot_Sejuani(Ratings):
pass
class NA_Orianna_Bot_Shaco(Ratings):
pass
class NA_Orianna_Bot_Shen(Ratings):
pass
class NA_Orianna_Bot_Shyvana(Ratings):
pass
class NA_Orianna_Bot_Singed(Ratings):
pass
class NA_Orianna_Bot_Sion(Ratings):
pass
class NA_Orianna_Bot_Sivir(Ratings):
pass
class NA_Orianna_Bot_Skarner(Ratings):
pass
class NA_Orianna_Bot_Sona(Ratings):
pass
class NA_Orianna_Bot_Soraka(Ratings):
pass
class NA_Orianna_Bot_Swain(Ratings):
pass
class NA_Orianna_Bot_Syndra(Ratings):
pass
class NA_Orianna_Bot_TahmKench(Ratings):
pass
class NA_Orianna_Bot_Taliyah(Ratings):
pass
class NA_Orianna_Bot_Talon(Ratings):
pass
class NA_Orianna_Bot_Taric(Ratings):
pass
class NA_Orianna_Bot_Teemo(Ratings):
pass
class NA_Orianna_Bot_Thresh(Ratings):
pass
class NA_Orianna_Bot_Tristana(Ratings):
pass
class NA_Orianna_Bot_Trundle(Ratings):
pass
class NA_Orianna_Bot_Tryndamere(Ratings):
pass
class NA_Orianna_Bot_TwistedFate(Ratings):
pass
class NA_Orianna_Bot_Twitch(Ratings):
pass
class NA_Orianna_Bot_Udyr(Ratings):
pass
class NA_Orianna_Bot_Urgot(Ratings):
pass
class NA_Orianna_Bot_Varus(Ratings):
pass
class NA_Orianna_Bot_Vayne(Ratings):
pass
class NA_Orianna_Bot_Veigar(Ratings):
pass
class NA_Orianna_Bot_Velkoz(Ratings):
pass
class NA_Orianna_Bot_Vi(Ratings):
pass
class NA_Orianna_Bot_Viktor(Ratings):
pass
class NA_Orianna_Bot_Vladimir(Ratings):
pass
class NA_Orianna_Bot_Volibear(Ratings):
pass
class NA_Orianna_Bot_Warwick(Ratings):
pass
class NA_Orianna_Bot_Xayah(Ratings):
pass
class NA_Orianna_Bot_Xerath(Ratings):
pass
class NA_Orianna_Bot_XinZhao(Ratings):
pass
class NA_Orianna_Bot_Yasuo(Ratings):
pass
class NA_Orianna_Bot_Yorick(Ratings):
pass
class NA_Orianna_Bot_Zac(Ratings):
pass
class NA_Orianna_Bot_Zed(Ratings):
pass
class NA_Orianna_Bot_Ziggs(Ratings):
pass
class NA_Orianna_Bot_Zilean(Ratings):
pass
class NA_Orianna_Bot_Zyra(Ratings):
pass | 0.197754 | 0.076649 |
import re
from urllib.parse import urljoin
from flexget.utils.soup import get_soup
from ..base.request import check_network_state, NetworkState
from ..base.sign_in import check_final_state, SignState, Work
from ..utils.net_utils import get_module_name
from ..schema.unit3d import Unit3D
from ..utils import net_utils
from ..utils.value_hanlder import handle_join_date, handle_infinite
class MainClass(Unit3D):
URL = 'https://pt.hdpost.top'
USER_CLASSES = {
'uploaded': [109951162777600],
'days': [365]
}
@classmethod
def sign_in_build_schema(cls):
return {
get_module_name(cls): {
'type': 'object',
'properties': {
'cookie': {'type': 'string'},
'login': {
'type': 'object',
'properties': {
'username': {'type': 'string'},
'password': {'type': 'string'}
},
'additionalProperties': False
}
},
'additionalProperties': False
}
}
@classmethod
def reseed_build_schema(cls):
return {
get_module_name(cls): {
'type': 'object',
'properties': {
'rsskey': {'type': 'string'}
},
'additionalProperties': False
}
}
@classmethod
def reseed_build_entry(cls, entry, config, site, passkey, torrent_id):
download_page = site['download_page'].format(torrent_id=torrent_id, rsskey=passkey['rsskey'])
entry['url'] = urljoin(MainClass.URL, download_page)
def sign_in_build_login_workflow(self, entry, config):
return [
Work(
url='/login',
method=self.sign_in_by_get,
assert_state=(check_network_state, NetworkState.SUCCEED),
),
Work(
url='/login',
method=self.sign_in_by_login,
assert_state=(check_network_state, NetworkState.SUCCEED),
response_urls=['', '/pages/1'],
)
]
def sign_in_build_workflow(self, entry, config):
return [
Work(
url='/',
method=self.sign_in_by_get,
succeed_regex=[('<a class="top-nav__username" href="https://pt.hdpost.top/users/(.*?)">', 1)],
assert_state=(check_final_state, SignState.SUCCEED),
is_base_content=True,
response_urls=['', '/']
)
]
def sign_in_build_login_data(self, login, last_content):
login_page = get_soup(last_content)
hidden_input = login_page.select_one('#formContent > form > input[type=hidden]:nth-child(7)')
name = hidden_input.attrs['name']
value = hidden_input.attrs['value']
return {
'_token': re.search(r'(?<=name="_token" value=").+?(?=")', last_content).group(),
'username': login['username'],
'password': login['password'],
'remember': 'on',
'_captcha': re.search(r'(?<=name="_captcha" value=").+?(?=")', last_content).group(),
'_username': '',
name: value,
}
@property
def details_selector(self) -> dict:
selector = super().details_selector
net_utils.dict_merge(selector, {
'user_id': '/users/(.*?)/',
'detail_sources': {
'default': {
'do_not_strip': True,
'elements': {
'bar': 'ul.top-nav__ratio-bar',
'header': '.header',
'data_table': '.user-info'
}
}
},
'details': {
'uploaded': {
'regex': '上传.+?([\\d.]+ ?[ZEPTGMK]?iB)'
},
'downloaded': {
'regex': '下载.+?([\\d.]+ ?[ZEPTGMK]?iB)'
},
'share_ratio': {
'regex': '分享率.+?([\\d.]+)',
'handle': handle_infinite
},
'points': {
'regex': '魔力.+?(\\d[\\d,. ]*)',
'handle': self.handle_points
},
'join_date': {
'regex': '注册日期 (.*?\\d{4})',
'handle': handle_join_date
},
'seeding': {
'regex': '做种.+?(\\d+)'
},
'leeching': {
'regex': '吸血.+?(\\d+)'
},
'hr': {
'regex': '有效.+?(\\d+)'
}
}
})
return selector | ptsites/sites/hdpost.py | import re
from urllib.parse import urljoin
from flexget.utils.soup import get_soup
from ..base.request import check_network_state, NetworkState
from ..base.sign_in import check_final_state, SignState, Work
from ..utils.net_utils import get_module_name
from ..schema.unit3d import Unit3D
from ..utils import net_utils
from ..utils.value_hanlder import handle_join_date, handle_infinite
class MainClass(Unit3D):
URL = 'https://pt.hdpost.top'
USER_CLASSES = {
'uploaded': [109951162777600],
'days': [365]
}
@classmethod
def sign_in_build_schema(cls):
return {
get_module_name(cls): {
'type': 'object',
'properties': {
'cookie': {'type': 'string'},
'login': {
'type': 'object',
'properties': {
'username': {'type': 'string'},
'password': {'type': 'string'}
},
'additionalProperties': False
}
},
'additionalProperties': False
}
}
@classmethod
def reseed_build_schema(cls):
return {
get_module_name(cls): {
'type': 'object',
'properties': {
'rsskey': {'type': 'string'}
},
'additionalProperties': False
}
}
@classmethod
def reseed_build_entry(cls, entry, config, site, passkey, torrent_id):
download_page = site['download_page'].format(torrent_id=torrent_id, rsskey=passkey['rsskey'])
entry['url'] = urljoin(MainClass.URL, download_page)
def sign_in_build_login_workflow(self, entry, config):
return [
Work(
url='/login',
method=self.sign_in_by_get,
assert_state=(check_network_state, NetworkState.SUCCEED),
),
Work(
url='/login',
method=self.sign_in_by_login,
assert_state=(check_network_state, NetworkState.SUCCEED),
response_urls=['', '/pages/1'],
)
]
def sign_in_build_workflow(self, entry, config):
return [
Work(
url='/',
method=self.sign_in_by_get,
succeed_regex=[('<a class="top-nav__username" href="https://pt.hdpost.top/users/(.*?)">', 1)],
assert_state=(check_final_state, SignState.SUCCEED),
is_base_content=True,
response_urls=['', '/']
)
]
def sign_in_build_login_data(self, login, last_content):
login_page = get_soup(last_content)
hidden_input = login_page.select_one('#formContent > form > input[type=hidden]:nth-child(7)')
name = hidden_input.attrs['name']
value = hidden_input.attrs['value']
return {
'_token': re.search(r'(?<=name="_token" value=").+?(?=")', last_content).group(),
'username': login['username'],
'password': login['password'],
'remember': 'on',
'_captcha': re.search(r'(?<=name="_captcha" value=").+?(?=")', last_content).group(),
'_username': '',
name: value,
}
@property
def details_selector(self) -> dict:
selector = super().details_selector
net_utils.dict_merge(selector, {
'user_id': '/users/(.*?)/',
'detail_sources': {
'default': {
'do_not_strip': True,
'elements': {
'bar': 'ul.top-nav__ratio-bar',
'header': '.header',
'data_table': '.user-info'
}
}
},
'details': {
'uploaded': {
'regex': '上传.+?([\\d.]+ ?[ZEPTGMK]?iB)'
},
'downloaded': {
'regex': '下载.+?([\\d.]+ ?[ZEPTGMK]?iB)'
},
'share_ratio': {
'regex': '分享率.+?([\\d.]+)',
'handle': handle_infinite
},
'points': {
'regex': '魔力.+?(\\d[\\d,. ]*)',
'handle': self.handle_points
},
'join_date': {
'regex': '注册日期 (.*?\\d{4})',
'handle': handle_join_date
},
'seeding': {
'regex': '做种.+?(\\d+)'
},
'leeching': {
'regex': '吸血.+?(\\d+)'
},
'hr': {
'regex': '有效.+?(\\d+)'
}
}
})
return selector | 0.396535 | 0.193528 |
from datetime import datetime
from dateutil import tz
from pathlib import Path
from typing import List
from urllib.parse import quote
import daiquiri
from lxml import etree
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import pendulum
import requests
from sqlalchemy import create_engine
from sqlalchemy.orm.exc import NoResultFound
from webapp.config import Config
import webapp.db as db
logger = daiquiri.getLogger(__name__)
ABQ_TZ = tz.gettz("America/Denver")
def clean(text):
return " ".join(text.split())
def get_d1_date_uploaded(sysmeta_xml: str) -> str:
root = etree.fromstring(sysmeta_xml.encode('utf-8'))
date_uploaded = root.find('.//dateUploaded')
return date_uploaded.text
def get_d1_date_replica_verified(sysmeta_xml: str) -> str:
root = etree.fromstring(sysmeta_xml.encode('utf-8'))
date_verified = root.find('.//replicaVerified')
return date_verified.text
def get_d1_solr_count(solr_xml: str) -> int:
root = etree.fromstring(solr_xml.encode('utf-8'))
result = root.find('.//result')
return int(result.get('numFound'))
def get_d1_solr_result(pid: str, d1_url: str) -> tuple:
pid = quote(f'"{pid}"', safe='')
url = f'{d1_url}/query/solr/?start=0&rows=10&fl=id%2Ctitle%2CformatId&q=id%3A{pid}'
r = requests.get(url)
if r.status_code == requests.codes.ok:
return True, r.text
elif r.status_code == requests.codes.not_found:
return False, 'Not Found'
elif r.status_code == requests.codes.unauthorized:
return False, 'Unauthorized'
else:
return False, f'Unknown error with status code: {r.status_code}'
def get_d1_sysmeta(pid: str, d1_url: str) -> tuple:
pid = quote(pid, safe='')
url = f'{d1_url}/meta/{pid}'
r = requests.get(url)
if r.status_code == requests.codes.ok:
return True, r.text
elif r.status_code == requests.codes.not_found:
return False, 'Not Found'
elif r.status_code == requests.codes.unauthorized:
return False, 'Unauthorized'
else:
return False, f'Unknown error with status code: {r.status_code}'
def get_resource_counts(rid: str, start: str = None, end: str = None) -> int:
sql = (
"SELECT COUNT(*) FROM auditmanager.eventlog "
"WHERE servicemethod='<SERVICE_METHOD>' AND statuscode=200 "
"AND userid NOT LIKE '%%robot%%' AND resourceid='<RID>'"
)
if "/metadata/eml/" in rid:
service_method = "readMetadata"
elif "/report/eml/" in rid:
service_method = "readDataPackageReport"
else:
service_method = "readDataEntity"
sql = sql.replace("<SERVICE_METHOD>", service_method)
sql = sql.replace("<RID>", rid.replace("%", "%%"))
if start is not None:
sql += f" AND entrytime >= '{start}'"
if end is not None:
sql += f" AND entrytime <= '{end}'"
rs = db.select_all(Config.DB_HOST_AUDIT, sql)
return rs[0][0]
def get_resource_downloads(rid: str, start: str = None, end: str = None):
sql = (
"SELECT entrytime FROM auditmanager.eventlog "
"WHERE servicemethod='<SERVICE_METHOD>' AND statuscode=200 "
"AND userid NOT LIKE '%%robot%%' AND resourceid='<RID>' "
)
if "/metadata/eml/" in rid:
service_method = "readMetadata"
elif "/report/eml/" in rid:
service_method = "readDataPackageReport"
else:
service_method = "readDataEntity"
sql = sql.replace("<SERVICE_METHOD>", service_method)
sql = sql.replace("<RID>", rid.replace("%", "%%"))
if start is not None:
sql += f"AND entrytime >= '{start}' "
if end is not None:
sql += f"AND entrytime <= '{end}' "
sql += "ORDER BY entrytime ASC"
rs = db.select_all(Config.DB_HOST_AUDIT, sql)
return rs
def get_entity_name(dataset, rid: str):
name = None
urls = dataset.findall("./physical/distribution/online/url")
for url in urls:
if rid == url.text.strip():
name = dataset.find("./entityName").text.strip()
break
return name
def get_package_doi(pid: list, auth: tuple = None) -> str:
url = Config.PASTA_URL + f'/doi/eml/{pid[0]}/{pid[1]}/{pid[2]}'
r = requests.get(url=url, auth=auth)
if r.status_code == requests.codes.ok:
return r.text
else:
return 'None'
def get_resource_create_date(resource_xml: str) -> str:
root = etree.fromstring(resource_xml.encode('utf-8'))
date_created = root.find('.//dateCreated')
return date_created.text
def get_package_eml(pid: list, auth: tuple = None) -> str:
url = Config.PASTA_URL + f'/metadata/eml/{pid[0]}/{pid[1]}/{pid[2]}'
r = requests.get(url=url, auth=auth)
r.raise_for_status()
return r.text
def get_resource_metadata(pid: list, auth: tuple = None) -> str:
url = Config.PASTA_URL + f'/rmd/eml/{pid[0]}/{pid[1]}/{pid[2]}'
r = requests.get(url=url, auth=auth)
r.raise_for_status()
return r.text
def get_resources(pid: list, auth: tuple = None) -> tuple:
url = Config.PASTA_URL + f'/eml/{pid[0]}/{pid[1]}/{pid[2]}'
r = requests.get(url=url, auth=auth)
if r.status_code == requests.codes.ok:
return True, r.text
elif r.status_code == requests.codes.not_found:
return False, 'Not Found'
elif r.status_code == requests.codes.unauthorized:
return False, 'Unauthorized'
else:
return False, f'Unknown error with status code: {r.status_code}'
def is_real_package(pid: list, auth: tuple = None):
is_real = False
url = Config.PASTA_URL + f'/rmd/eml/{pid[0]}/{pid[1]}/{pid[2]}'
r = requests.get(url=url, auth=auth)
if r.status_code == requests.codes.ok:
is_real = True
return is_real
def plot(stats: List) -> str:
first_download = stats[0][0]
now = pendulum.now()
delta = now - first_download.astimezone(tz=ABQ_TZ)
days = int(delta.total_days())
_ = pendulum.datetime(
year=now.year, month=now.month, day=now.day
)
dt_tbl = {}
for day in range(days + 2):
dt_tbl[_.subtract(days=day)] = 0
for result in stats:
p = pendulum.instance(result[0])
_ = pendulum.datetime(year=p.year, month=p.month, day=p.day)
dt_tbl[_] += 1
dt = []
count = []
for _ in dt_tbl:
dt.append(datetime.strptime(_.to_datetime_string(), "%Y-%m-%d %H:%M:%S"))
count.append(dt_tbl[_])
p = Path(Config.STATIC)
if not p.exists():
p.mkdir(parents=True)
file_name = f"{now.timestamp()}.png"
file_path = p / file_name
plt.figure(figsize=(8.0, 2.4), tight_layout=True)
plt.plot(dt, count, "g")
# plt.xlabel("Date")
plt.ylabel("Downloads")
plt.gca().set_ylim(bottom=0.0)
plt.gca().yaxis.set_major_locator(MaxNLocator(integer=True))
if sum(count) == 0:
plt.gca().set_yticks([0.0, 1.0])
plt.gca().grid(True)
plt.gcf().autofmt_xdate()
plt.savefig(file_path)
plt.close()
return f"/static/{file_name}"
def query(host: str, sql: str):
rs = None
db = (
f"{Config.DB_DRIVER}://"
f"{Config.DB_USER}:"
f"{Config.DB_PW}@"
f"{host}/"
f"{Config.DB_DB}"
)
engine = create_engine(db)
try:
with engine.connect() as connection:
rs = connection.execute(sql).fetchall()
except NoResultFound as e:
logger.warning(e)
rs = list()
except Exception as e:
logger.error(sql)
logger.error(e)
raise e
return rs
class PackageStatus(object):
def __init__(self, package_identifier: str):
self._package_identifier = package_identifier.strip()
self._pid = self._package_identifier.split('.')
self._is_real = is_real_package(self._pid)
if self._is_real:
eml = get_package_eml(self._pid)
self._eml = etree.fromstring(eml.encode("utf-8"))
self._date_created_mt, self._date_created_utc = self.get_pasta_create_date()
self._package_resources = self.get_pasta_resources()
self._package_resource_downloads = self.get_resource_downloads()
self._gmn_host = self.get_gmn_host()
self._gmn_url = self.get_gmn_url()
self._gmn_resources = self.get_gmn_resource_times()
self._cn_url = 'https://cn.dataone.org/cn/v2'
self._cn_sync_times = self.get_cn_sync_times()
self._cn_index_status = self.get_cn_indexed_status()
self._title = self._get_title()
@property
def title(self):
return self._title
@property
def cn_index_status(self):
return self._cn_index_status
@property
def cn_sync_times(self):
return self._cn_sync_times
@property
def cn_url(self):
return self._cn_url
@property
def date_created_mt(self):
return self._date_created_mt
@property
def date_created_utc(self):
return self._date_created_utc
@property
def gmn_resources(self):
return self._gmn_resources
@property
def gmn_host(self):
return self._gmn_host
@property
def gmn_url(self):
return self._gmn_url
@property
def is_real(self):
return self._is_real
@property
def package_identifier(self):
return self._package_identifier
@property
def package_resources(self):
return self._package_resources
@property
def resource_downloads(self):
return self._package_resource_downloads
def get_cn_sync_times(self):
resources = dict()
for resource in self._package_resources[:-1]:
success, response = get_d1_sysmeta(resource, self._cn_url)
if success:
dt_utc = pendulum.parse(get_d1_date_replica_verified(response))
date_verified = dt_utc.to_iso8601_string()
resources[resource] = date_verified
else:
resources[resource] = response
return resources
def get_cn_indexed_status(self):
status = False
for resource in self._package_resources[:-1]:
if 'metadata/eml' in resource:
break
success, response = get_d1_solr_result(resource, self._cn_url)
if success:
solr_count = get_d1_solr_count(response)
if solr_count >= 1:
status = True
return status
def _get_title(self) -> str:
title = clean(self._eml.find("./dataset/title").xpath("string()"))
return title
def get_gmn_resource_times(self):
resources = dict()
for resource in self._package_resources[:-1]:
success, response = get_d1_sysmeta(resource, self._gmn_url)
if success:
dt_utc = pendulum.parse(get_d1_date_uploaded(response))
date_uploaded = dt_utc.to_iso8601_string()
resources[resource] = date_uploaded
else:
resources[resource] = response
return resources
def get_gmn_host(self):
if self._pid[0] == 'edi':
gmn_host = 'EDI'
else:
gmn_host = 'LTER'
return gmn_host
def get_gmn_url(self):
if self._pid[0] == 'edi':
gmn_host = 'edirepository.org'
else:
gmn_host = 'lternet.edu'
return f'https://gmn.{gmn_host}/mn/v2'
def get_pasta_create_date(self):
xml = get_resource_metadata(self._pid)
date_created_raw = get_resource_create_date(xml)
local_tz = 'America/Denver'
utc_tz = pendulum.timezone('UTC')
dt_mt = pendulum.parse(date_created_raw, tz=local_tz)
dt_utc = pendulum.instance(utc_tz.convert(dt_mt))
date_created_mt = dt_mt.to_iso8601_string()
date_created_utc = pendulum.parse(
dt_utc.to_iso8601_string()).to_iso8601_string()
return date_created_mt, date_created_utc
def get_pasta_resources(self):
resources = list()
success, response = get_resources(self._pid)
if success:
resources = response.strip().split('\n')
resources.append(resources[-1])
resources[-2] = get_package_doi(self._pid)
return resources
def get_resource_downloads(self):
resource_downloads = dict()
for resource in self._package_resources[:-2]:
count = get_resource_counts(resource)
series = get_resource_downloads(resource)
plot_name = plot(series)
if "/data/eml/" in resource:
name = self.get_entity_name(resource)
elif "/metadata/eml/" in resource:
name = "EML Metadata"
elif "/report/eml/" in resource:
name = "Quality Report"
else:
name = ""
resource_downloads[resource] = (count, plot_name, name)
return resource_downloads
    def get_entity_name(self, rid: str) -> str:
        """Resolve *rid* (a data resource URL) to its EML entity name.

        Searches dataTable, otherEntity, spatialRaster and spatialVector
        entities, in that order, returning the first match.
        """
        name = None
        datatables = self._eml.findall("./dataset/dataTable")
        for datatable in datatables:
            name = get_entity_name(datatable, rid)
            if name is not None: return name
        otherentities = self._eml.findall("./dataset/otherEntity")
        for otherentity in otherentities:
            name = get_entity_name(otherentity, rid)
            if name is not None: return name
        spatialrasters = self._eml.findall("./dataset/spatialRaster")
        for spatialraster in spatialrasters:
            name = get_entity_name(spatialraster, rid)
            if name is not None: return name
        spatialvectors = self._eml.findall("./dataset/spatialVector")
        # NOTE(review): unlike the loops above, this last loop has no early
        # return, so a match can be clobbered (reset to None) by a later
        # non-matching spatialVector — looks like a latent bug.
        for spatialvector in spatialvectors:
            name = get_entity_name(spatialvector, rid)
return name | webapp/reports/package_tracker.py | from datetime import datetime
from dateutil import tz
from pathlib import Path
from typing import List
from urllib.parse import quote
import daiquiri
from lxml import etree
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import pendulum
import requests
from sqlalchemy import create_engine
from sqlalchemy.orm.exc import NoResultFound
from webapp.config import Config
import webapp.db as db
logger = daiquiri.getLogger(__name__)
ABQ_TZ = tz.gettz("America/Denver")
def clean(text):
    """Collapse every run of whitespace in *text* to a single space and
    strip leading/trailing whitespace."""
    parts = text.split()
    return " ".join(parts)
def get_d1_date_uploaded(sysmeta_xml: str) -> str:
    """Return the <dateUploaded> value from a DataONE sysmeta document."""
    doc = etree.fromstring(sysmeta_xml.encode('utf-8'))
    return doc.find('.//dateUploaded').text
def get_d1_date_replica_verified(sysmeta_xml: str) -> str:
    """Return the <replicaVerified> value from a DataONE sysmeta document."""
    doc = etree.fromstring(sysmeta_xml.encode('utf-8'))
    return doc.find('.//replicaVerified').text
def get_d1_solr_count(solr_xml: str) -> int:
    """Return the numFound attribute of the <result> element in a Solr
    XML response, as an int."""
    doc = etree.fromstring(solr_xml.encode('utf-8'))
    return int(doc.find('.//result').get('numFound'))
def get_d1_solr_result(pid: str, d1_url: str) -> tuple:
    """Query the DataONE Solr index for *pid*.

    Returns (True, response XML) on HTTP 200, otherwise (False, reason).
    """
    # Quote the pid, wrapped in double quotes so Solr treats it as a phrase.
    quoted = quote(f'"{pid}"', safe='')
    url = f'{d1_url}/query/solr/?start=0&rows=10&fl=id%2Ctitle%2CformatId&q=id%3A{quoted}'
    r = requests.get(url)
    if r.status_code == requests.codes.ok:
        return True, r.text
    if r.status_code == requests.codes.not_found:
        return False, 'Not Found'
    if r.status_code == requests.codes.unauthorized:
        return False, 'Unauthorized'
    return False, f'Unknown error with status code: {r.status_code}'
def get_d1_sysmeta(pid: str, d1_url: str) -> tuple:
    """Fetch the DataONE system metadata for *pid* from *d1_url*.

    Returns (True, sysmeta XML) on HTTP 200, otherwise (False, reason).
    """
    url = f'{d1_url}/meta/{quote(pid, safe="")}'
    r = requests.get(url)
    if r.status_code == requests.codes.ok:
        return True, r.text
    if r.status_code == requests.codes.not_found:
        return False, 'Not Found'
    if r.status_code == requests.codes.unauthorized:
        return False, 'Unauthorized'
    return False, f'Unknown error with status code: {r.status_code}'
def get_resource_counts(rid: str, start: str = None, end: str = None) -> int:
    """Count successful (HTTP 200) non-robot downloads of resource *rid*
    recorded in the PASTA audit log.

    start/end, when given, bound the time window (inclusive ISO strings).
    """
    sql = (
        "SELECT COUNT(*) FROM auditmanager.eventlog "
        "WHERE servicemethod='<SERVICE_METHOD>' AND statuscode=200 "
        "AND userid NOT LIKE '%%robot%%' AND resourceid='<RID>'"
    )
    # Pick the audit service method matching the resource type.
    if "/metadata/eml/" in rid:
        service_method = "readMetadata"
    elif "/report/eml/" in rid:
        service_method = "readDataPackageReport"
    else:
        service_method = "readDataEntity"
    sql = sql.replace("<SERVICE_METHOD>", service_method)
    # '%' is doubled so the DB driver does not treat it as a format marker.
    # NOTE(review): values are interpolated straight into the SQL text;
    # rid/start/end come from internal code paths, but parameterized
    # queries would be safer.
    sql = sql.replace("<RID>", rid.replace("%", "%%"))
    if start is not None:
        sql += f" AND entrytime >= '{start}'"
    if end is not None:
        sql += f" AND entrytime <= '{end}'"
    rs = db.select_all(Config.DB_HOST_AUDIT, sql)
    # Single row, single column: the COUNT(*) value.
    return rs[0][0]
def get_resource_downloads(rid: str, start: str = None, end: str = None):
    """Return the entrytime of every successful non-robot download of
    *rid* from the PASTA audit log, oldest first.

    start/end, when given, bound the time window (inclusive ISO strings).
    """
    sql = (
        "SELECT entrytime FROM auditmanager.eventlog "
        "WHERE servicemethod='<SERVICE_METHOD>' AND statuscode=200 "
        "AND userid NOT LIKE '%%robot%%' AND resourceid='<RID>' "
    )
    # Pick the audit service method matching the resource type.
    if "/metadata/eml/" in rid:
        service_method = "readMetadata"
    elif "/report/eml/" in rid:
        service_method = "readDataPackageReport"
    else:
        service_method = "readDataEntity"
    sql = sql.replace("<SERVICE_METHOD>", service_method)
    # '%' is doubled so the DB driver does not treat it as a format marker.
    # NOTE(review): string-built SQL — see get_resource_counts.
    sql = sql.replace("<RID>", rid.replace("%", "%%"))
    if start is not None:
        sql += f"AND entrytime >= '{start}' "
    if end is not None:
        sql += f"AND entrytime <= '{end}' "
    sql += "ORDER BY entrytime ASC"
    rs = db.select_all(Config.DB_HOST_AUDIT, sql)
    return rs
def get_entity_name(dataset, rid: str):
    """Return the stripped <entityName> of *dataset* when one of its
    distribution URLs equals *rid*; otherwise None."""
    for url in dataset.findall("./physical/distribution/online/url"):
        if url.text.strip() == rid:
            return dataset.find("./entityName").text.strip()
    return None
def get_package_doi(pid: list, auth: tuple = None) -> str:
    """Fetch the package DOI from PASTA; the literal string 'None' is
    returned on any non-200 response."""
    url = Config.PASTA_URL + f'/doi/eml/{pid[0]}/{pid[1]}/{pid[2]}'
    response = requests.get(url=url, auth=auth)
    return response.text if response.status_code == requests.codes.ok else 'None'
def get_resource_create_date(resource_xml: str) -> str:
    """Return <dateCreated> from a PASTA resource-metadata document."""
    doc = etree.fromstring(resource_xml.encode('utf-8'))
    return doc.find('.//dateCreated').text
def get_package_eml(pid: list, auth: tuple = None) -> str:
    """Download the package's EML metadata; raises on HTTP error."""
    url = Config.PASTA_URL + f'/metadata/eml/{pid[0]}/{pid[1]}/{pid[2]}'
    response = requests.get(url=url, auth=auth)
    response.raise_for_status()
    return response.text
def get_resource_metadata(pid: list, auth: tuple = None) -> str:
    """Download the package's resource metadata (rmd); raises on HTTP error."""
    url = Config.PASTA_URL + f'/rmd/eml/{pid[0]}/{pid[1]}/{pid[2]}'
    response = requests.get(url=url, auth=auth)
    response.raise_for_status()
    return response.text
def get_resources(pid: list, auth: tuple = None) -> tuple:
    """Fetch the package's resource listing from PASTA.

    Returns (True, listing text) on HTTP 200, otherwise (False, reason).
    """
    url = Config.PASTA_URL + f'/eml/{pid[0]}/{pid[1]}/{pid[2]}'
    r = requests.get(url=url, auth=auth)
    if r.status_code == requests.codes.ok:
        return True, r.text
    if r.status_code == requests.codes.not_found:
        return False, 'Not Found'
    if r.status_code == requests.codes.unauthorized:
        return False, 'Unauthorized'
    return False, f'Unknown error with status code: {r.status_code}'
def is_real_package(pid: list, auth: tuple = None):
    """Return True when PASTA has resource metadata for the package id."""
    url = Config.PASTA_URL + f'/rmd/eml/{pid[0]}/{pid[1]}/{pid[2]}'
    response = requests.get(url=url, auth=auth)
    return response.status_code == requests.codes.ok
def plot(stats: List) -> str:
    """Render a downloads-per-day line chart for *stats* and return the
    web path ("/static/<name>.png") of the saved image.

    *stats* is the audit result set from get_resource_downloads(): rows
    of (entrytime,) ordered oldest first; it must be non-empty.
    """
    first_download = stats[0][0]
    now = pendulum.now()
    # Chart span: from the first recorded download until today.
    delta = now - first_download.astimezone(tz=ABQ_TZ)
    days = int(delta.total_days())
    _ = pendulum.datetime(
        year=now.year, month=now.month, day=now.day
    )
    # Pre-seed one zero-count bucket per midnight in the span (+2 for
    # boundary slack).
    dt_tbl = {}
    for day in range(days + 2):
        dt_tbl[_.subtract(days=day)] = 0
    for result in stats:
        p = pendulum.instance(result[0])
        _ = pendulum.datetime(year=p.year, month=p.month, day=p.day)
        # NOTE(review): raises KeyError for a record whose midnight falls
        # outside the pre-built range — relies on stats[0] being the
        # oldest record and on consistent timezones; confirm at call site.
        dt_tbl[_] += 1
    dt = []
    count = []
    for _ in dt_tbl:
        dt.append(datetime.strptime(_.to_datetime_string(), "%Y-%m-%d %H:%M:%S"))
        count.append(dt_tbl[_])
    p = Path(Config.STATIC)
    if not p.exists():
        p.mkdir(parents=True)
    # Timestamped file name avoids collisions between requests.
    file_name = f"{now.timestamp()}.png"
    file_path = p / file_name
    plt.figure(figsize=(8.0, 2.4), tight_layout=True)
    plt.plot(dt, count, "g")
    # plt.xlabel("Date")
    plt.ylabel("Downloads")
    plt.gca().set_ylim(bottom=0.0)
    plt.gca().yaxis.set_major_locator(MaxNLocator(integer=True))
    if sum(count) == 0:
        # Force a readable axis when there are no downloads at all.
        plt.gca().set_yticks([0.0, 1.0])
    plt.gca().grid(True)
    plt.gcf().autofmt_xdate()
    plt.savefig(file_path)
    plt.close()
    return f"/static/{file_name}"
def query(host: str, sql: str):
    """Run *sql* against the configured database on *host*.

    Returns the fetched rows; an empty list when the ORM reports no
    result.  Any other error is logged (with the offending SQL) and
    re-raised to the caller.
    """
    rs = None
    # Renamed from `db` to avoid shadowing the module-level
    # `import webapp.db as db`.
    db_url = (
        f"{Config.DB_DRIVER}://"
        f"{Config.DB_USER}:"
        f"{Config.DB_PW}@"
        f"{host}/"
        f"{Config.DB_DB}"
    )
    engine = create_engine(db_url)
    try:
        with engine.connect() as connection:
            rs = connection.execute(sql).fetchall()
    except NoResultFound as e:
        logger.warning(e)
        rs = list()
    except Exception:
        logger.error(sql)
        logger.exception("query failed")
        # Bare raise preserves the original traceback (unlike `raise e`).
        raise
    return rs
class PackageStatus(object):
    """Status of a data package across PASTA, the EDI/LTER Generic
    Member Node (GMN), and the DataONE Coordinating Node (CN).

    All state is gathered eagerly in __init__ (several HTTP requests)
    and exposed through read-only properties.
    """
    def __init__(self, package_identifier: str):
        # Identifier is 'scope.identifier.revision', e.g. 'edi.23.1'.
        self._package_identifier = package_identifier.strip()
        self._pid = self._package_identifier.split('.')
        self._is_real = is_real_package(self._pid)
        if self._is_real:
            eml = get_package_eml(self._pid)
            self._eml = etree.fromstring(eml.encode("utf-8"))
            self._date_created_mt, self._date_created_utc = self.get_pasta_create_date()
            self._package_resources = self.get_pasta_resources()
            self._package_resource_downloads = self.get_resource_downloads()
            self._gmn_host = self.get_gmn_host()
            self._gmn_url = self.get_gmn_url()
            self._gmn_resources = self.get_gmn_resource_times()
            self._cn_url = 'https://cn.dataone.org/cn/v2'
            self._cn_sync_times = self.get_cn_sync_times()
            self._cn_index_status = self.get_cn_indexed_status()
            self._title = self._get_title()
    # Read-only views of the state gathered above.  NOTE(review): most of
    # these attributes exist only when is_real is True — accessing them
    # for a non-existent package raises AttributeError.
    @property
    def title(self):
        return self._title
    @property
    def cn_index_status(self):
        return self._cn_index_status
    @property
    def cn_sync_times(self):
        return self._cn_sync_times
    @property
    def cn_url(self):
        return self._cn_url
    @property
    def date_created_mt(self):
        return self._date_created_mt
    @property
    def date_created_utc(self):
        return self._date_created_utc
    @property
    def gmn_resources(self):
        return self._gmn_resources
    @property
    def gmn_host(self):
        return self._gmn_host
    @property
    def gmn_url(self):
        return self._gmn_url
    @property
    def is_real(self):
        return self._is_real
    @property
    def package_identifier(self):
        return self._package_identifier
    @property
    def package_resources(self):
        return self._package_resources
    @property
    def resource_downloads(self):
        return self._package_resource_downloads
def get_cn_sync_times(self):
resources = dict()
for resource in self._package_resources[:-1]:
success, response = get_d1_sysmeta(resource, self._cn_url)
if success:
dt_utc = pendulum.parse(get_d1_date_replica_verified(response))
date_verified = dt_utc.to_iso8601_string()
resources[resource] = date_verified
else:
resources[resource] = response
return resources
    def get_cn_indexed_status(self):
        """Return True when the package's metadata document is present
        in the DataONE CN Solr index."""
        status = False
        # Locate the metadata resource; the loop leaves `resource` bound
        # to it (or to the last resource when none matches).
        for resource in self._package_resources[:-1]:
            if 'metadata/eml' in resource:
                break
        # NOTE(review): the dump this was recovered from lost indentation;
        # the Solr query is assumed to run once, after the loop, against
        # the resource found above — confirm against the original file.
        success, response = get_d1_solr_result(resource, self._cn_url)
        if success:
            solr_count = get_d1_solr_count(response)
            if solr_count >= 1:
                status = True
        return status
def _get_title(self) -> str:
title = clean(self._eml.find("./dataset/title").xpath("string()"))
return title
def get_gmn_resource_times(self):
resources = dict()
for resource in self._package_resources[:-1]:
success, response = get_d1_sysmeta(resource, self._gmn_url)
if success:
dt_utc = pendulum.parse(get_d1_date_uploaded(response))
date_uploaded = dt_utc.to_iso8601_string()
resources[resource] = date_uploaded
else:
resources[resource] = response
return resources
def get_gmn_host(self):
if self._pid[0] == 'edi':
gmn_host = 'EDI'
else:
gmn_host = 'LTER'
return gmn_host
def get_gmn_url(self):
if self._pid[0] == 'edi':
gmn_host = 'edirepository.org'
else:
gmn_host = 'lternet.edu'
return f'https://gmn.{gmn_host}/mn/v2'
def get_pasta_create_date(self):
xml = get_resource_metadata(self._pid)
date_created_raw = get_resource_create_date(xml)
local_tz = 'America/Denver'
utc_tz = pendulum.timezone('UTC')
dt_mt = pendulum.parse(date_created_raw, tz=local_tz)
dt_utc = pendulum.instance(utc_tz.convert(dt_mt))
date_created_mt = dt_mt.to_iso8601_string()
date_created_utc = pendulum.parse(
dt_utc.to_iso8601_string()).to_iso8601_string()
return date_created_mt, date_created_utc
def get_pasta_resources(self):
resources = list()
success, response = get_resources(self._pid)
if success:
resources = response.strip().split('\n')
resources.append(resources[-1])
resources[-2] = get_package_doi(self._pid)
return resources
def get_resource_downloads(self):
resource_downloads = dict()
for resource in self._package_resources[:-2]:
count = get_resource_counts(resource)
series = get_resource_downloads(resource)
plot_name = plot(series)
if "/data/eml/" in resource:
name = self.get_entity_name(resource)
elif "/metadata/eml/" in resource:
name = "EML Metadata"
elif "/report/eml/" in resource:
name = "Quality Report"
else:
name = ""
resource_downloads[resource] = (count, plot_name, name)
return resource_downloads
    def get_entity_name(self, rid: str) -> str:
        """Resolve *rid* (a data resource URL) to its EML entity name.

        Searches dataTable, otherEntity, spatialRaster and spatialVector
        entities, in that order, returning the first match.
        """
        name = None
        datatables = self._eml.findall("./dataset/dataTable")
        for datatable in datatables:
            name = get_entity_name(datatable, rid)
            if name is not None: return name
        otherentities = self._eml.findall("./dataset/otherEntity")
        for otherentity in otherentities:
            name = get_entity_name(otherentity, rid)
            if name is not None: return name
        spatialrasters = self._eml.findall("./dataset/spatialRaster")
        for spatialraster in spatialrasters:
            name = get_entity_name(spatialraster, rid)
            if name is not None: return name
        spatialvectors = self._eml.findall("./dataset/spatialVector")
        # NOTE(review): unlike the loops above, this last loop has no early
        # return, so a match can be clobbered (reset to None) by a later
        # non-matching spatialVector — looks like a latent bug.
        for spatialvector in spatialvectors:
            name = get_entity_name(spatialvector, rid)
return name | 0.441673 | 0.099077 |
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from embyapi.api_client import ApiClient
class MediaInfoServiceApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_items_by_id_playbackinfo(self, id, user_id, **kwargs): # noqa: E501
"""Gets live playback media info for an item # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_items_by_id_playbackinfo(id, user_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: Item Id (required)
:param str user_id: User Id (required)
:return: MediaInfoPlaybackInfoResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_items_by_id_playbackinfo_with_http_info(id, user_id, **kwargs) # noqa: E501
else:
(data) = self.get_items_by_id_playbackinfo_with_http_info(id, user_id, **kwargs) # noqa: E501
return data
def get_items_by_id_playbackinfo_with_http_info(self, id, user_id, **kwargs): # noqa: E501
"""Gets live playback media info for an item # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_items_by_id_playbackinfo_with_http_info(id, user_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: Item Id (required)
:param str user_id: User Id (required)
:return: MediaInfoPlaybackInfoResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'user_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_items_by_id_playbackinfo" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_items_by_id_playbackinfo`") # noqa: E501
# verify the required parameter 'user_id' is set
if ('user_id' not in params or
params['user_id'] is None):
raise ValueError("Missing the required parameter `user_id` when calling `get_items_by_id_playbackinfo`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['Id'] = params['id'] # noqa: E501
query_params = []
if 'user_id' in params:
query_params.append(('UserId', params['user_id'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['apikeyauth', 'embyauth'] # noqa: E501
return self.api_client.call_api(
'/Items/{Id}/PlaybackInfo', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MediaInfoPlaybackInfoResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_playback_bitratetest(self, size, **kwargs): # noqa: E501
"""get_playback_bitratetest # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_playback_bitratetest(size, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int size: Size (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_playback_bitratetest_with_http_info(size, **kwargs) # noqa: E501
else:
(data) = self.get_playback_bitratetest_with_http_info(size, **kwargs) # noqa: E501
return data
def get_playback_bitratetest_with_http_info(self, size, **kwargs): # noqa: E501
"""get_playback_bitratetest # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_playback_bitratetest_with_http_info(size, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int size: Size (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['size'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_playback_bitratetest" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'size' is set
if ('size' not in params or
params['size'] is None):
raise ValueError("Missing the required parameter `size` when calling `get_playback_bitratetest`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'size' in params:
query_params.append(('Size', params['size'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['apikeyauth', 'embyauth'] # noqa: E501
return self.api_client.call_api(
'/Playback/BitrateTest', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def post_items_by_id_playbackinfo(self, body, id, **kwargs): # noqa: E501
"""Gets live playback media info for an item # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_items_by_id_playbackinfo(body, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MediaInfoPlaybackInfoRequest body: PlaybackInfoRequest: (required)
:param str id: (required)
:return: MediaInfoPlaybackInfoResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.post_items_by_id_playbackinfo_with_http_info(body, id, **kwargs) # noqa: E501
else:
(data) = self.post_items_by_id_playbackinfo_with_http_info(body, id, **kwargs) # noqa: E501
return data
def post_items_by_id_playbackinfo_with_http_info(self, body, id, **kwargs): # noqa: E501
"""Gets live playback media info for an item # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_items_by_id_playbackinfo_with_http_info(body, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MediaInfoPlaybackInfoRequest body: PlaybackInfoRequest: (required)
:param str id: (required)
:return: MediaInfoPlaybackInfoResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_items_by_id_playbackinfo" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `post_items_by_id_playbackinfo`") # noqa: E501
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `post_items_by_id_playbackinfo`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['Id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['apikeyauth', 'embyauth'] # noqa: E501
return self.api_client.call_api(
'/Items/{Id}/PlaybackInfo', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MediaInfoPlaybackInfoResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def post_livestreams_close(self, live_stream_id, **kwargs): # noqa: E501
"""Closes a media source # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_livestreams_close(live_stream_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str live_stream_id: LiveStreamId (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.post_livestreams_close_with_http_info(live_stream_id, **kwargs) # noqa: E501
else:
(data) = self.post_livestreams_close_with_http_info(live_stream_id, **kwargs) # noqa: E501
return data
def post_livestreams_close_with_http_info(self, live_stream_id, **kwargs): # noqa: E501
"""Closes a media source # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_livestreams_close_with_http_info(live_stream_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str live_stream_id: LiveStreamId (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['live_stream_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_livestreams_close" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'live_stream_id' is set
if ('live_stream_id' not in params or
params['live_stream_id'] is None):
raise ValueError("Missing the required parameter `live_stream_id` when calling `post_livestreams_close`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'live_stream_id' in params:
query_params.append(('LiveStreamId', params['live_stream_id'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['apikeyauth', 'embyauth'] # noqa: E501
return self.api_client.call_api(
'/LiveStreams/Close', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def post_livestreams_mediainfo(self, live_stream_id, **kwargs): # noqa: E501
"""Closes a media source # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_livestreams_mediainfo(live_stream_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str live_stream_id: LiveStreamId (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.post_livestreams_mediainfo_with_http_info(live_stream_id, **kwargs) # noqa: E501
else:
(data) = self.post_livestreams_mediainfo_with_http_info(live_stream_id, **kwargs) # noqa: E501
return data
def post_livestreams_mediainfo_with_http_info(self, live_stream_id, **kwargs): # noqa: E501
"""Closes a media source # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_livestreams_mediainfo_with_http_info(live_stream_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str live_stream_id: LiveStreamId (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['live_stream_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_livestreams_mediainfo" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'live_stream_id' is set
if ('live_stream_id' not in params or
params['live_stream_id'] is None):
raise ValueError("Missing the required parameter `live_stream_id` when calling `post_livestreams_mediainfo`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'live_stream_id' in params:
query_params.append(('LiveStreamId', params['live_stream_id'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['apikeyauth', 'embyauth'] # noqa: E501
return self.api_client.call_api(
'/LiveStreams/MediaInfo', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def post_livestreams_open(self, body, **kwargs): # noqa: E501
"""Opens a media source # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_livestreams_open(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MediaInfoLiveStreamRequest body: LiveStreamRequest: (required)
:return: MediaInfoLiveStreamResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.post_livestreams_open_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.post_livestreams_open_with_http_info(body, **kwargs) # noqa: E501
return data
def post_livestreams_open_with_http_info(self, body, **kwargs): # noqa: E501
"""Opens a media source # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_livestreams_open_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MediaInfoLiveStreamRequest body: LiveStreamRequest: (required)
:return: MediaInfoLiveStreamResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_livestreams_open" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `post_livestreams_open`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['apikeyauth', 'embyauth'] # noqa: E501
return self.api_client.call_api(
'/LiveStreams/Open', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MediaInfoLiveStreamResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats) | embyapi/api/media_info_service_api.py | from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from embyapi.api_client import ApiClient
class MediaInfoServiceApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_items_by_id_playbackinfo(self, id, user_id, **kwargs): # noqa: E501
"""Gets live playback media info for an item # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_items_by_id_playbackinfo(id, user_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: Item Id (required)
:param str user_id: User Id (required)
:return: MediaInfoPlaybackInfoResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_items_by_id_playbackinfo_with_http_info(id, user_id, **kwargs) # noqa: E501
else:
(data) = self.get_items_by_id_playbackinfo_with_http_info(id, user_id, **kwargs) # noqa: E501
return data
def get_items_by_id_playbackinfo_with_http_info(self, id, user_id, **kwargs): # noqa: E501
"""Gets live playback media info for an item # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_items_by_id_playbackinfo_with_http_info(id, user_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: Item Id (required)
:param str user_id: User Id (required)
:return: MediaInfoPlaybackInfoResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'user_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_items_by_id_playbackinfo" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_items_by_id_playbackinfo`") # noqa: E501
# verify the required parameter 'user_id' is set
if ('user_id' not in params or
params['user_id'] is None):
raise ValueError("Missing the required parameter `user_id` when calling `get_items_by_id_playbackinfo`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['Id'] = params['id'] # noqa: E501
query_params = []
if 'user_id' in params:
query_params.append(('UserId', params['user_id'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['apikeyauth', 'embyauth'] # noqa: E501
return self.api_client.call_api(
'/Items/{Id}/PlaybackInfo', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MediaInfoPlaybackInfoResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_playback_bitratetest(self, size, **kwargs): # noqa: E501
"""get_playback_bitratetest # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_playback_bitratetest(size, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int size: Size (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_playback_bitratetest_with_http_info(size, **kwargs) # noqa: E501
else:
(data) = self.get_playback_bitratetest_with_http_info(size, **kwargs) # noqa: E501
return data
def get_playback_bitratetest_with_http_info(self, size, **kwargs): # noqa: E501
"""get_playback_bitratetest # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_playback_bitratetest_with_http_info(size, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int size: Size (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['size'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_playback_bitratetest" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'size' is set
if ('size' not in params or
params['size'] is None):
raise ValueError("Missing the required parameter `size` when calling `get_playback_bitratetest`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'size' in params:
query_params.append(('Size', params['size'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['apikeyauth', 'embyauth'] # noqa: E501
return self.api_client.call_api(
'/Playback/BitrateTest', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def post_items_by_id_playbackinfo(self, body, id, **kwargs): # noqa: E501
"""Gets live playback media info for an item # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_items_by_id_playbackinfo(body, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MediaInfoPlaybackInfoRequest body: PlaybackInfoRequest: (required)
:param str id: (required)
:return: MediaInfoPlaybackInfoResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.post_items_by_id_playbackinfo_with_http_info(body, id, **kwargs) # noqa: E501
else:
(data) = self.post_items_by_id_playbackinfo_with_http_info(body, id, **kwargs) # noqa: E501
return data
def post_items_by_id_playbackinfo_with_http_info(self, body, id, **kwargs): # noqa: E501
"""Gets live playback media info for an item # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_items_by_id_playbackinfo_with_http_info(body, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MediaInfoPlaybackInfoRequest body: PlaybackInfoRequest: (required)
:param str id: (required)
:return: MediaInfoPlaybackInfoResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_items_by_id_playbackinfo" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `post_items_by_id_playbackinfo`") # noqa: E501
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `post_items_by_id_playbackinfo`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['Id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['apikeyauth', 'embyauth'] # noqa: E501
return self.api_client.call_api(
'/Items/{Id}/PlaybackInfo', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MediaInfoPlaybackInfoResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def post_livestreams_close(self, live_stream_id, **kwargs): # noqa: E501
"""Closes a media source # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_livestreams_close(live_stream_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str live_stream_id: LiveStreamId (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.post_livestreams_close_with_http_info(live_stream_id, **kwargs) # noqa: E501
else:
(data) = self.post_livestreams_close_with_http_info(live_stream_id, **kwargs) # noqa: E501
return data
def post_livestreams_close_with_http_info(self, live_stream_id, **kwargs): # noqa: E501
"""Closes a media source # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_livestreams_close_with_http_info(live_stream_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str live_stream_id: LiveStreamId (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['live_stream_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_livestreams_close" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'live_stream_id' is set
if ('live_stream_id' not in params or
params['live_stream_id'] is None):
raise ValueError("Missing the required parameter `live_stream_id` when calling `post_livestreams_close`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'live_stream_id' in params:
query_params.append(('LiveStreamId', params['live_stream_id'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['apikeyauth', 'embyauth'] # noqa: E501
return self.api_client.call_api(
'/LiveStreams/Close', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def post_livestreams_mediainfo(self, live_stream_id, **kwargs): # noqa: E501
"""Closes a media source # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_livestreams_mediainfo(live_stream_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str live_stream_id: LiveStreamId (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.post_livestreams_mediainfo_with_http_info(live_stream_id, **kwargs) # noqa: E501
else:
(data) = self.post_livestreams_mediainfo_with_http_info(live_stream_id, **kwargs) # noqa: E501
return data
def post_livestreams_mediainfo_with_http_info(self, live_stream_id, **kwargs): # noqa: E501
"""Closes a media source # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_livestreams_mediainfo_with_http_info(live_stream_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str live_stream_id: LiveStreamId (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['live_stream_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_livestreams_mediainfo" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'live_stream_id' is set
if ('live_stream_id' not in params or
params['live_stream_id'] is None):
raise ValueError("Missing the required parameter `live_stream_id` when calling `post_livestreams_mediainfo`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'live_stream_id' in params:
query_params.append(('LiveStreamId', params['live_stream_id'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['apikeyauth', 'embyauth'] # noqa: E501
return self.api_client.call_api(
'/LiveStreams/MediaInfo', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def post_livestreams_open(self, body, **kwargs): # noqa: E501
"""Opens a media source # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_livestreams_open(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MediaInfoLiveStreamRequest body: LiveStreamRequest: (required)
:return: MediaInfoLiveStreamResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.post_livestreams_open_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.post_livestreams_open_with_http_info(body, **kwargs) # noqa: E501
return data
def post_livestreams_open_with_http_info(self, body, **kwargs): # noqa: E501
"""Opens a media source # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_livestreams_open_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MediaInfoLiveStreamRequest body: LiveStreamRequest: (required)
:return: MediaInfoLiveStreamResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_livestreams_open" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `post_livestreams_open`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['apikeyauth', 'embyauth'] # noqa: E501
return self.api_client.call_api(
'/LiveStreams/Open', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MediaInfoLiveStreamResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats) | 0.684053 | 0.045841 |
from . plotly_words import INFO
class PlotlyList(list):
"""
A container for PlotlyDicts, inherits from standard list.
Plotly uses lists and dicts as collections to hold information about a
figure. This container is simply a list that understands some plotly
language and apes the methods in a PlotlyDict, passing them on to its
constituents.
It can be initialized like any other list so long as the entries are all
PlotlyDict objects or subclasses thereof.
Any available methods that hold for a list object hold for a PlotlyList.
"""
def __init__(self, *args):
"""Initialize PlotlyList.
Differs from list initialization only in that it forces all new items
to be added through the `append`.
Positional arguments:
args -- a list of positional arguments of any length
"""
super(PlotlyList, self).__init__(self)
for arg in args:
self.append(arg)
def __setitem__(self, key, value):
if not isinstance(value, PlotlyDict):
raise ValueError("Only PlotlyDict or subclasses thereof can "
"populate a PlotlyList.")
super(PlotlyList, self).__setitem__(key, value)
def append(self, arg):
if not isinstance(arg, PlotlyDict):
raise ValueError("Only PlotlyDict or subclasses thereof can "
"populate a PlotlyList.")
super(PlotlyList, self).append(arg)
def get_json(self):
"""Return a json structure representation for the PlotlyList."""
l = list()
for pd in self:
l.append(pd.get_json())
return l
def strip_style(self):
"""Strip style information from each of the PlotlyList's entries."""
for pd in self:
pd.strip_style()
def clean(self):
"""Remove any entries that are NoneType from PlotlyLists's entries."""
for item in self:
item.clean()
def check(self):
"""Check each entry in the PlotlyList for invalid keys."""
for item in self:
item.check()
def repair_vals(self):
"""Some unfortunately placed functionality for use with mplexporter."""
for item in self:
item.repair_vals()
def repair_keys(self):
"""Some unfortunately placed functionality for use with mplexporter."""
for item in self:
item.repair_keys()
class PlotlyDict(dict):
"""A base class for all objects that style a figure in plotly.
A PlotlyDict can be instantiated like any dict object. This class offers
some useful recursive methods that can be used by higher-level subclasses
and containers so long as all plot objects are instantiated as a subclass
of PlotlyDict. Each PlotlyDict should be instantiated with a `kind`
keyword argument. This defines the special _info dictionary for the
object.
Any available methods that hold for a dict hold for a PlotlyDict.
"""
def __init__(self, kind=None, **kwargs):
if kind is not None:
kwargs['_info'] = INFO[kind]
else:
kwargs['_info'] = INFO['base']
super(PlotlyDict, self).__init__(**kwargs)
def __str__(self):
return str(self.get_json())
def _pop_info(self):
"""Remove `private` info from PlotlyDict.
This is only temporary and should be used only by the PlotlyDict class.
"""
return self.pop('_info')
def _push_info(self, _info):
"""Add `private` info back to PlotlyDict.
This is only temporary and should be used only by the PlotlyDict class.
"""
self['_info'] = _info
def get_json(self):
"""Get a JSON representation for the PlotlyDict.
This function changes all of the internal PlotlyDicts and PlotlyLists
into normal lists and dicts. Though duck-typing should allow
PlotlyLists and PlotlyDicts to be sent to plotly directly, this is a
safer approach for compatibility.
"""
d = dict()
_info = self._pop_info()
for key, val in self.items():
try:
d[key] = val.get_json()
except AttributeError:
d[key] = val
self._push_info(_info)
return d
def strip_style(self):
"""Strip style from the current representation of the plotly figure.
All PlotlyDicts and PlotlyLists are guaranteed to survive the
stripping process, though they made be left empty. This is allowable.
The other attributes that will not be deleted are stored in the
plotly_words module under INFO['*']['safe'] for each `kind` of plotly
object.
"""
_info = self._pop_info()
keys = self.keys()
for key in keys:
try:
self[key].strip_style()
except AttributeError:
if key not in _info['safe']:
del self[key]
self._push_info(_info)
def clean(self):
"""Recursively rid PlotlyDict of `None` entries.
This only rids a PlotlyDict of `None` entries, not empty dictionaries or
lists.
"""
del_keys = [key for key in self if self[key] is None]
for key in del_keys:
del self[key]
for val in self.values():
try:
val.clean()
except AttributeError:
pass
def check(self):
"""Recursively check the validity of the keys in a PlotlyDict.
The valid keys are stored in plotly_word.py under INFO['*']['valid']
for each `kind` of plotly object.
"""
_info = self._pop_info()
for key, val in self.items():
try:
val.check()
except AttributeError:
if key not in _info['valid']:
raise KeyError("Invalid key, '{}', for PlotlyDict kind, "
"'{}'".format(key, _info['kind']))
self._push_info(_info)
def repair_vals(self):
"""Repair known common value problems.
Plotly objects that require this functionality define a
non-trivial INFO['*']['repair_vals'] `dict` in plotly_words.py. The
structure of these dictionaries are as follows:
INFO['*']['repair_vals'] =
dict(key_1=[suspect_val_1, correct_val_1], ...)
"""
_info = self._pop_info()
for key in self:
try:
self[key].repair_vals()
except AttributeError:
try:
if self[key] == _info['repair_vals'][key][0]:
self[key] = _info['repair_vals'][key][1]
except KeyError:
pass
self._push_info(_info)
self.clean()
def repair_keys(self):
"""Repair known common key problems.
Plotly objects that require this functionality define a private
non-trivial INFO['*']['repair_keys'] `dict` in plotly_words.py. The
structure of these dictionaries are as follows:
INFO['*']['repair_keys'] = dict(suspect_key_1=correct_key_1, ...)
"""
_info = self._pop_info()
for key in self:
if key in _info['repair_keys']:
self[_info['repair_keys'][key]] = self.pop(key)
for key in self:
try:
self[key].repair_keys()
except AttributeError:
pass
self._push_info(_info)
self.clean() | venv/lib/python3.7/site-packages/matplotlylib/plotly_objs.py | from . plotly_words import INFO
class PlotlyList(list):
"""
A container for PlotlyDicts, inherits from standard list.
Plotly uses lists and dicts as collections to hold information about a
figure. This container is simply a list that understands some plotly
language and apes the methods in a PlotlyDict, passing them on to its
constituents.
It can be initialized like any other list so long as the entries are all
PlotlyDict objects or subclasses thereof.
Any available methods that hold for a list object hold for a PlotlyList.
"""
def __init__(self, *args):
"""Initialize PlotlyList.
Differs from list initialization only in that it forces all new items
to be added through the `append`.
Positional arguments:
args -- a list of positional arguments of any length
"""
super(PlotlyList, self).__init__(self)
for arg in args:
self.append(arg)
def __setitem__(self, key, value):
if not isinstance(value, PlotlyDict):
raise ValueError("Only PlotlyDict or subclasses thereof can "
"populate a PlotlyList.")
super(PlotlyList, self).__setitem__(key, value)
def append(self, arg):
if not isinstance(arg, PlotlyDict):
raise ValueError("Only PlotlyDict or subclasses thereof can "
"populate a PlotlyList.")
super(PlotlyList, self).append(arg)
def get_json(self):
"""Return a json structure representation for the PlotlyList."""
l = list()
for pd in self:
l.append(pd.get_json())
return l
def strip_style(self):
"""Strip style information from each of the PlotlyList's entries."""
for pd in self:
pd.strip_style()
def clean(self):
"""Remove any entries that are NoneType from PlotlyLists's entries."""
for item in self:
item.clean()
def check(self):
"""Check each entry in the PlotlyList for invalid keys."""
for item in self:
item.check()
def repair_vals(self):
"""Some unfortunately placed functionality for use with mplexporter."""
for item in self:
item.repair_vals()
def repair_keys(self):
"""Some unfortunately placed functionality for use with mplexporter."""
for item in self:
item.repair_keys()
class PlotlyDict(dict):
"""A base class for all objects that style a figure in plotly.
A PlotlyDict can be instantiated like any dict object. This class offers
some useful recursive methods that can be used by higher-level subclasses
and containers so long as all plot objects are instantiated as a subclass
of PlotlyDict. Each PlotlyDict should be instantiated with a `kind`
keyword argument. This defines the special _info dictionary for the
object.
Any available methods that hold for a dict hold for a PlotlyDict.
"""
def __init__(self, kind=None, **kwargs):
if kind is not None:
kwargs['_info'] = INFO[kind]
else:
kwargs['_info'] = INFO['base']
super(PlotlyDict, self).__init__(**kwargs)
def __str__(self):
return str(self.get_json())
def _pop_info(self):
"""Remove `private` info from PlotlyDict.
This is only temporary and should be used only by the PlotlyDict class.
"""
return self.pop('_info')
def _push_info(self, _info):
"""Add `private` info back to PlotlyDict.
This is only temporary and should be used only by the PlotlyDict class.
"""
self['_info'] = _info
def get_json(self):
"""Get a JSON representation for the PlotlyDict.
This function changes all of the internal PlotlyDicts and PlotlyLists
into normal lists and dicts. Though duck-typing should allow
PlotlyLists and PlotlyDicts to be sent to plotly directly, this is a
safer approach for compatibility.
"""
d = dict()
_info = self._pop_info()
for key, val in self.items():
try:
d[key] = val.get_json()
except AttributeError:
d[key] = val
self._push_info(_info)
return d
def strip_style(self):
"""Strip style from the current representation of the plotly figure.
All PlotlyDicts and PlotlyLists are guaranteed to survive the
stripping process, though they made be left empty. This is allowable.
The other attributes that will not be deleted are stored in the
plotly_words module under INFO['*']['safe'] for each `kind` of plotly
object.
"""
_info = self._pop_info()
keys = self.keys()
for key in keys:
try:
self[key].strip_style()
except AttributeError:
if key not in _info['safe']:
del self[key]
self._push_info(_info)
def clean(self):
"""Recursively rid PlotlyDict of `None` entries.
This only rids a PlotlyDict of `None` entries, not empty dictionaries or
lists.
"""
del_keys = [key for key in self if self[key] is None]
for key in del_keys:
del self[key]
for val in self.values():
try:
val.clean()
except AttributeError:
pass
def check(self):
"""Recursively check the validity of the keys in a PlotlyDict.
The valid keys are stored in plotly_word.py under INFO['*']['valid']
for each `kind` of plotly object.
"""
_info = self._pop_info()
for key, val in self.items():
try:
val.check()
except AttributeError:
if key not in _info['valid']:
raise KeyError("Invalid key, '{}', for PlotlyDict kind, "
"'{}'".format(key, _info['kind']))
self._push_info(_info)
def repair_vals(self):
"""Repair known common value problems.
Plotly objects that require this functionality define a
non-trivial INFO['*']['repair_vals'] `dict` in plotly_words.py. The
structure of these dictionaries are as follows:
INFO['*']['repair_vals'] =
dict(key_1=[suspect_val_1, correct_val_1], ...)
"""
_info = self._pop_info()
for key in self:
try:
self[key].repair_vals()
except AttributeError:
try:
if self[key] == _info['repair_vals'][key][0]:
self[key] = _info['repair_vals'][key][1]
except KeyError:
pass
self._push_info(_info)
self.clean()
def repair_keys(self):
"""Repair known common key problems.
Plotly objects that require this functionality define a private
non-trivial INFO['*']['repair_keys'] `dict` in plotly_words.py. The
structure of these dictionaries are as follows:
INFO['*']['repair_keys'] = dict(suspect_key_1=correct_key_1, ...)
"""
_info = self._pop_info()
for key in self:
if key in _info['repair_keys']:
self[_info['repair_keys'][key]] = self.pop(key)
for key in self:
try:
self[key].repair_keys()
except AttributeError:
pass
self._push_info(_info)
self.clean() | 0.881088 | 0.527864 |
# Lint as: python3
"""Downloads and installs nasm in a temporary directory."""
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import urllib.parse
import urllib.request
import zipfile
# pylint: disable=g-import-not-at-top
# pylint: disable=W0403
sys.path.append(os.path.dirname(__file__))
import shell
# pylint: enable=g-import-not-at-top
# pylint: enable=W0403
NASM_ZIP_NAME = 'nasm.zip'
class NasmInstaller:
    """Installs nasm into a temporary directory."""

    def __init__(self, installer_url: str, installer_dir: str = None):
        """Initialize the installer instance.

        Args:
            installer_url: URL to the nasm installer zip archive.
            installer_dir: Optional path to copy nasm into. A fresh temporary
                directory is created when omitted.
        """
        self._installer_url = installer_url
        if not installer_dir:
            # mkdtemp() creates a directory that outlives this call.
            # The previous tempfile.TemporaryDirectory().name handed back a
            # path whose backing directory is removed again as soon as the
            # TemporaryDirectory object is garbage collected.
            self._installer_dir = tempfile.mkdtemp()
        else:
            self._installer_dir = installer_dir
        # Add nasm installation directory to path.
        os.environ['PATH'] = (self._installer_dir + os.path.pathsep +
                              os.environ['PATH'])

    @property
    def installer_path(self) -> str:
        """Get the path where nasm is going to be installed."""
        return self._installer_dir

    def install(self) -> bool:
        """Install nasm to project.

        Returns:
            True when installed, false otherwise.

        Raises:
            urllib.error.URLError: If an error occurs while downloading the
                installer.
            zipfile.BadZipFile: if unzipping fails.
            subprocess.CalledProcessError: If failed to set path.
        """
        # Download installer.
        installer_filename = self._download()
        if installer_filename:
            # Unzip installer.
            self._unzip(installer_filename)
            # Verify nasm is now reachable through PATH.
            self._check_nasm()
            return True
        return False

    def _download(self) -> str:
        """Download the installer and place it into the install folder.

        Returns:
            Path to the downloaded installer, or '' when no URL was given.

        Raises:
            urllib.error.URLError: If an error occurs while downloading the
                installer.
        """
        if not self._installer_url:
            return ''
        # Create installation directory if doesn't exist.
        os.makedirs(self._installer_dir, exist_ok=True)
        installer_filename = os.path.join(self._installer_dir, NASM_ZIP_NAME)
        with open(installer_filename, 'wb') as installer_file:
            logging.info('Copying %s --> %s', self._installer_url, installer_filename)
            with urllib.request.urlopen(self._installer_url) as urlfile:
                shutil.copyfileobj(urlfile, installer_file)
        return installer_filename

    def _unzip(self, zip_path: str) -> None:
        """Unzips the nasm package into the install directory.

        Args:
            zip_path: Path to the zip file.

        Raises:
            zipfile.BadZipFile: if unzipping fails.
        """
        try:
            with zipfile.ZipFile(zip_path) as handle:
                for item_info in handle.infolist():
                    # Remove first folder, so nasm.exe can be found when
                    # setting PATH.
                    parts = os.path.normpath(item_info.filename).split(os.path.sep)[1:]
                    # Skip directory entries and the archive's root folder:
                    # os.path.join(*[]) would raise TypeError for the latter.
                    if not parts or item_info.is_dir():
                        continue
                    target_filename = os.path.join(self._installer_dir,
                                                   os.path.join(*parts))
                    # Open the file inside zip and save it on the desired location.
                    with handle.open(item_info.filename, 'r') as input_file:
                        os.makedirs(os.path.dirname(target_filename), exist_ok=True)
                        with open(target_filename, 'wb') as output_file:
                            output_file.write(input_file.read())
        except zipfile.BadZipFile as error:
            logging.exception('Failed to unzip %s: %s', zip_path, error)
            raise

    def _check_nasm(self) -> None:
        """Check that nasm runs on cmd.

        Raises:
            subprocess.CalledProcessError: If failed to run nasm.
        """
        try:
            shell.run_command('nasm -h')
        except subprocess.CalledProcessError as error:
            logging.exception('Failed to add nasm to path: %s', error)
            raise
# Lint as: python3
"""Downloads and installs nasm in a temporary directory."""
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import urllib.parse
import urllib.request
import zipfile
# pylint: disable=g-import-not-at-top
# pylint: disable=W0403
sys.path.append(os.path.dirname(__file__))
import shell
# pylint: enable=g-import-not-at-top
# pylint: enable=W0403
NASM_ZIP_NAME = 'nasm.zip'
class NasmInstaller:
"""Installs nasm into a temporary directory."""
def __init__(self, installer_url: str, installer_dir: str = None):
"""Initialize the installer instance.
Args:
installer_url: URL to the nasm installer.
installer_dir: Optional path to copy nasm.
"""
self._installer_url = installer_url
if not installer_dir:
self._installer_dir = tempfile.TemporaryDirectory().name
else:
self._installer_dir = installer_dir
# Add nasm installation directory to path.
os.environ['PATH'] = (self._installer_dir + os.path.pathsep +
os.environ['PATH'])
@property
def installer_path(self):
"""Get the path where nasm is going to be installed."""
return self._installer_dir
def install(self):
"""Install nasm to project.
Returns:
True when installed, false otherwise.
Raises:
urllib.error.URLError: If an error occurs while downloading the
installer.
zipfile.BadZipFile: if unzipping fails.
subprocess.CalledProcessError: If failed to set path.
"""
# Download installer.
installer_filename = self._download()
if installer_filename:
# Unzip installer.
self._unzip(installer_filename)
# Add installer to path.
self._check_nasm()
return True
return False
def _download(self) -> str:
"""Download the installer and places into temporary folder.
Returns:
Path to the downloaded installer.
Raises:
urllib.error.URLError: If an error occurs while downloading the
installer.
"""
if not self._installer_url:
return ''
# Create installation directory if doesn't exist.
os.makedirs(self._installer_dir, exist_ok=True)
installer_filename = os.path.join(self._installer_dir, NASM_ZIP_NAME)
with open(installer_filename, 'wb') as installer_file:
logging.info('Copying %s --> %s', self._installer_url, installer_filename)
with urllib.request.urlopen(self._installer_url) as urlfile:
shutil.copyfileobj(urlfile, installer_file)
return installer_filename
def _unzip(self, zip_path: str) -> bool:
"""Unzips nasm package.
Args:
zip_path: Path to the zip file.
Raises:
zipfile.BadZipFile: if unzipping fails.
"""
try:
with zipfile.ZipFile(zip_path) as handle:
for item_info in handle.infolist():
# Remove first folder, so nasm.exe can be found when setting PATH.
target_filename = os.path.join(
self._installer_dir,
os.path.join(*(
os.path.normpath(item_info.filename).split(os.path.sep)[1:])))
# Open the file inside zip and save it on the desired location.
with handle.open(item_info.filename, 'r') as input_file:
os.makedirs(os.path.dirname(target_filename), exist_ok=True)
with open(target_filename, 'wb') as output_file:
output_file.write(input_file.read())
except (zipfile.BadZipFile) as error:
logging.exception('Failed to unzip %s: %s', zip_path, error)
raise
def _check_nasm(self) -> str:
"""Check that nasm runs on cmd.
Raises:
subprocess.CalledProcessError: If failed to run nasm.
"""
try:
shell.run_command('nasm -h')
except subprocess.CalledProcessError as error:
logging.exception('Failed to add nasm to path: %s', error)
raise | 0.640186 | 0.111628 |
__copyright__ = """
Copyright 2019 Amazon.com, Inc. or its affiliates.
Copyright 2019 Netflix Inc.
Copyright 2019 Google LLC
"""
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from threading import Event
from threading import Thread
from mqttclient import MqttClient
from mqttrequest import MqttRequest
from mqttwait import MqttWait
import logging
import json
import time
class RoundRobin:
    """Round robin through all the discovered media apps.

    This class will walk through the list of discovered media apps, launching
    each one, driving a short playback scenario, and stopping it again.
    """

    def __init__(self, host, port):
        """Create the RoundRobin object.

        Args:
            host: MQTT broker hostname.
            port: MQTT broker port.
        """
        self.host = host
        self.port = port
        self.mqtt_client = MqttClient(host, port)
        self.thread = None
        self.stop_event = Event()
        # Requests currently blocked inside execute(); stop() cancels these.
        self.active_requests = []
        # Per-app launch and playback arguments, looked up by app_id.
        self.app_args = [
            {
                "app_id": "netflix",
                "startup-args": {"args": []},
                "args": {"args": ["m=https://api-global.netflix.com/catalog/titles/movie/80223967&trackId=14855318"]}},
            {
                "app_id": "prime-video",
                "startup-args": {"args": []},
                "args": {"contentId": "B015YJRQ8U", "mediaType": "movie"}},
            {
                "app_id": "youtube",
                "startup-args": {},
                "args": {"video_id": "KEcbFSaLpoM"}},
        ]
        self.logger = logging.getLogger(__name__)

    def execute(self, request):
        """Execute a request.

        Basically block until a request has been executed and the response
        returned. The request is tracked in active_requests for the duration
        of the call so stop() can cancel it; try/finally guarantees removal
        even when request.execute() raises.
        """
        self.active_requests.append(request)
        try:
            return request.execute()
        finally:
            self.active_requests.remove(request)

    @staticmethod
    def build_app_topic(app, topic):
        """Return the MQTT topic for an app-scoped request."""
        return "apps/{}/{}".format(app["app_id"], topic)

    @staticmethod
    def build_platform_topic(capability, topic):
        """Return the MQTT topic for a platform-scoped request."""
        return "platform/{}/{}".format(capability, topic)

    def _find_app_args(self, app):
        """Return the configured args entry for this app, or None."""
        return next((args for args in self.app_args
                     if args["app_id"] == app["app_id"]), None)

    def start_app(self, app, content_reference):
        """Attempt to launch the specified app.

        Executes the app/start request and waits for the app lifecycle status
        to change to "started".

        Raises:
            Exception: On a non-200 response or a status-wait timeout.
        """
        req = MqttRequest(self.mqtt_client, self.build_app_topic(app, "app/start"), json.dumps(content_reference))
        response = self.execute(req)
        if response["status"] == 200:
            wait = MqttWait(self.mqtt_client, self.build_app_topic(app, "status/lifecycle"), {"status": "started"}, 30.0)
            response = self.execute(wait)
            if not response:
                raise Exception('Timeout waiting for app status')
        else:
            raise Exception('Bad status from request: ' + json.dumps(response))

    def stop_app(self, app):
        """Attempt to stop the specified app.

        Executes the app/stop request and waits for the app lifecycle status
        to change to "stopped".

        Raises:
            Exception: On a non-200 response or a status-wait timeout.
        """
        req = MqttRequest(self.mqtt_client, self.build_app_topic(app, "app/stop"), json.dumps({"args": []}))
        response = self.execute(req)
        if response["status"] == 200:
            wait = MqttWait(self.mqtt_client, self.build_app_topic(app, "status/lifecycle"), {"status": "stopped"}, 30.0)
            response = self.execute(wait)
            if not response:
                raise Exception('Timeout waiting for app status')
        else:
            raise Exception('Bad status from request: ' + json.dumps(response))

    def start_media(self, app, content_reference):
        """Attempt to play specified content on the specified app.

        Executes the media/start request and waits for the media status to
        change to "playing".

        Raises:
            Exception: On a non-200 response or a status-wait timeout.
        """
        req = MqttRequest(self.mqtt_client, self.build_app_topic(app, "media/start"), json.dumps(content_reference))
        response = self.execute(req)
        if response["status"] == 200:
            wait = MqttWait(self.mqtt_client, self.build_app_topic(app, "status/media"), {"status": "playing"}, 30.0)
            response = self.execute(wait)
            if not response:
                raise Exception('Timeout waiting for media status')
        else:
            raise Exception('Bad status from request: ' + json.dumps(response))

    def send_input_event(self, app, device, key):
        """Send a key press on the given input device, targeted at the app."""
        req = MqttRequest(self.mqtt_client,
                          self.build_platform_topic("input", "{}/{}".format(device, key)),
                          json.dumps({"app_id": app["app_id"]}))
        response = self.execute(req)
        if response["status"] != 200:
            raise Exception('Bad status from request: ' + json.dumps(response))

    def memory_monitor(self, app, action):
        """Control monitoring memory on the specified app.

        This function executes the telemetry/memory/monitor/<action> request.
        """
        req = MqttRequest(self.mqtt_client,
                          self.build_platform_topic("telemetry", "{}/{}".format("monitor", action)),
                          json.dumps({"app_id": app["app_id"]}))
        response = self.execute(req)
        if response["status"] != 200:
            raise Exception('Bad status from request: ' + json.dumps(response))

    def run_test(self, app):
        """Run the full launch/play/pause/stop scenario on one app."""
        self.logger.info("Running test on {}".format(app["name"]))
        self.logger.info("Starting app...")
        if not self.stop_event.is_set():
            # Start the app and wait for it to startup.
            # NOTE: next() with a default keeps the error branch reachable;
            # a bare next() would raise StopIteration before it.
            content_reference = self._find_app_args(app)
            if content_reference:
                self.start_app(app, content_reference["startup-args"])
            else:
                raise Exception("No args found for app {}.".format(app["name"]))
            time.sleep(1)
            self.memory_monitor(app, "start")
            if app["name"] == "Netflix":
                # Enter the default profile for Netflix
                time.sleep(5)
                self.send_input_event(app, "remote", "OK")
                time.sleep(2)
            if app["name"] == "Prime Video":
                # Navigate the home screen to a playable tile.
                self.send_input_event(app, "remote", "down")
                time.sleep(2)
                self.send_input_event(app, "remote", "down")
                time.sleep(2)
                self.send_input_event(app, "remote", "right")
                time.sleep(2)
                self.send_input_event(app, "remote", "right")
                time.sleep(2)
                self.send_input_event(app, "remote", "down")
                time.sleep(2)
        self.logger.info("Starting playback...")
        if not self.stop_event.is_set():
            # Play some media
            content_reference = self._find_app_args(app)
            if content_reference:
                self.start_media(app, content_reference["args"])
            else:
                raise Exception("No args found for app {}.".format(app["name"]))
        # Let playback start and run for a bit...
        sleep_time_seconds = 10
        self.logger.info("Play for {} seconds...".format(sleep_time_seconds))
        if not self.stop_event.is_set():
            time.sleep(sleep_time_seconds)
            # Pause playback, then toggle play/pause a few times.
            self.send_input_event(app, "remote", "pause")
            time.sleep(5)
            self.send_input_event(app, "remote", "play")
            time.sleep(5)
            self.send_input_event(app, "remote", "pause")
            time.sleep(5)
            self.send_input_event(app, "remote", "play")
            time.sleep(5)
            self.memory_monitor(app, "stop")
        self.logger.info("Stopping app...")
        if not self.stop_event.is_set():
            # Stop the app
            self.stop_app(app)
        self.logger.info("Test complete.")

    def _start(self):
        """Private start function that starts the client and loops through the apps forever."""
        self.mqtt_client.start()
        discovered_apps = self.mqtt_client.get_discovered_apps()
        while not self.stop_event.is_set():
            for app in discovered_apps:
                self.run_test(app)

    def start(self):
        """Public start function to get the party started."""
        self.thread = Thread(target=self._start, daemon=True)
        self.thread.start()
        return self.thread

    def stop(self):
        """Party's over."""
        self.mqtt_client.stop()
        self.stop_event.set()
        for active_request in self.active_requests:
            active_request.cancel()
Copyright 2019 Amazon.com, Inc. or its affiliates.
Copyright 2019 Netflix Inc.
Copyright 2019 Google LLC
"""
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from threading import Event
from threading import Thread
from mqttclient import MqttClient
from mqttrequest import MqttRequest
from mqttwait import MqttWait
import logging
import json
import time
class RoundRobin:
"""Round robin through all the discovered media apps
This class will walk through the list of discovered media apps, launching each one and
waiting for it's status to change to "running".
"""
def __init__(self, host, port):
"""Create the RoundRobin object."""
self.host = host
self.port = port
self.mqtt_client = MqttClient(host, port)
self.thread = None
self.stop_event = Event()
self.active_requests = []
self.app_args = [
{
"app_id": "netflix",
"startup-args": {"args": []},
"args": {"args": ["m=https://api-global.netflix.com/catalog/titles/movie/80223967&trackId=14855318"]}},
{
"app_id": "prime-video",
"startup-args": {"args": []},
"args": {"contentId": "B015YJRQ8U", "mediaType": "movie"}},
{
"app_id": "youtube",
"startup-args": {},
"args": {"video_id": "KEcbFSaLpoM"}},
]
self.logger = logging.getLogger(__name__)
def execute(self, request):
"""Execute a request.
Basically block until a request has been executed and the response returned.
"""
self.active_requests.append(request)
response = request.execute()
self.active_requests.remove(request)
return response
@staticmethod
def build_app_topic(app, topic):
return "apps/{}/{}".format(app["app_id"], topic)
@staticmethod
def build_platform_topic(capability, topic):
return "platform/{}/{}".format(capability, topic)
def start_app(self, app, content_reference):
"""Attempt to launch the specified app.
This function executes the start request and waits for the app status to change to "running".
"""
req = MqttRequest(self.mqtt_client, self.build_app_topic(app, "app/start"), json.dumps(content_reference))
response = self.execute(req)
if response["status"] == 200:
wait = MqttWait(self.mqtt_client, self.build_app_topic(app, "status/lifecycle"), {"status": "started"}, 30.0)
response = self.execute(wait)
if not response:
raise Exception('Timeout waiting for app status')
else:
raise Exception('Bad status from request: ' + json.dumps(response))
def stop_app(self, app):
"""Attempt to launch the specified app.
This function executes the start request and waits for the app status to change to "running".
"""
req = MqttRequest(self.mqtt_client, self.build_app_topic(app, "app/stop"), json.dumps({"args": []}))
response = self.execute(req)
if response["status"] == 200:
wait = MqttWait(self.mqtt_client, self.build_app_topic(app, "status/lifecycle"), {"status": "stopped"}, 30.0)
response = self.execute(wait)
if not response:
raise Exception('Timeout waiting for app status')
else:
raise Exception('Bad status from request: ' + json.dumps(response))
def start_media(self, app, content_reference):
"""Attempt to play specified content on the specified app.
This function executes the media/start request and waits for the media status to change to "playing".
"""
req = MqttRequest(self.mqtt_client, self.build_app_topic(app, "media/start"), json.dumps(content_reference))
response = self.execute(req)
if response["status"] == 200:
wait = MqttWait(self.mqtt_client, self.build_app_topic(app, "status/media"), {"status": "playing"}, 30.0)
response = self.execute(wait)
if not response:
raise Exception('Timeout waiting for media status')
else:
raise Exception('Bad status from request: ' + json.dumps(response))
def send_input_event(self, app, device, key):
req = MqttRequest(self.mqtt_client,
self.build_platform_topic("input", "{}/{}".format(device, key)),
json.dumps({"app_id": app["app_id"]}))
response = self.execute(req)
if response["status"] != 200:
raise Exception('Bad status from request: ' + json.dumps(response))
def memory_monitor(self, app, action):
"""Control monitoring memory on the specified app.
This function executes the telemetry/memory/monitor/<action> request.
"""
req = MqttRequest(self.mqtt_client,
self.build_platform_topic("telemetry", "{}/{}".format("monitor", action)),
json.dumps({"app_id": app["app_id"]}))
response = self.execute(req)
if response["status"] != 200:
raise Exception('Bad status from request: ' + json.dumps(response))
def run_test(self, app):
self.logger.info("Running test on {}".format(app["name"]))
self.logger.info("Starting app...")
if not self.stop_event.is_set():
# Start the app and wait for it to startup
content_reference = next(args for args in self.app_args if args["app_id"] == app["app_id"])
if content_reference:
self.start_app(app, content_reference["startup-args"])
else:
raise Exception("No args found for app {}.".format(app["name"]))
time.sleep(1)
self.memory_monitor(app, "start")
if app["name"] == "Netflix":
# Enter the default profile for Netflix
time.sleep(5)
self.send_input_event(app, "remote", "OK")
time.sleep(2)
if app["name"] == "Prime Video":
self.send_input_event(app, "remote", "down")
time.sleep(2)
self.send_input_event(app, "remote", "down")
time.sleep(2)
self.send_input_event(app, "remote", "right")
time.sleep(2)
self.send_input_event(app, "remote", "right")
time.sleep(2)
self.send_input_event(app, "remote", "down")
time.sleep(2)
self.logger.info("Starting playback...")
if not self.stop_event.is_set():
# Play some media
content_reference = next(args for args in self.app_args if args["app_id"] == app["app_id"])
if content_reference:
self.start_media(app, content_reference["args"])
else:
raise Exception("No args found for app {}.".format(app["name"]))
# Let playback start and run for a bit...
sleep_time_seconds = 10
self.logger.info("Play for {} seconds...".format(sleep_time_seconds))
if not self.stop_event.is_set():
time.sleep(sleep_time_seconds)
# Pause playback
self.send_input_event(app, "remote", "pause")
time.sleep(5)
self.send_input_event(app, "remote", "play")
time.sleep(5)
self.send_input_event(app, "remote", "pause")
time.sleep(5)
self.send_input_event(app, "remote", "play")
time.sleep(5)
self.memory_monitor(app, "stop")
self.logger.info("Stopping app...")
if not self.stop_event.is_set():
# Stop the app
self.stop_app(app)
self.logger.info("Test complete.")
def _start(self):
"""Private start function that starts the client and loops through the apps forever."""
self.mqtt_client.start()
discovered_apps = self.mqtt_client.get_discovered_apps()
while not self.stop_event.is_set():
for app in discovered_apps:
self.run_test(app)
def start(self):
"""Public start function to get the party started."""
self.thread = Thread(target=self._start, daemon=True)
self.thread.start()
return self.thread
def stop(self):
"""Party's over."""
self.mqtt_client.stop()
self.stop_event.set()
for active_request in self.active_requests:
active_request.cancel() | 0.760606 | 0.166913 |
import demistomock as demisto
from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import
from CommonServerUserPython import * # noqa
"""PrismaCloudAttribution
"""
from typing import Dict, List, Any, Iterable, Union
import traceback
IPADDRESS_KEYS = ['publicIpAddress', 'natIP', 'publicIp', 'inboundIpAddress', 'ipAddress', 'IPAddress']
FQDN_KEYS = ['publicDnsName', 'dnsname', 'domainName', 'name', 'dnsName', 'hostName', 'properties.hostName', 'fqdn',
'enabledHostNames', 'web']
''' STANDALONE FUNCTION '''
def recursive_find(keys: Union[List[str], str], value: Iterable[Any]) -> Iterable[Any]:
    """Recursively yield every value stored under any of *keys* in a nested
    structure of dicts and lists.

    A matching key's value is yielded as-is and NOT descended into; other
    dict/list values are searched recursively. Non-container inputs yield
    nothing.

    Args:
        keys: A key name or list of key names to look for.
        value: The nested dict/list structure to search.

    Yields:
        Values found under any matching key, in traversal order.
    """
    if not isinstance(keys, list):
        keys = [keys]
    if isinstance(value, dict):
        pairs = value.items()
    elif isinstance(value, list):
        pairs = enumerate(value)
    else:
        pairs = []
    for k, v in pairs:
        if k in keys:
            yield v
        elif isinstance(v, (list, dict)):
            # The original had two identical recursion branches; merged here.
            yield from recursive_find(keys, v)
def handle_data(data: Dict[str, Any], fields: List[str]) -> Dict[str, Any]:
    """Extract IP addresses and FQDNs from a raw asset ``data`` payload.

    Only the facets named in *fields* ('ip' and/or 'fqdn') are extracted.
    Each facet maps to a de-duplicated list of values, or None when nothing
    was found.
    """
    extracted: Dict = {}
    if 'ip' in fields:
        unique_ips = list({ip for ip in recursive_find(IPADDRESS_KEYS, data)})
        extracted["ip"] = unique_ips or None
    if 'fqdn' in fields:
        # Keep only dotted names; bare host labels are not FQDNs.
        unique_fqdns = list({fq for fq in recursive_find(FQDN_KEYS, data) if '.' in fq})
        extracted["fqdn"] = unique_fqdns or None
    return extracted
''' COMMAND FUNCTION '''
def attribution_command(args: Dict[str, Any]) -> CommandResults:
    """Build attribution entries for Prisma Cloud assets.

    Args:
        args: Demisto script args. 'assets' is a list of asset dicts (each
            must carry an 'rrn'); 'fields' is a comma-separated list of the
            fields to copy into the output.

    Returns:
        CommandResults with one entry per asset under
        PrismaCloud.Attribution, keyed by rrn.
    """
    assets = argToList(args.get('assets', []))
    fields = argToList(
        args.get('fields', 'id,cloudType,resourceName,resourceType,regionId,accountId,accountName,hasAlert,service,ip,fqdn'))
    asset_dict: Dict[str, Dict[str, Any]] = {}
    for asset in assets:
        # Skip malformed entries and anything without an rrn identifier.
        if not isinstance(asset, dict) or 'rrn' not in asset:
            continue
        rrn = asset['rrn']
        entry: Dict[str, Any] = {'rrn': rrn}
        for key, value in asset.items():
            if key == 'name' and 'resourceName' in fields:
                # The API calls it 'name'; the output contract uses 'resourceName'.
                entry['resourceName'] = value
            elif key == 'data' and isinstance(value, dict):
                # Raw cloud payload: mine it for ip/fqdn facets.
                entry.update(handle_data(value, fields))
            elif key in fields:
                entry[key] = value
        asset_dict[rrn] = entry
    return CommandResults(
        outputs=list(asset_dict.values()),
        outputs_prefix="PrismaCloud.Attribution",
        outputs_key_field="rrn"
    )
''' MAIN FUNCTION '''
def main():
    """Script entry point: run the attribution command and report results.

    Any exception is logged with its traceback and surfaced to the war room
    via return_error.
    """
    try:
        return_results(attribution_command(demisto.args()))
    except Exception as ex:
        demisto.error(traceback.format_exc())  # print the traceback
        return_error(f'Failed to execute PrismaCloudAttribution. Error: {str(ex)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main() | Packs/PrismaCloud/Scripts/PrismaCloudAttribution/PrismaCloudAttribution.py | import demistomock as demisto
from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import
from CommonServerUserPython import * # noqa
"""PrismaCloudAttribution
"""
from typing import Dict, List, Any, Iterable, Union
import traceback
IPADDRESS_KEYS = ['publicIpAddress', 'natIP', 'publicIp', 'inboundIpAddress', 'ipAddress', 'IPAddress']
FQDN_KEYS = ['publicDnsName', 'dnsname', 'domainName', 'name', 'dnsName', 'hostName', 'properties.hostName', 'fqdn',
'enabledHostNames', 'web']
''' STANDALONE FUNCTION '''
def recursive_find(keys: Union[List[str], str], value: Iterable[Any]) -> Iterable[Any]:
if not isinstance(keys, list):
keys = [keys]
for k, v in (value.items() if isinstance(value, dict) else
enumerate(value) if isinstance(value, list) else []):
if k in keys:
yield v
elif isinstance(v, list):
for result in recursive_find(keys, v):
yield result
elif isinstance(v, dict):
for result in recursive_find(keys, v):
yield result
def handle_data(data: Dict[str, Any], fields: List[str]) -> Dict[str, Any]:
out_dict: Dict = {}
if 'ip' in fields:
ips = list(set(recursive_find(IPADDRESS_KEYS, data)))
out_dict["ip"] = ips if ips else None
if 'fqdn' in fields:
fqdns = list(set([fq for fq in recursive_find(FQDN_KEYS, data) if fq.count('.') > 0]))
out_dict["fqdn"] = fqdns if fqdns else None
return out_dict
''' COMMAND FUNCTION '''
def attribution_command(args: Dict[str, Any]) -> CommandResults:
assets = argToList(args.get('assets', []))
fields = argToList(
args.get('fields', 'id,cloudType,resourceName,resourceType,regionId,accountId,accountName,hasAlert,service,ip,fqdn'))
asset_dict: Dict[str, Dict[str, Any]] = {}
for asset in assets:
if not isinstance(asset, dict):
continue
if 'rrn' not in asset:
continue
rrn = asset['rrn']
asset_dict[rrn] = {'rrn': rrn}
for k in asset.keys():
if k == 'name' and 'resourceName' in fields:
asset_dict[rrn]['resourceName'] = asset['name']
elif k == 'data' and isinstance(asset[k], dict):
asset_dict[rrn].update(handle_data(asset[k], fields))
elif k in fields:
asset_dict[rrn][k] = asset[k]
return CommandResults(
outputs=list(asset_dict.values()),
outputs_prefix="PrismaCloud.Attribution",
outputs_key_field="rrn"
)
''' MAIN FUNCTION '''
def main():
try:
return_results(attribution_command(demisto.args()))
except Exception as ex:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute PrismaCloudAttribution. Error: {str(ex)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main() | 0.522202 | 0.131982 |
import sys
from unittest import skip
from django.core.management.commands import test
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.module_loading import import_by_path
class Command(test.Command):
    """Test command that defers to an earlier app's own test command."""

    def handle(self, *args, **kwargs):
        """Run tests via the nearest app before 'djangae' in INSTALLED_APPS
        that defines a test command (eg. for South support), monkey-patching
        unsupported tests first when on the App Engine datastore backend.
        """
        installed = settings.INSTALLED_APPS[:]
        command_class = test.Command
        # Walk the apps listed before 'djangae', nearest first, and take the
        # first one that provides its own `test` management command.
        for app in reversed(installed[:installed.index('djangae')]):
            try:
                command_class = import_by_path('{}.management.commands.test.Command'.format(app))
            except ImproperlyConfigured:
                continue
            break
        if settings.DATABASES['default']['ENGINE'] == 'djangae.db.backends.appengine':
            _monkey_patch_unsupported_tests()
        command_class().handle(*args, **kwargs)
def _monkey_patch_unsupported_tests():
    """Skip upstream Django tests that cannot pass on the datastore backend.

    On Django 1.5 with django.contrib.auth installed, a set of auth-backend
    tests that rely on M2M joins (unsupported on the App Engine datastore)
    are wrapped in unittest.skip so the suite still runs green.
    """
    import importlib

    unsupported_tests = []
    if 'django.contrib.auth' in settings.INSTALLED_APPS:
        import django
        if django.VERSION[:2] == (1, 5):
            unsupported_tests.extend([
                # These auth tests override the AUTH_USER_MODEL setting, which then uses M2M joins
                'django.contrib.auth.tests.auth_backends.CustomPermissionsUserModelBackendTest.test_custom_perms',
                'django.contrib.auth.tests.auth_backends.CustomPermissionsUserModelBackendTest.test_get_all_superuser_permissions',
                'django.contrib.auth.tests.auth_backends.CustomPermissionsUserModelBackendTest.test_has_no_object_perm',
                'django.contrib.auth.tests.auth_backends.CustomPermissionsUserModelBackendTest.test_has_perm',
                'django.contrib.auth.tests.auth_backends.ExtensionUserModelBackendTest.test_custom_perms',
                'django.contrib.auth.tests.auth_backends.ExtensionUserModelBackendTest.test_has_perm',
                'django.contrib.auth.tests.auth_backends.ExtensionUserModelBackendTest.test_get_all_superuser_permissions',
                'django.contrib.auth.tests.auth_backends.ExtensionUserModelBackendTest.test_has_no_object_perm'
            ])
    for unsupported_test in unsupported_tests:
        module_path, klass_name, method_name = unsupported_test.rsplit(".", 2)
        # importlib.import_module imports by dotted path and returns the
        # module. The previous __import__(module_path, klass_name) passed the
        # class name where a globals dict is expected, which only worked by
        # accident for absolute imports.
        module = importlib.import_module(module_path)
        if hasattr(module, klass_name):
            klass = getattr(module, klass_name)
            method = getattr(klass, method_name)
            setattr(klass, method_name, skip("Not supported by Djangae")(method))
from unittest import skip
from django.core.management.commands import test
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.module_loading import import_by_path
class Command(test.Command):
def handle(self, *args, **kwargs):
# Look for the previous app in INSTALLED_APPS that defines a
# test command for, eg., South support.
apps = settings.INSTALLED_APPS[:]
previous_apps = reversed(apps[:apps.index('djangae')])
CommandClass = test.Command
for app in previous_apps:
try:
CommandClass = import_by_path('{}.management.commands.test.Command'.format(app))
break
except ImproperlyConfigured:
pass
if settings.DATABASES['default']['ENGINE'] == 'djangae.db.backends.appengine':
_monkey_patch_unsupported_tests()
CommandClass().handle(*args, **kwargs)
def _monkey_patch_unsupported_tests():
unsupported_tests = []
if 'django.contrib.auth' in settings.INSTALLED_APPS:
import django
if django.VERSION[:2] == (1, 5):
unsupported_tests.extend([
# These auth tests override the AUTH_USER_MODEL setting, which then uses M2M joins
'django.contrib.auth.tests.auth_backends.CustomPermissionsUserModelBackendTest.test_custom_perms',
'django.contrib.auth.tests.auth_backends.CustomPermissionsUserModelBackendTest.test_get_all_superuser_permissions',
'django.contrib.auth.tests.auth_backends.CustomPermissionsUserModelBackendTest.test_has_no_object_perm',
'django.contrib.auth.tests.auth_backends.CustomPermissionsUserModelBackendTest.test_has_perm',
'django.contrib.auth.tests.auth_backends.ExtensionUserModelBackendTest.test_custom_perms',
'django.contrib.auth.tests.auth_backends.ExtensionUserModelBackendTest.test_has_perm',
'django.contrib.auth.tests.auth_backends.ExtensionUserModelBackendTest.test_get_all_superuser_permissions',
'django.contrib.auth.tests.auth_backends.ExtensionUserModelBackendTest.test_has_no_object_perm'
])
for unsupported_test in unsupported_tests:
module_path, klass_name, method_name = unsupported_test.rsplit(".", 2)
__import__(module_path, klass_name)
module = sys.modules[module_path]
if hasattr(module, klass_name):
klass = getattr(module, klass_name)
method = getattr(klass, method_name)
setattr(klass, method_name, skip("Not supported by Djangae")(method)) | 0.227298 | 0.145874 |
import msvcrt
import time
from threading import Thread
from mc_autosplit.utils.exception import FailedToReadAdvancements
from mc_autosplit.utils.mc_utils import get_last_played_level, get_advancements
from mc_autosplit.splitting import handle_advancement_changes
from mc_autosplit.watcher import PathWatcher
class Runner:
def __init__(self):
initialised = False
while not initialised:
try:
_, self.level_path = get_last_played_level()
self.advancements = get_advancements(self.level_path)
self.advancements_watcher = PathWatcher(self.level_path / 'advancements',
self.advancement_watcher_callback)
self.user_inp_thread = Thread(target=self.watch_user_input)
initialised = True
except FileNotFoundError:
print('No levels found, sleeping for 5 seconds...')
time.sleep(5)
def reset(self, level_path=None):
self.level_path = None
while self.level_path is None:
try:
print('Resetting...')
self.level_path = level_path or get_last_played_level()[1]
except FileNotFoundError:
print('Failed to find level, sleeping for 5 seconds...')
time.sleep(5)
print('Now watching:', self.level_path)
self.advancements = get_advancements(self.level_path)
self.advancements_watcher.stop()
self.advancements_watcher = PathWatcher(self.level_path / 'advancements',
self.advancement_watcher_callback)
self.advancements_watcher.start()
def advancement_watcher_callback(self, _, __):
self.check_advancements_changed()
def _check_advancements_changed(self):
advancements_updated = get_advancements(self.level_path)
if advancements_updated != self.advancements:
new_advancements = set(advancements_updated) - set(self.advancements)
print('Detected Advancements: ', new_advancements)
handle_advancement_changes(set(self.advancements), new_advancements)
self.advancements = advancements_updated
def check_advancements_changed(self):
have_checked = False
while not have_checked:
try:
self._check_advancements_changed()
have_checked = True
except FileNotFoundError:
print('World being watched has been disappeared!')
self.reset()
have_checked = True
except FailedToReadAdvancements:
print('Failed to read advancements... trying again in 1 second')
time.sleep(1)
def watch_user_input(self):
while True:
input_char = msvcrt.getch()
if input_char == 'r':
self.reset()
def check_for_world_change(self):
try:
_, last_played_path = get_last_played_level()
if last_played_path != self.level_path:
self.reset(last_played_path)
except FileNotFoundError:
self.reset()
def watch_saves(self):
while True:
self.check_for_world_change()
time.sleep(3)
    def run(self):
        """Start the input thread and file watcher, then block polling saves."""
        self.user_inp_thread.start()
        print('Watching advancements at:', self.level_path)
        self.advancements_watcher.start()
        self.watch_saves()
if __name__ == '__main__':
runner = Runner()
runner.run() | mc_autosplit/run.py | import msvcrt
import time
from threading import Thread
from mc_autosplit.utils.exception import FailedToReadAdvancements
from mc_autosplit.utils.mc_utils import get_last_played_level, get_advancements
from mc_autosplit.splitting import handle_advancement_changes
from mc_autosplit.watcher import PathWatcher
class Runner:
def __init__(self):
initialised = False
while not initialised:
try:
_, self.level_path = get_last_played_level()
self.advancements = get_advancements(self.level_path)
self.advancements_watcher = PathWatcher(self.level_path / 'advancements',
self.advancement_watcher_callback)
self.user_inp_thread = Thread(target=self.watch_user_input)
initialised = True
except FileNotFoundError:
print('No levels found, sleeping for 5 seconds...')
time.sleep(5)
def reset(self, level_path=None):
self.level_path = None
while self.level_path is None:
try:
print('Resetting...')
self.level_path = level_path or get_last_played_level()[1]
except FileNotFoundError:
print('Failed to find level, sleeping for 5 seconds...')
time.sleep(5)
print('Now watching:', self.level_path)
self.advancements = get_advancements(self.level_path)
self.advancements_watcher.stop()
self.advancements_watcher = PathWatcher(self.level_path / 'advancements',
self.advancement_watcher_callback)
self.advancements_watcher.start()
def advancement_watcher_callback(self, _, __):
self.check_advancements_changed()
def _check_advancements_changed(self):
advancements_updated = get_advancements(self.level_path)
if advancements_updated != self.advancements:
new_advancements = set(advancements_updated) - set(self.advancements)
print('Detected Advancements: ', new_advancements)
handle_advancement_changes(set(self.advancements), new_advancements)
self.advancements = advancements_updated
def check_advancements_changed(self):
have_checked = False
while not have_checked:
try:
self._check_advancements_changed()
have_checked = True
except FileNotFoundError:
print('World being watched has been disappeared!')
self.reset()
have_checked = True
except FailedToReadAdvancements:
print('Failed to read advancements... trying again in 1 second')
time.sleep(1)
def watch_user_input(self):
while True:
input_char = msvcrt.getch()
if input_char == 'r':
self.reset()
def check_for_world_change(self):
try:
_, last_played_path = get_last_played_level()
if last_played_path != self.level_path:
self.reset(last_played_path)
except FileNotFoundError:
self.reset()
def watch_saves(self):
while True:
self.check_for_world_change()
time.sleep(3)
def run(self):
self.user_inp_thread.start()
print('Watching advancements at:', self.level_path)
self.advancements_watcher.start()
self.watch_saves()
if __name__ == '__main__':
runner = Runner()
runner.run() | 0.240329 | 0.079746 |
import os
import shutil
import sys
import tempfile
import django
from pathlib2 import Path
# Path to the temp mezzanine project folder
TMP_PATH = Path(tempfile.mkdtemp()) / "project_template"
# Injected at the bottom of local_settings.py
TEST_SETTINGS = """
# START INJECTED SETTINGS
INSTALLED_APPS = list(INSTALLED_APPS)
if "mezzanine.accounts" not in INSTALLED_APPS:
INSTALLED_APPS.append("mezzanine.accounts")
INSTALLED_APPS.append("mezzanine_seminars")
# Use the MD5 password hasher by default for quicker test runs.
PASSWORD_HASHERS = ('django.contrib.auth.hashers.MD5PasswordHasher',)
# END INJECTED SETTINGS
"""
# Injected at the bottom of urls.py
TEST_URLS = """
# START INJECTED URLCONFIG
urlpatterns = list(urlpatterns)
urlpatterns.insert(
0, url("^seminars/", include("mezzanine_seminars.urls", namespace="seminars"))
)
# END INJECTED URLCONFIG
"""
def after_django_setup():
    """
    Runs once per testing session AFTER Django has been set up.

    Registers django-dynamic-fixture defaults for Seminar: site=None avoids
    creating extra Site rows, featured_image="" skips file-field handling.
    """
    from ddf import teach
    from mezzanine_seminars.models import Seminar
    # When creating Seminars we don't want to create extra sites
    teach(Seminar, site=None, featured_image="")
def pytest_report_header(config):
    """Show the temp mezzanine project path in pytest's report header."""
    return "mezzanine proj (tmp): " + str(TMP_PATH)
def pytest_configure():
    """
    Hack the `project_template` dir into an actual project to test against.

    Copies Mezzanine's bundled project template into a temp dir, injects the
    test settings and URLconf blocks, then boots Django against the result.
    """
    from mezzanine.utils.importing import path_for_import
    template_path = Path(path_for_import("mezzanine")) / "project_template"
    shutil.copytree(str(template_path), str(TMP_PATH))
    proj_path = TMP_PATH / "project_name"
    # Settings: append the injected block to the template's local_settings
    local_settings = (proj_path / "local_settings.py.template").read_text()
    (proj_path / "local_settings.py").write_text(local_settings + TEST_SETTINGS)
    # URLs: append the injected urlpatterns block
    urls = (proj_path / "urls.py").read_text()
    (proj_path / "urls.py").write_text(urls + TEST_URLS)
    # Setup the environment for Django
    sys.path.insert(0, str(TMP_PATH))
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project_name.settings")
    django.setup()
    after_django_setup()
def pytest_unconfigure():
"""
Remove the temporary folder
"""
try:
shutil.rmtree(str(TMP_PATH))
except OSError:
pass | tests/conftest.py | import os
import shutil
import sys
import tempfile
import django
from pathlib2 import Path
# Path to the temp mezzanine project folder
TMP_PATH = Path(tempfile.mkdtemp()) / "project_template"
# Injected at the bottom of local_settings.py
TEST_SETTINGS = """
# START INJECTED SETTINGS
INSTALLED_APPS = list(INSTALLED_APPS)
if "mezzanine.accounts" not in INSTALLED_APPS:
INSTALLED_APPS.append("mezzanine.accounts")
INSTALLED_APPS.append("mezzanine_seminars")
# Use the MD5 password hasher by default for quicker test runs.
PASSWORD_HASHERS = ('django.contrib.auth.hashers.MD5PasswordHasher',)
# END INJECTED SETTINGS
"""
# Injected at the bottom of urls.py
TEST_URLS = """
# START INJECTED URLCONFIG
urlpatterns = list(urlpatterns)
urlpatterns.insert(
0, url("^seminars/", include("mezzanine_seminars.urls", namespace="seminars"))
)
# END INJECTED URLCONFIG
"""
def after_django_setup():
"""
Runs once per testing session AFTER Django has been set up.
"""
from ddf import teach
from mezzanine_seminars.models import Seminar
# When creating Seminars we don't want to create extra sites
teach(Seminar, site=None, featured_image="")
def pytest_report_header(config):
"""
Have pytest report the path of the project folder
"""
return "mezzanine proj (tmp): {}".format(TMP_PATH)
def pytest_configure():
"""
Hack the `project_template` dir into an actual project to test against.
"""
from mezzanine.utils.importing import path_for_import
template_path = Path(path_for_import("mezzanine")) / "project_template"
shutil.copytree(str(template_path), str(TMP_PATH))
proj_path = TMP_PATH / "project_name"
# Settings
local_settings = (proj_path / "local_settings.py.template").read_text()
(proj_path / "local_settings.py").write_text(local_settings + TEST_SETTINGS)
# URLs
urls = (proj_path / "urls.py").read_text()
(proj_path / "urls.py").write_text(urls + TEST_URLS)
# Setup the environment for Django
sys.path.insert(0, str(TMP_PATH))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project_name.settings")
django.setup()
after_django_setup()
def pytest_unconfigure():
"""
Remove the temporary folder
"""
try:
shutil.rmtree(str(TMP_PATH))
except OSError:
pass | 0.227298 | 0.126704 |
import unittest
from uuid import uuid4
import docker
from sabotage import disconnected
class TestDisconnected(unittest.TestCase):
def _create_network(self, name=None, driver="bridge"):
network = self.client.networks.create(name if name else str(uuid4()), driver=driver)
self.networks += [network]
return network
    def _create_container(self, network, name=None, labels=None):
        """Create (but do not start) a long-sleeping alpine container on *network*.

        The sh trap keeps PID 1 responsive to TERM/INT so `docker stop` is
        fast. Created containers are tracked for teardown.
        """
        container = self.client.containers.create("alpine:3.9.2",
                                                  name=name if name else str(uuid4()),
                                                  network=network,
                                                  command="sh -c 'trap : TERM INT; sleep 3600 & wait'",
                                                  labels=labels if labels else {})
        self.containers += [container]
        return container
    def _ping(self, container, host):
        """Return 'OK' if *host* answers one ping from inside *container*, else 'FAIL'."""
        command = "sh -c 'ping -q -c 1 -w 1 {host} > /dev/null && echo -n OK || echo -n FAIL'"
        result = container.exec_run(command.format(host=host))
        # Older docker-py versions return raw bytes; newer ones return an
        # ExecResult with an .output attribute — handle both.
        if isinstance(result, bytes):
            return result.decode()
        return result.output.decode()
    def setUp(self):
        """Create a fresh docker client and empty resource-tracking lists."""
        self.client = docker.from_env()
        self.networks = []
        self.containers = []
    def tearDown(self):
        """Stop containers, remove networks, close the client connection."""
        # NOTE(review): containers are stopped but never removed here —
        # confirm stopped containers are cleaned up elsewhere (or intended).
        for container in self.containers:
            container.stop()
        for network in self.networks:
            network.remove()
        self.client.api.close()
    def test_disconnected(self):
        """Target container is unreachable only inside the disconnected() context."""
        network = self._create_network()
        container1 = self._create_container(network.id)
        container2 = self._create_container(network.id)
        container1.start()
        container2.start()
        # Sanity check: reachable before sabotage.
        res = self._ping(container2, container1.name)
        self.assertEqual(res, "OK")
        with disconnected(container1.name):
            res = self._ping(container2, container1.name)
            self.assertEqual(res, "FAIL")
        # Connectivity is restored on context exit.
        res = self._ping(container2, container1.name)
        self.assertEqual(res, "OK")
def test_disconnected_with_compose_labels(self):
network = self._create_network()
service_name, project_name = str(uuid4()), str(uuid4())
container1 = self._create_container(name=service_name, network=network.id, labels={
"com.docker.compose.project": project_name,
"com.docker.compose.service": service_name,
"com.docker.compose.container-number": "1",
})
container2 = self._create_container(network=network.id)
container1.start()
container2.start()
with disconnected(service_name, project_name=project_name):
res = self._ping(container2, container1.name)
self.assertEqual(res, "FAIL")
res = self._ping(container2, container1.name)
self.assertEqual(res, "OK") | tests/test_disconnected.py | import unittest
from uuid import uuid4
import docker
from sabotage import disconnected
class TestDisconnected(unittest.TestCase):
def _create_network(self, name=None, driver="bridge"):
network = self.client.networks.create(name if name else str(uuid4()), driver=driver)
self.networks += [network]
return network
def _create_container(self, network, name=None, labels=None):
container = self.client.containers.create("alpine:3.9.2",
name=name if name else str(uuid4()),
network=network,
command="sh -c 'trap : TERM INT; sleep 3600 & wait'",
labels=labels if labels else {})
self.containers += [container]
return container
def _ping(self, container, host):
command = "sh -c 'ping -q -c 1 -w 1 {host} > /dev/null && echo -n OK || echo -n FAIL'"
result = container.exec_run(command.format(host=host))
if isinstance(result, bytes):
return result.decode()
return result.output.decode()
def setUp(self):
self.client = docker.from_env()
self.networks = []
self.containers = []
def tearDown(self):
for container in self.containers:
container.stop()
for network in self.networks:
network.remove()
self.client.api.close()
def test_disconnected(self):
network = self._create_network()
container1 = self._create_container(network.id)
container2 = self._create_container(network.id)
container1.start()
container2.start()
res = self._ping(container2, container1.name)
self.assertEqual(res, "OK")
with disconnected(container1.name):
res = self._ping(container2, container1.name)
self.assertEqual(res, "FAIL")
res = self._ping(container2, container1.name)
self.assertEqual(res, "OK")
def test_disconnected_with_compose_labels(self):
network = self._create_network()
service_name, project_name = str(uuid4()), str(uuid4())
container1 = self._create_container(name=service_name, network=network.id, labels={
"com.docker.compose.project": project_name,
"com.docker.compose.service": service_name,
"com.docker.compose.container-number": "1",
})
container2 = self._create_container(network=network.id)
container1.start()
container2.start()
with disconnected(service_name, project_name=project_name):
res = self._ping(container2, container1.name)
self.assertEqual(res, "FAIL")
res = self._ping(container2, container1.name)
self.assertEqual(res, "OK") | 0.382487 | 0.157752 |
import datetime
import numpy as np
import pandas as pd
from pyspark.sql import functions
from cape_privacy.spark import dtypes
from cape_privacy.spark import utils
from cape_privacy.spark.transformations import rounding as rnd
# Utils
def _make_and_apply_rounder(sess, df, dtype, precision):
    """Round column 'data' of *df* with NumericRounding and return a pandas frame."""
    df = sess.createDataFrame(df, schema=["data"])
    rounder = rnd.NumericRounding(dtype, precision)
    result_df = df.select(rounder(functions.col("data")))
    return result_df.toPandas()
def _make_float_data(dtype, precision=0, scale=0.1):
data = np.arange(6, dtype=dtype).reshape((6, 1))
delta = data * scale
expected = np.around(data + delta, decimals=precision)
test_df = pd.DataFrame(data + delta, columns=["data"])
return test_df, expected
def _make_integer_data(dtype, precision):
data = np.array([123, 1234, 12345, 123456], dtype=dtype).reshape((4, 1))
expected = np.around(data, precision)
test_df = pd.DataFrame(data, columns=["data"])
return test_df, expected
def _make_date_data(sess):
    """Single-row date frame plus the expected month-truncated value."""
    df = sess.createDataFrame([("1997-02-28",)], ["data"])
    # Month truncation should land on the first day of February.
    expected = np.array(datetime.date(1997, 2, 1))
    return df, expected
def _make_datetime_data(sess):
    """Single-row timestamp frame plus the expected month-truncated value."""
    df = sess.createDataFrame([("1997-02-28 05:02:11",)], ["data"])
    # Month truncation zeroes the day-of-month (to 1) and the time part.
    expected = np.array(datetime.datetime(1997, 2, 1, 0, 0, 0))
    return df, expected
# Tests
def test_rounding_float():
    """NumericRounding on a float32 column matches np.around."""
    precision = 0
    sess = utils.make_session("test.rounding.float")
    test_df, expected = _make_float_data(np.float32, precision)
    result_df = _make_and_apply_rounder(sess, test_df, dtypes.Float, precision)
    result = result_df.values
    assert result.dtype == expected.dtype
    np.testing.assert_almost_equal(result, expected)
def test_rounding_double():
    """NumericRounding on a float64 column matches np.around."""
    precision = 0
    sess = utils.make_session("test.rounding.double")
    test_df, expected = _make_float_data(np.float64, precision)
    result_df = _make_and_apply_rounder(sess, test_df, dtypes.Double, precision)
    result = result_df.values
    assert result.dtype == expected.dtype
    np.testing.assert_almost_equal(result, expected)
def test_rounding_integer():
    """NumericRounding on an int32 column matches np.around at precision -2."""
    precision = -2
    sess = utils.make_session("test.rounding.integer")
    test_df, expected = _make_integer_data(np.int32, precision)
    result_df = _make_and_apply_rounder(sess, test_df, dtypes.Integer, precision)
    result = result_df.values
    assert result.dtype == expected.dtype
    np.testing.assert_almost_equal(result, expected)
def test_rounding_long():
    """NumericRounding on an int64 column matches np.around at precision -2."""
    precision = -2
    # Fixed: the session name previously read "test.rounding.integer",
    # copy-pasted from the test above; give the long test its own name.
    sess = utils.make_session("test.rounding.long")
    test_df, expected = _make_integer_data(np.int64, precision)
    result_df = _make_and_apply_rounder(sess, test_df, dtypes.Long, precision)
    result = result_df.values
    assert result.dtype == expected.dtype
    np.testing.assert_almost_equal(result, expected)
def test_truncate_date():
sess = utils.make_session("test.truncation.date")
test_df, expected = _make_date_data(sess)
truncate = rnd.DateTruncation("month")
result_df = test_df.select(truncate(test_df.data)).toPandas()
result = result_df.values
assert result.dtype == expected.dtype
np.testing.assert_equal(result, expected) | cape_privacy/spark/transformations/rounding_test.py | import datetime
import numpy as np
import pandas as pd
from pyspark.sql import functions
from cape_privacy.spark import dtypes
from cape_privacy.spark import utils
from cape_privacy.spark.transformations import rounding as rnd
# Utils
def _make_and_apply_rounder(sess, df, dtype, precision):
df = sess.createDataFrame(df, schema=["data"])
rounder = rnd.NumericRounding(dtype, precision)
result_df = df.select(rounder(functions.col("data")))
return result_df.toPandas()
def _make_float_data(dtype, precision=0, scale=0.1):
data = np.arange(6, dtype=dtype).reshape((6, 1))
delta = data * scale
expected = np.around(data + delta, decimals=precision)
test_df = pd.DataFrame(data + delta, columns=["data"])
return test_df, expected
def _make_integer_data(dtype, precision):
data = np.array([123, 1234, 12345, 123456], dtype=dtype).reshape((4, 1))
expected = np.around(data, precision)
test_df = pd.DataFrame(data, columns=["data"])
return test_df, expected
def _make_date_data(sess):
df = sess.createDataFrame([("1997-02-28",)], ["data"])
expected = np.array(datetime.date(1997, 2, 1))
return df, expected
def _make_datetime_data(sess):
df = sess.createDataFrame([("1997-02-28 05:02:11",)], ["data"])
expected = np.array(datetime.datetime(1997, 2, 1, 0, 0, 0))
return df, expected
# Tests
def test_rounding_float():
precision = 0
sess = utils.make_session("test.rounding.float")
test_df, expected = _make_float_data(np.float32, precision)
result_df = _make_and_apply_rounder(sess, test_df, dtypes.Float, precision)
result = result_df.values
assert result.dtype == expected.dtype
np.testing.assert_almost_equal(result, expected)
def test_rounding_double():
precision = 0
sess = utils.make_session("test.rounding.double")
test_df, expected = _make_float_data(np.float64, precision)
result_df = _make_and_apply_rounder(sess, test_df, dtypes.Double, precision)
result = result_df.values
assert result.dtype == expected.dtype
np.testing.assert_almost_equal(result, expected)
def test_rounding_integer():
precision = -2
sess = utils.make_session("test.rounding.integer")
test_df, expected = _make_integer_data(np.int32, precision)
result_df = _make_and_apply_rounder(sess, test_df, dtypes.Integer, precision)
result = result_df.values
assert result.dtype == expected.dtype
np.testing.assert_almost_equal(result, expected)
def test_rounding_long():
precision = -2
sess = utils.make_session("test.rounding.integer")
test_df, expected = _make_integer_data(np.int64, precision)
result_df = _make_and_apply_rounder(sess, test_df, dtypes.Long, precision)
result = result_df.values
assert result.dtype == expected.dtype
np.testing.assert_almost_equal(result, expected)
def test_truncate_date():
sess = utils.make_session("test.truncation.date")
test_df, expected = _make_date_data(sess)
truncate = rnd.DateTruncation("month")
result_df = test_df.select(truncate(test_df.data)).toPandas()
result = result_df.values
assert result.dtype == expected.dtype
np.testing.assert_equal(result, expected) | 0.798226 | 0.688907 |
import struct
import os
from pathlib import Path
import opencc
startPy = 0x1540  # offset of the pinyin table within the .scel file
startChinese = 0x2628  # offset of the Chinese word-group table
GPy_Table = {}  # global pinyin table: uint16 index -> pinyin string
GTable = []  # parse results: list of (frequency, pinyin, Chinese word) tuples
def byte2str(data):
    """Decode a buffer of 16-bit code units to str, skipping NUL padding.

    Fields in .scel files are fixed-width arrays of 16-bit code units
    (native byte order — effectively UTF-16LE on little-endian hosts)
    padded with 0x0000.
    """
    # Fixed: the original shadowed the builtin `str` and built the result
    # with repeated `+=` (quadratic); collect chars and join once instead.
    chars = []
    pos = 0
    while pos < len(data):
        code = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]
        if code != 0:
            chars.append(chr(code))
        pos += 2
    return ''.join(chars)
# Build the global pinyin lookup table
def get_py_table(data):
    """Parse the pinyin table section into the global GPy_Table.

    Layout after a 4-byte header: repeated records of
    (uint16 index, uint16 byte-length, 16-bit-encoded pinyin string).
    """
    data = data[4:]
    pos = 0
    while pos < len(data):
        index = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]
        pos += 2
        lenPy = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]
        pos += 2
        py = byte2str(data[pos:pos + lenPy])
        GPy_Table[index] = py
        pos += lenPy
# Resolve one word's pinyin from its index sequence
def get_word_py(data):
    """Concatenate pinyin syllables for a sequence of uint16 table indices."""
    pos = 0
    ret = ''
    while pos < len(data):
        index = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]
        ret += GPy_Table[index]
        pos += 2
    return ret
# Parse the Chinese word-group table
def get_chinese(data):
    """Parse word records into the global GTable as (count, pinyin, word).

    Each record: uint16 homophone count, uint16 pinyin-index length, the
    pinyin index bytes, then per homophone: uint16 word byte-length, the
    16-bit-encoded word, uint16 extension length, uint16 frequency, and
    the rest of the extension block (skipped).
    """
    pos = 0
    while pos < len(data):
        # number of homophones sharing this pinyin
        same = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]
        # length of the pinyin index table
        pos += 2
        py_table_len = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]
        # pinyin index table
        pos += 2
        py = get_word_py(data[pos: pos + py_table_len])
        # Chinese words follow
        pos += py_table_len
        for i in range(same):
            # word length in bytes
            c_len = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]
            # the word itself
            pos += 2
            word = byte2str(data[pos: pos + c_len])
            # extension-data length
            pos += c_len
            ext_len = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]
            # frequency: first uint16 of the extension block
            pos += 2
            count = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]
            # save the record
            GTable.append((count, py, word))
            # jump over the extension block to the next word
            pos += ext_len
def scel2txt(file_name):
    """Parse one Sogou .scel file: print its metadata and append all
    (count, pinyin, word) records to the global GTable."""
    file_name = os.path.abspath(file_name)
    # visual separator between files
    print("-" * 60)
    # read the whole file into memory
    with open(file_name, 'rb') as f:
        data = f.read()
    # Fixed-offset metadata fields (16-bit-encoded text)
    print("词库名:", byte2str(data[0x130:0x338]))  # .encode('GB18030')
    print("词库类型:", byte2str(data[0x338:0x540]))
    print("描述信息:", byte2str(data[0x540:0xd40]))
    print("词库示例:", byte2str(data[0xd40:startPy]))
    get_py_table(data[startPy:startChinese])
    get_chinese(data[startChinese:])
def deduplication(input_filename, output_filename):
    """Read 'count pinyin word' lines, convert words to simplified Chinese,
    deduplicate, and write one word per line to *output_filename*.

    Uses OpenCC's tw2s profile so traditional-Chinese entries collapse onto
    their simplified forms before deduplication.
    """
    words = []
    converter = opencc.OpenCC("tw2s.json")
    # Fixed: the original rebound the `input_filename` parameter to the
    # list of lines, which was confusing; use a separate name.
    with open(input_filename, "r", encoding="utf-8") as fin:
        lines = fin.readlines()
    for line in lines:
        # Fixed: maxsplit keeps spaces inside the word field intact; the
        # original 3-way unpack raised ValueError on such lines.
        _, _, word = line.split(" ", 2)
        word = converter.convert(word.strip())
        if word == "":
            continue
        words.append(word)
    print("去重前词汇表数量为:" + str(len(words)))
    words = list(set(words))
    print("去重后词汇表数量为:" + str(len(words)))
    with open(output_filename, "w", encoding="utf-8") as fout:
        fout.write("\n".join(words))
if __name__ == '__main__':
# scel所在文件夹路径
in_dir_path = "../Data/vocab/scel"
file_names = [
# os.path.join(in_dir_path, "日本地名第一版.scel"),
# os.path.join(in_dir_path, "765个世界主要城市.scel"),
# os.path.join(in_dir_path, "世界所有国家及其首都.scel"),
# os.path.join(in_dir_path, "台北市城市信息精选.scel"),
# os.path.join(in_dir_path, "基隆市城市信息精选.scel"),
# os.path.join(in_dir_path, "高雄市城市信息精选.scel"),
# os.path.join(in_dir_path, "台南市城市信息精选.scel"),
# os.path.join(in_dir_path, "全国各地省市名.scel"),
os.path.join(in_dir_path, "机关团体公文写作开拓思路常用词库.scel"),
# os.path.join(in_dir_path, "全国县及县以上行政区划地名.scel")
]
for f in file_names:
scel2txt(f)
output_dir_path = "../Data/"
output_filename = os.path.join(os.path.abspath(output_dir_path), "vocab", "公文写作.txt")
output_path = Path(output_filename)
output = []
for count, py, word in GTable:
output.append(str(count) + ' ' + py + ' ' + word)
output_path.write_text("\n".join(output))
print("-" * 60)
# 去重
vocab_filename = "公文写作.txt"
deduplication(output_filename, output_filename) | Code/scel2txt.py | import struct
import os
from pathlib import Path
import opencc
startPy = 0x1540 # 拼音表偏移
startChinese = 0x2628 # 汉语词组表偏移
GPy_Table = {} # 全局拼音表
GTable = [] # 解析结果, 元组(词频,拼音,中文词组)的列表
# 原始字节码转为字符串
def byte2str(data):
pos = 0
str = ''
while pos < len(data):
c = chr(struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0])
if c != chr(0):
str += c
pos += 2
return str
# 获取拼音表
def get_py_table(data):
data = data[4:]
pos = 0
while pos < len(data):
index = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]
pos += 2
lenPy = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]
pos += 2
py = byte2str(data[pos:pos + lenPy])
GPy_Table[index] = py
pos += lenPy
# 获取一个词组的拼音
def get_word_py(data):
pos = 0
ret = ''
while pos < len(data):
index = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]
ret += GPy_Table[index]
pos += 2
return ret
# 读取中文表
def get_chinese(data):
pos = 0
while pos < len(data):
# 同音词数量
same = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]
# 拼音索引表长度
pos += 2
py_table_len = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]
# 拼音索引表
pos += 2
py = get_word_py(data[pos: pos + py_table_len])
# 中文词组
pos += py_table_len
for i in range(same):
# 中文词组长度
c_len = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]
# 中文词组
pos += 2
word = byte2str(data[pos: pos + c_len])
# 扩展数据长度
pos += c_len
ext_len = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]
# 词频
pos += 2
count = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]
# 保存
GTable.append((count, py, word))
# 到下个词的偏移位置
pos += ext_len
def scel2txt(file_name):
file_name = os.path.abspath(file_name)
# 分隔符
print("-" * 60)
# 读取文件
with open(file_name, 'rb') as f:
data = f.read()
print("词库名:", byte2str(data[0x130:0x338])) # .encode('GB18030')
print("词库类型:", byte2str(data[0x338:0x540]))
print("描述信息:", byte2str(data[0x540:0xd40]))
print("词库示例:", byte2str(data[0xd40:startPy]))
get_py_table(data[startPy:startChinese])
get_chinese(data[startChinese:])
def deduplication(input_filename, output_filename):
result = []
converter = opencc.OpenCC("tw2s.json")
with open(input_filename, "r", encoding="utf-8") as fin:
input_filename = fin.readlines()
for i in input_filename:
_, _, word = i.split(" ")
word = word.strip()
word = converter.convert(word)
if word == "":
continue
result.append(word)
print("去重前词汇表数量为:" + str(len(result)))
result = list(set(result))
print("去重后词汇表数量为:" + str(len(result)))
with open(output_filename, "w", encoding="utf-8") as fout:
res = []
for word in result:
res.append(str(word))
fout.write("\n".join(res))
if __name__ == '__main__':
# scel所在文件夹路径
in_dir_path = "../Data/vocab/scel"
file_names = [
# os.path.join(in_dir_path, "日本地名第一版.scel"),
# os.path.join(in_dir_path, "765个世界主要城市.scel"),
# os.path.join(in_dir_path, "世界所有国家及其首都.scel"),
# os.path.join(in_dir_path, "台北市城市信息精选.scel"),
# os.path.join(in_dir_path, "基隆市城市信息精选.scel"),
# os.path.join(in_dir_path, "高雄市城市信息精选.scel"),
# os.path.join(in_dir_path, "台南市城市信息精选.scel"),
# os.path.join(in_dir_path, "全国各地省市名.scel"),
os.path.join(in_dir_path, "机关团体公文写作开拓思路常用词库.scel"),
# os.path.join(in_dir_path, "全国县及县以上行政区划地名.scel")
]
for f in file_names:
scel2txt(f)
output_dir_path = "../Data/"
output_filename = os.path.join(os.path.abspath(output_dir_path), "vocab", "公文写作.txt")
output_path = Path(output_filename)
output = []
for count, py, word in GTable:
output.append(str(count) + ' ' + py + ' ' + word)
output_path.write_text("\n".join(output))
print("-" * 60)
# 去重
vocab_filename = "公文写作.txt"
deduplication(output_filename, output_filename) | 0.154089 | 0.377483 |
from urllib.parse import quote_plus
from openai import api_requestor, util, error
from openai.api_resources.abstract import (
CreateableAPIResource,
ListableAPIResource,
nested_resource_class_methods,
)
from openai.api_resources.abstract.deletable_api_resource import DeletableAPIResource
from openai.openai_response import OpenAIResponse
from openai.util import ApiType
@nested_resource_class_methods("event", operations=["list"])
class FineTune(ListableAPIResource, CreateableAPIResource, DeletableAPIResource):
OBJECT_NAME = "fine-tunes"
    @classmethod
    def cancel(
        cls,
        id,
        api_key=None,
        api_type=None,
        request_id=None,
        api_version=None,
        **params
    ):
        """Cancel a fine-tune job by id.

        Builds the provider-specific cancel URL (Azure embeds the
        api-version query parameter; OpenAI uses /<id>/cancel) and POSTs
        to it. Raises error.InvalidAPIType for unknown providers.
        """
        base = cls.class_url()
        extn = quote_plus(id)
        typed_api_type, api_version = cls._get_api_type_and_version(api_type, api_version)
        if typed_api_type == ApiType.AZURE:
            url = "/%s%s/%s/cancel?api-version=%s" % (cls.azure_api_prefix, base, extn, api_version)
        elif typed_api_type == ApiType.OPEN_AI:
            url = "%s/%s/cancel" % (base, extn)
        else:
            raise error.InvalidAPIType('Unsupported API type %s' % api_type)
        instance = cls(id, api_key, **params)
        return instance.request("post", url, request_id=request_id)
@classmethod
def stream_events(
cls,
id,
api_key=None,
api_base=None,
api_type=None,
request_id=None,
api_version=None,
organization=None,
**params,
):
base = cls.class_url()
extn = quote_plus(id)
requestor = api_requestor.APIRequestor(
api_key,
api_base=api_base,
api_type=api_type,
api_version=api_version,
organization=organization,
)
typed_api_type, api_version = cls._get_api_type_and_version(api_type, api_version)
if typed_api_type == ApiType.AZURE:
url = "/%s%s/%s/events?stream=true&api-version=%s" % (cls.azure_api_prefix, base, extn, api_version)
elif typed_api_type == ApiType.OPEN_AI:
url = "%s/%s/events?stream=true" % (base, extn)
else:
raise error.InvalidAPIType('Unsupported API type %s' % api_type)
response, _, api_key = requestor.request(
"get", url, params, stream=True, request_id=request_id
)
assert not isinstance(response, OpenAIResponse) # must be an iterator
return (
util.convert_to_openai_object(
line,
api_key,
api_version,
organization,
)
for line in response
) | openai/api_resources/fine_tune.py | from urllib.parse import quote_plus
from openai import api_requestor, util, error
from openai.api_resources.abstract import (
CreateableAPIResource,
ListableAPIResource,
nested_resource_class_methods,
)
from openai.api_resources.abstract.deletable_api_resource import DeletableAPIResource
from openai.openai_response import OpenAIResponse
from openai.util import ApiType
@nested_resource_class_methods("event", operations=["list"])
class FineTune(ListableAPIResource, CreateableAPIResource, DeletableAPIResource):
OBJECT_NAME = "fine-tunes"
@classmethod
def cancel(
cls,
id,
api_key=None,
api_type=None,
request_id=None,
api_version=None,
**params
):
base = cls.class_url()
extn = quote_plus(id)
typed_api_type, api_version = cls._get_api_type_and_version(api_type, api_version)
if typed_api_type == ApiType.AZURE:
url = "/%s%s/%s/cancel?api-version=%s" % (cls.azure_api_prefix, base, extn, api_version)
elif typed_api_type == ApiType.OPEN_AI:
url = "%s/%s/cancel" % (base, extn)
else:
raise error.InvalidAPIType('Unsupported API type %s' % api_type)
instance = cls(id, api_key, **params)
return instance.request("post", url, request_id=request_id)
@classmethod
def stream_events(
cls,
id,
api_key=None,
api_base=None,
api_type=None,
request_id=None,
api_version=None,
organization=None,
**params,
):
base = cls.class_url()
extn = quote_plus(id)
requestor = api_requestor.APIRequestor(
api_key,
api_base=api_base,
api_type=api_type,
api_version=api_version,
organization=organization,
)
typed_api_type, api_version = cls._get_api_type_and_version(api_type, api_version)
if typed_api_type == ApiType.AZURE:
url = "/%s%s/%s/events?stream=true&api-version=%s" % (cls.azure_api_prefix, base, extn, api_version)
elif typed_api_type == ApiType.OPEN_AI:
url = "%s/%s/events?stream=true" % (base, extn)
else:
raise error.InvalidAPIType('Unsupported API type %s' % api_type)
response, _, api_key = requestor.request(
"get", url, params, stream=True, request_id=request_id
)
assert not isinstance(response, OpenAIResponse) # must be an iterator
return (
util.convert_to_openai_object(
line,
api_key,
api_version,
organization,
)
for line in response
) | 0.49707 | 0.095392 |
import sys, os
sys.path.append(os.path.dirname(os.path.abspath(__file__))+'/../')
import numpy as np
# Internal
from lib.BFPConvertor import BFPConvertor
from lib import BFPActivation
from lib import BFPFullyConnet
# PyTorch
import torch
import torch.nn as nn
import torchvision
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding (bias on: BN is folded into the conv)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=True,  # enable bias for fused BN
    )
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution (bias on: BN is folded into the conv)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=True,  # enable bias for fused BN
    )
class BasicBlock(nn.Module):
    """ResNet basic block with block-floating-point (BFP) quantized activations.

    BatchNorm layers are constructed but not applied in forward(): the convs
    carry bias=True because BN is assumed folded into the conv weights.
    Activations are re-quantized after each conv using precomputed per-layer
    exponent lists (opt_exp_act_list), consumed starting at start_exp_ind.
    """
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None, exp_bit=8, mantisa_bit=8,
                 start_exp_ind=0, opt_exp_act_list=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride=stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
        # BFP quantization parameters
        self.exp_bit = exp_bit
        self.mantisa_bit = mantisa_bit
        self.opt_exp_act_list = opt_exp_act_list
        self.start_exp_ind = start_exp_ind
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        #out = self.bn1(out)
        # BN disabled: folded into conv1 (bias=True); quantize instead
        out = BFPActivation.transform_activation_offline(out, self.exp_bit, self.mantisa_bit, self.opt_exp_act_list[self.start_exp_ind])
        out = self.relu(out)
        out = self.conv2(out)
        #out = self.bn2(out)
        out = BFPActivation.transform_activation_offline(out, self.exp_bit, self.mantisa_bit, self.opt_exp_act_list[self.start_exp_ind+1])
        if self.downsample is not None:
            residual = self.downsample(x)
            # Quantize the projected shortcut with its own exponent list so
            # it aligns with `out` before the residual add.
            residual = BFPActivation.transform_activation_offline(residual, self.exp_bit,
                                                                  self.mantisa_bit, self.opt_exp_act_list[self.start_exp_ind+2])
        out += residual
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """ResNet bottleneck block (1x1 -> 3x3 -> 1x1) with BFP-quantized activations.

    Same quantization scheme as BasicBlock: BN layers exist but are skipped in
    forward() (assumed folded into the convs), and each conv output plus the
    shortcut is re-quantized via per-layer exponent lists starting at
    start_exp_ind.
    """
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None, exp_bit=8, mantisa_bit=8,
                 start_exp_ind=0, opt_exp_act_list=None):
        super(Bottleneck, self).__init__()
        self.conv1 = conv1x1(inplanes, planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2= conv3x3(planes,planes,stride=stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = conv1x1(planes, planes*self.expansion)
        self.bn3 = nn.BatchNorm2d(planes*self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        # BFP quantization parameters
        self.exp_bit = exp_bit
        self.mantisa_bit = mantisa_bit
        self.opt_exp_act_list = opt_exp_act_list
        self.start_exp_ind = start_exp_ind
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        #out = self.bn1(out)
        # BN disabled (folded into conv); quantize each conv output instead
        out = BFPActivation.transform_activation_offline(out, self.exp_bit, self.mantisa_bit, self.opt_exp_act_list[self.start_exp_ind])
        out = self.relu(out)
        out = self.conv2(out)
        #out = self.bn2(out)
        out = BFPActivation.transform_activation_offline(out, self.exp_bit, self.mantisa_bit, self.opt_exp_act_list[self.start_exp_ind+1])
        out = self.relu(out)
        out = self.conv3(out)
        #out = self.bn3(out)
        out = BFPActivation.transform_activation_offline(out, self.exp_bit, self.mantisa_bit, self.opt_exp_act_list[self.start_exp_ind+2])
        if self.downsample is not None:
            # Get a max of two list
            #max_exp_act_list = np.maximum.reduce([self.opt_exp_act_list[self.start_exp_ind+2], self.opt_exp_act_list[self.start_exp_ind+3]]).tolist()
            residual = self.downsample(x)
            # bfp quantize both tensor for shortcut using the max exponent list
            # since they have the same exp list, no need for realignment
            residual = BFPActivation.transform_activation_offline(residual, self.exp_bit,
                                                                  self.mantisa_bit, self.opt_exp_act_list[self.start_exp_ind+3])
            #out = BFPActivation.transform_activation_offline(out, self.exp_bit, self.mantisa_bit, max_exp_act_list)
        else:
            # bfp quantize both tensor for shortcut using the third exponent list
            residual = BFPActivation.transform_activation_offline(residual, self.exp_bit, self.mantisa_bit, self.opt_exp_act_list[self.start_exp_ind+2])
        out+=residual
        out = self.relu(out)
        return out
class BlockResNet(nn.Module):
    """ResNet backbone with offline BFP quantization on every activation.

    opt_exp_act_list is consumed sequentially: index 0 for the network input,
    index 1 for the stem conv output, then 3 entries per residual block plus
    1 per stage that has a downsample branch, and index -1 for the final FC
    output.
    NOTE(review): the per-stage offsets (layers[i]*3+1) assume Bottleneck-style
    blocks (3 quantized activations each, one downsample per stage) — confirm
    before relying on this with BasicBlock (which uses 2 per block).
    """
    def __init__(self, block, layers,num_classes = 1000, exp_bit=8, mantisa_bit=8, opt_exp_act_list=None):
        # `block` is BasicBlock or Bottleneck; `layers` gives blocks per stage.
        self.inplanes = 64
        super(BlockResNet, self).__init__()
        # Stem conv carries bias=True because BN is fused into it (bn1 unused
        # in forward()).
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=True)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.exp_bit = exp_bit
        self.mantisa_bit = mantisa_bit
        self.opt_exp_act_list = opt_exp_act_list
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Each stage starts reading exponents where the previous stage stopped.
        self.layer1 = self._make_layer(block, 64, layers[0], exp_bit=self.exp_bit,
                                       mantisa_bit=self.mantisa_bit, opt_exp_act_list=self.opt_exp_act_list,
                                       start_exp_ind=2)
        self.layer2 = self._make_layer(block, 128, layers[1],stride=2, exp_bit=self.exp_bit,
                                       mantisa_bit=self.mantisa_bit, opt_exp_act_list=self.opt_exp_act_list,
                                       start_exp_ind=2 + (layers[0]*3+1))
        self.layer3 = self._make_layer(block, 256, layers[2],stride=2, exp_bit=self.exp_bit,
                                       mantisa_bit=self.mantisa_bit, opt_exp_act_list=self.opt_exp_act_list,
                                       start_exp_ind=2 + (layers[0]*3+1) + (layers[1]*3+1))
        self.layer4 = self._make_layer(block, 512, layers[3],stride=2, exp_bit=self.exp_bit,
                                       mantisa_bit=self.mantisa_bit, opt_exp_act_list=self.opt_exp_act_list,
                                       start_exp_ind=2 + (layers[0]*3+1) + (layers[1]*3+1) + (layers[2]*3+1))
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512*block.expansion, num_classes)
        #print ("fc exponent:", self.opt_exp_act_list[-1])
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                #nn.init.constant_(m.alpha, 1)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def _make_layer(self, block, planes, blocks, stride=1, exp_bit=8, mantisa_bit=8, opt_exp_act_list=None, start_exp_ind=0):
        """Build one stage of `blocks` residual blocks, threading the running
        exponent index through each block."""
        downsample = None
        if stride!=1 or self.inplanes !=planes*block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                # Fused BN
                #nn.BatchNorm2d(planes*block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, exp_bit=exp_bit,
                      mantisa_bit=mantisa_bit, opt_exp_act_list=opt_exp_act_list, start_exp_ind=start_exp_ind))
        # A downsample branch consumes one extra exponent entry.
        start_exp_ind = start_exp_ind + 3 + (int)(downsample != None)
        self.inplanes = planes*block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, exp_bit=exp_bit,
                          mantisa_bit=mantisa_bit, opt_exp_act_list=opt_exp_act_list, start_exp_ind=start_exp_ind))
            start_exp_ind = start_exp_ind + 3
        return nn.Sequential(*layers)
    def forward(self, x):
        # Quantize the raw input before the stem conv.
        x = BFPActivation.transform_activation_offline(x, self.exp_bit, self.mantisa_bit, self.opt_exp_act_list[0])
        x = self.conv1(x)
        #x = self.bn1(x) #Fused BN
        x = BFPActivation.transform_activation_offline(x, self.exp_bit, self.mantisa_bit, self.opt_exp_act_list[1])
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        # Final quantization of the classifier output with the last exponent list.
        x = BFPFullyConnet.transform_fc_offline(x, self.exp_bit, self.mantisa_bit, self.opt_exp_act_list[-1])
        return x
# bfp indicate if insert bfp quantization during inference
def resnet101(pretrained=False, bit_nmb=8, num_classes=1000, bfp=False, mantisa_bit=8, exp_bit=8, opt_exp_act_list=None):
    """Construct a ResNet-101.

    With bfp=True a BFP-quantized BlockResNet is built; pretrained weights are
    copied over from the torchvision reference model via BFPConvertor. With
    bfp=False a plain torchvision model is returned instead.
    """
    if not bfp:
        # Plain torchvision model, optionally with ImageNet weights.
        if pretrained:
            return torchvision.models.resnet101(pretrained=True)
        return torchvision.models.resnet101()
    quantized = BlockResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes,
                            mantisa_bit=mantisa_bit, exp_bit=exp_bit,
                            opt_exp_act_list=opt_exp_act_list)
    if pretrained:
        converter = BFPConvertor(mantisa_bit, exp_bit)
        reference = torchvision.models.resnet101(pretrained=True)
        quantized = converter(reference, quantized)
    return quantized
def resnet50(pretrained=False, num_classes=1000, bfp=False,
             group=1, mantisa_bit=8, exp_bit=8, opt_exp_act_list=None):
    """Construct a ResNet-50.

    Returns a (model, weight_exp_list) pair; weight_exp_list is populated only
    when bfp=True and pretrained=True (exponents gathered during conversion),
    otherwise it stays empty.
    """
    weight_exps = []
    if not bfp:
        # Plain torchvision model, optionally with ImageNet weights.
        if pretrained:
            return torchvision.models.resnet50(pretrained=True), weight_exps
        return torchvision.models.resnet50(), weight_exps
    quantized = BlockResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes,
                            mantisa_bit=mantisa_bit, exp_bit=exp_bit,
                            opt_exp_act_list=opt_exp_act_list)
    if pretrained:
        converter = BFPConvertor(mantisa_bit, exp_bit)
        reference = torchvision.models.resnet50(pretrained=True)
        quantized, weight_exps = converter(reference, quantized, group, is_kl=False)
    return quantized, weight_exps
def resnet34(pretrained=False, bit_nmb=8, num_classes=1000, bfp=False, mantisa_bit=8, exp_bit=8, opt_exp_act_list=None):
    """Construct a ResNet-34 (BasicBlock variant).

    With bfp=True a BFP-quantized BlockResNet is built and, if pretrained,
    weights are converted from the torchvision model; otherwise a plain
    torchvision model is returned.
    """
    if not bfp:
        if pretrained:
            return torchvision.models.resnet34(pretrained=True)
        return torchvision.models.resnet34()
    # Diagnostic print kept on purpose: existing tooling may look for it.
    print ("Shape of exp list:", np.shape(opt_exp_act_list))
    quantized = BlockResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes,
                            mantisa_bit=mantisa_bit, exp_bit=exp_bit,
                            opt_exp_act_list=opt_exp_act_list)
    if pretrained:
        converter = BFPConvertor(mantisa_bit, exp_bit)
        reference = torchvision.models.resnet34(pretrained=True)
        quantized = converter(reference, quantized)
    return quantized
if __name__ == "__main__":
    # Quick forward-pass latency benchmark for the (non-BFP) ResNet-50.
    import os
    import time
    os.environ["CUDA_VISIBLE_DEVICES"] = '1'
    with torch.no_grad():
        net, _ = resnet50()
        # Flip to "cpu" to benchmark on CPU with fewer iterations.
        dev = "gpu"
        if dev == "cpu":
            inputs = torch.rand(1, 3, 224, 224)
            net.cpu()
            test_iter = 100
        else:
            inputs = torch.rand(1, 3, 224, 224).cuda()
            net.cuda()
            test_iter = 10000
        net.eval()
        start = time.time()
        for i in range(test_iter):
            outputs = net.forward(inputs)
        end = time.time()
        # Average per-iteration latency in milliseconds.
        avg_time = ((end-start) * 1000) / test_iter
print(avg_time, " ms") | models/resnet.py | import sys, os
sys.path.append(os.path.dirname(os.path.abspath(__file__))+'/../')
import numpy as np
# Internal
from lib.BFPConvertor import BFPConvertor
from lib import BFPActivation
from lib import BFPFullyConnet
# PyTorch
import torch
import torch.nn as nn
import torchvision
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution; padding equals dilation so spatial size is preserved
    at stride 1. Bias is enabled because BatchNorm is fused into the conv in
    this model."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=True,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 (pointwise) convolution; bias enabled for fused BatchNorm."""
    pointwise = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                          stride=stride, bias=True)
    return pointwise
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, exp_bit=8, mantisa_bit=8,
start_exp_ind=0, opt_exp_act_list=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride=stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
self.exp_bit = exp_bit
self.mantisa_bit = mantisa_bit
self.opt_exp_act_list = opt_exp_act_list
self.start_exp_ind = start_exp_ind
def forward(self, x):
residual = x
out = self.conv1(x)
#out = self.bn1(out)
# disble bn for fused BN
out = BFPActivation.transform_activation_offline(out, self.exp_bit, self.mantisa_bit, self.opt_exp_act_list[self.start_exp_ind])
out = self.relu(out)
out = self.conv2(out)
#out = self.bn2(out)
out = BFPActivation.transform_activation_offline(out, self.exp_bit, self.mantisa_bit, self.opt_exp_act_list[self.start_exp_ind+1])
if self.downsample is not None:
residual = self.downsample(x)
residual = BFPActivation.transform_activation_offline(residual, self.exp_bit,
self.mantisa_bit, self.opt_exp_act_list[self.start_exp_ind+2])
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, exp_bit=8, mantisa_bit=8,
start_exp_ind=0, opt_exp_act_list=None):
super(Bottleneck, self).__init__()
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2= conv3x3(planes,planes,stride=stride)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes*self.expansion)
self.bn3 = nn.BatchNorm2d(planes*self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.exp_bit = exp_bit
self.mantisa_bit = mantisa_bit
self.opt_exp_act_list = opt_exp_act_list
self.start_exp_ind = start_exp_ind
def forward(self, x):
residual = x
out = self.conv1(x)
#out = self.bn1(out)
out = BFPActivation.transform_activation_offline(out, self.exp_bit, self.mantisa_bit, self.opt_exp_act_list[self.start_exp_ind])
out = self.relu(out)
out = self.conv2(out)
#out = self.bn2(out)
out = BFPActivation.transform_activation_offline(out, self.exp_bit, self.mantisa_bit, self.opt_exp_act_list[self.start_exp_ind+1])
out = self.relu(out)
out = self.conv3(out)
#out = self.bn3(out)
out = BFPActivation.transform_activation_offline(out, self.exp_bit, self.mantisa_bit, self.opt_exp_act_list[self.start_exp_ind+2])
if self.downsample is not None:
# Get a max of two list
#max_exp_act_list = np.maximum.reduce([self.opt_exp_act_list[self.start_exp_ind+2], self.opt_exp_act_list[self.start_exp_ind+3]]).tolist()
residual = self.downsample(x)
# bfp quantize both tensor for shortcut using the max exponent list
# since they have the same exp list, no need for realignment
residual = BFPActivation.transform_activation_offline(residual, self.exp_bit,
self.mantisa_bit, self.opt_exp_act_list[self.start_exp_ind+3])
#out = BFPActivation.transform_activation_offline(out, self.exp_bit, self.mantisa_bit, max_exp_act_list)
else:
# bfp quantize both tensor for shortcut using the third exponent list
residual = BFPActivation.transform_activation_offline(residual, self.exp_bit, self.mantisa_bit, self.opt_exp_act_list[self.start_exp_ind+2])
out+=residual
out = self.relu(out)
return out
class BlockResNet(nn.Module):
def __init__(self, block, layers,num_classes = 1000, exp_bit=8, mantisa_bit=8, opt_exp_act_list=None):
self.inplanes = 64
super(BlockResNet, self).__init__()
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=True)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.exp_bit = exp_bit
self.mantisa_bit = mantisa_bit
self.opt_exp_act_list = opt_exp_act_list
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], exp_bit=self.exp_bit,
mantisa_bit=self.mantisa_bit, opt_exp_act_list=self.opt_exp_act_list,
start_exp_ind=2)
self.layer2 = self._make_layer(block, 128, layers[1],stride=2, exp_bit=self.exp_bit,
mantisa_bit=self.mantisa_bit, opt_exp_act_list=self.opt_exp_act_list,
start_exp_ind=2 + (layers[0]*3+1))
self.layer3 = self._make_layer(block, 256, layers[2],stride=2, exp_bit=self.exp_bit,
mantisa_bit=self.mantisa_bit, opt_exp_act_list=self.opt_exp_act_list,
start_exp_ind=2 + (layers[0]*3+1) + (layers[1]*3+1))
self.layer4 = self._make_layer(block, 512, layers[3],stride=2, exp_bit=self.exp_bit,
mantisa_bit=self.mantisa_bit, opt_exp_act_list=self.opt_exp_act_list,
start_exp_ind=2 + (layers[0]*3+1) + (layers[1]*3+1) + (layers[2]*3+1))
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(512*block.expansion, num_classes)
#print ("fc exponent:", self.opt_exp_act_list[-1])
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
#nn.init.constant_(m.alpha, 1)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1, exp_bit=8, mantisa_bit=8, opt_exp_act_list=None, start_exp_ind=0):
downsample = None
if stride!=1 or self.inplanes !=planes*block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
# Fused BN
#nn.BatchNorm2d(planes*block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, exp_bit=exp_bit,
mantisa_bit=mantisa_bit, opt_exp_act_list=opt_exp_act_list, start_exp_ind=start_exp_ind))
start_exp_ind = start_exp_ind + 3 + (int)(downsample != None)
self.inplanes = planes*block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, exp_bit=exp_bit,
mantisa_bit=mantisa_bit, opt_exp_act_list=opt_exp_act_list, start_exp_ind=start_exp_ind))
start_exp_ind = start_exp_ind + 3
return nn.Sequential(*layers)
def forward(self, x):
x = BFPActivation.transform_activation_offline(x, self.exp_bit, self.mantisa_bit, self.opt_exp_act_list[0])
x = self.conv1(x)
#x = self.bn1(x) #Fused BN
x = BFPActivation.transform_activation_offline(x, self.exp_bit, self.mantisa_bit, self.opt_exp_act_list[1])
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
x = BFPFullyConnet.transform_fc_offline(x, self.exp_bit, self.mantisa_bit, self.opt_exp_act_list[-1])
return x
# bfp indicate if insert bfp quantization during inference
def resnet101(pretrained=False, bit_nmb=8, num_classes=1000, bfp=False, mantisa_bit=8, exp_bit=8, opt_exp_act_list=None):
"""Constructs a ResNet101 model
"""
if (bfp):
block_model = BlockResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes, mantisa_bit=mantisa_bit,
exp_bit=exp_bit, opt_exp_act_list=opt_exp_act_list)
if pretrained==True:
golden_model = torchvision.models.resnet101(pretrained=True)
resnet_converter = BFPConvertor(mantisa_bit, exp_bit)
block_model = resnet_converter(golden_model, block_model)
else:
if pretrained==True:
model = torchvision.models.resnet101(pretrained=True)
else:
model = torchvision.models.resnet101()
block_model = model
return block_model
def resnet50(pretrained=False, num_classes=1000, bfp=False,
group=1, mantisa_bit=8, exp_bit=8, opt_exp_act_list=None):
""" Constructs a ResNet50 model
"""
weight_exp_list = []
if (bfp):
#print ("Shape of exp list:", np.shape(opt_exp_act_list))
#print (opt_exp_act_list[0])
block_model = BlockResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes, mantisa_bit=mantisa_bit,
exp_bit=exp_bit, opt_exp_act_list=opt_exp_act_list)
if pretrained==True:
golden_model = torchvision.models.resnet50(pretrained=True)
resnet_converter = BFPConvertor(mantisa_bit, exp_bit)
block_model, weight_exp_list = resnet_converter(golden_model, block_model, group, is_kl=False)
else:
if pretrained==True:
model = torchvision.models.resnet50(pretrained=True)
else:
model = torchvision.models.resnet50()
block_model = model
#block_model = torch.nn.DataParallel(block_model).cuda()
return block_model, weight_exp_list
def resnet34(pretrained=False, bit_nmb=8, num_classes=1000, bfp=False, mantisa_bit=8, exp_bit=8, opt_exp_act_list=None):
""" Constructs a ResNet34 model
"""
if (bfp):
print ("Shape of exp list:", np.shape(opt_exp_act_list))
block_model = BlockResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes, mantisa_bit=mantisa_bit,
exp_bit=exp_bit, opt_exp_act_list=opt_exp_act_list)
if pretrained==True:
golden_model = torchvision.models.resnet34(pretrained=True)
resnet_converter = BFPConvertor(mantisa_bit, exp_bit)
block_model = resnet_converter(golden_model, block_model)
else:
if pretrained==True:
model = torchvision.models.resnet34(pretrained=True)
else:
model = torchvision.models.resnet34()
block_model = model
return block_model
if __name__ == "__main__":
import os
import time
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
with torch.no_grad():
net, _ = resnet50()
dev = "gpu"
if dev == "cpu":
inputs = torch.rand(1, 3, 224, 224)
net.cpu()
test_iter = 100
else:
inputs = torch.rand(1, 3, 224, 224).cuda()
net.cuda()
test_iter = 10000
net.eval()
start = time.time()
for i in range(test_iter):
outputs = net.forward(inputs)
end = time.time()
avg_time = ((end-start) * 1000) / test_iter
print(avg_time, " ms") | 0.750644 | 0.2862 |
import git
from github import Github
from distutils.util import convert_path
import logging
import os
import shutil
import re
def copy_to_version(app, exception):
    """
    Sphinx ``build-finished`` handler: snapshot the built docs per version.

    We keep versioned docs in two situations:
    - A new PR into dev from a feature branch (in versions/dev)
    - A new release on main (in versions/{version})
    :param app: Sphinx application; ``app.outdir`` is the build output dir.
    :param exception: exception raised by the build, or None (unused here).
    :return: None
    """
    # Get branch name, version number, and tag
    git_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(app.outdir))))
    repo = git.Repo(git_root)
    try:
        # This works if you're working locally, and you're on a branch rather than on a PR or release tag commit.
        branch_name = repo.active_branch.name
    except Exception:
        # Detached HEAD (PR / release-tag checkout): recover the branch
        # containing HEAD. (Was a bare `except:` — narrowed so Ctrl-C and
        # SystemExit are no longer swallowed.)
        commit = repo.commit()
        branch_name = repo.git.branch('--contains', commit.hexsha).strip('* ')
    logging.warning(f'Branch name: {branch_name}')
    gh = Github()
    gh_repo = gh.get_repo("NatalieThurlby/Ontolopy")
    pr_reg = re.findall(r'pull/(\d+)/', branch_name)
    release_reg = re.findall(r'(\d+\.\d+\.\d+?-\w+)', branch_name)
    # PR
    if len(pr_reg) != 0:
        assert len(pr_reg) == 1
        number = int(pr_reg[0])
        pr = gh_repo.get_pull(number)
        from_ = pr.head.label.split(':')[1]
        to_ = pr.base.label.split(':')[1]
        logging.warning(f'Detected detached HEAD state: PR from {from_} to {to_}.')
        branch_name = to_  # only want to keep where we're going to, e.g. feature_branch -> dev, version = "dev"
    # Release
    elif len(release_reg) != 0:
        assert len(release_reg) == 1
        release_id = release_reg[0]
        release = gh_repo.get_release(release_id)
        logging.warning(
            f'Detected detached HEAD state: due to release {release_id} on branch {release.target_commitish}')
        if release.prerelease:
            logging.warning("We don't keep versioned docs for pre-releases.")
        elif release.target_commitish == 'main':
            branch_name = release.target_commitish
        else:
            logging.error(f'Releases should only happen on branch "main", not branch {release.target_commitish}.')
    # Read __version__ from the package without importing it.
    ns = {}
    ver_path = convert_path(os.path.join(git_root, 'ontolopy/version.py'))
    with open(ver_path) as ver_file:
        exec(ver_file.read(), ns)
    version = ns['__version__']
    # Find a tag pointing at HEAD, if any.
    tagged = next((tag for tag in repo.tags if tag.commit == repo.head.commit), None)
    tag_name = tagged.name if tagged else None
    # Check that we have a correctly tagged release on the main branch.
    if branch_name not in ["main", "dev"]:
        logging.warning(f"On branch {branch_name}. Not saving versioned docs.")
        return None
    if branch_name == 'main' and not tagged:
        logging.warning("On branch main, but it is not tagged. Not saving versioned docs.")
        return None
    elif branch_name == 'main' and tagged and (tag_name != version):
        logging.error(f"Tag name ({tag_name}) and version ({version}) do not match.")
    # Copy built files to version dir
    logging.info(f"Copying built files to version directory for branch {branch_name}, tag {tag_name}.")
    if branch_name == 'main':
        version_dir = os.path.join(app.outdir, f'versions/{version}/')
    elif branch_name == 'dev':
        version_dir = os.path.join(app.outdir, f'versions/{branch_name}/')
    if os.path.exists(version_dir):
        # shutil.rmtree replaces the shell call os.system(f'rm -r {dir}'):
        # portable, no shell-quoting issues with spaces in paths.
        shutil.rmtree(version_dir)
    shutil.copytree(app.outdir, version_dir, ignore=shutil.ignore_patterns('versions'))
# TODO: build json for dropdown (#9)
def setup(app):
app.connect('build-finished', copy_to_version) | docs/_ext/versioned-tagged-docs.py | import git
from github import Github
from distutils.util import convert_path
import logging
import os
import shutil
import re
def copy_to_version(app, exception):
"""
We keep versioned docs in two situations:
- A new PR into dev from a feature branch (in versions/dev)
- A new release on main (in versions/{versions})
:param app:
:param exception:
:return:
"""
# Get branch name, version number, and tag
git_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(app.outdir))))
repo = git.Repo(git_root)
try:
# This works if you're working locally, and you're on a branch rather than on a PR or release tag commit.
branch_name = repo.active_branch.name
except:
commit = repo.commit()
branch_name = repo.git.branch('--contains', commit.hexsha).strip('* ')
logging.warning(f'Branch name: {branch_name}')
gh = Github()
gh_repo = gh.get_repo("NatalieThurlby/Ontolopy")
pr_reg = re.findall(r'pull/(\d+)/', branch_name)
release_reg = re.findall(r'(\d+\.\d+\.\d+?-\w+)', branch_name)
# PR
if len(pr_reg) != 0:
assert(len(pr_reg) == 1)
number = int(pr_reg[0])
pr = gh_repo.get_pull(number)
from_ = pr.head.label.split(':')[1]
to_ = pr.base.label.split(':')[1]
logging.warning(f'Detected detached HEAD state: PR from {from_} to {to_}.')
branch_name = to_ # only want to keep where we're going to, e.g. feature_branch -> dev, version = "dev"
# Release
elif len(release_reg) != 0:
assert(len(release_reg)) == 1
release_id = release_reg[0]
release = gh_repo.get_release(release_id)
logging.warning(
f'Detected detached HEAD state: due to release {release_id} on branch {release.target_commitish}')
if release.prerelease:
logging.warning(f"We don't keep versioned docs for pre-releases.")
elif release.target_commitish == 'main':
branch_name = release.target_commitish
else:
logging.error(f'Releases should only happen on branch "main", not branch {release.target_commitish}.')
ns = {}
ver_path = convert_path(os.path.join(git_root, 'ontolopy/version.py'))
with open(ver_path) as ver_file:
exec(ver_file.read(), ns)
version = ns['__version__']
tagged = next((tag for tag in repo.tags if tag.commit == repo.head.commit), None)
if not tagged:
tag_name = None
else:
tag_name = tagged.name
# Check that we have a correctly tagged release on the main branch.
if branch_name not in ["main", "dev"]:
logging.warning(f"On branch {branch_name}. Not saving versioned docs.")
return None
if branch_name == 'main' and not tagged:
logging.warning("On branch main, but it is not tagged. Not saving versioned docs.")
return None
elif branch_name == 'main' and tagged and (tag_name != version):
logging.error(f"Tag name ({tag_name}) and version ({version}) do not match.")
# Copy built files to version dir
logging.info(f"Copying built files to version directory for branch {branch_name}, tag {tag_name}.")
if branch_name == 'main':
version_dir = os.path.join(app.outdir, f'versions/{version}/')
elif branch_name == 'dev':
version_dir = os.path.join(app.outdir, f'versions/{branch_name}/')
if not os.path.exists(version_dir):
shutil.copytree(app.outdir, version_dir, ignore=shutil.ignore_patterns('versions'))
else:
os.system(f'rm -r {version_dir}')
shutil.copytree(app.outdir, version_dir, ignore=shutil.ignore_patterns('versions'))
# TODO: build json for dropdown (#9)
def setup(app):
    """Sphinx extension entry point: run copy_to_version after each build."""
    app.connect('build-finished', copy_to_version)
# IMPORT STANDARD LIBRARIES
import os
# IMPORT THIRD-PARTY LIBRARIES
import vim
# IMPORT LOCAL LIBRARIES
from . import arg_list
from . import filer
def _esc(path):
'''str: Clean the given file/folder path so that Vim can understand it.'''
return path.replace(' ', '\ ')
def enter_arg_list():
    '''Edit the file which is focused_arg in the arg-list.
    Any time an arg-list is created, one file is given focus (e.g. it gets "[]"s
    added around its name).
    This function will change the user's current buffer to the focused_arg arg-list item.
    '''
    file_name = filer.get_current_absolute_path()
    # Open the focused entry (the one wrapped in "[]") in the current window.
    for item in arg_list.get_args():
        if arg_list.is_focused(item):
            item = arg_list.get_unfocused_name(item)
            vim.command('edit {item}'.format(item=item))
            break
    items = arg_list.get_args(strip=True)
    # Jump to the top of the buffer only when we actually switched files.
    go_to_first_line = file_name not in items
    if go_to_first_line:
        vim.command('normal! gg')
def save_arg_list():
    '''Gather the current arg-list and save it to a temporary file.'''
    # Serialize as a Vimscript list literal, e.g. ["a", "b"], stored in a
    # global Vim variable for later restoration by restore_arg_list().
    args = ', '.join(('"{item}"'.format(item=item) for item in arg_list.get_args()))
    vim.command('let g:arggitter_temp_arg_list = [{args}]'.format(args=args))
def override_arg_list():
    '''Get all unstaged files in the current git repository and add it to the arg-list.
    When a user runs this function, two scenarios can occur:
    1. The user's current file buffer is not in the arg-list.
    2. The user's current file buffer is in the arg-list.
    In scenario #1, the user's current file buffer is added as the first item to
    the user's arg-list and the arg-list will focus onto this file.
    In scenario #2, the current file buffer is completely ignored and whatever
    the arg-list's file file path is given focus, instead.
    Raises:
        EnvironmentError: If not git repository could be found for the current file.
    '''
    def _allow_submodules():
        # Read the user's g:arggitter_allow_submodules setting; treat any
        # eval failure (unset variable, bad value) as "disabled".
        try:
            return bool(int(vim.eval('g:arggitter_allow_submodules')))
        except Exception:
            return False
    root = filer.get_current_git_root()
    if not root:
        raise EnvironmentError('No root git repository could be found')
    current_file = filer.get_current_absolute_path()
    # Make unstaged paths absolute and escape spaces for Vim commands.
    unstaged_files = (
        _esc(os.path.join(root, path))
        for path in filer.get_unstaged_git_files(root, allow_submodules=_allow_submodules()))
    # Put the current file first (scenario #1 above) when it is unstaged.
    unstaged_files = filer.sort_items(unstaged_files, [current_file])
    arg_list.add_to_arg_list(unstaged_files)
def restore_arg_list():
'''Read the user's saved arg-list and apply it to the current session.'''
args = vim.eval('g:arggitter_temp_arg_list')
unfocused_args = []
focused_arg = ''
for arg in args:
if arg.startswith('[') and arg.endswith(']'):
arg = arg[1:-1]
focused_arg = arg
unfocused_args.append(arg)
arg_list.add_to_arg_list(unfocused_args)
if focused_arg:
arg_list.set_focus_to(focused_arg) | pythonx/arggitter/arggitter.py | # IMPORT STANDARD LIBRARIES
import os
# IMPORT THIRD-PARTY LIBRARIES
import vim
# IMPORT LOCAL LIBRARIES
from . import arg_list
from . import filer
def _esc(path):
'''str: Clean the given file/folder path so that Vim can understand it.'''
return path.replace(' ', '\ ')
def enter_arg_list():
'''Edit the file which is focused_arg in the arg-list.
Any time an arg-list is created, one file is given focus (e.g. it gets "[]"s
added around its name).
This function will change the user's current buffer to the focused_arg arg-list item.
'''
file_name = filer.get_current_absolute_path()
for item in arg_list.get_args():
if arg_list.is_focused(item):
item = arg_list.get_unfocused_name(item)
vim.command('edit {item}'.format(item=item))
break
items = arg_list.get_args(strip=True)
go_to_first_line = file_name not in items
if go_to_first_line:
vim.command('normal! gg')
def save_arg_list():
'''Gather the current arg-list and save it to a temporary file.'''
args = ', '.join(('"{item}"'.format(item=item) for item in arg_list.get_args()))
vim.command('let g:arggitter_temp_arg_list = [{args}]'.format(args=args))
def override_arg_list():
'''Get all unstaged files in the current git repository and add it to the arg-list.
When a user runs this function, two scenarios can occur:
1. The user's current file buffer is not in the arg-list.
2. The user's current file buffer is in the arg-list.
In scenario #1, the user's current file buffer is added as the first item to
the user's arg-list and the arg-list will focus onto this file.
In scenario #2, the current file buffer is completely ignored and whatever
the arg-list's file file path is given focus, instead.
Raises:
EnvironmentError: If not git repository could be found for the current file.
'''
def _allow_submodules():
try:
return bool(int(vim.eval('g:arggitter_allow_submodules')))
except Exception:
return False
root = filer.get_current_git_root()
if not root:
raise EnvironmentError('No root git repository could be found')
current_file = filer.get_current_absolute_path()
unstaged_files = (
_esc(os.path.join(root, path))
for path in filer.get_unstaged_git_files(root, allow_submodules=_allow_submodules()))
unstaged_files = filer.sort_items(unstaged_files, [current_file])
arg_list.add_to_arg_list(unstaged_files)
def restore_arg_list():
    '''Read the user's saved arg-list and apply it to the current session.'''
    args = vim.eval('g:arggitter_temp_arg_list')
    unfocused_args = []
    focused_arg = ''
    for arg in args:
        # "[name]" marks the focused entry; strip the brackets and remember it.
        if arg.startswith('[') and arg.endswith(']'):
            arg = arg[1:-1]
            focused_arg = arg
        unfocused_args.append(arg)
    arg_list.add_to_arg_list(unfocused_args)
    if focused_arg:
        arg_list.set_focus_to(focused_arg)
import logging
import logging.config
import ServerSideExtension_pb2 as SSE
import grpc
import numpy
from ssedata import ArgType, FunctionType, ReturnType
class ScriptEval:
"""
Class for SSE plugin ScriptEval functionality.
"""
    def EvaluateScript(self, header, request, context):
        """
        Evaluates script provided in the header, given the
        arguments provided in the sequence of RowData objects, the request.
        :param header:
        :param request: an iterable sequence of RowData.
        :param context: the context sent from client
        :return: an iterable sequence of RowData.
        """
        # Retrieve function type
        func_type = self.get_func_type(header)
        # Retrieve data types from header
        arg_types = self.get_arg_types(header)
        ret_type = self.get_return_type(header)
        logging.info('EvaluateScript: {} ({} {}) {}'
                     .format(header.script, arg_types, ret_type, func_type))
        # Check if parameters are provided
        if header.params:
            all_rows = []
            # Iterate over bundled rows
            for request_rows in request:
                # Iterate over rows
                for row in request_rows.rows:
                    # Retrieve parameters
                    params = self.get_arguments(context, arg_types, row.duals, header)
                    all_rows.append(params)
            # First element in the parameter list should contain the data of the first parameter.
            # (Transpose row-major [rows][params] into column-major [params][rows].)
            all_rows = [list(param) for param in zip(*all_rows)]
            if arg_types == ArgType.Mixed:
                param_datatypes = [param.dataType for param in header.params]
                for i, datatype in enumerate(param_datatypes):
                    if datatype == SSE.DUAL:
                        # For easier access to the numerical and string representation of duals, in the script, we
                        # split them to two list. For example, if the first parameter is dual, it will contain two lists
                        # the first one being the numerical representation and the second one the string.
                        all_rows[i] = [list(datatype) for datatype in zip(*all_rows[i])]
            logging.debug('Received data from Qlik (args): {}'.format(all_rows))
            # NOTE(review): a single result is yielded for the whole bundle;
            # `self.evaluate` is defined elsewhere in this class.
            yield self.evaluate(header.script, ret_type, params=all_rows)
        else:
            # No parameters provided
            yield self.evaluate(header.script, ret_type)
@staticmethod
def get_func_type(header):
"""
Retrieves the function type.
:param header:
:return:
"""
func_type = header.functionType
if func_type == SSE.SCALAR:
return FunctionType.Scalar
elif func_type == SSE.AGGREGATION:
return FunctionType.Aggregation
elif func_type == SSE.TENSOR:
return FunctionType.Tensor
@staticmethod
def get_arguments(context, arg_types, duals, header):
"""
Gets the array of arguments based on
the duals, and the type (string, numeric)
specified in the header.
:param context: the context sent from client
:param header: the script header.
:param duals: an iterable sequence of duals.
:return: an array of (potentially mixed data type) arguments.
"""
if arg_types == ArgType.String:
# All parameters are of string type
script_args = [d.strData for d in duals]
elif arg_types == ArgType.Numeric:
# All parameters are of numeric type
script_args = [d.numData for d in duals]
elif arg_types == ArgType.Mixed:
# Parameters can be either string, numeric or dual
script_args = []
for dual, param in zip(duals, header.params):
if param.dataType == SSE.STRING:
script_args.append(dual.strData)
elif param.dataType == SSE.NUMERIC:
script_args.append(dual.numData)
elif param.dataType == SSE.DUAL:
script_args.append((dual.numData, dual.strData))
else:
# Undefined argument types
# Make sure the error handling, including logging, works as intended in the client
msg = 'Undefined argument type: '.format(arg_types)
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details(msg)
# Raise error on the plugin-side
raise grpc.RpcError(grpc.StatusCode.INVALID_ARGUMENT, msg)
return script_args
@staticmethod
def get_arg_types(header):
"""
Determines the argument types for all parameters.
:param header:
:return: ArgType
"""
data_types = [param.dataType for param in header.params]
if not data_types:
return ArgType.Empty
elif len(set(data_types)) > 1 or all(data_type == SSE.DUAL for data_type in data_types):
return ArgType.Mixed
elif all(data_type == SSE.STRING for data_type in data_types):
return ArgType.String
elif all(data_type == SSE.NUMERIC for data_type in data_types):
return ArgType.Numeric
else:
return ArgType.Undefined
@staticmethod
def get_return_type(header):
"""
:param header:
:return: Return type
"""
if header.returnType == SSE.STRING:
return ReturnType.String
elif header.returnType == SSE.NUMERIC:
return ReturnType.Numeric
elif header.returnType == SSE.DUAL:
return ReturnType.Dual
else:
return ReturnType.Undefined
@staticmethod
def get_duals(result, ret_type):
if isinstance(result, str) or not hasattr(result, '__iter__'):
result = [result]
# Transform the result to an iterable of Dual data
if ret_type == ReturnType.String:
duals = [SSE.Dual(strData=col) for col in result]
elif ret_type == ReturnType.Numeric:
duals = [SSE.Dual(numData=col) for col in result]
return iter(duals)
    def evaluate(self, script, ret_type, params=[]):
        """
        Evaluates a script with given parameters and construct the result to a Row of duals.
        :param script: script to evaluate
        :param ret_type: return data type
        :param params: params to evaluate. Default: []
        :return: a RowData of string dual
        """
        # SECURITY NOTE(review): eval() executes arbitrary Python supplied by
        # the client; only acceptable when the plugin is deployed for trusted
        # Qlik apps.
        # NOTE(review): mutable default `params=[]` is a Python anti-pattern,
        # though harmless here since params is never mutated.
        # Evaluate script
        result = eval(script, {'args': params, 'numpy': numpy})
        logging.debug('Result: {}'.format(result))
        bundledRows = SSE.BundledRows()
        if isinstance(result, str) or not hasattr(result, '__iter__'):
            # A single value is returned
            bundledRows.rows.add(duals=self.get_duals(result, ret_type))
        else:
            for row in result:
                # note that each element of the result should represent a row
                bundledRows.rows.add(duals=self.get_duals(row, ret_type))
        return bundledRows | examples/python/fullscriptsupport/scripteval.py | import logging
import logging.config
import ServerSideExtension_pb2 as SSE
import grpc
import numpy
from ssedata import ArgType, FunctionType, ReturnType
class ScriptEval:
"""
Class for SSE plugin ScriptEval functionality.
"""
    def EvaluateScript(self, header, request, context):
        """
        Evaluates script provided in the header, given the
        arguments provided in the sequence of RowData objects, the request.
        :param header: the script header (script text, params, types)
        :param request: an iterable sequence of RowData.
        :param context: the context sent from client
        :return: an iterable sequence of RowData.
        """
        # Retrieve function type
        func_type = self.get_func_type(header)
        # Retrieve data types from header
        arg_types = self.get_arg_types(header)
        ret_type = self.get_return_type(header)
        logging.info('EvaluateScript: {} ({} {}) {}'
                     .format(header.script, arg_types, ret_type, func_type))
        # Check if parameters are provided
        if header.params:
            all_rows = []
            # Iterate over bundled rows
            for request_rows in request:
                # Iterate over rows
                for row in request_rows.rows:
                    # Retrieve parameters
                    params = self.get_arguments(context, arg_types, row.duals, header)
                    all_rows.append(params)
            # Transpose rows -> columns, so all_rows[i] holds every value of
            # parameter i across the whole request.
            # First element in the parameter list should contain the data of the first parameter.
            all_rows = [list(param) for param in zip(*all_rows)]
            if arg_types == ArgType.Mixed:
                param_datatypes = [param.dataType for param in header.params]
                for i, datatype in enumerate(param_datatypes):
                    if datatype == SSE.DUAL:
                        # For easier access to the numerical and string representation of duals, in the script, we
                        # split them to two list. For example, if the first parameter is dual, it will contain two lists
                        # the first one being the numerical representation and the second one the string.
                        # NOTE(review): the comprehension below shadows the
                        # loop variable `datatype`; harmless but easy to misread.
                        all_rows[i] = [list(datatype) for datatype in zip(*all_rows[i])]
            logging.debug('Received data from Qlik (args): {}'.format(all_rows))
            # A single BundledRows message is yielded for the whole request
            yield self.evaluate(header.script, ret_type, params=all_rows)
        else:
            # No parameters provided
            yield self.evaluate(header.script, ret_type)
@staticmethod
def get_func_type(header):
"""
Retrieves the function type.
:param header:
:return:
"""
func_type = header.functionType
if func_type == SSE.SCALAR:
return FunctionType.Scalar
elif func_type == SSE.AGGREGATION:
return FunctionType.Aggregation
elif func_type == SSE.TENSOR:
return FunctionType.Tensor
@staticmethod
def get_arguments(context, arg_types, duals, header):
"""
Gets the array of arguments based on
the duals, and the type (string, numeric)
specified in the header.
:param context: the context sent from client
:param header: the script header.
:param duals: an iterable sequence of duals.
:return: an array of (potentially mixed data type) arguments.
"""
if arg_types == ArgType.String:
# All parameters are of string type
script_args = [d.strData for d in duals]
elif arg_types == ArgType.Numeric:
# All parameters are of numeric type
script_args = [d.numData for d in duals]
elif arg_types == ArgType.Mixed:
# Parameters can be either string, numeric or dual
script_args = []
for dual, param in zip(duals, header.params):
if param.dataType == SSE.STRING:
script_args.append(dual.strData)
elif param.dataType == SSE.NUMERIC:
script_args.append(dual.numData)
elif param.dataType == SSE.DUAL:
script_args.append((dual.numData, dual.strData))
else:
# Undefined argument types
# Make sure the error handling, including logging, works as intended in the client
msg = 'Undefined argument type: '.format(arg_types)
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details(msg)
# Raise error on the plugin-side
raise grpc.RpcError(grpc.StatusCode.INVALID_ARGUMENT, msg)
return script_args
@staticmethod
def get_arg_types(header):
"""
Determines the argument types for all parameters.
:param header:
:return: ArgType
"""
data_types = [param.dataType for param in header.params]
if not data_types:
return ArgType.Empty
elif len(set(data_types)) > 1 or all(data_type == SSE.DUAL for data_type in data_types):
return ArgType.Mixed
elif all(data_type == SSE.STRING for data_type in data_types):
return ArgType.String
elif all(data_type == SSE.NUMERIC for data_type in data_types):
return ArgType.Numeric
else:
return ArgType.Undefined
@staticmethod
def get_return_type(header):
"""
:param header:
:return: Return type
"""
if header.returnType == SSE.STRING:
return ReturnType.String
elif header.returnType == SSE.NUMERIC:
return ReturnType.Numeric
elif header.returnType == SSE.DUAL:
return ReturnType.Dual
else:
return ReturnType.Undefined
@staticmethod
def get_duals(result, ret_type):
if isinstance(result, str) or not hasattr(result, '__iter__'):
result = [result]
# Transform the result to an iterable of Dual data
if ret_type == ReturnType.String:
duals = [SSE.Dual(strData=col) for col in result]
elif ret_type == ReturnType.Numeric:
duals = [SSE.Dual(numData=col) for col in result]
return iter(duals)
    def evaluate(self, script, ret_type, params=[]):
        """
        Evaluates a script with given parameters and construct the result to a Row of duals.
        :param script: script to evaluate
        :param ret_type: return data type
        :param params: params to evaluate. Default: []
        :return: a RowData of string dual
        """
        # SECURITY NOTE(review): eval() executes arbitrary Python supplied by
        # the client; only acceptable when the plugin is deployed for trusted
        # Qlik apps.
        # NOTE(review): mutable default `params=[]` is a Python anti-pattern,
        # though harmless here since params is never mutated.
        # Evaluate script
        result = eval(script, {'args': params, 'numpy': numpy})
        logging.debug('Result: {}'.format(result))
        bundledRows = SSE.BundledRows()
        if isinstance(result, str) or not hasattr(result, '__iter__'):
            # A single value is returned
            bundledRows.rows.add(duals=self.get_duals(result, ret_type))
        else:
            for row in result:
                # note that each element of the result should represent a row
                bundledRows.rows.add(duals=self.get_duals(row, ret_type))
        return bundledRows | 0.738952 | 0.445047
import inspect
import json
from ordereddict import OrderedDict
from cfn_pyplates.exceptions import AddRemoveError
# The only CloudFormation template format version AWS has published to date.
aws_template_format_version = '2010-09-09'
# Explicit public API of this module. NOTE(review): the Metadatums class
# defined below is deliberately (or accidentally) absent from this list.
__all__ = [
    'JSONableDict',
    'CloudFormationTemplate',
    'Parameters',
    'Mappings',
    'Resources',
    'Outputs',
    'Properties',
    'Mapping',
    'Resource',
    'Parameter',
    'Output',
    'DependsOn',
    'DeletionPolicy',
    'UpdatePolicy',
    'Metadata',
    'ec2_tags',
]
class JSONableDict(OrderedDict):
    '''A dictionary that knows how to turn itself into JSON
    Args:
        update_dict: A dictionary of values for prepopulating the JSONableDict
            at instantiation
        name: An optional name. If left out, the class's (or subclass's) name
            will be used.
    The most common use-case of any JSON entry in a CFN Template is the
    ``{"Name": {"Key1": "Value1", "Key2": "Value2"} }`` pattern. The
    significance of a JSONableDict's subclass name, or explicitly passing
    a 'name' argument is accommodating this pattern. All JSONableDicts have
    names.
    To create the pyplate equivalent of the above JSON, construct a
    JSONableDict accordingly::
        JSONableDict({'Key1': 'Value1', 'Key2': 'Value2'}, 'Name')
    Based on :class:`ordereddict.OrderedDict`, the order of keys is significant.
    '''
    def __init__(self, update_dict=None, name=None):
        super(JSONableDict, self).__init__()
        self._name = name
        if update_dict:
            self.update(update_dict)
    def __unicode__(self):
        '''Return the JSON representation as a unicode string (Python 2).'''
        # Indenting to keep things readable
        # Trailing whitespace after commas removed
        # (The space after colons is cool, though. He can stay.)
        return unicode(self.json)
    def __str__(self):
        '''Return the UTF-8-encoded JSON representation (Python 2 str).'''
        return unicode(self).encode('utf-8')
    def __setattr__(self, name, value):
        # Binding a JSONableDict as an attribute also registers it as a child
        # key, so it automatically shows up in the JSON output.
        # This makes it simple to bind child dictionaries to an
        # attribute while still making sure they wind up in the output
        # dictionary, see usage example in CloudFormationTemplate init
        if isinstance(value, JSONableDict):
            self.add(value)
        super(JSONableDict, self).__setattr__(name, value)
    def __delattr__(self, name):
        # Mirror of __setattr__: deleting a bound JSONableDict attribute also
        # removes its key from the output dictionary.
        attr = getattr(self, name)
        if isinstance(attr, JSONableDict):
            try:
                self.remove(attr)
            except KeyError:
                # Key already deleted, somehow.
                # Everything's fine here now. How're you?
                pass
        super(JSONableDict, self).__delattr__(name)
    def _get_name(self):
        # Property getter: explicit name wins, otherwise the (sub)class name
        # is used -- this is how container subclasses get named automatically.
        if self._name is not None:
            return self._name
        else:
            # Default to the class name if _name is None
            return self.__class__.__name__
    def _set_name(self, name):
        # Property setter
        self._name = name
    def _del_name(self):
        # Property deleter: reverts to the class-name default
        self._name = None
    name = property(_get_name, _set_name, _del_name)
    '''Accessor to the ``name`` internals;
    Allows getting, settings, and deleting the name
    '''
    @property
    def json(self):
        'Accessor to the canonical JSON representation of a JSONableDict'
        return self.to_json(indent=2, separators=(',', ': '))
    def add(self, child):
        '''Add a child node
        Args:
            child: An instance of JSONableDict
        Returns:
            The child that was added (handy for chaining)
        Raises:
            AddRemoveError: :exc:`cfn_pyplates.exceptions.AddRemoveError`
        '''
        if isinstance(child, JSONableDict):
            self.update(
                {child.name: child}
            )
        else:
            raise AddRemoveError
        return child
    def remove(self, child):
        '''Remove a child node
        Args:
            child: An instance of JSONableDict
        Raises:
            AddRemoveError: :exc:`cfn_pyplates.exceptions.AddRemoveError`
        '''
        if isinstance(child, JSONableDict):
            del(self[child.name])
        else:
            raise AddRemoveError
    def to_json(self, *args, **kwargs):
        '''Thin wrapper around the :func:`json.dumps` method.
        Allows for passing any arguments that json.dumps would accept to
        completely customize the JSON output if desired.
        '''
        return json.dumps(self, *args, **kwargs)
class CloudFormationTemplate(JSONableDict):
    '''The root element of a CloudFormation template [#cfn-template]_
    Takes an optional description string in the constructor
    Comes pre-loaded with all the subelements CloudFormation can stand:
    - Parameters
    - Mappings
    - Resources
    - Outputs
    '''
    def __init__(self, description=None):
        super(CloudFormationTemplate, self).__init__({
            'AWSTemplateFormatVersion': aws_template_format_version,
        })
        if description:
            self.update({
                'Description': description,
            })
        # Tack on all the base template elements that a CF template can handle
        # at easy-to-reach parameters
        self.parameters = Parameters()
        self.mappings = Mappings()
        self.resources = Resources()
        self.outputs = Outputs()
    def __unicode__(self):
        # NOTE(review): not a pure accessor -- empty top-level containers are
        # permanently delattr'd from this instance before rendering.
        # Before outputting to json, remove empty elements
        def predicate(obj):
            '''getmembers predicate to find empty JSONableDict attributes attached to self
            CloudFormation doesn't like empty mappings for these top-level
            attributes, so any falsey JSONableDict that's at attribute on
            the CloudFormationTemplate instance needs to get removed
            '''
            if isinstance(obj, JSONableDict) and not obj:
                return True
        # getmembers returns a materialized list, so deleting attributes while
        # looping over its result is safe.
        for attr, mapping in inspect.getmembers(self, predicate):
            delattr(self, attr)
        return super(CloudFormationTemplate, self).__unicode__()
class Metadatums(JSONableDict):
    '''The base Container for metadatums used at stack creation [#cfn-metadata]_
    Attached to a :class:`cfn_pyplates.core.CloudFormationTemplate`
    NOTE(review): this class is not listed in ``__all__`` and is not
    referenced elsewhere in this module -- it appears vestigial; confirm
    before relying on it (see the Metadata class for resource metadata).
    '''
    pass
# CloudFormationTemplate base elements
class Parameters(JSONableDict):
    '''The base Container for parameters used at stack creation [#cfn-parameters]_
    Attached to a :class:`cfn_pyplates.core.CloudFormationTemplate`
    Children are typically :class:`Parameter` instances, keyed by their name.
    '''
    pass
class Mappings(JSONableDict):
    '''The base Container for stack option mappings [#cfn-mappings]_
    .. note::
        Since most lookups can be done inside a pyplate using python,
        this is normally unused.
    Attached to a :class:`cfn_pyplates.core.CloudFormationTemplate`
    Children are typically :class:`Mapping` instances, keyed by their name.
    '''
    pass
class Resources(JSONableDict):
    '''The base Container for stack resources [#cfn-resources]_
    Attached to a :class:`cfn_pyplates.core.CloudFormationTemplate`
    Children are typically :class:`Resource` instances, keyed by their name.
    '''
    pass
class Outputs(JSONableDict):
    '''The base Container for stack outputs [#cfn-outputs]_
    Attached to a :class:`cfn_pyplates.core.CloudFormationTemplate`
    Children are typically :class:`Output` instances, keyed by their name.
    '''
    pass
# Other 'named' JSONableDicts
class Properties(JSONableDict):
    '''A properties mapping [#cfn-properties]_, used by various CFN declarations
    Can be found in:
    - :class:`cfn_pyplates.core.Parameters`
    - :class:`cfn_pyplates.core.Outputs`
    - :class:`cfn_pyplates.core.Resource`
    Properties will be most commonly found in Resources
    '''
    pass
class Resource(JSONableDict):
    '''A generic CFN Resource [#cfn-resource-types]_
    Used in the :class:`cfn_pyplates.core.Resources` container.
    All resources have a name, and most have a 'Type' and 'Properties' dict.
    Thus, this class takes those as arguments and makes a generic resource.
    The 'name' parameter must follow CFN's guidelines for naming [#cfn-resources]_
    The 'type' parameter must be one of these:
    http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html
    The optional 'properties' parameter is a dictionary of properties as
    defined by the resource type, see documentation related to each resource
    type
    Args:
        name: The unique name of the resource to add
        type: The type of this resource
        properties: Optional properties mapping to apply to this resource,
            can be an instance of ``JSONableDict`` or just plain old ``dict``
        attributes: Optional resource attribute, or a (possibly nested) list
            of them; each one of 'DependsOn', 'DeletionPolicy', 'Metadata'
            or 'UpdatePolicy'
    '''
    def __init__(self, name, type, properties=None, attributes=[]):
        update_dict = {'Type': type}
        super(Resource, self).__init__(update_dict, name)
        if properties:
            try:
                # Assume we've got a JSONableDict
                self.add(properties)
            except AddRemoveError:
                # Plain dict: coerce it into a Properties mapping
                self.add(Properties(properties))
        if attributes:
            # BUG FIX: the old __is_attribute() was a predicate in name only:
            # it mutated self, always returned None (so the call site's
            # `self.add(attributes)` branch was dead code), and list items
            # were re-processed a second time. Attachment now happens in
            # exactly one pass; observable results are unchanged.
            self.__attach_attribute(attributes)
    def __attach_attribute(self, attribute):
        '''Attach one resource attribute, or recurse into a list of them.
        Metadata/UpdatePolicy are JSON sub-mappings and are added as named
        children; DependsOn/DeletionPolicy carry a scalar ``value`` and
        become plain keys. Anything else is silently ignored, as before.
        '''
        if isinstance(attribute, list):
            for item in attribute:
                self.__attach_attribute(item)
        elif attribute.__class__.__name__ in ('Metadata', 'UpdatePolicy'):
            self.add(attribute)
        elif attribute.__class__.__name__ in ('DependsOn', 'DeletionPolicy'):
            self.update({attribute.__class__.__name__: attribute.value})
class Parameter(JSONableDict):
    '''A CFN Parameter [#cfn-parameters]_
    Used in the :class:`cfn_pyplates.core.Parameters` container, a Parameter
    is presented to the user for additional input when CloudFormation
    processes the template.
    More information for Parameter options:
    http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html
    Args:
        name: The unique name of the parameter to add
        type: The type of this parameter
        properties: Optional properties mapping to apply to this parameter
    '''
    def __init__(self, name, type, properties=None):
        # Unlike a Resource, a Parameter's extra properties sit directly
        # beside 'Type' in its own mapping, not under a 'Properties' child.
        contents = {'Type': type}
        contents.update(properties or {})
        super(Parameter, self).__init__(contents, name)
class Mapping(JSONableDict):
    '''A CFN Mapping [#cfn-mappings]_
    Used in the :class:`cfn_pyplates.core.Mappings` container, a Mapping
    defines lookups used within the Cloudformation template itself and is
    not the same as a PyPlates options mapping.
    More information for mapping options:
    http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/concept-mappings.html
    Args:
        name: The unique name of the mapping to add
        mappings: The dictionary of mappings
    '''
    def __init__(self, name, mappings=None):
        # A missing mappings argument just yields an empty mapping body;
        # JSONableDict copies the entries, so the caller's dict isn't aliased.
        super(Mapping, self).__init__(mappings or {}, name)
class Output(JSONableDict):
    '''A CFN Output [#cfn-outputs]_
    Used in the :class:`cfn_pyplates.core.Outputs`, an Output entry describes
    a value that is reported back when this stack is described via the CFN
    API tools.
    More information for Output options can be found here:
    http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/outputs-section-structure.html
    Args:
        name: The unique name of the output
        value: The value the output should return
        description: An optional description of this output
    '''
    def __init__(self, name, value, description=None):
        contents = dict(Value=value)
        # Only emit a Description key when one was actually supplied
        if description is not None:
            contents['Description'] = description
        super(Output, self).__init__(contents, name)
class Metadata(JSONableDict):
    '''The Metadata resource attribute [#cfn-metadata]_
    Used in the :class:`cfn_pyplates.core.Resource`, The Metadata attribute enables you to associate
    structured data with a resource. By adding a Metadata attribute to a resource, you can add data in
    JSON format to the resource declaration. In addition, you can use intrinsic functions (such as GetAtt and Ref),
    parameters, and pseudo parameters within the Metadata attribute to add those interpreted values.
    More information for Metadata can be found here:
    http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-metadata.html
    Args:
        properties: mapping of metadata to attach to a Resource
    '''
    def __init__(self, properties=None):
        # Name is pinned to "Metadata" so Resource.add() files it under that key
        super(Metadata, self).__init__(properties, "Metadata")
class DependsOn(object):
    '''The DependsOn resource attribute [#cfn-dependson]_
    Used in the :class:`cfn_pyplates.core.Resource`, The DependsOn attribute enables you to specify
    that the creation of a specific resource follows another
    More information for DependsOn Attribute can be found here:
    http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-dependson.html
    Args:
        policy: the value this resource depends on (consumed by Resource
            as ``self.value``)
    '''
    def __init__(self, policy=None):
        # NOTE(review): when policy is falsy, .value is never assigned, so a
        # Resource reading attribute.value later raises AttributeError --
        # confirm whether storing None by default was intended instead.
        if policy:
            self.value = policy
class DeletionPolicy(object):
    '''The DeletionPolicy resource attribute [#cfn-deletionpolicy]_
    Used in the :class:`cfn_pyplates.core.Resource`, The DeletionPolicy attribute enables you to
    specify how AWS CloudFormation handles the resource deletion.
    More information for DeletionPolicy Attribute can be found here:
    http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-deletionpolicy.html
    Args:
        policy: the deletion policy; coerced to str before storing
    '''
    def __init__(self, policy=None):
        # NOTE(review): as in DependsOn, a falsy policy leaves .value unset,
        # which raises AttributeError when Resource consumes this attribute.
        if policy:
            self.value = str(policy)
class UpdatePolicy(JSONableDict):
    '''The UpdatePolicy resource attribute [#cfn-updatepolicy]_
    Used in the :class:`cfn_pyplates.core.Resource`, The UpdatePolicy attribute enables you to
    specify how AWS CloudFormation handles rolling updates for a particular resource.
    More information for UpdatePolicy Attribute can be found here:
    http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html
    Args:
        properties: mapping describing the rolling-update behavior
    '''
    def __init__(self, properties=None):
        # Name is pinned to "UpdatePolicy" so Resource.add() files it under that key
        super(UpdatePolicy, self).__init__(properties, "UpdatePolicy")
def ec2_tags(tags):
    '''A container for Tags on EC2 Instances
    Tags are declared really verbosely in CFN templates, but we have
    opportunities in the land of python to keep things a little more
    sane.
    So we can turn the
    `AWS EC2 Tags example <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-tags.html>`_
    from this::
        "Tags": [
            { "Key" : "Role", "Value": "Test Instance" },
            { "Key" : "Application", "Value" : { "Ref" : "AWS::StackName"} }
        ]
    Into something more like this::
        ec2_tags({
            'Role': 'Test Instance',
            'Application': ref('StackName'),
        })
    Args:
        tags: A dictionary of tags to apply to an EC2 instance
    Returns:
        A list of single-tag mappings in CloudFormation's expected shape
    '''
    tags_list = list()
    # Expand each (key, value) pair into CFN's {'Key': ..., 'Value': ...} form
    # (iteritems: this module targets Python 2)
    for key, value in tags.iteritems():
        tags_list.append({'Key': key, 'Value': value})
    return tags_list | cfn_pyplates/core.py | import inspect
import json
from ordereddict import OrderedDict
from cfn_pyplates.exceptions import AddRemoveError
# The only CloudFormation template format version AWS has published to date.
aws_template_format_version = '2010-09-09'
# Explicit public API of this module. NOTE(review): the Metadatums class
# defined below is deliberately (or accidentally) absent from this list.
__all__ = [
    'JSONableDict',
    'CloudFormationTemplate',
    'Parameters',
    'Mappings',
    'Resources',
    'Outputs',
    'Properties',
    'Mapping',
    'Resource',
    'Parameter',
    'Output',
    'DependsOn',
    'DeletionPolicy',
    'UpdatePolicy',
    'Metadata',
    'ec2_tags',
]
class JSONableDict(OrderedDict):
    '''A dictionary that knows how to turn itself into JSON
    Args:
        update_dict: A dictionary of values for prepopulating the JSONableDict
            at instantiation
        name: An optional name. If left out, the class's (or subclass's) name
            will be used.
    The most common use-case of any JSON entry in a CFN Template is the
    ``{"Name": {"Key1": "Value1", "Key2": "Value2"} }`` pattern. The
    significance of a JSONableDict's subclass name, or explicitly passing
    a 'name' argument is accommodating this pattern. All JSONableDicts have
    names.
    To create the pyplate equivalent of the above JSON, construct a
    JSONableDict accordingly::
        JSONableDict({'Key1': 'Value1', 'Key2': 'Value2'}, 'Name')
    Based on :class:`ordereddict.OrderedDict`, the order of keys is significant.
    '''
    def __init__(self, update_dict=None, name=None):
        super(JSONableDict, self).__init__()
        self._name = name
        if update_dict:
            self.update(update_dict)
    def __unicode__(self):
        '''Return the JSON representation as a unicode string (Python 2).'''
        # Indenting to keep things readable
        # Trailing whitespace after commas removed
        # (The space after colons is cool, though. He can stay.)
        return unicode(self.json)
    def __str__(self):
        '''Return the UTF-8-encoded JSON representation (Python 2 str).'''
        return unicode(self).encode('utf-8')
    def __setattr__(self, name, value):
        # Binding a JSONableDict as an attribute also registers it as a child
        # key, so it automatically shows up in the JSON output.
        # This makes it simple to bind child dictionaries to an
        # attribute while still making sure they wind up in the output
        # dictionary, see usage example in CloudFormationTemplate init
        if isinstance(value, JSONableDict):
            self.add(value)
        super(JSONableDict, self).__setattr__(name, value)
    def __delattr__(self, name):
        # Mirror of __setattr__: deleting a bound JSONableDict attribute also
        # removes its key from the output dictionary.
        attr = getattr(self, name)
        if isinstance(attr, JSONableDict):
            try:
                self.remove(attr)
            except KeyError:
                # Key already deleted, somehow.
                # Everything's fine here now. How're you?
                pass
        super(JSONableDict, self).__delattr__(name)
    def _get_name(self):
        # Property getter: explicit name wins, otherwise the (sub)class name
        # is used -- this is how container subclasses get named automatically.
        if self._name is not None:
            return self._name
        else:
            # Default to the class name if _name is None
            return self.__class__.__name__
    def _set_name(self, name):
        # Property setter
        self._name = name
    def _del_name(self):
        # Property deleter: reverts to the class-name default
        self._name = None
    name = property(_get_name, _set_name, _del_name)
    '''Accessor to the ``name`` internals;
    Allows getting, settings, and deleting the name
    '''
    @property
    def json(self):
        'Accessor to the canonical JSON representation of a JSONableDict'
        return self.to_json(indent=2, separators=(',', ': '))
    def add(self, child):
        '''Add a child node
        Args:
            child: An instance of JSONableDict
        Returns:
            The child that was added (handy for chaining)
        Raises:
            AddRemoveError: :exc:`cfn_pyplates.exceptions.AddRemoveError`
        '''
        if isinstance(child, JSONableDict):
            self.update(
                {child.name: child}
            )
        else:
            raise AddRemoveError
        return child
    def remove(self, child):
        '''Remove a child node
        Args:
            child: An instance of JSONableDict
        Raises:
            AddRemoveError: :exc:`cfn_pyplates.exceptions.AddRemoveError`
        '''
        if isinstance(child, JSONableDict):
            del(self[child.name])
        else:
            raise AddRemoveError
    def to_json(self, *args, **kwargs):
        '''Thin wrapper around the :func:`json.dumps` method.
        Allows for passing any arguments that json.dumps would accept to
        completely customize the JSON output if desired.
        '''
        return json.dumps(self, *args, **kwargs)
class CloudFormationTemplate(JSONableDict):
    '''The root element of a CloudFormation template [#cfn-template]_
    Takes an optional description string in the constructor
    Comes pre-loaded with all the subelements CloudFormation can stand:
    - Parameters
    - Mappings
    - Resources
    - Outputs
    '''
    def __init__(self, description=None):
        super(CloudFormationTemplate, self).__init__({
            'AWSTemplateFormatVersion': aws_template_format_version,
        })
        if description:
            self.update({
                'Description': description,
            })
        # Tack on all the base template elements that a CF template can handle
        # at easy-to-reach parameters
        self.parameters = Parameters()
        self.mappings = Mappings()
        self.resources = Resources()
        self.outputs = Outputs()
    def __unicode__(self):
        # NOTE(review): not a pure accessor -- empty top-level containers are
        # permanently delattr'd from this instance before rendering.
        # Before outputting to json, remove empty elements
        def predicate(obj):
            '''getmembers predicate to find empty JSONableDict attributes attached to self
            CloudFormation doesn't like empty mappings for these top-level
            attributes, so any falsey JSONableDict that's at attribute on
            the CloudFormationTemplate instance needs to get removed
            '''
            if isinstance(obj, JSONableDict) and not obj:
                return True
        # getmembers returns a materialized list, so deleting attributes while
        # looping over its result is safe.
        for attr, mapping in inspect.getmembers(self, predicate):
            delattr(self, attr)
        return super(CloudFormationTemplate, self).__unicode__()
class Metadatums(JSONableDict):
    '''The base Container for metadatums used at stack creation [#cfn-metadata]_
    Attached to a :class:`cfn_pyplates.core.CloudFormationTemplate`
    NOTE(review): this class is not listed in ``__all__`` and is not
    referenced elsewhere in this module -- it appears vestigial; confirm
    before relying on it (see the Metadata class for resource metadata).
    '''
    pass
# CloudFormationTemplate base elements
class Parameters(JSONableDict):
    '''The base Container for parameters used at stack creation [#cfn-parameters]_
    Attached to a :class:`cfn_pyplates.core.CloudFormationTemplate`
    Children are typically :class:`Parameter` instances, keyed by their name.
    '''
    pass
class Mappings(JSONableDict):
    '''The base Container for stack option mappings [#cfn-mappings]_
    .. note::
        Since most lookups can be done inside a pyplate using python,
        this is normally unused.
    Attached to a :class:`cfn_pyplates.core.CloudFormationTemplate`
    Children are typically :class:`Mapping` instances, keyed by their name.
    '''
    pass
class Resources(JSONableDict):
    '''The base Container for stack resources [#cfn-resources]_
    Attached to a :class:`cfn_pyplates.core.CloudFormationTemplate`
    Children are typically :class:`Resource` instances, keyed by their name.
    '''
    pass
class Outputs(JSONableDict):
    '''The base Container for stack outputs [#cfn-outputs]_
    Attached to a :class:`cfn_pyplates.core.CloudFormationTemplate`
    Children are typically :class:`Output` instances, keyed by their name.
    '''
    pass
# Other 'named' JSONableDicts
class Properties(JSONableDict):
    '''A properties mapping [#cfn-properties]_, used by various CFN declarations
    Can be found in:
    - :class:`cfn_pyplates.core.Parameters`
    - :class:`cfn_pyplates.core.Outputs`
    - :class:`cfn_pyplates.core.Resource`
    Properties will be most commonly found in Resources
    '''
    pass
class Resource(JSONableDict):
    '''A generic CFN Resource [#cfn-resource-types]_
    Used in the :class:`cfn_pyplates.core.Resources` container.
    All resources have a name, and most have a 'Type' and 'Properties' dict.
    Thus, this class takes those as arguments and makes a generic resource.
    The 'name' parameter must follow CFN's guidelines for naming [#cfn-resources]_
    The 'type' parameter must be one of these:
    http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html
    The optional 'properties' parameter is a dictionary of properties as
    defined by the resource type, see documentation related to each resource
    type
    Args:
        name: The unique name of the resource to add
        type: The type of this resource
        properties: Optional properties mapping to apply to this resource,
            can be an instance of ``JSONableDict`` or just plain old ``dict``
        attributes: Optional resource attribute, or a (possibly nested) list
            of them; each one of 'DependsOn', 'DeletionPolicy', 'Metadata'
            or 'UpdatePolicy'
    '''
    def __init__(self, name, type, properties=None, attributes=[]):
        update_dict = {'Type': type}
        super(Resource, self).__init__(update_dict, name)
        if properties:
            try:
                # Assume we've got a JSONableDict
                self.add(properties)
            except AddRemoveError:
                # Plain dict: coerce it into a Properties mapping
                self.add(Properties(properties))
        if attributes:
            # BUG FIX: the old __is_attribute() was a predicate in name only:
            # it mutated self, always returned None (so the call site's
            # `self.add(attributes)` branch was dead code), and list items
            # were re-processed a second time. Attachment now happens in
            # exactly one pass; observable results are unchanged.
            self.__attach_attribute(attributes)
    def __attach_attribute(self, attribute):
        '''Attach one resource attribute, or recurse into a list of them.
        Metadata/UpdatePolicy are JSON sub-mappings and are added as named
        children; DependsOn/DeletionPolicy carry a scalar ``value`` and
        become plain keys. Anything else is silently ignored, as before.
        '''
        if isinstance(attribute, list):
            for item in attribute:
                self.__attach_attribute(item)
        elif attribute.__class__.__name__ in ('Metadata', 'UpdatePolicy'):
            self.add(attribute)
        elif attribute.__class__.__name__ in ('DependsOn', 'DeletionPolicy'):
            self.update({attribute.__class__.__name__: attribute.value})
class Parameter(JSONableDict):
    '''A CFN Parameter [#cfn-parameters]_
    Used in the :class:`cfn_pyplates.core.Parameters` container, a Parameter
    is presented to the user for additional input when CloudFormation
    processes the template.
    More information for Parameter options:
    http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html
    Args:
        name: The unique name of the parameter to add
        type: The type of this parameter
        properties: Optional properties mapping to apply to this parameter
    '''
    def __init__(self, name, type, properties=None):
        # Unlike a Resource, a Parameter's extra properties sit directly
        # beside 'Type' in its own mapping, not under a 'Properties' child.
        contents = {'Type': type}
        contents.update(properties or {})
        super(Parameter, self).__init__(contents, name)
class Mapping(JSONableDict):
    '''A CFN Mapping [#cfn-mappings]_
    Used in the :class:`cfn_pyplates.core.Mappings` container, a Mapping
    defines lookups used within the Cloudformation template itself and is
    not the same as a PyPlates options mapping.
    More information for mapping options:
    http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/concept-mappings.html
    Args:
        name: The unique name of the mapping to add
        mappings: The dictionary of mappings
    '''
    def __init__(self, name, mappings=None):
        # A missing mappings argument just yields an empty mapping body;
        # JSONableDict copies the entries, so the caller's dict isn't aliased.
        super(Mapping, self).__init__(mappings or {}, name)
class Output(JSONableDict):
    '''A CFN Output [#cfn-outputs]_
    Used in the :class:`cfn_pyplates.core.Outputs`, an Output entry describes
    a value that is reported back when this stack is described via the CFN
    API tools.
    More information for Output options can be found here:
    http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/outputs-section-structure.html
    Args:
        name: The unique name of the output
        value: The value the output should return
        description: An optional description of this output
    '''
    def __init__(self, name, value, description=None):
        contents = dict(Value=value)
        # Only emit a Description key when one was actually supplied
        if description is not None:
            contents['Description'] = description
        super(Output, self).__init__(contents, name)
class Metadata(JSONableDict):
    '''A CFN resource Metadata attribute [#cfn-metadata]_

    Used in the :class:`cfn_pyplates.core.Resource`, the Metadata attribute
    associates structured JSON data with a resource declaration. Intrinsic
    functions (such as GetAtt and Ref), parameters, and pseudo parameters
    may be used within it to embed interpreted values.

    More information for Metadata can be found here:
    http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-metadata.html

    Args:
        properties: The data to attach under the resource's "Metadata" key
    '''
    def __init__(self, properties=None):
        super(Metadata, self).__init__(properties, "Metadata")
class DependsOn(object):
    '''A CFN DependsOn attribute [#cfn-dependson]_

    Used in the :class:`cfn_pyplates.core.Resource`, the DependsOn
    attribute specifies that the creation of this resource must follow
    another resource.

    More information for DependsOn Attribute can be found here:
    http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-dependson.html

    Args:
        policy: The resource (or list of resources) this one depends on
    '''
    def __init__(self, policy=None):
        # A falsy policy leaves .value unset, matching how consumers test
        # for the attribute's presence.
        if not policy:
            return
        self.value = policy
class DeletionPolicy(object):
    '''A CFN DeletionPolicy attribute [#cfn-deletionpolicy]_

    Used in the :class:`cfn_pyplates.core.Resource`, the DeletionPolicy
    attribute specifies how AWS CloudFormation handles deletion of the
    resource.

    More information for DeletionPolicy Attribute can be found here:
    http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-deletionpolicy.html

    Args:
        policy: The deletion policy name; stored as its string form
    '''
    def __init__(self, policy=None):
        # A falsy policy leaves .value unset; otherwise coerce to str so
        # non-string policies serialize consistently.
        if not policy:
            return
        self.value = str(policy)
class UpdatePolicy(JSONableDict):
    '''A CFN UpdatePolicy attribute [#cfn-updatepolicy]_

    Used in the :class:`cfn_pyplates.core.Resource`, the UpdatePolicy
    attribute specifies how AWS CloudFormation handles rolling updates for
    a particular resource.

    More information for UpdatePolicy Attribute can be found here:
    http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html

    Args:
        properties: The policy body to attach under "UpdatePolicy"
    '''
    def __init__(self, properties=None):
        super(UpdatePolicy, self).__init__(properties, "UpdatePolicy")
def ec2_tags(tags):
    '''A container for Tags on EC2 Instances

    Tags are declared really verbosely in CFN templates, but we have
    opportunities in the land of python to keep things a little more sane.

    So we can turn the
    `AWS EC2 Tags example <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-tags.html>`_
    from this::

        "Tags": [
            { "Key" : "Role", "Value": "Test Instance" },
            { "Key" : "Application", "Value" : { "Ref" : "AWS::StackName"} }
        ]

    Into something more like this::

        ec2_tags({
            'Role': 'Test Instance',
            'Application': ref('StackName'),
        })

    Args:
        tags: A dictionary of tags to apply to an EC2 instance

    Returns:
        A list of ``{'Key': ..., 'Value': ...}`` mappings suitable for the
        "Tags" property of an EC2 resource.
    '''
    # Fix: dict.iteritems() is Python-2-only and raises AttributeError on
    # Python 3; items() behaves identically here on both.
    return [{'Key': key, 'Value': value} for key, value in tags.items()]
import ics_to_csv
import pytest
import ics
import subprocess
import os
import uuid
# Bare minimum tests to make sure my sample doesn't break as time goes on

# Random 10-char suffix so repeated test runs don't collide on one file.
UUID = uuid.uuid4().hex[:10]
# CSV written by test_convert_list_to_csv and removed by the fixture below.
OUTPUT_FILE = f"holidays-test-{UUID}.csv"

# A minimal, well-formed VCALENDAR containing a single all-day VEVENT.
TEST_CALENDAR_STRING=(
    "BEGIN:VCALENDAR\n"
    "VERSION:2.0\n"
    "PRODID:-//Telerik Inc.//NONSGML RadScheduler//EN\n"
    "METHOD:PUBLISH\n"
    "BEGIN:VEVENT\n"
    "DTSTART:19970101\n"
    "DTEND:19970102\n"
    "UID:20210211T185111Z-05fe6751-ec1d-4532-b3a8-60e48e0eb064\n"
    "DTSTAMP:20210211T185111Z\n"
    "SUMMARY:New Year’s Day\n"
    "DESCRIPTION:\n"
    "END:VEVENT\n"
    "END:VCALENDAR\n"
)
# Test string is missing END:VCALENDAR and BEGIN: VEVENT lines
TEST_BAD_CALENDAR_STRING=(
    "BEGIN:VCALENDAR\n"
    "VERSION:2.0\n"
    "PRODID:-//Telerik Inc.//NONSGML RadScheduler//EN\n"
    "METHOD:PUBLISH\n"
    "DTSTART:19970101\n"
    "DTEND:19970102\n"
    "UID:20210211T185111Z-05fe6751-ec1d-4532-b3a8-60e48e0eb064\n"
    "DTSTAMP:20210211T185111Z\n"
    "SUMMARY:New Year’s Day\n"
    "DESCRIPTION:\n"
    "END:VEVENT\n"
)
# Expected parse of TEST_CALENDAR_STRING: a header row plus one event row.
TEST_EVENT_LIST = [['Date', 'Holiday'], ['1997-1-1', 'New Year’s Day']]
@pytest.fixture
def cleanup_csv_test():
    """Remove the CSV a test produced, after the test finishes."""
    yield
    # os.remove is portable (works on Windows too) unlike shelling out to
    # `rm`; the original subprocess.run call ignored failures, so a missing
    # file stays non-fatal here as well.
    try:
        os.remove(OUTPUT_FILE)
    except FileNotFoundError:
        pass
def test_convert_ics_to_string_fails_bad_ics():
    """Malformed or non-ICS inputs should raise from convert_ics_to_string."""
    for bad_path in ("test_data/holidays-test-bad.ics", "test_data/test.txt"):
        with pytest.raises(Exception):
            assert(ics_to_csv.convert_ics_to_string(bad_path))
def test_convert_ics_to_string():
    """A valid .ics file loads and contains the calendar preamble."""
    calendar_text = ics_to_csv.convert_ics_to_string("test_data/holidays-test.ics")
    assert "BEGIN:VCALENDAR" in calendar_text
def test_make_event_list():
    """The sample calendar yields exactly a header row plus one event."""
    rows = ics_to_csv.make_event_list(TEST_CALENDAR_STRING)
    assert len(rows) == 2
    assert "New Year’s Day" in rows[1]
def test_make_event_list_fails():
    """A structurally broken calendar string raises a ParseError."""
    with pytest.raises(ics.grammar.parse.ParseError):
        ics_to_csv.make_event_list(TEST_BAD_CALENDAR_STRING)
def test_convert_list_to_csv(cleanup_csv_test):
    """convert_list_to_csv should create the output file on disk."""
    ics_to_csv.convert_list_to_csv(TEST_EVENT_LIST, OUTPUT_FILE)
    # Fix: extraction junk (" | ics_to_csv_test.py | import ics_to_csv")
    # fused onto the end of this assert made it a syntax error.
    assert os.path.exists(OUTPUT_FILE)
import ics_to_csv
import pytest
import ics
import subprocess
import os
import uuid

# Bare minimum tests to make sure my sample doesn't break as time goes on.
# Fix: the leading `import ics_to_csv` of this copy had been fused into the
# previous line by the extraction; it is restored above. Trailing junk
# ("| 0.225672 | 0.214671") on the final assert is removed.

# Random 10-char suffix so repeated test runs don't collide on one file.
UUID = uuid.uuid4().hex[:10]
# CSV written by test_convert_list_to_csv and removed by the fixture below.
OUTPUT_FILE = f"holidays-test-{UUID}.csv"

# A minimal, well-formed VCALENDAR containing a single all-day VEVENT.
TEST_CALENDAR_STRING = (
    "BEGIN:VCALENDAR\n"
    "VERSION:2.0\n"
    "PRODID:-//Telerik Inc.//NONSGML RadScheduler//EN\n"
    "METHOD:PUBLISH\n"
    "BEGIN:VEVENT\n"
    "DTSTART:19970101\n"
    "DTEND:19970102\n"
    "UID:20210211T185111Z-05fe6751-ec1d-4532-b3a8-60e48e0eb064\n"
    "DTSTAMP:20210211T185111Z\n"
    "SUMMARY:New Year’s Day\n"
    "DESCRIPTION:\n"
    "END:VEVENT\n"
    "END:VCALENDAR\n"
)

# Test string is missing END:VCALENDAR and BEGIN:VEVENT lines.
TEST_BAD_CALENDAR_STRING = (
    "BEGIN:VCALENDAR\n"
    "VERSION:2.0\n"
    "PRODID:-//Telerik Inc.//NONSGML RadScheduler//EN\n"
    "METHOD:PUBLISH\n"
    "DTSTART:19970101\n"
    "DTEND:19970102\n"
    "UID:20210211T185111Z-05fe6751-ec1d-4532-b3a8-60e48e0eb064\n"
    "DTSTAMP:20210211T185111Z\n"
    "SUMMARY:New Year’s Day\n"
    "DESCRIPTION:\n"
    "END:VEVENT\n"
)

# Expected parse of TEST_CALENDAR_STRING: a header row plus one event row.
TEST_EVENT_LIST = [['Date', 'Holiday'], ['1997-1-1', 'New Year’s Day']]


@pytest.fixture
def cleanup_csv_test():
    """Remove the CSV a test produced, after the test finishes."""
    yield
    # os.remove is portable unlike shelling out to `rm`; the original
    # subprocess.run call ignored failures, so missing files stay non-fatal.
    try:
        os.remove(OUTPUT_FILE)
    except FileNotFoundError:
        pass


def test_convert_ics_to_string_fails_bad_ics():
    """Malformed or non-ICS inputs should raise from convert_ics_to_string."""
    # Fix: the original wrapped each call in `assert` inside pytest.raises;
    # an AssertionError is an Exception, so a falsy non-raising return was
    # silently accepted. The bare call is the correct form.
    for bad_path in ("test_data/holidays-test-bad.ics", "test_data/test.txt"):
        with pytest.raises(Exception):
            ics_to_csv.convert_ics_to_string(bad_path)


def test_convert_ics_to_string():
    """A valid .ics file loads and contains the calendar preamble."""
    output = ics_to_csv.convert_ics_to_string("test_data/holidays-test.ics")
    assert "BEGIN:VCALENDAR" in output


def test_make_event_list():
    """The sample calendar yields exactly a header row plus one event."""
    output = ics_to_csv.make_event_list(TEST_CALENDAR_STRING)
    assert "New Year’s Day" in output[1]
    assert len(output) == 2


def test_make_event_list_fails():
    """A structurally broken calendar string raises a ParseError."""
    with pytest.raises(ics.grammar.parse.ParseError):
        ics_to_csv.make_event_list(TEST_BAD_CALENDAR_STRING)


def test_convert_list_to_csv(cleanup_csv_test):
    """convert_list_to_csv should create the output file on disk."""
    ics_to_csv.convert_list_to_csv(TEST_EVENT_LIST, OUTPUT_FILE)
    assert os.path.exists(OUTPUT_FILE)
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import device, MAX_LENGTH
# noinspection PyUnresolvedReferences,PyShadowingBuiltins
class AttnDecoder(nn.Module):
    """Attention decoder for a seq2seq model.

    Each call embeds the previous target token, attends over the encoder
    outputs, combines the two, and runs one LSTM step.

    Fix: the original docstring advertised a GRU, but the recurrent core is
    an ``nn.LSTM``; the docs and ``init_hidden`` now match the code.
    """

    def __init__(self, output_dim,
                 embedding_dim,
                 hidden_dim,
                 num_layers=1,
                 dropout=0.1,
                 max_length=MAX_LENGTH):
        """
        Args:
            output_dim: Size of the output vocabulary.
            embedding_dim: Dimension of the token embeddings.
            hidden_dim: Dimension of the LSTM hidden state.
            num_layers: Number of stacked LSTM layers.
            dropout: Dropout probability applied to the embeddings.
            max_length: Maximum source length the attention can cover.
        """
        super(AttnDecoder, self).__init__()
        self.output_dim = output_dim
        self.hidden_dim = hidden_dim
        self.max_length = max_length
        self.embedding = nn.Embedding(num_embeddings=self.output_dim,
                                      embedding_dim=embedding_dim)
        # Attention scores come from [embedded token ; decoder hidden state].
        self.attn = nn.Linear(in_features=self.hidden_dim + embedding_dim,
                              out_features=self.max_length)
        self.attn_combine = nn.Linear(in_features=self.hidden_dim + embedding_dim,
                                      out_features=self.hidden_dim)
        self.dropout = nn.Dropout(dropout)
        self.lstm = nn.LSTM(input_size=self.hidden_dim,
                            hidden_size=self.hidden_dim,
                            num_layers=num_layers)
        self.out = nn.Linear(in_features=self.hidden_dim,
                             out_features=self.output_dim)

    def forward(self, input, hidden, encoder_outputs):
        """Run one decoding step.

        Args:
            input: Tensor holding the previous target token id.
            hidden: LSTM state tuple ``(h, c)``.
            encoder_outputs: Encoder outputs; at most ``max_length`` long.

        Returns:
            Tuple of (log-probabilities over the vocabulary, new LSTM
            state, attention weights).
        """
        # Pad encoder outputs with 0s so attention always sees exactly
        # self.max_length positions.
        encoder_outputs = F.pad(encoder_outputs, (0, 0, 0, self.max_length - len(encoder_outputs)))
        embedded = self.embedding(input).view(1, 1, -1)
        embedded = self.dropout(embedded)
        attn_weights = F.softmax(self.attn(torch.cat((embedded[0], torch.squeeze(hidden[0], dim=0)), 1)), dim=1)
        attn_applied = torch.bmm(attn_weights.unsqueeze(0), encoder_outputs.unsqueeze(0))
        attn_combine = self.attn_combine(torch.cat((embedded[0], attn_applied[0]), 1)).unsqueeze(0)
        output, hidden = self.lstm(F.relu(attn_combine), hidden)
        output = F.log_softmax(self.out(output[0]), dim=1)
        return output, hidden, attn_weights

    def init_hidden(self):
        """Return a zeroed initial LSTM state.

        Fix: the original returned a single tensor, but ``nn.LSTM``
        requires an ``(h_0, c_0)`` tuple — the old return value raised at
        the first ``forward`` call.
        """
        # NOTE(review): assumes num_layers == 1 and batch size 1, matching
        # forward(); confirm if num_layers > 1 is ever used.
        h0 = torch.zeros(1, 1, self.hidden_dim, device=device)
        c0 = torch.zeros(1, 1, self.hidden_dim, device=device)
        return (h0, c0)
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import device, MAX_LENGTH

# Fix: this copy's leading `import torch` had been fused into the previous
# line by the extraction and is restored above; trailing junk on the final
# line is removed.


# noinspection PyUnresolvedReferences,PyShadowingBuiltins
class AttnDecoder(nn.Module):
    """Attention decoder for a seq2seq model.

    Each call embeds the previous target token, attends over the encoder
    outputs, combines the two, and runs one LSTM step.

    Fix: the original docstring advertised a GRU, but the recurrent core is
    an ``nn.LSTM``; the docs and ``init_hidden`` now match the code.
    """

    def __init__(self, output_dim,
                 embedding_dim,
                 hidden_dim,
                 num_layers=1,
                 dropout=0.1,
                 max_length=MAX_LENGTH):
        """
        Args:
            output_dim: Size of the output vocabulary.
            embedding_dim: Dimension of the token embeddings.
            hidden_dim: Dimension of the LSTM hidden state.
            num_layers: Number of stacked LSTM layers.
            dropout: Dropout probability applied to the embeddings.
            max_length: Maximum source length the attention can cover.
        """
        super(AttnDecoder, self).__init__()
        self.output_dim = output_dim
        self.hidden_dim = hidden_dim
        self.max_length = max_length
        self.embedding = nn.Embedding(num_embeddings=self.output_dim,
                                      embedding_dim=embedding_dim)
        # Attention scores come from [embedded token ; decoder hidden state].
        self.attn = nn.Linear(in_features=self.hidden_dim + embedding_dim,
                              out_features=self.max_length)
        self.attn_combine = nn.Linear(in_features=self.hidden_dim + embedding_dim,
                                      out_features=self.hidden_dim)
        self.dropout = nn.Dropout(dropout)
        self.lstm = nn.LSTM(input_size=self.hidden_dim,
                            hidden_size=self.hidden_dim,
                            num_layers=num_layers)
        self.out = nn.Linear(in_features=self.hidden_dim,
                             out_features=self.output_dim)

    def forward(self, input, hidden, encoder_outputs):
        """Run one decoding step.

        Args:
            input: Tensor holding the previous target token id.
            hidden: LSTM state tuple ``(h, c)``.
            encoder_outputs: Encoder outputs; at most ``max_length`` long.

        Returns:
            Tuple of (log-probabilities over the vocabulary, new LSTM
            state, attention weights).
        """
        # Pad encoder outputs with 0s so attention always sees exactly
        # self.max_length positions.
        encoder_outputs = F.pad(encoder_outputs, (0, 0, 0, self.max_length - len(encoder_outputs)))
        embedded = self.embedding(input).view(1, 1, -1)
        embedded = self.dropout(embedded)
        attn_weights = F.softmax(self.attn(torch.cat((embedded[0], torch.squeeze(hidden[0], dim=0)), 1)), dim=1)
        attn_applied = torch.bmm(attn_weights.unsqueeze(0), encoder_outputs.unsqueeze(0))
        attn_combine = self.attn_combine(torch.cat((embedded[0], attn_applied[0]), 1)).unsqueeze(0)
        output, hidden = self.lstm(F.relu(attn_combine), hidden)
        output = F.log_softmax(self.out(output[0]), dim=1)
        return output, hidden, attn_weights

    def init_hidden(self):
        """Return a zeroed initial LSTM state.

        Fix: the original returned a single tensor, but ``nn.LSTM``
        requires an ``(h_0, c_0)`` tuple — the old return value raised at
        the first ``forward`` call.
        """
        # NOTE(review): assumes num_layers == 1 and batch size 1, matching
        # forward(); confirm if num_layers > 1 is ever used.
        h0 = torch.zeros(1, 1, self.hidden_dim, device=device)
        c0 = torch.zeros(1, 1, self.hidden_dim, device=device)
        return (h0, c0)
import tensorflow
from tensorflow.keras.layers import Conv2D, Input, Dense, MaxPool2D, BatchNormalization, GlobalAvgPool2D, Flatten
from tensorflow.keras import Model
# functional approach : function that returns a model
def functional_model():
    """Build the 28x28x1 (MNIST-shaped) CNN with the Keras functional API."""
    inputs = Input(shape=(28, 28, 1))
    net = Conv2D(32, (3, 3), activation='relu')(inputs)
    net = Conv2D(64, (3, 3), activation='relu')(net)
    net = MaxPool2D()(net)
    net = BatchNormalization()(net)
    net = Conv2D(128, (3, 3), activation='relu')(net)
    net = MaxPool2D()(net)
    net = BatchNormalization()(net)
    net = GlobalAvgPool2D()(net)
    net = Dense(64, activation='relu')(net)
    net = Dense(10, activation='softmax')(net)
    return tensorflow.keras.Model(inputs=inputs, outputs=net)
# tensorflow.keras.Model : inherit from this class
class MyCustomModel(tensorflow.keras.Model):
    """Subclassed-API version of the same CNN as functional_model()."""

    def __init__(self):
        super().__init__()
        # Layers must be assigned as attributes so Keras tracks their weights.
        self.conv1 = Conv2D(32, (3, 3), activation='relu')
        self.conv2 = Conv2D(64, (3, 3), activation='relu')
        self.maxpool1 = MaxPool2D()
        self.batchnorm1 = BatchNormalization()
        self.conv3 = Conv2D(128, (3, 3), activation='relu')
        self.maxpool2 = MaxPool2D()
        self.batchnorm2 = BatchNormalization()
        self.globalavgpool1 = GlobalAvgPool2D()
        self.dense1 = Dense(64, activation='relu')
        self.dense2 = Dense(10, activation='softmax')

    def call(self, my_input):
        """Apply the layers in sequence to the input batch."""
        x = my_input
        for layer in (self.conv1, self.conv2, self.maxpool1, self.batchnorm1,
                      self.conv3, self.maxpool2, self.batchnorm2,
                      self.globalavgpool1, self.dense1, self.dense2):
            x = layer(x)
        return x
def streesigns_model(nbr_classes):
    """Build a 60x60x3 street-sign classifier with `nbr_classes` outputs."""
    inputs = Input(shape=(60, 60, 3))
    net = inputs
    # Three conv -> pool -> batch-norm stages with widening filter counts.
    for filters in (32, 64, 128):
        net = Conv2D(filters, (3, 3), activation='relu')(net)
        net = MaxPool2D()(net)
        net = BatchNormalization()(net)
    net = GlobalAvgPool2D()(net)
    net = Dense(128, activation='relu')(net)
    net = Dense(nbr_classes, activation='softmax')(net)
    return Model(inputs=inputs, outputs=net)
if __name__ == '__main__':
    # Quick smoke test: build the model and print its architecture.
    # Fix: extraction junk (" | deeplearning_models.py | import tensorflow")
    # fused onto the summary() line made it a syntax error.
    model = streesigns_model(10)
    model.summary()
import tensorflow
from tensorflow.keras.layers import Conv2D, Input, Dense, MaxPool2D, BatchNormalization, GlobalAvgPool2D, Flatten
from tensorflow.keras import Model

# Fix: this copy's leading `import tensorflow` had been fused into the
# previous line by the extraction and is restored above; trailing junk on
# the final line is removed.


# functional approach : function that returns a model
def functional_model():
    """Build the 28x28x1 (MNIST-shaped) CNN with the Keras functional API."""
    my_input = Input(shape=(28, 28, 1))
    x = Conv2D(32, (3, 3), activation='relu')(my_input)
    x = Conv2D(64, (3, 3), activation='relu')(x)
    x = MaxPool2D()(x)
    x = BatchNormalization()(x)
    x = Conv2D(128, (3, 3), activation='relu')(x)
    x = MaxPool2D()(x)
    x = BatchNormalization()(x)
    x = GlobalAvgPool2D()(x)
    x = Dense(64, activation='relu')(x)
    x = Dense(10, activation='softmax')(x)
    return tensorflow.keras.Model(inputs=my_input, outputs=x)


# tensorflow.keras.Model : inherit from this class
class MyCustomModel(tensorflow.keras.Model):
    """Subclassed-API version of the same CNN as functional_model()."""

    def __init__(self):
        super().__init__()
        # Layers must be assigned as attributes so Keras tracks their weights.
        self.conv1 = Conv2D(32, (3, 3), activation='relu')
        self.conv2 = Conv2D(64, (3, 3), activation='relu')
        self.maxpool1 = MaxPool2D()
        self.batchnorm1 = BatchNormalization()
        self.conv3 = Conv2D(128, (3, 3), activation='relu')
        self.maxpool2 = MaxPool2D()
        self.batchnorm2 = BatchNormalization()
        self.globalavgpool1 = GlobalAvgPool2D()
        self.dense1 = Dense(64, activation='relu')
        self.dense2 = Dense(10, activation='softmax')

    def call(self, my_input):
        """Apply the layers in sequence to the input batch."""
        x = my_input
        for layer in (self.conv1, self.conv2, self.maxpool1, self.batchnorm1,
                      self.conv3, self.maxpool2, self.batchnorm2,
                      self.globalavgpool1, self.dense1, self.dense2):
            x = layer(x)
        return x


def streesigns_model(nbr_classes):
    """Build a 60x60x3 street-sign classifier with `nbr_classes` outputs."""
    my_input = Input(shape=(60, 60, 3))
    x = my_input
    for filters in (32, 64, 128):
        x = Conv2D(filters, (3, 3), activation='relu')(x)
        x = MaxPool2D()(x)
        x = BatchNormalization()(x)
    x = GlobalAvgPool2D()(x)
    x = Dense(128, activation='relu')(x)
    x = Dense(nbr_classes, activation='softmax')(x)
    return Model(inputs=my_input, outputs=x)


if __name__ == '__main__':
    # Quick smoke test: build the model and print its architecture.
    model = streesigns_model(10)
    model.summary()
from itertools import chain
from typing import List, Tuple, Callable
from iobes import TokenFunction, Span, Error
from iobes.parse import (
parse_spans_iob_with_errors,
parse_spans_bio_with_errors,
parse_spans_iobes_with_errors,
parse_spans_bilou_with_errors,
parse_spans_bmeow_with_errors,
)
from iobes.write import (
write_iob_tags,
write_bio_tags,
write_iobes_tags,
write_bilou_tags,
write_bmeow_tags,
)
def convert_tags(
    tags: List[str],
    parse_function: Callable[[List[str]], Tuple[List[Span], List[Error]]],
    write_function: Callable[[List[Span], int], List[str]],
) -> List[str]:
    """Re-encode a tag sequence via parse-then-write.

    Args:
        tags: The tags to convert.
        parse_function: Parses tags into (spans, errors).
        write_function: Renders spans back into a tag list of a given length.

    Raises:
        ValueError: If the parser reported any formatting errors.

    Returns:
        The same spans written out in the target format.
    """
    spans, errors = parse_function(tags)
    if not errors:
        return write_function(spans, length=len(tags))
    error_string = "\n".join(str(e) for e in errors)
    raise ValueError(f"Found errors in the tag sequence, cannot be converted. Errors: {error_string}")
def iob_to_bio(tags: List[str]) -> List[str]:
    """Re-encode IOB tags in the BIO scheme.

    Args:
        tags: The IOB tags to convert.

    Raises:
        ValueError: If the input is not well-formed IOB.

    Returns:
        Tags producing the same spans, written as BIO.
    """
    return convert_tags(tags, parse_spans_iob_with_errors, write_bio_tags)


def iob_to_iobes(tags: List[str]) -> List[str]:
    """Re-encode IOB tags in the IOBES scheme (via BIO).

    Raises:
        ValueError: If the input is not well-formed IOB.
    """
    as_bio = iob_to_bio(tags)
    return bio_to_iobes(as_bio)


def iob_to_bilou(tags: List[str]) -> List[str]:
    """Re-encode IOB tags in the BILOU scheme (via IOBES).

    Raises:
        ValueError: If the input is not well-formed IOB.
    """
    as_iobes = iob_to_iobes(tags)
    return iobes_to_bilou(as_iobes)


def iob_to_bmeow(tags: List[str]) -> List[str]:
    """Re-encode IOB tags in the BMEOW scheme (via IOBES).

    Raises:
        ValueError: If the input is not well-formed IOB.
    """
    as_iobes = iob_to_iobes(tags)
    return iobes_to_bmeow(as_iobes)


def iob_to_bmewo(tags: List[str]) -> List[str]:
    """Alias for :py:func:`~iobes.convert.iob_to_bmeow`."""
    return iob_to_bmeow(tags)
def bio_to_iob(tags: List[str]) -> List[str]:
    """Re-encode BIO tags in the IOB scheme.

    Args:
        tags: The BIO tags to convert.

    Raises:
        ValueError: If the input is not well-formed BIO.

    Returns:
        Tags producing the same spans, written as IOB.
    """
    return convert_tags(tags, parse_spans_bio_with_errors, write_iob_tags)


def bio_to_iobes(tags: List[str]) -> List[str]:
    """Re-encode BIO tags in the IOBES scheme.

    Raises:
        ValueError: If the input is not well-formed BIO.
    """
    return convert_tags(tags, parse_spans_bio_with_errors, write_iobes_tags)


def bio_to_bilou(tags: List[str]) -> List[str]:
    """Re-encode BIO tags in the BILOU scheme (via IOBES).

    Raises:
        ValueError: If the input is not well-formed BIO.
    """
    as_iobes = bio_to_iobes(tags)
    return iobes_to_bilou(as_iobes)


def bio_to_bmeow(tags: List[str]) -> List[str]:
    """Re-encode BIO tags in the BMEOW scheme (via IOBES).

    Raises:
        ValueError: If the input is not well-formed BIO.
    """
    as_iobes = bio_to_iobes(tags)
    return iobes_to_bmeow(as_iobes)


def bio_to_bmewo(tags: List[str]) -> List[str]:
    """Alias for :py:func:`~iobes.convert.bio_to_bmeow`."""
    return bio_to_bmeow(tags)
def iobes_to_iob(tags: List[str]) -> List[str]:
    """Re-encode IOBES tags in the IOB scheme (via BIO).

    Args:
        tags: The IOBES tags to convert.

    Raises:
        ValueError: If the input is not well-formed IOBES.

    Returns:
        Tags producing the same spans, written as IOB.
    """
    as_bio = iobes_to_bio(tags)
    return bio_to_iob(as_bio)


def iobes_to_bio(tags: List[str]) -> List[str]:
    """Re-encode IOBES tags in the BIO scheme.

    Raises:
        ValueError: If the input is not well-formed IOBES.
    """
    return convert_tags(tags, parse_spans_iobes_with_errors, write_bio_tags)


def iobes_to_bilou(tags: List[str]) -> List[str]:
    """Re-encode IOBES tags in the BILOU scheme.

    Raises:
        ValueError: If the input is not well-formed IOBES.
    """
    return convert_tags(tags, parse_spans_iobes_with_errors, write_bilou_tags)


def iobes_to_bmeow(tags: List[str]) -> List[str]:
    """Re-encode IOBES tags in the BMEOW scheme.

    Raises:
        ValueError: If the input is not well-formed IOBES.
    """
    return convert_tags(tags, parse_spans_iobes_with_errors, write_bmeow_tags)


def iobes_to_bmewo(tags: List[str]) -> List[str]:
    """Alias for :py:func:`~iobes.convert.iobes_to_bmeow`."""
    return iobes_to_bmeow(tags)
def bilou_to_iob(tags: List[str]) -> List[str]:
    """Re-encode BILOU tags in the IOB scheme.

    Args:
        tags: The BILOU tags to convert.

    Raises:
        ValueError: If the input is not well-formed BILOU.

    Returns:
        Tags producing the same spans, written as IOB.
    """
    return convert_tags(tags, parse_spans_bilou_with_errors, write_iob_tags)


def bilou_to_bio(tags: List[str]) -> List[str]:
    """Re-encode BILOU tags in the BIO scheme.

    Raises:
        ValueError: If the input is not well-formed BILOU.
    """
    return convert_tags(tags, parse_spans_bilou_with_errors, write_bio_tags)


def bilou_to_iobes(tags: List[str]) -> List[str]:
    """Re-encode BILOU tags in the IOBES scheme.

    Raises:
        ValueError: If the input is not well-formed BILOU.
    """
    return convert_tags(tags, parse_spans_bilou_with_errors, write_iobes_tags)


def bilou_to_bmeow(tags: List[str]) -> List[str]:
    """Re-encode BILOU tags in the BMEOW scheme.

    Raises:
        ValueError: If the input is not well-formed BILOU.
    """
    return convert_tags(tags, parse_spans_bilou_with_errors, write_bmeow_tags)


def bilou_to_bmewo(tags: List[str]) -> List[str]:
    """Alias for :py:func:`~iobes.convert.bilou_to_bmeow`."""
    return bilou_to_bmeow(tags)
def bmeow_to_iob(tags: List[str]) -> List[str]:
    """Re-encode BMEOW tags in the IOB scheme.

    Args:
        tags: The BMEOW tags to convert.

    Raises:
        ValueError: If the input is not well-formed BMEOW.

    Returns:
        Tags producing the same spans, written as IOB.
    """
    return convert_tags(tags, parse_spans_bmeow_with_errors, write_iob_tags)


def bmeow_to_bio(tags: List[str]) -> List[str]:
    """Re-encode BMEOW tags in the BIO scheme.

    Raises:
        ValueError: If the input is not well-formed BMEOW.
    """
    return convert_tags(tags, parse_spans_bmeow_with_errors, write_bio_tags)


def bmeow_to_iobes(tags: List[str]) -> List[str]:
    """Re-encode BMEOW tags in the IOBES scheme.

    Raises:
        ValueError: If the input is not well-formed BMEOW.
    """
    return convert_tags(tags, parse_spans_bmeow_with_errors, write_iobes_tags)


def bmeow_to_bilou(tags: List[str]) -> List[str]:
    """Re-encode BMEOW tags in the BILOU scheme.

    Raises:
        ValueError: If the input is not well-formed BMEOW.
    """
    return convert_tags(tags, parse_spans_bmeow_with_errors, write_bilou_tags)


def bmewo_to_iob(tags: List[str]) -> List[str]:
    """Alias for :py:func:`~iobes.convert.bmeow_to_iob`."""
    return bmeow_to_iob(tags)


def bmewo_to_bio(tags: List[str]) -> List[str]:
    """Alias for :py:func:`~iobes.convert.bmeow_to_bio`."""
    return bmeow_to_bio(tags)


def bmewo_to_iobes(tags: List[str]) -> List[str]:
    """Alias for :py:func:`~iobes.convert.bmeow_to_iobes`."""
    return bmeow_to_iobes(tags)


def bmewo_to_bilou(tags: List[str]) -> List[str]:
    """Alias for :py:func:`~iobes.convert.bmeow_to_bilou`.

    Fix: extraction junk (" | iobes/convert.py | from itertools import
    chain") fused onto the final return made it a syntax error.
    """
    return bmeow_to_bilou(tags)
from typing import List, Tuple, Callable
from iobes import TokenFunction, Span, Error
from iobes.parse import (
parse_spans_iob_with_errors,
parse_spans_bio_with_errors,
parse_spans_iobes_with_errors,
parse_spans_bilou_with_errors,
parse_spans_bmeow_with_errors,
)
from iobes.write import (
write_iob_tags,
write_bio_tags,
write_iobes_tags,
write_bilou_tags,
write_bmeow_tags,
)
def convert_tags(
    tags: List[str],
    parse_function: Callable[[List[str]], Tuple[List[Span], List[Error]]],
    write_function: Callable[[List[Span], int], List[str]],
) -> List[str]:
    """Re-encode a tag sequence via parse-then-write.

    Args:
        tags: The tags to convert.
        parse_function: Parses tags into (spans, errors).
        write_function: Renders spans back into a tag list of a given length.

    Raises:
        ValueError: If the parser reported any formatting errors.

    Returns:
        The same spans written out in the target format.
    """
    spans, errors = parse_function(tags)
    if not errors:
        return write_function(spans, length=len(tags))
    error_string = "\n".join(str(e) for e in errors)
    raise ValueError(f"Found errors in the tag sequence, cannot be converted. Errors: {error_string}")
def iob_to_bio(tags: List[str]) -> List[str]:
    """Re-encode IOB tags in the BIO scheme.

    Args:
        tags: The IOB tags to convert.

    Raises:
        ValueError: If the input is not well-formed IOB.

    Returns:
        Tags producing the same spans, written as BIO.
    """
    return convert_tags(tags, parse_spans_iob_with_errors, write_bio_tags)


def iob_to_iobes(tags: List[str]) -> List[str]:
    """Re-encode IOB tags in the IOBES scheme (via BIO).

    Raises:
        ValueError: If the input is not well-formed IOB.
    """
    as_bio = iob_to_bio(tags)
    return bio_to_iobes(as_bio)


def iob_to_bilou(tags: List[str]) -> List[str]:
    """Re-encode IOB tags in the BILOU scheme (via IOBES).

    Raises:
        ValueError: If the input is not well-formed IOB.
    """
    as_iobes = iob_to_iobes(tags)
    return iobes_to_bilou(as_iobes)


def iob_to_bmeow(tags: List[str]) -> List[str]:
    """Re-encode IOB tags in the BMEOW scheme (via IOBES).

    Raises:
        ValueError: If the input is not well-formed IOB.
    """
    as_iobes = iob_to_iobes(tags)
    return iobes_to_bmeow(as_iobes)


def iob_to_bmewo(tags: List[str]) -> List[str]:
    """Alias for :py:func:`~iobes.convert.iob_to_bmeow`."""
    return iob_to_bmeow(tags)
def bio_to_iob(tags: List[str]) -> List[str]:
    """Re-encode BIO tags in the IOB scheme.

    Args:
        tags: The BIO tags to convert.

    Raises:
        ValueError: If the input is not well-formed BIO.

    Returns:
        Tags producing the same spans, written as IOB.
    """
    return convert_tags(tags, parse_spans_bio_with_errors, write_iob_tags)


def bio_to_iobes(tags: List[str]) -> List[str]:
    """Re-encode BIO tags in the IOBES scheme.

    Raises:
        ValueError: If the input is not well-formed BIO.
    """
    return convert_tags(tags, parse_spans_bio_with_errors, write_iobes_tags)


def bio_to_bilou(tags: List[str]) -> List[str]:
    """Re-encode BIO tags in the BILOU scheme (via IOBES).

    Raises:
        ValueError: If the input is not well-formed BIO.
    """
    as_iobes = bio_to_iobes(tags)
    return iobes_to_bilou(as_iobes)


def bio_to_bmeow(tags: List[str]) -> List[str]:
    """Re-encode BIO tags in the BMEOW scheme (via IOBES).

    Raises:
        ValueError: If the input is not well-formed BIO.
    """
    as_iobes = bio_to_iobes(tags)
    return iobes_to_bmeow(as_iobes)


def bio_to_bmewo(tags: List[str]) -> List[str]:
    """Alias for :py:func:`~iobes.convert.bio_to_bmeow`."""
    return bio_to_bmeow(tags)
def iobes_to_iob(tags: List[str]) -> List[str]:
"""Convert IOBES tags to the IOB format.
Args:
tags: The IOBES tags we are converting
Raises:
ValueError: If there were errors in the IOBES formatting of the input.
Returns:
Tags that produce the same spans in the IOB format.
"""
return bio_to_iob(iobes_to_bio(tags))
def iobes_to_bio(tags: List[str]) -> List[str]:
"""Convert IOBES tags to the BIO format.
Args:
tags: The IOBES tags we are converting
Raises:
ValueError: If there were errors in the IOBES formatting of the input.
Returns:
Tags that produce the same spans in the BIO format.
"""
return convert_tags(tags, parse_spans_iobes_with_errors, write_bio_tags)
def iobes_to_bilou(tags: List[str]) -> List[str]:
"""Convert IOBES tags to the BILOU format.
Args:
tags: The IOBES tags we are converting
Raises:
ValueError: If there were errors in the IOBES formatting of the input.
Returns:
Tags that produce the same spans in the BILOU format.
"""
return convert_tags(tags, parse_spans_iobes_with_errors, write_bilou_tags)
def iobes_to_bmeow(tags: List[str]) -> List[str]:
"""Convert IOBES tags to the BMEOW format.
Args:
tags: The IOBES tags we are converting
Raises:
ValueError: If there were errors in the IOBES formatting of the input.
Returns:
Tags that produce the same spans in the BMEOW format.
"""
return convert_tags(tags, parse_spans_iobes_with_errors, write_bmeow_tags)
def iobes_to_bmewo(tags: List[str]) -> List[str]:
    """Re-encode IOBES tags in the BMEWO scheme.

    Note:
        BMEWO is an alternate name for BMEOW, so this simply delegates to
        :py:func:`~iobes.convert.iobes_to_bmeow`.

    Args:
        tags: The IOBES tag sequence to re-encode.

    Raises:
        ValueError: If the input is not well-formed IOBES.

    Returns:
        BMEWO tags that produce exactly the same spans.
    """
    return iobes_to_bmeow(tags)
def bilou_to_iob(tags: List[str]) -> List[str]:
    """Re-encode BILOU tags in the IOB scheme.

    Args:
        tags: The BILOU tag sequence to re-encode.

    Raises:
        ValueError: If the input is not well-formed BILOU.

    Returns:
        IOB tags that produce exactly the same spans.
    """
    parse, write = parse_spans_bilou_with_errors, write_iob_tags
    return convert_tags(tags, parse, write)
def bilou_to_bio(tags: List[str]) -> List[str]:
    """Re-encode BILOU tags in the BIO scheme.

    Args:
        tags: The BILOU tag sequence to re-encode.

    Raises:
        ValueError: If the input is not well-formed BILOU.

    Returns:
        BIO tags that produce exactly the same spans.
    """
    parse, write = parse_spans_bilou_with_errors, write_bio_tags
    return convert_tags(tags, parse, write)
def bilou_to_iobes(tags: List[str]) -> List[str]:
    """Re-encode BILOU tags in the IOBES scheme.

    Args:
        tags: The BILOU tag sequence to re-encode.

    Raises:
        ValueError: If the input is not well-formed BILOU.

    Returns:
        IOBES tags that produce exactly the same spans.
    """
    parse, write = parse_spans_bilou_with_errors, write_iobes_tags
    return convert_tags(tags, parse, write)
def bilou_to_bmeow(tags: List[str]) -> List[str]:
    """Re-encode BILOU tags in the BMEOW scheme.

    Args:
        tags: The BILOU tag sequence to re-encode.

    Raises:
        ValueError: If the input is not well-formed BILOU.

    Returns:
        BMEOW tags that produce exactly the same spans.
    """
    parse, write = parse_spans_bilou_with_errors, write_bmeow_tags
    return convert_tags(tags, parse, write)
def bilou_to_bmewo(tags: List[str]) -> List[str]:
    """Re-encode BILOU tags in the BMEWO scheme.

    Note:
        BMEWO is an alternate name for BMEOW, so this simply delegates to
        :py:func:`~iobes.convert.bilou_to_bmeow`.

    Args:
        tags: The BILOU tag sequence to re-encode.

    Raises:
        ValueError: If the input is not well-formed BILOU.

    Returns:
        BMEWO tags that produce exactly the same spans.
    """
    return bilou_to_bmeow(tags)
def bmeow_to_iob(tags: List[str]) -> List[str]:
    """Re-encode BMEOW tags in the IOB scheme.

    Args:
        tags: The BMEOW tag sequence to re-encode.

    Raises:
        ValueError: If the input is not well-formed BMEOW.

    Returns:
        IOB tags that produce exactly the same spans.
    """
    parse, write = parse_spans_bmeow_with_errors, write_iob_tags
    return convert_tags(tags, parse, write)
def bmeow_to_bio(tags: List[str]) -> List[str]:
    """Re-encode BMEOW tags in the BIO scheme.

    Args:
        tags: The BMEOW tag sequence to re-encode.

    Raises:
        ValueError: If the input is not well-formed BMEOW.

    Returns:
        BIO tags that produce exactly the same spans.
    """
    parse, write = parse_spans_bmeow_with_errors, write_bio_tags
    return convert_tags(tags, parse, write)
def bmeow_to_iobes(tags: List[str]) -> List[str]:
    """Re-encode BMEOW tags in the IOBES scheme.

    Args:
        tags: The BMEOW tag sequence to re-encode.

    Raises:
        ValueError: If the input is not well-formed BMEOW.

    Returns:
        IOBES tags that produce exactly the same spans.
    """
    parse, write = parse_spans_bmeow_with_errors, write_iobes_tags
    return convert_tags(tags, parse, write)
def bmeow_to_bilou(tags: List[str]) -> List[str]:
    """Re-encode BMEOW tags in the BILOU scheme.

    Args:
        tags: The BMEOW tag sequence to re-encode.

    Raises:
        ValueError: If the input is not well-formed BMEOW.

    Returns:
        BILOU tags that produce exactly the same spans.
    """
    parse, write = parse_spans_bmeow_with_errors, write_bilou_tags
    return convert_tags(tags, parse, write)
def bmewo_to_iob(tags: List[str]) -> List[str]:
    """Re-encode BMEWO tags in the IOB scheme.

    Note:
        BMEWO is an alternate name for BMEOW, so this simply delegates to
        :py:func:`~iobes.convert.bmeow_to_iob`.

    Args:
        tags: The BMEWO tag sequence to re-encode.

    Raises:
        ValueError: If the input is not well-formed BMEWO.

    Returns:
        IOB tags that produce exactly the same spans.
    """
    return bmeow_to_iob(tags)
def bmewo_to_bio(tags: List[str]) -> List[str]:
    """Re-encode BMEWO tags in the BIO scheme.

    Note:
        BMEWO is an alternate name for BMEOW, so this simply delegates to
        :py:func:`~iobes.convert.bmeow_to_bio`.

    Args:
        tags: The BMEWO tag sequence to re-encode.

    Raises:
        ValueError: If the input is not well-formed BMEWO.

    Returns:
        BIO tags that produce exactly the same spans.
    """
    return bmeow_to_bio(tags)
def bmewo_to_iobes(tags: List[str]) -> List[str]:
    """Re-encode BMEWO tags in the IOBES scheme.

    Note:
        BMEWO is an alternate name for BMEOW, so this simply delegates to
        :py:func:`~iobes.convert.bmeow_to_iobes`.

    Args:
        tags: The BMEWO tag sequence to re-encode.

    Raises:
        ValueError: If the input is not well-formed BMEWO.

    Returns:
        IOBES tags that produce exactly the same spans.
    """
    return bmeow_to_iobes(tags)
def bmewo_to_bilou(tags: List[str]) -> List[str]:
    """Convert BMEWO tags to the BILOU format.

    Note:
        BMEWO is an alternate name for BMEOW; this is an alias for
        :py:func:`~iobes.convert.bmeow_to_bilou`.

    Args:
        tags: The BMEWO tags we are converting

    Raises:
        ValueError: If there were errors in the BMEWO formatting of the input.

    Returns:
        Tags that produce the same spans in the BILOU format.
    """
return bmeow_to_bilou(tags) | 0.878835 | 0.653873 |
# Importing the libraries
import csv
import numpy as np
# Loading the dataset
# Each file is CSV-like with a header row; the header is skipped with
# next(f) and column 0 (presumably a row id — confirm against the data
# files) is dropped from the features.
X_train_fpath = './data/X_train'
y_train_fpath = './data/Y_train'
X_test_fpath = './data/X_test'
with open(X_train_fpath) as f:
    next(f)  # skip the header line
    X_train = np.array([line.strip('\n').split(',')[1:]
                        for line in f], dtype=float)
with open(y_train_fpath) as f:
    next(f)  # skip the header line; only column 1 (the label) is kept
    y_train = np.array([line.strip('\n').split(',')[1]
                        for line in f], dtype=float)
with open(X_test_fpath) as f:
    next(f)  # skip the header line
    X_test = np.array([line.strip('\n').split(',')[1:]
                       for line in f], dtype=float)
# Defining data preprocessing functions
def _normalize(X, train=True, specified_column=None, X_mean=None, X_std=None):
"""
This function normalizes specific columns of X.
The mean and standard variance of training data will be reused when processing testing data.
Arguments:
X: data to be processed.
train: 'True' when processing training data. 'False' when processing testing data.
specified_column: indexes of the columns that will be normalized. If 'None', all columns will be normalized.
X_mean: mean value of the training data, used when train='False'.
X_std: standard deviation of the training data, used when train='False'.
Outputs:
X: normalized data.
X_mean: computed mean value of the training data.
X_std: computed standard deviation of the training data.
"""
if specified_column == None:
specified_column = np.arange(X.shape[1])
if train:
X_mean = np.mean(X[:, specified_column], 0).reshape(1, -1)
X_std = np.std(X[:, specified_column], 0).reshape(1, -1)
X[:, specified_column] = (X[:, specified_column] - X_mean) / (X_std + 1e-8)
return X, X_mean, X_std
# Defining some useful functions
def _predict(X, w, b):
    """
    This function returns a 0/1 prediction for each row of X by rounding the
    result of the logistic regression function.

    Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    builtin int is the documented replacement and behaves identically here.
    """
    return np.round(_f(X, w, b)).astype(int)
def _sigmoid(z):
"""
Sigmoid function can be used to calculate probability.
To avoid overflow, minimum/maximum output value is set.
"""
return np.clip(1 / (1.0 + np.exp(-z)), 1e-8, 1-(1e-8))
def _f(X, w, b):
    """
    Logistic regression forward pass, parameterized by w and b.

    Arguments:
        X: input data, shape=[batch_size, data_dimension]
        w: weight vector, shape=[data_dimension]
        b: bias, scalar
    Output:
        predicted probability of each row of X being positively labeled,
        shape=[batch_size, ]
    """
    logits = np.matmul(X, w) + b
    return _sigmoid(logits)
def _accuracy(y_pred, y_label):
"""
This function calculates prediction accuracy
"""
acc = 1 - np.mean(np.abs(y_pred - y_label))
return acc
# Data preprocessing
# Normalizing the training and testing data
X_train, X_mean, X_std = _normalize(X_train, train=True)
X_test, _, _ = _normalize(
    X_test, train=False, specified_column=None, X_mean=X_mean, X_std=X_std)
# Calculating the Mean and Covariance
# In the generative model, we need to calculate the average and covariance of the data in the two categories separately.
# compute in-class mean
X_train_0 = np.array([x for x, y in zip(X_train, y_train) if y == 0])
X_train_1 = np.array([x for x, y in zip(X_train, y_train) if y == 1])
mean_0 = np.mean(X_train_0, axis=0)
mean_1 = np.mean(X_train_1, axis=0)
# compute the in-class covariance
data_dim = X_train.shape[1]
cov_0 = np.zeros((data_dim, data_dim))
cov_1 = np.zeros((data_dim, data_dim))
for x in X_train_0:
    # np.transpose([x - mean_0]).shape -> (data_dim, 1); the outer product
    # with the (1, data_dim) row gives one (data_dim, data_dim) term.
    cov_0 += np.dot(np.transpose([x - mean_0]),
                    [x - mean_0]) / X_train_0.shape[0]
for x in X_train_1:
    # Bug fix: class 1's covariance must be centered on mean_1, not mean_0.
    cov_1 += np.dot(np.transpose([x - mean_1]),
                    [x - mean_1]) / X_train_1.shape[0]
# Shared covariance is taken as a weighted average of individual in-class covariance.
cov = (cov_0 * X_train_0.shape[0] + cov_1 * X_train_1.shape[0]
       ) / (X_train_0.shape[0] + X_train_1.shape[0])
# Computing weights and bias
# The weight matrix and deviation vector can be directly calculated.
# Compute the inverse of covariance matrix
# Since the covariance matrix may be nearly singular, np.linalg.inv() may give a large numerical error
# Via SVD decomposition, one can get matrix inverse efficiently and accurately
u, s, v = np.linalg.svd(cov, full_matrices=False)
# Inverse from the SVD factors: cov^-1 = v.T @ diag(1/s) @ u.T
inv = np.matmul(v.T * 1 / s, u.T)
# Directly compute weights and bias
# w = cov^-1 (mean_0 - mean_1); b adds the quadratic terms and the
# log ratio of the class sizes (the class priors).
w = np.dot(inv, mean_0 - mean_1)
b = -0.5 * np.dot(mean_0, np.dot(inv, mean_0)) + 0.5 * np.dot(mean_1, np.dot(inv, mean_1)) \
    + np.log(float(X_train_0.shape[0]) / X_train_1.shape[0])
# Compute accuracy on training set
# w = inv @ (mean_0 - mean_1) points towards class 0, so _predict returns 1
# for class-0-like rows; the "1 -" flips predictions to match the y labels.
y_train_pred = 1 - _predict(X_train, w, b)
print('Training accuracy: {}'.format(_accuracy(y_train_pred, y_train)))
# Predicting testing labels
# Bug fix: the training-accuracy computation above flips _predict's output
# with "1 -" (w points towards class 0), but the test predictions were not
# flipped; apply the same flip so both use the same label convention.
predictions = 1 - _predict(X_test, w, b)
with open('output_generative.csv', mode='w', newline='') as submit_file:
    csv_writer = csv.writer(submit_file)
    header = ['id', 'label']
    print(header)
    csv_writer.writerow(header)
    # ids are 1-based in the submission format
    for i in range(len(predictions)):
        row = [str(i+1), predictions[i]]
        csv_writer.writerow(row)
        print(row)
print()
# Print out the most significant weights
# Arrange the array in an ascending order and take it from the end to the front
ind = np.argsort(np.abs(w))[::-1]
with open(X_test_fpath) as f:
    # Header row holds the feature names.
    content = f.readline().strip('\n').split(',')
features = np.array(content)
# NOTE(review): the header still contains the id column while w indexes the
# data columns (column 0 was dropped when loading), so features[i] may be
# off by one relative to w — confirm against the data files.
for i in ind[0: 10]:
    print(features[i], w[i])
print(features[i], w[i]) | 01_Binary-Classification/Binary-Classification_Generative-Model.py | # Importing the libraries
import csv
import numpy as np
# Loading the dataset
# Each file is CSV-like with a header row; the header is skipped with
# next(f) and column 0 (presumably a row id — confirm against the data
# files) is dropped from the features.
X_train_fpath = './data/X_train'
y_train_fpath = './data/Y_train'
X_test_fpath = './data/X_test'
with open(X_train_fpath) as f:
    next(f)  # skip the header line
    X_train = np.array([line.strip('\n').split(',')[1:]
                        for line in f], dtype=float)
with open(y_train_fpath) as f:
    next(f)  # skip the header line; only column 1 (the label) is kept
    y_train = np.array([line.strip('\n').split(',')[1]
                        for line in f], dtype=float)
with open(X_test_fpath) as f:
    next(f)  # skip the header line
    X_test = np.array([line.strip('\n').split(',')[1:]
                       for line in f], dtype=float)
# Defining data preprocessing functions
def _normalize(X, train=True, specified_column=None, X_mean=None, X_std=None):
"""
This function normalizes specific columns of X.
The mean and standard variance of training data will be reused when processing testing data.
Arguments:
X: data to be processed.
train: 'True' when processing training data. 'False' when processing testing data.
specified_column: indexes of the columns that will be normalized. If 'None', all columns will be normalized.
X_mean: mean value of the training data, used when train='False'.
X_std: standard deviation of the training data, used when train='False'.
Outputs:
X: normalized data.
X_mean: computed mean value of the training data.
X_std: computed standard deviation of the training data.
"""
if specified_column == None:
specified_column = np.arange(X.shape[1])
if train:
X_mean = np.mean(X[:, specified_column], 0).reshape(1, -1)
X_std = np.std(X[:, specified_column], 0).reshape(1, -1)
X[:, specified_column] = (X[:, specified_column] - X_mean) / (X_std + 1e-8)
return X, X_mean, X_std
# Defining some useful functions
def _predict(X, w, b):
    """
    This function returns a 0/1 prediction for each row of X by rounding the
    result of the logistic regression function.

    Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    builtin int is the documented replacement and behaves identically here.
    """
    return np.round(_f(X, w, b)).astype(int)
def _sigmoid(z):
"""
Sigmoid function can be used to calculate probability.
To avoid overflow, minimum/maximum output value is set.
"""
return np.clip(1 / (1.0 + np.exp(-z)), 1e-8, 1-(1e-8))
def _f(X, w, b):
    """
    Logistic regression forward pass, parameterized by w and b.

    Arguments:
        X: input data, shape=[batch_size, data_dimension]
        w: weight vector, shape=[data_dimension]
        b: bias, scalar
    Output:
        predicted probability of each row of X being positively labeled,
        shape=[batch_size, ]
    """
    logits = np.matmul(X, w) + b
    return _sigmoid(logits)
def _accuracy(y_pred, y_label):
"""
This function calculates prediction accuracy
"""
acc = 1 - np.mean(np.abs(y_pred - y_label))
return acc
# Data preprocessing
# Normalizing the training and testing data
X_train, X_mean, X_std = _normalize(X_train, train=True)
X_test, _, _ = _normalize(
    X_test, train=False, specified_column=None, X_mean=X_mean, X_std=X_std)
# Calculating the Mean and Covariance
# In the generative model, we need to calculate the average and covariance of the data in the two categories separately.
# compute in-class mean
X_train_0 = np.array([x for x, y in zip(X_train, y_train) if y == 0])
X_train_1 = np.array([x for x, y in zip(X_train, y_train) if y == 1])
mean_0 = np.mean(X_train_0, axis=0)
mean_1 = np.mean(X_train_1, axis=0)
# compute the in-class covariance
data_dim = X_train.shape[1]
cov_0 = np.zeros((data_dim, data_dim))
cov_1 = np.zeros((data_dim, data_dim))
for x in X_train_0:
    # np.transpose([x - mean_0]).shape -> (data_dim, 1); the outer product
    # with the (1, data_dim) row gives one (data_dim, data_dim) term.
    cov_0 += np.dot(np.transpose([x - mean_0]),
                    [x - mean_0]) / X_train_0.shape[0]
for x in X_train_1:
    # Bug fix: class 1's covariance must be centered on mean_1, not mean_0.
    cov_1 += np.dot(np.transpose([x - mean_1]),
                    [x - mean_1]) / X_train_1.shape[0]
# Shared covariance is taken as a weighted average of individual in-class covariance.
cov = (cov_0 * X_train_0.shape[0] + cov_1 * X_train_1.shape[0]
       ) / (X_train_0.shape[0] + X_train_1.shape[0])
# Computing weights and bias
# The weight matrix and deviation vector can be directly calculated.
# Compute the inverse of covariance matrix
# Since the covariance matrix may be nearly singular, np.linalg.inv() may give a large numerical error
# Via SVD decomposition, one can get matrix inverse efficiently and accurately
u, s, v = np.linalg.svd(cov, full_matrices=False)
# Inverse from the SVD factors: cov^-1 = v.T @ diag(1/s) @ u.T
inv = np.matmul(v.T * 1 / s, u.T)
# Directly compute weights and bias
# w = cov^-1 (mean_0 - mean_1); b adds the quadratic terms and the
# log ratio of the class sizes (the class priors).
w = np.dot(inv, mean_0 - mean_1)
b = -0.5 * np.dot(mean_0, np.dot(inv, mean_0)) + 0.5 * np.dot(mean_1, np.dot(inv, mean_1)) \
    + np.log(float(X_train_0.shape[0]) / X_train_1.shape[0])
# Compute accuracy on training set
# w = inv @ (mean_0 - mean_1) points towards class 0, so _predict returns 1
# for class-0-like rows; the "1 -" flips predictions to match the y labels.
y_train_pred = 1 - _predict(X_train, w, b)
print('Training accuracy: {}'.format(_accuracy(y_train_pred, y_train)))
# Predicting testing labels
# Bug fix: the training-accuracy computation above flips _predict's output
# with "1 -" (w points towards class 0), but the test predictions were not
# flipped; apply the same flip so both use the same label convention.
predictions = 1 - _predict(X_test, w, b)
with open('output_generative.csv', mode='w', newline='') as submit_file:
    csv_writer = csv.writer(submit_file)
    header = ['id', 'label']
    print(header)
    csv_writer.writerow(header)
    # ids are 1-based in the submission format
    for i in range(len(predictions)):
        row = [str(i+1), predictions[i]]
        csv_writer.writerow(row)
        print(row)
print()
# Print out the most significant weights
# Arrange the array in an ascending order and take it from the end to the front
ind = np.argsort(np.abs(w))[::-1]
with open(X_test_fpath) as f:
    # Header row holds the feature names.
    content = f.readline().strip('\n').split(',')
features = np.array(content)
# NOTE(review): the header still contains the id column while w indexes the
# data columns (column 0 was dropped when loading), so features[i] may be
# off by one relative to w — confirm against the data files.
for i in ind[0: 10]:
    print(features[i], w[i])
print(features[i], w[i]) | 0.651798 | 0.590986 |
from unittest import TestCase
from httpobs.scanner.analyzer.content import contribute, subresource_integrity
from httpobs.tests.utils import empty_requests
class TestContribute(TestCase):
    """Tests for the contribute.json analyzer.

    Fix: ``assertEquals`` is a deprecated alias of ``assertEqual`` that was
    removed in Python 3.12; all assertions now use ``assertEqual``.
    """

    def setUp(self):
        self.reqs = empty_requests()

    def tearDown(self):
        self.reqs = None

    def test_no_contribute_mozilla(self):
        result = contribute(self.reqs)
        self.assertEqual('contribute-json-not-implemented', result['result'])
        self.assertFalse(result['pass'])

    def test_no_contribute_not_mozilla(self):
        self.reqs['responses']['auto'].url = 'https://github.com'
        result = contribute(self.reqs)
        self.assertEqual('contribute-json-only-required-on-mozilla-properties', result['result'])
        self.assertTrue(result['pass'])

    def test_invalid_json(self):
        self.reqs['resources']['/contribute.json'] = 'foobar'
        result = contribute(self.reqs)
        self.assertEqual('contribute-json-invalid-json', result['result'])
        self.assertFalse(result['pass'])

    def test_contribute_too_large(self):
        self.reqs['resources']['/contribute.json'] = '{"name": "' + 'foo' * 100000 + '"}'
        result = contribute(self.reqs)
        # Oversized documents should produce no parsed data.
        self.assertEqual(result['data'], {})

    def test_with_required_keys(self):
        self.reqs['resources']['/contribute.json'] = """
        {
            "name": "Bedrock",
            "description": "The app powering www.mozilla.org.",
            "repository": {
                "url": "https://github.com/mozilla/bedrock",
                "license": "MPL2",
                "tests": "https://travis-ci.org/mozilla/bedrock/"
            },
            "participate": {
                "home": "https://wiki.mozilla.org/Webdev/GetInvolved/mozilla.org",
                "docs": "http://bedrock.readthedocs.org/",
                "mailing-list": "https://www.mozilla.org/about/forums/#dev-mozilla-org",
                "irc": "irc://irc.mozilla.org/#www"
            },
            "bugs": {
                "list": "https://bugzilla.mozilla.org/describecomponents.cgi?product=www.mozilla.org",
                "report": "https://bugzilla.mozilla.org/enter_bug.cgi?product=www.mozilla.org",
                "mentored": "https://bugzilla.mozilla.org/buglist.cgi?f1=bug_mentor&o1=..."
            },
            "urls": {
                "prod": "https://www.mozilla.org",
                "stage": "https://www.allizom.org",
                "dev": "https://www-dev.allizom.org",
                "demo1": "https://www-demo1.allizom.org",
                "demo2": "https://www-demo2.allizom.org",
                "demo3": "https://www-demo3.allizom.org",
                "demo4": "https://www-demo4.allizom.org",
                "demo5": "https://www-demo5.allizom.org"
            },
            "keywords": [
                "python",
                "less-css",
                "django",
                "html5",
                "jquery"
            ]
        }"""
        result = contribute(self.reqs)
        self.assertEqual('contribute-json-with-required-keys', result['result'])
        self.assertTrue(result['pass'])

    def test_missing_required_keys(self):
        # Same document as above, but without the required "bugs" section.
        self.reqs['resources']['/contribute.json'] = """
        {
            "name": "Bedrock",
            "description": "The app powering www.mozilla.org.",
            "repository": {
                "url": "https://github.com/mozilla/bedrock",
                "license": "MPL2",
                "tests": "https://travis-ci.org/mozilla/bedrock/"
            },
            "participate": {
                "home": "https://wiki.mozilla.org/Webdev/GetInvolved/mozilla.org",
                "docs": "http://bedrock.readthedocs.org/",
                "mailing-list": "https://www.mozilla.org/about/forums/#dev-mozilla-org",
                "irc": "irc://irc.mozilla.org/#www"
            },
            "urls": {
                "prod": "https://www.mozilla.org",
                "stage": "https://www.allizom.org",
                "dev": "https://www-dev.allizom.org",
                "demo1": "https://www-demo1.allizom.org",
                "demo2": "https://www-demo2.allizom.org",
                "demo3": "https://www-demo3.allizom.org",
                "demo4": "https://www-demo4.allizom.org",
                "demo5": "https://www-demo5.allizom.org"
            },
            "keywords": [
                "python",
                "less-css",
                "django",
                "html5",
                "jquery"
            ]
        }"""
        result = contribute(self.reqs)
        self.assertEqual('contribute-json-missing-required-keys', result['result'])
        self.assertFalse(result['pass'])
class TestSubResourceIntegrity(TestCase):
    """Tests for the subresource-integrity (SRI) analyzer.

    Fix: ``assertEquals`` is a deprecated alias of ``assertEqual`` that was
    removed in Python 3.12; all assertions now use ``assertEqual``.
    """

    def setUp(self):
        self.reqs = empty_requests()

    def tearDown(self):
        self.reqs = None

    def test_no_scripts(self):
        self.reqs = empty_requests('test_content_sri_no_scripts.html')
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-not-implemented-but-no-scripts-loaded', result['result'])
        self.assertTrue(result['pass'])

    def test_not_html(self):
        # invalid html
        self.reqs['resources']['__path__'] = '<![..]>'
        result = subresource_integrity(self.reqs)
        self.assertEqual('html-not-parsable', result['result'])
        self.assertFalse(result['pass'])
        # json, like what an API might return
        self.reqs['responses']['auto'].headers['Content-Type'] = 'application/json'
        self.reqs['resources']['__path__'] = """
        {
            'foo': 'bar'
        }
        """
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-not-implemented-response-not-html', result['result'])
        self.assertTrue(result['pass'])

    def test_same_origin(self):
        self.reqs = empty_requests('test_content_sri_sameorigin1.html')
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-not-implemented-but-all-scripts-loaded-from-secure-origin', result['result'])
        self.assertTrue(result['pass'])
        # On the same second-level domain, but without a protocol
        self.reqs = empty_requests('test_content_sri_sameorigin3.html')
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-not-implemented-and-external-scripts-not-loaded-securely', result['result'])
        self.assertFalse(result['pass'])
        # On the same second-level domain, with https:// specified
        self.reqs = empty_requests('test_content_sri_sameorigin2.html')
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-not-implemented-but-all-scripts-loaded-from-secure-origin', result['result'])
        self.assertTrue(result['pass'])
        # And the same, but with a 404 status code
        self.reqs['responses']['auto'].status_code = 404
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-not-implemented-but-all-scripts-loaded-from-secure-origin', result['result'])
        self.assertTrue(result['pass'])

    def test_implemented_external_scripts_https(self):
        # load from a remote site
        self.reqs = empty_requests('test_content_sri_impl_external_https1.html')
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-implemented-and-external-scripts-loaded-securely', result['result'])
        self.assertTrue(result['pass'])
        # load from an intranet / localhost
        self.reqs = empty_requests('test_content_sri_impl_external_https2.html')
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-implemented-and-external-scripts-loaded-securely', result['result'])
        self.assertTrue(result['pass'])

    def test_implemented_same_origin(self):
        self.reqs = empty_requests('test_content_sri_impl_sameorigin.html')
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-implemented-and-all-scripts-loaded-securely', result['result'])
        self.assertTrue(result['pass'])

    def test_not_implemented_external_scripts_https(self):
        self.reqs = empty_requests('test_content_sri_notimpl_external_https.html')
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-not-implemented-but-external-scripts-loaded-securely', result['result'])
        self.assertFalse(result['pass'])

    def test_implemented_external_scripts_http(self):
        self.reqs = empty_requests('test_content_sri_impl_external_http.html')
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-implemented-but-external-scripts-not-loaded-securely', result['result'])
        self.assertFalse(result['pass'])

    def test_implemented_external_scripts_noproto(self):
        self.reqs = empty_requests('test_content_sri_impl_external_noproto.html')
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-implemented-but-external-scripts-not-loaded-securely', result['result'])
        self.assertFalse(result['pass'])

    def test_not_implemented_external_scripts_http(self):
        self.reqs = empty_requests('test_content_sri_notimpl_external_http.html')
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-not-implemented-and-external-scripts-not-loaded-securely', result['result'])
        self.assertFalse(result['pass'])

    def test_not_implemented_external_scripts_noproto(self):
        self.reqs = empty_requests('test_content_sri_notimpl_external_noproto.html')
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-not-implemented-and-external-scripts-not-loaded-securely', result['result'])
self.assertFalse(result['pass']) | functions/observatory/http-observatory/httpobs/tests/unittests/test_content.py | from unittest import TestCase
from httpobs.scanner.analyzer.content import contribute, subresource_integrity
from httpobs.tests.utils import empty_requests
class TestContribute(TestCase):
    """Tests for the contribute.json analyzer.

    Fix: ``assertEquals`` is a deprecated alias of ``assertEqual`` that was
    removed in Python 3.12; all assertions now use ``assertEqual``.
    """

    def setUp(self):
        self.reqs = empty_requests()

    def tearDown(self):
        self.reqs = None

    def test_no_contribute_mozilla(self):
        result = contribute(self.reqs)
        self.assertEqual('contribute-json-not-implemented', result['result'])
        self.assertFalse(result['pass'])

    def test_no_contribute_not_mozilla(self):
        self.reqs['responses']['auto'].url = 'https://github.com'
        result = contribute(self.reqs)
        self.assertEqual('contribute-json-only-required-on-mozilla-properties', result['result'])
        self.assertTrue(result['pass'])

    def test_invalid_json(self):
        self.reqs['resources']['/contribute.json'] = 'foobar'
        result = contribute(self.reqs)
        self.assertEqual('contribute-json-invalid-json', result['result'])
        self.assertFalse(result['pass'])

    def test_contribute_too_large(self):
        self.reqs['resources']['/contribute.json'] = '{"name": "' + 'foo' * 100000 + '"}'
        result = contribute(self.reqs)
        # Oversized documents should produce no parsed data.
        self.assertEqual(result['data'], {})

    def test_with_required_keys(self):
        self.reqs['resources']['/contribute.json'] = """
        {
            "name": "Bedrock",
            "description": "The app powering www.mozilla.org.",
            "repository": {
                "url": "https://github.com/mozilla/bedrock",
                "license": "MPL2",
                "tests": "https://travis-ci.org/mozilla/bedrock/"
            },
            "participate": {
                "home": "https://wiki.mozilla.org/Webdev/GetInvolved/mozilla.org",
                "docs": "http://bedrock.readthedocs.org/",
                "mailing-list": "https://www.mozilla.org/about/forums/#dev-mozilla-org",
                "irc": "irc://irc.mozilla.org/#www"
            },
            "bugs": {
                "list": "https://bugzilla.mozilla.org/describecomponents.cgi?product=www.mozilla.org",
                "report": "https://bugzilla.mozilla.org/enter_bug.cgi?product=www.mozilla.org",
                "mentored": "https://bugzilla.mozilla.org/buglist.cgi?f1=bug_mentor&o1=..."
            },
            "urls": {
                "prod": "https://www.mozilla.org",
                "stage": "https://www.allizom.org",
                "dev": "https://www-dev.allizom.org",
                "demo1": "https://www-demo1.allizom.org",
                "demo2": "https://www-demo2.allizom.org",
                "demo3": "https://www-demo3.allizom.org",
                "demo4": "https://www-demo4.allizom.org",
                "demo5": "https://www-demo5.allizom.org"
            },
            "keywords": [
                "python",
                "less-css",
                "django",
                "html5",
                "jquery"
            ]
        }"""
        result = contribute(self.reqs)
        self.assertEqual('contribute-json-with-required-keys', result['result'])
        self.assertTrue(result['pass'])

    def test_missing_required_keys(self):
        # Same document as above, but without the required "bugs" section.
        self.reqs['resources']['/contribute.json'] = """
        {
            "name": "Bedrock",
            "description": "The app powering www.mozilla.org.",
            "repository": {
                "url": "https://github.com/mozilla/bedrock",
                "license": "MPL2",
                "tests": "https://travis-ci.org/mozilla/bedrock/"
            },
            "participate": {
                "home": "https://wiki.mozilla.org/Webdev/GetInvolved/mozilla.org",
                "docs": "http://bedrock.readthedocs.org/",
                "mailing-list": "https://www.mozilla.org/about/forums/#dev-mozilla-org",
                "irc": "irc://irc.mozilla.org/#www"
            },
            "urls": {
                "prod": "https://www.mozilla.org",
                "stage": "https://www.allizom.org",
                "dev": "https://www-dev.allizom.org",
                "demo1": "https://www-demo1.allizom.org",
                "demo2": "https://www-demo2.allizom.org",
                "demo3": "https://www-demo3.allizom.org",
                "demo4": "https://www-demo4.allizom.org",
                "demo5": "https://www-demo5.allizom.org"
            },
            "keywords": [
                "python",
                "less-css",
                "django",
                "html5",
                "jquery"
            ]
        }"""
        result = contribute(self.reqs)
        self.assertEqual('contribute-json-missing-required-keys', result['result'])
        self.assertFalse(result['pass'])
class TestSubResourceIntegrity(TestCase):
    """Tests for the subresource-integrity (SRI) analyzer.

    Fix: ``assertEquals`` is a deprecated alias of ``assertEqual`` that was
    removed in Python 3.12; all assertions now use ``assertEqual``.
    """

    def setUp(self):
        self.reqs = empty_requests()

    def tearDown(self):
        self.reqs = None

    def test_no_scripts(self):
        self.reqs = empty_requests('test_content_sri_no_scripts.html')
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-not-implemented-but-no-scripts-loaded', result['result'])
        self.assertTrue(result['pass'])

    def test_not_html(self):
        # invalid html
        self.reqs['resources']['__path__'] = '<![..]>'
        result = subresource_integrity(self.reqs)
        self.assertEqual('html-not-parsable', result['result'])
        self.assertFalse(result['pass'])
        # json, like what an API might return
        self.reqs['responses']['auto'].headers['Content-Type'] = 'application/json'
        self.reqs['resources']['__path__'] = """
        {
            'foo': 'bar'
        }
        """
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-not-implemented-response-not-html', result['result'])
        self.assertTrue(result['pass'])

    def test_same_origin(self):
        self.reqs = empty_requests('test_content_sri_sameorigin1.html')
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-not-implemented-but-all-scripts-loaded-from-secure-origin', result['result'])
        self.assertTrue(result['pass'])
        # On the same second-level domain, but without a protocol
        self.reqs = empty_requests('test_content_sri_sameorigin3.html')
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-not-implemented-and-external-scripts-not-loaded-securely', result['result'])
        self.assertFalse(result['pass'])
        # On the same second-level domain, with https:// specified
        self.reqs = empty_requests('test_content_sri_sameorigin2.html')
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-not-implemented-but-all-scripts-loaded-from-secure-origin', result['result'])
        self.assertTrue(result['pass'])
        # And the same, but with a 404 status code
        self.reqs['responses']['auto'].status_code = 404
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-not-implemented-but-all-scripts-loaded-from-secure-origin', result['result'])
        self.assertTrue(result['pass'])

    def test_implemented_external_scripts_https(self):
        # load from a remote site
        self.reqs = empty_requests('test_content_sri_impl_external_https1.html')
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-implemented-and-external-scripts-loaded-securely', result['result'])
        self.assertTrue(result['pass'])
        # load from an intranet / localhost
        self.reqs = empty_requests('test_content_sri_impl_external_https2.html')
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-implemented-and-external-scripts-loaded-securely', result['result'])
        self.assertTrue(result['pass'])

    def test_implemented_same_origin(self):
        self.reqs = empty_requests('test_content_sri_impl_sameorigin.html')
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-implemented-and-all-scripts-loaded-securely', result['result'])
        self.assertTrue(result['pass'])

    def test_not_implemented_external_scripts_https(self):
        self.reqs = empty_requests('test_content_sri_notimpl_external_https.html')
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-not-implemented-but-external-scripts-loaded-securely', result['result'])
        self.assertFalse(result['pass'])

    def test_implemented_external_scripts_http(self):
        self.reqs = empty_requests('test_content_sri_impl_external_http.html')
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-implemented-but-external-scripts-not-loaded-securely', result['result'])
        self.assertFalse(result['pass'])

    def test_implemented_external_scripts_noproto(self):
        self.reqs = empty_requests('test_content_sri_impl_external_noproto.html')
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-implemented-but-external-scripts-not-loaded-securely', result['result'])
        self.assertFalse(result['pass'])

    def test_not_implemented_external_scripts_http(self):
        self.reqs = empty_requests('test_content_sri_notimpl_external_http.html')
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-not-implemented-and-external-scripts-not-loaded-securely', result['result'])
        self.assertFalse(result['pass'])

    def test_not_implemented_external_scripts_noproto(self):
        self.reqs = empty_requests('test_content_sri_notimpl_external_noproto.html')
        result = subresource_integrity(self.reqs)
        self.assertEqual('sri-not-implemented-and-external-scripts-not-loaded-securely', result['result'])
self.assertFalse(result['pass']) | 0.710829 | 0.441312 |
import os
import pathlib
import subprocess
from contextlib import contextmanager
from typing import Container, Generator, List, Optional, Union
class DockerComposeRuntimeError(RuntimeError):
    """Raised when a ``docker-compose`` command fails or times out."""
    pass
class DockerComposeExecutor:
    """Runs ``docker-compose`` subcommands against a single compose file."""

    def __init__(self, compose_file: str, timeout: int) -> None:
        self._compose_file = compose_file
        self._timeout = timeout  # seconds allowed per command

    def execute(self, subcommand: Union[str, List[str]]) -> str:
        """Run ``docker-compose -f <compose_file> <subcommand>`` and return its output."""
        subcommand = [subcommand] if isinstance(subcommand, str) else subcommand
        command = ["docker-compose", "-f", self._compose_file, *subcommand]
        return self._execute(command)

    def _execute(
        self, command: List[str], success_codes: Container[int] = (os.EX_OK,)
    ) -> str:
        """Run *command* and return its combined stdout/stderr as text.

        Raises:
            DockerComposeRuntimeError: If the command times out or exits with
                a code not in *success_codes*.
        """
        # Run in the compose file's folder as you normally would when building
        # This also helps with some of compose's quirks, like the inability to
        # specify a .env file location
        cwd = pathlib.Path(self._compose_file).parent
        try:
            result = subprocess.run(
                command,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                timeout=self._timeout,
                cwd=cwd,
            )
        except subprocess.TimeoutExpired as exc:
            # -9 mimics a SIGKILL exit status and can never be a success code.
            exit_code = -9
            # Bug fix: TimeoutExpired.output is bytes (or None) because the
            # process was not started in text mode; decode it so this method
            # reports str as annotated instead of embedding b'...' in the error.
            output = exc.output.decode("utf-8") if exc.output else ""
        else:
            exit_code = result.returncode
            output = result.stdout.decode("utf-8")
        if exit_code not in success_codes:
            raise DockerComposeRuntimeError(
                f"Command {command} returned {exit_code}:\n {output}."
            )
        return output
@contextmanager
def docker_services(
    yml_file_abs_path: str,
    timeout: int = 1800,
    remove_images: bool = True,
    docker_build_args: Optional[dict] = None,
) -> Generator[DockerComposeExecutor, None, None]:
    """Build the compose project, yield an executor for it, and always tear
    the project down (``down -v``, optionally removing local images)."""
    build_args = docker_build_args or {}
    # Translate {key: value} build args into repeated --build-arg key=value flags.
    build_cmd = ["build"]
    for arg_name, arg_value in build_args.items():
        build_cmd += ["--build-arg", f"{arg_name}={arg_value}"]
    compose = DockerComposeExecutor(yml_file_abs_path, timeout)
    try:
        compose.execute(build_cmd)
        yield compose
    finally:
        teardown_cmd = ["down", "-v"] + (["--rmi", "local"] if remove_images else [])
        compose.execute(teardown_cmd)
import pathlib
import subprocess
from contextlib import contextmanager
from typing import Container, Generator, List, Optional, Union
class DockerComposeRuntimeError(RuntimeError):
    """Raised when a docker-compose command exits with an unexpected code."""
    pass
class DockerComposeExecutor:
    """Runs ``docker-compose`` subcommands against a fixed compose file."""

    def __init__(self, compose_file: str, timeout: int) -> None:
        self._compose_file = compose_file
        self._timeout = timeout

    def execute(self, subcommand: Union[str, List[str]]) -> str:
        """Run ``docker-compose -f <compose_file> <subcommand>``; return output."""
        subcommand = [subcommand] if isinstance(subcommand, str) else subcommand
        command = ["docker-compose", "-f", self._compose_file, *subcommand]
        return self._execute(command)

    def _execute(
        self, command: List[str], success_codes: Container[int] = (os.EX_OK,)
    ) -> str:
        """Run *command*; raise unless the exit code is in *success_codes*."""
        # Run in the compose file's folder as you normally would when building
        # This also helps with some of compose's quirks, like the inability to
        # specify a .env file location
        cwd = pathlib.Path(self._compose_file).parent
        try:
            result = subprocess.run(
                command,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                timeout=self._timeout,
                cwd=cwd,
            )
        except subprocess.TimeoutExpired as exc:
            # Timeout is reported like a SIGKILL-style failure.
            exit_code = -9
            # NOTE(review): exc.output is raw bytes (or None) here, unlike the
            # decoded success path below — confirm the mismatch is intended.
            output = exc.output
        else:
            exit_code = result.returncode
            output = result.stdout.decode("utf-8")
        if exit_code not in success_codes:
            raise DockerComposeRuntimeError(
                f"Command {command} returned {exit_code}:\n {output}."
            )
        return output
@contextmanager
def docker_services(
    yml_file_abs_path: str,
    timeout: int = 1800,
    remove_images: bool = True,
    docker_build_args: Optional[dict] = None,
) -> Generator[DockerComposeExecutor, None, None]:
    """Build the compose project, yield an executor, and always tear it down."""
    docker_build_args = docker_build_args or {}
    # Turn {key: value} into repeated --build-arg key=value flags.
    build_command = ["build"]
    for key, value in docker_build_args.items():
        build_command.extend(["--build-arg", f"{key}={value}"])
    docker_compose = DockerComposeExecutor(yml_file_abs_path, timeout)
    try:
        docker_compose.execute(build_command)
        yield docker_compose
    finally:
        # Teardown runs even if build or the caller's body failed.
        cleanup_cmd = ["down", "-v"]
        if remove_images:
            cleanup_cmd.extend(["--rmi", "local"])
        docker_compose.execute(cleanup_cmd)
import abc
import warnings
from typing import List, Optional, Sequence, Tuple, Union, cast
import torch
from .theseus_function import TheseusFunction
from .variable import Variable
# Abstract class for representing cost weights (aka, precisions, inverse covariance)
# Concrete classes must implement two methods:
# - `weight_error`: return an error tensor weighted by the cost weight
# - `weightJacobiansError`: returns jacobians an errors weighted by the cost weight
class CostWeight(TheseusFunction, abc.ABC):
    """Abstract base for cost weights (aka precisions, inverse covariances).

    Concrete subclasses must implement:
      - ``weight_error``: return an error tensor weighted by the cost weight
      - ``weight_jacobians_and_error``: return jacobians and error weighted
        by the cost weight
    """

    def __init__(
        self,
        name: Optional[str] = None,
    ):
        super().__init__(name=name)

    @abc.abstractmethod
    def weight_error(self, error: torch.Tensor) -> torch.Tensor:
        pass

    @abc.abstractmethod
    def weight_jacobians_and_error(
        self,
        jacobians: List[torch.Tensor],
        error: torch.Tensor,
    ) -> Tuple[List[torch.Tensor], torch.Tensor]:
        pass

    # Subclass implementations must copy every attribute.
    @abc.abstractmethod
    def _copy_impl(self, new_name: Optional[str] = None) -> "TheseusFunction":
        pass

    def copy(
        self, new_name: Optional[str] = None, keep_variable_names: bool = False
    ) -> "CostWeight":
        """Return a copy, cast to CostWeight for the caller's convenience."""
        return cast(
            CostWeight,
            super().copy(new_name=new_name, keep_variable_names=keep_variable_names),
        )
# Besides passing a theseus Variable, can also get a float and it will create the
# Variable with a default name for it
class ScaleCostWeight(CostWeight):
    """Cost weight that multiplies errors/jacobians by a single scalar.

    ``scale`` may be a theseus Variable, a tensor, or a plain float; non-Variable
    inputs are wrapped in a Variable with a default name. Internally the scale
    is stored with shape (batch_size, 1) for broadcasting.
    """

    def __init__(
        self,
        scale: Union[float, torch.Tensor, Variable],
        name: Optional[str] = None,
    ):
        super().__init__(name=name)
        if not isinstance(scale, Variable):
            if not isinstance(scale, torch.Tensor):
                scale = torch.tensor(scale)
            self.scale = Variable(scale)
        else:
            self.scale = scale
        # Idiom fix: `x not in y` instead of `not x in y`.
        if self.scale.data.squeeze().ndim not in (0, 1):
            raise ValueError("ScaleCostWeight only accepts 0- or 1-dim (batched) data.")
        # Normalize storage to (batch_size, 1) so broadcasting works below.
        self.scale.data = self.scale.data.view(-1, 1)
        self.register_aux_vars(["scale"])

    def weight_error(self, error: torch.Tensor) -> torch.Tensor:
        """Return ``error`` scaled by the weight."""
        return error * self.scale.data

    def weight_jacobians_and_error(
        self,
        jacobians: List[torch.Tensor],
        error: torch.Tensor,
    ) -> Tuple[List[torch.Tensor], torch.Tensor]:
        """Scale both jacobians and error by the weight."""
        error = error * self.scale.data
        new_jacobians = []
        for jac in jacobians:
            # Jacobians are (batch, err_dim, var_dim); reshape for broadcasting.
            new_jacobians.append(jac * self.scale.data.view(-1, 1, 1))
        return new_jacobians, error

    def _copy_impl(self, new_name: Optional[str] = None) -> "ScaleCostWeight":
        return ScaleCostWeight(self.scale.copy(), name=new_name)
# Besides passing a theseus Variable, can also get any float sequence and it will create the
# Variable with a default name for it
class DiagonalCostWeight(CostWeight):
    """Cost weight that multiplies each error dimension by a diagonal entry.

    ``diagonal`` may be a theseus Variable, a tensor, or any float sequence;
    non-Variable inputs are wrapped in a Variable with a default name.
    Internally the diagonal is stored with shape (batch_size, err_dim).
    """

    def __init__(
        self,
        diagonal: Union[Sequence[float], torch.Tensor, Variable],
        name: Optional[str] = None,
    ):
        super().__init__(name=name)
        if not isinstance(diagonal, Variable):
            if not isinstance(diagonal, torch.Tensor):
                diagonal = torch.tensor(diagonal)
            self.diagonal = Variable(diagonal)
        else:
            self.diagonal = diagonal
        # Idiom fix: positive comparison instead of `not x < 3`.
        if self.diagonal.data.squeeze().ndim >= 3:
            raise ValueError("DiagonalCostWeight only accepts data with ndim < 3.")
        if self.diagonal.data.ndim == 0:
            self.diagonal.data = self.diagonal.data.view(1, 1)
        if self.diagonal.data.ndim == 1:
            warnings.warn(
                "1-D diagonal input is ambiguous. Dimension will be "
                "interpreted as data dimension and not batch dimension."
            )
            self.diagonal.data = self.diagonal.data.view(1, -1)
        self.register_aux_vars(["diagonal"])

    def weight_error(self, error: torch.Tensor) -> torch.Tensor:
        """Return ``error`` with each dimension scaled by the diagonal."""
        return error * self.diagonal.data

    def weight_jacobians_and_error(
        self,
        jacobians: List[torch.Tensor],
        error: torch.Tensor,
    ) -> Tuple[List[torch.Tensor], torch.Tensor]:
        """Scale jacobians and error by the diagonal weight."""
        error = error * self.diagonal.data
        new_jacobians = []
        for jac in jacobians:
            # Jacobian is batch_size x cost_function_dim x var_dim.
            # This left-multiplies the weights into the jacobian.
            new_jacobians.append(jac * self.diagonal.data.unsqueeze(2))
        return new_jacobians, error

    def _copy_impl(self, new_name: Optional[str] = None) -> "DiagonalCostWeight":
        return DiagonalCostWeight(self.diagonal.copy(), name=new_name)
import abc
import warnings
from typing import List, Optional, Sequence, Tuple, Union, cast
import torch
from .theseus_function import TheseusFunction
from .variable import Variable
# Abstract class for representing cost weights (aka, precisions, inverse covariance)
# Concrete classes must implement two methods:
# - `weight_error`: return an error tensor weighted by the cost weight
# - `weightJacobiansError`: returns jacobians an errors weighted by the cost weight
class CostWeight(TheseusFunction, abc.ABC):
    """Abstract base for cost weights (aka precisions, inverse covariances)."""

    def __init__(
        self,
        name: Optional[str] = None,
    ):
        super().__init__(name=name)

    @abc.abstractmethod
    def weight_error(self, error: torch.Tensor) -> torch.Tensor:
        pass

    @abc.abstractmethod
    def weight_jacobians_and_error(
        self,
        jacobians: List[torch.Tensor],
        error: torch.Tensor,
    ) -> Tuple[List[torch.Tensor], torch.Tensor]:
        pass

    # Subclass implementations must copy every attribute.
    @abc.abstractmethod
    def _copy_impl(self, new_name: Optional[str] = None) -> "TheseusFunction":
        pass

    def copy(
        self, new_name: Optional[str] = None, keep_variable_names: bool = False
    ) -> "CostWeight":
        """Return a copy, cast to CostWeight for the caller's convenience."""
        return cast(
            CostWeight,
            super().copy(new_name=new_name, keep_variable_names=keep_variable_names),
        )
# Besides passing a theseus Variable, can also get a float and it will create the
# Variable with a default name for it
class ScaleCostWeight(CostWeight):
    """Cost weight that multiplies errors/jacobians by a single scalar.

    Accepts a theseus Variable, tensor, or plain float; non-Variable inputs
    are wrapped in a Variable with a default name.
    """

    def __init__(
        self,
        scale: Union[float, torch.Tensor, Variable],
        name: Optional[str] = None,
    ):
        super().__init__(name=name)
        if not isinstance(scale, Variable):
            if not isinstance(scale, torch.Tensor):
                scale = torch.tensor(scale)
            self.scale = Variable(scale)
        else:
            self.scale = scale
        if not self.scale.data.squeeze().ndim in [0, 1]:
            raise ValueError("ScaleCostWeight only accepts 0- or 1-dim (batched) data.")
        # Stored as (batch_size, 1) for broadcasting.
        self.scale.data = self.scale.data.view(-1, 1)
        self.register_aux_vars(["scale"])

    def weight_error(self, error: torch.Tensor) -> torch.Tensor:
        """Return ``error`` scaled by the weight."""
        return error * self.scale.data

    def weight_jacobians_and_error(
        self,
        jacobians: List[torch.Tensor],
        error: torch.Tensor,
    ) -> Tuple[List[torch.Tensor], torch.Tensor]:
        """Scale both jacobians and error by the weight."""
        error = error * self.scale.data
        new_jacobians = []
        for jac in jacobians:
            # Jacobians are (batch, err_dim, var_dim); reshape for broadcasting.
            new_jacobians.append(jac * self.scale.data.view(-1, 1, 1))
        return new_jacobians, error

    def _copy_impl(self, new_name: Optional[str] = None) -> "ScaleCostWeight":
        return ScaleCostWeight(self.scale.copy(), name=new_name)
# Besides passing a theseus Variable, can also get any float sequence and it will create the
# Variable with a default name for it
class DiagonalCostWeight(CostWeight):
    """Cost weight that multiplies each error dimension by a diagonal entry.

    Accepts a theseus Variable, tensor, or any float sequence; non-Variable
    inputs are wrapped in a Variable with a default name.
    """

    def __init__(
        self,
        diagonal: Union[Sequence[float], torch.Tensor, Variable],
        name: Optional[str] = None,
    ):
        super().__init__(name=name)
        if not isinstance(diagonal, Variable):
            if not isinstance(diagonal, torch.Tensor):
                diagonal = torch.tensor(diagonal)
            self.diagonal = Variable(diagonal)
        else:
            self.diagonal = diagonal
        if not self.diagonal.data.squeeze().ndim < 3:
            raise ValueError("DiagonalCostWeight only accepts data with ndim < 3.")
        if self.diagonal.data.ndim == 0:
            self.diagonal.data = self.diagonal.data.view(1, 1)
        if self.diagonal.data.ndim == 1:
            warnings.warn(
                "1-D diagonal input is ambiguous. Dimension will be "
                "interpreted as data dimension and not batch dimension."
            )
            self.diagonal.data = self.diagonal.data.view(1, -1)
        self.register_aux_vars(["diagonal"])

    def weight_error(self, error: torch.Tensor) -> torch.Tensor:
        """Return ``error`` with each dimension scaled by the diagonal."""
        return error * self.diagonal.data

    def weight_jacobians_and_error(
        self,
        jacobians: List[torch.Tensor],
        error: torch.Tensor,
    ) -> Tuple[List[torch.Tensor], torch.Tensor]:
        """Scale jacobians and error by the diagonal weight."""
        error = error * self.diagonal.data
        new_jacobians = []
        for jac in jacobians:
            # Jacobian is batch_size x cost_function_dim x var_dim.
            # This left multiplies the weights to jacobian.
            new_jacobians.append(jac * self.diagonal.data.unsqueeze(2))
        return new_jacobians, error

    def _copy_impl(self, new_name: Optional[str] = None) -> "DiagonalCostWeight":
        return DiagonalCostWeight(self.diagonal.copy(), name=new_name)
import numpy as np
from builtins import TypeError
import matplotlib.pyplot as plt
import cv2
from collections import deque
# Define a class to receive the characteristics of each line detection
class Line():
    """Tracks one lane line: a sliding window of polynomial fits, their
    average, and curvature estimates converted to metric units."""

    # Define conversions in x and y from pixels space to meters
    ym_per_pix = 30/720  # meters per pixel in y dimension
    xm_per_pix = 3.7/700  # meters per pixel in x dimension
    MAX_NUM_FITS = 15    # sliding-window length for coefficient averaging
    PREV_FILTER = 0.95   # low-pass weights for curvature smoothing
    NEXT_FILTER = 0.05

    def __init__(self):
        # was the line detected in the last iteration?
        self.detected = False
        # x values of the last n fits of the line
        self.recent_xfitted = []
        # average x values of the fitted line over the last n iterations
        self.bestx = None
        # polynomial coefficients averaged over the last n iterations
        self.best_fit = None
        # polynomial coefficients for the most recent fit
        self.current_fit = [np.array([False])]
        # sliding window of recent coefficient fits
        self.all_fits = deque()
        # radius of curvature of the line in some units
        self.radius_of_curvature = None
        # filtered radius of curvature of the line in some units
        self.filt_radius_of_curvature = None
        # distance in meters of vehicle center from the line
        self.x_val_bottom_m = None
        # difference in fit coefficients between last and new fits
        self.diffs = np.array([0, 0, 0], dtype='float')
        # x values for detected line pixels
        self.allx = None
        # y values for detected line pixels
        self.ally = None

    def add_coeffs(self, coeffs):
        """Accept a new quadratic fit unless it deviates wildly from the
        running average, then refresh ``best_fit``."""
        # BUG FIX: the original guard `np.any(self.best_fit) != None` was
        # always True (a bool is never None), so the very first call reached
        # `self.best_fit - coeffs` with best_fit still None and crashed.
        if self.best_fit is not None:
            current_diffs = np.abs(self.best_fit - coeffs)
            # Reject outlier fits that jump too far from the average.
            if np.max(current_diffs) > 190.0:
                return
        self.current_fit = coeffs
        self.all_fits.append(coeffs)
        num_coeffs = len(self.all_fits)
        if len(self.all_fits) > self.MAX_NUM_FITS:
            self.all_fits.popleft()
            num_coeffs = self.MAX_NUM_FITS
        self.best_fit = np.array([0.0, 0.0, 0.0])
        for coef in self.all_fits:
            self.best_fit += coef
        self.best_fit = self.best_fit/num_coeffs

    def compute_x_values(self):
        """Evaluate the current quadratic fit at ``self.ally``."""
        try:
            self.allx = self.current_fit[0]*self.ally**2 + self.current_fit[1]*self.ally + self.current_fit[2]
        except TypeError:
            print('The function failed to fit a line!')
            self.allx = 1*self.ally**2 + 1*self.ally

    def draw_lines(self, img):
        """Overlay the fitted line polyline on ``img``."""
        pts = np.array([self.allx, self.ally])
        limg = cv2.polylines(img, np.int32([pts.transpose()]), 0, (255,255,0), 4)
        return limg

    def compute_radii(self):
        """Refit in metric space and update the (filtered) curvature radius."""
        fit_cr = np.polyfit(self.ally*self.ym_per_pix, self.allx*self.xm_per_pix, 2)
        y_eval_px = np.max(self.ally)
        y_eval_m = y_eval_px*self.ym_per_pix
        radii = ((1 + (2*fit_cr[0]*y_eval_m + fit_cr[1])**2)**1.5) / np.absolute(2*fit_cr[0])
        self.radius_of_curvature = radii
        # BUG FIX: compare against None with `is`, not `==` (PEP 8; `==` can
        # trigger elementwise comparison on array-like values).
        if self.filt_radius_of_curvature is None:
            self.filt_radius_of_curvature = radii
        else:
            self.filt_radius_of_curvature = self.PREV_FILTER*self.filt_radius_of_curvature+self.NEXT_FILTER*radii
        print(self.filt_radius_of_curvature)
        # Line position at the bottom of the image, in meters.
        self.x_val_bottom_m = fit_cr[0]*y_eval_m**2 + fit_cr[1]*y_eval_m + fit_cr[2]
from builtins import TypeError
import matplotlib.pyplot as plt
import cv2
from collections import deque
# Define a class to receive the characteristics of each line detection
class Line():
    """Tracks one lane line: a sliding window of polynomial fits, their
    average, and curvature estimates converted to metric units."""

    # Define conversions in x and y from pixels space to meters
    ym_per_pix = 30/720 # meters per pixel in y dimension
    xm_per_pix = 3.7/700 # meters per pixel in x dimension
    MAX_NUM_FITS = 15
    PREV_FILTER = 0.95
    NEXT_FILTER = 0.05

    def __init__(self):
        # was the line detected in the last iteration?
        self.detected = False
        # x values of the last n fits of the line
        self.recent_xfitted = []
        #average x values of the fitted line over the last n iterations
        self.bestx = None
        #polynomial coefficients averaged over the last n iterations
        self.best_fit = None
        #polynomial coefficients for the most recent fit
        self.current_fit = [np.array([False])]
        # polynomial coefficients for the last coefficients
        self.all_fits = deque()
        #radius of curvature of the line in some units
        self.radius_of_curvature = None
        #filtered radius of curvature of the line in some units
        self.filt_radius_of_curvature = None
        #distance in meters of vehicle center from the line
        self.x_val_bottom_m = None
        #difference in fit coefficients between last and new fits
        self.diffs = np.array([0,0,0], dtype='float')
        #x values for detected line pixels
        self.allx = None
        #y values for detected line pixels
        self.ally = None

    def add_coeffs(self, coeffs):
        """Accept a new fit unless it deviates wildly; refresh ``best_fit``."""
        # NOTE(review): `np.any(self.best_fit) != None` is always True (a bool
        # is never None), so the first call reaches `None - coeffs` — this
        # looks like it should be `if self.best_fit is not None:`. Confirm.
        if np.any(self.best_fit) != None:
            current_diffs = np.abs(self.best_fit - coeffs)
            if (np.max(current_diffs)>190.0):
                return
        self.current_fit = coeffs
        self.all_fits.append(coeffs)
        num_coeffs = len(self.all_fits)
        if (len(self.all_fits)>self.MAX_NUM_FITS):
            self.all_fits.popleft()
            num_coeffs = self.MAX_NUM_FITS
        self.best_fit = np.array([0.0,0.0,0.0])
        for coef in self.all_fits:
            self.best_fit += coef
        self.best_fit = self.best_fit/num_coeffs

    def compute_x_values(self):
        """Evaluate the current quadratic fit at ``self.ally``."""
        try:
            self.allx = self.current_fit[0]*self.ally**2 + self.current_fit[1]*self.ally + self.current_fit[2]
        except TypeError:
            print('The function failed to fit a line!')
            self.allx = 1*self.ally**2 + 1*self.ally

    def draw_lines(self,img):
        """Overlay the fitted line polyline on ``img``."""
        pts = np.array([self.allx, self.ally])
        limg = cv2.polylines(img, np.int32([pts.transpose()]), 0, (255,255,0),4)
        return limg

    def compute_radii(self):
        """Refit in metric space and update the (filtered) curvature radius."""
        fit_cr = np.polyfit(self.ally*self.ym_per_pix, self.allx*self.xm_per_pix, 2)
        y_eval_px = np.max(self.ally)
        y_eval_m = y_eval_px*self.ym_per_pix
        radii = ((1 + (2*fit_cr[0]*y_eval_m + fit_cr[1])**2)**1.5) / np.absolute(2*fit_cr[0])
        self.radius_of_curvature = radii
        # NOTE(review): PEP 8 prefers `is None` over `== None` here.
        if self.filt_radius_of_curvature == None:
            self.filt_radius_of_curvature = radii
        else:
            self.filt_radius_of_curvature = self.PREV_FILTER*self.filt_radius_of_curvature+self.NEXT_FILTER*radii
        print(self.filt_radius_of_curvature)
        # Line positions
        self.x_val_bottom_m = fit_cr[0]*y_eval_m**2 + fit_cr[1]*y_eval_m + fit_cr[2]
import warnings
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
def plot(
    res: List[Dict[str, Union[np.ndarray, str, Dict]]],
    legend: Optional[str] = None,
    title: Optional[str] = None,
    xlabel: Optional[str] = None,
    ylabel: Optional[str] = None,
    xlim: Optional[Tuple[float, float]] = None,
    ylim: Optional[Tuple[float, float]] = None,
    inv_x: bool = False,
    inv_y: bool = False,
    sci_x: bool = False,
    sci_y: bool = False,
    axtext: Optional[Union[str, List[str]]] = None,
    annotation_location: Optional[Union[Tuple[float, float], List]] = None,
    percent: bool = False,
    filename: Optional[str] = None,
    screen: bool = True,
) -> None:
    """Plot a list of result series (or histograms) on one matplotlib axis.

    Each entry of ``res`` is a dict with arrays under ``"x"``/``"y"``,
    optionally ``"y_err"`` (error bars), ``"hist"`` (histogram mode),
    ``"args"`` (kwargs forwarded to the matplotlib call) and ``"name"``
    (legacy alias for a legend label). Silently returns if matplotlib is
    not installed. Saves to ``filename`` (dpi 300) and/or shows on screen.
    """
    try:
        import matplotlib as mpl
        import matplotlib.pyplot as plt
        from matplotlib.ticker import AutoMinorLocator, FuncFormatter
    except ImportError:
        warnings.warn("Install matplotlib to enable plotting.")
        return

    def to_percent(y_ticks, _):
        # Adapted from https://matplotlib.org/examples/pylab_examples/histogram_percent_demo.html
        return "{:g}".format(100 * y_ticks)

    font = {"family": "serif", "weight": "normal", "size": 16}
    mpl.rc("font", **font)
    plt.ioff()
    fig, ax = plt.subplots()
    # Track the overall data extent across all series for axis limits.
    xmin = float("inf")
    xmax = float("-inf")
    for r in res:
        if "args" in r:
            args = r["args"]
        else:
            args = dict()
        if "name" in r:
            # backwards compatibility
            args["label"] = r["name"]
        if "hist" in r and r["hist"]:
            y = r["y"]
            # Use Freedman–Diaconis rule to determine good bin width
            inter_quartile_range = np.percentile(y, 75) - np.percentile(y, 25)
            bin_width = 2 * inter_quartile_range / (len(y) ** (1 / 3))
            num_bins = int((np.max(y) - np.min(y)) / bin_width)
            _, x, _ = ax.hist(y, num_bins, **args)
        else:
            x = r["x"]
            y = r["y"]
            # Clip the series to the requested x-range before drawing.
            if xlim is not None:
                x = x[(r["x"] >= xlim[0]) & (r["x"] <= xlim[1])]
                y = y[(r["x"] >= xlim[0]) & (r["x"] <= xlim[1])]
            if "y_err" in r:
                dy = r["y_err"]
                if xlim is not None:
                    dy = dy[(r["x"] >= xlim[0]) & (r["x"] <= xlim[1])]
                ax.errorbar(x, y, yerr=dy, **args)
            else:
                ax.plot(x, y, **args)
        xmin = min(np.min(x), xmin)
        xmax = max(np.max(x), xmax)
    if legend is not None:
        ax.legend(loc=legend)
    # Shrink the axes box to make room for title/labels as they are added.
    box = ax.get_position()
    if title is not None:
        ax.set_title(title, y=1.05)
        box = box.from_bounds(box.x0, box.y0, box.width, box.height * 0.95)
    if xlabel is not None:
        ax.set_xlabel(xlabel, labelpad=5)
        box = box.from_bounds(
            box.x0, box.y0 + 0.05 * box.height, box.width, box.height * 0.95
        )
    if ylabel is not None:
        ax.set_ylabel(ylabel, labelpad=10)
        box = box.from_bounds(
            box.x0 + 0.05 * box.width, box.y0, box.width * 0.95, box.height
        )
    ax.set_position([box.x0, box.y0, box.width, box.height])
    ax.axis("auto")
    if xlim is not None:
        ax.set_xlim(xlim)
    elif np.isfinite(xmin) and np.isfinite(xmax):
        ax.set_xlim([xmin, xmax])
    if ylim is not None:
        ax.set_ylim(ylim)
    # Hack to minimize chance of tick overlap
    xticks = [
        tick_location
        for tick_location in ax.get_xticks()
        if ax.get_xlim()[0] < tick_location < ax.get_xlim()[1]
    ]
    if len(xticks) > 5:
        ax.set_xticks(xticks[::2])
        # Re-apply the limits, since set_xticks can widen them.
        if xlim is not None:
            ax.set_xlim(xlim)
        elif np.isfinite(xmin) and np.isfinite(xmax):
            ax.set_xlim([xmin, xmax])
    ax.xaxis.set_minor_locator(AutoMinorLocator(2))
    if inv_x:
        ax.invert_xaxis()
    if inv_y:
        ax.invert_yaxis()
    if axtext is not None:
        # Normalize scalar inputs to parallel lists of text + location.
        if isinstance(axtext, str):
            axtext = [axtext]
        if annotation_location is None:
            annotation_location = [None for _ in axtext]
        if isinstance(annotation_location, tuple):
            annotation_location = [annotation_location]
        for t, loc in zip(axtext, annotation_location):
            bbox = dict(boxstyle="round", fc="w", ec="0.5", alpha=0.9)
            if loc is None:
                # Default: bottom-right corner in axes coordinates.
                ax.text(
                    0.95,
                    0.05,
                    t,
                    transform=ax.transAxes,
                    ha="right",
                    va="bottom",
                    bbox=bbox,
                )
            else:
                ax.text(loc[0], loc[1], t, bbox=bbox)
    if percent:
        formatter = FuncFormatter(to_percent)
        ax.yaxis.set_major_formatter(formatter)
    if sci_x:
        ax.ticklabel_format(style="sci", axis="x", scilimits=(-3, 4))
    if sci_y:
        ax.ticklabel_format(style="sci", axis="y", scilimits=(-3, 4))
        ax.xaxis.major.formatter._useMathText = True
    if filename is not None:
        fig.savefig(filename, dpi=300)
    if screen:
        fig.show()
        plt.ion()
    if not screen:
        plt.close(fig)
import warnings
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
def plot(
    res: List[Dict[str, Union[np.ndarray, str, Dict]]],
    legend: Optional[str] = None,
    title: Optional[str] = None,
    xlabel: Optional[str] = None,
    ylabel: Optional[str] = None,
    xlim: Optional[Tuple[float, float]] = None,
    ylim: Optional[Tuple[float, float]] = None,
    inv_x: bool = False,
    inv_y: bool = False,
    sci_x: bool = False,
    sci_y: bool = False,
    axtext: Optional[Union[str, List[str]]] = None,
    annotation_location: Optional[Union[Tuple[float, float], List]] = None,
    percent: bool = False,
    filename: Optional[str] = None,
    screen: bool = True,
) -> None:
    """Plot a list of result series (or histograms) on one matplotlib axis.

    Each entry of ``res`` holds arrays under ``"x"``/``"y"``, optionally
    ``"y_err"``, ``"hist"``, ``"args"`` (kwargs for the matplotlib call)
    and ``"name"`` (legacy legend label). Silently returns if matplotlib
    is unavailable. Saves to ``filename`` (dpi 300) and/or shows on screen.
    """
    try:
        import matplotlib as mpl
        import matplotlib.pyplot as plt
        from matplotlib.ticker import AutoMinorLocator, FuncFormatter
    except ImportError:
        warnings.warn("Install matplotlib to enable plotting.")
        return

    def to_percent(y_ticks, _):
        # Adapted from https://matplotlib.org/examples/pylab_examples/histogram_percent_demo.html
        return "{:g}".format(100 * y_ticks)

    font = {"family": "serif", "weight": "normal", "size": 16}
    mpl.rc("font", **font)
    plt.ioff()
    fig, ax = plt.subplots()
    # Track the overall data extent across all series for axis limits.
    xmin = float("inf")
    xmax = float("-inf")
    for r in res:
        if "args" in r:
            args = r["args"]
        else:
            args = dict()
        if "name" in r:
            # backwards compatibility
            args["label"] = r["name"]
        if "hist" in r and r["hist"]:
            y = r["y"]
            # Use Freedman–Diaconis rule to determine good bin width
            inter_quartile_range = np.percentile(y, 75) - np.percentile(y, 25)
            bin_width = 2 * inter_quartile_range / (len(y) ** (1 / 3))
            num_bins = int((np.max(y) - np.min(y)) / bin_width)
            _, x, _ = ax.hist(y, num_bins, **args)
        else:
            x = r["x"]
            y = r["y"]
            # Clip the series to the requested x-range before drawing.
            if xlim is not None:
                x = x[(r["x"] >= xlim[0]) & (r["x"] <= xlim[1])]
                y = y[(r["x"] >= xlim[0]) & (r["x"] <= xlim[1])]
            if "y_err" in r:
                dy = r["y_err"]
                if xlim is not None:
                    dy = dy[(r["x"] >= xlim[0]) & (r["x"] <= xlim[1])]
                ax.errorbar(x, y, yerr=dy, **args)
            else:
                ax.plot(x, y, **args)
        xmin = min(np.min(x), xmin)
        xmax = max(np.max(x), xmax)
    if legend is not None:
        ax.legend(loc=legend)
    # Shrink the axes box to make room for title/labels as they are added.
    box = ax.get_position()
    if title is not None:
        ax.set_title(title, y=1.05)
        box = box.from_bounds(box.x0, box.y0, box.width, box.height * 0.95)
    if xlabel is not None:
        ax.set_xlabel(xlabel, labelpad=5)
        box = box.from_bounds(
            box.x0, box.y0 + 0.05 * box.height, box.width, box.height * 0.95
        )
    if ylabel is not None:
        ax.set_ylabel(ylabel, labelpad=10)
        box = box.from_bounds(
            box.x0 + 0.05 * box.width, box.y0, box.width * 0.95, box.height
        )
    ax.set_position([box.x0, box.y0, box.width, box.height])
    ax.axis("auto")
    if xlim is not None:
        ax.set_xlim(xlim)
    elif np.isfinite(xmin) and np.isfinite(xmax):
        ax.set_xlim([xmin, xmax])
    if ylim is not None:
        ax.set_ylim(ylim)
    # Hack to minimize chance of tick overlap
    xticks = [
        tick_location
        for tick_location in ax.get_xticks()
        if ax.get_xlim()[0] < tick_location < ax.get_xlim()[1]
    ]
    if len(xticks) > 5:
        ax.set_xticks(xticks[::2])
        # Re-apply the limits, since set_xticks can widen them.
        if xlim is not None:
            ax.set_xlim(xlim)
        elif np.isfinite(xmin) and np.isfinite(xmax):
            ax.set_xlim([xmin, xmax])
    ax.xaxis.set_minor_locator(AutoMinorLocator(2))
    if inv_x:
        ax.invert_xaxis()
    if inv_y:
        ax.invert_yaxis()
    if axtext is not None:
        # Normalize scalar inputs to parallel lists of text + location.
        if isinstance(axtext, str):
            axtext = [axtext]
        if annotation_location is None:
            annotation_location = [None for _ in axtext]
        if isinstance(annotation_location, tuple):
            annotation_location = [annotation_location]
        for t, loc in zip(axtext, annotation_location):
            bbox = dict(boxstyle="round", fc="w", ec="0.5", alpha=0.9)
            if loc is None:
                # Default: bottom-right corner in axes coordinates.
                ax.text(
                    0.95,
                    0.05,
                    t,
                    transform=ax.transAxes,
                    ha="right",
                    va="bottom",
                    bbox=bbox,
                )
            else:
                ax.text(loc[0], loc[1], t, bbox=bbox)
    if percent:
        formatter = FuncFormatter(to_percent)
        ax.yaxis.set_major_formatter(formatter)
    if sci_x:
        ax.ticklabel_format(style="sci", axis="x", scilimits=(-3, 4))
    if sci_y:
        ax.ticklabel_format(style="sci", axis="y", scilimits=(-3, 4))
        ax.xaxis.major.formatter._useMathText = True
    if filename is not None:
        fig.savefig(filename, dpi=300)
    if screen:
        fig.show()
        plt.ion()
    if not screen:
        plt.close(fig)
from random import uniform
from GA_2D import initialize2D, select2D, crossover2D, mutate2D
from GA_3D import initialize3D, select3D, crossover3D, mutate3D
from HC_2D import *
from HC_3D import *
def GA_first(chromosome):
    """Run the 2-D genetic algorithm (100 runs of 1000 generations each) and
    return the grand mean gene value.

    NOTE(review): the crossover loop pops 3 parent pairs, so the population
    size is assumed to be 6 — confirm against initialize2D(). mutate2D is
    assumed to return an additive perturbation, consistent with the
    original ``gene += mutate2D(...)`` intent.
    """
    avg_chromosome = []
    for _run in range(100):
        for _generation in range(1000):
            # Selection
            chromosome = select2D(chromosome)
            # Crossover: shuffle a copy and pair parents off.
            cross = chromosome.copy()
            random.shuffle(cross)
            chromosome.clear()
            for _pair in range(0, 3):
                parent_one = cross.pop()
                parent_two = cross.pop()
                offspring = crossover2D(parent_one, parent_two)
                chromosome.append(offspring[0])
                chromosome.append(offspring[1])
            # Mutation
            # BUG FIX: the original `for gene in chromosome: gene += ...`
            # only rebound the loop variable and never changed the
            # population. Build the mutated population explicitly.
            chromosome = [gene + mutate2D(chromosome) for gene in chromosome]
        avg_chromosome.append(sum(chromosome)/len(chromosome))
    return sum(avg_chromosome)/len(avg_chromosome)
def GA_second(chromosome):
    """Run the 3-D genetic algorithm over two-gene [x, y] chromosomes and
    return the average [x, y] across 100 runs of 1000 generations.

    NOTE(review): mutate3D is assumed to return a replacement (x, y) pair,
    consistent with the original ``gene = [mutant[0], mutant[1]]`` intent.
    """
    avg_chromosome = []
    for _run in range(100):
        for _generation in range(1000):
            # Selection
            chromosome = select3D(chromosome)
            # Crossover: shuffle a copy and pair parents off.
            cross = chromosome.copy()
            random.shuffle(cross)
            chromosome.clear()
            for _pair in range(0, 3):
                parent_one = cross.pop()
                parent_two = cross.pop()
                offspring = crossover3D(parent_one, parent_two)
                chromosome.append(offspring[0])
                chromosome.append(offspring[1])
            # Mutation
            # BUG FIX: the original rebound the loop variable (`gene = ...`)
            # and never changed the population. Replace it explicitly.
            mutated = []
            for _gene in chromosome:
                mutant = mutate3D(chromosome)
                mutated.append([mutant[0], mutant[1]])
            chromosome = mutated
        x = sum([element[0] for element in chromosome])/len(chromosome)
        y = sum([element[1] for element in chromosome])/len(chromosome)
        avg_chromosome.append([x, y])
    avg_x = sum([element[0] for element in avg_chromosome])/len(avg_chromosome)
    avg_y = sum([element[1] for element in avg_chromosome])/len(avg_chromosome)
    return [avg_x, avg_y]
"""
Implements Hill Climbing in 2D API
"""
def HC_first():
hill_results_2D = []
for i in range(1000):
hill_results_2D.append(hill_climb_2D(uniform(-5,5)))
return sum(hill_results_2D)/len(hill_results_2D)
"""
Implements Hill Climbing in 3D API
"""
def HC_second():
hill_results_3D = []
for i in range(100):
coords = (random.uniform(-5, 5), random.uniform(-5,5))
hill_results_3D.append(list(hill_climb_3D(coords)))
return [sum(hill_results_3D[0])/len(hill_results_3D), sum(hill_results_3D[1])/len(hill_results_3D)]
"""
Helper Function for plotting in 2D
"""
def plot_2D(point, title):
fx = pow(np.linspace(-5,5,100), 4) - 22*pow(np.linspace(-5,5,100), 2)
guess = pow(point, 4) - 22*pow(point, 2)
plt.figure()
plt.scatter(np.linspace(-5,5,100), fx, color='r', label='f(x)')
plt.annotate('Max Estimate', (point, guess), xytext=(point + 1, guess + 1),
arrowprops=dict(arrowstyle='->', connectionstyle='arc3'))
plt.title(title)
plt.ylabel('f(x)')
plt.xlabel('x')
plt.show()
"""
Helper Function for plotting in 3D
"""
def plot_3D(coords, title):
estimate = "Max Estimate: " + str(coords)
dx, dy = np.meshgrid(np.linspace(-5,5,500), np.linspace(-5,5,500))
fz = -22*dx**2 + dx**4 - 22*dy**2 + dy**4
guess = -22*coords[0]**2 + coords[0]**4 - 22*coords[1]**2 + coords[1]**4
plt.figure()
ax = plt.axes(projection = '3d')
ax.plot_surface(dx,dy,fz, cmap = 'viridis')
ax.set_title(title)
ax.set_zlabel("f(x,y)")
ax.set_ylabel('y')
ax.set_xlabel('x')
ax.text(-10, -10, 300, estimate)
plt.show()
def main():
    """Run both optimizers (GA and hill climbing) in 2-D and 3-D and plot
    the resulting estimates."""
    # Chromosome initialization - 2D
    chromosome2D = initialize2D()
    chromosome2D = GA_first(chromosome2D)
    # Chromosome initialization - 3D
    chromosome3D = initialize3D()
    chromosome3D = GA_second(chromosome3D)
    plot_2D(chromosome2D, "Genetic Algorithm - 2D")
    plot_3D(chromosome3D, "Genetic Algorithm - 3D")
    # Hill Climbing
    hill_x = HC_first()
    hill_xy = HC_second()
    plot_2D(hill_x, 'Hill Climbing - 2D')
    plot_3D(hill_xy, 'Hill Climbing - 3D')

if __name__ == "__main__":
    main()
from GA_2D import initialize2D, select2D, crossover2D, mutate2D
from GA_3D import initialize3D, select3D, crossover3D, mutate3D
from HC_2D import *
from HC_3D import *
def GA_first(chromosome):
    """2-D GA: 100 runs of 1000 generations; returns the grand mean gene.

    NOTE(review): the mutation loop only rebinds the loop variable
    (``gene += ...``) and never changes the population — confirm intent.
    """
    avg_chromosome = []
    for i in range(100):
        for i in range(1000):
            # Selection
            chromosome = select2D(chromosome)
            # Crossover
            cross = chromosome.copy()
            random.shuffle(cross)
            chromosome.clear()
            for i in range(0, 3):
                parent_one = cross.pop()
                parent_two = cross.pop()
                offspring = crossover2D(parent_one, parent_two)
                chromosome.append(offspring[0])
                chromosome.append(offspring[1])
            # Mutation
            for gene in chromosome:
                gene += mutate2D(chromosome)
        avg_chromosome.append(sum(chromosome)/len(chromosome))
    return sum(avg_chromosome)/len(avg_chromosome)
def GA_second(chromosome):
    """3-D GA over [x, y] chromosomes; returns the average [x, y].

    NOTE(review): the mutation loop only rebinds the loop variable
    (``gene = [...]``) and never changes the population — confirm intent.
    """
    avg_chromosome = []
    for i in range(100):
        for i in range(1000):
            # Selection
            chromosome = select3D(chromosome)
            # Crossover
            cross = chromosome.copy()
            random.shuffle(cross)
            chromosome.clear()
            for i in range(0, 3):
                parent_one = cross.pop()
                parent_two = cross.pop()
                offspring = crossover3D(parent_one, parent_two)
                chromosome.append(offspring[0])
                chromosome.append(offspring[1])
            # Mutation
            for gene in chromosome:
                mutant = mutate3D(chromosome)
                gene = [mutant[0], mutant[1]]
        x = sum([element[0] for element in chromosome])/len(chromosome)
        y = sum([element[1] for element in chromosome])/len(chromosome)
        avg_chromosome.append([x, y])
    avg_x = sum([element[0] for element in avg_chromosome])/len(avg_chromosome)
    avg_y = sum([element[1] for element in avg_chromosome])/len(avg_chromosome)
    return [avg_x, avg_y]
"""
Implements Hill Climbing in 2D API
"""
def HC_first():
hill_results_2D = []
for i in range(1000):
hill_results_2D.append(hill_climb_2D(uniform(-5,5)))
return sum(hill_results_2D)/len(hill_results_2D)
"""
Implements Hill Climbing in 3D API
"""
def HC_second():
hill_results_3D = []
for i in range(100):
coords = (random.uniform(-5, 5), random.uniform(-5,5))
hill_results_3D.append(list(hill_climb_3D(coords)))
return [sum(hill_results_3D[0])/len(hill_results_3D), sum(hill_results_3D[1])/len(hill_results_3D)]
"""
Helper Function for plotting in 2D
"""
def plot_2D(point, title):
fx = pow(np.linspace(-5,5,100), 4) - 22*pow(np.linspace(-5,5,100), 2)
guess = pow(point, 4) - 22*pow(point, 2)
plt.figure()
plt.scatter(np.linspace(-5,5,100), fx, color='r', label='f(x)')
plt.annotate('Max Estimate', (point, guess), xytext=(point + 1, guess + 1),
arrowprops=dict(arrowstyle='->', connectionstyle='arc3'))
plt.title(title)
plt.ylabel('f(x)')
plt.xlabel('x')
plt.show()
"""
Helper Function for plotting in 3D
"""
def plot_3D(coords, title):
estimate = "Max Estimate: " + str(coords)
dx, dy = np.meshgrid(np.linspace(-5,5,500), np.linspace(-5,5,500))
fz = -22*dx**2 + dx**4 - 22*dy**2 + dy**4
guess = -22*coords[0]**2 + coords[0]**4 - 22*coords[1]**2 + coords[1]**4
plt.figure()
ax = plt.axes(projection = '3d')
ax.plot_surface(dx,dy,fz, cmap = 'viridis')
ax.set_title(title)
ax.set_zlabel("f(x,y)")
ax.set_ylabel('y')
ax.set_xlabel('x')
ax.text(-10, -10, 300, estimate)
plt.show()
def main():
    """Run the genetic-algorithm and hill-climbing optimizers and plot each result."""
    # Genetic algorithm: build an initial population, evolve it, then plot.
    ga_point_2d = GA_first(initialize2D())
    ga_point_3d = GA_second(initialize3D())
    plot_2D(ga_point_2d, "Genetic Algorithm - 2D")
    plot_3D(ga_point_3d, "Genetic Algorithm - 3D")
    # Hill climbing: average over repeated random restarts, then plot.
    hc_point_2d = HC_first()
    hc_point_3d = HC_second()
    plot_2D(hc_point_2d, 'Hill Climbing - 2D')
    plot_3D(hc_point_3d, 'Hill Climbing - 3D')
if __name__ == "__main__":
main() | 0.419648 | 0.519156 |
import sys
from argparse import ArgumentParser
from logging import getLogger
from os.path import dirname
from rdflib import Graph, Namespace
from utils import (
find_conversions,
execute_conversions,
standardize_namespaces,
get_output_filename,
)
"""
Argument Parsing:
source: source version (for example '1.0.3')
target: target version (for example '1.1')
info: log operation description
"""
# CLI: one or more turtle model files, required --source/--target version
# strings, and an optional --info flag that raises log verbosity.
parser = ArgumentParser(description="Update Brick models.")
parser.add_argument(
    "models",
    metavar="model",
    type=str,
    nargs="+",
    help="a turtle file with a brick model",
)
parser.add_argument("--source", help="source version", required=True)
parser.add_argument("--target", help="target version", required=True)
parser.add_argument(
    "--info", help="get information related to ongoing operations", action="store_true"
)
args = parser.parse_args()
# set log level to INFO if required.
if args.info:
    # Configures the root logger, so module loggers inherit the level.
    getLogger().setLevel("INFO")
def convert(source, target, models):
    """Upgrade each Brick model in ``models`` from ``source`` to ``target``.

    source: source Brick version string (e.g. "1.0.3").
    target: target Brick version string (e.g. "1.1").
    models: list of paths to turtle files to convert.

    Writes one converted turtle file per conversion step (path chosen by
    ``get_output_filename``) and prints progress to stdout.
    """
    # Load the versions graph which has information about possible conversions.
    versions_graph = Graph()
    directory = dirname(sys.argv[0]) or "."
    versions_graph.parse(directory + "/conversions/versions.ttl", format="turtle")
    versions_graph.bind("version", Namespace("https://brickschema.org/version#"))
    # Ask if the conversion is possible
    # NOTE(review): source/target are %-interpolated into the SPARQL text.
    # Acceptable for trusted CLI input, but initBindings would be safer.
    job = versions_graph.query(
        """ASK{
        "%s" version:convertsTo+ "%s"
        }"""
        % (source, target)
    )
    # If yes, find the shortest path and convert
    # An ASK query yields a single boolean result, so this loop runs once.
    for doable in job:
        if doable:
            print("Conversion available!\n=====================")
            # Find the conversions to update the model in minimum number of version upgrades
            conversions = find_conversions(source, target, versions_graph)
            # Loop through all the input models
            for model in models:
                print("\n\nUpdating {}...".format(model))
                standardize_namespaces(model)
                # Execute all conversions
                # NOTE(review): every step re-parses the ORIGINAL `model`
                # file, which looks like it discards the previous step's
                # output on multi-step upgrades — confirm against
                # execute_conversions / get_output_filename before relying
                # on chained conversions.
                for conversion in conversions:
                    model_graph = Graph()
                    model_graph.parse(model, format="turtle")
                    print("Converting to {}...".format(conversion[1]))
                    execute_conversions(conversion, model_graph)
                    output = get_output_filename(model, conversion[1])
                    model_graph.serialize(output, format="turtle")
                    print("Output stored: {}".format(output))
        else:
            print("No conversions available from {} to {}.".format(source, target))
convert(args.source, args.target, args.models) | tools/convert/convert.py | import sys
from argparse import ArgumentParser
from logging import getLogger
from os.path import dirname
from rdflib import Graph, Namespace
from utils import (
find_conversions,
execute_conversions,
standardize_namespaces,
get_output_filename,
)
"""
Argument Parsing:
source: source version (for example '1.0.3')
target: target version (for example '1.1')
info: log operation description
"""
parser = ArgumentParser(description="Update Brick models.")
parser.add_argument(
"models",
metavar="model",
type=str,
nargs="+",
help="a turtle file with a brick model",
)
parser.add_argument("--source", help="source version", required=True)
parser.add_argument("--target", help="target version", required=True)
parser.add_argument(
"--info", help="get information related to ongoing operations", action="store_true"
)
args = parser.parse_args()
# set log level to INFO if required.
if args.info:
getLogger().setLevel("INFO")
def convert(source, target, models):
# Load the versions graph which has information about possible conversions.
versions_graph = Graph()
directory = dirname(sys.argv[0]) or "."
versions_graph.parse(directory + "/conversions/versions.ttl", format="turtle")
versions_graph.bind("version", Namespace("https://brickschema.org/version#"))
# Ask if the conversion is possible
job = versions_graph.query(
"""ASK{
"%s" version:convertsTo+ "%s"
}"""
% (source, target)
)
# If yes, find the shortest path and convert
for doable in job:
if doable:
print("Conversion available!\n=====================")
# Find the conversions to update the model in minimum number of version upgrades
conversions = find_conversions(source, target, versions_graph)
# Loop through all the input models
for model in models:
print("\n\nUpdating {}...".format(model))
standardize_namespaces(model)
# Execute all conversions
for conversion in conversions:
model_graph = Graph()
model_graph.parse(model, format="turtle")
print("Converting to {}...".format(conversion[1]))
execute_conversions(conversion, model_graph)
output = get_output_filename(model, conversion[1])
model_graph.serialize(output, format="turtle")
print("Output stored: {}".format(output))
else:
print("No conversions available from {} to {}.".format(source, target))
convert(args.source, args.target, args.models) | 0.480235 | 0.169372 |
import os
import hashlib
import tempfile
from exiftool import ExifTool
from flask import Flask, request, jsonify
# Flask app: templates from views/, static assets from public/ served at /.
app = Flask(__name__, template_folder="views", static_folder="public", static_url_path="/")
# Cap request bodies (uploads) at 100 KiB.
app.config["MAX_CONTENT_LENGTH"] = 100 * 1024
# Secret is the key for the sha1-based upload token checked in /view.
secret = os.getenv("SECRET") or "BSidesTLV2021{This_Is_Not_The_Flag}"
if len(secret) < 35:
    raise Exception("Secret size should be 35 or above")
def parse_metadata(metadata, filter_keys=None):
    """Expand exiftool's flat "Group:Tag" metadata keys into nested dicts.

    metadata: mapping of colon-separated keys (e.g. "EXIF:Make") to values.
    filter_keys: optional iterable of TOP-LEVEL groups to drop from the
        result (e.g. ["File", "SourceFile"]).

    Returns a plain dict where "A:B:C": v becomes {"A": {"B": {"C": v}}}.
    """
    filter_keys = filter_keys or []
    parsed_metadata = {}
    for key, value in metadata.items():
        keys = key.split(":")
        node = parsed_metadata
        last = len(keys) - 1
        for i, k in enumerate(keys):
            if k not in node:
                node[k] = {}
            if i < last:
                # Descend into the intermediate group dict.
                node = node[k]
                continue
            node[k] = value
    for k in filter_keys:
        # Bug fix: pop with a default so a filter key that is absent from
        # the metadata no longer raises KeyError.
        parsed_metadata.pop(k, None)
    return dict(parsed_metadata)
@app.route("/")
def index():
    # Serve the static landing page from the public/ folder.
    return app.send_static_file("index.html")
@app.route("/view", methods=["POST"])
def view():
    """Return exiftool metadata for uploaded images, gated by a token.

    Expects form field "token" and one or more "image[]" uploads; the token
    must equal sha1(secret || image1 || image2 || ...). On success, returns
    the parsed metadata (a bare object for one image, a list otherwise).
    """
    token = request.form.get("token")
    if not token:
        return jsonify({"error": "empty token"})
    images = request.files.getlist("image[]")
    if not images:
        return jsonify({"error": "empty image"})
    image_streams = []
    # NOTE(review): sha1(secret || data) as a MAC is vulnerable to length
    # extension — presumably intentional for this CTF challenge; do not
    # reuse this construction elsewhere.
    mac = hashlib.sha1(secret.encode())
    for image in images:
        # Only the client-supplied mimetype is checked, not the content.
        if not image.mimetype.startswith("image/"):
            return jsonify({"error": "bad image"})
        image_stream = image.stream.read()
        mac.update(image_stream)
        image_streams.append(image_stream)
    if token != mac.hexdigest():
        return jsonify({"error": "bad token"})
    metadata = []
    try:
        with ExifTool() as et:
            for i, image_stream in enumerate(image_streams):
                # Write the upload to a named temp file so exiftool can read it.
                with tempfile.NamedTemporaryFile(delete=False) as tmp:
                    tmp.write(image_stream)
                    tmp.flush()
                    tmp.close()
                    parsed_metadata = {
                        "SourceFile": images[i].filename,
                        **parse_metadata(et.get_metadata(tmp.name), filter_keys=["File", "SourceFile"])
                    }
                    metadata.append(parsed_metadata)
                    # Best-effort cleanup. NOTE(review): if get_metadata
                    # raises, unlink is skipped and the temp file leaks.
                    try:
                        os.unlink(tmp.name)
                    except Exception as ex:
                        pass
    except Exception as ex:
        return jsonify({"error": str(ex)})
    return jsonify(metadata[0] if len(metadata) < 2 else metadata)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000, debug=True, threaded=True) | BSidesTLV/2021/web/Intellimage/index.py | import os
import hashlib
import tempfile
from exiftool import ExifTool
from flask import Flask, request, jsonify
app = Flask(__name__, template_folder="views", static_folder="public", static_url_path="/")
app.config["MAX_CONTENT_LENGTH"] = 100 * 1024
secret = os.getenv("SECRET") or "BSidesTLV2021{This_Is_Not_The_Flag}"
if len(secret) < 35:
raise Exception("Secret size should be 35 or above")
def parse_metadata(metadata, filter_keys=None):
filter_keys = filter_keys or []
parsed_metadata = {}
for key, value in metadata.items():
keys = key.split(":")
o = parsed_metadata
kl = len(keys)
for i, k in enumerate(keys):
if k not in o:
o[k] = {}
if i < kl - 1:
o = o[k]
continue
o[k] = value
for k in filter_keys:
parsed_metadata.pop(k)
return dict(parsed_metadata)
@app.route("/")
def index():
return app.send_static_file("index.html")
@app.route("/view", methods=["POST"])
def view():
token = request.form.get("token")
if not token:
return jsonify({"error": "empty token"})
images = request.files.getlist("image[]")
if not images:
return jsonify({"error": "empty image"})
image_streams = []
mac = hashlib.sha1(secret.encode())
for image in images:
if not image.mimetype.startswith("image/"):
return jsonify({"error": "bad image"})
image_stream = image.stream.read()
mac.update(image_stream)
image_streams.append(image_stream)
if token != mac.hexdigest():
return jsonify({"error": "bad token"})
metadata = []
try:
with ExifTool() as et:
for i, image_stream in enumerate(image_streams):
with tempfile.NamedTemporaryFile(delete=False) as tmp:
tmp.write(image_stream)
tmp.flush()
tmp.close()
parsed_metadata = {
"SourceFile": images[i].filename,
**parse_metadata(et.get_metadata(tmp.name), filter_keys=["File", "SourceFile"])
}
metadata.append(parsed_metadata)
try:
os.unlink(tmp.name)
except Exception as ex:
pass
except Exception as ex:
return jsonify({"error": str(ex)})
return jsonify(metadata[0] if len(metadata) < 2 else metadata)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000, debug=True, threaded=True) | 0.215516 | 0.088465 |
from __future__ import print_function
import argparse
import os
import pickle
import subprocess
import sys
from Bio import SeqIO
from collections import defaultdict
def generate_fasta_entries(input_fasta, output_fasta):
    """Re-key neoepiscope FASTA records with sequential integer IDs.

    input_fasta: path to the input neoepiscope FASTA.
    output_fasta: path to write the FASTA with the new integer headers.

    Returns (fasta_dict, mapper_dict): fasta_dict maps each new header to
    its sequence; mapper_dict maps the integer ID (as a string) back to the
    original record header. Sequences shorter than 8 residues are skipped
    because they are too short for netCTLpan.
    """
    fasta_dict = {}
    mapper_dict = {}
    print('Parsing FASTA records', file=sys.stderr)
    i = 0
    for record in SeqIO.parse(input_fasta, 'fasta'):
        sequence = str(record.seq)
        if len(sequence) < 8:
            continue
        new_header = ' '.join(['>', str(i)])
        fasta_dict[new_header] = sequence
        mapper_dict[str(i)] = str(record.id)
        i += 1
    print(' '.join(['Stored', str(i), 'FASTA sequences.']), file=sys.stderr)
    # Emit header/sequence pairs in insertion order.
    with open(output_fasta, 'w') as out_handle:
        for new_header, sequence in fasta_dict.items():
            print(new_header, file=out_handle)
            print(sequence, file=out_handle)
    return fasta_dict, mapper_dict
def run_netCTLpan(fasta, output_path, netCTLpan, n, allele):
    """Invoke the netCTLpan binary on ``fasta``.

    fasta: FASTA file with integer headers (from generate_fasta_entries()).
    output_path: file where netCTLpan writes its xls-style output.
    netCTLpan: path to the netCTLpan executable.
    n: peptide length (passed through as a string).
    allele: MHC allele name.

    Raises subprocess.CalledProcessError if netCTLpan exits non-zero.
    """
    print('Running netCTLpan', file=sys.stderr)
    subprocess.check_call([
        netCTLpan, '-v', '-f', fasta, '-xls', '-xlsfile', output_path,
        '-l', n, '-a', allele,
    ])
def extract_neoepitopes(neoepiscope_results):
    """Map each transcript ID to the set of neoepitope sequences it yields.

    neoepiscope_results: path to a neoepiscope output file — two header
        lines, then tab-separated rows with the epitope in column 0 and a
        ';'-joined transcript list in column 9.

    Returns a defaultdict(set) keyed by transcript ID.
    """
    neoepitope_dict = defaultdict(set)
    print('Extracting neoepitope data', file=sys.stderr)
    with open(neoepiscope_results) as handle:
        # Discard the two header lines.
        handle.readline()
        handle.readline()
        for row in handle:
            fields = row.strip().split('\t')
            epitope = fields[0]
            # Record the epitope under every transcript it came from.
            for transcript in fields[9].split(';'):
                neoepitope_dict[transcript].add(epitope)
    return neoepitope_dict
def process_netCTLpan_output(
    netCTLpan_output, mapping_dictionary,
    neoepitope_dictionary
):
    """Collect netCTLpan scores for peptides that are known neoepitopes.

    netCTLpan_output: path to the netCTLpan xls-style output file.
    mapping_dictionary: integer-ID string -> original FASTA header.
    neoepitope_dictionary: transcript ID -> set of neoepitope peptides.

    Returns a defaultdict(set) mapping (transcript, peptide) to a set of
    (MHC, TAP, Cle, Comb, %Rank) score tuples, all as strings.
    """
    print('Processing netCTLpan output', file=sys.stderr)
    netCTL_score_dict = defaultdict(set)
    with open(netCTLpan_output) as handle:
        for row in handle:
            # Header lines start with 'N'; data rows are:
            # [N, Sequence Name, Peptide, Allele, MHC, TAP, Cle, Comb, %Rank]
            if row[0] == 'N':
                continue
            fields = row.strip().split('\t')
            # Sequence name is "<chunk>_<id>"; the middle field is the
            # integer ID assigned by generate_fasta_entries (large proteins
            # are split into multiple entries).
            seq_id = fields[1].split('_')[1]
            original_header = mapping_dictionary[seq_id]
            transcript = original_header.lstrip('>').split('_')[0]
            peptide = fields[2]
            # Keep scores only for peptides known to be neoepitopes
            # of this transcript.
            if peptide in neoepitope_dictionary[transcript]:
                netCTL_score_dict[(transcript, peptide)].add(tuple(fields[4:9]))
    print("Done processing output", file=sys.stderr)
    return netCTL_score_dict
if __name__ == "__main__":
    # Parse command line options
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', type=str, required=True,
                        help='path to neoepiscope output file'
    )
    parser.add_argument('-o', '--output', type=str, required=True,
                        help='path to write FASTAs and netCTL output files'
    )
    parser.add_argument('-e', '--executable', type=str, required=True,
                        help='path to netCTLpan executable'
    )
    parser.add_argument('-a', '--allele', type=str, required=True,
                        help='HLA allele'
    )
    parser.add_argument('-s', '--size', type=str, required=True,
                        help='peptide size'
    )
    args = parser.parse_args()
    # Get absolute paths to input/output directories/files
    input_file = os.path.abspath(args.input)
    # The companion FASTA is expected at "<input>.fasta".
    input_fasta = '.'.join([input_file, 'fasta'])
    sample_id = os.path.basename(input_file).replace('.neoepiscope.comprehensive.out', '')
    output_dir = os.path.abspath(args.output)
    # Output files are namespaced by sample, peptide size, and allele.
    output_fasta = os.path.join(
        output_dir,
        '.'.join([sample_id, args.size, args.allele, 'netCTL.fasta'])
    )
    output_dictionary = os.path.join(
        output_dir,
        '.'.join([sample_id, args.size, args.allele, 'pickle'])
    )
    netCTLpan = os.path.abspath(args.executable)
    # Generate FASTA dictionaries
    fasta_entries, mapper = generate_fasta_entries(input_fasta, output_fasta)
    # Extract neoepitopes
    neoepitopes = extract_neoepitopes(input_file)
    # Run netCTLpan
    complete_score_dict = {}
    netCTLpan_output = os.path.join(
        output_dir,
        '.'.join(
            [
                sample_id,
                args.size,
                args.allele,
                'netCTL.out'
            ]
        )
    )
    run_netCTLpan(output_fasta, netCTLpan_output, netCTLpan, args.size, args.allele)
    # Process netCTLpan data
    netCTLpan_scores = process_netCTLpan_output(
        netCTLpan_output,
        mapper,
        neoepitopes
    )
    complete_score_dict.update(netCTLpan_scores)
    print('Saving results', file=sys.stderr)
    # Store results in pickled dictionary
    with open(output_dictionary, 'wb') as p:
        pickle.dump(complete_score_dict, p)
print('Done!', file=sys.stderr) | scripts/generate_netctlpan_data.py |
from __future__ import print_function
import argparse
import os
import pickle
import subprocess
import sys
from Bio import SeqIO
from collections import defaultdict
def generate_fasta_entries(input_fasta, output_fasta):
""" Converts neoepiscope output FASTAs new FASTA file with IDs as headers,
storing IDs as a dictionary for mapping
input_fasta: path to input neoepiscope FASTAs
output_fasta: path to output fasta with alternate headers
Return values:
mapper_dict: dictionary linking FASTA header IDs as keys to
original fasta headers as values
"""
# Set up dictionaries and sequence counter
fasta_dict = {}
mapper_dict = {}
i = 0
print('Parsing FASTA records', file=sys.stderr)
# Iterate through all records in the FASTA file to extract data
for record in SeqIO.parse(input_fasta, 'fasta'):
# Extract sequence header and sequence
header = str(record.id)
sequence = str(record.seq)
# Skip sequences that are too short for netCTLpan
if len(sequence) < 8:
continue
# Create new header and save entry to dictionary
new_header = ' '.join(['>', str(i)])
fasta_dict[new_header] = sequence
mapper_dict[str(i)] = header
# Increment counter
i += 1
print(' '.join(['Stored', str(i), 'FASTA sequences.']), file=sys.stderr)
# Write output fasta
with open(output_fasta, 'w') as f:
for header in fasta_dict:
print(header, file=f)
print(fasta_dict[header], file=f)
# Return dictionaries
return fasta_dict, mapper_dict
def run_netCTLpan(fasta, output_path, netCTLpan, n, allele):
""" Runs netCTLpan on input FASTA file
fasta: path to FASTA file with alternate headers (written by
generate_fasta_entries() function)
output_path: path to write netCTLpan output
netCTLpan: path to netCTLpan executable
n: peptide_length
allele: MHC allele
No return value.
"""
print('Running netCTLpan', file=sys.stderr)
# Set netCTLpan command and run
command = [
netCTLpan, '-v', '-f', fasta, '-xls', '-xlsfile', output_path,
'-l', n, '-a', allele
]
subprocess.check_call(command)
def extract_neoepitopes(neoepiscope_results):
""" Extract neoepitopes to store for each sample/transcript
neoepiscope_results: path to neoepiscope results
Return value: dictionary with transcript IDs as keys with sets of
relevant neoepitope sequences as values
"""
# Set up dictionary to store neoepitope information
neoepitope_dict = defaultdict(set)
print('Extracting neoepitope data', file=sys.stderr)
# Process data
with open(neoepiscope_results) as f:
f.readline()
f.readline()
for line in f:
tokens = line.strip().split('\t')
# Extract transcripts to iterate through
transcripts = tokens[9].split(';')
for tx in transcripts:
# Store neoepitope for each relevant transcript
neoepitope_dict[tx].add(tokens[0])
# Return neoepitope dictionary
return neoepitope_dict
def process_netCTLpan_output(
netCTLpan_output, mapping_dictionary,
neoepitope_dictionary
):
""" Processes netCTL output to retain data only for neoepitope sequences,
storing them in a dictionary
netCTLpan_output: path to netCTLpan output
mapping_dictionary: dictionary linking FASTA header IDs as keys to
original fasta headers as values
neoepitope_dictionary: dictionary with transcript IDs keys with sets of
relevant neoepitope sequences as values
Return value: dictionary linking tuple of (transcript ID, peptide) as
keys to set of tuples of (TAP score, cleavage score) as
values
"""
print('Processing netCTLpan output', file=sys.stderr)
netCTL_score_dict = defaultdict(set)
# Process file to extract data
with open(netCTLpan_output) as f:
for line in f:
if line[0] != 'N':
# Process peptide result
tokens = line.strip().split('\t')
# [N, Sequence Name, Peptide, Allele, MHC, TAP, Cle, Comb, %Rank]
# Grab sequence identifier, removing initial underscore
# Split by middle undescore to isolate just the ID
# (large proteins will have been split into more entries)
identifier = tokens[1].split('_')[1]
# Use identifier to grab sample/transcript information
original_header = mapping_dictionary[identifier]
transcript = original_header.lstrip('>').split('_')[0]
# Check if peptide is a neoepitope for that transcript
peptide = tokens[2]
if peptide in neoepitope_dictionary[transcript]:
# Store MHC rank, TAP score, cleavage score, combined score, % rank
scores = (tokens[4], tokens[5], tokens[6], tokens[7], tokens[8])
netCTL_score_dict[(transcript, peptide)].add(scores)
print("Done processing output", file=sys.stderr)
# Return dictionary
return netCTL_score_dict
if __name__ == "__main__":
# Parse command line options
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', type=str, required=True,
help='path to neoepiscope output file'
)
parser.add_argument('-o', '--output', type=str, required=True,
help='path to write FASTAs and netCTL output files'
)
parser.add_argument('-e', '--executable', type=str, required=True,
help='path to netCTLpan executable'
)
parser.add_argument('-a', '--allele', type=str, required=True,
help='HLA allele'
)
parser.add_argument('-s', '--size', type=str, required=True,
help='peptide size'
)
args = parser.parse_args()
# Get absolute paths to input/output directories/files
input_file = os.path.abspath(args.input)
input_fasta = '.'.join([input_file, 'fasta'])
sample_id = os.path.basename(input_file).replace('.neoepiscope.comprehensive.out', '')
output_dir = os.path.abspath(args.output)
output_fasta = os.path.join(
output_dir,
'.'.join([sample_id, args.size, args.allele, 'netCTL.fasta'])
)
output_dictionary = os.path.join(
output_dir,
'.'.join([sample_id, args.size, args.allele, 'pickle'])
)
netCTLpan = os.path.abspath(args.executable)
# Generate FASTA dictionaries
fasta_entries, mapper = generate_fasta_entries(input_fasta, output_fasta)
# Extract neoepitopes
neoepitopes = extract_neoepitopes(input_file)
# Run netCTLpan
complete_score_dict = {}
netCTLpan_output = os.path.join(
output_dir,
'.'.join(
[
sample_id,
args.size,
args.allele,
'netCTL.out'
]
)
)
run_netCTLpan(output_fasta, netCTLpan_output, netCTLpan, args.size, args.allele)
# Process netCTLpan data
netCTLpan_scores = process_netCTLpan_output(
netCTLpan_output,
mapper,
neoepitopes
)
complete_score_dict.update(netCTLpan_scores)
print('Saving results', file=sys.stderr)
# Store results in pickled dictionary
with open(output_dictionary, 'wb') as p:
pickle.dump(complete_score_dict, p)
print('Done!', file=sys.stderr) | 0.434221 | 0.238129 |
from __future__ import annotations
from typing import Any
import numpy as np
from .base_layer import BaseLayer, LayerInputType
class BatchNorm(BaseLayer):
    """Batch normalization layer.
    Inherits from
    ----------
    BaseLayer
    Attributes
    ----------
    axis: int
        Axis along which batch normalization should be performed.
    momentum: float
        Momentum for the moving averages of the mean and standard deviation.
    epsilon: float
        Epsilon to avoid divide by zero.
    gamma: np.ndarray
        Standard deviation scaling parameter.
    beta: np.ndarray
        Meaning scaling parameter.
    std: np.ndarray
        Standard deviation of the input.
    norm: np.ndarray
        Norm of the input, i.e. (input - mean) / std.
    scaled_norm: np.ndarray
        Norm after it is scaled by gamma and beta, i.e. gamma * norm + beta.
    mean_mva: np.ndarray
        Moving average of mean.
    std_mva: np.ndarray
        Moving average of standard deviation.
    Methods
    ----------
    x_dim() -> int
        Returns the dimension of the input along the axis attribute.
    Input shape
    ----------
    (..., batch_size), where ... represents any number of dimensions.
    Output shape
    ----------
    Same as the input shape.
    Example
    ----------
    >>> import numpy as np
    >>> from dnn import Input
    >>> from dnn.layers import BatchNorm
    >>> ip = Input(shape=(3, 6, 6, None)) # Create input
    >>> ip.ip = np.random.rand(3, 6, 6, 64)
    >>> layer = BatchNorm(ip=ip)
    >>> layer.forward_step().shape # Forward step
    (3, 6, 6, 64)
    """
    # Attributes cleared between passes — presumably consumed by BaseLayer's
    # reset machinery; confirm against BaseLayer.
    reset = ("std", "norm", "scaled_norm")
    # NOTE(review): several attributes assigned in __init__ ("std", "axis",
    # "momentum", "epsilon", "_ndims", "_axis", "_axes") are not listed here;
    # this only works if a base class provides a __dict__ — confirm BaseLayer.
    __slots__ = ("gamma", "beta", "norm", "scaled_norm", "mean_mva", "std_mva")
    def __init__(
        self,
        ip: LayerInputType,
        axis: int = 0,
        momentum: float = 0.5,
        epsilon: float = 1e-7,
        name: str | None = None,
    ) -> None:
        """
        Arguments
        ----------
        ip: Input to the layer.
        axis: Axis along which normalization should be carried out.
        Defaults to 0.
        momentum: Momentum for the moving averages. Defaults to 0.5.
        epsilon: Small value to prevent divide by zero. Defaults to 1e-7.
        name: Name of the layer. Should be unique in a model.
        When None, a name is automatically generated.
        Raises
        ----------
        ValueError: When axis is out of bounds or it is negative but not -1.
        """
        self.gamma = None
        self.beta = None
        params = ["gamma", "beta"]
        super().__init__(ip=ip, params=params, name=name)
        ndims = len(self.input_shape())
        if axis >= ndims:
            msg = (
                "axis is out of bounds for the layer. "
                f"Should be -1 or between 0 and {ndims - 1} but got {axis} instead."
            )
            raise ValueError(msg)
        if axis < 0 and axis != -1:
            raise ValueError("-1 is the only negative value allowed for axis.")
        self.axis = axis
        self._ndims = ndims
        # Resolve axis = -1 to refer to the last dimension
        self._axis = ndims - 1 if axis == -1 else axis
        # Get all axes which are not equal to axis
        self._axes = tuple(ax for ax in range(ndims) if ax != self._axis)
        self.momentum = momentum
        self.epsilon = epsilon
        self.std = None
        self.norm = None
        self.scaled_norm = None
        self.mean_mva = None
        self.std_mva = None
    def fans(self) -> tuple[int, int]:
        # Pass-through: batch norm does not change the feature dimension.
        if not isinstance(self.ip_layer, BaseLayer):
            raise TypeError("fans() can only be used when the input is another layer.")
        _, ip_fan_out = self.ip_layer.fans()
        return ip_fan_out, ip_fan_out
    def x_dim(self) -> int:
        """Method to obtain the dimension of the input along the axis attribute."""
        return self.input_shape()[self.axis]
    def build(self) -> Any:
        x_dim = self.x_dim()
        # The position of x_dim depends on the value of axis
        # Eg - If axis = 0 and the input is 4D, shape should be (x_dim, 1, 1, 1)
        # But if axis = -1, the shape should be (1, 1, 1, x_dim)
        shape = [1] * self._ndims
        shape[self._axis] = x_dim
        shape = tuple(shape)
        self.gamma = self._add_param(shape=shape, initializer="ones")
        self.beta = self._add_param(shape=shape, initializer="zeros")
        self.mean_mva = self._add_param(shape=shape, initializer="zeros")
        self.std_mva = self._add_param(shape=shape, initializer="ones")
    def count_params(self) -> int:
        # Only gamma and beta are trainable; the moving averages are not.
        return 2 * self.x_dim()
    def output(self) -> np.ndarray | None:
        return self.scaled_norm
    def output_shape(self) -> tuple[int, ...]:
        return self.input_shape()
    def _update_mva(self, mean: np.ndarray, std: np.ndarray) -> None:
        # In-place exponential moving average:
        # mva = momentum * mva + (1 - momentum) * batch_stat
        mom, one_minus_mom = self.momentum, 1 - self.momentum
        self.mean_mva *= mom
        self.mean_mva += one_minus_mom * mean
        self.std_mva *= mom
        self.std_mva += one_minus_mom * std
    def forward_step(self, *args, **kwargs) -> np.ndarray:
        ip = self.input()
        # If in training mode, the mean and std are calculated for the current batch
        # and the moving averages updated
        if self.training:
            mean = ip.mean(axis=self._axes, keepdims=True)
            std = np.sqrt(ip.var(axis=self._axes, keepdims=True) + self.epsilon)
            self._update_mva(mean, std)
            self.std = std
        # Otherwise, the moving averages act as the mean and std
        else:
            mean, std = self.mean_mva, self.std_mva
        self.norm = ip - mean
        self.norm /= std
        self.scaled_norm = self.gamma * self.norm
        self.scaled_norm += self.beta
        return self.scaled_norm
    def backprop_parameters(self, grad: np.ndarray, *args, **kwargs) -> None:
        # d/dgamma = sum(grad * norm); d/dbeta = sum(grad), both reduced over
        # the normalization axes so shapes match the (broadcast) parameters.
        self.gradients = {
            "gamma": np.sum(grad * self.norm, axis=self._axes, keepdims=True),
            "beta": np.sum(grad, axis=self._axes, keepdims=True),
        }
    def backprop_inputs(self, grad: np.ndarray, *args, **kwargs) -> np.ndarray:
        grad *= self.gamma
        # Calculate share of the mean in the gradient
        mean_share = grad.sum(axis=self._axes, keepdims=True)
        # Calculate share of the variance in the gradient
        var_share = self.norm * np.sum(grad * self.norm, axis=self._axes, keepdims=True)
        # Since mean and std are calculated across all dimensions except axis,
        # The gradient should be scaled by the product of all dimensions except the axis
        # Net effect: dx = (g - mean(g) - norm * mean(g * norm)) / std,
        # the standard batch-norm input gradient.
        scale = grad.size / self.x_dim()
        grad = scale * grad
        grad -= mean_share
        grad -= var_share
        grad /= self.std
        grad /= scale
        return grad
from typing import Any
import numpy as np
from .base_layer import BaseLayer, LayerInputType
class BatchNorm(BaseLayer):
"""Batch normalization layer.
Inherits from
----------
BaseLayer
Attributes
----------
axis: int
Axis along which batch normalization should be performed.
momentum: float
Momentum for the moving averages of the mean and standard deviation.
epsilon: float
Epsilon to avoid divide by zero.
gamma: np.ndarray
Standard deviation scaling parameter.
beta: np.ndarray
Meaning scaling parameter.
std: np.ndarray
Standard deviation of the input.
norm: np.ndarray
Norm of the input, i.e. (input - mean) / std.
scaled_norm: np.ndarray
Norm after it is scaled by gamma and beta, i.e. gamma * norm + beta.
mean_mva: np.ndarray
Moving average of mean.
std_mva: np.ndarray
Moving average of standard deviation.
Methods
----------
x_dim() -> int
Returns the dimension of the input along the axis attribute.
Input shape
----------
(..., batch_size), where ... represents any number of dimensions.
Output shape
----------
Same as the input shape.
Example
----------
>>> import numpy as np
>>> from dnn import Input
>>> from dnn.layers import BatchNorm
>>> ip = Input(shape=(3, 6, 6, None)) # Create input
>>> ip.ip = np.random.rand(3, 6, 6, 64)
>>> layer = BatchNorm(ip=ip)
>>> layer.forward_step().shape # Forward step
(3, 6, 6, 64)
"""
reset = ("std", "norm", "scaled_norm")
__slots__ = ("gamma", "beta", "norm", "scaled_norm", "mean_mva", "std_mva")
def __init__(
self,
ip: LayerInputType,
axis: int = 0,
momentum: float = 0.5,
epsilon: float = 1e-7,
name: str = None,
) -> None:
"""
Arguments
----------
ip: Input to the layer.
axis: Axis along which normalization should be carried out.
Defaults to 0.
momentum: Momentum for the moving averages. Defaults to 0.5.
epsilon: Small value to prevent divide by zero. Defaults to 1e-7.
name: Name of the layer. Should be unique in a model.
When None, a name is automatically generated.
Raises
----------
ValueError: When axis is out of bounds or it is negative but not -1.
"""
self.gamma = None
self.beta = None
params = ["gamma", "beta"]
super().__init__(ip=ip, params=params, name=name)
ndims = len(self.input_shape())
if axis >= ndims:
msg = (
"axis is out of bounds for the layer. "
f"Should be -1 or between 0 and {ndims - 1} but got {axis} instead."
)
raise ValueError(msg)
if axis < 0 and axis != -1:
raise ValueError("-1 is the only negative value allowed for axis.")
self.axis = axis
self._ndims = ndims
# Resolve axis = -1 to refer to the last dimension
self._axis = ndims - 1 if axis == -1 else axis
# Get all axes which are not equal to axis
self._axes = tuple(ax for ax in range(ndims) if ax != self._axis)
self.momentum = momentum
self.epsilon = epsilon
self.std = None
self.norm = None
self.scaled_norm = None
self.mean_mva = None
self.std_mva = None
def fans(self) -> tuple[int, int]:
if not isinstance(self.ip_layer, BaseLayer):
raise TypeError("fans() can only be used when the input is another layer.")
_, ip_fan_out = self.ip_layer.fans()
return ip_fan_out, ip_fan_out
def x_dim(self) -> int:
"""Method to obtain the dimension of the input along the axis attribute."""
return self.input_shape()[self.axis]
def build(self) -> Any:
x_dim = self.x_dim()
# The position of x_dim depends on the value of axis
# Eg - If axis = 0 and the input is 4D, shape should be (x_dim, 1, 1, 1)
# But if axis = -1, the shape should be (1, 1, 1, x_dim)
shape = [1] * self._ndims
shape[self._axis] = x_dim
shape = tuple(shape)
self.gamma = self._add_param(shape=shape, initializer="ones")
self.beta = self._add_param(shape=shape, initializer="zeros")
self.mean_mva = self._add_param(shape=shape, initializer="zeros")
self.std_mva = self._add_param(shape=shape, initializer="ones")
def count_params(self) -> int:
return 2 * self.x_dim()
def output(self) -> np.ndarray | None:
return self.scaled_norm
def output_shape(self) -> tuple[int, ...]:
return self.input_shape()
def _update_mva(self, mean: np.ndarray, std: np.ndarray) -> None:
mom, one_minus_mom = self.momentum, 1 - self.momentum
self.mean_mva *= mom
self.mean_mva += one_minus_mom * mean
self.std_mva *= mom
self.std_mva += one_minus_mom * std
def forward_step(self, *args, **kwargs) -> np.ndarray:
ip = self.input()
# If in training mode, the mean and std are calculated for the current batch
# and the moving averages updated
if self.training:
mean = ip.mean(axis=self._axes, keepdims=True)
std = np.sqrt(ip.var(axis=self._axes, keepdims=True) + self.epsilon)
self._update_mva(mean, std)
self.std = std
# Otherwise, the moving averages act as the mean and std
else:
mean, std = self.mean_mva, self.std_mva
self.norm = ip - mean
self.norm /= std
self.scaled_norm = self.gamma * self.norm
self.scaled_norm += self.beta
return self.scaled_norm
def backprop_parameters(self, grad: np.ndarray, *args, **kwargs) -> None:
    """Store gradients w.r.t. the learnable parameters gamma and beta."""
    gamma_grad = np.sum(grad * self.norm, axis=self._axes, keepdims=True)
    beta_grad = np.sum(grad, axis=self._axes, keepdims=True)
    self.gradients = {"gamma": gamma_grad, "beta": beta_grad}
def backprop_inputs(self, grad: np.ndarray, *args, **kwargs) -> np.ndarray:
    """Backpropagate ``grad`` through the normalization to the layer input.

    NOTE(review): the incoming ``grad`` array is modified in place by the
    ``*=`` below before being rebound to a new array; callers must not rely
    on the passed-in buffer afterwards.
    """
    grad *= self.gamma
    # Calculate share of the mean in the gradient
    mean_share = grad.sum(axis=self._axes, keepdims=True)
    # Calculate share of the variance in the gradient
    var_share = self.norm * np.sum(grad * self.norm, axis=self._axes, keepdims=True)
    # Since mean and std are calculated across all dimensions except axis,
    # The gradient should be scaled by the product of all dimensions except the axis
    scale = grad.size / self.x_dim()
    # ``scale * grad`` allocates a new array and rebinds the name, so the
    # in-place operations below no longer touch the caller's buffer.
    grad = scale * grad
    grad -= mean_share
    grad -= var_share
    grad /= self.std
    grad /= scale
    return grad
# Author......: See docs/credits.txt
# License.....: MIT
# Target......: Exodus wallet extractor
# Example.....: exodus2hashcat.py <path to exodus seed seco file>
import binascii
import sys
import hashlib
import base64
import os.path

# Fixed layout of a SECO container (sizes in bytes).
METADATA_LEN = 256
HEADER_LEN = 224
CRC_LEN = 32
LEN_BLOB_STORED = 4

# Exactly one argument: path to the wallet's seed.seco file.
if len(sys.argv) != 2:
    print("Error, usage exodus2hashcat.py <path to exodus seed.seco file>")
    sys.exit(1)
if os.path.basename(sys.argv[1]) != 'seed.seco':
    print("Error, usage exodus2hashcat.py <path to exodus seed.seco file>")
    sys.exit(1)

with open(sys.argv[1], 'rb') as fd:
    seedBuffer = fd.read()

# Basic check: file must start with the "SECO" magic.
if not seedBuffer[0:4].decode("utf8").startswith("SECO"):
    print("Not A SECO exodus header magic")
    sys.exit(1)

# Salt and the big-endian scrypt parameters (N, r, p) from the header.
salt = seedBuffer[0x100:0x120]
n = int.from_bytes(seedBuffer[0x120:0x124], "big")
r = int.from_bytes(seedBuffer[0x124:0x128], "big")
p = int.from_bytes(seedBuffer[0x128:0x12c], "big")
# Basic check: warn (but continue) on non-default scrypt parameters.
if n != 16384 or r != 8 or p != 1:
    print("Warning,unexpected scrypt N,r,p values")

# Stored blob length plus the fixed header sections must match the file
# size, otherwise the file is truncated or corrupt.
blob_len = int.from_bytes(seedBuffer[0x200:0x204], "big")
expected_size = METADATA_LEN + HEADER_LEN + CRC_LEN + LEN_BLOB_STORED + blob_len
if os.path.getsize(sys.argv[1]) != expected_size:
    print(os.path.getsize(sys.argv[1]))
    # BUGFIX: the debug print previously omitted CRC_LEN/LEN_BLOB_STORED and
    # so showed a misleading "expected" size.
    print(expected_size)
    print("Error file size")
    # BUGFIX: this branch previously evaluated the no-op expression
    # ``sys.argv[1]`` and the script kept running on a corrupt file.
    sys.exit(1)

# Check integrity: SHA-256 over everything after the stored digest must
# equal the digest stored in the header.
m = hashlib.sha256()
m.update(seedBuffer[HEADER_LEN + CRC_LEN:])
if m.digest() != seedBuffer[HEADER_LEN:HEADER_LEN + CRC_LEN]:
    print("SECO file seems corrupted")
    sys.exit(1)

# Check aes-gcm string: must be the NUL-terminated "aes-256-gcm".
cipher = seedBuffer[0x12c:0x138]
if binascii.hexlify(cipher) != b"6165732d3235362d67636d00":
    print("Error aes-256-gcm")
    sys.exit(1)

iv = seedBuffer[0x14c:0x158]
authTag = seedBuffer[0x158:0x168]
key = seedBuffer[0x168:0x188]

# Emit the hashcat-compatible EXODUS line on stdout.
print("EXODUS:" + str(n) + ":" + str(r) + ":" + str(p) + ":"
      + base64.b64encode(salt).decode("utf8") + ":"
      + base64.b64encode(iv).decode("utf8") + ":"
      + base64.b64encode(key).decode("utf8") + ":"
      + base64.b64encode(authTag).decode("utf8"))
# Author......: See docs/credits.txt
# License.....: MIT
# Target......: Exodus wallet extractor
# Example.....: exodus2hashcat.py <path to exodus seed seco file>
import binascii
import sys
import hashlib
import base64
import os.path

# Fixed layout of a SECO container (sizes in bytes).
METADATA_LEN = 256
HEADER_LEN = 224
CRC_LEN = 32
LEN_BLOB_STORED = 4

# Exactly one argument: path to the wallet's seed.seco file.
if len(sys.argv) != 2:
    print("Error, usage exodus2hashcat.py <path to exodus seed.seco file>")
    sys.exit(1)
if os.path.basename(sys.argv[1]) != 'seed.seco':
    print("Error, usage exodus2hashcat.py <path to exodus seed.seco file>")
    sys.exit(1)

with open(sys.argv[1], 'rb') as fd:
    seedBuffer = fd.read()

# Basic check: file must start with the "SECO" magic.
if not seedBuffer[0:4].decode("utf8").startswith("SECO"):
    print("Not A SECO exodus header magic")
    sys.exit(1)

# Salt and the big-endian scrypt parameters (N, r, p) from the header.
salt = seedBuffer[0x100:0x120]
n = int.from_bytes(seedBuffer[0x120:0x124], "big")
r = int.from_bytes(seedBuffer[0x124:0x128], "big")
p = int.from_bytes(seedBuffer[0x128:0x12c], "big")
# Basic check: warn (but continue) on non-default scrypt parameters.
if n != 16384 or r != 8 or p != 1:
    print("Warning,unexpected scrypt N,r,p values")

# Stored blob length plus the fixed header sections must match the file
# size, otherwise the file is truncated or corrupt.
blob_len = int.from_bytes(seedBuffer[0x200:0x204], "big")
expected_size = METADATA_LEN + HEADER_LEN + CRC_LEN + LEN_BLOB_STORED + blob_len
if os.path.getsize(sys.argv[1]) != expected_size:
    print(os.path.getsize(sys.argv[1]))
    # BUGFIX: the debug print previously omitted CRC_LEN/LEN_BLOB_STORED and
    # so showed a misleading "expected" size.
    print(expected_size)
    print("Error file size")
    # BUGFIX: this branch previously evaluated the no-op expression
    # ``sys.argv[1]`` and the script kept running on a corrupt file.
    sys.exit(1)

# Check integrity: SHA-256 over everything after the stored digest must
# equal the digest stored in the header.
m = hashlib.sha256()
m.update(seedBuffer[HEADER_LEN + CRC_LEN:])
if m.digest() != seedBuffer[HEADER_LEN:HEADER_LEN + CRC_LEN]:
    print("SECO file seems corrupted")
    sys.exit(1)

# Check aes-gcm string: must be the NUL-terminated "aes-256-gcm".
cipher = seedBuffer[0x12c:0x138]
if binascii.hexlify(cipher) != b"6165732d3235362d67636d00":
    print("Error aes-256-gcm")
    sys.exit(1)

iv = seedBuffer[0x14c:0x158]
authTag = seedBuffer[0x158:0x168]
key = seedBuffer[0x168:0x188]

# Emit the hashcat-compatible EXODUS line on stdout.
print("EXODUS:" + str(n) + ":" + str(r) + ":" + str(p) + ":"
      + base64.b64encode(salt).decode("utf8") + ":"
      + base64.b64encode(iv).decode("utf8") + ":"
      + base64.b64encode(key).decode("utf8") + ":"
      + base64.b64encode(authTag).decode("utf8"))
import os
import sys
# dont do this in production code, this is bad practice it would seem, only for tests
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/../../servicemanager'))
from servicemanager.actions import actions
from servicemanager.serviceresolver import ServiceResolver
from servicemanager.smcontext import SmApplication, SmContext
import time
import shutil
import unittest
from servicemanager import subprocess
class TestBase(unittest.TestCase):
    """Shared fixture for service-manager integration tests.

    Provides a clean per-test workspace directory (also exported as
    $WORKSPACE and made the CWD) plus helpers to start/stop the fake
    Bintray and Nexus services used by the tests.
    """

    config_dir_override = os.path.join(os.path.dirname(__file__), "../conf")
    default_time_out = 10

    def setUp(self):
        self.set_up_and_clean_workspace()
        self.bintrayContext = None
        self.nexusContext = None

    def tearDown(self):
        # Always stop the fake services, even when a test failed mid-way.
        self.stopFakeBintray()
        self.stopFakeNexus()

    def set_up_and_clean_workspace(self):
        """Recreate an empty workspace dir and make it CWD / $WORKSPACE."""
        workspace_dir = os.path.join(os.path.dirname(__file__), "workspace")
        if os.path.exists(workspace_dir):
            shutil.rmtree(workspace_dir)
        os.mkdir(workspace_dir)
        os.environ["WORKSPACE"] = workspace_dir
        os.chdir(workspace_dir)

    def createContext(self):
        """Build a fresh SmContext against the test configuration dir."""
        return SmContext(SmApplication(self.config_dir_override), None, False, False)

    def start_service_and_wait(self, context, servicetostart):
        """Start a single service (fatjar mode) and block until it is up."""
        sm_application = SmApplication(self.config_dir_override)
        service_resolver = ServiceResolver(sm_application)
        actions.start_and_wait(service_resolver, context, [servicetostart], source=False, fatjar=True, release=False, proxy=None, port=None, seconds_to_wait=5, append_args=None)

    def startFakeBintray(self):
        self.bintrayContext = self.createContext()
        self.start_service_and_wait(self.bintrayContext, "FAKE_BINTRAY")
        self.assertIsNotNone(self.bintrayContext.get_service("FAKE_BINTRAY").status())

    def startFakeNexus(self):
        self.nexusContext = self.createContext()
        self.start_service_and_wait(self.nexusContext, "FAKE_NEXUS")
        self.assertIsNotNone(self.nexusContext.get_service("FAKE_NEXUS").status())

    def stopFakeNexus(self):
        if self.nexusContext is not None:
            self.nexusContext.kill("FAKE_NEXUS", True)
            self.assertEqual(self.nexusContext.get_service("FAKE_NEXUS").status(), [])

    def stopFakeBintray(self):
        if self.bintrayContext is not None:
            self.bintrayContext.kill("FAKE_BINTRAY", True)
            self.assertEqual(self.bintrayContext.get_service("FAKE_BINTRAY").status(), [])

    def waitForCondition(self, f, expected, time_out_secs=default_time_out):
        """Poll ``f()`` until it returns ``expected`` or the timeout expires.

        On timeout, dump the process table for debugging, then fail.
        """
        dead_line = time.time() + time_out_secs
        value = None
        while time.time() < dead_line:
            value = f()
            if value == expected:
                return
            time.sleep(0.1)
        command = "ps -eo ppid,pid,etime,rss,args"
        ps_command = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
        stdout, stderr = ps_command.communicate()
        print(stdout)
        # BUGFIX: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(value, expected)
import sys
# dont do this in production code, this is bad practice it would seem, only for tests
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/../../servicemanager'))
from servicemanager.actions import actions
from servicemanager.serviceresolver import ServiceResolver
from servicemanager.smcontext import SmApplication, SmContext
import time
import shutil
import unittest
from servicemanager import subprocess
class TestBase(unittest.TestCase):
    """Shared fixture for service-manager integration tests.

    Provides a clean per-test workspace directory (also exported as
    $WORKSPACE and made the CWD) plus helpers to start/stop the fake
    Bintray and Nexus services used by the tests.
    """

    config_dir_override = os.path.join(os.path.dirname(__file__), "../conf")
    default_time_out = 10

    def setUp(self):
        self.set_up_and_clean_workspace()
        self.bintrayContext = None
        self.nexusContext = None

    def tearDown(self):
        # Always stop the fake services, even when a test failed mid-way.
        self.stopFakeBintray()
        self.stopFakeNexus()

    def set_up_and_clean_workspace(self):
        """Recreate an empty workspace dir and make it CWD / $WORKSPACE."""
        workspace_dir = os.path.join(os.path.dirname(__file__), "workspace")
        if os.path.exists(workspace_dir):
            shutil.rmtree(workspace_dir)
        os.mkdir(workspace_dir)
        os.environ["WORKSPACE"] = workspace_dir
        os.chdir(workspace_dir)

    def createContext(self):
        """Build a fresh SmContext against the test configuration dir."""
        return SmContext(SmApplication(self.config_dir_override), None, False, False)

    def start_service_and_wait(self, context, servicetostart):
        """Start a single service (fatjar mode) and block until it is up."""
        sm_application = SmApplication(self.config_dir_override)
        service_resolver = ServiceResolver(sm_application)
        actions.start_and_wait(service_resolver, context, [servicetostart], source=False, fatjar=True, release=False, proxy=None, port=None, seconds_to_wait=5, append_args=None)

    def startFakeBintray(self):
        self.bintrayContext = self.createContext()
        self.start_service_and_wait(self.bintrayContext, "FAKE_BINTRAY")
        self.assertIsNotNone(self.bintrayContext.get_service("FAKE_BINTRAY").status())

    def startFakeNexus(self):
        self.nexusContext = self.createContext()
        self.start_service_and_wait(self.nexusContext, "FAKE_NEXUS")
        self.assertIsNotNone(self.nexusContext.get_service("FAKE_NEXUS").status())

    def stopFakeNexus(self):
        if self.nexusContext is not None:
            self.nexusContext.kill("FAKE_NEXUS", True)
            self.assertEqual(self.nexusContext.get_service("FAKE_NEXUS").status(), [])

    def stopFakeBintray(self):
        if self.bintrayContext is not None:
            self.bintrayContext.kill("FAKE_BINTRAY", True)
            self.assertEqual(self.bintrayContext.get_service("FAKE_BINTRAY").status(), [])

    def waitForCondition(self, f, expected, time_out_secs=default_time_out):
        """Poll ``f()`` until it returns ``expected`` or the timeout expires.

        On timeout, dump the process table for debugging, then fail.
        """
        dead_line = time.time() + time_out_secs
        value = None
        while time.time() < dead_line:
            value = f()
            if value == expected:
                return
            time.sleep(0.1)
        command = "ps -eo ppid,pid,etime,rss,args"
        ps_command = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
        stdout, stderr = ps_command.communicate()
        print(stdout)
        # BUGFIX: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(value, expected)
import warnings
import math
import re
from functools import wraps
from collections import namedtuple
from datetime import datetime
import urllib
import pytz
import cherrypy
import gnupg
from jinja2 import Environment, PackageLoader
from dateutil import parser as date_parser
from wtforms import Form, TextField, PasswordField, validators, ValidationError, SelectField, IntegerField
from wtforms.ext.dateutil.fields import DateField
from ensconce.config import config
from ensconce.autolog import log
from ensconce.dao import operators
from ensconce import exc, search, acl
from ensconce.auth import get_configured_providers
from ensconce.crypto import state, util as crypto_util
from ensconce.model import meta, Password
from ensconce.cya import auditlog
from ensconce.webapp.util import render, request_params, notify, operator_info
from wtforms.fields.simple import HiddenField
def _is_api_request():
    """
    Whether the request is for JSON data or initiated by XHR.
    """
    headers = cherrypy.request.headers
    wants_json = headers.get('Accept') == 'application/json'
    is_xhr = headers.get('X-Requested-With') == 'XMLHttpRequest'
    return wants_json or is_xhr
def ensure_initialized(f):
    """
    Decorator: make sure the crypto engine is initialized before ``f`` runs.

    In debug mode with a configured secret key file, the key is loaded
    lazily on first use; otherwise CryptoNotInitialized is raised.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        if not state.initialized:
            debug_key = config.get('debug.secret_key') if config.get('debug', False) else None
            if debug_key:
                crypto_util.load_secret_key_file(debug_key)
            else:
                raise exc.CryptoNotInitialized("Crypto engine has not been initialized.")
        return f(*args, **kwargs)
    return wrapper
def transaction(f):
    """
    Decorator: run the whole request inside a single SQLAlchemy transaction.

    Commits on success *and* on HTTPRedirect (a control-flow "exception"),
    rolls back and re-raises on any real error.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        session = meta.Session()
        if not session.is_active:
            # Needed when the session is configured with autocommit=True.
            session.begin()
        try:
            result = f(*args, **kwargs)
        except cherrypy.HTTPRedirect:
            # Redirects are normal outcomes, so the work must be committed.
            session.commit()
            raise
        except:
            log.exception("Rolling back SQLAlchemy transaction due to exception.")
            session.rollback()
            raise
        session.commit()
        return result
    return wrapper
def clean_errors(f):
    """
    Filter out or change specific exceptions and roll back transaction.

    NotLoggedIn: browser requests are redirected to /login (preserving the
    originally requested URL); API/XHR requests get a 403 instead.
    CryptoNotInitialized: browser requests go to /startup; API requests 503.
    The SQLAlchemy session is always closed when the request ends.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        sess = meta.Session()
        try:
            res = f(*args, **kwargs)
            # Unfortunately, that doesn't work flawlessly, since flush() will mark the session 'clean' despite it still being uncommitted
            # Also it doesn't capture things removed via SQL directly. (But it's probably still worth keeping here.)
            if sess.new or sess.dirty or sess.deleted:
                warnings.warn("Unsaved objects in session will be discarded: new={0!r}, dirty={1!r}, deleted={2!r}".format(sess.new, sess.dirty, sess.deleted))
            return res
        except exc.NotLoggedIn:
            if not _is_api_request():
                current_url = cherrypy.url(qs=cherrypy.request.query_string, relative="server")
                # Don't bounce back to "/" or to the logout URL after login.
                if current_url and current_url != '/' and not re.search(r'logout/?$', current_url):
                    redirect_url = "/login?redirect={0}".format(urllib.quote_plus(current_url))
                else:
                    redirect_url = "/login"
                raise cherrypy.HTTPRedirect(redirect_url)
            else:
                raise cherrypy.HTTPError("403 Forbidden", "User is not logged in.")
        except exc.CryptoNotInitialized:
            sess.rollback()
            if not _is_api_request():
                raise cherrypy.HTTPRedirect("/startup")
            else:
                raise cherrypy.HTTPError("503 Service Unavailable", "Crypto engine has not been initialized.")
        finally:
            sess.close()
    return wrapper
def check_session(f):
    """Decorator: require a logged-in cherrypy session, else raise NotLoggedIn."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        session = cherrypy.session  # @UndefinedVariable
        if 'username' not in session:
            raise exc.NotLoggedIn()
        return f(*args, **kwargs)
    return wrapper
class expose_all(object):
    """
    A class decorator that exposes every method to cherrypy and wraps each in
    the standard request pipeline: auth check, crypto-init check, transaction
    management and error cleanup.

    insecure_methods: names that skip the auth decorator (e.g. login pages).
    init_methods: names reachable before the crypto engine is initialized.
    """
    def __init__(self, insecure_methods=None, init_methods=None, auth_decorator=check_session):
        if insecure_methods is None:
            insecure_methods = []
        if init_methods is None:
            init_methods = []
        if init_methods:
            # If there are any init methods that are not insecure, we should issue a warning
            # (This is probably a misconfiguration.)
            candidates = set(init_methods).difference(set(insecure_methods))
            if candidates:
                warnings.warn("Some init_methods are not included in insecure_methods: {0}".format(candidates))
        self.insecure_methods = insecure_methods
        self.init_methods = init_methods
        self.auth_decorator = auth_decorator

    def __call__(self, clazz):
        # BUGFIX: snapshot the items with list() -- setattr() below replaces
        # entries in the very __dict__ view being iterated.
        for name, attr in list(clazz.__dict__.items()):
            if callable(attr):  # idiomatic replacement for hasattr(attr, "__call__")
                attr.exposed = True
                if name not in self.init_methods:
                    attr = ensure_initialized(attr)
                if name not in self.insecure_methods:
                    attr = self.auth_decorator(attr)
                attr = transaction(attr)  # Wrap entire request in SA transaction
                attr = clean_errors(attr)  # Handle any exception cleanup
                setattr(clazz, name, attr)  # Finally, replace the attr on the class
        return clazz
def validate_passphrase(form, field):
    """WTForms validator: check the entered passphrase unlocks the DB key."""
    try:
        passphrase = field.data
        key = crypto_util.derive_configured_key(passphrase)
        if not crypto_util.validate_key(key):
            raise ValidationError("Invalid passphrase entered.")
    except ValidationError:
        raise
    except exc.MissingKeyMetadata:
        log.exception("Missing key metadata.")
        raise ValidationError("Database crypto has not yet been initialized.")
    except Exception:
        # BUGFIX: was a bare ``except:``, which would also swallow
        # SystemExit / KeyboardInterrupt.
        log.exception("Error validating passphrase.")
        raise ValidationError("Error validating passphrase (see server logs).")
class PassphraseSubmitForm(Form):
    """Single-field form for entering the master passphrase at startup."""
    passphrase = TextField('Passphrase', validators=[validators.Required(),
                                                     validate_passphrase])
class LoginForm(Form):
    """Username/password login form; ``redirect`` keeps the post-login target URL."""
    redirect = HiddenField('redirect')
    username = TextField('Username', validators=[validators.Required(),
                                                 validators.Length(max=255)])
    password = PasswordField('Password', validators=[validators.Required(),
                                                     validators.Length(max=2048)])
class AuditlogForm(Form):
    """Filter/pagination form for the audit-log browser."""
    code = SelectField('Code', choices=[('', '(All)')]+[(c,c) for c in sorted(auditlog.enumerate_codes())], default='')  # Need a default or we get coerced to u'None'
    start = DateField('Start')
    end = DateField('End')
    # NOTE(review): the field is named 'comment' but labelled 'Username' -- confirm intent.
    comment = TextField('Username')
    operator = TextField('Operator')
    page = IntegerField('Page', default=1)
@expose_all(insecure_methods=('login', 'startup', 'process_login', 'initialize', 'osd'),
            init_methods=('startup', 'initialize'))
class Root(object):
    """
    The root cherrypy handler class.

    Every method is exposed by @expose_all and wrapped in the standard
    auth / crypto-init / transaction / error-cleanup pipeline.
    """
    def index(self):
        """Render the dashboard with the operator's recently viewed passwords."""
        # Grab some recent passwords accessed by the current user.
        results = auditlog.recent_content_views(operator_id=operator_info().user_id,
                                                object_type=Password.object_type(),
                                                limit=20,
                                                skip_count=True)
        return render("index.html", {'recent_pw_views': results.entries})

    @cherrypy.tools.response_headers(headers=[('Content-Type', 'text/xml')])
    def osd(self):
        """Serve the OpenSearch descriptor XML."""
        return render("osd-search.xml", {'base_url': cherrypy.url('/')})

    def startup(self):
        """Show the passphrase form used to initialize the crypto engine."""
        form = PassphraseSubmitForm()
        return render("startup.html", {'form': form})

    def initialize(self, **kwargs):
        """Validate the submitted passphrase and unlock the crypto engine."""
        form = PassphraseSubmitForm(request_params())
        if form.validate():
            crypto_util.configure_crypto_state(form.passphrase.data)
            raise cherrypy.HTTPRedirect("/")
        else:
            return render("startup.html", {'form': form})

    def login(self, redirect=None):
        """Render the login form; ``redirect`` is the post-login target URL."""
        form = LoginForm(redirect=redirect)
        return render("login.html", {'auth_provider': config['auth.provider'], 'form': form})

    def process_login(self, **kwargs):
        """Authenticate posted credentials against all configured providers."""
        form = LoginForm(request_params())
        # TODO: Refactor to combine with the ensconce.server:checkpassword method. Lots of duplicate
        # logic here. AT MINIMUM MAKE SURE THAT ANY CHANGES HERE ARE REFLECTED THERE
        # This is a "flow-control" exception. ... You'll see. :)
        class _LoginFailed(Exception):
            pass
        try:
            if not form.validate():
                raise _LoginFailed()
            username = form.username.data
            password = form.password.data
            for auth_provider in get_configured_providers():
                try:
                    auth_provider.authenticate(username, password)
                except exc.InsufficientPrivileges:
                    form.username.errors.append(ValidationError("Insufficient privileges to log in."))
                    # Fail fast in this case; we don't want to continue on to try other authenticators.
                    raise _LoginFailed()
                except exc.AuthError:
                    # Swallow other auth errors so it goes onto next authenticator in the list.
                    pass
                except:
                    # Other exceptions needs to get logged at least.
                    log.exception("Unexpected error authenticating user using {0!r}".format(auth_provider))
                else:
                    log.info("Authentication succeeded for username {0} using provider {1}".format(username, auth_provider))
                    break
            else:
                # for/else: no provider accepted the credentials.
                log.debug("Authenticators exhausted; login failed.")
                form.password.errors.append(ValidationError("Invalid username/password."))
                raise _LoginFailed()
        except _LoginFailed:
            # NOTE(review): ``username`` is unbound here when form.validate()
            # failed above -- potential NameError; confirm and guard.
            auditlog.log(auditlog.CODE_AUTH_FAILED, comment=username)
            return render("login.html", {'auth_provider': config['auth.provider'], 'form': form})
        else:
            # Resolve the user using the *current value* for auth_provider (as that is the one that passed the auth.
            user = auth_provider.resolve_user(username)
            log.debug("Setting up cherrypy session with username={0}, user_id={1}".format(username, user.id))
            cherrypy.session['username'] = username  # @UndefinedVariable
            cherrypy.session['user_id'] = user.id  # @UndefinedVariable
            auditlog.log(auditlog.CODE_AUTH_LOGIN)
            if form.redirect.data:
                raise cherrypy.HTTPRedirect(form.redirect.data)
            else:
                raise cherrypy.HTTPRedirect("/")

    def logout(self):
        """Clear the session and return to the home page (forces re-login)."""
        auditlog.log(auditlog.CODE_AUTH_LOGOUT)
        cherrypy.session.clear()  # @UndefinedVariable
        raise cherrypy.HTTPRedirect("/")

    @acl.require_access(acl.AUDIT)
    def auditlog(self, **kwargs):
        """Paginated audit-log browser with code/date/operator filters."""
        form = AuditlogForm(request_params())
        page_size = 50
        page = form.page.data
        offset = page_size * (page - 1)
        limit = page_size
        log.debug("Page = {0}, offset={1}, limit={2}".format(page, offset, limit))
        results = auditlog.search(start=form.start.data,
                                  end=form.end.data,
                                  code=form.code.data,
                                  operator_username=form.operator.data,
                                  offset=offset,
                                  limit=limit)
        if results.count < offset:
            # Requested page is past the end of the results; snap back to page 1.
            form.page.data = 1
            form.page.raw_data = ['1']  # Apparently need this too!
        total_pages = int(math.ceil( (1.0 * results.count) / page_size))
        return render('auditlog.html', {'entries': results.entries, 'form': form, 'total_pages': total_pages})

    @acl.require_access([acl.GROUP_R, acl.RESOURCE_R, acl.PASS_R])
    def search(self, searchstr):
        """Search resources/groups/passwords; jump straight to a single hit."""
        r_matches = g_matches = p_matches = None
        if searchstr:
            (r_matches, g_matches, p_matches) = search.search(searchstr, include_encrypted=True)
            if len(r_matches) + len(g_matches) + len(p_matches) == 1:
                # There was only one result, so just send them to the resulting page.
                notify("Showing you the one result that matched your query.")
                if r_matches:
                    raise cherrypy.HTTPRedirect("/resource/view/{0}".format(r_matches[0].id))
                elif g_matches:
                    raise cherrypy.HTTPRedirect("/group/view/{0}".format(g_matches[0].id))
                elif p_matches:
                    # We could also redirect them to the password view/history page if that is more helpful?
                    raise cherrypy.HTTPRedirect("/resource/view/{0}".format(p_matches[0].resource_id))
        auditlog.log(auditlog.CODE_SEARCH, comment=searchstr)
        return render('search.html', {'resource_matches': r_matches,
                                      'group_matches': g_matches,
                                      'password_matches': p_matches,
                                      'searchstr': searchstr })
import math
import re
from functools import wraps
from collections import namedtuple
from datetime import datetime
import urllib
import pytz
import cherrypy
import gnupg
from jinja2 import Environment, PackageLoader
from dateutil import parser as date_parser
from wtforms import Form, TextField, PasswordField, validators, ValidationError, SelectField, IntegerField
from wtforms.ext.dateutil.fields import DateField
from ensconce.config import config
from ensconce.autolog import log
from ensconce.dao import operators
from ensconce import exc, search, acl
from ensconce.auth import get_configured_providers
from ensconce.crypto import state, util as crypto_util
from ensconce.model import meta, Password
from ensconce.cya import auditlog
from ensconce.webapp.util import render, request_params, notify, operator_info
from wtforms.fields.simple import HiddenField
def _is_api_request():
"""
Whether the request is for JSON data or initiated by XHR.
"""
accept = cherrypy.request.headers.get('Accept')
requested_with = cherrypy.request.headers.get('X-Requested-With')
return accept in ('application/json',) or requested_with in ('XMLHttpRequest',)
def ensure_initialized(f):
"""
Makes sure crypto engine has been initialized.
"""
@wraps(f)
def wrapper(*args, **kwargs):
if not state.initialized:
if config.get('debug', False) and config.get('debug.secret_key'):
secret_key_file = config.get('debug.secret_key')
crypto_util.load_secret_key_file(secret_key_file)
else:
raise exc.CryptoNotInitialized("Crypto engine has not been initialized.")
return f(*args, **kwargs)
return wrapper
def transaction(f):
"""
A decorator to automatically wrap the request in a single transaction.
"""
@wraps(f)
def wrapper(*args, **kwargs):
session = meta.Session()
if not session.is_active:
session.begin() # In case autocommit=True
try:
res = f(*args, **kwargs)
except cherrypy.HTTPRedirect:
# This is not a "real" exception, so we still want to commit the transaction.
session.commit()
raise
except:
log.exception("Rolling back SQLAlchemy transaction due to exception.")
session.rollback()
raise
else:
session.commit()
return res
return wrapper
def clean_errors(f):
"""
Filter out or change specific exceptions and roll back transaction.
"""
@wraps(f)
def wrapper(*args, **kwargs):
sess = meta.Session()
try:
res = f(*args, **kwargs)
# Unfortunately, that doesn't work flawlessly, since flush() will mark the session 'clean' despite it still being uncommitted
# Also it doesn't capture things removed via SQL directly. (But it's probably still worth keeping here.)
if sess.new or sess.dirty or sess.deleted:
warnings.warn("Unsaved objects in session will be discarded: new={0!r}, dirty={1!r}, deleted={2!r}".format(sess.new, sess.dirty, sess.deleted))
return res
except exc.NotLoggedIn:
if not _is_api_request():
current_url = cherrypy.url(qs=cherrypy.request.query_string, relative="server")
if current_url and current_url != '/' and not re.search(r'logout/?$', current_url):
redirect_url = "/login?redirect={0}".format(urllib.quote_plus(current_url))
else:
redirect_url = "/login"
raise cherrypy.HTTPRedirect(redirect_url)
else:
raise cherrypy.HTTPError("403 Forbidden", "User is not logged in.")
except exc.CryptoNotInitialized:
sess.rollback()
if not _is_api_request():
raise cherrypy.HTTPRedirect("/startup")
else:
raise cherrypy.HTTPError("503 Service Unavailable", "Crypto engine has not been initialized.")
finally:
sess.close()
return wrapper
def check_session(f):
@wraps(f)
def wrapper(*args, **kwargs):
if 'username' not in cherrypy.session: # @UndefinedVariable
raise exc.NotLoggedIn()
return f(*args, **kwargs)
return wrapper
class expose_all(object):
    """
    A class decorator that exposes every method to cherrypy and wraps each in
    the standard request pipeline: auth check, crypto-init check, transaction
    management and error cleanup.

    insecure_methods: names that skip the auth decorator (e.g. login pages).
    init_methods: names reachable before the crypto engine is initialized.
    """
    def __init__(self, insecure_methods=None, init_methods=None, auth_decorator=check_session):
        if insecure_methods is None:
            insecure_methods = []
        if init_methods is None:
            init_methods = []
        if init_methods:
            # If there are any init methods that are not insecure, we should issue a warning
            # (This is probably a misconfiguration.)
            candidates = set(init_methods).difference(set(insecure_methods))
            if candidates:
                warnings.warn("Some init_methods are not included in insecure_methods: {0}".format(candidates))
        self.insecure_methods = insecure_methods
        self.init_methods = init_methods
        self.auth_decorator = auth_decorator

    def __call__(self, clazz):
        # BUGFIX: snapshot the items with list() -- setattr() below replaces
        # entries in the very __dict__ view being iterated.
        for name, attr in list(clazz.__dict__.items()):
            if callable(attr):  # idiomatic replacement for hasattr(attr, "__call__")
                attr.exposed = True
                if name not in self.init_methods:
                    attr = ensure_initialized(attr)
                if name not in self.insecure_methods:
                    attr = self.auth_decorator(attr)
                attr = transaction(attr)  # Wrap entire request in SA transaction
                attr = clean_errors(attr)  # Handle any exception cleanup
                setattr(clazz, name, attr)  # Finally, replace the attr on the class
        return clazz
def validate_passphrase(form, field):
    """WTForms validator: check the entered passphrase unlocks the DB key."""
    try:
        passphrase = field.data
        key = crypto_util.derive_configured_key(passphrase)
        if not crypto_util.validate_key(key):
            raise ValidationError("Invalid passphrase entered.")
    except ValidationError:
        raise
    except exc.MissingKeyMetadata:
        log.exception("Missing key metadata.")
        raise ValidationError("Database crypto has not yet been initialized.")
    except Exception:
        # BUGFIX: was a bare ``except:``, which would also swallow
        # SystemExit / KeyboardInterrupt.
        log.exception("Error validating passphrase.")
        raise ValidationError("Error validating passphrase (see server logs).")
class PassphraseSubmitForm(Form):
passphrase = TextField('Passphrase', validators=[validators.Required(),
validate_passphrase])
class LoginForm(Form):
redirect = HiddenField('redirect')
username = TextField('Username', validators=[validators.Required(),
validators.Length(max=255)])
password = PasswordField('Password', validators=[validators.Required(),
validators.Length(max=2048)])
class AuditlogForm(Form):
code = SelectField('Code', choices=[('', '(All)')]+[(c,c) for c in sorted(auditlog.enumerate_codes())], default='') # Need a default or we get coerced to u'None'
start = DateField('Start')
end = DateField('End')
comment = TextField('Username')
operator = TextField('Operator')
page = IntegerField('Page', default=1)
@expose_all(insecure_methods=('login', 'startup', 'process_login', 'initialize', 'osd'),
init_methods=('startup', 'initialize'))
class Root(object):
"""
The root cherrypy handler class.
"""
def index(self):
# Grab some recent passwords accessed by the current user.
results = auditlog.recent_content_views(operator_id=operator_info().user_id,
object_type=Password.object_type(),
limit=20,
skip_count=True)
return render("index.html", {'recent_pw_views': results.entries})
@cherrypy.tools.response_headers(headers=[('Content-Type', 'text/xml')])
def osd(self):
return render("osd-search.xml", {'base_url': cherrypy.url('/')})
def startup(self):
form = PassphraseSubmitForm()
return render("startup.html", {'form': form})
def initialize(self, **kwargs):
form = PassphraseSubmitForm(request_params())
if form.validate():
crypto_util.configure_crypto_state(form.passphrase.data)
raise cherrypy.HTTPRedirect("/")
else:
return render("startup.html", {'form': form})
def login(self, redirect=None):
form = LoginForm(redirect=redirect)
return render("login.html", {'auth_provider': config['auth.provider'], 'form': form})
def process_login(self, **kwargs):
form = LoginForm(request_params())
# TODO: Refactor to combine with the ensconce.server:checkpassword method. Lots of duplicate
# logic here. AT MINIMUM MAKE SURE THAT ANY CHANGES HERE ARE REFLECTED THERE
# This is a "flow-control" exception. ... You'll see. :)
class _LoginFailed(Exception):
pass
try:
if not form.validate():
raise _LoginFailed()
username = form.username.data
password = form.password.data
for auth_provider in get_configured_providers():
try:
auth_provider.authenticate(username, password)
except exc.InsufficientPrivileges:
form.username.errors.append(ValidationError("Insufficient privileges to log in."))
# Fail fast in this case; we don't want to continue on to try other authenticators.
raise _LoginFailed()
except exc.AuthError:
# Swallow other auth errors so it goes onto next authenticator in the list.
pass
except:
# Other exceptions needs to get logged at least.
log.exception("Unexpected error authenticating user using {0!r}".format(auth_provider))
else:
log.info("Authentication succeeded for username {0} using provider {1}".format(username, auth_provider))
break
else:
log.debug("Authenticators exhausted; login failed.")
form.password.errors.append(ValidationError("Invalid username/password."))
raise _LoginFailed()
except _LoginFailed:
auditlog.log(auditlog.CODE_AUTH_FAILED, comment=username)
return render("login.html", {'auth_provider': config['auth.provider'], 'form': form})
else:
# Resolve the user using the *current value* for auth_provider (as that is the one that passed the auth.
user = auth_provider.resolve_user(username)
log.debug("Setting up cherrypy session with username={0}, user_id={1}".format(username, user.id))
cherrypy.session['username'] = username # @UndefinedVariable
cherrypy.session['user_id'] = user.id # @UndefinedVariable
auditlog.log(auditlog.CODE_AUTH_LOGIN)
if form.redirect.data:
raise cherrypy.HTTPRedirect(form.redirect.data)
else:
raise cherrypy.HTTPRedirect("/")
def logout(self):
auditlog.log(auditlog.CODE_AUTH_LOGOUT)
cherrypy.session.clear() # @UndefinedVariable
raise cherrypy.HTTPRedirect("/")
@acl.require_access(acl.AUDIT)
def auditlog(self, **kwargs):
form = AuditlogForm(request_params())
page_size = 50
page = form.page.data
offset = page_size * (page - 1)
limit = page_size
log.debug("Page = {0}, offset={1}, limit={2}".format(page, offset, limit))
results = auditlog.search(start=form.start.data,
end=form.end.data,
code=form.code.data,
operator_username=form.operator.data,
offset=offset,
limit=limit)
if results.count < offset:
form.page.data = 1
form.page.raw_data = ['1'] # Apparently need this too!
total_pages = int(math.ceil( (1.0 * results.count) / page_size))
return render('auditlog.html', {'entries': results.entries, 'form': form, 'total_pages': total_pages})
@acl.require_access([acl.GROUP_R, acl.RESOURCE_R, acl.PASS_R])
def search(self, searchstr):
r_matches = g_matches = p_matches = None
if searchstr:
(r_matches, g_matches, p_matches) = search.search(searchstr, include_encrypted=True)
if len(r_matches) + len(g_matches) + len(p_matches) == 1:
# There was only one result, so just send them to the resulting page.
notify("Showing you the one result that matched your query.")
if r_matches:
raise cherrypy.HTTPRedirect("/resource/view/{0}".format(r_matches[0].id))
elif g_matches:
raise cherrypy.HTTPRedirect("/group/view/{0}".format(g_matches[0].id))
elif p_matches:
# We could also redirect them to the password view/history page if that is more helpful?
raise cherrypy.HTTPRedirect("/resource/view/{0}".format(p_matches[0].resource_id))
auditlog.log(auditlog.CODE_SEARCH, comment=searchstr)
return render('search.html', {'resource_matches': r_matches,
'group_matches': g_matches,
'password_matches': p_matches,
'searchstr': searchstr }) | 0.393269 | 0.082734 |
import pytest

from nlp import Morphology
from etc import all_entitis


class TestNLP:
    """End-to-end checks for Morphology.analyze on Russian smart-home phrases."""

    def setup_method(self):
        # FIX: the nose-style `setup` hook is deprecated and removed in modern
        # pytest; `setup_method` is the supported xunit-style per-test hook.
        self.morph = Morphology(all_entitis)

    @pytest.mark.parametrize("phrase,expect", [
        pytest.param('Включи свет в коридоре',
                     {'device_action': 'turn_on', 'device': 'light', 'place': 'hall'},
                     id='turn_on light hall'),
        pytest.param('Зажги лампочку на кухне',
                     {'device_action': 'turn_on', 'device': 'light', 'place': 'kitchen'},
                     id='turn_on light kitchen'),
        pytest.param('Отключи люстру в зале',
                     {'device_action': 'turn_off', 'device': 'light', 'place': 'livingroom'},
                     # FIX: id previously said 'turn_on' although the case expects turn_off
                     id='turn_off light livingroom'),
        pytest.param('Выключи освещение в туалете',
                     {'device_action': 'turn_off', 'device': 'light', 'place': 'restroom'},
                     id='turn_off light restroom'),
        pytest.param('Выруби телевизор в спальне',
                     {'device_action': 'turn_off', 'device': 'tv', 'place': 'livingroom'},
                     id='turn_off tv livingroom'),
        pytest.param('Включи пожалуйста колонку в детской',
                     {'device_action': 'turn_on', 'device': 'music', 'place': 'playroom'},
                     id='turn_on music playroom'),
        pytest.param('Зажги свет в ванной',
                     {'device_action': 'turn_on', 'device': 'light', 'place': 'bathroom'},
                     id='turn_on light bathroom'),
        pytest.param('Вруби лампочку в прихожей',
                     {'device_action': 'turn_on', 'device': 'light', 'place': 'hall'},
                     id='turn_on light hall'),
        pytest.param('Телевизор выключи в игровой',
                     {'device_action': 'turn_off', 'device': 'tv', 'place': 'playroom'},
                     id='turn_off tv playroom'),
        pytest.param('Выруби музыку в большой комнате',
                     {'device_action': 'turn_off', 'device': 'music', 'place': 'livingroom'},
                     id='turn_off music livingroom'),
        pytest.param('Включи колонку в маленькой комнате',
                     {'device_action': 'turn_on', 'device': 'music', 'place': 'playroom'},
                     id='turn_on music playroom'),
        pytest.param('Зажги везде лампочки',
                     {'device_action': 'turn_on', 'device': 'light', 'place': 'all'},
                     id='turn_on light all'),
        pytest.param('Вруби в доме музыку',
                     {'device_action': 'turn_on', 'device': 'music', 'place': 'all'},
                     id='turn_on music all'),
        pytest.param('Выключи во всем доме освещение',
                     {'device_action': 'turn_off', 'device': 'light', 'place': 'all'},
                     id='turn_off light all'),
        pytest.param('Выруби в квартире телевизор',
                     {'device_action': 'turn_off', 'device': 'tv', 'place': 'all'},
                     id='turn_off tv all'),
        pytest.param('Включи во всей квартире музыку',
                     {'device_action': 'turn_on', 'device': 'music', 'place': 'all'},
                     id='turn_on music all'),
        pytest.param('Зажги тут свет',
                     {'device_action': 'turn_on', 'device': 'light', 'place': 'here'},
                     id='turn_on light here'),
        pytest.param('Вруби здесь телевизор',
                     {'device_action': 'turn_on', 'device': 'tv', 'place': 'here'},
                     id='turn_on tv here'),
        pytest.param('Выключи во второй комнате освещение',
                     {'device_action': 'turn_off', 'device': 'light', 'place': 'playroom'},
                     id='turn_off light playroom'),
    ])
    def test_nlp(self, phrase, expect):
        result = self.morph.analyze(phrase)
        # `unknown` collects unmatched tokens; irrelevant for these assertions.
        if 'unknown' in result:
            del result['unknown']
        assert result == expect
from nlp import Morphology
from etc import all_entitis
class TestNLP:
def setup(self):
self.morph = Morphology(all_entitis)
@pytest.mark.parametrize("phrase,expect", [
pytest.param('Включи свет в коридоре',
{'device_action': 'turn_on', 'device': 'light', 'place': 'hall'},
id='turn_on light hall'),
pytest.param('Зажги лампочку на кухне',
{'device_action': 'turn_on', 'device': 'light', 'place': 'kitchen'},
id='turn_on light kitchen'),
pytest.param('Отключи люстру в зале',
{'device_action': 'turn_off', 'device': 'light', 'place': 'livingroom'},
id='turn_on light livingroom'),
pytest.param('Выключи освещение в туалете',
{'device_action': 'turn_off', 'device': 'light', 'place': 'restroom'},
id='turn_off light restroom'),
pytest.param('Выруби телевизор в спальне',
{'device_action': 'turn_off', 'device': 'tv', 'place': 'livingroom'},
id='turn_off tv livingroom'),
pytest.param('Включи пожалуйста колонку в детской',
{'device_action': 'turn_on', 'device': 'music', 'place': 'playroom'},
id='turn_on music playroom'),
pytest.param('Зажги свет в ванной',
{'device_action': 'turn_on', 'device': 'light', 'place': 'bathroom'},
id='turn_on light bathroom'),
pytest.param('Вруби лампочку в прихожей',
{'device_action': 'turn_on', 'device': 'light', 'place': 'hall'},
id='turn_on light hall'),
pytest.param('Телевизор выключи в игровой',
{'device_action': 'turn_off', 'device': 'tv', 'place': 'playroom'},
id='turn_off tv playroom'),
pytest.param('Выруби музыку в большой комнате',
{'device_action': 'turn_off', 'device': 'music', 'place': 'livingroom'},
id='turn_off music livingroom'),
pytest.param('Включи колонку в маленькой комнате',
{'device_action': 'turn_on', 'device': 'music', 'place': 'playroom'},
id='turn_on music playroom'),
pytest.param('Зажги везде лампочки',
{'device_action': 'turn_on', 'device': 'light', 'place': 'all'},
id='turn_on light all'),
pytest.param('Вруби в доме музыку',
{'device_action': 'turn_on', 'device': 'music', 'place': 'all'},
id='turn_on music all'),
pytest.param('Выключи во всем доме освещение',
{'device_action': 'turn_off', 'device': 'light', 'place': 'all'},
id='turn_off light all'),
pytest.param('Выруби в квартире телевизор',
{'device_action': 'turn_off', 'device': 'tv', 'place': 'all'},
id='turn_off tv all'),
pytest.param('Включи во всей квартире музыку',
{'device_action': 'turn_on', 'device': 'music', 'place': 'all'},
id='turn_on music all'),
pytest.param('Зажги тут свет',
{'device_action': 'turn_on', 'device': 'light', 'place': 'here'},
id='turn_on light here'),
pytest.param('Вруби здесь телевизор',
{'device_action': 'turn_on', 'device': 'tv', 'place': 'here'},
id='turn_on tv here'),
pytest.param('Выключи во второй комнате освещение',
{'device_action': 'turn_off', 'device': 'light', 'place': 'playroom'},
id='turn_off light playroom'),
])
def test_nlp(self, phrase, expect):
result = self.morph.analyze(phrase)
if 'unknown' in result:
del result['unknown']
assert result == expect | 0.239972 | 0.571438 |
from __future__ import annotations
import math
import time
from concurrent.futures import Executor
from threading import Event
from typing import Any, Dict
from qmixsdk.qmixpump import ContiFlowProperty, ContiFlowPump, ContiFlowSwitchingMode
from sila2.framework import FullyQualifiedIdentifier
from sila2.framework.errors.validation_error import ValidationError
from ..generated.continuousflowconfigurationservice import (
ContinuousFlowConfigurationServiceBase,
ContinuousFlowConfigurationServiceFeature,
SetCrossFlowDuration_Responses,
SetOverlapDuration_Responses,
SetRefillFlowRate_Responses,
SetSwitchingMode_Responses,
)
def invert_dict(d: dict) -> dict:
    """Return a new dict with keys and values swapped.

    Assumes values are hashable and unique; on duplicate values the last
    key wins (same semantics as the original list-of-pairs construction).
    """
    return {v: k for k, v in d.items()}
class ContinuousFlowConfigurationServiceImpl(ContinuousFlowConfigurationServiceBase):
    """SiLA feature implementation exposing the continuous-flow (contiflow)
    configuration of a CETONI pump.

    Background pollers watch the pump's device properties and push SiLA
    property updates whenever a value changes; `stop()` terminates them.
    """

    __pump: ContiFlowPump
    __ALLOWED_SWITCHING_MODES = {
        "SwitchingCrossFlow": ContiFlowSwitchingMode.CROSS_FLOW
        # more to come
    }
    __stop_event: Event

    def __init__(self, pump: ContiFlowPump, executor: Executor):
        super().__init__()
        self.__pump = pump
        self.__stop_event = Event()

        # TODO restore drive position counter + contiflow params

        # initial values
        self.update_CrossFlowDuration(self.__pump.get_device_property(ContiFlowProperty.CROSSFLOW_DURATION_S))
        self.update_MaxRefillFlowRate(self.__pump.get_device_property(ContiFlowProperty.MAX_REFILL_FLOW))
        self.update_MinFlowRate(self.__pump.get_device_property(ContiFlowProperty.MIN_PUMP_FLOW))
        self.update_OverlapDuration(self.__pump.get_device_property(ContiFlowProperty.OVERLAP_DURATION_S))
        self.update_RefillFlowRate(self.__pump.get_device_property(ContiFlowProperty.REFILL_FLOW))
        # BUGFIX: look up the *current* switching mode value; the original
        # passed the ContiFlowProperty enum member itself into the inverted map.
        self.update_SwitchingMode(
            invert_dict(self.__ALLOWED_SWITCHING_MODES).get(
                self.__pump.get_device_property(ContiFlowProperty.SWITCHING_MODE)
            )
        )

        # One generic poller per property replaces six near-identical closures.
        executor.submit(self.__poll, ContiFlowProperty.CROSSFLOW_DURATION_S, self.update_CrossFlowDuration)
        executor.submit(self.__poll, ContiFlowProperty.MAX_REFILL_FLOW, self.update_MaxRefillFlowRate)
        executor.submit(self.__poll, ContiFlowProperty.MIN_PUMP_FLOW, self.update_MinFlowRate)
        executor.submit(self.__poll, ContiFlowProperty.OVERLAP_DURATION_S, self.update_OverlapDuration)
        executor.submit(self.__poll, ContiFlowProperty.REFILL_FLOW, self.update_RefillFlowRate)
        executor.submit(
            self.__poll,
            ContiFlowProperty.SWITCHING_MODE,
            lambda mode: self.update_SwitchingMode(invert_dict(self.__ALLOWED_SWITCHING_MODES)[mode]),
            numeric=False,
        )

    def __poll(self, prop: ContiFlowProperty, push, numeric: bool = True) -> None:
        """Poll *prop* every 100 ms until `stop()` and push changes.

        BUGFIX: the original closures pushed the stale baseline value and never
        advanced it, so a single change re-fired forever with the old value.
        Here the NEW value is pushed and the baseline advances.

        :param prop: device property to watch
        :param push: SiLA update callable receiving the new value
        :param numeric: compare with math.isclose (floats) vs. != (enums)
        """
        last = self.__pump.get_device_property(prop)
        while not self.__stop_event.is_set():
            current = self.__pump.get_device_property(prop)
            changed = (not math.isclose(current, last)) if numeric else current != last
            if changed:
                push(current)
                last = current  # only report transitions, not a sticky diff
            time.sleep(0.1)

    def SetSwitchingMode(
        self, SwitchingMode: str, *, metadata: Dict[FullyQualifiedIdentifier, Any]
    ) -> SetSwitchingMode_Responses:
        """Set the contiflow switching mode; unknown modes raise ValidationError."""
        try:
            # BUGFIX: use indexing, not .get() — .get() returns None and never
            # raises KeyError, which made the validation branch unreachable.
            self.__pump.set_device_property(
                ContiFlowProperty.SWITCHING_MODE, self.__ALLOWED_SWITCHING_MODES[SwitchingMode]
            )
        except KeyError:
            raise ValidationError(
                ContinuousFlowConfigurationServiceFeature["SetSwitchingMode"].parameters.fields[0],
                "The given value for the Contiflow Switching Mode is invalid. Allowed values are: {}".format(
                    ", ".join(self.__ALLOWED_SWITCHING_MODES.keys())
                ),
            )

    def SetRefillFlowRate(
        self, RefillFlowRate: float, *, metadata: Dict[FullyQualifiedIdentifier, Any]
    ) -> SetRefillFlowRate_Responses:
        """Set the refill flow rate device property."""
        self.__pump.set_device_property(ContiFlowProperty.REFILL_FLOW, RefillFlowRate)

    def SetCrossFlowDuration(
        self, CrossFlowDuration: float, *, metadata: Dict[FullyQualifiedIdentifier, Any]
    ) -> SetCrossFlowDuration_Responses:
        """Set the cross-flow duration (seconds) device property."""
        self.__pump.set_device_property(ContiFlowProperty.CROSSFLOW_DURATION_S, CrossFlowDuration)

    def SetOverlapDuration(
        self, OverlapDuration: float, *, metadata: Dict[FullyQualifiedIdentifier, Any]
    ) -> SetOverlapDuration_Responses:
        """Set the overlap duration (seconds) device property."""
        self.__pump.set_device_property(ContiFlowProperty.OVERLAP_DURATION_S, OverlapDuration)

    def stop(self) -> None:
        """Signal all background pollers to terminate."""
        self.__stop_event.set()
import math
import time
from concurrent.futures import Executor
from threading import Event
from typing import Any, Dict
from qmixsdk.qmixpump import ContiFlowProperty, ContiFlowPump, ContiFlowSwitchingMode
from sila2.framework import FullyQualifiedIdentifier
from sila2.framework.errors.validation_error import ValidationError
from ..generated.continuousflowconfigurationservice import (
ContinuousFlowConfigurationServiceBase,
ContinuousFlowConfigurationServiceFeature,
SetCrossFlowDuration_Responses,
SetOverlapDuration_Responses,
SetRefillFlowRate_Responses,
SetSwitchingMode_Responses,
)
def invert_dict(d: dict) -> dict:
return dict([(v, k) for k, v in d.items()])
class ContinuousFlowConfigurationServiceImpl(ContinuousFlowConfigurationServiceBase):
__pump: ContiFlowPump
__ALLOWED_SWITCHING_MODES = {
"SwitchingCrossFlow": ContiFlowSwitchingMode.CROSS_FLOW
# more to come
}
__stop_event: Event
def __init__(self, pump: ContiFlowPump, executor: Executor):
super().__init__()
self.__pump = pump
self.__stop_event = Event()
# TODO restore drive position counter + contiflow params
def update_cross_flow_duration(stop_event: Event):
new_cross_flow_duration = cross_flow_duration = self.__pump.get_device_property(
ContiFlowProperty.CROSSFLOW_DURATION_S
)
while not stop_event.is_set():
new_cross_flow_duration = self.__pump.get_device_property(ContiFlowProperty.CROSSFLOW_DURATION_S)
if not math.isclose(new_cross_flow_duration, cross_flow_duration):
self.update_CrossFlowDuration(cross_flow_duration)
time.sleep(0.1)
def update_max_refill_flow(stop_event: Event):
new_max_refill_flow = max_refill_flow = self.__pump.get_device_property(ContiFlowProperty.MAX_REFILL_FLOW)
while not stop_event.is_set():
new_max_refill_flow = self.__pump.get_device_property(ContiFlowProperty.MAX_REFILL_FLOW)
if not math.isclose(new_max_refill_flow, max_refill_flow):
self.update_MaxRefillFlowRate(max_refill_flow)
time.sleep(0.1)
def update_min_flow_rate(stop_event: Event):
new_min_flow_rate = min_flow_rate = self.__pump.get_device_property(ContiFlowProperty.MIN_PUMP_FLOW)
while not stop_event.is_set():
new_min_flow_rate = self.__pump.get_device_property(ContiFlowProperty.MIN_PUMP_FLOW)
if not math.isclose(new_min_flow_rate, min_flow_rate):
self.update_MinFlowRate(min_flow_rate)
time.sleep(0.1)
def update_overlap_duration(stop_event: Event):
new_overlap_duration = overlap_duration = self.__pump.get_device_property(
ContiFlowProperty.OVERLAP_DURATION_S
)
while not stop_event.is_set():
new_overlap_duration = self.__pump.get_device_property(ContiFlowProperty.OVERLAP_DURATION_S)
if not math.isclose(new_overlap_duration, overlap_duration):
self.update_OverlapDuration(overlap_duration)
time.sleep(0.1)
def update_refill_flow_rate(stop_event: Event):
new_refill_flow_rate = refill_flow_rate = self.__pump.get_device_property(ContiFlowProperty.REFILL_FLOW)
while not stop_event.is_set():
new_refill_flow_rate = self.__pump.get_device_property(ContiFlowProperty.REFILL_FLOW)
if not math.isclose(new_refill_flow_rate, refill_flow_rate):
self.update_RefillFlowRate(refill_flow_rate)
time.sleep(0.1)
def update_switching_mode(stop_event: Event):
new_switching_mode = switching_mode = self.__pump.get_device_property(ContiFlowProperty.SWITCHING_MODE)
while not stop_event.is_set():
new_switching_mode = self.__pump.get_device_property(ContiFlowProperty.SWITCHING_MODE)
if new_switching_mode != switching_mode:
self.update_SwitchingMode(invert_dict(self.__ALLOWED_SWITCHING_MODES)[switching_mode])
time.sleep(0.1)
# initial values
self.update_CrossFlowDuration(self.__pump.get_device_property(ContiFlowProperty.CROSSFLOW_DURATION_S))
self.update_MaxRefillFlowRate(self.__pump.get_device_property(ContiFlowProperty.MAX_REFILL_FLOW))
self.update_MinFlowRate(self.__pump.get_device_property(ContiFlowProperty.MIN_PUMP_FLOW))
self.update_OverlapDuration(self.__pump.get_device_property(ContiFlowProperty.OVERLAP_DURATION_S))
self.update_RefillFlowRate(self.__pump.get_device_property(ContiFlowProperty.REFILL_FLOW))
self.update_SwitchingMode(invert_dict(self.__ALLOWED_SWITCHING_MODES).get(ContiFlowProperty.SWITCHING_MODE))
executor.submit(update_cross_flow_duration, self.__stop_event)
executor.submit(update_max_refill_flow, self.__stop_event)
executor.submit(update_min_flow_rate, self.__stop_event)
executor.submit(update_overlap_duration, self.__stop_event)
executor.submit(update_refill_flow_rate, self.__stop_event)
executor.submit(update_switching_mode, self.__stop_event)
def SetSwitchingMode(
self, SwitchingMode: str, *, metadata: Dict[FullyQualifiedIdentifier, Any]
) -> SetSwitchingMode_Responses:
try:
self.__pump.set_device_property(
ContiFlowProperty.SWITCHING_MODE, self.__ALLOWED_SWITCHING_MODES.get(SwitchingMode)
)
except KeyError:
raise ValidationError(
ContinuousFlowConfigurationServiceFeature["SetSwitchingMode"].parameters.fields[0],
"The given value for the Contiflow Switching Mode is invalid. Allowed values are: {}".format(
", ".join(self.__ALLOWED_SWITCHING_MODES.keys())
),
)
def SetRefillFlowRate(
self, RefillFlowRate: float, *, metadata: Dict[FullyQualifiedIdentifier, Any]
) -> SetRefillFlowRate_Responses:
self.__pump.set_device_property(ContiFlowProperty.REFILL_FLOW, RefillFlowRate)
def SetCrossFlowDuration(
self, CrossFlowDuration: float, *, metadata: Dict[FullyQualifiedIdentifier, Any]
) -> SetCrossFlowDuration_Responses:
self.__pump.set_device_property(ContiFlowProperty.CROSSFLOW_DURATION_S, CrossFlowDuration)
def SetOverlapDuration(
self, OverlapDuration: float, *, metadata: Dict[FullyQualifiedIdentifier, Any]
) -> SetOverlapDuration_Responses:
self.__pump.set_device_property(ContiFlowProperty.OVERLAP_DURATION_S, OverlapDuration)
def stop(self) -> None:
self.__stop_event.set() | 0.562657 | 0.24197 |
import urllib.request
import json
import SettingsAndPreferences as settings
import sys
# Credentials and the channel name, read once from the bot's settings file.
oauth = settings.findValue("APIOauth").strip()
clientid = settings.findValue("ClientID").strip()
nick = settings.findValue("JOIN").strip()
# Cache of the most recently decoded API response (written by parseInfo so
# callers can read extra fields without another request).
lastData = {}
def parseInfo(url, lookfor):
    """GET `url` from the Twitch v5 (kraken) API and return one response field.

    The full decoded JSON payload is cached in the module-global `lastData`
    so callers can pull additional fields without a second request.

    :param url: full API endpoint URL
    :param lookfor: top-level key of the JSON response to return
    """
    global lastData
    request = urllib.request.Request(url, None, {
        'Accept': 'application/vnd.twitchtv.v5+json',
        'Client-ID': clientid,
        'Authorization': 'OAuth ' + oauth,
    })
    response = urllib.request.urlopen(request)
    payload = json.loads(response.read().decode("utf-8"))
    lastData = payload
    # Print level "6" enables raw API response dumps for debugging.
    if "6" in settings.findValue("printLevel"):
        print(payload)
    return payload[lookfor]
def getUserID(user):
    """Return the Twitch user ID of the authenticated account.

    NOTE(review): the ``user`` parameter is currently ignored — the /kraken/user
    endpoint always resolves the account behind the OAuth token. Confirm
    whether per-user lookup was intended.
    """
    url = "https://api.twitch.tv/kraken/user"
    return parseInfo(url, "_id")
def NewFollower(refresh=True):
    """Return the display name of the channel's most recent follower.

    FIX: honor the ``refresh`` flag the same way totalFollowers does. The
    module bottom calls ``NewFollower(False)`` immediately after
    ``totalFollowers()`` has cached the /follows response in ``lastData``,
    so the cached path is valid there and saves an API round-trip.

    :param refresh: when False, reuse the cached /follows response
    """
    if refresh:
        url = "https://api.twitch.tv/kraken/channels/"+ID+"/follows"
        users = parseInfo(url, "follows")
    else:
        users = lastData["follows"]
    return users[0]["user"]["display_name"]
def totalFollowers(refresh=True):
    """Return the channel's total follower count.

    :param refresh: when False, read the cached response instead of the API
    """
    if not refresh:
        return lastData["_total"]
    return parseInfo("https://api.twitch.tv/kraken/channels/"+ID+"/follows", "_total")
def totalSubs(refresh=True):
    """Return the channel's subscriber *points* total.

    Starts from the raw subscription count and adjusts per plan tier
    (the bottom-of-module output calls this value "sub points").

    NOTE(review): the ``refresh`` parameter is accepted but ignored — the
    endpoint is always queried (unlike totalFollowers). Confirm intent.
    """
    url = "https://api.twitch.tv/kraken/channels/"+ID+"/subscriptions"
    total = parseInfo(url, "_total")
    for sub in lastData["subscriptions"]:
        if sub["user"]["_id"]==ID:
            total-=1  # the broadcaster's own subscription does not count
        elif sub["sub_plan"] == "2000":
            total+=1  # tier-2 plan: one extra point on top of the base count
        elif sub["sub_plan"] == "3000":
            total+=5  # tier-3 plan: five extra points on top of the base count
    return total
# Resolve the channel ID, falling back to the configured value, then run a
# quick smoke test of the follower and subscription endpoints.
try:
    ID = getUserID(nick)
except Exception as exc:
    print(exc)
    print("Can't automatically fetch ID, using manual value")
    ID = settings.findValue("UserID")

followers = totalFollowers()
subs = totalSubs()

# Report API health; NewFollower(False) reuses the cached /follows response.
if followers:
    settings.levelprint("API working, "+str(followers)+" followers, most recent: "+NewFollower(False),0)
else:
    settings.levelprint("API not working (followers)",0)
if subs:
    settings.levelprint("API working, "+str(subs)+" sub points",0)
else:
    settings.levelprint("API not working (subs)",0)
import json
import SettingsAndPreferences as settings
import sys
oauth = settings.findValue("APIOauth").strip()
clientid = settings.findValue("ClientID").strip()
nick = settings.findValue("JOIN").strip()
lastData = {}
def parseInfo(url, lookfor):
headers={
'Accept': 'application/vnd.twitchtv.v5+json',
'Client-ID': clientid,
'Authorization': 'OAuth '+oauth,
}
req = urllib.request.Request(url, None, headers)
resp = urllib.request.urlopen(req)
data = json.loads(resp.read().decode("utf-8"))
global lastData
lastData = data
## userlist = []
## try:
## while lookfor=="data":
## cursor = data["pagination"]["cursor"]
## userlist+=data["data"]
## req = urllib.request.Request(url+"?cursor="+cursor, None, headers)
## resp = urllib.request.urlopen(req)
## data = json.loads(resp.read().decode("utf-8"))
## except Exception as e:
## print(e)
## return userlist
if "6" in settings.findValue("printLevel"):
print(data)
return data[lookfor]
def getUserID(user):
url = "https://api.twitch.tv/kraken/user"
return parseInfo(url, "_id")
def NewFollower(refresh=True):
url = "https://api.twitch.tv/kraken/channels/"+ID+"/follows"
users = parseInfo(url, "follows")
return users[0]["user"]["display_name"]
def totalFollowers(refresh=True):
if refresh:
url = "https://api.twitch.tv/kraken/channels/"+ID+"/follows"
return parseInfo(url, "_total")
return lastData["_total"]
def totalSubs(refresh=True):
url = "https://api.twitch.tv/kraken/channels/"+ID+"/subscriptions"
total = parseInfo(url, "_total")
for sub in lastData["subscriptions"]:
if sub["user"]["_id"]==ID:
total-=1
elif sub["sub_plan"] == "2000":
total+=1
elif sub["sub_plan"] == "3000":
total+=5
return total
try:
ID = getUserID(nick)
except Exception as e:
print(e)
print("Can't automatically fetch ID, using manual value")
ID = settings.findValue("UserID")
followers = totalFollowers()
subs = totalSubs()
if(followers):
settings.levelprint("API working, "+str(followers)+" followers, most recent: "+NewFollower(False),0)
else:
settings.levelprint("API not working (followers)",0)
if(subs):
settings.levelprint("API working, "+str(subs)+" sub points",0)
else:
settings.levelprint("API not working (subs)",0) | 0.081189 | 0.071819 |
from __future__ import absolute_import
from flytekit import configuration as _config
from flytekit.clis.sdk_in_container import constants as _constants
from flytekit.clis.sdk_in_container import pyflyte as _pyflyte
from flytekit.tools import module_loader as _module_loader
from click.testing import CliRunner
import mock as _mock
import pytest
import os
import sys
def _fake_module_load(names):
    """Stand-in for flytekit's ``module_loader.iterate_modules``.

    Asserts the loader is asked for exactly the ``common.workflows`` package
    and yields the real ``simple`` workflows module from the test tree.
    """
    assert names == ('common.workflows',)
    from common.workflows import simple
    yield simple
# FIX: pytest.yield_fixture is deprecated and removed in modern pytest;
# pytest.fixture supports yield-style fixtures directly.
@pytest.fixture(scope='function',
                params=[
                    os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../common/configs/local.config'),
                    '/foo/bar',
                    None
                ])
def mock_ctx(request):
    """Yield a mocked click context for pyflyte commands.

    Parametrized over three config paths (valid file, bogus path, None) and
    runs with the test tree on sys.path and flytekit module loading patched
    to yield the sample workflows.
    """
    with _config.TemporaryConfiguration(request.param):
        sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../..'))
        try:
            with _mock.patch('flytekit.tools.module_loader.iterate_modules') as mock_module_load:
                mock_module_load.side_effect = _fake_module_load
                ctx = _mock.MagicMock()
                ctx.obj = {
                    _constants.CTX_PACKAGES: ('common.workflows',),
                    _constants.CTX_PROJECT: 'tests',
                    _constants.CTX_DOMAIN: 'unit',
                    _constants.CTX_VERSION: 'version'
                }
                yield ctx
        finally:
            sys.path.pop()
@pytest.fixture
def mock_clirunner(monkeypatch):
    """Yield a helper that invokes pyflyte with the standard test arguments.

    The helper runs inside a temporary flyte configuration, with the test
    tree prepended to sys.path and module loading patched to yield the
    sample workflows. Any exception raised by the command is re-raised.
    """
    def invoke(*cli_args, **cli_kwargs):
        common_args = [
            '-p', 'tests',
            '-d', 'unit',
            '-v', 'version',
            '--pkgs', 'common.workflows',
        ]
        outcome = CliRunner().invoke(_pyflyte.main, common_args + list(cli_args), **cli_kwargs)
        if outcome.exception:
            raise outcome.exception
        return outcome

    tests_root = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../..')
    with _config.TemporaryConfiguration(os.path.join(tests_root, 'common/configs/local.config')):
        monkeypatch.syspath_prepend(tests_root)
        monkeypatch.setattr(_module_loader, 'iterate_modules', _fake_module_load)
        yield invoke
from flytekit import configuration as _config
from flytekit.clis.sdk_in_container import constants as _constants
from flytekit.clis.sdk_in_container import pyflyte as _pyflyte
from flytekit.tools import module_loader as _module_loader
from click.testing import CliRunner
import mock as _mock
import pytest
import os
import sys
def _fake_module_load(names):
assert names == ('common.workflows',)
from common.workflows import simple
yield simple
@pytest.yield_fixture(scope='function',
params=[
os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../common/configs/local.config'),
'/foo/bar',
None
])
def mock_ctx(request):
with _config.TemporaryConfiguration(request.param):
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../..'))
try:
with _mock.patch('flytekit.tools.module_loader.iterate_modules') as mock_module_load:
mock_module_load.side_effect = _fake_module_load
ctx = _mock.MagicMock()
ctx.obj = {
_constants.CTX_PACKAGES: ('common.workflows',),
_constants.CTX_PROJECT: 'tests',
_constants.CTX_DOMAIN: 'unit',
_constants.CTX_VERSION: 'version'
}
yield ctx
finally:
sys.path.pop()
@pytest.fixture
def mock_clirunner(monkeypatch):
def f(*args, **kwargs):
runner = CliRunner()
base_args = [
'-p', 'tests',
'-d', 'unit',
'-v', 'version',
'--pkgs', 'common.workflows',
]
result = runner.invoke(_pyflyte.main, base_args + list(args), **kwargs)
if result.exception:
raise result.exception
return result
tests_dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../..')
config_path = os.path.join(tests_dir_path, 'common/configs/local.config')
with _config.TemporaryConfiguration(config_path):
monkeypatch.syspath_prepend(tests_dir_path)
monkeypatch.setattr(_module_loader, 'iterate_modules', _fake_module_load)
yield f | 0.396302 | 0.105303 |
import numpy as np
import cv2
import h5py
import glob
import os
from keras.models import model_from_json
from retipy import retina
from retipy import landmarks as l
"""Module with operations related to classify vessels into arteries and veins."""
_base_directory_training = 'retipy/resources/images/drive/training/'
_base_directory_test = 'retipy/resources/images/drive/test/'
_base_directory_model = os.path.join(os.path.dirname(__file__), 'model/')
def _vessel_widths(center_img: np.ndarray, segmented_img: np.ndarray):
    """Measure the vessel width at every skeleton (centerline) pixel.

    For each pixel where ``center_img`` is 255, rays are grown simultaneously
    in four directions (0/45/90/135 degrees) and their opposites
    (180/225/270/315) through the segmented vessel mask. The first direction
    whose BOTH rays hit background determines the recorded cross-section.

    :param center_img: skeletonized vessel image (centerline pixels == 255)
    :param segmented_img: binary vessel segmentation (background == 0)
    :return: list of ``[row, col, angle, extent, opposite_extent]`` entries

    NOTE(review): the four opposite-direction counters start at 1 while the
    primary ones start at 0 — presumably to avoid double-counting the center
    pixel, but confirm. Neighbours are indexed without bounds checks, so a
    centerline pixel at the image border would raise IndexError; presumably
    the FOV mask keeps vessels away from the border — verify.
    """
    image = segmented_img.copy()
    widths = []
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            if center_img[i, j] == 255:
                # Ray extents per direction (degrees).
                w0 = 0
                w45 = 0
                w90 = 0
                w135 = 0
                w180 = 1
                w225 = 1
                w270 = 1
                w315 = 1
                while True:
                    # Grow every ray that is still inside the vessel mask.
                    if image[i, j + w0 + 1] != 0:
                        w0 += 1
                    if image[i, j - w180 - 1] != 0:
                        w180 += 1
                    if image[i - w90 - 1, j] != 0:
                        w90 += 1
                    if image[i + w270 + 1, j] != 0:
                        w270 += 1
                    if image[i - w45 - 1, j + w45 + 1] != 0:
                        w45 += 1
                    if image[i + w225 + 1, j - w225 - 1] != 0:
                        w225 += 1
                    if image[i - w135 - 1, j - w135 - 1] != 0:
                        w135 += 1
                    if image[i + w315 + 1, j + w315 + 1] != 0:
                        w315 += 1
                    # Record the first axis that reached background on both sides.
                    if image[i, j + w0 + 1] == 0 and image[i, j - w180 - 1] == 0:
                        widths.append([i, j, 0, w0, w180])
                        break
                    elif image[i - w90 - 1, j] == 0 and image[i + w270 + 1, j] == 0:
                        widths.append([i, j, 90, w90, w270])
                        break
                    elif image[i - w45 - 1, j + w45 + 1] == 0 and image[i + w225 + 1, j - w225 - 1] == 0:
                        widths.append([i, j, 45, w45, w225])
                        break
                    elif image[i - w135 - 1, j - w135 - 1] == 0 and image[i + w315 + 1, j + w315 + 1] == 0:
                        widths.append([i, j, 135, w135, w315])
                        break
    return widths
def _local_binary_pattern(window: list):
x = [0, 0, 1, 2, 2, 2, 1, 0]
y = [1, 2, 2, 2, 1, 0, 0, 0]
decimal = 0
center = window[1][1]
for i in range(0, 8):
if center >= window[x[i]][y[i]]:
decimal += np.power(2, i)
return decimal
def _preparing_data(widths: list, sections: int, original_img: np.ndarray, classified_img: np.ndarray,
                    bright_img: np.ndarray, gray_img: np.ndarray):
    """Build feature vectors for the measured vessel widths.

    Training mode (``classified_img`` given): only centerline pixels marked
    pure [255, 0, 0] (label 0) or pure [0, 0, 255] (label 1) in the
    ground-truth image are used. Inference mode (``classified_img`` is None):
    every point is used with label -1. In both modes points with a total
    width below 2 pixels are skipped.

    NOTE(review): colours are read as OpenCV BGR triplets, so [255, 0, 0] is
    blue and [0, 0, 255] is red — presumably vein/artery markings; confirm
    against the ground-truth images.

    :return: list of feature vectors as produced by ``_vector``
    """
    f_vectors = []
    if classified_img is not None:
        for w in widths:
            w0 = w[0]
            w1 = w[1]
            if (np.array_equal(classified_img[w0, w1], [255, 0, 0]) or np.array_equal(classified_img[w0, w1], [0, 0, 255])) \
                    and ((w[3]+w[4]) >= 2):
                if np.array_equal(classified_img[w0, w1], [255, 0, 0]):
                    out = 0
                elif np.array_equal(classified_img[w0, w1], [0, 0, 255]):
                    out = 1
                iv = _vector(w, sections, original_img, bright_img, gray_img, out)
                f_vectors.append(iv)
    else:
        for w in widths:
            if (w[3] + w[4]) >= 2:
                iv = _vector(w, sections, original_img, bright_img, gray_img, -1)
                f_vectors.append(iv)
    return f_vectors
def _vector(w: list, sections: int, original_img: np.ndarray, bright_img: np.ndarray, gray_img: np.ndarray, out: int):
    """Build one feature vector sampled along a vessel cross-section.

    The profile runs perpendicular to the vessel through the centerline pixel
    ``(w[0], w[1])``, sampling ``sections + 1`` evenly spaced points. At each
    point the BGR colour, the brightness value and the 3x3 local binary
    pattern are appended; the vector ends with the total width and the label.

    Refactored: the four copy-pasted angle branches are unified via (row, col)
    direction steps — the sampled coordinates are identical to the original.

    :param w: ``[row, col, angle, extent, opposite_extent]`` from _vessel_widths
    :param sections: number of sections (sections + 1 sample points)
    :param out: class label (-1 when unknown)
    :return: flat feature list, or [] for an unrecognized angle
    """
    # (row step, col step) per profile angle; the walk starts w[3] steps
    # against this direction and advances by `section` pixels per sample.
    directions = {
        0: (0, -1),
        45: (1, -1),
        90: (1, 0),
        135: (1, 1),
    }
    iv = []
    row, col, angle = w[0], w[1], w[2]
    if angle not in directions:
        return iv  # unknown orientation: empty vector (matches original)
    dr, dc = directions[angle]
    start_r = row - w[3] * dr
    start_c = col - w[3] * dc
    section = (w[3] + w[4]) / sections
    iv.extend([row, col])
    for i in range(0, sections + 1):
        r = int(np.floor(start_r + i * section * dr))
        c = int(np.floor(start_c + i * section * dc))
        iv.extend(original_img[r, c])    # BGR colour sample
        iv.extend([bright_img[r, c]])    # brightness (LAB L channel)
        iv.extend([_local_binary_pattern(gray_img[r - 1:r + 2, c - 1:c + 2])])
    iv.extend([w[3] + w[4]])             # total vessel width
    iv.extend([out])                     # class label
    return iv
def _feature_vectors():
    """Build the training feature matrix from the DRIVE training images.

    For every ``*.tif`` fundus image: threshold and skeletonize the manual
    vessel segmentation, mask out the optic disc (approximated by the
    brightest gray pixel), measure vessel widths along the skeleton and
    extract labelled cross-section features from the artery/vein ground
    truth. The matrix is persisted to HDF5 and also returned.

    :return: list of feature vectors (written to
             model/vector_features_interpolation.h5, dataset 'training')
    """
    directory = _base_directory_training + "original/"
    features = []
    for filename in sorted(glob.glob(os.path.join(directory, '*.tif'))):
        name = os.path.basename(filename)
        name = name.split(".")[0]
        original = cv2.imread(filename, 1)
        gray = cv2.imread(filename, 0)
        # Brightest point approximates the optic disc centre; masked out below.
        (minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(gray)
        lab = cv2.cvtColor(original, cv2.COLOR_BGR2LAB)
        L, A, B = cv2.split(lab)
        manual = retina.Retina(None, _base_directory_training + "manual/" + name + ".png")
        manual.threshold_image()
        thr_img = manual.get_uint_image()
        cv2.circle(thr_img, maxLoc, 60, 0, -1)
        manual.skeletonization()
        skeleton_img = manual.get_uint_image()
        cv2.circle(skeleton_img, maxLoc, 60, 0, -1)
        landmarks, segmented_skeleton_img = l.potential_landmarks(skeleton_img, 3)
        av = cv2.imread(_base_directory_training + "av/" + name + ".png", 1)
        widths = _vessel_widths(segmented_skeleton_img, thr_img)
        data = _preparing_data(widths, 6, original, av, L, gray)
        features.extend(data)
    # BUGFIX: use a context manager so the HDF5 file is flushed and closed —
    # the original left the handle open.
    with h5py.File(_base_directory_model + 'vector_features_interpolation.h5', 'w') as h5f:
        h5f.create_dataset('training', data=features)
    return features
def _loading_model(original: np.ndarray, threshold: np.ndarray, av: np.ndarray, size: int):
    """Load the trained artery/vein Keras model and classify vessel points.

    Rebuilds the same preprocessing pipeline as training (threshold,
    optic-disc masking, skeletonization, width measurement, feature
    extraction) and predicts a score for every usable centerline pixel.

    :param original: BGR fundus image
    :param threshold: vessel segmentation passed to retina.Retina
    :param av: artery/vein ground truth image or None (inference)
    :param size: end column (exclusive) of the feature slice fed to the model
    :return: (features, skeleton image, thresholded image, prediction image);
             the prediction image holds the model score per pixel, with 3
             marking "no prediction"
    """
    # Load model of the neuronal network
    json_file = open(_base_directory_model + 'modelVA.json', "r")
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    # Load weights
    loaded_model.load_weights(_base_directory_model + 'modelVA.h5')
    gray = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
    # Brightest gray pixel approximates the optic disc centre; masked out below.
    (minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(gray)
    lab = cv2.cvtColor(original, cv2.COLOR_BGR2LAB)
    L, A, B = cv2.split(lab)
    manual = retina.Retina(threshold, None)
    manual.threshold_image()
    thr_img = manual.get_uint_image()
    cv2.circle(thr_img, maxLoc, 60, 0, -1)
    manual.skeletonization()
    skeleton_img = manual.get_uint_image()
    cv2.circle(skeleton_img, maxLoc, 60, 0, -1)
    landmarks, segmented_skeleton_img = l.potential_landmarks(skeleton_img, 3)
    widths = _vessel_widths(segmented_skeleton_img, thr_img)
    data = _preparing_data(widths, 6, original, av, L, gray)
    features = np.array(data)
    # 3 == "no prediction" sentinel (model scores are in [0, 1]).
    predict_img = np.full((segmented_skeleton_img.shape[0], segmented_skeleton_img.shape[1]), 3, dtype=float)
    for row in range(0, features.shape[0]):
        # Columns [2:size] hold the raw features; [0:2] are pixel coordinates.
        prediction = loaded_model.predict(np.divide(features[row:row + 1, 2:size], 255), batch_size=1)
        predict_img[features[row, 0], features[row, 1]] = prediction
    return features, segmented_skeleton_img, thr_img, predict_img
def _validating_model(features: np.ndarray, skeleton_img: np.ndarray, original_img: np.ndarray, predicted_img: np.ndarray, size: int, av: int):
    """Turn the raw network output into a hard artery/vein labeling.

    With av == 0 a fixed decision threshold of 0.8 is applied; otherwise
    1000 thresholds (0.000 .. 0.999) are swept and the one with the best
    accuracy against the labels in column ``size`` of *features* is kept.

    Pixel classes in predict_copy: 0 = no prediction, 1 = vein (red),
    2 = artery (blue).  Colors are BGR.

    :return: (max_acc, rgb_prediction, network_prediction, original) --
        max_acc stays -1 in the fixed-threshold branch.
    """
    max_acc = -1
    rgb_prediction = []
    network_prediction = []
    original = []
    if av == 0:
        # Fixed 0.8 threshold; no ground-truth labels used.
        manual_copy = retina.Retina(skeleton_img, None)
        manual_copy.bin_to_bgr()
        manual_copy = manual_copy.get_uint_image()
        original_copy = original_img.copy()
        predict_copy = predicted_img.copy()
        # 3 marks "never predicted" (see _loading_model); remap it to 0
        # after the two class masks were computed.
        mask0 = predict_copy == 3
        mask1 = (predict_copy >= 0) & (predict_copy < 0.8)
        mask2 = (predict_copy >= 0.8) & (predict_copy <= 1)
        predict_copy[mask1] = 1
        predict_copy[mask2] = 2
        predict_copy[mask0] = 0
        for row in range(0, features.shape[0]):
            # Columns 0/1 of a feature row are the pixel coordinates.
            if predict_copy[features[row, 0], features[row, 1]] == 2:
                manual_copy[features[row, 0], features[row, 1]] = [0, 0, 255]
                original_copy[features[row, 0], features[row, 1]] = [0, 0, 255]
            elif predict_copy[features[row, 0], features[row, 1]] == 1:
                manual_copy[features[row, 0], features[row, 1]] = [255, 0, 0]
                original_copy[features[row, 0], features[row, 1]] = [255, 0, 0]
        rgb_prediction = manual_copy
        network_prediction = predict_copy
        original = original_copy
    else:
        # Sweep thresholds k = 0.000 .. 0.999 and keep the best accuracy.
        for i in range(0, 1000):
            manual_copy = retina.Retina(skeleton_img, None)
            manual_copy.bin_to_bgr()
            manual_copy = manual_copy.get_uint_image()
            original_copy = original_img.copy()
            predict_copy = predicted_img.copy()
            k = 0.001*i
            mask0 = predict_copy == 3
            mask1 = (predict_copy >= 0) & (predict_copy < k)
            mask2 = (predict_copy >= k) & (predict_copy <= 1)
            predict_copy[mask1] = 1
            predict_copy[mask2] = 2
            predict_copy[mask0] = 0
            true_positive = 0
            true_negative = 0
            false_positive = 0
            false_negative = 0
            for row in range(0, features.shape[0]):
                if predict_copy[features[row, 0], features[row, 1]] == 2:
                    manual_copy[features[row, 0], features[row, 1]] = [0, 0, 255]
                    original_copy[features[row, 0], features[row, 1]] = [0, 0, 255]
                elif predict_copy[features[row, 0], features[row, 1]] == 1:
                    manual_copy[features[row, 0], features[row, 1]] = [255, 0, 0]
                    original_copy[features[row, 0], features[row, 1]] = [255, 0, 0]
                # Column ``size`` holds the ground-truth label (0/1).
                if int(predict_copy[features[row, 0], features[row, 1]]) == 1 and features[row, size] == 0:
                    true_negative += 1
                elif int(predict_copy[features[row, 0], features[row, 1]]) == 2 and features[row, size] == 1:
                    true_positive += 1
                elif int(predict_copy[features[row, 0], features[row, 1]]) == 2 and features[row, size] == 0:
                    false_positive += 1
                elif int(predict_copy[features[row, 0], features[row, 1]]) == 1 and features[row, size] == 1:
                    false_negative += 1
            accy = (100 * (true_positive+true_negative)) / features.shape[0]
            if max_acc < accy:
                max_acc = accy
                rgb_prediction = manual_copy
                network_prediction = predict_copy
                original = original_copy
    return max_acc, rgb_prediction, network_prediction, original
def _homogenize(connected_components: np.ndarray,
network_prediction: np.ndarray,
rgb_prediction: np.ndarray,
original: np.ndarray):
# Imagen en con 0, 1, 2
result_image = network_prediction.copy()
# Imagen a color del resultado de la red
final_image = rgb_prediction.copy()
img_rgb = original.copy()
for x in range(1, connected_components[0]):
mask = connected_components[1] != x
mask2 = connected_components[1] == x
result_image_copy = result_image.copy()
result_image_copy[mask] = 0
n_veins = np.count_nonzero(result_image_copy == 1)
n_arteries = np.count_nonzero(result_image_copy == 2)
if n_veins == 0 and n_arteries == 0:
pass
elif n_veins == n_arteries:
pass
elif (n_veins == 1 and n_arteries == 0) or (n_veins == 0 and n_arteries == 1):
pass
elif n_veins > n_arteries:
result_image[mask2] = 1
else:
result_image[mask2] = 2
for row in range(0, result_image.shape[0]):
for col in range(0, result_image.shape[1]):
if result_image[row, col] == 1:
final_image[row, col] = [255, 0, 0]
img_rgb[row, col] = [255, 0, 0]
elif result_image[row, col] == 2:
final_image[row, col] = [0, 0, 255]
img_rgb[row, col] = [0, 0, 255]
return final_image, img_rgb
def _box_labels(bifurcations: list, c_components: np.ndarray):
connected_vessels = []
for b in bifurcations:
labels = c_components[1]
box = labels[b[1]-1:b[3]+1, b[0]-1:b[2]+1]
unique = np.unique(box)
if len(unique) == 4:
connected_vessels.append(unique[1:4])
return connected_vessels
def _average(widths: list):
acum = 0
for w in widths:
acum += w[1]+w[2]
acum /= len(widths)
return acum
def _normalize_indexes(connected_matrix: np.ndarray, label: int):
labeled = connected_matrix[1]
indexes = np.where(labeled == label)
formatted_index = []
for i in range(0, len(indexes[0])):
formatted_index.append([indexes[0][i], indexes[1][i]])
return formatted_index
def _average_width(connected_matrix: np.ndarray, connected: list, thr_img: np.ndarray, final_image: np.ndarray):
    """For every segment label in *connected*, append its mean vessel
    width followed by the color of its middle centerline pixel.

    :return: flat list [width_0, color_0, width_1, color_1, ...].
    """
    result = []
    for segment in connected:
        pixels = _normalize_indexes(connected_matrix, segment)
        segment_widths = l.vessel_width(thr_img, pixels)
        mid_row, mid_col = pixels[len(pixels) // 2]
        result.append(_average(segment_widths))
        result.append(final_image[mid_row, mid_col])
    return result
def _coloring(connected_matrix: np.ndarray, box: list, rgb: list, skeleton: np.ndarray):
for segment_label in box:
formatted_indexes = _normalize_indexes(connected_matrix, segment_label)
for index in formatted_indexes:
skeleton[index[0], index[1]] = rgb
return skeleton[index[0], index[1]]
def _postprocessing(connected_components: np.ndarray, thr_img: np.ndarray, bifurcs: list, final_img: np.ndarray):
    """Resolve conflicting colors around bifurcations.

    For each bifurcation whose box touches exactly three vessel segments,
    the segment selected by the width comparison below wins and its color
    is painted over all three segments -- unless exactly one of them is
    colored, or the winning color is white (uncolored).

    :return: recolored copy of *final_img*.
    """
    rgb = final_img.copy()
    connected_vessels = _box_labels(bifurcs, connected_components)
    for triplet in connected_vessels:
        # Flat list [width_0, color_0, width_1, color_1, width_2, color_2].
        width_and_color = _average_width(connected_components, triplet, thr_img, rgb)
        red = [0, 0]    # [count, last width] of artery-colored segments
        blue = [0, 0]   # [count, last width] of vein-colored segments
        maxwidth = [-1, -1]  # [best width, its color]
        for i in [1, 3, 5]:
            width = width_and_color[i - 1]
            # NOTE(review): the 1.75 factor lets later segments displace
            # the current maximum even when slightly narrower -- confirm
            # this bias is intentional.
            if width*1.75 > maxwidth[0]:
                maxwidth[0] = width
                maxwidth[1] = width_and_color[i]
            if all(width_and_color[i] == [255, 0, 0]):
                blue[0] += 1
                blue[1] = width
            elif all(width_and_color[i] == [0, 0, 255]):
                red[0] += 1
                red[1] = width
        # A lone colored segment is left untouched.
        if (red[0]+blue[0]) == 1:
            pass
        else:
            if not(all(maxwidth[1] == [255, 255, 255])):
                _coloring(connected_components, triplet, maxwidth[1], rgb)
    return rgb
def _accuracy(post_img: np.ndarray, segmented_img: np.ndarray, gt_img: np.ndarray):
counter = 0
TN = 0
FN = 0
FP = 0
TP = 0
for it_x in range(0, segmented_img.shape[0]):
for it_y in range(0, segmented_img.shape[1]):
if segmented_img[it_x, it_y] == 255:
if (all(gt_img[it_x, it_y] == [255, 0, 0]) or all(gt_img[it_x, it_y] == [0, 0, 255])) and \
not(all(post_img[it_x, it_y] == [255, 255, 255])):
counter += 1
if all(post_img[it_x, it_y] == [0, 0, 255]) and all(gt_img[it_x, it_y] == [0, 0, 255]):
TP += 1
elif all(post_img[it_x, it_y] == [255, 0, 0]) and all(gt_img[it_x, it_y] == [255, 0, 0]):
TN += 1
elif all(post_img[it_x, it_y] == [0, 0, 255]) and all(gt_img[it_x, it_y] == [255, 0, 0]):
FP += 1
elif all(post_img[it_x, it_y] == [255, 0, 0]) and all(gt_img[it_x, it_y] == [0, 0, 255]):
FN += 1
sensitivity = TP / (TP + FN)
specificity = TN / (FP + TN)
accy = (TP + TN) / (TP + TN + FP + FN)
return [accy, sensitivity, specificity]
def classification(original_img: np.ndarray, manual_img: np.ndarray):
    """End-to-end artery/vein classification of a fundus image.

    :param original_img: BGR fundus photograph.
    :param manual_img: binary vessel segmentation of the same image.
    :return: BGR image with centerline pixels colored blue (artery) /
        red (vein) after homogenization and post-processing.
    """
    manual = manual_img
    bifurcations, crossings = l.classification(manual, 0)
    # 38: feature-vector width used when training modelVA -- TODO confirm.
    features, sectioned_img, thr_img, predict_img = _loading_model(original_img, manual, None, 38)
    acc, rgb, network, original = _validating_model(features, sectioned_img, original_img, predict_img, 38, 0)
    connected_components = cv2.connectedComponentsWithStats(sectioned_img.astype(np.uint8), 4, cv2.CV_32S)
    final_img, img_original = _homogenize(connected_components, network, rgb, original)
    post_img = _postprocessing(connected_components, thr_img, bifurcations, img_original)
    return post_img
# NOTE(review): the line above was fused with dataset-dump metadata in the
# extracted source; restored the return and the import it had swallowed.
import numpy as np
import cv2
import h5py
import glob
import os
from keras.models import model_from_json
from retipy import retina
from retipy import landmarks as l
"""Module with operations related to classify vessels into arteries and veins."""
_base_directory_training = 'retipy/resources/images/drive/training/'
_base_directory_test = 'retipy/resources/images/drive/test/'
_base_directory_model = os.path.join(os.path.dirname(__file__), 'model/')
def _vessel_widths(center_img: np.ndarray, segmented_img: np.ndarray):
    """Measure the vessel width at every centerline (skeleton) pixel.

    For each 255-valued pixel of *center_img*, rays are grown through the
    binary segmentation *segmented_img* in the four direction pairs
    0/180, 45/225, 90/270 and 135/315 degrees; the first pair whose both
    rays hit background determines the width.

    :return: list of [row, col, angle, width_fwd, width_back] entries,
        with angle in {0, 45, 90, 135}.
    """
    image = segmented_img.copy()
    widths = []
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            if center_img[i, j] == 255:
                # Forward rays start at 0, backward rays at 1 so the two
                # halves do not both count the center pixel.
                w0 = 0
                w45 = 0
                w90 = 0
                w135 = 0
                w180 = 1
                w225 = 1
                w270 = 1
                w315 = 1
                # NOTE(review): indexes like j + w0 + 1 are never clamped;
                # assumes vessels do not touch the image border (negative
                # indexes would silently wrap around) -- confirm.
                while True:
                    if image[i, j + w0 + 1] != 0:
                        w0 += 1
                    if image[i, j - w180 - 1] != 0:
                        w180 += 1
                    if image[i - w90 - 1, j] != 0:
                        w90 += 1
                    if image[i + w270 + 1, j] != 0:
                        w270 += 1
                    if image[i - w45 - 1, j + w45 + 1] != 0:
                        w45 += 1
                    if image[i + w225 + 1, j - w225 - 1] != 0:
                        w225 += 1
                    if image[i - w135 - 1, j - w135 - 1] != 0:
                        w135 += 1
                    if image[i + w315 + 1, j + w315 + 1] != 0:
                        w315 += 1
                    # Stop at the first direction pair fully outside the
                    # vessel; its two ray lengths become the width entry.
                    if image[i, j + w0 + 1] == 0 and image[i, j - w180 - 1] == 0:
                        widths.append([i, j, 0, w0, w180])
                        break
                    elif image[i - w90 - 1, j] == 0 and image[i + w270 + 1, j] == 0:
                        widths.append([i, j, 90, w90, w270])
                        break
                    elif image[i - w45 - 1, j + w45 + 1] == 0 and image[i + w225 + 1, j - w225 - 1] == 0:
                        widths.append([i, j, 45, w45, w225])
                        break
                    elif image[i - w135 - 1, j - w135 - 1] == 0 and image[i + w315 + 1, j + w315 + 1] == 0:
                        widths.append([i, j, 135, w135, w315])
                        break
    return widths
def _local_binary_pattern(window: list):
x = [0, 0, 1, 2, 2, 2, 1, 0]
y = [1, 2, 2, 2, 1, 0, 0, 0]
decimal = 0
center = window[1][1]
for i in range(0, 8):
if center >= window[x[i]][y[i]]:
decimal += np.power(2, i)
return decimal
def _preparing_data(widths: list, sections: int, original_img: np.ndarray, classified_img: np.ndarray,
                    bright_img: np.ndarray, gray_img: np.ndarray):
    """Build one feature vector per centerline pixel of total width >= 2.

    With a ground-truth image the label (0 = red/vein, 1 = blue/artery)
    is appended and unlabeled pixels are skipped; without one every
    qualifying pixel gets label -1.
    """
    f_vectors = []
    if classified_img is None:
        for w in widths:
            if (w[3] + w[4]) >= 2:
                f_vectors.append(_vector(w, sections, original_img, bright_img, gray_img, -1))
        return f_vectors
    for w in widths:
        if (w[3] + w[4]) < 2:
            continue
        pixel = classified_img[w[0], w[1]]
        if np.array_equal(pixel, [255, 0, 0]):
            out = 0
        elif np.array_equal(pixel, [0, 0, 255]):
            out = 1
        else:
            continue  # pixel has no artery/vein ground-truth color
        f_vectors.append(_vector(w, sections, original_img, bright_img, gray_img, out))
    return f_vectors
def _vector(w: list, sections: int, original_img: np.ndarray, bright_img: np.ndarray, gray_img: np.ndarray, out: int):
    """Build the feature vector for one centerline pixel.

    *w* is [row, col, angle, width_fwd, width_back] from _vessel_widths;
    the cross-section is sampled at ``sections + 1`` evenly spaced points
    and for each point the BGR color, the brightness channel value and
    the 3x3 LBP code are appended.  Layout:
    [row, col, (B, G, R, bright, LBP) * (sections + 1), total_width, out].

    :param out: class label (0 vein, 1 artery, -1 unlabeled).
    :return: the feature list, or [] for an unsupported angle.
    """
    iv = []
    w0, w1, angle = w[0], w[1], w[2]
    section = (w[3] + w[4]) / sections
    # Start point of the cross-section and per-step direction for each
    # supported orientation: the four near-identical branches of the
    # original collapsed into one table (identical sampling coordinates).
    starts = {
        0: (w0, w1 + w[3], 0, -1),
        45: (w0 - w[3], w1 + w[3], 1, -1),
        90: (w0 - w[3], w1, 1, 0),
        135: (w0 - w[3], w1 - w[3], 1, 1),
    }
    if angle not in starts:
        return iv
    x0, y0, dx, dy = starts[angle]
    iv.extend([w0, w1])
    for i in range(0, sections + 1):
        stepx = int(np.floor(x0 + dx * (i * section)))
        stepy = int(np.floor(y0 + dy * (i * section)))
        iv.extend(original_img[stepx, stepy])
        iv.extend([bright_img[stepx, stepy]])
        iv.extend([_local_binary_pattern(gray_img[stepx - 1:stepx + 2, stepy - 1:stepy + 2])])
    iv.extend([w[3] + w[4]])
    iv.extend([out])
    return iv
def _feature_vectors():
    """Build the training feature matrix from every DRIVE training image
    and cache it to vector_features_interpolation.h5.

    :return: list of feature vectors (one per usable centerline pixel).
    """
    directory = _base_directory_training + "original/"
    features = []
    for filename in sorted(glob.glob(os.path.join(directory, '*.tif'))):
        name = os.path.basename(filename).split(".")[0]
        original = cv2.imread(filename, 1)
        gray = cv2.imread(filename, 0)
        # Brightest spot approximates the optic disc, blanked out below.
        (minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(gray)
        lab = cv2.cvtColor(original, cv2.COLOR_BGR2LAB)
        L, A, B = cv2.split(lab)
        manual = retina.Retina(None, _base_directory_training + "manual/" + name + ".png")
        manual.threshold_image()
        thr_img = manual.get_uint_image()
        cv2.circle(thr_img, maxLoc, 60, 0, -1)
        manual.skeletonization()
        skeleton_img = manual.get_uint_image()
        cv2.circle(skeleton_img, maxLoc, 60, 0, -1)
        landmarks, segmented_skeleton_img = l.potential_landmarks(skeleton_img, 3)
        av = cv2.imread(_base_directory_training + "av/" + name + ".png", 1)
        widths = _vessel_widths(segmented_skeleton_img, thr_img)
        data = _preparing_data(widths, 6, original, av, L, gray)
        features.extend(data)
    # Write the cache once, after all images are processed: the original
    # recreated and rewrote the file on every loop iteration and never
    # closed the handle.
    with h5py.File(_base_directory_model + 'vector_features_interpolation.h5', 'w') as h5f:
        h5f.create_dataset('training', data=features)
    return features
def _loading_model(original: np.ndarray, threshold: np.ndarray, av: np.ndarray, size: int):
    """Load the trained artery/vein Keras model and run it on every
    centerline pixel of the vessel tree.

    :param original: BGR fundus image.
    :param threshold: binary vessel segmentation handed to retina.Retina.
    :param av: ground-truth artery/vein image, or None when unlabeled.
    :param size: one past the last feature column fed to the network.
    :return: (features, segmented_skeleton_img, thr_img, predict_img);
        predict_img holds the raw network output per skeleton pixel and
        3 at pixels that were never predicted.
    """
    # Context manager guarantees the json file is closed even if parsing
    # fails (the original leaked the handle).
    with open(_base_directory_model + 'modelVA.json', "r") as json_file:
        loaded_model = model_from_json(json_file.read())
    loaded_model.load_weights(_base_directory_model + 'modelVA.h5')
    gray = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
    # Brightest spot approximates the optic disc; it is blanked below.
    (minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(gray)
    lab = cv2.cvtColor(original, cv2.COLOR_BGR2LAB)
    L, A, B = cv2.split(lab)  # L = lightness channel used as a feature
    manual = retina.Retina(threshold, None)
    manual.threshold_image()
    thr_img = manual.get_uint_image()
    cv2.circle(thr_img, maxLoc, 60, 0, -1)  # erase optic-disc region
    manual.skeletonization()
    skeleton_img = manual.get_uint_image()
    cv2.circle(skeleton_img, maxLoc, 60, 0, -1)
    landmarks, segmented_skeleton_img = l.potential_landmarks(skeleton_img, 3)
    widths = _vessel_widths(segmented_skeleton_img, thr_img)
    data = _preparing_data(widths, 6, original, av, L, gray)
    features = np.array(data)
    # 3 marks "no prediction"; valid network outputs lie in [0, 1].
    predict_img = np.full((segmented_skeleton_img.shape[0], segmented_skeleton_img.shape[1]), 3, dtype=float)
    for row in range(0, features.shape[0]):
        # Columns 0/1 are the pixel coordinates; 2:size are the features.
        prediction = loaded_model.predict(np.divide(features[row:row + 1, 2:size], 255), batch_size=1)
        predict_img[features[row, 0], features[row, 1]] = prediction
    return features, segmented_skeleton_img, thr_img, predict_img
def _validating_model(features: np.ndarray, skeleton_img: np.ndarray, original_img: np.ndarray, predicted_img: np.ndarray, size: int, av: int):
    """Turn the raw network output into a hard artery/vein labeling.

    With av == 0 a fixed decision threshold of 0.8 is applied; otherwise
    1000 thresholds (0.000 .. 0.999) are swept and the one with the best
    accuracy against the labels in column ``size`` of *features* is kept.

    Pixel classes in predict_copy: 0 = no prediction, 1 = vein (red),
    2 = artery (blue).  Colors are BGR.

    :return: (max_acc, rgb_prediction, network_prediction, original) --
        max_acc stays -1 in the fixed-threshold branch.
    """
    max_acc = -1
    rgb_prediction = []
    network_prediction = []
    original = []
    if av == 0:
        # Fixed 0.8 threshold; no ground-truth labels used.
        manual_copy = retina.Retina(skeleton_img, None)
        manual_copy.bin_to_bgr()
        manual_copy = manual_copy.get_uint_image()
        original_copy = original_img.copy()
        predict_copy = predicted_img.copy()
        # 3 marks "never predicted" (see _loading_model); remap it to 0
        # after the two class masks were computed.
        mask0 = predict_copy == 3
        mask1 = (predict_copy >= 0) & (predict_copy < 0.8)
        mask2 = (predict_copy >= 0.8) & (predict_copy <= 1)
        predict_copy[mask1] = 1
        predict_copy[mask2] = 2
        predict_copy[mask0] = 0
        for row in range(0, features.shape[0]):
            # Columns 0/1 of a feature row are the pixel coordinates.
            if predict_copy[features[row, 0], features[row, 1]] == 2:
                manual_copy[features[row, 0], features[row, 1]] = [0, 0, 255]
                original_copy[features[row, 0], features[row, 1]] = [0, 0, 255]
            elif predict_copy[features[row, 0], features[row, 1]] == 1:
                manual_copy[features[row, 0], features[row, 1]] = [255, 0, 0]
                original_copy[features[row, 0], features[row, 1]] = [255, 0, 0]
        rgb_prediction = manual_copy
        network_prediction = predict_copy
        original = original_copy
    else:
        # Sweep thresholds k = 0.000 .. 0.999 and keep the best accuracy.
        for i in range(0, 1000):
            manual_copy = retina.Retina(skeleton_img, None)
            manual_copy.bin_to_bgr()
            manual_copy = manual_copy.get_uint_image()
            original_copy = original_img.copy()
            predict_copy = predicted_img.copy()
            k = 0.001*i
            mask0 = predict_copy == 3
            mask1 = (predict_copy >= 0) & (predict_copy < k)
            mask2 = (predict_copy >= k) & (predict_copy <= 1)
            predict_copy[mask1] = 1
            predict_copy[mask2] = 2
            predict_copy[mask0] = 0
            true_positive = 0
            true_negative = 0
            false_positive = 0
            false_negative = 0
            for row in range(0, features.shape[0]):
                if predict_copy[features[row, 0], features[row, 1]] == 2:
                    manual_copy[features[row, 0], features[row, 1]] = [0, 0, 255]
                    original_copy[features[row, 0], features[row, 1]] = [0, 0, 255]
                elif predict_copy[features[row, 0], features[row, 1]] == 1:
                    manual_copy[features[row, 0], features[row, 1]] = [255, 0, 0]
                    original_copy[features[row, 0], features[row, 1]] = [255, 0, 0]
                # Column ``size`` holds the ground-truth label (0/1).
                if int(predict_copy[features[row, 0], features[row, 1]]) == 1 and features[row, size] == 0:
                    true_negative += 1
                elif int(predict_copy[features[row, 0], features[row, 1]]) == 2 and features[row, size] == 1:
                    true_positive += 1
                elif int(predict_copy[features[row, 0], features[row, 1]]) == 2 and features[row, size] == 0:
                    false_positive += 1
                elif int(predict_copy[features[row, 0], features[row, 1]]) == 1 and features[row, size] == 1:
                    false_negative += 1
            accy = (100 * (true_positive+true_negative)) / features.shape[0]
            if max_acc < accy:
                max_acc = accy
                rgb_prediction = manual_copy
                network_prediction = predict_copy
                original = original_copy
    return max_acc, rgb_prediction, network_prediction, original
def _homogenize(connected_components: np.ndarray,
network_prediction: np.ndarray,
rgb_prediction: np.ndarray,
original: np.ndarray):
# Imagen en con 0, 1, 2
result_image = network_prediction.copy()
# Imagen a color del resultado de la red
final_image = rgb_prediction.copy()
img_rgb = original.copy()
for x in range(1, connected_components[0]):
mask = connected_components[1] != x
mask2 = connected_components[1] == x
result_image_copy = result_image.copy()
result_image_copy[mask] = 0
n_veins = np.count_nonzero(result_image_copy == 1)
n_arteries = np.count_nonzero(result_image_copy == 2)
if n_veins == 0 and n_arteries == 0:
pass
elif n_veins == n_arteries:
pass
elif (n_veins == 1 and n_arteries == 0) or (n_veins == 0 and n_arteries == 1):
pass
elif n_veins > n_arteries:
result_image[mask2] = 1
else:
result_image[mask2] = 2
for row in range(0, result_image.shape[0]):
for col in range(0, result_image.shape[1]):
if result_image[row, col] == 1:
final_image[row, col] = [255, 0, 0]
img_rgb[row, col] = [255, 0, 0]
elif result_image[row, col] == 2:
final_image[row, col] = [0, 0, 255]
img_rgb[row, col] = [0, 0, 255]
return final_image, img_rgb
def _box_labels(bifurcations: list, c_components: np.ndarray):
connected_vessels = []
for b in bifurcations:
labels = c_components[1]
box = labels[b[1]-1:b[3]+1, b[0]-1:b[2]+1]
unique = np.unique(box)
if len(unique) == 4:
connected_vessels.append(unique[1:4])
return connected_vessels
def _average(widths: list):
acum = 0
for w in widths:
acum += w[1]+w[2]
acum /= len(widths)
return acum
def _normalize_indexes(connected_matrix: np.ndarray, label: int):
labeled = connected_matrix[1]
indexes = np.where(labeled == label)
formatted_index = []
for i in range(0, len(indexes[0])):
formatted_index.append([indexes[0][i], indexes[1][i]])
return formatted_index
def _average_width(connected_matrix: np.ndarray, connected: list, thr_img: np.ndarray, final_image: np.ndarray):
    """For every segment label in *connected*, append its mean vessel
    width followed by the color of its middle centerline pixel.

    :return: flat list [width_0, color_0, width_1, color_1, ...].
    """
    result = []
    for segment in connected:
        pixels = _normalize_indexes(connected_matrix, segment)
        segment_widths = l.vessel_width(thr_img, pixels)
        mid_row, mid_col = pixels[len(pixels) // 2]
        result.append(_average(segment_widths))
        result.append(final_image[mid_row, mid_col])
    return result
def _coloring(connected_matrix: np.ndarray, box: list, rgb: list, skeleton: np.ndarray):
for segment_label in box:
formatted_indexes = _normalize_indexes(connected_matrix, segment_label)
for index in formatted_indexes:
skeleton[index[0], index[1]] = rgb
return skeleton[index[0], index[1]]
def _postprocessing(connected_components: np.ndarray, thr_img: np.ndarray, bifurcs: list, final_img: np.ndarray):
    """Resolve conflicting colors around bifurcations.

    For each bifurcation whose box touches exactly three vessel segments,
    the segment selected by the width comparison below wins and its color
    is painted over all three segments -- unless exactly one of them is
    colored, or the winning color is white (uncolored).

    :return: recolored copy of *final_img*.
    """
    rgb = final_img.copy()
    connected_vessels = _box_labels(bifurcs, connected_components)
    for triplet in connected_vessels:
        # Flat list [width_0, color_0, width_1, color_1, width_2, color_2].
        width_and_color = _average_width(connected_components, triplet, thr_img, rgb)
        red = [0, 0]    # [count, last width] of artery-colored segments
        blue = [0, 0]   # [count, last width] of vein-colored segments
        maxwidth = [-1, -1]  # [best width, its color]
        for i in [1, 3, 5]:
            width = width_and_color[i - 1]
            # NOTE(review): the 1.75 factor lets later segments displace
            # the current maximum even when slightly narrower -- confirm
            # this bias is intentional.
            if width*1.75 > maxwidth[0]:
                maxwidth[0] = width
                maxwidth[1] = width_and_color[i]
            if all(width_and_color[i] == [255, 0, 0]):
                blue[0] += 1
                blue[1] = width
            elif all(width_and_color[i] == [0, 0, 255]):
                red[0] += 1
                red[1] = width
        # A lone colored segment is left untouched.
        if (red[0]+blue[0]) == 1:
            pass
        else:
            if not(all(maxwidth[1] == [255, 255, 255])):
                _coloring(connected_components, triplet, maxwidth[1], rgb)
    return rgb
def _accuracy(post_img: np.ndarray, segmented_img: np.ndarray, gt_img: np.ndarray):
counter = 0
TN = 0
FN = 0
FP = 0
TP = 0
for it_x in range(0, segmented_img.shape[0]):
for it_y in range(0, segmented_img.shape[1]):
if segmented_img[it_x, it_y] == 255:
if (all(gt_img[it_x, it_y] == [255, 0, 0]) or all(gt_img[it_x, it_y] == [0, 0, 255])) and \
not(all(post_img[it_x, it_y] == [255, 255, 255])):
counter += 1
if all(post_img[it_x, it_y] == [0, 0, 255]) and all(gt_img[it_x, it_y] == [0, 0, 255]):
TP += 1
elif all(post_img[it_x, it_y] == [255, 0, 0]) and all(gt_img[it_x, it_y] == [255, 0, 0]):
TN += 1
elif all(post_img[it_x, it_y] == [0, 0, 255]) and all(gt_img[it_x, it_y] == [255, 0, 0]):
FP += 1
elif all(post_img[it_x, it_y] == [255, 0, 0]) and all(gt_img[it_x, it_y] == [0, 0, 255]):
FN += 1
sensitivity = TP / (TP + FN)
specificity = TN / (FP + TN)
accy = (TP + TN) / (TP + TN + FP + FN)
return [accy, sensitivity, specificity]
def classification(original_img: np.ndarray, manual_img: np.ndarray):
    """End-to-end artery/vein classification of a fundus image.

    :param original_img: BGR fundus photograph.
    :param manual_img: binary vessel segmentation of the same image.
    :return: BGR image with centerline pixels colored blue (artery) /
        red (vein) after homogenization and post-processing.
    """
    manual = manual_img
    bifurcations, crossings = l.classification(manual, 0)
    # 38: feature-vector width used when training modelVA -- TODO confirm.
    features, sectioned_img, thr_img, predict_img = _loading_model(original_img, manual, None, 38)
    acc, rgb, network, original = _validating_model(features, sectioned_img, original_img, predict_img, 38, 0)
    connected_components = cv2.connectedComponentsWithStats(sectioned_img.astype(np.uint8), 4, cv2.CV_32S)
    final_img, img_original = _homogenize(connected_components, network, rgb, original)
    post_img = _postprocessing(connected_components, thr_img, bifurcations, img_original)
    # Restored: the source line was fused with stray dataset metadata.
    return post_img
## Set up code checking
import os
if not os.path.exists("../input/train.csv"):
    os.symlink("../input/home-data-for-ml-course/train.csv", "../input/train.csv")
    os.symlink("../input/home-data-for-ml-course/test.csv", "../input/test.csv")
from learntools.core import binder
binder.bind(globals())
from learntools.ml_intermediate.ex5 import *
print("Setup Complete")
import pandas as pd
from sklearn.model_selection import train_test_split
## Explicit imports for the estimator/pipeline names used below; the
## original relied on the learntools star import (or a missing notebook
## cell) to provide them -- confirm against the notebook.
from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
## Read the data
train_data = pd.read_csv('../input/train.csv', index_col='Id')
test_data = pd.read_csv('../input/test.csv', index_col='Id')
## Remove rows with missing target, separate target from predictors
train_data.dropna(axis=0, subset=['SalePrice'], inplace=True)
y = train_data.SalePrice
train_data.drop(['SalePrice'], axis=1, inplace=True)
## Select numeric columns only
numeric_cols = [cname for cname in train_data.columns if train_data[cname].dtype in ['int64', 'float64']]
X = train_data[numeric_cols].copy()
X_test = test_data[numeric_cols].copy()
## Step 1: Write a useful function
## In this exercise, you'll use cross-validation to select parameters for a machine learning model.
## Begin by writing a function get_score() that reports the average (over three cross-validation folds) MAE of a machine learning pipeline that uses:
## the data in X and y to create folds,
## SimpleImputer() (with all parameters left as default) to replace missing values, and
## RandomForestRegressor() (with random_state=0) to fit a random forest model.
## The n_estimators parameter supplied to get_score() is used when setting the number of trees in the random forest model.
## Answer:
def get_score(n_estimators):
    """Return the mean 3-fold cross-validation MAE of an impute +
    random-forest pipeline with *n_estimators* trees."""
    my_pipeline = Pipeline(steps=[
        ('preprocessor', SimpleImputer()),
        ('model', RandomForestRegressor(n_estimators, random_state=0))
    ])
    scores = -1 * cross_val_score(my_pipeline, X, y, cv=3, scoring='neg_mean_absolute_error')
    return scores.mean()
## Step 2: Test different parameter values
## Now, you will use the function that you defined in Step 1 to evaluate the model performance corresponding to eight different values for the number of trees in the random forest: 50, 100, 150, ..., 300, 350, 400.
## Store your results in a Python dictionary results, where results[i] is the average MAE returned by get_score(i).
## Answer:
results = {}
for i in range(1, 9):
    results[50 * i] = get_score(50 * i)
## Step 3: Find the best parameter value
## Given the results, which value for n_estimators seems best for the random forest model? Use your answer to set the value of n_estimators_best.
## Answer:
n_estimators_best = min(results, key=results.get)
## Set up code checking
import os
if not os.path.exists("../input/train.csv"):
    os.symlink("../input/home-data-for-ml-course/train.csv", "../input/train.csv")
    os.symlink("../input/home-data-for-ml-course/test.csv", "../input/test.csv")
from learntools.core import binder
binder.bind(globals())
from learntools.ml_intermediate.ex5 import *
print("Setup Complete")
import pandas as pd
from sklearn.model_selection import train_test_split
## Explicit imports for the estimator/pipeline names used below; the
## original relied on the learntools star import (or a missing notebook
## cell) to provide them -- confirm against the notebook.
from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
## Read the data
train_data = pd.read_csv('../input/train.csv', index_col='Id')
test_data = pd.read_csv('../input/test.csv', index_col='Id')
## Remove rows with missing target, separate target from predictors
train_data.dropna(axis=0, subset=['SalePrice'], inplace=True)
y = train_data.SalePrice
train_data.drop(['SalePrice'], axis=1, inplace=True)
## Select numeric columns only
numeric_cols = [cname for cname in train_data.columns if train_data[cname].dtype in ['int64', 'float64']]
X = train_data[numeric_cols].copy()
X_test = test_data[numeric_cols].copy()
## Step 1: Write a useful function
## In this exercise, you'll use cross-validation to select parameters for a machine learning model.
## Begin by writing a function get_score() that reports the average (over three cross-validation folds) MAE of a machine learning pipeline that uses:
## the data in X and y to create folds,
## SimpleImputer() (with all parameters left as default) to replace missing values, and
## RandomForestRegressor() (with random_state=0) to fit a random forest model.
## The n_estimators parameter supplied to get_score() is used when setting the number of trees in the random forest model.
## Answer:
def get_score(n_estimators):
    """Return the mean 3-fold cross-validation MAE of an impute +
    random-forest pipeline with *n_estimators* trees."""
    my_pipeline = Pipeline(steps=[
        ('preprocessor', SimpleImputer()),
        ('model', RandomForestRegressor(n_estimators, random_state=0))
    ])
    scores = -1 * cross_val_score(my_pipeline, X, y, cv=3, scoring='neg_mean_absolute_error')
    return scores.mean()
## Step 2: Test different parameter values
## Now, you will use the function that you defined in Step 1 to evaluate the model performance corresponding to eight different values for the number of trees in the random forest: 50, 100, 150, ..., 300, 350, 400.
## Store your results in a Python dictionary results, where results[i] is the average MAE returned by get_score(i).
## Answer:
results = {}
for i in range(1, 9):
    results[50 * i] = get_score(50 * i)
## Step 3: Find the best parameter value
## Given the results, which value for n_estimators seems best for the random forest model? Use your answer to set the value of n_estimators_best.
## Answer:
n_estimators_best = min(results, key=results.get)
import os
import binascii
from koppercoin.crypto import ietf_ed25519, lww_signature
def keygen():
    """Returns a longtime keypair (secret, public) such that one can
    derive further keys from the public key.
    These are basically two usual ECC-keypairs.
    """
    first_pair = lww_signature.keygen()
    second_pair = lww_signature.keygen()
    secrets = (first_pair[0], second_pair[0])
    publics = (first_pair[1], second_pair[1])
    return (secrets, publics)
def key_to_trackingkey(key):
    """Takes a keypair and returns the tracking key: the first secret
    component paired with the second public component."""
    secrets, publics = key
    return (secrets[0], publics[1])
def generate_ot_key(public_key, nonce=None):
    """Derives a onetime publickey.
    The corresponding onetime private key can only be recovered with
    knowledge of the private_key of the public_key.
    More exactly the output is (ot_pubkey, dh_key), where dh_key and
    the private key corresponding to the public_key in the input are
    needed to compute ot_privkey, the onetime private key.
    The nonce should be unique modulo the group order and can be
    reused for different public keys. It will be generated
    automaticaly if not set.

    Computes P = H(r * A) * G + B and publishes R = r * G as dh_key
    (the CryptoNote one-time key construction).

    >>> import os
    >>> nonce = os.urandom(32)
    >>> (_, pk) = keygen()
    >>> ot_key = generate_ot_key(pk, nonce)
    >>> (_, pk) = keygen()
    >>> ot_key = generate_ot_key(pk)
    """
    if not nonce:
        nonce = os.urandom(32)
    (A, B) = public_key
    # Keys travel as hex strings; decode to compressed point bytes.
    (A, B) = (binascii.unhexlify(A), binascii.unhexlify(B))
    # r: the nonce reduced into the group order (little-endian scalar).
    nonce = int.from_bytes(nonce, "little") % ietf_ed25519.q
    # H(r * A): hash of the shared Diffie-Hellman point.
    hashval = ietf_ed25519.sha512(
        ietf_ed25519.point_compress(
            ietf_ed25519.point_mul(
                nonce,
                ietf_ed25519.point_decompress(A))))
    # P = H(r * A) * G + B
    first = ietf_ed25519.point_mul(
        int.from_bytes(hashval, "little"),
        ietf_ed25519.G)
    second = ietf_ed25519.point_decompress(B)
    ot_pubkey = ietf_ed25519.point_add(first, second)
    dh_key = ietf_ed25519.point_mul(nonce, ietf_ed25519.G)
    # In the Cryptonote Whitepaper dh_key is called R
    # Next we compress the points
    ot_pubkey = binascii.hexlify(ietf_ed25519.point_compress(ot_pubkey)).decode()
    dh_key = binascii.hexlify(ietf_ed25519.point_compress(dh_key)).decode()
    return (ot_pubkey, dh_key)
def generate_ot_keys(public_keys, nonce=None):
    """Derives a list of onetime publickeys that share a single nonce.
    Each corresponding onetime private key can only be recovered with
    knowledge of the private key belonging to that public key.
    The output is ([ot_pubkey1, ot_pubkey2, ...], dh_key); dh_key is
    identical for every entry because the nonce is shared.
    Batch-processing counterpart of generate_ot_key -- see there for the
    nonce requirements (unique modulo the group order, auto-generated
    when not supplied).

    >>> public_keys = [keygen()[1] for i in range(3)]
    >>> ot_keys = generate_ot_keys(public_keys)
    """
    if not nonce:
        nonce = os.urandom(32)
    pairs = [generate_ot_key(pk, nonce) for pk in public_keys]
    # Every pair carries the same dh_key (same nonce), so take the first.
    dh_key = pairs[0][1]
    ot_pubkeys = [ot_pubkey for (ot_pubkey, _) in pairs]
    return (ot_pubkeys, dh_key)
def recoverable(ot_key, tracking_key):
    """Takes a onetime key and a tracking key and returns if
    the private onetime key is recoverable.

    Recomputes P' = H(a * R) * G + B from the tracking key (a, B) and
    the published dh_key R, then compares it with the one-time public
    key (by Diffie-Hellman symmetry a * R equals r * A used during
    generation).

    >>> longterm_key = keygen()
    >>> longterm_pub = longterm_key[1]
    >>> ot_key = generate_ot_key(longterm_pub)
    >>> trackingkey = key_to_trackingkey(longterm_key)
    >>> recoverable(ot_key, trackingkey)
    True
    >>> wrong_lt_key = keygen()
    >>> wrong_lt_pub = wrong_lt_key[1]
    >>> wrong_trackingkey = key_to_trackingkey(wrong_lt_key)
    >>> recoverable(ot_key, wrong_trackingkey)
    False
    """
    (ot_pubkey, dh_key) = ot_key
    # Hex strings on the wire; decode to compressed point bytes.
    (ot_pubkey, dh_key) = (binascii.unhexlify(ot_pubkey), binascii.unhexlify(dh_key))
    (a, B) = tracking_key
    # H(a * R): hash of the shared Diffie-Hellman point.
    hashval = ietf_ed25519.sha512(
        ietf_ed25519.point_compress(
            ietf_ed25519.point_mul(
                int(a, 16),
                ietf_ed25519.point_decompress(dh_key))))
    first = ietf_ed25519.point_mul(
        int.from_bytes(hashval, "little"),
        ietf_ed25519.G)
    second = ietf_ed25519.point_decompress(binascii.unhexlify(B))
    key_ = ietf_ed25519.point_compress(
        ietf_ed25519.point_add(first, second))
    return ot_pubkey == key_
def recover_sec_key(ot_key, keypair):
    """Takes a onetime public key and a keypair and recovers the
    onetime secret key if possible.

    >>> longterm_key = keygen()
    >>> longterm_pub = longterm_key[1]
    >>> ot_key = generate_ot_key(longterm_pub)
    >>> (ot_pubkey, dh_key) = ot_key
    >>> trackingkey = key_to_trackingkey(longterm_key)
    >>> recovered_sec_key = recover_sec_key(ot_key, longterm_key)
    >>> lww_signature.secret_to_public(recovered_sec_key) == ot_pubkey
    True
    """
    (ot_pubkey, dh_key) = ot_key
    ((a, b), (A, B)) = keypair
    # x = H(a * R) + b: H() of the shared Diffie-Hellman point, read as a
    # little-endian integer, plus the second long-term secret.
    first = ietf_ed25519.sha512(
        ietf_ed25519.point_compress(
            ietf_ed25519.point_mul(
                int(a, 16),
                ietf_ed25519.point_decompress(binascii.unhexlify(dh_key)))))
    first = int.from_bytes(first, "little")
    second = int(b, 16)
    # NOTE(review): the sum is not reduced modulo the group order here;
    # presumably lww_signature handles the reduction -- confirm.
    ot_sec_key = hex(first + second)
    # Sanity check: the derived secret must match the advertised public
    # key.  (assert is stripped under -O; kept to preserve behavior.)
    assert(ot_pubkey == lww_signature.secret_to_public(ot_sec_key))
    # Restored: the return line was fused with dataset-dump metadata and
    # had swallowed the next module's first import.
    return ot_sec_key
import os
import binascii
from koppercoin.crypto import ietf_ed25519, lww_signature
def keygen():
    """Create a long-term keypair ``((sec1, sec2), (pub1, pub2))``.

    Further one-time keys can be derived from the public half; the
    result is simply a pair of two ordinary ECC keypairs.
    """
    sec_a, pub_a = lww_signature.keygen()
    sec_b, pub_b = lww_signature.keygen()
    return ((sec_a, sec_b), (pub_a, pub_b))
def key_to_trackingkey(key):
    """Derive the tracking key ``(a, B)`` from a full keypair.

    The tracking key pairs the first secret scalar with the second
    public key; ``recoverable()`` uses it to test whether a one-time
    key belongs to the holder.
    """
    secrets, publics = key
    a, _unused_b = secrets
    _unused_A, B = publics
    return (a, B)
def generate_ot_key(public_key, nonce=None):
    """Derives a onetime publickey.
    The corresponding onetime private key can only be recovered with
    knowledge of the private_key of the public_key.
    More exactly the output is (ot_pubkey, dh_key), where dh_key and
    the private key corresponding to the public_key in the input are
    needed to compute ot_privkey, the onetime private key.
    The nonce should be unique modulo the group order and can be
    reused for different public keys. It will be generated
    automatically if not set.
    >>> import os
    >>> nonce = os.urandom(32)
    >>> (_, pk) = keygen()
    >>> ot_key = generate_ot_key(pk, nonce)
    >>> (_, pk) = keygen()
    >>> ot_key = generate_ot_key(pk)
    """
    if not nonce:
        nonce = os.urandom(32)
    # public_key is a pair of hex-encoded compressed curve points (A, B).
    (A, B) = public_key
    (A, B) = (binascii.unhexlify(A), binascii.unhexlify(B))
    # Reduce the random bytes into the scalar field (q is the group order).
    nonce = int.from_bytes(nonce, "little") % ietf_ed25519.q
    # Shared secret H(nonce * A): only the holder of A's secret scalar
    # can recompute this value from the published dh_key.
    hashval = ietf_ed25519.sha512(
        ietf_ed25519.point_compress(
            ietf_ed25519.point_mul(
                nonce,
                ietf_ed25519.point_decompress(A))))
    # ot_pubkey = H(nonce*A) * G + B
    first = ietf_ed25519.point_mul(
        int.from_bytes(hashval, "little"),
        ietf_ed25519.G)
    second = ietf_ed25519.point_decompress(B)
    ot_pubkey = ietf_ed25519.point_add(first, second)
    # dh_key = nonce * G is published so the recipient can redo the DH step.
    dh_key = ietf_ed25519.point_mul(nonce, ietf_ed25519.G)
    # In the Cryptonote Whitepaper dh_key is called R
    # Next we compress the points
    ot_pubkey = binascii.hexlify(ietf_ed25519.point_compress(ot_pubkey)).decode()
    dh_key = binascii.hexlify(ietf_ed25519.point_compress(dh_key)).decode()
    return (ot_pubkey, dh_key)
def generate_ot_keys(public_keys, nonce=None):
    """Derive one onetime public key per entry of *public_keys*.

    Batch variant of ``generate_ot_key``: every derivation shares a
    single nonce, so all results share one Diffie-Hellman key and the
    output shape is ``([ot_pubkey1, ot_pubkey2, ...], dh_key)``.  Each
    onetime private key remains recoverable only with the private key
    matching the corresponding public key.

    The nonce should be unique modulo the group order and can be
    reused for different public keys; a random one is drawn when the
    argument is omitted.

    >>> public_keys = [keygen()[1] for i in range(3)]
    >>> ot_keys = generate_ot_keys(public_keys)
    """
    if not nonce:
        nonce = os.urandom(32)
    derived = [generate_ot_key(pk, nonce) for pk in public_keys]
    # Every pair carries an identical dh_key because the nonce is shared.
    ot_pubkeys = [pair[0] for pair in derived]
    dh_key = derived[0][1]
    return (ot_pubkeys, dh_key)
def recoverable(ot_key, tracking_key):
    """Takes a onetime key and a tracking key and returns if
    the private onetime key is recoverable.
    >>> longterm_key = keygen()
    >>> longterm_pub = longterm_key[1]
    >>> ot_key = generate_ot_key(longterm_pub)
    >>> trackingkey = key_to_trackingkey(longterm_key)
    >>> recoverable(ot_key, trackingkey)
    True
    >>> wrong_lt_key = keygen()
    >>> wrong_lt_pub = wrong_lt_key[1]
    >>> wrong_trackingkey = key_to_trackingkey(wrong_lt_key)
    >>> recoverable(ot_key, wrong_trackingkey)
    False
    """
    (ot_pubkey, dh_key) = ot_key
    # Both components arrive hex-encoded; convert to raw bytes up front.
    (ot_pubkey, dh_key) = (binascii.unhexlify(ot_pubkey), binascii.unhexlify(dh_key))
    # a: first secret scalar (hex string), B: second public key (hex string).
    (a, B) = tracking_key
    # Shared secret H(a * R) where R = dh_key = nonce*G; this reproduces
    # the H(nonce*A) value hashed in generate_ot_key (assuming A = a*G,
    # as for these keypairs).
    hashval = ietf_ed25519.sha512(
        ietf_ed25519.point_compress(
            ietf_ed25519.point_mul(
                int(a, 16),
                ietf_ed25519.point_decompress(dh_key))))
    # Candidate public key H(a*R)*G + B ...
    first = ietf_ed25519.point_mul(
        int.from_bytes(hashval, "little"),
        ietf_ed25519.G)
    second = ietf_ed25519.point_decompress(binascii.unhexlify(B))
    key_ = ietf_ed25519.point_compress(
        ietf_ed25519.point_add(first, second))
    # ... equals the onetime public key exactly when this tracking key
    # owns it.
    return ot_pubkey == key_
def recover_sec_key(ot_key, keypair):
    """Takes a onetime public key and a keypair and recovers the
    onetime secret key if possible.
    >>> longterm_key = keygen()
    >>> longterm_pub = longterm_key[1]
    >>> ot_key = generate_ot_key(longterm_pub)
    >>> (ot_pubkey, dh_key) = ot_key
    >>> trackingkey = key_to_trackingkey(longterm_key)
    >>> recovered_sec_key = recover_sec_key(ot_key, longterm_key)
    >>> lww_signature.secret_to_public(recovered_sec_key) == ot_pubkey
    True
    """
    (ot_pubkey, dh_key) = ot_key
    # Full long-term keypair: secret scalars (a, b), public points (A, B).
    ((a, b), (A, B)) = keypair
    # Shared secret H(a * R) -- the same value generate_ot_key hashed.
    first = ietf_ed25519.sha512(
        ietf_ed25519.point_compress(
            ietf_ed25519.point_mul(
                int(a, 16),
                ietf_ed25519.point_decompress(binascii.unhexlify(dh_key)))))
    first = int.from_bytes(first, "little")
    second = int(b, 16)
    # Onetime secret = H(a*R) + b, returned as a hex string.
    # NOTE(review): the sum is not reduced modulo the group order here --
    # presumably downstream scalar arithmetic reduces it; confirm.
    ot_sec_key = first + second
    ot_sec_key = hex(ot_sec_key)
    # Check correctness
    assert(ot_pubkey == lww_signature.secret_to_public(ot_sec_key))
return ot_sec_key | 0.690246 | 0.462959 |
from psycopg2.extensions import AsIs
from skygear.utils import db
from .exc import AlreadyDeletedException
from .predicate import Predicate
from .pubsub import _publish_record_event
from .query import Query
from .record import ChatRecord
from .user_conversation import UserConversation
from .utils import _get_schema_name, to_rfc3339_or_none
class Message(ChatRecord):
    # Skygear record type this model is stored under.
    record_type = 'message'

    def delete(self) -> None:
        """
        Soft-delete a message
        - Mark message as deleted
        - Update last_message and last_read_message
        """
        # Double deletion is reported to the caller rather than ignored.
        if self['deleted']:
            raise AlreadyDeletedException()
        self['deleted'] = True
        self.save()

    def getReceiptList(self):
        """
        Returns a list of message receipt statuses.

        Each entry is a dict with keys 'user', 'read_at' and
        'delivered_at'; timestamps are RFC 3339 strings or None.
        """
        receipts = list()
        with db.conn() as conn:
            # Only receipts that were at least delivered or read matter.
            cur = conn.execute('''
                SELECT receipt.user, read_at, delivered_at
                FROM %(schema_name)s.receipt
                WHERE
                    "message" = %(message_id)s AND
                    (read_at IS NOT NULL or delivered_at is NOT NULL)
                ''', {
                    'schema_name': AsIs(_get_schema_name()),
                    'message_id': self.id.key
                }
            )
            for row in cur:
                receipts.append({
                    'user': row['user'],
                    'read_at': to_rfc3339_or_none(row['read_at']),
                    'delivered_at': to_rfc3339_or_none(row['delivered_at'])
                })
        return receipts

    def updateMessageStatus(self, conn) -> None:
        """
        Update the message status field by querying the database for
        all receipt statuses.

        Sets message_status to 'delivered', 'some_read' or 'all_read'
        depending on how many conversation participants have read
        receipts, then mirrors the new values onto this record.
        (Annotation corrected: the method returns None, not bool.)
        """
        cur = conn.execute('''
            WITH
                read_count AS (
                    SELECT DISTINCT COUNT(receipt.user) as count
                    FROM %(schema_name)s.receipt
                    WHERE message = %(message_id)s
                    AND read_at IS NOT NULL
                ),
                participant_count AS (
                    SELECT count(1) as count
                    FROM %(schema_name)s.user_conversation
                    WHERE conversation = %(conversation_id)s
                )
            UPDATE %(schema_name)s.message
            SET _updated_at = NOW(),
                message_status =
                CASE
                    WHEN read_count.count = 0 THEN 'delivered'
                    WHEN read_count.count < participant_count.count
                    THEN 'some_read'
                    ELSE 'all_read'
                END
            FROM read_count, participant_count
            WHERE _id = %(message_id)s
            RETURNING _updated_at, message_status
            ''', {
                'schema_name': AsIs(_get_schema_name()),
                'message_id': self.id.key,
                'conversation_id': self.conversation_id
            }
        )
        row = cur.fetchone()
        # No row means the UPDATE matched nothing; leave the record as-is.
        if row is not None:
            self['_updated_at'] = row[0]
            self['message_status'] = row[1]

    def notifyParticipants(self, event_type='update') -> None:
        """Publish a record event about this message to every participant
        of its conversation (default event type: 'update')."""
        result = UserConversation.\
            fetch_all_by_conversation_id(self.conversation_id)
        participants = [row['user'].recordID.key for row in result]
        _publish_record_event(participants,
                              "message",
                              event_type,
                              self)

    @property
    def conversation_id(self):
        # Record key of the conversation this message belongs to.
        return self['conversation'].recordID.key

    @classmethod
    def fetch_all_by_conversation_id(cls, conversation_id, limit,
                                     before_time=None, before_message_id=None,
                                     after_time=None, after_message_id=None,
                                     order=None, deleted=False):
        """Fetch messages of a conversation, newest first.

        Optional bounds narrow the window by creation time or by the
        'seq' of a reference message.  With deleted=True the query
        selects soft-deleted messages instead, and their content is
        blanked before returning.
        """
        database = cls._get_database()
        predicate = Predicate(conversation__eq=conversation_id,
                              deleted__eq=deleted)
        if before_time is not None:
            predicate = predicate & Predicate(_created_at__lt=before_time)
        if before_message_id is not None:
            before_message = Message.fetch_one(before_message_id)
            predicate = predicate & Predicate(seq__lt=before_message['seq'])
        if after_time is not None:
            predicate = predicate & Predicate(_created_at__gt=after_time)
        if after_message_id is not None:
            after_message = Message.fetch_one(after_message_id)
            predicate = predicate & Predicate(seq__gt=after_message['seq'])
        query = Query('message', predicate=predicate, limit=limit)
        # Only 'edited_at' is accepted as an alternative sort key;
        # anything else falls back to creation time.
        if order != 'edited_at':
            order = '_created_at'
        query.add_order(order, 'desc')
        result = database.query(query)
        # remove deleted message content
        if deleted:
            for message in result:
                if message['deleted']:
                    Message.clear_message_content(message)
        return result

    @classmethod
    def fetch_all_by_conversation_id_and_seq(cls,
                                             conversation_id,
                                             from_seq,
                                             to_seq):
        """Fetch non-deleted messages with seq between from_seq and
        to_seq (both inclusive); a negative from_seq disables the
        lower bound."""
        database = cls._get_database()
        predicate = Predicate(seq__lte=to_seq,
                              conversation__eq=conversation_id,
                              deleted__eq=False)
        if from_seq >= 0:
            predicate = predicate & Predicate(seq__gte=from_seq)
        query = Query('message', predicate=predicate, limit=None)
        return database.query(query)

    @classmethod
    def clear_message_content(cls, message):
        """Blank out the user-visible fields of a (soft-deleted) message."""
        keys_to_delete = [
            'message_status',
            'body',
            'meta_data',
            'attachment',
            'revision'
        ]
        # Each field is first nulled, then dropped from the record.
        for key in keys_to_delete:
            message[key] = None
del message[key] | chat-plugin/chat/message.py | from psycopg2.extensions import AsIs
from skygear.utils import db
from .exc import AlreadyDeletedException
from .predicate import Predicate
from .pubsub import _publish_record_event
from .query import Query
from .record import ChatRecord
from .user_conversation import UserConversation
from .utils import _get_schema_name, to_rfc3339_or_none
class Message(ChatRecord):
record_type = 'message'
def delete(self) -> None:
"""
Soft-delete a message
- Mark message as deleted
- Update last_message and last_read_message
"""
if self['deleted']:
raise AlreadyDeletedException()
self['deleted'] = True
self.save()
def getReceiptList(self):
"""
Returns a list of message receipt statuses.
"""
receipts = list()
with db.conn() as conn:
cur = conn.execute('''
SELECT receipt.user, read_at, delivered_at
FROM %(schema_name)s.receipt
WHERE
"message" = %(message_id)s AND
(read_at IS NOT NULL or delivered_at is NOT NULL)
''', {
'schema_name': AsIs(_get_schema_name()),
'message_id': self.id.key
}
)
for row in cur:
receipts.append({
'user': row['user'],
'read_at': to_rfc3339_or_none(row['read_at']),
'delivered_at': to_rfc3339_or_none(row['delivered_at'])
})
return receipts
def updateMessageStatus(self, conn) -> bool:
"""
Update the message status field by querying the database for
all receipt statuses.
"""
cur = conn.execute('''
WITH
read_count AS (
SELECT DISTINCT COUNT(receipt.user) as count
FROM %(schema_name)s.receipt
WHERE message = %(message_id)s
AND read_at IS NOT NULL
),
participant_count AS (
SELECT count(1) as count
FROM %(schema_name)s.user_conversation
WHERE conversation = %(conversation_id)s
)
UPDATE %(schema_name)s.message
SET _updated_at = NOW(),
message_status =
CASE
WHEN read_count.count = 0 THEN 'delivered'
WHEN read_count.count < participant_count.count
THEN 'some_read'
ELSE 'all_read'
END
FROM read_count, participant_count
WHERE _id = %(message_id)s
RETURNING _updated_at, message_status
''', {
'schema_name': AsIs(_get_schema_name()),
'message_id': self.id.key,
'conversation_id': self.conversation_id
}
)
row = cur.fetchone()
if row is not None:
self['_updated_at'] = row[0]
self['message_status'] = row[1]
def notifyParticipants(self, event_type='update') -> None:
result = UserConversation.\
fetch_all_by_conversation_id(self.conversation_id)
participants = [row['user'].recordID.key for row in result]
_publish_record_event(participants,
"message",
event_type,
self)
@property
def conversation_id(self):
return self['conversation'].recordID.key
@classmethod
def fetch_all_by_conversation_id(cls, conversation_id, limit,
before_time=None, before_message_id=None,
after_time=None, after_message_id=None,
order=None, deleted=False):
database = cls._get_database()
predicate = Predicate(conversation__eq=conversation_id,
deleted__eq=deleted)
if before_time is not None:
predicate = predicate & Predicate(_created_at__lt=before_time)
if before_message_id is not None:
before_message = Message.fetch_one(before_message_id)
predicate = predicate & Predicate(seq__lt=before_message['seq'])
if after_time is not None:
predicate = predicate & Predicate(_created_at__gt=after_time)
if after_message_id is not None:
after_message = Message.fetch_one(after_message_id)
predicate = predicate & Predicate(seq__gt=after_message['seq'])
query = Query('message', predicate=predicate, limit=limit)
if order != 'edited_at':
order = '_created_at'
query.add_order(order, 'desc')
result = database.query(query)
# remove deleted message content
if deleted:
for message in result:
if message['deleted']:
Message.clear_message_content(message)
return result
@classmethod
def fetch_all_by_conversation_id_and_seq(cls,
conversation_id,
from_seq,
to_seq):
database = cls._get_database()
predicate = Predicate(seq__lte=to_seq,
conversation__eq=conversation_id,
deleted__eq=False)
if from_seq >= 0:
predicate = predicate & Predicate(seq__gte=from_seq)
query = Query('message', predicate=predicate, limit=None)
return database.query(query)
@classmethod
def clear_message_content(cls, message):
keys_to_delete = [
'message_status',
'body',
'meta_data',
'attachment',
'revision'
]
for key in keys_to_delete:
message[key] = None
del message[key] | 0.505371 | 0.099252 |
import numpy as np
from astropy import units as u
from gammapy.modeling import Parameter
from .spectral import PowerLawSpectralModel, SpectralModel
class _LogGaussianSpectralModel(SpectralModel):
    r"""Log Gaussian spectral model with a weird parametrisation.
    This should not be exposed to end-users as a Gammapy spectral model!
    See Table 3 in https://ui.adsabs.harvard.edu/abs/2013APh....43..171B
    """
    # Normalisation of the log-normal component (integral-flux units).
    L = Parameter("L", 1e-12 * u.Unit("cm-2 s-1"))
    # Peak energy of the log-normal bump.
    Ep = Parameter("Ep", 0.107 * u.TeV)
    # Dimensionless width in log-energy.
    w = Parameter("w", 0.776)

    @staticmethod
    def evaluate(energy, L, Ep, w):
        """Differential flux L / (E*w*sqrt(2*pi)) * exp(-(ln(E/Ep))^2 / (2*w^2))."""
        return (
            L
            / (energy * w * np.sqrt(2 * np.pi))
            * np.exp(-((np.log(energy / Ep)) ** 2) / (2 * w ** 2))
        )
def create_cosmic_ray_spectral_model(particle="proton"):
    """Cosmic ray spectral model at Earth.
    These are the spectra assumed in this CTA study:
    Table 3 in https://ui.adsabs.harvard.edu/abs/2013APh....43..171B
    The spectrum given is a differential flux ``dnde`` in units of
    ``cm-2 s-1 TeV-1``, as the value integrated over the whole sky.
    To get a surface brightness you need to compute
    ``dnde / (4 * np.pi * u.sr)``.
    To get the ``dnde`` in a region of solid angle ``omega``, you need
    to compute ``dnde * omega / (4 * np.pi * u.sr)``.
    The hadronic spectra are simple power-laws, the electron spectrum
    is the sum of a power law and a log-normal component to model the
    "Fermi shoulder".
    Parameters
    ----------
    particle : {'electron', 'proton', 'N', 'Si', 'Fe'}
        Particle type.  NOTE(review): 'He' used to be documented here
        but has no branch below and raises ValueError -- confirm
        whether it should be supported.
    Returns
    -------
    `~gammapy.modeling.models.SpectralModel`
        Spectral model (for all-sky cosmic ray flux)
    """
    # Full-sky solid angle: converts the per-steradian amplitudes from
    # the reference table into all-sky amplitudes.
    omega = 4 * np.pi * u.sr
    if particle == "proton":
        return PowerLawSpectralModel(
            amplitude=0.096 * u.Unit("1 / (m2 s TeV sr)") * omega,
            index=2.70,
            reference=1 * u.TeV,
        )
    elif particle == "N":
        return PowerLawSpectralModel(
            amplitude=0.0719 * u.Unit("1 / (m2 s TeV sr)") * omega,
            index=2.64,
            reference=1 * u.TeV,
        )
    elif particle == "Si":
        return PowerLawSpectralModel(
            amplitude=0.0284 * u.Unit("1 / (m2 s TeV sr)") * omega,
            index=2.66,
            reference=1 * u.TeV,
        )
    elif particle == "Fe":
        return PowerLawSpectralModel(
            amplitude=0.0134 * u.Unit("1 / (m2 s TeV sr)") * omega,
            index=2.63,
            reference=1 * u.TeV,
        )
    elif particle == "electron":
        # Power law plus log-normal component ("Fermi shoulder").
        return (
            PowerLawSpectralModel(
                amplitude=6.85e-5 * u.Unit("1 / (m2 s TeV sr)") * omega,
                index=3.21,
                reference=1 * u.TeV,
            )
            + _LogGaussianSpectralModel(L=3.19e-3 * u.Unit("1 / (m2 s sr)") * omega)
        )
    else:
raise ValueError(f"Invalid particle: {particle!r}") | gammapy/modeling/models/spectral_cosmic_ray.py | import numpy as np
from astropy import units as u
from gammapy.modeling import Parameter
from .spectral import PowerLawSpectralModel, SpectralModel
class _LogGaussianSpectralModel(SpectralModel):
r"""Log Gaussian spectral model with a weird parametrisation.
This should not be exposed to end-users as a Gammapy spectral model!
See Table 3 in https://ui.adsabs.harvard.edu/abs/2013APh....43..171B
"""
L = Parameter("L", 1e-12 * u.Unit("cm-2 s-1"))
Ep = Parameter("Ep", 0.107 * u.TeV)
w = Parameter("w", 0.776)
@staticmethod
def evaluate(energy, L, Ep, w):
return (
L
/ (energy * w * np.sqrt(2 * np.pi))
* np.exp(-((np.log(energy / Ep)) ** 2) / (2 * w ** 2))
)
def create_cosmic_ray_spectral_model(particle="proton"):
"""Cosmic a cosmic ray spectral model at Earth.
These are the spectra assumed in this CTA study:
Table 3 in https://ui.adsabs.harvard.edu/abs/2013APh....43..171B
The spectrum given is a differential flux ``dnde`` in units of
``cm-2 s-1 TeV-1``, as the value integrated over the whole sky.
To get a surface brightness you need to compute
``dnde / (4 * np.pi * u.sr)``.
To get the ``dnde`` in a region of solid angle ``omega``, you need
to compute ``dnde * omega / (4 * np.pi * u.sr)``.
The hadronic spectra are simple power-laws, the electron spectrum
is the sum of a power law and a log-normal component to model the
"Fermi shoulder".
Parameters
----------
particle : {'electron', 'proton', 'He', 'N', 'Si', 'Fe'}
Particle type
Returns
-------
`~gammapy.modeling.models.SpectralModel`
Spectral model (for all-sky cosmic ray flux)
"""
omega = 4 * np.pi * u.sr
if particle == "proton":
return PowerLawSpectralModel(
amplitude=0.096 * u.Unit("1 / (m2 s TeV sr)") * omega,
index=2.70,
reference=1 * u.TeV,
)
elif particle == "N":
return PowerLawSpectralModel(
amplitude=0.0719 * u.Unit("1 / (m2 s TeV sr)") * omega,
index=2.64,
reference=1 * u.TeV,
)
elif particle == "Si":
return PowerLawSpectralModel(
amplitude=0.0284 * u.Unit("1 / (m2 s TeV sr)") * omega,
index=2.66,
reference=1 * u.TeV,
)
elif particle == "Fe":
return PowerLawSpectralModel(
amplitude=0.0134 * u.Unit("1 / (m2 s TeV sr)") * omega,
index=2.63,
reference=1 * u.TeV,
)
elif particle == "electron":
return (
PowerLawSpectralModel(
amplitude=6.85e-5 * u.Unit("1 / (m2 s TeV sr)") * omega,
index=3.21,
reference=1 * u.TeV,
)
+ _LogGaussianSpectralModel(L=3.19e-3 * u.Unit("1 / (m2 s sr)") * omega)
)
else:
raise ValueError(f"Invalid particle: {particle!r}") | 0.929576 | 0.574216 |
from lapixdl.evaluation.model import BinaryDetectionMetrics, BinaryClassificationMetrics, PredictionResult, PredictionResultType
import pytest
# From https://github.com/rafaelpadilla/Object-Detection-Metrics
predictions = [
PredictionResult(.88, PredictionResultType.FP),
PredictionResult(.70, PredictionResultType.TP),
PredictionResult(.80, PredictionResultType.FP),
PredictionResult(.71, PredictionResultType.FP),
PredictionResult(.54, PredictionResultType.TP),
PredictionResult(.74, PredictionResultType.FP),
PredictionResult(.18, PredictionResultType.TP),
PredictionResult(.67, PredictionResultType.FP),
PredictionResult(.38, PredictionResultType.FP),
PredictionResult(.91, PredictionResultType.TP),
PredictionResult(.44, PredictionResultType.FP),
PredictionResult(.35, PredictionResultType.FP),
PredictionResult(.78, PredictionResultType.FP),
PredictionResult(.45, PredictionResultType.FP),
PredictionResult(.14, PredictionResultType.FP),
PredictionResult(.62, PredictionResultType.TP),
PredictionResult(.44, PredictionResultType.FP),
PredictionResult(.95, PredictionResultType.TP),
PredictionResult(.23, PredictionResultType.FP),
PredictionResult(.45, PredictionResultType.FP),
PredictionResult(.84, PredictionResultType.FP),
PredictionResult(.43, PredictionResultType.FP),
PredictionResult(.48, PredictionResultType.TP),
PredictionResult(.95, PredictionResultType.FP)
]
# Tally true and false positives from the fixture above; the ground
# truth contains 15 boxes, so the undetected remainder counts as FN.
TP = sum(1 for prediction in predictions
         if prediction.type == PredictionResultType.TP)
FP = sum(1 for prediction in predictions
         if prediction.type == PredictionResultType.FP)
FN = 15 - TP
def test_gt_count():
    """gt_count equals TP + FN, i.e. the 15 ground-truth detections."""
    classification = BinaryClassificationMetrics(cls='a', FN=FN, TP=TP, FP=FP)
    detection = BinaryDetectionMetrics(classification, 0, predictions)
    assert detection.gt_count == 15
def test_pred_count():
    """predicted_count equals the number of predictions in the fixture."""
    classification = BinaryClassificationMetrics(cls='a', FN=FN, TP=TP, FP=FP)
    detection = BinaryDetectionMetrics(classification, 0, predictions)
    assert detection.pred_count == 24 if False else detection.predicted_count == 24
def test_iou():
    """The IoU passed to the constructor is exposed unchanged."""
    classification = BinaryClassificationMetrics(cls='a', FN=FN, TP=TP, FP=FP)
    detection = BinaryDetectionMetrics(classification, 10, predictions)
    assert detection.iou == 10
def test_precision_recall_curve():
    """The curve matches the reference implementation from
    https://github.com/rafaelpadilla/Object-Detection-Metrics.

    One point per prediction in descending-confidence order; the first
    element of each pair increases monotonically (cumulative recall),
    the second fluctuates (precision).
    """
    bin_class = BinaryClassificationMetrics(cls='a', FN=FN, TP=TP, FP=FP)
    metrics = BinaryDetectionMetrics(bin_class, 10, predictions)
    # Round to 4 decimals so float noise cannot break the comparison.
    rounded_pr_curve = [(round(rp[0], 4), round(rp[1], 4))
                        for rp in metrics.precision_recall_curve]
    expected_curve = [
        (0.0667, 1.0000),
        (0.0667, 0.5000),
        (0.1333, 0.6667),
        (0.1333, 0.5000),
        (0.1333, 0.4000),
        (0.1333, 0.3333),
        (0.1333, 0.2857),
        (0.1333, 0.2500),
        (0.1333, 0.2222),
        (0.2000, 0.3000),
        (0.2000, 0.2727),
        (0.2667, 0.3333),
        (0.3333, 0.3846),
        (0.4000, 0.4286),
        (0.4000, 0.4000),
        (0.4000, 0.3750),
        (0.4000, 0.3529),
        (0.4000, 0.3333),
        (0.4000, 0.3158),
        (0.4000, 0.3000),
        (0.4000, 0.2857),
        (0.4000, 0.2727),
        (0.4667, 0.3043),
        (0.4667, 0.2917),
    ]
    assert rounded_pr_curve == expected_curve
def test_average_precision():
    """AP matches the reference: 11-point interpolation (argument 11)
    vs. all-point interpolation (no argument, asserted below)."""
    bin_class = BinaryClassificationMetrics(cls='a', FN=FN, TP=TP, FP=FP)
    metrics = BinaryDetectionMetrics(bin_class, 10, predictions)
    assert round(metrics.average_precision(11), 4) == .2684
assert round(metrics.average_precision(), 4) == .2457 | tests/evaluation/model/test_BinaryDetectionMetrics.py | from lapixdl.evaluation.model import BinaryDetectionMetrics, BinaryClassificationMetrics, PredictionResult, PredictionResultType
import pytest
# From https://github.com/rafaelpadilla/Object-Detection-Metrics
predictions = [
PredictionResult(.88, PredictionResultType.FP),
PredictionResult(.70, PredictionResultType.TP),
PredictionResult(.80, PredictionResultType.FP),
PredictionResult(.71, PredictionResultType.FP),
PredictionResult(.54, PredictionResultType.TP),
PredictionResult(.74, PredictionResultType.FP),
PredictionResult(.18, PredictionResultType.TP),
PredictionResult(.67, PredictionResultType.FP),
PredictionResult(.38, PredictionResultType.FP),
PredictionResult(.91, PredictionResultType.TP),
PredictionResult(.44, PredictionResultType.FP),
PredictionResult(.35, PredictionResultType.FP),
PredictionResult(.78, PredictionResultType.FP),
PredictionResult(.45, PredictionResultType.FP),
PredictionResult(.14, PredictionResultType.FP),
PredictionResult(.62, PredictionResultType.TP),
PredictionResult(.44, PredictionResultType.FP),
PredictionResult(.95, PredictionResultType.TP),
PredictionResult(.23, PredictionResultType.FP),
PredictionResult(.45, PredictionResultType.FP),
PredictionResult(.84, PredictionResultType.FP),
PredictionResult(.43, PredictionResultType.FP),
PredictionResult(.48, PredictionResultType.TP),
PredictionResult(.95, PredictionResultType.FP)
]
TP = len([prediction for prediction in predictions if prediction.type ==
PredictionResultType.TP])
FP = len([prediction for prediction in predictions if prediction.type ==
PredictionResultType.FP])
FN = 15 - TP
def test_gt_count():
bin_class = BinaryClassificationMetrics(cls='a', FN=FN, TP=TP, FP=FP)
metrics = BinaryDetectionMetrics(bin_class, 0, predictions)
assert metrics.gt_count == 15
def test_pred_count():
bin_class = BinaryClassificationMetrics(cls='a', FN=FN, TP=TP, FP=FP)
metrics = BinaryDetectionMetrics(bin_class, 0, predictions)
assert metrics.predicted_count == 24
def test_iou():
bin_class = BinaryClassificationMetrics(cls='a', FN=FN, TP=TP, FP=FP)
metrics = BinaryDetectionMetrics(bin_class, 10, predictions)
assert metrics.iou == 10
def test_precision_recall_curve():
bin_class = BinaryClassificationMetrics(cls='a', FN=FN, TP=TP, FP=FP)
metrics = BinaryDetectionMetrics(bin_class, 10, predictions)
rounded_pr_curve = [(round(rp[0], 4), round(rp[1], 4))
for rp in metrics.precision_recall_curve]
expected_curve = [
(0.0667, 1.0000),
(0.0667, 0.5000),
(0.1333, 0.6667),
(0.1333, 0.5000),
(0.1333, 0.4000),
(0.1333, 0.3333),
(0.1333, 0.2857),
(0.1333, 0.2500),
(0.1333, 0.2222),
(0.2000, 0.3000),
(0.2000, 0.2727),
(0.2667, 0.3333),
(0.3333, 0.3846),
(0.4000, 0.4286),
(0.4000, 0.4000),
(0.4000, 0.3750),
(0.4000, 0.3529),
(0.4000, 0.3333),
(0.4000, 0.3158),
(0.4000, 0.3000),
(0.4000, 0.2857),
(0.4000, 0.2727),
(0.4667, 0.3043),
(0.4667, 0.2917),
]
assert rounded_pr_curve == expected_curve
def test_average_precision():
bin_class = BinaryClassificationMetrics(cls='a', FN=FN, TP=TP, FP=FP)
metrics = BinaryDetectionMetrics(bin_class, 10, predictions)
assert round(metrics.average_precision(11), 4) == .2684
assert round(metrics.average_precision(), 4) == .2457 | 0.831759 | 0.540681 |
from common.config import *
from components.dataset import *
from grammar.grammar import Grammar
from grammar.turk.turk_transition_system import TurkTransitionSystem
from models.ASN import ASNParser
from models import nn_utils
from torch import optim
import os
import time
def train(args):
    """Train an ASN parser on the TURK grammar and checkpoint the best model.

    Loads train/dev datasets, vocab and ASDL grammar from the paths in
    *args*, optimizes with Adam, and after each epoch (once past
    args.run_val_after) evaluates exact-AST-match accuracy on the dev
    set, saving the parser whenever accuracy ties or beats the best so
    far -- so on ties the most recent checkpoint wins.

    NOTE(review): evaluation divides by len(match_results) and will
    fail on an empty dev set; guarded only by run_val_after.
    """
    train_set = Dataset.from_bin_file(args.train_file)
    if args.dev_file:
        dev_set = Dataset.from_bin_file(args.dev_file)
    else: dev_set = Dataset(examples=[])
    vocab = pickle.load(open(args.vocab, 'rb'))
    grammar = Grammar.from_text(open(args.asdl_file).read())
    # transition_system = Registrable.by_name(args.transition_system)(grammar)
    transition_system = TurkTransitionSystem(grammar)
    parser = ASNParser(args, transition_system, vocab)
    nn_utils.glorot_init(parser.parameters())
    optimizer = optim.Adam(parser.parameters(), lr=args.lr)
    best_acc = 0.0
    log_every = args.log_every
    train_begin = time.time()
    for epoch in range(1, args.max_epoch + 1):
        train_iter = 0
        loss_val = 0.    # running loss since the last log line
        epoch_loss = 0.  # total loss over the whole epoch
        parser.train()
        epoch_begin = time.time()
        for batch_example in train_set.batch_iter(batch_size=args.batch_size, shuffle=False):
            optimizer.zero_grad()
            # score() is summed for logging and averaged for the
            # backward pass -- apparently a per-example loss tensor.
            loss = parser.score(batch_example)
            loss_val += torch.sum(loss).data.item()
            epoch_loss += torch.sum(loss).data.item()
            loss = torch.mean(loss)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(parser.parameters(), args.clip_grad)
            optimizer.step()
            train_iter += 1
            if train_iter % log_every == 0:
                print("[epoch {}, step {}] loss: {:.3f}".format(epoch, train_iter, loss_val / (log_every * args.batch_size )))
                loss_val = 0.
        # print(epoch, 'Train loss', '{:.3f}'.format(epoch_loss / len(train_set)), 'time elapsed %d' % (time.time() - epoch_begin))
        print('[epoch {}] train loss {:.3f}, epoch time {:.0f}, total time {:.0f}'.format(epoch, epoch_loss / len(train_set), time.time() - epoch_begin, time.time() - train_begin) )
        if epoch > args.run_val_after:
            eval_begin = time.time()
            parser.eval()
            with torch.no_grad():
                parse_results = [parser.naive_parse(ex) for ex in dev_set]
            # Exact-match comparison of predicted vs. gold ASTs.
            match_results = [transition_system.compare_ast(e.tgt_ast, r) for e, r in zip(dev_set, parse_results)]
            match_acc = sum(match_results) * 1. / len(match_results)
            # print('Eval Acc', match_acc)
            print('[epoch {}] eval acc {:.3f}, eval time {:.0f}'.format(epoch, match_acc, time.time() - eval_begin))
            if match_acc >= best_acc:
                best_acc = match_acc
                parser.save(args.save_to)
if __name__ == '__main__':
args = parse_args('train')
train(args) | train.py | from common.config import *
from components.dataset import *
from grammar.grammar import Grammar
from grammar.turk.turk_transition_system import TurkTransitionSystem
from models.ASN import ASNParser
from models import nn_utils
from torch import optim
import os
import time
def train(args):
train_set = Dataset.from_bin_file(args.train_file)
if args.dev_file:
dev_set = Dataset.from_bin_file(args.dev_file)
else: dev_set = Dataset(examples=[])
vocab = pickle.load(open(args.vocab, 'rb'))
grammar = Grammar.from_text(open(args.asdl_file).read())
# transition_system = Registrable.by_name(args.transition_system)(grammar)
transition_system = TurkTransitionSystem(grammar)
parser = ASNParser(args, transition_system, vocab)
nn_utils.glorot_init(parser.parameters())
optimizer = optim.Adam(parser.parameters(), lr=args.lr)
best_acc = 0.0
log_every = args.log_every
train_begin = time.time()
for epoch in range(1, args.max_epoch + 1):
train_iter = 0
loss_val = 0.
epoch_loss = 0.
parser.train()
epoch_begin = time.time()
for batch_example in train_set.batch_iter(batch_size=args.batch_size, shuffle=False):
optimizer.zero_grad()
loss = parser.score(batch_example)
loss_val += torch.sum(loss).data.item()
epoch_loss += torch.sum(loss).data.item()
loss = torch.mean(loss)
loss.backward()
torch.nn.utils.clip_grad_norm_(parser.parameters(), args.clip_grad)
optimizer.step()
train_iter += 1
if train_iter % log_every == 0:
print("[epoch {}, step {}] loss: {:.3f}".format(epoch, train_iter, loss_val / (log_every * args.batch_size )))
loss_val = 0.
# print(epoch, 'Train loss', '{:.3f}'.format(epoch_loss / len(train_set)), 'time elapsed %d' % (time.time() - epoch_begin))
print('[epoch {}] train loss {:.3f}, epoch time {:.0f}, total time {:.0f}'.format(epoch, epoch_loss / len(train_set), time.time() - epoch_begin, time.time() - train_begin) )
if epoch > args.run_val_after:
eval_begin = time.time()
parser.eval()
with torch.no_grad():
parse_results = [parser.naive_parse(ex) for ex in dev_set]
match_results = [transition_system.compare_ast(e.tgt_ast, r) for e, r in zip(dev_set, parse_results)]
match_acc = sum(match_results) * 1. / len(match_results)
# print('Eval Acc', match_acc)
print('[epoch {}] eval acc {:.3f}, eval time {:.0f}'.format(epoch, match_acc, time.time() - eval_begin))
if match_acc >= best_acc:
best_acc = match_acc
parser.save(args.save_to)
if __name__ == '__main__':
args = parse_args('train')
train(args) | 0.673729 | 0.199639 |
import json
import time
from indy import ledger, did, wallet, pool
from src.utils import get_pool_genesis_txn_path, run_coroutine, PROTOCOL_VERSION
import logging
logger = logging.getLogger(__name__)
async def demo():
    """Minimal end-to-end Indy ledger walkthrough.

    A Trustee (well-known seed) registers a freshly created user DID
    via a NYM transaction; the user then publishes an ATTRIB
    transaction, which must be acknowledged with op == 'REPLY'.
    All wallets, pools and configs are torn down afterwards.
    """
    logger.info("Ledger sample -> started")
    # Set protocol version 2 to work with Indy Node 1.4
    await pool.set_protocol_version(PROTOCOL_VERSION)
    trustee = {
        'seed': '000000000000000000000000Trustee1',
        'wallet_config': json.dumps({'id': 'trustee_wallet'}),
        'wallet_credentials': json.dumps({'key': 'trustee_wallet_key'}),
        'pool_name': 'trustee_pool',
    }
    # 1. Trustee open pool ledger
    trustee['genesis_txn_path'] = get_pool_genesis_txn_path(trustee['pool_name'])
    trustee['pool_config'] = json.dumps({"genesis_txn": str(trustee['genesis_txn_path'])})
    await pool.create_pool_ledger_config(trustee['pool_name'], trustee['pool_config'])
    trustee['pool'] = await pool.open_pool_ledger(trustee['pool_name'], None)
    # 2. Create Trustee Wallet and Get Wallet Handle
    await wallet.create_wallet(trustee['wallet_config'], trustee['wallet_credentials'])
    trustee['wallet'] = await wallet.open_wallet(trustee['wallet_config'], trustee['wallet_credentials'])
    # 3. Create Trustee DID (deterministic: derived from the seed above)
    (trustee['did'], trustee['verkey']) = \
        await did.create_and_store_my_did(trustee['wallet'], json.dumps({"seed": trustee['seed']}))
    # 4. User init
    user = {
        'wallet_config': json.dumps({'id': 'user_wallet'}),
        'wallet_credentials': json.dumps({'key': 'user_wallet_key'}),
        'pool_name': 'user_pool'
    }
    user['genesis_txn_path'] = get_pool_genesis_txn_path(user['pool_name'])
    user['pool_config'] = json.dumps({"genesis_txn": str(user['genesis_txn_path'])})
    await pool.create_pool_ledger_config(user['pool_name'], user['pool_config'])
    user['pool'] = await pool.open_pool_ledger(user['pool_name'], None)
    await wallet.create_wallet(user['wallet_config'], user['wallet_credentials'])
    user['wallet'] = await wallet.open_wallet(user['wallet_config'], user['wallet_credentials'])
    # 5. User create DID (random: empty "{}" options, no seed)
    (user['did'], user['verkey']) = await did.create_and_store_my_did(user['wallet'], "{}")
    trustee['user_did'] = user['did']
    trustee['user_verkey'] = user['verkey']
    # 6. Trustee prepare and send NYM transaction for user
    nym_req = await ledger.build_nym_request(trustee['did'], trustee['user_did'], trustee['user_verkey'], None, None)
    await ledger.sign_and_submit_request(trustee['pool'], trustee['wallet'], trustee['did'], nym_req)
    # 7. User send ATTRIB transaction to Ledger
    attr_req = \
        await ledger.build_attrib_request(user['did'], user['did'], None, '{"endpoint":{"ha":"127.0.0.1:5555"}}', None)
    resp = await ledger.sign_and_submit_request(user['pool'], user['wallet'], user['did'], attr_req)
    # The write must be acknowledged by the ledger.
    assert json.loads(resp)['op'] == 'REPLY'
    # 8. Close and delete Trustee wallet
    await wallet.close_wallet(trustee['wallet'])
    await wallet.delete_wallet(trustee['wallet_config'], trustee['wallet_credentials'])
    # 9. Close and delete User wallet
    await wallet.close_wallet(user['wallet'])
    await wallet.delete_wallet(user['wallet_config'], user['wallet_credentials'])
    # 10. Close Trustee and User pools
    await pool.close_pool_ledger(trustee['pool'])
    await pool.close_pool_ledger(user['pool'])
    # 11 Delete pool ledger config
    await pool.delete_pool_ledger_config(trustee['pool_name'])
    await pool.delete_pool_ledger_config(user['pool_name'])
    logger.info("Ledger sample -> completed")
if __name__ == '__main__':
run_coroutine(demo)
time.sleep(1) # FIXME waiting for libindy thread complete | samples/python/src/ledger.py | import json
import time
from indy import ledger, did, wallet, pool
from src.utils import get_pool_genesis_txn_path, run_coroutine, PROTOCOL_VERSION
import logging
logger = logging.getLogger(__name__)
async def demo():
    """Run the end-to-end ledger sample.

    Creates pools, wallets and DIDs for a trustee and a user, has the
    trustee publish the user's NYM, lets the user write an ATTRIB, and
    then tears everything down.  Every step is an ordered, side-effecting
    libindy call; any failure propagates as an exception.
    """
    logger.info("Ledger sample -> started")
    # Set protocol version 2 to work with Indy Node 1.4
    await pool.set_protocol_version(PROTOCOL_VERSION)
    trustee = {
        'seed': '000000000000000000000000Trustee1',
        'wallet_config': json.dumps({'id': 'trustee_wallet'}),
        'wallet_credentials': json.dumps({'key': 'trustee_wallet_key'}),
        'pool_name': 'trustee_pool',
    }
    # 1. Trustee open pool ledger
    trustee['genesis_txn_path'] = get_pool_genesis_txn_path(trustee['pool_name'])
    trustee['pool_config'] = json.dumps({"genesis_txn": str(trustee['genesis_txn_path'])})
    await pool.create_pool_ledger_config(trustee['pool_name'], trustee['pool_config'])
    trustee['pool'] = await pool.open_pool_ledger(trustee['pool_name'], None)
    # 2. Create Trustee Wallet and Get Wallet Handle
    await wallet.create_wallet(trustee['wallet_config'], trustee['wallet_credentials'])
    trustee['wallet'] = await wallet.open_wallet(trustee['wallet_config'], trustee['wallet_credentials'])
    # 3. Create Trustee DID (derived from the well-known seed, so it already
    # has TRUSTEE role on the test ledger)
    (trustee['did'], trustee['verkey']) = \
        await did.create_and_store_my_did(trustee['wallet'], json.dumps({"seed": trustee['seed']}))
    # 4. User init
    user = {
        'wallet_config': json.dumps({'id': 'user_wallet'}),
        'wallet_credentials': json.dumps({'key': 'user_wallet_key'}),
        'pool_name': 'user_pool'
    }
    user['genesis_txn_path'] = get_pool_genesis_txn_path(user['pool_name'])
    user['pool_config'] = json.dumps({"genesis_txn": str(user['genesis_txn_path'])})
    await pool.create_pool_ledger_config(user['pool_name'], user['pool_config'])
    user['pool'] = await pool.open_pool_ledger(user['pool_name'], None)
    await wallet.create_wallet(user['wallet_config'], user['wallet_credentials'])
    user['wallet'] = await wallet.open_wallet(user['wallet_config'], user['wallet_credentials'])
    # 5. User create DID ("{}" = random seed)
    (user['did'], user['verkey']) = await did.create_and_store_my_did(user['wallet'], "{}")
    trustee['user_did'] = user['did']
    trustee['user_verkey'] = user['verkey']
    # 6. Trustee prepare and send NYM transaction for user
    nym_req = await ledger.build_nym_request(trustee['did'], trustee['user_did'], trustee['user_verkey'], None, None)
    await ledger.sign_and_submit_request(trustee['pool'], trustee['wallet'], trustee['did'], nym_req)
    # 7. User send ATTRIB transaction to Ledger
    attr_req = \
        await ledger.build_attrib_request(user['did'], user['did'], None, '{"endpoint":{"ha":"127.0.0.1:5555"}}', None)
    resp = await ledger.sign_and_submit_request(user['pool'], user['wallet'], user['did'], attr_req)
    # NOTE(review): assert is stripped under -O; acceptable for a sample only.
    assert json.loads(resp)['op'] == 'REPLY'
    # 8. Close and delete Trustee wallet
    await wallet.close_wallet(trustee['wallet'])
    await wallet.delete_wallet(trustee['wallet_config'], trustee['wallet_credentials'])
    # 9. Close and delete User wallet
    await wallet.close_wallet(user['wallet'])
    await wallet.delete_wallet(user['wallet_config'], user['wallet_credentials'])
    # 10. Close Trustee and User pools
    await pool.close_pool_ledger(trustee['pool'])
    await pool.close_pool_ledger(user['pool'])
    # 11 Delete pool ledger config
    await pool.delete_pool_ledger_config(trustee['pool_name'])
    await pool.delete_pool_ledger_config(user['pool_name'])
    logger.info("Ledger sample -> completed")
if __name__ == '__main__':
    # Drive the async demo to completion on a fresh event loop.
    run_coroutine(demo)
    time.sleep(1) # FIXME waiting for libindy thread complete | 0.221351 | 0.197251 |
import copy
import logging
from itertools import count
import numpy as np
import torch
from fvcore.transforms import HFlipTransform
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from detectron2.data.detection_utils import read_image
from detectron2.modeling import DatasetMapperTTA
__all__ = [
"SemanticSegmentorWithTTA",
]
class SemanticSegmentorWithTTA(nn.Module):
"""
A SemanticSegmentor with test-time augmentation enabled.
Its :meth:`__call__` method has the same interface as :meth:`SemanticSegmentor.forward`.
"""
def __init__(self, cfg, model, tta_mapper=None, batch_size=1):
"""
Args:
cfg (CfgNode):
model (SemanticSegmentor): a SemanticSegmentor to apply TTA on.
tta_mapper (callable): takes a dataset dict and returns a list of
augmented versions of the dataset dict. Defaults to
`DatasetMapperTTA(cfg)`.
batch_size (int): batch the augmented images into this batch size for inference.
"""
super().__init__()
if isinstance(model, DistributedDataParallel):
model = model.module
self.cfg = cfg.clone()
self.model = model
if tta_mapper is None:
tta_mapper = DatasetMapperTTA(cfg)
self.tta_mapper = tta_mapper
self.batch_size = batch_size
def __call__(self, batched_inputs):
"""
Same input/output format as :meth:`SemanticSegmentor.forward`
"""
def _maybe_read_image(dataset_dict):
ret = copy.copy(dataset_dict)
if "image" not in ret:
image = read_image(ret.pop("file_name"), self.model.input_format)
image = torch.from_numpy(np.ascontiguousarray(image.transpose(2, 0, 1))) # CHW
ret["image"] = image
if "height" not in ret and "width" not in ret:
ret["height"] = image.shape[1]
ret["width"] = image.shape[2]
return ret
processed_results = []
for x in batched_inputs:
result = self._inference_one_image(_maybe_read_image(x))
processed_results.append(result)
return processed_results
def _inference_one_image(self, input):
"""
Args:
input (dict): one dataset dict with "image" field being a CHW tensor
Returns:
dict: one output dict
"""
orig_shape = (input["height"], input["width"])
augmented_inputs, tfms = self._get_augmented_inputs(input)
final_predictions = None
count_predictions = 0
for input, tfm in zip(augmented_inputs, tfms):
count_predictions += 1
with torch.no_grad():
if final_predictions is None:
if any(isinstance(t, HFlipTransform) for t in tfm.transforms):
final_predictions = self.model([input])[0].pop("sem_seg").flip(dims=[2])
else:
final_predictions = self.model([input])[0].pop("sem_seg")
else:
if any(isinstance(t, HFlipTransform) for t in tfm.transforms):
final_predictions += self.model([input])[0].pop("sem_seg").flip(dims=[2])
else:
final_predictions += self.model([input])[0].pop("sem_seg")
final_predictions = final_predictions / count_predictions
return {"sem_seg": final_predictions}
    def _get_augmented_inputs(self, input):
        # Produce the TTA variants of `input` and pull out each variant's
        # geometric transform so predictions can be mapped back later.
        augmented_inputs = self.tta_mapper(input)
        tfms = [x.pop("transforms") for x in augmented_inputs]
        return augmented_inputs, tfms | mask2former/test_time_augmentation.py | import copy
import logging
from itertools import count
import numpy as np
import torch
from fvcore.transforms import HFlipTransform
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from detectron2.data.detection_utils import read_image
from detectron2.modeling import DatasetMapperTTA
__all__ = [
"SemanticSegmentorWithTTA",
]
class SemanticSegmentorWithTTA(nn.Module):
"""
A SemanticSegmentor with test-time augmentation enabled.
Its :meth:`__call__` method has the same interface as :meth:`SemanticSegmentor.forward`.
"""
def __init__(self, cfg, model, tta_mapper=None, batch_size=1):
"""
Args:
cfg (CfgNode):
model (SemanticSegmentor): a SemanticSegmentor to apply TTA on.
tta_mapper (callable): takes a dataset dict and returns a list of
augmented versions of the dataset dict. Defaults to
`DatasetMapperTTA(cfg)`.
batch_size (int): batch the augmented images into this batch size for inference.
"""
super().__init__()
if isinstance(model, DistributedDataParallel):
model = model.module
self.cfg = cfg.clone()
self.model = model
if tta_mapper is None:
tta_mapper = DatasetMapperTTA(cfg)
self.tta_mapper = tta_mapper
self.batch_size = batch_size
def __call__(self, batched_inputs):
"""
Same input/output format as :meth:`SemanticSegmentor.forward`
"""
def _maybe_read_image(dataset_dict):
ret = copy.copy(dataset_dict)
if "image" not in ret:
image = read_image(ret.pop("file_name"), self.model.input_format)
image = torch.from_numpy(np.ascontiguousarray(image.transpose(2, 0, 1))) # CHW
ret["image"] = image
if "height" not in ret and "width" not in ret:
ret["height"] = image.shape[1]
ret["width"] = image.shape[2]
return ret
processed_results = []
for x in batched_inputs:
result = self._inference_one_image(_maybe_read_image(x))
processed_results.append(result)
return processed_results
def _inference_one_image(self, input):
"""
Args:
input (dict): one dataset dict with "image" field being a CHW tensor
Returns:
dict: one output dict
"""
orig_shape = (input["height"], input["width"])
augmented_inputs, tfms = self._get_augmented_inputs(input)
final_predictions = None
count_predictions = 0
for input, tfm in zip(augmented_inputs, tfms):
count_predictions += 1
with torch.no_grad():
if final_predictions is None:
if any(isinstance(t, HFlipTransform) for t in tfm.transforms):
final_predictions = self.model([input])[0].pop("sem_seg").flip(dims=[2])
else:
final_predictions = self.model([input])[0].pop("sem_seg")
else:
if any(isinstance(t, HFlipTransform) for t in tfm.transforms):
final_predictions += self.model([input])[0].pop("sem_seg").flip(dims=[2])
else:
final_predictions += self.model([input])[0].pop("sem_seg")
final_predictions = final_predictions / count_predictions
return {"sem_seg": final_predictions}
    def _get_augmented_inputs(self, input):
        # Produce the TTA variants of `input` and pull out each variant's
        # geometric transform so predictions can be mapped back later.
        augmented_inputs = self.tta_mapper(input)
        tfms = [x.pop("transforms") for x in augmented_inputs]
        return augmented_inputs, tfms | 0.921358 | 0.44089 |
import sys
import glob
import re
from pylab import *
from scipy.ndimage.measurements import center_of_mass
from collections import defaultdict
def find_next_while_pixel(image):
    """
    Coordinates of the first white (nonzero) pixel in a binary image.
    Returns (i, j) image coordinates, or (None, None) when the image is empty.

    NOTE: the name keeps its historical "while" typo (for "white") because
    callers elsewhere in this module depend on it.
    """
    # Explicit np.nonzero instead of the bare pylab star-import name.
    coords = np.nonzero(image)
    # Guard clause with .size instead of len(...) == 0.
    if coords[0].size == 0:
        return None, None
    return coords[0][0], coords[1][0]
def construct_roi(im, i, j):
    """
    Discover the rectangular region of interest around the white pixel at
    (i, j), growing left, right and down until no white pixel touches the
    box edges.  Returns (i, j, width, height).
    """
    width, height = 1, 1
    while True:
        grew = False
        # Grow left when white pixels appear in the two columns left of the box.
        if im[i-1:i+height+1, [j-2, j-1]].any():
            j -= 1
            grew = True
        # Grow right when the rightmost column (or the one past it) is white.
        if im[i-1:i+height+1, [j+width-1, j+width]].any():
            width += 1
            grew = True
        # Grow down when the bottom row (or the one past it) is white.
        if im[[i+height-1, i+height], j-1:j+width+1].any():
            height += 1
            grew = True
        if not grew:
            return i, j, width, height
def extract_stars(image, noise_threshold):
    """
    Extract every star from the given image.
    Returns (roi_array, crop_list): the Nx4 array of [i, j, w, h] boxes and
    the corresponding rectangular sub-images.
    """
    rois = []
    crops = []
    # Work on a copy so the caller's image is untouched; zero out noise.
    image = image.copy()
    image[image < noise_threshold] = 0.0
    # Binary mask of the remaining (above-noise) pixels.
    binary = image.copy()
    binary[binary > 0] = 1
    i, j = find_next_while_pixel(binary)
    while i is not None and j is not None:
        # Grow a rectangular box around the found pixel.
        i, j, w, h = construct_roi(binary, i, j)
        rois.append([i, j, w, h])
        # Blank the box in the mask so the next search finds a new star.
        binary[i:i+h, j:j+w] = 0
        crops.append(np.array(image[i:i+h, j:j+w]))
        i, j = find_next_while_pixel(binary)
    return np.array(rois), crops
def extract_stars_data(image, noise_threshold):
    """
    Input: calibration image containing stars.
    Output: Nx3 array of star rows: x, y, integrated intensity.
    """
    rois, crops = extract_stars(image, noise_threshold)
    params = []
    for (top, left, _w, _h), crop in zip(rois, crops):
        cm_i, cm_j = center_of_mass(crop)
        # Centre of mass is relative to the crop; shift back to image coords.
        params.append([left + cm_j, top + cm_i, crop.sum()])
    return np.array(params)
def sort_by_pixel_value(stars_data):
    """
    Sort a star data matrix by 30*y + x (grid weight derived from the 29-star
    row and the 20px steps), so rows come out in increasing projector pixel
    value.  Returns a new float array; the input is not modified.
    """
    x_num = 29
    x_step = 20
    y_step = 20
    weight = (x_num + 1) * x_step / y_step
    # Match the original dtype behaviour: the float key column used to upcast
    # the whole array to float64.
    data = np.asarray(stars_data, dtype=float)
    key = data[:, 1] * weight + data[:, 0]
    # Stable argsort replaces the append-key / python sorted / strip-key dance
    # and preserves the relative order of equal keys, like sorted() did.
    return data[np.argsort(key, kind='stable')]
def make_calibration_curve(image, p_range, noise_threshold):
    """Plot measured star intensity against projector pixel value."""
    # Threshold, extract stars, integrate intensity, order by pixel value.
    data = sort_by_pixel_value(extract_stars_data(image, noise_threshold))
    intensities = data[:, 2]
    # Stars missing from the image are assumed to be the darkest ones.
    plot(p_range[-len(intensities):], intensities, '+', ms=1)
    show()
def calib_plot(images, p_range, noise_threshold, color, newfig=True):
    """Overlay the calibration curves of several images in one colour."""
    if newfig:
        figure()
    for image in images:
        # Threshold, extract stars, integrate intensity, order by pixel value.
        data = sort_by_pixel_value(extract_stars_data(image, noise_threshold))
        values = data[:, 2]
        # Stars missing from the image are assumed to be the darkest ones.
        plot(p_range[-len(values):], values, '-', ms=1, color=color)
    show()
def plot_all():
    """Plot the 10/20/40 ms shutter calibration curves in one figure.

    Relies on the module-level globals `all_images` and `p_range` built by
    the __main__ block.
    """
    f = figure()
    ax = f.add_subplot(111)
    # Hide the top/right spines and keep ticks on the bottom/left only.
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    calib_plot(all_images[10000], p_range, 0.2, color="red", newfig=False)
    calib_plot(all_images[20000], p_range, 0.2, color="green", newfig=False)
    calib_plot(all_images[40000], p_range, 0.2, color="blue", newfig=False)
    # A first xticks([0.0, ..., 1.0]) call was dead code: it was immediately
    # overwritten by this call, so only this tick set ever took effect.
    xticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
    yticks(np.arange(0, 65, 5)[1:])
    grid()
    xlabel('Pixel intensity')
    ylabel('Measured intensity')
    show()
"""
Calibration images should follow the naming convention: <shutter time>ST<number>.bmp
For example: 20000ST1.bmp
"""
if __name__ == "__main__":
# Load calibration range
p_range = np.loadtxt("calibration_range.txt")
folder = sys.argv[1]
print("Loading calibration images from", folder)
# Load all calibration images in a dictionary: st -> [np.array]
all_images = defaultdict(list)
for name in glob.glob(folder + "/*ST*.png"):
# Extract path and shutter time
pattern = folder + r"/([0-9]*)ST.*\.png"
path, st = re.match(pattern, name).group(0, 1)
# Load image and convert to float32
# im = imread(path)[:,:].astype(np.float32) / 255.0
im = imread(path)
if im.ndim == 3:
im = im[:,:,0]
all_images[int(st)].append(im)
total = sum([len(x) for x in all_images.values()])
print("{} images loaded, shutter times found: {}".format(total, list(all_images.keys()))) | calibration/stars.py | import sys
import glob
import re
from pylab import *
from scipy.ndimage.measurements import center_of_mass
from collections import defaultdict
def find_next_while_pixel(image):
    """
    Coordinates of the first white (nonzero) pixel in a binary image.
    Returns (i, j) image coordinates, or (None, None) when the image is empty.

    NOTE: the name keeps its historical "while" typo (for "white") because
    callers elsewhere in this module depend on it.
    """
    # Explicit np.nonzero instead of the bare pylab star-import name.
    coords = np.nonzero(image)
    # Guard clause with .size instead of len(...) == 0.
    if coords[0].size == 0:
        return None, None
    return coords[0][0], coords[1][0]
def construct_roi(im, i, j):
    """
    Discover the rectangular region of interest around the white pixel at
    (i, j), growing left, right and down until no white pixel touches the
    box edges.  Returns (i, j, width, height).
    """
    width, height = 1, 1
    while True:
        grew = False
        # Grow left when white pixels appear in the two columns left of the box.
        if im[i-1:i+height+1, [j-2, j-1]].any():
            j -= 1
            grew = True
        # Grow right when the rightmost column (or the one past it) is white.
        if im[i-1:i+height+1, [j+width-1, j+width]].any():
            width += 1
            grew = True
        # Grow down when the bottom row (or the one past it) is white.
        if im[[i+height-1, i+height], j-1:j+width+1].any():
            height += 1
            grew = True
        if not grew:
            return i, j, width, height
def extract_stars(image, noise_threshold):
    """
    Extract every star from the given image.
    Returns (roi_array, crop_list): the Nx4 array of [i, j, w, h] boxes and
    the corresponding rectangular sub-images.
    """
    rois = []
    crops = []
    # Work on a copy so the caller's image is untouched; zero out noise.
    image = image.copy()
    image[image < noise_threshold] = 0.0
    # Binary mask of the remaining (above-noise) pixels.
    binary = image.copy()
    binary[binary > 0] = 1
    i, j = find_next_while_pixel(binary)
    while i is not None and j is not None:
        # Grow a rectangular box around the found pixel.
        i, j, w, h = construct_roi(binary, i, j)
        rois.append([i, j, w, h])
        # Blank the box in the mask so the next search finds a new star.
        binary[i:i+h, j:j+w] = 0
        crops.append(np.array(image[i:i+h, j:j+w]))
        i, j = find_next_while_pixel(binary)
    return np.array(rois), crops
def extract_stars_data(image, noise_threshold):
    """
    Input: calibration image containing stars.
    Output: Nx3 array of star rows: x, y, integrated intensity.
    """
    rois, crops = extract_stars(image, noise_threshold)
    params = []
    for (top, left, _w, _h), crop in zip(rois, crops):
        cm_i, cm_j = center_of_mass(crop)
        # Centre of mass is relative to the crop; shift back to image coords.
        params.append([left + cm_j, top + cm_i, crop.sum()])
    return np.array(params)
def sort_by_pixel_value(stars_data):
    """
    Sort a star data matrix by 30*y + x (grid weight derived from the 29-star
    row and the 20px steps), so rows come out in increasing projector pixel
    value.  Returns a new float array; the input is not modified.
    """
    x_num = 29
    x_step = 20
    y_step = 20
    weight = (x_num + 1) * x_step / y_step
    # Match the original dtype behaviour: the float key column used to upcast
    # the whole array to float64.
    data = np.asarray(stars_data, dtype=float)
    key = data[:, 1] * weight + data[:, 0]
    # Stable argsort replaces the append-key / python sorted / strip-key dance
    # and preserves the relative order of equal keys, like sorted() did.
    return data[np.argsort(key, kind='stable')]
def make_calibration_curve(image, p_range, noise_threshold):
    """Plot measured star intensity against projector pixel value."""
    # Threshold, extract stars, integrate intensity, order by pixel value.
    data = sort_by_pixel_value(extract_stars_data(image, noise_threshold))
    intensities = data[:, 2]
    # Stars missing from the image are assumed to be the darkest ones.
    plot(p_range[-len(intensities):], intensities, '+', ms=1)
    show()
def calib_plot(images, p_range, noise_threshold, color, newfig=True):
    """Overlay the calibration curves of several images in one colour."""
    if newfig:
        figure()
    for image in images:
        # Threshold, extract stars, integrate intensity, order by pixel value.
        data = sort_by_pixel_value(extract_stars_data(image, noise_threshold))
        values = data[:, 2]
        # Stars missing from the image are assumed to be the darkest ones.
        plot(p_range[-len(values):], values, '-', ms=1, color=color)
    show()
def plot_all():
    """Plot the 10/20/40 ms shutter calibration curves in one figure.

    Relies on the module-level globals `all_images` and `p_range` built by
    the __main__ block.
    """
    f = figure()
    ax = f.add_subplot(111)
    # Hide the top/right spines and keep ticks on the bottom/left only.
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    calib_plot(all_images[10000], p_range, 0.2, color="red", newfig=False)
    calib_plot(all_images[20000], p_range, 0.2, color="green", newfig=False)
    calib_plot(all_images[40000], p_range, 0.2, color="blue", newfig=False)
    # A first xticks([0.0, ..., 1.0]) call was dead code: it was immediately
    # overwritten by this call, so only this tick set ever took effect.
    xticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
    yticks(np.arange(0, 65, 5)[1:])
    grid()
    xlabel('Pixel intensity')
    ylabel('Measured intensity')
    show()
"""
Calibration images should follow the naming convention: <shutter time>ST<number>.bmp
For example: 20000ST1.bmp
"""
if __name__ == "__main__":
# Load calibration range
p_range = np.loadtxt("calibration_range.txt")
folder = sys.argv[1]
print("Loading calibration images from", folder)
# Load all calibration images in a dictionary: st -> [np.array]
all_images = defaultdict(list)
for name in glob.glob(folder + "/*ST*.png"):
# Extract path and shutter time
pattern = folder + r"/([0-9]*)ST.*\.png"
path, st = re.match(pattern, name).group(0, 1)
# Load image and convert to float32
# im = imread(path)[:,:].astype(np.float32) / 255.0
im = imread(path)
if im.ndim == 3:
im = im[:,:,0]
all_images[int(st)].append(im)
total = sum([len(x) for x in all_images.values()])
print("{} images loaded, shutter times found: {}".format(total, list(all_images.keys()))) | 0.623835 | 0.519095 |
import unittest
import datetime
import os.path
from davies.compass import *
DATA_DIR = 'tests/data/compass'
# Example Compass Project with:
# - NAD83 UTM Zone 13 base location
# - Two imported Data Files
# - One with 25 cave surveys, four fixed stations
# - One with 4 surface surveys
TESTFILE = os.path.join(DATA_DIR, 'FULFORDS.MAK')
class CompassParsingTestCase(unittest.TestCase):
    """Parse the sample Compass data and test based on its known values"""

    def setUp(self):
        # Re-parse the project for every test; everything below depends on
        # the linked DAT files, so fail fast if none were found.
        self.project = Project.read(TESTFILE)
        self.assertTrue(self.project.linked_files, 'Sanity check failed: no linked_files found!')
        self.cave_survey_dat = self.project.linked_files[0]
        self.bs_survey = self.cave_survey_dat['BS']
        self.last_shot = self.bs_survey.shots[-1]
        self.shot_w_flags = self.cave_survey_dat['XS'].shots[0]

    def test_name(self):
        self.assertEqual(self.project.name, 'FULFORDS')

    def test_len(self):
        # Two linked data files (cave + surface), per the module header note.
        self.assertEqual(len(self.project), 2)

    def test_dat(self):
        dat = self.cave_survey_dat
        self.assertEqual(dat.name, 'FULFORD')
        self.assertEqual(len(dat), 25)
        self.assertTrue('BS' in dat)

    def test_survey(self):
        survey = self.bs_survey
        # NOTE(review): '<NAME>' looks like an anonymization placeholder
        # left in the fixture data.
        self.assertTrue('<NAME>' in survey.team)
        self.assertEqual(survey.date, datetime.date(1989, 2, 11))
        self.assertEqual(len(survey), 15)

    def test_shot(self):
        shot = self.last_shot
        self.assertEqual(shot['FROM'], 'BSA2')
        self.assertEqual(shot['TO'], 'BS1')
        self.assertEqual(shot['LENGTH'], 37.85)
        self.assertEqual(shot['BEARING'], 307.0)
        self.assertEqual(shot['INC'], -23.0)
        # 'LEFT' comes back as +inf for this shot (presumably an absent
        # passage dimension — confirm against the fixture).
        self.assertEqual(shot['LEFT'], float('inf'))
        # TODO: this test data doesn't have any COMMENTS

    def test_declination(self):
        shot = self.last_shot
        # `azm` applies the survey declination on top of the raw bearing.
        self.assertEqual(shot['BEARING'], 307.0)
        self.assertEqual(shot.declination, 11.18)
        self.assertEqual(shot.azm, 307.0 + 11.18)

    def test_shot_flags(self):
        self.assertEqual(self.shot_w_flags['FLAGS'], 'P')
        self.assertTrue(Exclude.PLOT in self.shot_w_flags.flags)
class CompassSpecialCharacters(unittest.TestCase):
    """Non-ASCII characters in a DAT file must survive parsing."""

    def runTest(self):
        fname = os.path.join(DATA_DIR, 'unicode.dat')
        dat = DatFile.read(fname)
        for name in dat.surveys[0].team:
            if name.startswith('Tanya'):
                # \xdf is the sharp-s; '<NAME>' looks like an anonymization
                # placeholder left in the fixture.
                self.assertEqual(name, u'<NAME>tra\xdf')
class CompassShotCorrection(unittest.TestCase):
    """Frontsight/backsight averaging and declination applied to a Shot."""

    def runTest(self):
        # TODO: add instrument correction
        shot = Shot(declination=8.5)
        shot['BEARING'] = 7.0
        shot['AZM2'] = 189.0
        shot['INC'] = -4
        shot['INC2'] = 3.5
        shot['DIST'] = 15.7
        # Consistent with reversing the backsight (189 -> 9) and averaging
        # with the 7.0 frontsight, then adding the declination.
        self.assertEqual(shot.azm, 8.0 + 8.5)
        self.assertEqual(shot.inc, -3.75)
class CompassShotFlags(unittest.TestCase):
    """Per-shot exclusion flags ('L', 'X') and comments from FLAGS.DAT."""

    def setUp(self):
        fname = os.path.join(DATA_DIR, 'FLAGS.DAT')
        dat = DatFile.read(fname)
        self.survey = dat['toc']

    def test_comment(self):
        comment_shot = self.survey.shots[8] # shot toc7-toc7a has flags and comment
        self.assertTrue('Disto' in comment_shot['COMMENTS'])

    def test_no_flags(self):
        shot = self.survey.shots[0] # shot z23-toc0 has no flags nor comments
        self.assertFalse(Exclude.LENGTH in shot.flags)
        self.assertFalse(Exclude.TOTAL in shot.flags)
        self.assertFalse(shot.flags)
        self.assertTrue(shot.is_included)
        self.assertFalse(shot.is_excluded)

    def test_length_flag(self):
        length_shot = self.survey.shots[8] # shot toc7-toc7a has 'L' flag
        self.assertTrue(Exclude.LENGTH in length_shot.flags)
        self.assertTrue(length_shot.is_excluded)
        self.assertFalse(length_shot.is_included)

    def test_total_flag(self):
        total_shot = self.survey.shots[13] # shot toc11-toc11a has 'X' flag
        self.assertTrue(Exclude.TOTAL in total_shot.flags)
        self.assertTrue(total_shot.is_excluded)
        self.assertFalse(total_shot.is_included)

    def test_length_calculations(self):
        # Recompute the three totals by hand and compare with the survey's
        # own length properties.
        survey_len, included_len, excluded_len = 0.0, 0.0, 0.0
        for shot in self.survey:
            survey_len += shot.length
            if Exclude.LENGTH in shot.flags or Exclude.TOTAL in shot.flags:
                excluded_len += shot.length
            else:
                included_len += shot.length
        self.assertEqual(self.survey.length, survey_len)
        self.assertEqual(self.survey.included_length, included_len)
        self.assertEqual(self.survey.excluded_length, excluded_len)
class OldData(unittest.TestCase):
    """Smoke test: a 1998-era DAT file parses without raising."""

    def test_old(self):
        fname = os.path.join(DATA_DIR, '1998.DAT')
        dat = DatFile.read(fname)
class DateFormatTest(unittest.TestCase):
    """Serializing a survey date must not use platform-specific codes."""

    def test_date_format(self):
        date = datetime.date(2016, 3, 14)
        survey = Survey(date=date)
        survey._serialize() # Davies 0.1.0 blows up on Windows with "Invalid format string"
class SurveyFileFormatTest(unittest.TestCase):
    """Decoding of the Compass 11/12/13/15-character file-format string.

    Layout (per the assertions below): four unit chars (bearing, length,
    passage, inclination), four passage-dimension-order chars, three or five
    shot-item-order chars, then optional backsight and LRUD-association flags.
    """

    def test_set_fmt(self):
        FMT = 'DMMD'+'LRUD'+'LAD'+'NF'
        s = Survey(file_format=FMT)
        self.assertEqual(s.bearing_units, 'D')
        self.assertEqual(s.length_units, 'M')
        self.assertEqual(s.passage_units, 'M')
        self.assertEqual(s.inclination_units, 'D')
        self.assertEqual(s.passage_dimension_order, ['L','R','U','D'])
        self.assertEqual(s.shot_item_order, ['L','A','D'])
        self.assertEqual(s.backsight, 'N')
        self.assertEqual(s.lrud_association, 'F')

    def test_fmt_defaults(self):
        # Blank format string: the optional trailing flags default to N / F.
        s = Survey(file_format=' '*11)
        self.assertEqual(s.backsight, 'N')
        self.assertEqual(s.lrud_association, 'F')

    def test_fmt_out(self):
        # Round trip: setting the attribute reproduces the same string.
        FMT = 'DMMD'+'LRUD'+'LAD'+'NF'
        s = Survey()
        s.file_format = FMT
        self.assertEqual(s.file_format, FMT)

    def test_fmt_11(self):
        # 11-char variant: backsight / LRUD-association flags absent.
        FMT = 'DMMD'+'LRUD'+'LAD'
        s = Survey(file_format=FMT)
        self.assertEqual(s.bearing_units, 'D')
        self.assertEqual(s.length_units, 'M')
        self.assertEqual(s.passage_units, 'M')
        self.assertEqual(s.inclination_units, 'D')
        self.assertEqual(s.passage_dimension_order, ['L','R','U','D'])
        self.assertEqual(s.shot_item_order, ['L','A','D'])
        #self.assertEqual(s.backsight, 'N')
        #self.assertEqual(s.lrud_association, 'F')

    def test_fmt_12(self):
        # 12-char variant: only the LRUD-association flag present.
        FMT = 'DMMD'+'LRUD'+'LAD'+'F'
        s = Survey(file_format=FMT)
        self.assertEqual(s.bearing_units, 'D')
        self.assertEqual(s.length_units, 'M')
        self.assertEqual(s.passage_units, 'M')
        self.assertEqual(s.inclination_units, 'D')
        self.assertEqual(s.passage_dimension_order, ['L','R','U','D'])
        self.assertEqual(s.shot_item_order, ['L','A','D'])
        #self.assertEqual(s.backsight, 'N')
        self.assertEqual(s.lrud_association, 'F')

    def test_fmt_13(self):
        # 13-char variant: both optional flags present.
        FMT = 'DMMD'+'LRUD'+'LAD'+'NF'
        s = Survey(file_format=FMT)
        self.assertEqual(s.bearing_units, 'D')
        self.assertEqual(s.length_units, 'M')
        self.assertEqual(s.passage_units, 'M')
        self.assertEqual(s.inclination_units, 'D')
        self.assertEqual(s.passage_dimension_order, ['L','R','U','D'])
        self.assertEqual(s.shot_item_order, ['L','A','D'])
        self.assertEqual(s.backsight, 'N')
        self.assertEqual(s.lrud_association, 'F')

    def test_fmt_15(self):
        # 15-char variant: five shot items (with redundant azimuth/inclination).
        FMT = 'DMMD'+'LRUD'+'LAaDd'+'NF'
        s = Survey(file_format=FMT)
        self.assertEqual(s.bearing_units, 'D')
        self.assertEqual(s.length_units, 'M')
        self.assertEqual(s.passage_units, 'M')
        self.assertEqual(s.inclination_units, 'D')
        self.assertEqual(s.passage_dimension_order, ['L','R','U','D'])
        self.assertEqual(s.shot_item_order, ['L','A','a','D','d'])
        self.assertEqual(s.backsight, 'N')
        self.assertEqual(s.lrud_association, 'F') | tests/test_compass.py | import unittest
import datetime
import os.path
from davies.compass import *
DATA_DIR = 'tests/data/compass'
# Example Compass Project with:
# - NAD83 UTM Zone 13 base location
# - Two imported Data Files
# - One with 25 cave surveys, four fixed stations
# - One with 4 surface surveys
TESTFILE = os.path.join(DATA_DIR, 'FULFORDS.MAK')
class CompassParsingTestCase(unittest.TestCase):
    """Parse the sample Compass data and test based on its known values"""

    def setUp(self):
        # Re-parse the project for every test; everything below depends on
        # the linked DAT files, so fail fast if none were found.
        self.project = Project.read(TESTFILE)
        self.assertTrue(self.project.linked_files, 'Sanity check failed: no linked_files found!')
        self.cave_survey_dat = self.project.linked_files[0]
        self.bs_survey = self.cave_survey_dat['BS']
        self.last_shot = self.bs_survey.shots[-1]
        self.shot_w_flags = self.cave_survey_dat['XS'].shots[0]

    def test_name(self):
        self.assertEqual(self.project.name, 'FULFORDS')

    def test_len(self):
        # Two linked data files (cave + surface), per the module header note.
        self.assertEqual(len(self.project), 2)

    def test_dat(self):
        dat = self.cave_survey_dat
        self.assertEqual(dat.name, 'FULFORD')
        self.assertEqual(len(dat), 25)
        self.assertTrue('BS' in dat)

    def test_survey(self):
        survey = self.bs_survey
        # NOTE(review): '<NAME>' looks like an anonymization placeholder
        # left in the fixture data.
        self.assertTrue('<NAME>' in survey.team)
        self.assertEqual(survey.date, datetime.date(1989, 2, 11))
        self.assertEqual(len(survey), 15)

    def test_shot(self):
        shot = self.last_shot
        self.assertEqual(shot['FROM'], 'BSA2')
        self.assertEqual(shot['TO'], 'BS1')
        self.assertEqual(shot['LENGTH'], 37.85)
        self.assertEqual(shot['BEARING'], 307.0)
        self.assertEqual(shot['INC'], -23.0)
        # 'LEFT' comes back as +inf for this shot (presumably an absent
        # passage dimension — confirm against the fixture).
        self.assertEqual(shot['LEFT'], float('inf'))
        # TODO: this test data doesn't have any COMMENTS

    def test_declination(self):
        shot = self.last_shot
        # `azm` applies the survey declination on top of the raw bearing.
        self.assertEqual(shot['BEARING'], 307.0)
        self.assertEqual(shot.declination, 11.18)
        self.assertEqual(shot.azm, 307.0 + 11.18)

    def test_shot_flags(self):
        self.assertEqual(self.shot_w_flags['FLAGS'], 'P')
        self.assertTrue(Exclude.PLOT in self.shot_w_flags.flags)
class CompassSpecialCharacters(unittest.TestCase):
    """Non-ASCII characters in a DAT file must survive parsing."""

    def runTest(self):
        fname = os.path.join(DATA_DIR, 'unicode.dat')
        dat = DatFile.read(fname)
        for name in dat.surveys[0].team:
            if name.startswith('Tanya'):
                # \xdf is the sharp-s; '<NAME>' looks like an anonymization
                # placeholder left in the fixture.
                self.assertEqual(name, u'<NAME>tra\xdf')
class CompassShotCorrection(unittest.TestCase):
    """Frontsight/backsight averaging and declination applied to a Shot."""

    def runTest(self):
        # TODO: add instrument correction
        shot = Shot(declination=8.5)
        shot['BEARING'] = 7.0
        shot['AZM2'] = 189.0
        shot['INC'] = -4
        shot['INC2'] = 3.5
        shot['DIST'] = 15.7
        # Consistent with reversing the backsight (189 -> 9) and averaging
        # with the 7.0 frontsight, then adding the declination.
        self.assertEqual(shot.azm, 8.0 + 8.5)
        self.assertEqual(shot.inc, -3.75)
class CompassShotFlags(unittest.TestCase):
    """Per-shot exclusion flags ('L', 'X') and comments from FLAGS.DAT."""

    def setUp(self):
        fname = os.path.join(DATA_DIR, 'FLAGS.DAT')
        dat = DatFile.read(fname)
        self.survey = dat['toc']

    def test_comment(self):
        comment_shot = self.survey.shots[8] # shot toc7-toc7a has flags and comment
        self.assertTrue('Disto' in comment_shot['COMMENTS'])

    def test_no_flags(self):
        shot = self.survey.shots[0] # shot z23-toc0 has no flags nor comments
        self.assertFalse(Exclude.LENGTH in shot.flags)
        self.assertFalse(Exclude.TOTAL in shot.flags)
        self.assertFalse(shot.flags)
        self.assertTrue(shot.is_included)
        self.assertFalse(shot.is_excluded)

    def test_length_flag(self):
        length_shot = self.survey.shots[8] # shot toc7-toc7a has 'L' flag
        self.assertTrue(Exclude.LENGTH in length_shot.flags)
        self.assertTrue(length_shot.is_excluded)
        self.assertFalse(length_shot.is_included)

    def test_total_flag(self):
        total_shot = self.survey.shots[13] # shot toc11-toc11a has 'X' flag
        self.assertTrue(Exclude.TOTAL in total_shot.flags)
        self.assertTrue(total_shot.is_excluded)
        self.assertFalse(total_shot.is_included)

    def test_length_calculations(self):
        # Recompute the three totals by hand and compare with the survey's
        # own length properties.
        survey_len, included_len, excluded_len = 0.0, 0.0, 0.0
        for shot in self.survey:
            survey_len += shot.length
            if Exclude.LENGTH in shot.flags or Exclude.TOTAL in shot.flags:
                excluded_len += shot.length
            else:
                included_len += shot.length
        self.assertEqual(self.survey.length, survey_len)
        self.assertEqual(self.survey.included_length, included_len)
        self.assertEqual(self.survey.excluded_length, excluded_len)
class OldData(unittest.TestCase):
    """Smoke test: a 1998-era DAT file parses without raising."""

    def test_old(self):
        fname = os.path.join(DATA_DIR, '1998.DAT')
        dat = DatFile.read(fname)
class DateFormatTest(unittest.TestCase):
    """Serializing a survey date must not use platform-specific codes."""

    def test_date_format(self):
        date = datetime.date(2016, 3, 14)
        survey = Survey(date=date)
        survey._serialize() # Davies 0.1.0 blows up on Windows with "Invalid format string"
class SurveyFileFormatTest(unittest.TestCase):
    """Decoding of the Compass 11/12/13/15-character file-format string.

    Layout (per the assertions below): four unit chars (bearing, length,
    passage, inclination), four passage-dimension-order chars, three or five
    shot-item-order chars, then optional backsight and LRUD-association flags.
    """

    def test_set_fmt(self):
        FMT = 'DMMD'+'LRUD'+'LAD'+'NF'
        s = Survey(file_format=FMT)
        self.assertEqual(s.bearing_units, 'D')
        self.assertEqual(s.length_units, 'M')
        self.assertEqual(s.passage_units, 'M')
        self.assertEqual(s.inclination_units, 'D')
        self.assertEqual(s.passage_dimension_order, ['L','R','U','D'])
        self.assertEqual(s.shot_item_order, ['L','A','D'])
        self.assertEqual(s.backsight, 'N')
        self.assertEqual(s.lrud_association, 'F')

    def test_fmt_defaults(self):
        # Blank format string: the optional trailing flags default to N / F.
        s = Survey(file_format=' '*11)
        self.assertEqual(s.backsight, 'N')
        self.assertEqual(s.lrud_association, 'F')

    def test_fmt_out(self):
        # Round trip: setting the attribute reproduces the same string.
        FMT = 'DMMD'+'LRUD'+'LAD'+'NF'
        s = Survey()
        s.file_format = FMT
        self.assertEqual(s.file_format, FMT)

    def test_fmt_11(self):
        # 11-char variant: backsight / LRUD-association flags absent.
        FMT = 'DMMD'+'LRUD'+'LAD'
        s = Survey(file_format=FMT)
        self.assertEqual(s.bearing_units, 'D')
        self.assertEqual(s.length_units, 'M')
        self.assertEqual(s.passage_units, 'M')
        self.assertEqual(s.inclination_units, 'D')
        self.assertEqual(s.passage_dimension_order, ['L','R','U','D'])
        self.assertEqual(s.shot_item_order, ['L','A','D'])
        #self.assertEqual(s.backsight, 'N')
        #self.assertEqual(s.lrud_association, 'F')

    def test_fmt_12(self):
        # 12-char variant: only the LRUD-association flag present.
        FMT = 'DMMD'+'LRUD'+'LAD'+'F'
        s = Survey(file_format=FMT)
        self.assertEqual(s.bearing_units, 'D')
        self.assertEqual(s.length_units, 'M')
        self.assertEqual(s.passage_units, 'M')
        self.assertEqual(s.inclination_units, 'D')
        self.assertEqual(s.passage_dimension_order, ['L','R','U','D'])
        self.assertEqual(s.shot_item_order, ['L','A','D'])
        #self.assertEqual(s.backsight, 'N')
        self.assertEqual(s.lrud_association, 'F')

    def test_fmt_13(self):
        # 13-char variant: both optional flags present.
        FMT = 'DMMD'+'LRUD'+'LAD'+'NF'
        s = Survey(file_format=FMT)
        self.assertEqual(s.bearing_units, 'D')
        self.assertEqual(s.length_units, 'M')
        self.assertEqual(s.passage_units, 'M')
        self.assertEqual(s.inclination_units, 'D')
        self.assertEqual(s.passage_dimension_order, ['L','R','U','D'])
        self.assertEqual(s.shot_item_order, ['L','A','D'])
        self.assertEqual(s.backsight, 'N')
        self.assertEqual(s.lrud_association, 'F')

    def test_fmt_15(self):
        # 15-char variant: five shot items (with redundant azimuth/inclination).
        FMT = 'DMMD'+'LRUD'+'LAaDd'+'NF'
        s = Survey(file_format=FMT)
        self.assertEqual(s.bearing_units, 'D')
        self.assertEqual(s.length_units, 'M')
        self.assertEqual(s.passage_units, 'M')
        self.assertEqual(s.inclination_units, 'D')
        self.assertEqual(s.passage_dimension_order, ['L','R','U','D'])
        self.assertEqual(s.shot_item_order, ['L','A','a','D','d'])
        self.assertEqual(s.backsight, 'N')
        self.assertEqual(s.lrud_association, 'F') | 0.424412 | 0.529203 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import *
from builtins import object
from copy import deepcopy
from genestack_client import FileTypes, GenestackException, Permissions, validate_constant
class FileFilter(object):
    """Base class for Genestack file-search filters.

    Each filter wraps a plain ``dict`` payload that subclasses populate in
    their constructors.  Filters compose into new filters via ``AND``/``OR``
    (or the ``&`` / ``|`` operator shorthand); composition never mutates the
    operands.
    """

    def __init__(self):
        self._dict = {}

    def get_dict(self):
        """Return a defensive deep copy of the underlying filter payload."""
        return deepcopy(self._dict)

    def AND(self, other):
        """
        Return a new filter combining this one with another one in an AND clause.

        :param other: other filter
        :type other: FileFilter
        :rtype: FileFilter
        """
        return AndFileFilter(self, other)

    def OR(self, other):
        """
        Return a new filter combining this one with another one in an OR clause.

        :param other: other filter
        :type other: FileFilter
        :rtype: FileFilter
        """
        return OrFileFilter(self, other)

    # Operator sugar: ``f & g`` and ``f | g`` delegate to AND/OR.
    __and__ = AND
    __or__ = OR
class TypeFileFilter(FileFilter):
    """
    Filter to select files with a given file type.

    See :ref:`fileTypes` for a list of possible file types.

    :param file_type: one of the ``FileTypes`` constants
    :raises GenestackException: if ``file_type`` is not a valid constant
    """
    def __init__(self, file_type):
        super(TypeFileFilter, self).__init__()
        if not validate_constant(FileTypes, file_type):
            raise GenestackException("Invalid file type")
        self._dict.update({'type': file_type})
class KeyValueFileFilter(FileFilter):
    """
    Filter to select files with a given metainfo key-value pair.

    :param key: metainfo key name
    :param value: exact metainfo value to match
    """
    def __init__(self, key, value):
        super(KeyValueFileFilter, self).__init__()
        self._dict.update({'keyValue': {'key': key, 'value': value}})
class OwnerFileFilter(FileFilter):
    """
    Filter to select files owned by a specific user.

    :param email: e-mail address identifying the owning user
    """
    def __init__(self, email):
        super(OwnerFileFilter, self).__init__()
        self._dict.update({'owner': email})
class MetainfoValuePatternFileFilter(FileFilter):
    """
    Filter to select files matching a specific substring value for a metainfo key.

    :param key: metainfo key name
    :param value: substring pattern to match against the key's value
    """
    def __init__(self, key, value):
        super(MetainfoValuePatternFileFilter, self).__init__()
        self._dict.update({'pattern': {'key': key, 'value': value}})
class ChildrenFileFilter(FileFilter):
    """
    Filter to select files that are the children or descendants of a given container.

    :param container: accession of the container file
    :param recursive: if ``True``, match all descendants, not only direct children
    """
    def __init__(self, container, recursive=False):
        super(ChildrenFileFilter, self).__init__()
        self._dict.update({'children': {'file': container, 'recursive': recursive}})
class ContainsFileFilter(FileFilter):
    """
    Filter to select containers that contain a given file.

    :param file_accession: accession of the contained file
    """
    def __init__(self, file_accession):
        super(ContainsFileFilter, self).__init__()
        self._dict.update({'contains': file_accession})
class ActualOwnerFileFilter(FileFilter):
    """
    Filter to select files that are owned by the current user.
    """
    def __init__(self):
        super(ActualOwnerFileFilter, self).__init__()
        # The 'owned' clause takes no argument; None marks "current user".
        self._dict.update({'owned': None})
class BelongsToDatasetFileFilter(FileFilter):
    """
    Same as :py:class:`~genestack_client.file_filters.ChildrenFileFilter`
    but searches for files that belong to the specified dataset.

    :param file_accession: accession of the dataset
    """
    def __init__(self, file_accession):
        super(BelongsToDatasetFileFilter, self).__init__()
        self._dict.update({'datasetAccession': file_accession})
class ActualPermissionFileFilter(FileFilter):
    """
    Filter to select files for which the current user has a specific permission.

    See :ref:`permissions`.

    :param permission: one of the ``Permissions`` constants
    :raises GenestackException: if ``permission`` is not a valid constant
    """
    def __init__(self, permission):
        super(ActualPermissionFileFilter, self).__init__()
        if not validate_constant(Permissions, permission):
            raise GenestackException("Invalid permission")
        self._dict.update({'access': permission})
class FixedValueFileFilter(FileFilter):
    """
    Fixed value filter (either ``True`` or ``False``).

    :param value: constant truth value — ``True`` matches every file,
        ``False`` matches none
    """
    def __init__(self, value):
        super(FixedValueFileFilter, self).__init__()
        self._dict.update({'fixed': value})
class HasInProvenanceFileFilter(FileFilter):
    """
    Filter to select files that have a given file in their provenance graph.

    :param file_accession: accession of the ancestor file
    """
    def __init__(self, file_accession):
        super(HasInProvenanceFileFilter, self).__init__()
        self._dict.update({'hasInProvenance': file_accession})
class PermissionFileFilter(FileFilter):
    """
    Filter to select files for which a specific group has a specific permission.

    See :ref:`permissions`.

    :param group: group accession
    :param permission: one of the ``Permissions`` constants
    :raises GenestackException: if ``permission`` is not a valid constant
    """
    def __init__(self, group, permission):
        super(PermissionFileFilter, self).__init__()
        if not validate_constant(Permissions, permission):
            raise GenestackException("Invalid permission")
        self._dict.update({'permission': {'group': group, 'value': permission}})
class NotFileFilter(FileFilter):
    """
    Negation of another :py:class:`~genestack_client.file_filters.FileFilter`

    :param other_filter: filter to negate; its payload is snapshotted, so
        later changes to ``other_filter`` do not affect this filter
    """
    def __init__(self, other_filter):
        super(NotFileFilter, self).__init__()
        self._dict.update({'not': other_filter.get_dict()})
class AndFileFilter(FileFilter):
    """
    "AND" combination of two file filters.

    :param first: left-hand filter
    :param second: right-hand filter
    """
    def __init__(self, first, second):
        super(AndFileFilter, self).__init__()
        # Snapshot both operands so later mutation cannot leak in.
        self._dict.update({'and': [first.get_dict(), second.get_dict()]})
class OrFileFilter(FileFilter):
    """
    "OR" combination of two file filters.

    :param first: left-hand filter
    :param second: right-hand filter
    """
    def __init__(self, first, second):
        super(OrFileFilter, self).__init__()
        # Snapshot both operands so later mutation cannot leak in.
        # Fix: stray dataset residue ("| genestack_client/file_filters.py |")
        # removed from the end of this line.
        self._dict.update({'or': [first.get_dict(), second.get_dict()]})
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import *
from builtins import object
from copy import deepcopy
from genestack_client import FileTypes, GenestackException, Permissions, validate_constant
class FileFilter(object):
"""
Base file filter class.
"""
def __init__(self):
self._dict = {}
def get_dict(self):
return deepcopy(self._dict)
def AND(self, other):
"""
Return a new filter combining this one with another one in an AND clause.
:param other: other filter
:type other: FileFilter
:rtype: FileFilter
"""
return AndFileFilter(self, other)
def OR(self, other):
"""
Return a new filter combining this one with another one in an OR clause.
:param other: other filter
:type other: FileFilter
:rtype: FileFilter
"""
return OrFileFilter(self, other)
def __and__(self, other):
return self.AND(other)
def __or__(self, other):
return self.OR(other)
class TypeFileFilter(FileFilter):
"""
Filter to select files with a given file type.
See :ref:`fileTypes` for a list of possible file types.
"""
def __init__(self, file_type):
super(TypeFileFilter, self).__init__()
if not validate_constant(FileTypes, file_type):
raise GenestackException("Invalid file type")
self._dict.update({'type': file_type})
class KeyValueFileFilter(FileFilter):
"""
Filter to select files with a given metainfo key-value pair.
"""
def __init__(self, key, value):
super(KeyValueFileFilter, self).__init__()
self._dict.update({'keyValue': {'key': key, 'value': value}})
class OwnerFileFilter(FileFilter):
"""
Filter to select files owned by a specific user.
"""
def __init__(self, email):
super(OwnerFileFilter, self).__init__()
self._dict.update({'owner': email})
class MetainfoValuePatternFileFilter(FileFilter):
"""
Filter to select files matching a specific substring value for a metainfo key.
"""
def __init__(self, key, value):
super(MetainfoValuePatternFileFilter, self).__init__()
self._dict.update({'pattern': {'key': key, 'value': value}})
class ChildrenFileFilter(FileFilter):
"""
Filter to select files that are the children or descendants of a given container.
"""
def __init__(self, container, recursive=False):
super(ChildrenFileFilter, self).__init__()
self._dict.update({'children': {'file': container, 'recursive': recursive}})
class ContainsFileFilter(FileFilter):
"""
Filter to select containers that contain a given file.
"""
def __init__(self, file_accession):
super(ContainsFileFilter, self).__init__()
self._dict.update({'contains': file_accession})
class ActualOwnerFileFilter(FileFilter):
"""
Filter to select files that are owned by the current user.
"""
def __init__(self):
super(ActualOwnerFileFilter, self).__init__()
self._dict.update({'owned': None})
class BelongsToDatasetFileFilter(FileFilter):
"""
Same as :py:class:`~genestack_client.file_filters.ChildrenFileFilter`
but searches for files that belong to the specified dataset.
"""
def __init__(self, file_accession):
super(BelongsToDatasetFileFilter, self).__init__()
self._dict.update({'datasetAccession': file_accession})
class ActualPermissionFileFilter(FileFilter):
"""
Filter to select files for which the current user has a specific permission.
See :ref:`permissions`.
"""
def __init__(self, permission):
super(ActualPermissionFileFilter, self).__init__()
if not validate_constant(Permissions, permission):
raise GenestackException("Invalid permission")
self._dict.update({'access': permission})
class FixedValueFileFilter(FileFilter):
"""
Fixed value filter (either ``True`` or ``False``).
"""
def __init__(self, value):
super(FixedValueFileFilter, self).__init__()
self._dict.update({'fixed': value})
class HasInProvenanceFileFilter(FileFilter):
"""
Filter to select files that have a given file in their provenance graph.
"""
def __init__(self, file_accession):
super(HasInProvenanceFileFilter, self).__init__()
self._dict.update({'hasInProvenance': file_accession})
class PermissionFileFilter(FileFilter):
"""
Filter to select files for which a specific group has a specific permission.
See :ref:`permissions`.
"""
def __init__(self, group, permission):
super(PermissionFileFilter, self).__init__()
if not validate_constant(Permissions, permission):
raise GenestackException("Invalid permission")
self._dict.update({'permission': {'group': group, 'value': permission}})
class NotFileFilter(FileFilter):
"""
Negation of another :py:class:`~genestack_client.file_filters.FileFilter`
"""
def __init__(self, other_filter):
super(NotFileFilter, self).__init__()
self._dict.update({'not': other_filter.get_dict()})
class AndFileFilter(FileFilter):
"""
"AND" combination of two file filters.
"""
def __init__(self, first, second):
super(AndFileFilter, self).__init__()
self._dict.update({'and': [first.get_dict(), second.get_dict()]})
class OrFileFilter(FileFilter):
"""
"OR" combination of two file filters.
"""
def __init__(self, first, second):
super(OrFileFilter, self).__init__()
self._dict.update({'or': [first.get_dict(), second.get_dict()]}) | 0.874104 | 0.185301 |
import json
import os
import logging
from common import (common_const, utils, line)
from validation.table_order_param_check import TableOrderParamCheck
from table_order.table_order_payment_order_info import TableOrderPaymentOrderInfo # noqa501
import paypayopa
# Environment variables
REDIRECT_URL = os.environ.get("REDIRECT_URL")
LOGGER_LEVEL = os.environ.get("LOGGER_LEVEL")
LIFF_CHANNEL_ID = os.getenv('LIFF_CHANNEL_ID', None)
# PayPay API credentials
PAY_PAY_API_KEY = os.environ.get("PAY_PAY_API_KEY")
PAY_PAY_API_SECRET = os.environ.get("PAY_PAY_API_SECRET")
PAY_PAY_API_MERCHANT_ID = os.environ.get("PAY_PAY_API_MERCHANT_ID")
# Interpret the PAY_PAY_IS_PROD env var as a boolean ('True'/'true' -> prod).
if (os.environ.get("PAY_PAY_IS_PROD") == 'True'
        or os.environ.get("PAY_PAY_IS_PROD") == 'true'):
    PAY_PAY_IS_PROD = True
else:
    PAY_PAY_IS_PROD = False
client = paypayopa.Client(auth=(PAY_PAY_API_KEY, PAY_PAY_API_SECRET),
                          production_mode=PAY_PAY_IS_PROD)
client.set_assume_merchant(PAY_PAY_API_MERCHANT_ID)
# Logging configuration
logger = logging.getLogger()
if LOGGER_LEVEL == 'DEBUG':
    logger.setLevel(logging.DEBUG)
else:
    logger.setLevel(logging.INFO)
# Initialize the payment/order table access helper
payment_order_table_controller = TableOrderPaymentOrderInfo()
def lambda_handler(event, context):
    """Create a PayPay dynamic QR code for an order and return the API result.

    Fixes: stray dataset residue removed from the final return line;
    superfluous backslash continuations inside the payload literal dropped.

    Parameters
    ----------
    event : dict
        API Gateway proxy event; ``body`` must be a JSON string containing
        at least ``idToken`` and ``paymentId``.
    context : dict
        Lambda context object (unused).

    Returns
    -------
    response : dict
        Success response wrapping the PayPay API result, or an error
        response with an appropriate HTTP status.
    """
    logger.info(event)
    if event['body'] is None:
        error_msg_display = common_const.const.MSG_ERROR_NOPARAM
        return utils.create_error_response(error_msg_display, 400)
    req_body = json.loads(event['body'])
    # Resolve the LINE user ID from the LIFF ID token.
    try:
        user_profile = line.get_profile(req_body['idToken'], LIFF_CHANNEL_ID)
        if 'error' in user_profile and 'expired' in user_profile['error_description']:  # noqa 501
            return utils.create_error_response('Forbidden', 403)
        else:
            req_body['userId'] = user_profile['sub']
    except Exception:
        # Log message kept verbatim: "an invalid ID token was used".
        logger.exception('不正なIDトークンが使用されています')
        return utils.create_error_response('Error')
    # Parameter validation.
    param_checker = TableOrderParamCheck(req_body)
    if error_msg := param_checker.check_api_payment_reserve():
        error_msg_disp = ('\n').join(error_msg)
        logger.error(error_msg_disp)
        return utils.create_error_response(error_msg_disp, status=400)  # noqa: E501
    payment_id = req_body['paymentId']
    payment_info = payment_order_table_controller.get_item(payment_id)
    amount = int(payment_info['amount'])
    # Build the PayPay dynamic-QR request payload from the stored order.
    request = {
        "merchantPaymentId": payment_id,
        "codeType": "ORDER_QR",
        "redirectUrl": f'{REDIRECT_URL}/tableorder/paymentCompleted?orderId={payment_id}',
        "redirectType": "WEB_LINK",
        "orderDescription": 'オーダー商品',
        "orderItems": [{
            "name": item['itemName'],
            "category": 'food',
            "quantity": int(item['orderNum']),
            "productId": int(item['itemId']),
            "unitPrice": {"amount": int(item['price']), "currency": "JPY"}
        } for item in payment_info['order'][0]['item']],
        "amount": {
            "amount": amount,
            "currency": "JPY"
        },
    }
    try:
        resp = client.Code.create_qr_code(request)
        logger.debug(resp)
        # Payload returned to the caller on success.
        res_body = json.dumps(resp)
    except Exception as e:
        logger.error('Occur Exception: %s', e)
        return utils.create_error_response("Error")
    else:
        try:
            if resp['resultInfo']['code'] == 'SUCCESS':
                payment_order_table_controller.update_payment_qrcode(
                    payment_id, resp['data']['codeId'])
                return utils.create_success_response(res_body)
            elif resp['resultInfo']['code'] == 'DUPLICATE_DYNAMIC_QR_REQUEST':
                # A QR code already exists for this payment: delete the stale
                # one so the client can retry with a fresh request.
                logger.error('PayPay Duplicate QR Request Error')
                client.Code.delete_qr_code(payment_info['qrcodeId'])
                return utils.create_error_response("Error")
            else:
                logger.error('PayPay Error')
                return utils.create_error_response("Error")
        except Exception as e:
            logger.error('Occur Exception: %s', e)
            return utils.create_error_response("Error")
import os
import logging
from common import (common_const, utils, line)
from validation.table_order_param_check import TableOrderParamCheck
from table_order.table_order_payment_order_info import TableOrderPaymentOrderInfo # noqa501
import paypayopa
# 環境変数
REDIRECT_URL = os.environ.get("REDIRECT_URL")
LOGGER_LEVEL = os.environ.get("LOGGER_LEVEL")
LIFF_CHANNEL_ID = os.getenv('LIFF_CHANNEL_ID', None)
# PayPay API情報
PAY_PAY_API_KEY = os.environ.get("PAY_PAY_API_KEY")
PAY_PAY_API_SECRET = os.environ.get("PAY_PAY_API_SECRET")
PAY_PAY_API_MERCHANT_ID = os.environ.get("PAY_PAY_API_MERCHANT_ID")
if (os.environ.get("PAY_PAY_IS_PROD") == 'True'
or os.environ.get("PAY_PAY_IS_PROD") == 'true'):
PAY_PAY_IS_PROD = True
else:
PAY_PAY_IS_PROD = False
client = paypayopa.Client(auth=(PAY_PAY_API_KEY, PAY_PAY_API_SECRET),
production_mode=PAY_PAY_IS_PROD)
client.set_assume_merchant(PAY_PAY_API_MERCHANT_ID)
# ログ出力の設定
logger = logging.getLogger()
if LOGGER_LEVEL == 'DEBUG':
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
# テーブル操作クラスの初期化
payment_order_table_controller = TableOrderPaymentOrderInfo()
def lambda_handler(event, context):
"""
PayPay API(reserve)の通信結果を返す
Parameters
----------
event : dict
POST時に渡されたパラメータ
context : dict
コンテキスト内容
Returns
-------
response : dict
PayPay APIの通信結果
"""
logger.info(event)
if event['body'] is None:
error_msg_display = common_const.const.MSG_ERROR_NOPARAM
return utils.create_error_response(error_msg_display, 400)
req_body = json.loads(event['body'])
# ユーザーID取得
try:
user_profile = line.get_profile(req_body['idToken'], LIFF_CHANNEL_ID)
if 'error' in user_profile and 'expired' in user_profile['error_description']: # noqa 501
return utils.create_error_response('Forbidden', 403)
else:
req_body['userId'] = user_profile['sub']
except Exception:
logger.exception('不正なIDトークンが使用されています')
return utils.create_error_response('Error')
# パラメータバリデーションチェック
param_checker = TableOrderParamCheck(req_body)
if error_msg := param_checker.check_api_payment_reserve():
error_msg_disp = ('\n').join(error_msg)
logger.error(error_msg_disp)
return utils.create_error_response(error_msg_disp, status=400) # noqa: E501
payment_id = req_body['paymentId']
payment_info = payment_order_table_controller.get_item(payment_id)
amount = int(payment_info['amount'])
request = {
"merchantPaymentId": payment_id,
"codeType": "ORDER_QR",
"redirectUrl": f'{REDIRECT_URL}/tableorder/paymentCompleted?orderId={payment_id}',
"redirectType":"WEB_LINK",
"orderDescription":'オーダー商品',
"orderItems": [ {
"name": item['itemName'], \
"category": 'food', \
"quantity": int(item['orderNum']), \
"productId": int(item['itemId']), \
"unitPrice": { "amount": int(item['price']), "currency": "JPY" } \
} for item in payment_info['order'][0]['item'] ],
"amount": {
"amount": amount,
"currency": "JPY"
},
}
try:
resp = client.Code.create_qr_code(request)
logger.debug(resp)
# 返却データ
res_body = json.dumps(resp)
except Exception as e:
logger.error('Occur Exception: %s', e)
return utils.create_error_response("Error")
else:
try:
if resp['resultInfo']['code'] == 'SUCCESS':
payment_order_table_controller.update_payment_qrcode(
payment_id, resp['data']['codeId'])
return utils.create_success_response(res_body)
elif resp['resultInfo']['code'] == 'DUPLICATE_DYNAMIC_QR_REQUEST':
logger.error('PayPay Duplicate QR Request Error')
client.Code.delete_qr_code(payment_info['qrcodeId'])
return utils.create_error_response("Error")
else:
logger.error('PayPay Error')
return utils.create_error_response("Error")
except Exception as e:
logger.error('Occur Exception: %s', e)
return utils.create_error_response("Error") | 0.30767 | 0.084266 |
"""The classes and predicates to assist validate test config for test cases."""
from dataclasses import dataclass
import enum
import logging
import re
from typing import Callable, Optional
import unittest
from packaging import version as pkg_version
from framework import xds_flags
from framework import xds_k8s_flags
logger = logging.getLogger(__name__)
class Lang(enum.Flag):
    """Supported client/server implementation languages."""

    UNKNOWN = enum.auto()
    CPP = enum.auto()
    GO = enum.auto()
    JAVA = enum.auto()
    PYTHON = enum.auto()
    NODE = enum.auto()

    def __str__(self):
        # Render as the lowercase member name, e.g. Lang.GO -> "go".
        return str(self.name).lower()

    @classmethod
    def from_string(cls, lang: str):
        """Parse *lang* case-insensitively; unrecognized names map to UNKNOWN."""
        normalized = lang.upper()
        if normalized in cls.__members__:
            return cls[normalized]
        return cls.UNKNOWN
@dataclass
class TestConfig:
    """Describes the config for the test suite."""
    client_lang: Lang
    server_lang: Lang
    version: Optional[str]

    def version_gte(self, another: str) -> bool:
        """Returns a bool for whether the version is >= another one.

        A version is greater than or equal to another version means its
        version number is greater than or equal to another version's number.
        Version "master" is always considered latest.
        E.g., master >= v1.41.x >= v1.40.x >= v1.9.x.
        Unspecified version is treated as 'master', but isn't explicitly set.
        """
        if self.version is None or self.version == 'master':
            return True
        if another == 'master':
            return False
        return self._parse_version(self.version) >= self._parse_version(another)

    def version_lt(self, another: str) -> bool:
        """Returns a bool for whether the version is < another one.

        Version "master" is always considered latest.
        E.g., v1.9.x < v1.40.x < v1.41.x < master.
        Unspecified version is treated as 'master', but isn't explicitly set.
        """
        if self.version is None or self.version == 'master':
            return False
        if another == 'master':
            return True
        return self._parse_version(self.version) < self._parse_version(another)

    def __str__(self):
        return (f"TestConfig(client_lang='{self.client_lang}', "
                f"server_lang='{self.server_lang}', version={self.version!r})")

    @staticmethod
    def _parse_version(s: str) -> pkg_version.Version:
        # Versions like "v1.41.x" are compared by their numeric prefix.
        trimmed = s[:-2] if s.endswith(".x") else s
        return pkg_version.Version(trimmed)
def _get_lang(image_name: str) -> Lang:
    """Extract the implementation language from a test image name.

    Expects a name shaped like '.../<lang>-client:tag' or
    '.../<lang>-server:tag'.
    """
    match = re.search(r'/(\w+)-(client|server):', image_name)
    # NOTE(review): like the original, a non-matching name raises
    # AttributeError here (match is None) — confirm callers rely on that.
    return Lang.from_string(match.group(1))
def evaluate_test_config(check: Callable[[TestConfig], bool]) -> None:
    """Evaluates the test config check against Abseil flags.

    Fix: stray dataset residue removed from the final logging line.

    :param check: predicate over the current TestConfig
    :raises unittest.SkipTest: when the predicate rejects the config
    """
    # NOTE(lidiz) a manual skip mechanism is needed because absl/flags
    # cannot be used in the built-in test-skipping decorators. See the
    # official FAQs:
    # https://abseil.io/docs/python/guides/flags#faqs
    test_config = TestConfig(
        client_lang=_get_lang(xds_k8s_flags.CLIENT_IMAGE.value),
        server_lang=_get_lang(xds_k8s_flags.SERVER_IMAGE.value),
        version=xds_flags.TESTING_VERSION.value)
    if not check(test_config):
        logger.info('Skipping %s', test_config)
        raise unittest.SkipTest(f'Unsupported test config: {test_config}')
    logger.info('Detected language and version: %s', test_config)
from dataclasses import dataclass
import enum
import logging
import re
from typing import Callable, Optional
import unittest
from packaging import version as pkg_version
from framework import xds_flags
from framework import xds_k8s_flags
logger = logging.getLogger(__name__)
class Lang(enum.Flag):
UNKNOWN = enum.auto()
CPP = enum.auto()
GO = enum.auto()
JAVA = enum.auto()
PYTHON = enum.auto()
NODE = enum.auto()
def __str__(self):
return str(self.name).lower()
@classmethod
def from_string(cls, lang: str):
try:
return cls[lang.upper()]
except KeyError:
return cls.UNKNOWN
@dataclass
class TestConfig:
"""Describes the config for the test suite."""
client_lang: Lang
server_lang: Lang
version: Optional[str]
def version_gte(self, another: str) -> bool:
"""Returns a bool for whether the version is >= another one.
A version is greater than or equal to another version means its version
number is greater than or equal to another version's number. Version
"master" is always considered latest.
E.g., master >= v1.41.x >= v1.40.x >= v1.9.x.
Unspecified version is treated as 'master', but isn't explicitly set.
"""
if self.version == 'master' or self.version is None:
return True
if another == 'master':
return False
return self._parse_version(self.version) >= self._parse_version(another)
def version_lt(self, another: str) -> bool:
"""Returns a bool for whether the version is < another one.
Version "master" is always considered latest.
E.g., v1.9.x < v1.40.x < v1.41.x < master.
Unspecified version is treated as 'master', but isn't explicitly set.
"""
if self.version == 'master' or self.version is None:
return False
if another == 'master':
return True
return self._parse_version(self.version) < self._parse_version(another)
def __str__(self):
return (f"TestConfig(client_lang='{self.client_lang}', "
f"server_lang='{self.server_lang}', version={self.version!r})")
@staticmethod
def _parse_version(s: str) -> pkg_version.Version:
if s.endswith(".x"):
s = s[:-2]
return pkg_version.Version(s)
def _get_lang(image_name: str) -> Lang:
return Lang.from_string(
re.search(r'/(\w+)-(client|server):', image_name).group(1))
def evaluate_test_config(check: Callable[[TestConfig], bool]) -> None:
"""Evaluates the test config check against Abseil flags."""
# NOTE(lidiz) a manual skip mechanism is needed because absl/flags
# cannot be used in the built-in test-skipping decorators. See the
# official FAQs:
# https://abseil.io/docs/python/guides/flags#faqs
test_config = TestConfig(
client_lang=_get_lang(xds_k8s_flags.CLIENT_IMAGE.value),
server_lang=_get_lang(xds_k8s_flags.SERVER_IMAGE.value),
version=xds_flags.TESTING_VERSION.value)
if not check(test_config):
logger.info('Skipping %s', test_config)
raise unittest.SkipTest(f'Unsupported test config: {test_config}')
logger.info('Detected language and version: %s', test_config) | 0.89217 | 0.358718 |
from django.contrib.auth import authenticate, get_user_model
from django.db.models import Q
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
from rest_framework.authtoken.serializers import AuthTokenSerializer
from .models import (Category, Question, Comment, Request, Expenses, Stage, Reward, DigitalCategory,
RequestComment)
from .models import Messages, Expert
User = get_user_model()
class CustomAuthTokenSerializer(AuthTokenSerializer):
    """DRF auth-token serializer that authenticates by email instead of
    username."""
    # Drop the inherited 'username' field; 'email' replaces it.
    username = None
    email = serializers.CharField(
        label=_("Email"),
        write_only=True
    )

    def validate(self, attrs):
        """Authenticate with email+password; on success stash the user in
        ``attrs['user']``, otherwise raise a ValidationError."""
        email = attrs.get('email')
        password = attrs.get('password')
        if email and password:
            user = authenticate(request=self.context.get('request'),
                                email=email, password=password)
            # The authenticate call simply returns None for is_active=False
            # users. (Assuming the default ModelBackend authentication
            # backend.)
            if not user:
                msg = _('Unable to log in with provided credentials.')
                raise serializers.ValidationError(msg, code='authorization')
        else:
            msg = _('Must include "email" and "password".')
            raise serializers.ValidationError(msg, code='authorization')
        attrs['user'] = user
        return attrs
class UserInfoSerializer(serializers.ModelSerializer):
    """Detailed user profile, plus the user's requests and the private
    message history between the authenticated user and this user."""
    requests = serializers.SerializerMethodField()
    messages = serializers.SerializerMethodField()

    class Meta:
        model = User
        fields = ('id', 'full_name', 'position', 'department', 'education', 'messages',
                  'date_of_birth', 'experience', 'is_staff', 'email', 'phone', 'requests')

    @staticmethod
    def get_requests(obj):
        # Serialize every request associated with the user.
        return RequestSerializer(obj.requests, many=True).data

    def get_messages(self, obj):
        """Return the conversation between the requesting user and *obj*,
        newest first; empty when a user views their own profile."""
        author = self.context['view'].request.user
        recipient = obj
        if author.id == recipient.id:
            return []
        # Messages in either direction between the two users.
        messages = Messages.objects.filter(
            Q(author=author, recipient=recipient) |
            Q(author=recipient, recipient=author)).order_by('-time')
        return [{'is_mine': message.author.id == author.id, 'text': message.text,
                 'date': message.time.strftime("%Y-%m-%d %H:%M:%S")} for message in messages]
class QuestionSerializer(serializers.ModelSerializer):
    """Plain model serializer for Question."""
    class Meta:
        model = Question
        fields = ('pk', 'name', 'description', 'author', 'category', 'ask_date')
class CategorySerializer(serializers.ModelSerializer):
    """Plain model serializer for Category."""
    class Meta:
        model = Category
        fields = ('pk', 'name', 'description')
class CommentSerializer(serializers.ModelSerializer):
    """Plain model serializer for question comments."""
    class Meta:
        model = Comment
        fields = ('text', 'author', 'question')
class DigitalCategorySerializer(serializers.ModelSerializer):
    """Plain model serializer exposing all DigitalCategory fields."""
    class Meta:
        model = DigitalCategory
        fields = "__all__"
class ExpensesSerializer(serializers.ModelSerializer):
    """Plain model serializer exposing all Expenses fields."""
    class Meta:
        model = Expenses
        fields = "__all__"
class StageSerializer(serializers.ModelSerializer):
    """Plain model serializer exposing all Stage fields."""
    class Meta:
        model = Stage
        fields = "__all__"
class RewardSerializer(serializers.ModelSerializer):
    """Plain model serializer exposing all Reward fields."""
    class Meta:
        model = Reward
        fields = "__all__"
class AuthorSerializer(serializers.ModelSerializer):
    """Read-oriented user serializer used for nested author/recipient data."""
    class Meta:
        model = User
        fields = ('id', 'full_name', 'position', 'department', 'education',
                  'date_of_birth', 'experience', 'is_staff', 'email', 'phone')
class RequestCommentSerializer(serializers.ModelSerializer):
    """Serializer for comments on a Request; creation also links the new
    comment to the request identified by the URL kwarg ``request_pk``."""
    class Meta:
        model = RequestComment
        fields = "__all__"

    def create(self, validated_data):
        comment = super().create(validated_data)
        # Attach the comment to the request addressed by the nested route.
        request = Request.objects.get(id=self.context['view'].kwargs['request_pk'])
        request.comments.add(comment.id)
        return comment
class RequestSerializer(serializers.ModelSerializer):
    """Full read/write serializer for Request with nested expenses, stages,
    rewards, authors and comments.

    Fixes: 'status' and 'authors' were listed twice in ``Meta.fields``;
    ``request.authors.add(*authors_ids)`` was executed twice in ``create``.
    """
    digital_categories = serializers.PrimaryKeyRelatedField(
        many=True, queryset=DigitalCategory.objects.all())
    # Write-only author pks; read side is the nested 'authors' field below.
    authors_ids = serializers.PrimaryKeyRelatedField(
        many=True, queryset=User.objects.all(), write_only=True)
    authors = AuthorSerializer(many=True, read_only=True)
    expenses = ExpensesSerializer(many=True)
    stages = StageSerializer(many=True)
    rewards = RewardSerializer(many=True)
    created_by = AuthorSerializer(read_only=True)
    comments = RequestCommentSerializer(many=True, read_only=True)
    # Expose the human-readable status label rather than the raw value.
    status = serializers.CharField(source='get_status_display', required=False)

    class Meta:
        model = Request
        fields = (
            'id', 'title', 'is_digital_categories', 'digital_categories', 'description',
            'authors_ids', 'characteristic', 'expenses', 'stages', 'expectations',
            'authors', 'rewards', 'status', 'is_saving_money', 'created_at',
            'created_by', 'comments', 'is_draft', 'likes')
        extra_kwargs = {
            'created_at': {'read_only': True}
        }

    def create(self, validated_data):
        """Create a Request plus its nested expenses/stages/rewards and link
        the related objects; non-draft requests start in 'registration'."""
        digital_categories_ids = validated_data.pop('digital_categories', [])
        # Persist nested objects first, collecting their pks for the M2M links.
        expense_ids = [
            Expenses.objects.create(name=expense['name'], cost=expense['cost']).id
            for expense in validated_data.pop('expenses', [])
        ]
        stages_ids = [
            Stage.objects.create(name=stage['name'],
                                 count_of_days=stage['count_of_days']).id
            for stage in validated_data.pop('stages', [])
        ]
        authors_ids = validated_data.pop('authors_ids', [])
        rewards_ids = [
            Reward.objects.create(
                author=reward['author'],
                percentage=reward['percentage'],
                date=reward.get('date')
            ).id
            for reward in validated_data.pop('rewards', [])
        ]
        request = Request.objects.create(**validated_data, created_by=self.context['request'].user)
        # A submitted (non-draft) request immediately enters registration.
        if not validated_data.get('is_draft'):
            request.status = 'registration'
            request.save()
        request.digital_categories.add(*digital_categories_ids)
        request.expenses.add(*expense_ids)
        request.stages.add(*stages_ids)
        request.authors.add(*authors_ids)
        request.rewards.add(*rewards_ids)
        return request

    def update(self, instance, validated_data):
        """Update a Request; promoting a draft to non-draft moves it to
        'registration', and any new author pks are appended."""
        is_draft = validated_data.get('is_draft')
        if is_draft is not None:
            if not is_draft:
                instance.status = 'registration'
                instance.save()
        authors_ids = validated_data.pop('authors_ids', [])
        if authors_ids:
            instance.authors.add(*authors_ids)
        return super().update(instance, validated_data)
class ExpertSerializer(serializers.ModelSerializer):
    """Expert serializer; depth=1 expands the related user inline."""
    class Meta:
        model = Expert
        fields = ('user', 'organization', 'email_text')
        depth = 1
class MessagesSerializer(serializers.ModelSerializer):
    """Serializer for direct messages between users; the author is always
    the requesting user.

    Fix: stray dataset residue ("| api/serializers.py |") removed from the
    final return line.
    """
    author = AuthorSerializer(read_only=True)
    # Write-only recipient pk; read side is the nested 'recipient' below.
    recipient_id = serializers.PrimaryKeyRelatedField(write_only=True, queryset=User.objects.all())
    recipient = AuthorSerializer(read_only=True)

    class Meta:
        model = Messages
        fields = ('id', 'text', 'time', 'author', 'recipient', 'recipient_id')

    def create(self, validated_data):
        author = self.context['view'].request.user
        # PrimaryKeyRelatedField yields a User instance; unwrap it to a pk.
        validated_data['recipient_id'] = validated_data['recipient_id'].id
        validated_data['author'] = author
        return super().create(validated_data)
from django.db.models import Q
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
from rest_framework.authtoken.serializers import AuthTokenSerializer
from .models import (Category, Question, Comment, Request, Expenses, Stage, Reward, DigitalCategory,
RequestComment)
from .models import Messages, Expert
User = get_user_model()
class CustomAuthTokenSerializer(AuthTokenSerializer):
username = None
email = serializers.CharField(
label=_("Email"),
write_only=True
)
def validate(self, attrs):
email = attrs.get('email')
password = attrs.get('password')
if email and password:
user = authenticate(request=self.context.get('request'),
email=email, password=password)
# The authenticate call simply returns None for is_active=False
# users. (Assuming the default ModelBackend authentication
# backend.)
if not user:
msg = _('Unable to log in with provided credentials.')
raise serializers.ValidationError(msg, code='authorization')
else:
msg = _('Must include "email" and "password".')
raise serializers.ValidationError(msg, code='authorization')
attrs['user'] = user
return attrs
class UserInfoSerializer(serializers.ModelSerializer):
requests = serializers.SerializerMethodField()
messages = serializers.SerializerMethodField()
class Meta:
model = User
fields = ('id', 'full_name', 'position', 'department', 'education', 'messages',
'date_of_birth', 'experience', 'is_staff', 'email', 'phone', 'requests')
@staticmethod
def get_requests(obj):
return RequestSerializer(obj.requests, many=True).data
def get_messages(self, obj):
author = self.context['view'].request.user
recipient = obj
if author.id == recipient.id:
return []
messages = Messages.objects.filter(
Q(author=author, recipient=recipient) |
Q(author=recipient, recipient=author)).order_by('-time')
return [{'is_mine': message.author.id == author.id, 'text': message.text,
'date': message.time.strftime("%Y-%m-%d %H:%M:%S")} for message in messages]
class QuestionSerializer(serializers.ModelSerializer):
class Meta:
model = Question
fields = ('pk', 'name', 'description', 'author', 'category', 'ask_date')
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = ('pk', 'name', 'description')
class CommentSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
fields = ('text', 'author', 'question')
class DigitalCategorySerializer(serializers.ModelSerializer):
class Meta:
model = DigitalCategory
fields = "__all__"
class ExpensesSerializer(serializers.ModelSerializer):
class Meta:
model = Expenses
fields = "__all__"
class StageSerializer(serializers.ModelSerializer):
class Meta:
model = Stage
fields = "__all__"
class RewardSerializer(serializers.ModelSerializer):
class Meta:
model = Reward
fields = "__all__"
class AuthorSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'full_name', 'position', 'department', 'education',
'date_of_birth', 'experience', 'is_staff', 'email', 'phone')
class RequestCommentSerializer(serializers.ModelSerializer):
class Meta:
model = RequestComment
fields = "__all__"
def create(self, validated_data):
comment = super().create(validated_data)
request = Request.objects.get(id=self.context['view'].kwargs['request_pk'])
request.comments.add(comment.id)
return comment
class RequestSerializer(serializers.ModelSerializer):
digital_categories = serializers.PrimaryKeyRelatedField(
many=True, queryset=DigitalCategory.objects.all())
authors_ids = serializers.PrimaryKeyRelatedField(
many=True, queryset=User.objects.all(), write_only=True)
authors = AuthorSerializer(many=True, read_only=True)
expenses = ExpensesSerializer(many=True)
stages = StageSerializer(many=True)
rewards = RewardSerializer(many=True)
created_by = AuthorSerializer(read_only=True)
comments = RequestCommentSerializer(many=True, read_only=True)
status = serializers.CharField(source='get_status_display', required=False)
class Meta:
model = Request
fields = (
'id', 'title', 'is_digital_categories', 'digital_categories', 'description', 'authors_ids',
'characteristic', 'expenses', 'stages', 'expectations', 'authors', 'rewards', 'status',
'is_saving_money', 'created_at', 'status', 'authors', 'created_by', 'comments', 'is_draft',
'likes')
extra_kwargs = {
'created_at': {'read_only': True}
}
def create(self, validated_data):
digital_categories_ids = validated_data.pop('digital_categories', [])
expenses_data = validated_data.pop('expenses', [])
expense_ids = []
for expense_data in expenses_data:
expense = Expenses.objects.create(name=expense_data['name'], cost=expense_data['cost'])
expense_ids.append(expense.id)
stages_data = validated_data.pop('stages', [])
stages_ids = []
for stage_data in stages_data:
stage = Stage.objects.create(name=stage_data['name'],
count_of_days=stage_data['count_of_days'])
stages_ids.append(stage.id)
authors_ids = validated_data.pop('authors_ids', [])
rewards_data = validated_data.pop('rewards', [])
rewards_ids = []
for reward_data in rewards_data:
date = reward_data.get('date')
reward = Reward.objects.create(
author=reward_data['author'],
percentage=reward_data['percentage'],
date=date
)
rewards_ids.append(reward.id)
request = Request.objects.create(**validated_data, created_by=self.context['request'].user)
is_draft = validated_data.get('is_draft')
if not is_draft:
request.status = 'registration'
request.save()
request.digital_categories.add(*digital_categories_ids)
request.expenses.add(*expense_ids)
request.stages.add(*stages_ids)
request.authors.add(*authors_ids)
request.rewards.add(*rewards_ids)
request.authors.add(*authors_ids)
return request
def update(self, instance, validated_data):
is_draft = validated_data.get('is_draft')
if is_draft is not None:
if not is_draft:
instance.status = 'registration'
instance.save()
authors_ids = validated_data.pop('authors_ids', [])
if authors_ids:
instance.authors.add(*authors_ids)
return super().update(instance, validated_data)
class ExpertSerializer(serializers.ModelSerializer):
class Meta:
model = Expert
fields = ('user', 'organization', 'email_text')
depth = 1
class MessagesSerializer(serializers.ModelSerializer):
author = AuthorSerializer(read_only=True)
recipient_id = serializers.PrimaryKeyRelatedField(write_only=True, queryset=User.objects.all())
recipient = AuthorSerializer(read_only=True)
class Meta:
model = Messages
fields = ('id', 'text', 'time', 'author', 'recipient', 'recipient_id')
def create(self, validated_data):
author = self.context['view'].request.user
validated_data['recipient_id'] = validated_data['recipient_id'].id
validated_data['author'] = author
message = super().create(validated_data)
return message | 0.633297 | 0.110136 |
[
{
'date': '2014-01-01',
'description': 'Capodanno',
'locale': 'it-IT',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2014-01-06',
'description': 'Epifania',
'locale': 'it-IT',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2014-04-20',
'description': 'Pasqua',
'locale': 'it-IT',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2014-04-21',
'description': 'Pasquetta',
'locale': 'it-IT',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2014-04-25',
'description': 'Festa della liberazione',
'locale': 'it-IT',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2014-05-01',
'description': 'Festa del lavoro',
'locale': 'it-IT',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2014-06-02',
'description': 'Festa della repubblica',
'locale': 'it-IT',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2014-08-15',
'description': 'Assunzione (ferragosto)',
'locale': 'it-IT',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2014-11-01',
'description': 'Ognissanti',
'locale': 'it-IT',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2014-12-08',
'description': 'Immacolata concezione',
'locale': 'it-IT',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2014-12-25',
'description': 'Natale',
'locale': 'it-IT',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2014-12-26',
'description': '<NAME>',
'locale': 'it-IT',
'notes': '',
'region': '',
'type': 'NRF'
}
] | tests/snapshots/snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[it_IT-2014] 1.py | [
{
'date': '2014-01-01',
'description': 'Capodanno',
'locale': 'it-IT',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2014-01-06',
'description': 'Epifania',
'locale': 'it-IT',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2014-04-20',
'description': 'Pasqua',
'locale': 'it-IT',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2014-04-21',
'description': 'Pasquetta',
'locale': 'it-IT',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2014-04-25',
'description': 'Festa della liberazione',
'locale': 'it-IT',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2014-05-01',
'description': 'Festa del lavoro',
'locale': 'it-IT',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2014-06-02',
'description': 'Festa della repubblica',
'locale': 'it-IT',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2014-08-15',
'description': 'Assunzione (ferragosto)',
'locale': 'it-IT',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2014-11-01',
'description': 'Ognissanti',
'locale': 'it-IT',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2014-12-08',
'description': 'Immacolata concezione',
'locale': 'it-IT',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2014-12-25',
'description': 'Natale',
'locale': 'it-IT',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2014-12-26',
'description': '<NAME>',
'locale': 'it-IT',
'notes': '',
'region': '',
'type': 'NRF'
}
] | 0.399343 | 0.118334 |
from __future__ import (
print_function,
division,
absolute_import,
)
from collections import defaultdict
import numpy as np
from fancyimpute.knn import KNN
from fancyimpute.iterative_svd import IterativeSVD
from fancyimpute.simple_fill import SimpleFill
from fancyimpute.soft_impute import SoftImpute
from fancyimpute.mice import MICE
from fancyimpute.biscaler import BiScaler
def check_dense_pMHC_array(X, peptide_list, allele_list):
if len(peptide_list) != len(set(peptide_list)):
raise ValueError("Duplicate peptides detected in peptide list")
if len(allele_list) != len(set(allele_list)):
raise ValueError("Duplicate alleles detected in allele list")
n_rows, n_cols = X.shape
if n_rows != len(peptide_list):
raise ValueError(
"Expected dense array with shape %s to have %d rows" % (
X.shape, len(peptide_list)))
if n_cols != len(allele_list):
raise ValueError(
"Expected dense array with shape %s to have %d columns" % (
X.shape, len(allele_list)))
def prune_dense_matrix_and_labels(
X,
peptide_list,
allele_list,
min_observations_per_peptide=1,
min_observations_per_allele=1):
"""
Filter the dense matrix of pMHC binding affinities according to
the given minimum number of row/column observations.
Parameters
----------
X : numpy.ndarray
Incomplete dense matrix of pMHC affinity with n_peptides rows and
n_alleles columns.
peptide_list : list of str
Expected to have n_peptides entries
allele_list : list of str
Expected to have n_alleles entries
min_observations_per_peptide : int
Drop peptide rows with fewer than this number of observed values.
min_observations_per_allele : int
Drop allele columns with fewer than this number of observed values.
"""
observed_mask = np.isfinite(X)
n_observed_per_peptide = observed_mask.sum(axis=1)
too_few_peptide_observations = (
n_observed_per_peptide < min_observations_per_peptide)
if too_few_peptide_observations.any():
drop_peptide_indices = np.where(too_few_peptide_observations)[0]
keep_peptide_indices = np.where(~too_few_peptide_observations)[0]
print("Dropping %d peptides with <%d observations" % (
len(drop_peptide_indices),
min_observations_per_peptide))
X = X[keep_peptide_indices]
observed_mask = observed_mask[keep_peptide_indices]
peptide_list = [peptide_list[i] for i in keep_peptide_indices]
n_observed_per_allele = observed_mask.sum(axis=0)
too_few_allele_observations = (
n_observed_per_allele < min_observations_per_peptide)
if too_few_peptide_observations.any():
drop_allele_indices = np.where(too_few_allele_observations)[0]
keep_allele_indices = np.where(~too_few_allele_observations)[0]
print("Dropping %d alleles with <%d observations: %s" % (
len(drop_allele_indices),
min_observations_per_allele,
[allele_list[i] for i in drop_allele_indices]))
X = X[:, keep_allele_indices]
observed_mask = observed_mask[:, keep_allele_indices]
allele_list = [allele_list[i] for i in keep_allele_indices]
check_dense_pMHC_array(X, peptide_list, allele_list)
return X, peptide_list, allele_list
def dense_pMHC_matrix_to_nested_dict(X, peptide_list, allele_list):
"""
Converts a dense matrix of (n_peptides, n_alleles) floats to a nested
dictionary from allele -> peptide -> affinity.
"""
allele_to_peptide_to_ic50_dict = defaultdict(dict)
for row_index, peptide in enumerate(peptide_list):
for column_index, allele_name in enumerate(allele_list):
affinity = X[row_index, column_index]
if np.isfinite(affinity):
allele_to_peptide_to_ic50_dict[allele_name][peptide] = affinity
return allele_to_peptide_to_ic50_dict
def imputer_from_name(imputation_method_name, **kwargs):
"""
Helper function for constructing an imputation object from a name given
typically from a commandline argument.
"""
imputation_method_name = imputation_method_name.strip().lower()
if imputation_method_name == "mice":
kwargs["n_burn_in"] = kwargs.get("n_burn_in", 5)
kwargs["n_imputations"] = kwargs.get("n_imputations", 25)
kwargs["n_nearest_columns"] = kwargs.get("n_nearest_columns", 25)
return MICE(**kwargs)
elif imputation_method_name == "knn":
kwargs["k"] = kwargs.get("k", 3)
kwargs["orientation"] = kwargs.get("orientation", "columns")
kwargs["print_interval"] = kwargs.get("print_interval", 10)
return KNN(**kwargs)
elif imputation_method_name == "svd":
kwargs["rank"] = kwargs.get("rank", 10)
return IterativeSVD(**kwargs)
elif imputation_method_name in ("svt", "softimpute"):
kwargs["init_fill_method"] = kwargs.get("init_fill_method", "min")
kwargs["normalizer"] = kwargs.get("normalizer", BiScaler())
return SoftImpute(**kwargs)
elif imputation_method_name == "mean":
return SimpleFill("mean", **kwargs)
elif imputation_method_name == "none":
return None
else:
raise ValueError(
"Invalid imputation method: %s" % imputation_method_name) | mhcflurry/imputation_helpers.py |
from __future__ import (
print_function,
division,
absolute_import,
)
from collections import defaultdict
import numpy as np
from fancyimpute.knn import KNN
from fancyimpute.iterative_svd import IterativeSVD
from fancyimpute.simple_fill import SimpleFill
from fancyimpute.soft_impute import SoftImpute
from fancyimpute.mice import MICE
from fancyimpute.biscaler import BiScaler
def check_dense_pMHC_array(X, peptide_list, allele_list):
if len(peptide_list) != len(set(peptide_list)):
raise ValueError("Duplicate peptides detected in peptide list")
if len(allele_list) != len(set(allele_list)):
raise ValueError("Duplicate alleles detected in allele list")
n_rows, n_cols = X.shape
if n_rows != len(peptide_list):
raise ValueError(
"Expected dense array with shape %s to have %d rows" % (
X.shape, len(peptide_list)))
if n_cols != len(allele_list):
raise ValueError(
"Expected dense array with shape %s to have %d columns" % (
X.shape, len(allele_list)))
def prune_dense_matrix_and_labels(
X,
peptide_list,
allele_list,
min_observations_per_peptide=1,
min_observations_per_allele=1):
"""
Filter the dense matrix of pMHC binding affinities according to
the given minimum number of row/column observations.
Parameters
----------
X : numpy.ndarray
Incomplete dense matrix of pMHC affinity with n_peptides rows and
n_alleles columns.
peptide_list : list of str
Expected to have n_peptides entries
allele_list : list of str
Expected to have n_alleles entries
min_observations_per_peptide : int
Drop peptide rows with fewer than this number of observed values.
min_observations_per_allele : int
Drop allele columns with fewer than this number of observed values.
"""
observed_mask = np.isfinite(X)
n_observed_per_peptide = observed_mask.sum(axis=1)
too_few_peptide_observations = (
n_observed_per_peptide < min_observations_per_peptide)
if too_few_peptide_observations.any():
drop_peptide_indices = np.where(too_few_peptide_observations)[0]
keep_peptide_indices = np.where(~too_few_peptide_observations)[0]
print("Dropping %d peptides with <%d observations" % (
len(drop_peptide_indices),
min_observations_per_peptide))
X = X[keep_peptide_indices]
observed_mask = observed_mask[keep_peptide_indices]
peptide_list = [peptide_list[i] for i in keep_peptide_indices]
n_observed_per_allele = observed_mask.sum(axis=0)
too_few_allele_observations = (
n_observed_per_allele < min_observations_per_peptide)
if too_few_peptide_observations.any():
drop_allele_indices = np.where(too_few_allele_observations)[0]
keep_allele_indices = np.where(~too_few_allele_observations)[0]
print("Dropping %d alleles with <%d observations: %s" % (
len(drop_allele_indices),
min_observations_per_allele,
[allele_list[i] for i in drop_allele_indices]))
X = X[:, keep_allele_indices]
observed_mask = observed_mask[:, keep_allele_indices]
allele_list = [allele_list[i] for i in keep_allele_indices]
check_dense_pMHC_array(X, peptide_list, allele_list)
return X, peptide_list, allele_list
def dense_pMHC_matrix_to_nested_dict(X, peptide_list, allele_list):
"""
Converts a dense matrix of (n_peptides, n_alleles) floats to a nested
dictionary from allele -> peptide -> affinity.
"""
allele_to_peptide_to_ic50_dict = defaultdict(dict)
for row_index, peptide in enumerate(peptide_list):
for column_index, allele_name in enumerate(allele_list):
affinity = X[row_index, column_index]
if np.isfinite(affinity):
allele_to_peptide_to_ic50_dict[allele_name][peptide] = affinity
return allele_to_peptide_to_ic50_dict
def imputer_from_name(imputation_method_name, **kwargs):
"""
Helper function for constructing an imputation object from a name given
typically from a commandline argument.
"""
imputation_method_name = imputation_method_name.strip().lower()
if imputation_method_name == "mice":
kwargs["n_burn_in"] = kwargs.get("n_burn_in", 5)
kwargs["n_imputations"] = kwargs.get("n_imputations", 25)
kwargs["n_nearest_columns"] = kwargs.get("n_nearest_columns", 25)
return MICE(**kwargs)
elif imputation_method_name == "knn":
kwargs["k"] = kwargs.get("k", 3)
kwargs["orientation"] = kwargs.get("orientation", "columns")
kwargs["print_interval"] = kwargs.get("print_interval", 10)
return KNN(**kwargs)
elif imputation_method_name == "svd":
kwargs["rank"] = kwargs.get("rank", 10)
return IterativeSVD(**kwargs)
elif imputation_method_name in ("svt", "softimpute"):
kwargs["init_fill_method"] = kwargs.get("init_fill_method", "min")
kwargs["normalizer"] = kwargs.get("normalizer", BiScaler())
return SoftImpute(**kwargs)
elif imputation_method_name == "mean":
return SimpleFill("mean", **kwargs)
elif imputation_method_name == "none":
return None
else:
raise ValueError(
"Invalid imputation method: %s" % imputation_method_name) | 0.785802 | 0.312213 |
import numpy as np
import torch
from torch import nn
import copy
def pad_circular(x, pad):
"""
:param x: shape [B, C, H, W]
:param pad: int >= 0
:return:
"""
x = torch.cat([x, x[:, :, 0:pad, :]], dim=2)
x = torch.cat([x, x[:, :, :, 0:pad]], dim=3)
x = torch.cat([x[:, :, -2 * pad:-pad, :], x], dim=2)
x = torch.cat([x[:, :, :, -2 * pad:-pad], x], dim=3)
return x
def init(module, weight_init, bias_init, gain=1):
weight_init(module.weight.data)
#bias_init(module.bias.data)
return module
class World(nn.Module):
def __init__(self, map_width=16, map_height=16, prob_life=20, cuda=False,
num_proc=1, env=None):
super(World, self).__init__()
self.cuda = cuda
self.map_width = map_width
self.map_height = map_height
self.prob_life = prob_life / 100
self.num_proc = num_proc
state_shape = (num_proc, 1, map_width, map_height)
if self.cuda:
self.y1 = torch.ones(state_shape).cuda()
self.y0 = torch.zeros(state_shape).cuda()
else:
self.y1 = torch.ones(state_shape)
self.y0 = torch.zeros(state_shape)
device = torch.device("cuda:0" if cuda else "cpu")
self.conv_init_ = lambda m: init(m,
nn.init.dirac_, None,
#nn.init.calculate_gain('relu')
)
conv_weights = [[[[1, 1, 1],
[1, 9, 1],
[1, 1, 1]]]]
self.transition_rule = nn.Conv2d(1, 1, 3, 1, 0, bias=False)
self.conv_init_(self.transition_rule)
self.transition_rule.to(device)
self.populate_cells()
conv_weights = torch.FloatTensor(conv_weights)
if cuda:
conv_weights.cuda()
conv_weights = conv_weights
self.transition_rule.weight = torch.nn.Parameter(conv_weights, requires_grad=False)
self.to(device)
def populate_cells(self):
if self.cuda:
self.state = torch.cuda.FloatTensor(size=
(self.num_proc, 1, self.map_width, self.map_height)).uniform_(0, 1)
self.builds = torch.cuda.FloatTensor(size=
(self.num_proc, 1, self.map_width, self.map_height)).fill_(0)
self.failed = torch.cuda.FloatTensor(size=
(self.num_proc, 1, self.map_width, self.map_height)).fill_(0)
else:
self.state = torch.FloatTensor(size=
(self.num_proc, 1, self.map_width, self.map_height)).uniform_(0, 1)
self.builds = torch.FloatTensor(size=
(self.num_proc, 1, self.map_width, self.map_height)).fill_(0)
self.failed = torch.FloatTensor(size=
(self.num_proc, 1, self.map_width, self.map_height)).fill_(0)
self.state = torch.where(self.state < self.prob_life, self.y1, self.y0).float()
def repopulate_cells(self):
self.state.float().uniform_(0, 1)
self.state = torch.where(self.state < self.prob_life, self.y1, self.y0).float()
self.builds.fill_(0)
self.failed.fill_(0)
def build_cell(self, x, y, alive=True):
if alive:
self.state[0, 0, x, y] = 1
else:
self.state[0, 0, x, y] = 0
def _tick(self):
self.state = self.forward(self.state)
#print(self.state[0][0])
def forward(self, x):
with torch.no_grad():
if self.cuda:
x = x.cuda()
x = pad_circular(x, 1)
x = x.float()
#print(x[0])
x = self.transition_rule(x)
#print(x[0])
# Mysterious leakages appear here if we increase the batch size enough.
x = x.round() # so we hack them back into shape
#print(x[0])
x = self.GoLu(x)
return x
def GoLu(self, x):
'''
Applies the Game of Life Unit activation function, element-wise:
_
__/\______/ \_____
0 2 4 6 8 0 2 4 6 8
'''
x_out = copy.deepcopy(x).fill_(0).float()
ded_0 = (x >= 2).float()
bth_0 = ded_0 * (x < 3).float()
x_out = x_out + (bth_0 * (x - 2).float())
ded_1 = (x >= 3).float()
bth_1 = ded_1 * (x < 4).float()
x_out = x_out + abs(bth_1 * (x - 4).float())
alv_0 = (x >= 10).float()
lif_0 = alv_0 * (x < 11).float()
x_out = x_out + (lif_0 * (x - 10).float())
alv_1 = (x >= 11).float()
lif_1 = alv_1 * (x < 12).float()
x_out = x_out + lif_1
alv_2 = (x >= 12).float()
lif_2 = alv_2 * (x < 13).float()
x_out = x_out + abs(lif_2 * (x -13).float())
assert (x_out >= 0).all() and (x_out <=1).all()
#x_out = torch.clamp(x_out, 0, 1)
return x_out
def seed(self, seed=None):
np.random.seed(seed)
def main():
world = World()
for j in range(4):
world.repopulate_cells()
for i in range(100):
world._tick()
print(world.state)
if __name__ == '__main__':
main() | game_of_life/envs/world_pytorch.py | import numpy as np
import torch
from torch import nn
import copy
def pad_circular(x, pad):
"""
:param x: shape [B, C, H, W]
:param pad: int >= 0
:return:
"""
x = torch.cat([x, x[:, :, 0:pad, :]], dim=2)
x = torch.cat([x, x[:, :, :, 0:pad]], dim=3)
x = torch.cat([x[:, :, -2 * pad:-pad, :], x], dim=2)
x = torch.cat([x[:, :, :, -2 * pad:-pad], x], dim=3)
return x
def init(module, weight_init, bias_init, gain=1):
weight_init(module.weight.data)
#bias_init(module.bias.data)
return module
class World(nn.Module):
def __init__(self, map_width=16, map_height=16, prob_life=20, cuda=False,
num_proc=1, env=None):
super(World, self).__init__()
self.cuda = cuda
self.map_width = map_width
self.map_height = map_height
self.prob_life = prob_life / 100
self.num_proc = num_proc
state_shape = (num_proc, 1, map_width, map_height)
if self.cuda:
self.y1 = torch.ones(state_shape).cuda()
self.y0 = torch.zeros(state_shape).cuda()
else:
self.y1 = torch.ones(state_shape)
self.y0 = torch.zeros(state_shape)
device = torch.device("cuda:0" if cuda else "cpu")
self.conv_init_ = lambda m: init(m,
nn.init.dirac_, None,
#nn.init.calculate_gain('relu')
)
conv_weights = [[[[1, 1, 1],
[1, 9, 1],
[1, 1, 1]]]]
self.transition_rule = nn.Conv2d(1, 1, 3, 1, 0, bias=False)
self.conv_init_(self.transition_rule)
self.transition_rule.to(device)
self.populate_cells()
conv_weights = torch.FloatTensor(conv_weights)
if cuda:
conv_weights.cuda()
conv_weights = conv_weights
self.transition_rule.weight = torch.nn.Parameter(conv_weights, requires_grad=False)
self.to(device)
def populate_cells(self):
if self.cuda:
self.state = torch.cuda.FloatTensor(size=
(self.num_proc, 1, self.map_width, self.map_height)).uniform_(0, 1)
self.builds = torch.cuda.FloatTensor(size=
(self.num_proc, 1, self.map_width, self.map_height)).fill_(0)
self.failed = torch.cuda.FloatTensor(size=
(self.num_proc, 1, self.map_width, self.map_height)).fill_(0)
else:
self.state = torch.FloatTensor(size=
(self.num_proc, 1, self.map_width, self.map_height)).uniform_(0, 1)
self.builds = torch.FloatTensor(size=
(self.num_proc, 1, self.map_width, self.map_height)).fill_(0)
self.failed = torch.FloatTensor(size=
(self.num_proc, 1, self.map_width, self.map_height)).fill_(0)
self.state = torch.where(self.state < self.prob_life, self.y1, self.y0).float()
def repopulate_cells(self):
self.state.float().uniform_(0, 1)
self.state = torch.where(self.state < self.prob_life, self.y1, self.y0).float()
self.builds.fill_(0)
self.failed.fill_(0)
def build_cell(self, x, y, alive=True):
if alive:
self.state[0, 0, x, y] = 1
else:
self.state[0, 0, x, y] = 0
def _tick(self):
self.state = self.forward(self.state)
#print(self.state[0][0])
def forward(self, x):
with torch.no_grad():
if self.cuda:
x = x.cuda()
x = pad_circular(x, 1)
x = x.float()
#print(x[0])
x = self.transition_rule(x)
#print(x[0])
# Mysterious leakages appear here if we increase the batch size enough.
x = x.round() # so we hack them back into shape
#print(x[0])
x = self.GoLu(x)
return x
def GoLu(self, x):
'''
Applies the Game of Life Unit activation function, element-wise:
_
__/\______/ \_____
0 2 4 6 8 0 2 4 6 8
'''
x_out = copy.deepcopy(x).fill_(0).float()
ded_0 = (x >= 2).float()
bth_0 = ded_0 * (x < 3).float()
x_out = x_out + (bth_0 * (x - 2).float())
ded_1 = (x >= 3).float()
bth_1 = ded_1 * (x < 4).float()
x_out = x_out + abs(bth_1 * (x - 4).float())
alv_0 = (x >= 10).float()
lif_0 = alv_0 * (x < 11).float()
x_out = x_out + (lif_0 * (x - 10).float())
alv_1 = (x >= 11).float()
lif_1 = alv_1 * (x < 12).float()
x_out = x_out + lif_1
alv_2 = (x >= 12).float()
lif_2 = alv_2 * (x < 13).float()
x_out = x_out + abs(lif_2 * (x -13).float())
assert (x_out >= 0).all() and (x_out <=1).all()
#x_out = torch.clamp(x_out, 0, 1)
return x_out
def seed(self, seed=None):
np.random.seed(seed)
def main():
world = World()
for j in range(4):
world.repopulate_cells()
for i in range(100):
world._tick()
print(world.state)
if __name__ == '__main__':
main() | 0.800887 | 0.679571 |
import json
import os
import glob
import torch
from argparse import ArgumentParser
import cv2
import numpy as np
import tqdm
from segmentation.models import get_segmentation_model
from utils.image_transforms import get_crop
RESIZE_DIMS = (122, 28)
from utils.image_transforms import get_crop, apply_mask, get_sorted_polyline, \
parallelaze_polygon, plot_circles, affine_transform
def parse_arguments():
parser = ArgumentParser()
parser.add_argument("--data_dir", help="Path to dir containing images.")
parser.add_argument("-o", "--output_dir", dest="output_dir", default="seg_out", help="Path to output dir'.")
parser.add_argument("-l", "--load", dest="load", help="segmantation model name")
parser.add_argument("-c", "--crop_scale", dest="crop_scale", default=1.2, type=float, help="scale bounding box")
parser.add_argument("-p", "--polygon_scale", dest="polygon_scale", default=1, type=float, help="scale polygon ")
return parser.parse_args()
def main(args):
os.makedirs(args.output_dir, exist_ok=True)
test_images = glob.glob(os.path.join(args.data_dir, '*.jpg')) + \
glob.glob(os.path.join(args.data_dir, '*.bmp'))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
segmentation_model = get_segmentation_model()
with open(args.load, "rb") as fp:
print(f"load {args.load}")
state_dict = torch.load(fp, map_location="cpu")
segmentation_model.load_state_dict(state_dict)
segmentation_model.to(device)
segmentation_model.eval()
with torch.no_grad():
for img_file_name in tqdm.tqdm(test_images, total=len(test_images)):
image = cv2.imread(os.path.join(args.data_dir, img_file_name))
if image is None:
print(f"wrong file {img_file_name}")
continue
image_n = image.astype(np.float32) / 255
t_image = torch.as_tensor(image_n.transpose(2, 0, 1)).to(device)
predict = segmentation_model([t_image])
path_list = os.path.normpath(img_file_name).lstrip(os.path.sep).split(os.path.sep)
# path_list = image_filename.split(os.sep)
image_base, ext = os.path.splitext(path_list[-1])
mask_result = np.zeros(image_n.shape[0:2])
for i, (class_num, score, mask) in enumerate(zip(predict[0]["labels"], predict[0]["scores"],
predict[0]["masks"])):
if class_num == 1 and score > 0.4:
mask = mask.cpu().squeeze().numpy()
mask_result += mask
mask_result[mask_result > 0.3] = 1
mask_result = mask_result.astype(np.uint8)
contours, _ = cv2.findContours(mask_result.astype(np.uint8), 1, 1)
for i, contour in enumerate(contours):
x, y, w, h = cv2.boundingRect(contour)
crop_image, _ = get_crop(image,np.array([[x,y],[x + w, y+ h]], dtype=np.int32),
args.crop_scale, make_square=True)
fout = os.path.join(args.output_dir, f"{image_base}_{i}{ext}")
cv2.imwrite(fout, crop_image)
if __name__ == "__main__":
main(parse_arguments()) | src/scripts/test-segmentation.py | import json
import os
import glob
import torch
from argparse import ArgumentParser
import cv2
import numpy as np
import tqdm
from segmentation.models import get_segmentation_model
from utils.image_transforms import get_crop
RESIZE_DIMS = (122, 28)
from utils.image_transforms import get_crop, apply_mask, get_sorted_polyline, \
parallelaze_polygon, plot_circles, affine_transform
def parse_arguments():
parser = ArgumentParser()
parser.add_argument("--data_dir", help="Path to dir containing images.")
parser.add_argument("-o", "--output_dir", dest="output_dir", default="seg_out", help="Path to output dir'.")
parser.add_argument("-l", "--load", dest="load", help="segmantation model name")
parser.add_argument("-c", "--crop_scale", dest="crop_scale", default=1.2, type=float, help="scale bounding box")
parser.add_argument("-p", "--polygon_scale", dest="polygon_scale", default=1, type=float, help="scale polygon ")
return parser.parse_args()
def main(args):
os.makedirs(args.output_dir, exist_ok=True)
test_images = glob.glob(os.path.join(args.data_dir, '*.jpg')) + \
glob.glob(os.path.join(args.data_dir, '*.bmp'))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
segmentation_model = get_segmentation_model()
with open(args.load, "rb") as fp:
print(f"load {args.load}")
state_dict = torch.load(fp, map_location="cpu")
segmentation_model.load_state_dict(state_dict)
segmentation_model.to(device)
segmentation_model.eval()
with torch.no_grad():
for img_file_name in tqdm.tqdm(test_images, total=len(test_images)):
image = cv2.imread(os.path.join(args.data_dir, img_file_name))
if image is None:
print(f"wrong file {img_file_name}")
continue
image_n = image.astype(np.float32) / 255
t_image = torch.as_tensor(image_n.transpose(2, 0, 1)).to(device)
predict = segmentation_model([t_image])
path_list = os.path.normpath(img_file_name).lstrip(os.path.sep).split(os.path.sep)
# path_list = image_filename.split(os.sep)
image_base, ext = os.path.splitext(path_list[-1])
mask_result = np.zeros(image_n.shape[0:2])
for i, (class_num, score, mask) in enumerate(zip(predict[0]["labels"], predict[0]["scores"],
predict[0]["masks"])):
if class_num == 1 and score > 0.4:
mask = mask.cpu().squeeze().numpy()
mask_result += mask
mask_result[mask_result > 0.3] = 1
mask_result = mask_result.astype(np.uint8)
contours, _ = cv2.findContours(mask_result.astype(np.uint8), 1, 1)
for i, contour in enumerate(contours):
x, y, w, h = cv2.boundingRect(contour)
crop_image, _ = get_crop(image,np.array([[x,y],[x + w, y+ h]], dtype=np.int32),
args.crop_scale, make_square=True)
fout = os.path.join(args.output_dir, f"{image_base}_{i}{ext}")
cv2.imwrite(fout, crop_image)
if __name__ == "__main__":
main(parse_arguments()) | 0.486088 | 0.195057 |
from math import cos, pi, sqrt
class GameTile:
    """A tile of the hexagonal game board.

    ``x`` is the column index and ``y`` the row; on odd columns ``y`` takes
    half-integer values, which is how the hex stagger is encoded.  ``_x`` is
    the horizontal position in euclidean space (columns sit ``cos(pi/6)``
    apart).  Tiles double as 2D vectors: adding/subtracting tiles expresses
    steps across the board.
    """

    # Euclidean horizontal spacing between neighbouring columns.
    CO = cos(pi / 6)

    def __init__(self, x, y):
        self.x = x
        self.y = y
        self._x = self.x * self.CO  # euclidean abscissa, used for distances

    def dist(self, other):
        """Return the euclidean distance between the two tile centers."""
        return sqrt((self._x - other._x) ** 2 + (self.y - other.y) ** 2)

    def neighbours(self):
        """Return the six adjacent tiles of the hex neighbourhood."""
        return [
            self + GameTile(-1, 0.5),
            self + GameTile(0, 1),
            self + GameTile(1, 0.5),
            self + GameTile(-1, -0.5),
            self + GameTile(0, -1),
            self + GameTile(1, -0.5),
        ]

    def in_boundaries(self, radius):
        """True if the tile lies strictly inside a disc of ``radius`` around the origin."""
        return self.dist(GameTile(0, 0)) < radius

    def __add__(self, other):
        """Tiles are vectors and can as well express steps, can be added etc."""
        return GameTile(self.x + other.x, self.y + other.y)

    def __sub__(self, other):
        return GameTile(self.x - other.x, self.y - other.y)

    def __eq__(self, other):
        # NOTE(review): equality is delegated to the hash, which can collide
        # for far-apart coordinates — kept as-is to preserve game behaviour.
        return self.__hash__() == other.__hash__()

    def __str__(self):
        return "<%s %s>" % (self.x, self.y)

    def __repr__(self):
        return "<%s %s>" % (self.x, self.y)

    def __hash__(self):
        # Doubling turns the half-integer coordinates into integers.
        return round(2 * self.x) + 100 * round(2 * self.y)

    def _dist_to_axis(self, d0, dx, dy, c):
        # Distance from this tile to the line through the ray's endpoints
        # (``a*x + b*y + c = 0`` form); ``d0 or 1`` guards a zero-length ray.
        return abs(self._x * dy - self.y * dx + c) / (d0 or 1)

    def raycast(self, other, go_through=False, valid_steps=None):
        """Yield the tiles crossed by a straight ray from ``self`` to ``other``.

        Used for los checks mostly.  ``valid_steps``, when given, restricts
        which tiles the ray may advance through; with ``go_through`` the ray
        is extended past ``other`` along the same direction.
        """
        # (removed an unused local ``CO = cos(pi / 6)`` that shadowed the
        # class constant without ever being read)
        d0 = self.dist(other)
        current_tile = self
        dx, dy = other._x - self._x, other.y - self.y
        c = - dy * self._x + dx * self.y
        while True:
            forward_tiles = [n for n in current_tile.neighbours() if n._dist_to_axis(d0, dx, dy, c) < 0.5001
                             and n.dist(other) < current_tile.dist(other)]
            for tile in forward_tiles:
                yield tile
            for forward_tile in forward_tiles.copy():
                if valid_steps and forward_tile not in valid_steps:
                    forward_tiles.remove(forward_tile)
            if forward_tiles:
                current_tile = forward_tiles[-1]
            else:
                if self != other and current_tile == other and go_through:
                    # Extend the ray past ``other`` by mirroring the start point.
                    for tile in current_tile.raycast(current_tile + current_tile - self, go_through):
                        yield tile
                break

    def display_location(self):
        """Pixel coordinates of the tile on screen (32px cells, fixed offsets)."""
        return 340 + 32 * (8 + self.x), 92 + 32 * (7 + self.y)

    def dict_dump(self):
        """Serialize to the ``"x y"`` string format understood by ``from_string``."""
        return "%.1f %.1f" % (self.x, self.y)

    @staticmethod
    def from_string(string):
        """Inverse of ``dict_dump``."""
        x, y = string.split(' ')
        return GameTile(float(x), float(y))

    @staticmethod
    def get_tile_for_mouse(mouse_pos):
        """Map a pixel position to the tile under it (inverse of ``display_location``)."""
        x = int((mouse_pos[0] - 340) // 32 - 8)
        if x % 2 == 0:
            y = int((mouse_pos[1] - 92) // 32 - 7)
        else:
            # Odd columns are shifted half a cell (16px) down on screen.
            y = int((mouse_pos[1] - 108) // 32 - 7) + 0.5
        return GameTile(x, y)

    @staticmethod
    def all_tiles(radius):
        """Yield every tile strictly inside the disc of ``radius`` around the origin."""
        for i in range(-int(radius + 1), int(radius + 1)):
            for j in range(-int(radius + 1), int(radius + 1)):
                tile = GameTile(i, j + (i % 2) / 2)
                if tile.in_boundaries(radius):
                    yield tile
class GameTile:
    """One hexagonal board cell, addressed by column ``x`` and row ``y``.

    Rows on odd columns sit at half-integer ``y`` — that is how the hex
    stagger is represented.  Instances also serve as 2D step vectors, so
    arithmetic between tiles is meaningful.
    """

    CO = cos(pi / 6)  # horizontal distance between adjacent columns

    # The six step vectors leading to the adjacent hexes.
    _STEPS = ((-1, 0.5), (0, 1), (1, 0.5), (-1, -0.5), (0, -1), (1, -0.5))

    def __init__(self, x, y):
        self.x = x
        self.y = y
        self._x = self.x * self.CO

    def dist(self, other):
        """Euclidean distance between the two tile centers."""
        ddx = self._x - other._x
        ddy = self.y - other.y
        return sqrt(ddx ** 2 + ddy ** 2)

    def neighbours(self):
        """The six adjacent tiles."""
        return [self + GameTile(sx, sy) for sx, sy in self._STEPS]

    def in_boundaries(self, radius):
        """Whether the tile lies strictly within ``radius`` of the origin."""
        origin = GameTile(0, 0)
        return self.dist(origin) < radius

    def __add__(self, other):
        """Tiles behave like vectors; addition composes steps."""
        return GameTile(self.x + other.x, self.y + other.y)

    def __sub__(self, other):
        return GameTile(self.x - other.x, self.y - other.y)

    def __eq__(self, other):
        # Equality is defined through the hash, matching the original design.
        return hash(self) == hash(other)

    def __str__(self):
        return "<%s %s>" % (self.x, self.y)

    __repr__ = __str__

    def __hash__(self):
        return round(2 * self.x) + 100 * round(2 * self.y)

    def _dist_to_axis(self, d0, dx, dy, c):
        # Point-to-line distance for the ray's supporting line; ``d0 or 1``
        # avoids dividing by zero on a degenerate (zero-length) ray.
        numerator = abs(self._x * dy - self.y * dx + c)
        return numerator / (d0 or 1)

    def raycast(self, other, go_through=False, valid_steps=None):
        """Yield the tiles a straight ray from here to ``other`` passes through.

        Mainly used for line-of-sight tests; ``valid_steps`` restricts where
        the ray may advance, ``go_through`` extends it past the target.
        """
        total = self.dist(other)
        dx, dy = other._x - self._x, other.y - self.y
        c = dx * self.y - dy * self._x
        cursor = self
        while True:
            advance = [n for n in cursor.neighbours()
                       if n._dist_to_axis(total, dx, dy, c) < 0.5001
                       and n.dist(other) < cursor.dist(other)]
            yield from advance
            if valid_steps:
                advance = [t for t in advance if t in valid_steps]
            if not advance:
                if go_through and self != other and cursor == other:
                    # Continue past the target by mirroring the origin.
                    yield from cursor.raycast(cursor + cursor - self, go_through)
                break
            cursor = advance[-1]

    def display_location(self):
        """Screen pixel coordinates of this tile (32px cells, fixed offsets)."""
        return 340 + 32 * (8 + self.x), 92 + 32 * (7 + self.y)

    def dict_dump(self):
        """Serialize as the "x y" string accepted by ``from_string``."""
        return "%.1f %.1f" % (self.x, self.y)

    @staticmethod
    def from_string(string):
        """Parse the "x y" form produced by ``dict_dump``."""
        x_str, y_str = string.split(' ')
        return GameTile(float(x_str), float(y_str))

    @staticmethod
    def get_tile_for_mouse(mouse_pos):
        """Tile lying under the given pixel position."""
        col = int((mouse_pos[0] - 340) // 32 - 8)
        if col % 2 == 0:
            row = int((mouse_pos[1] - 92) // 32 - 7)
        else:
            # Odd columns are drawn half a cell (16px) lower.
            row = int((mouse_pos[1] - 108) // 32 - 7) + 0.5
        return GameTile(col, row)

    @staticmethod
    def all_tiles(radius):
        """Yield every tile strictly inside the disc of the given radius."""
        span = range(-int(radius + 1), int(radius + 1))
        for col in span:
            for row in span:
                candidate = GameTile(col, row + (col % 2) / 2)
                if candidate.in_boundaries(radius):
                    yield candidate
import torch
import pandas as pd
import numpy as np
from torch.utils.data import Dataset
from PIL import Image
import os, wget
__all__ = ['RefurbishCIFAR100']
class RefurbishCIFAR100(Dataset):
    """CIFAR-100 training set with optionally "refurbished" (corrected) labels.

    Images are read from ``train_data.bin`` — one record per sample: 4 index
    bytes, 4 label bytes, then a 3x32x32 image — and corrected labels from
    ``refurbishment.csv`` (``index, refurred`` rows; ``-1`` marks a sample
    whose refurbished label is uncertain).

    Parameters
    ----------
    root : str
        Base directory; data lives in ``<root>/refurbished_cifar100``.
    refur : bool
        Replace original labels with refurbished ones where available.
    use_certain : bool
        Restrict ``__len__``/``__getitem__`` to the certain samples.
    transforms, target_transforms : callable or None
        Applied to the PIL image / label in ``__getitem__``.
    download : bool
        Fetch missing data files.  BUGFIX: this flag was previously accepted
        but ignored; it is now honoured (default ``True`` keeps behaviour).
    verbose : bool
        Print progress while mapping the data.
    """

    def __init__(self, root, refur=False, use_certain=False, transforms=None, target_transforms=None, download=True, verbose=True):
        root = root + '/refurbished_cifar100'
        if download:
            self._download(root)
        self.dataset, self.certain_indices = self._data_mapper(root, refur, use_certain, verbose)
        self.use_certain = use_certain
        self.transforms, self.target_transforms = transforms, target_transforms

    def _data_mapper(self, root, refur, use_certain, verbose):
        """Load all 50000 records; return ``(dataset, certain_indices)``."""
        raw_data = np.fromfile(root + '/train_data.bin', dtype='uint8')
        raw_data = raw_data.reshape(50000, 4 + 4 + 3 * 32 * 32)  # idx(4), label(4), img(3*32*32)
        df_label = pd.read_csv(root + '/refurbishment.csv', names=['index', 'refurred'])
        # Build the index -> refurbished-label map once: the original scanned
        # the whole DataFrame per sample (O(n) x 50000 iterations = O(n^2)).
        refurred_by_index = dict(zip(df_label['index'], df_label['refurred']))
        dataset, certain_indices = [], []
        refur_count = 0
        dummy_flag = True
        for index in range(50000):
            data = raw_data[index][8:].reshape(3, 32, 32)
            data = data.transpose((1, 2, 0))  # CHW -> HWC for PIL
            label = self._bytes_to_int(raw_data[index][4:8])
            # Sanity-check the record layout.  BUGFIX: the original parsed
            # ``[index][:4]`` — a one-element list holding the loop counter —
            # making the assertion vacuously true; verify the stored bytes.
            # Assumes the index field is big-endian like the label field —
            # TODO confirm against the data file.
            assert index == self._bytes_to_int(raw_data[index][:4])
            if refur:
                temp = refurred_by_index[index]
                if temp != -1:
                    if label != temp:
                        refur_count += 1
                    label = temp
                    certain_indices.append(index)
                else:
                    # Keep exactly one uncertain sample so the certain subset
                    # can still fill the expected (ghost) batch size.
                    if dummy_flag:
                        if verbose:
                            print('\nsample index %d is uncertain but included to meet ghost batch size.\n' % index)
                        certain_indices.append(index)
                        dummy_flag = False
            dataset.append((data, label))
            if (index + 1) % 10000 == 0 and verbose:
                print('index %d of refurred set has processed' % (index + 1))
        uncertain = (50000 - len(certain_indices)) if use_certain else 0
        if verbose:
            print('----------------------------------------------------------------')
            print('%d samples have refurred and %d uncertain samples have excluded\n' % (refur_count, uncertain))
        return dataset, certain_indices

    def _bytes_to_int(self, bytes_array):
        """Interpret ``bytes_array`` as a big-endian unsigned integer."""
        result = 0
        for b in bytes_array:
            result = result * 256 + int(b)
        return result

    def __getitem__(self, index):
        """Return the (transformed) image/label pair at ``index``."""
        if self.use_certain:
            img, target = self.dataset[self.certain_indices[index]]
        else:
            img, target = self.dataset[index]
        img = Image.fromarray(img, 'RGB')
        if self.transforms is not None:
            img = self.transforms(img)
        if self.target_transforms is not None:
            target = self.target_transforms(target)
        return img, target

    def __len__(self):
        if self.use_certain:
            return len(self.certain_indices)
        else:
            return len(self.dataset)

    def _download(self, root):
        """Fetch the csv/bin data files into ``root`` when not already present."""
        if not os.path.isdir(root):
            os.makedirs(root)
        refurbish_path = root + '/refurbishment.csv'
        train_bin_path = root + '/train_data.bin'
        refurbish_exist = os.path.isfile(refurbish_path)
        train_bin_exist = os.path.isfile(train_bin_path)
        if not refurbish_exist:
            url = 'https://www.dropbox.com/s/izxfv9fko4hds7u/refurbishment.csv?dl=1'
            print('\nrefurbished label downloading...')
            wget.download(url, refurbish_path)
        if not train_bin_exist:
            print('\ntrain data binary file downloading...')
            url = 'https://www.dropbox.com/s/picdidpo5aziqcf/train_data.bin?dl=1'
            wget.download(url, train_bin_path)
        # BUGFIX: grammar of the user-facing message ("setted" -> "set").
        print('data and labels are set! \n')
import pandas as pd
import numpy as np
from torch.utils.data import Dataset
from PIL import Image
import os, wget
__all__ = ['RefurbishCIFAR100']
class RefurbishCIFAR100(Dataset):
    """CIFAR-100 train split whose labels can be swapped for "refurbished" ones.

    Record layout of ``train_data.bin``: 4 index bytes, 4 label bytes and a
    3x32x32 image per sample.  ``refurbishment.csv`` holds ``index, refurred``
    pairs, where ``-1`` marks an uncertain sample.
    """

    def __init__(self, root, refur=False, use_certain=False, transforms=None, target_transforms=None, download=True, verbose=True):
        root = root + '/refurbished_cifar100'
        # NOTE(review): the ``download`` flag is accepted but not consulted;
        # the files are always fetched when missing (behaviour preserved).
        self._download(root)
        self.dataset, self.certain_indices = self._data_mapper(root, refur, use_certain, verbose)
        self.use_certain = use_certain
        self.transforms, self.target_transforms = transforms, target_transforms

    def _data_mapper(self, root, refur, use_certain, verbose):
        """Read the binary records and apply refurbished labels when asked."""
        records = np.fromfile(root + '/train_data.bin', dtype='uint8')
        records = records.reshape(50000, 4 + 4 + 3 * 32 * 32)  # idx(4), label(4), img(3*32*32)
        refur_df = pd.read_csv(root + '/refurbishment.csv', names=['index', 'refurred'])
        dataset = []
        certain_indices = []
        refur_count = 0
        need_dummy = True
        for idx in range(50000):
            img = records[idx][8:].reshape(3, 32, 32).transpose((1, 2, 0))
            label = self._bytes_to_int(records[idx][4:8])
            # NOTE(review): ``[idx][:4]`` parses a one-element list holding
            # the loop counter, so this check is always true — kept as-is to
            # preserve behaviour.
            assert idx == self._bytes_to_int([idx][:4])
            if refur:
                new_label = refur_df.loc[refur_df['index'] == idx]['refurred'].values[0]
                if new_label != -1:
                    if label != new_label:
                        refur_count += 1
                    label = new_label
                    certain_indices.append(idx)
                elif need_dummy:
                    if verbose:
                        print('\nsample index %d is uncertain but included to meet ghost batch size.\n' % idx)
                    certain_indices.append(idx)
                    need_dummy = False
            dataset.append((img, label))
            if verbose and (idx + 1) % 10000 == 0:
                print('index %d of refurred set has processed' % (idx + 1))
        uncertain = (50000 - len(certain_indices)) if use_certain else 0
        if verbose:
            print('----------------------------------------------------------------')
            print('%d samples have refurred and %d uncertain samples have excluded\n' % (refur_count, uncertain))
        return dataset, certain_indices

    def _bytes_to_int(self, bytes_array):
        """Fold a big-endian byte sequence into a single integer."""
        value = 0
        for byte in bytes_array:
            value = (value << 8) + int(byte)
        return value

    def __getitem__(self, index):
        """Return the (transformed) image/label pair at ``index``."""
        real_index = self.certain_indices[index] if self.use_certain else index
        img, target = self.dataset[real_index]
        img = Image.fromarray(img, 'RGB')
        if self.transforms is not None:
            img = self.transforms(img)
        if self.target_transforms is not None:
            target = self.target_transforms(target)
        return img, target

    def __len__(self):
        return len(self.certain_indices) if self.use_certain else len(self.dataset)

    def _download(self, root):
        """Fetch the csv/bin data files into ``root`` when not already present."""
        if not os.path.isdir(root):
            os.makedirs(root)
        refurbish_path = root + '/refurbishment.csv'
        train_bin_path = root + '/train_data.bin'
        if not os.path.isfile(refurbish_path):
            url = 'https://www.dropbox.com/s/izxfv9fko4hds7u/refurbishment.csv?dl=1'
            print('\nrefurbished label downloading...')
            wget.download(url, refurbish_path)
        if not os.path.isfile(train_bin_path):
            print('\ntrain data binary file downloading...')
            url = 'https://www.dropbox.com/s/picdidpo5aziqcf/train_data.bin?dl=1'
            wget.download(url, train_bin_path)
        print('data and labels are setted! \n')