code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
from __future__ import annotations
from datetime import timedelta
import logging
from typing import Any, Generic, cast
from hahomematic.const import HmEntityUsage
from hahomematic.entity import (
CallbackEntity,
CustomEntity,
GenericEntity,
GenericSystemVariable,
)
from homeassistant.core import callback
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.helpers.entity import DeviceInfo, Entity
from .const import DOMAIN
from .control_unit import ControlUnit
from .entity_helpers import get_entity_description
from .helpers import HmGenericEntity, HmGenericSysvarEntity
SCAN_INTERVAL = timedelta(seconds=120)
_LOGGER = logging.getLogger(__name__)
class HaHomematicGenericEntity(Generic[HmGenericEntity], Entity):
    """Representation of the HomematicIP generic entity.

    Wraps a hahomematic entity (``hm_entity``) as a Home Assistant entity:
    mirrors its availability, attributes and device info, registers
    update/remove callbacks when added to hass, and removes itself from the
    device/entity registries once the underlying HM device is removed.
    """

    def __init__(
        self,
        control_unit: ControlUnit,
        hm_entity: HmGenericEntity,
    ) -> None:
        """Initialize the generic entity."""
        self._cu: ControlUnit = control_unit
        self._hm_entity: HmGenericEntity = hm_entity
        self._attr_should_poll = self._hm_entity.should_poll
        # Attach an entity description (icon, device class, ...) if one is
        # defined for this entity type.
        if entity_description := get_entity_description(self._hm_entity):
            self.entity_description = entity_description
        # Only override HA's default when the usage attribute yields an
        # explicit True/False; None keeps the Home Assistant default.
        if (
            entity_registry_enabled_default := self._get_entity_registry_enabled_default()
        ) is not None:
            self._attr_entity_registry_enabled_default = entity_registry_enabled_default
        # Marker showing that the Hm device has been removed.
        self._hm_device_removed = False
        self._attr_name = hm_entity.name
        self._attr_unique_id = hm_entity.unique_id
        _LOGGER.debug("init: Setting up %s", self.name)

    @property
    def available(self) -> bool:
        """Return if entity is available."""
        return self._hm_entity.available

    @property
    def device_info(self) -> DeviceInfo | None:
        """Return device specific attributes."""
        info = self._hm_entity.device_information
        return DeviceInfo(
            identifiers={
                (
                    DOMAIN,
                    info.identifier,
                )
            },
            manufacturer=info.manufacturer,
            model=info.model,
            name=info.name,
            sw_version=info.version,
            suggested_area=info.room,
            # Link to the homematic control unit.
            via_device=cast(tuple[str, str], info.central),
        )

    @property
    def extra_state_attributes(self) -> dict[str, Any]:
        """Return the state attributes of the generic entity."""
        return self._hm_entity.attributes

    async def async_added_to_hass(self) -> None:
        """Register callbacks and load initial data."""
        if isinstance(self._hm_entity, CallbackEntity):
            self._hm_entity.register_update_callback(
                update_callback=self._async_device_changed
            )
            self._hm_entity.register_remove_callback(
                remove_callback=self._async_device_removed
            )
        self._cu.async_add_hm_entity(
            entity_id=self.entity_id, hm_entity=self._hm_entity
        )
        # Init value of entity.
        await self.async_update()

    def _get_entity_registry_enabled_default(self) -> bool | None:
        """Return, if entity should be enabled based on usage attribute.

        False for secondary/visible/no-create usages, True for primary
        entities, None when HA's own default should apply.
        """
        if self._hm_entity.usage in {
            HmEntityUsage.CE_SECONDARY,
            HmEntityUsage.CE_VISIBLE,
            HmEntityUsage.ENTITY_NO_CREATE,
        }:
            return False
        if self._hm_entity.usage in {HmEntityUsage.CE_PRIMARY}:
            return True
        return None

    @callback
    def _async_device_changed(self, *args: Any, **kwargs: Any) -> None:
        """Handle device state changes."""
        # Don't update disabled entities.
        if self.enabled:
            _LOGGER.debug("Event %s", self.name)
            self.async_write_ha_state()
        else:
            _LOGGER.debug(
                "Device Changed Event for %s not fired. Entity is disabled",
                self.name,
            )

    async def async_update(self) -> None:
        """Update entities from MASTER paramset."""
        if isinstance(self._hm_entity, (GenericEntity, CustomEntity)):
            await self._hm_entity.load_entity_value()

    async def async_will_remove_from_hass(self) -> None:
        """Run when hmip device will be removed from hass."""
        # Only go further if the device/entity should be removed from registries
        # due to a removal of the HM device.
        if self._hm_device_removed:
            try:
                self._cu.async_remove_hm_entity(self.entity_id)
                self._async_remove_from_registries()
            except KeyError as err:
                _LOGGER.debug("Error removing HM device from registry: %s", err)

    @callback
    def _async_remove_from_registries(self) -> None:
        """Remove entity/device from registry."""
        # Remove callback from device.
        self._hm_entity.unregister_update_callback(
            update_callback=self._async_device_changed
        )
        self._hm_entity.unregister_remove_callback(
            remove_callback=self._async_device_removed
        )
        if not self.registry_entry:
            return
        if device_id := self.registry_entry.device_id:
            # Remove from device registry.
            device_registry = dr.async_get(self.hass)
            if device_id in device_registry.devices:
                # This will also remove associated entities from entity registry.
                device_registry.async_remove_device(device_id)
        else:
            # Remove from entity registry.
            # Only relevant for entities that do not belong to a device.
            if entity_id := self.registry_entry.entity_id:
                entity_registry = er.async_get(self.hass)
                if entity_id in entity_registry.entities:
                    entity_registry.async_remove(entity_id)

    @callback
    def _async_device_removed(self, *args: Any, **kwargs: Any) -> None:
        """Handle hm device removal."""
        # Set marker showing that the Hm device has been removed, then
        # schedule removal; async_will_remove_from_hass does registry cleanup.
        self._hm_device_removed = True
        self.hass.async_create_task(self.async_remove(force_remove=True))
class HaHomematicGenericSysvarEntity(Generic[HmGenericSysvarEntity], Entity):
    """Representation of the HomematicIP generic sysvar entity.

    Wraps a hahomematic system variable as a Home Assistant entity:
    mirrors availability and attributes, registers update/remove callbacks
    on add, and cleans up its entity-registry entry on removal.
    """

    def __init__(
        self,
        control_unit: ControlUnit,
        hm_sysvar_entity: GenericSystemVariable,
    ) -> None:
        """Initialize the generic entity."""
        self._cu: ControlUnit = control_unit
        self._hm_sysvar_entity: GenericSystemVariable = hm_sysvar_entity
        self._attr_should_poll = self._hm_sysvar_entity.should_poll
        self._attr_name = hm_sysvar_entity.name
        self._attr_unique_id = hm_sysvar_entity.unique_id
        _LOGGER.debug("init sysvar: Setting up %s", self.name)

    @property
    def available(self) -> bool:
        """Return if entity is available."""
        return self._hm_sysvar_entity.available

    @property
    def device_info(self) -> DeviceInfo | None:
        """Return device specific attributes."""
        info = self._hm_sysvar_entity.device_information
        return DeviceInfo(
            identifiers={
                (
                    DOMAIN,
                    info.identifier,
                )
            },
            manufacturer=info.manufacturer,
            model=info.model,
            name=info.name,
            sw_version=info.version,
            suggested_area=info.room,
            # Link to the homematic control unit.
            via_device=cast(tuple[str, str], info.central),
        )

    @property
    def extra_state_attributes(self) -> dict[str, Any]:
        """Return the state attributes of the generic entity."""
        return self._hm_sysvar_entity.attributes

    async def async_added_to_hass(self) -> None:
        """Register callbacks and load initial data."""
        if isinstance(self._hm_sysvar_entity, CallbackEntity):
            self._hm_sysvar_entity.register_update_callback(
                update_callback=self._async_sysvar_changed
            )
            self._hm_sysvar_entity.register_remove_callback(
                remove_callback=self._async_sysvar_removed
            )
        self._cu.async_add_hm_sysvar_entity(
            entity_id=self.entity_id, hm_sysvar_entity=self._hm_sysvar_entity
        )

    @callback
    def _async_sysvar_changed(self, *args: Any, **kwargs: Any) -> None:
        """Handle sysvar entity state changes."""
        # Don't update disabled entities.
        if self.enabled:
            _LOGGER.debug("Event %s", self.name)
            self.async_write_ha_state()
        else:
            _LOGGER.debug(
                "Sysvar Changed Event for %s not fired. Sysvar entity is disabled",
                self.name,
            )

    async def async_will_remove_from_hass(self) -> None:
        """Run when hmip sysvar entity will be removed from hass."""
        try:
            self._cu.async_remove_hm_sysvar_entity(self.entity_id)
            self._async_remove_from_registries()
        except KeyError as err:
            _LOGGER.debug("Error removing HM sysvar entity from registry: %s", err)

    @callback
    def _async_remove_from_registries(self) -> None:
        """Remove entity/device from registry."""
        # Remove callback from device.
        self._hm_sysvar_entity.unregister_update_callback(
            update_callback=self._async_sysvar_changed
        )
        self._hm_sysvar_entity.unregister_remove_callback(
            remove_callback=self._async_sysvar_removed
        )
        if not self.registry_entry:
            return
        # Remove from entity registry.
        # Only relevant for entities that do not belong to a device.
        if entity_id := self.registry_entry.entity_id:
            entity_registry = er.async_get(self.hass)
            if entity_id in entity_registry.entities:
                entity_registry.async_remove(entity_id)
@callback
def _async_sysvar_removed(self, *args: Any, **kwargs: Any) -> None:
"""Handle hm sysvar entity removal."""
self.hass.async_create_task(self.async_remove(force_remove=True)) | custom_components/homematicip_local/generic_entity.py | from __future__ import annotations
from datetime import timedelta
import logging
from typing import Any, Generic, cast
from hahomematic.const import HmEntityUsage
from hahomematic.entity import (
CallbackEntity,
CustomEntity,
GenericEntity,
GenericSystemVariable,
)
from homeassistant.core import callback
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.helpers.entity import DeviceInfo, Entity
from .const import DOMAIN
from .control_unit import ControlUnit
from .entity_helpers import get_entity_description
from .helpers import HmGenericEntity, HmGenericSysvarEntity
SCAN_INTERVAL = timedelta(seconds=120)
_LOGGER = logging.getLogger(__name__)
class HaHomematicGenericEntity(Generic[HmGenericEntity], Entity):
"""Representation of the HomematicIP generic entity."""
def __init__(
self,
control_unit: ControlUnit,
hm_entity: HmGenericEntity,
) -> None:
"""Initialize the generic entity."""
self._cu: ControlUnit = control_unit
self._hm_entity: HmGenericEntity = hm_entity
self._attr_should_poll = self._hm_entity.should_poll
if entity_description := get_entity_description(self._hm_entity):
self.entity_description = entity_description
if (
entity_registry_enabled_default := self._get_entity_registry_enabled_default()
) is not None:
self._attr_entity_registry_enabled_default = entity_registry_enabled_default
# Marker showing that the Hm device hase been removed.
self._hm_device_removed = False
self._attr_name = hm_entity.name
self._attr_unique_id = hm_entity.unique_id
_LOGGER.debug("init: Setting up %s", self.name)
@property
def available(self) -> bool:
"""Return if entity is available."""
return self._hm_entity.available
@property
def device_info(self) -> DeviceInfo | None:
"""Return device specific attributes."""
info = self._hm_entity.device_information
return DeviceInfo(
identifiers={
(
DOMAIN,
info.identifier,
)
},
manufacturer=info.manufacturer,
model=info.model,
name=info.name,
sw_version=info.version,
suggested_area=info.room,
# Link to the homematic control unit.
via_device=cast(tuple[str, str], info.central),
)
@property
def extra_state_attributes(self) -> dict[str, Any]:
"""Return the state attributes of the generic entity."""
return self._hm_entity.attributes
async def async_added_to_hass(self) -> None:
"""Register callbacks and load initial data."""
if isinstance(self._hm_entity, CallbackEntity):
self._hm_entity.register_update_callback(
update_callback=self._async_device_changed
)
self._hm_entity.register_remove_callback(
remove_callback=self._async_device_removed
)
self._cu.async_add_hm_entity(
entity_id=self.entity_id, hm_entity=self._hm_entity
)
# Init value of entity.
await self.async_update()
def _get_entity_registry_enabled_default(self) -> bool | None:
"""Return, if entity should be enabled based on usage attribute."""
if self._hm_entity.usage in {
HmEntityUsage.CE_SECONDARY,
HmEntityUsage.CE_VISIBLE,
HmEntityUsage.ENTITY_NO_CREATE,
}:
return False
if self._hm_entity.usage in {HmEntityUsage.CE_PRIMARY}:
return True
return None
@callback
def _async_device_changed(self, *args: Any, **kwargs: Any) -> None:
"""Handle device state changes."""
# Don't update disabled entities
if self.enabled:
_LOGGER.debug("Event %s", self.name)
self.async_write_ha_state()
else:
_LOGGER.debug(
"Device Changed Event for %s not fired. Entity is disabled",
self.name,
)
async def async_update(self) -> None:
"""Update entities from MASTER paramset."""
if isinstance(self._hm_entity, (GenericEntity, CustomEntity)):
await self._hm_entity.load_entity_value()
async def async_will_remove_from_hass(self) -> None:
"""Run when hmip device will be removed from hass."""
# Only go further if the device/entity should be removed from registries
# due to a removal of the HM device.
if self._hm_device_removed:
try:
self._cu.async_remove_hm_entity(self.entity_id)
self._async_remove_from_registries()
except KeyError as err:
_LOGGER.debug("Error removing HM device from registry: %s", err)
@callback
def _async_remove_from_registries(self) -> None:
"""Remove entity/device from registry."""
# Remove callback from device.
self._hm_entity.unregister_update_callback(
update_callback=self._async_device_changed
)
self._hm_entity.unregister_remove_callback(
remove_callback=self._async_device_removed
)
if not self.registry_entry:
return
if device_id := self.registry_entry.device_id:
# Remove from device registry.
device_registry = dr.async_get(self.hass)
if device_id in device_registry.devices:
# This will also remove associated entities from entity registry.
device_registry.async_remove_device(device_id)
else:
# Remove from entity registry.
# Only relevant for entities that do not belong to a device.
if entity_id := self.registry_entry.entity_id:
entity_registry = er.async_get(self.hass)
if entity_id in entity_registry.entities:
entity_registry.async_remove(entity_id)
@callback
def _async_device_removed(self, *args: Any, **kwargs: Any) -> None:
"""Handle hm device removal."""
# Set marker showing that the Hm device hase been removed.
self._hm_device_removed = True
self.hass.async_create_task(self.async_remove(force_remove=True))
class HaHomematicGenericSysvarEntity(Generic[HmGenericSysvarEntity], Entity):
"""Representation of the HomematicIP generic sysvar entity."""
def __init__(
self,
control_unit: ControlUnit,
hm_sysvar_entity: GenericSystemVariable,
) -> None:
"""Initialize the generic entity."""
self._cu: ControlUnit = control_unit
self._hm_sysvar_entity: GenericSystemVariable = hm_sysvar_entity
self._attr_should_poll = self._hm_sysvar_entity.should_poll
self._attr_name = hm_sysvar_entity.name
self._attr_unique_id = hm_sysvar_entity.unique_id
_LOGGER.debug("init sysvar: Setting up %s", self.name)
@property
def available(self) -> bool:
"""Return if entity is available."""
return self._hm_sysvar_entity.available
@property
def device_info(self) -> DeviceInfo | None:
"""Return device specific attributes."""
info = self._hm_sysvar_entity.device_information
return DeviceInfo(
identifiers={
(
DOMAIN,
info.identifier,
)
},
manufacturer=info.manufacturer,
model=info.model,
name=info.name,
sw_version=info.version,
suggested_area=info.room,
# Link to the homematic control unit.
via_device=cast(tuple[str, str], info.central),
)
@property
def extra_state_attributes(self) -> dict[str, Any]:
"""Return the state attributes of the generic entity."""
return self._hm_sysvar_entity.attributes
async def async_added_to_hass(self) -> None:
"""Register callbacks and load initial data."""
if isinstance(self._hm_sysvar_entity, CallbackEntity):
self._hm_sysvar_entity.register_update_callback(
update_callback=self._async_sysvar_changed
)
self._hm_sysvar_entity.register_remove_callback(
remove_callback=self._async_sysvar_removed
)
self._cu.async_add_hm_sysvar_entity(
entity_id=self.entity_id, hm_sysvar_entity=self._hm_sysvar_entity
)
@callback
def _async_sysvar_changed(self, *args: Any, **kwargs: Any) -> None:
"""Handle sysvar entity state changes."""
# Don't update disabled entities
if self.enabled:
_LOGGER.debug("Event %s", self.name)
self.async_write_ha_state()
else:
_LOGGER.debug(
"Sysvar Changed Event for %s not fired. Sysvar entity is disabled",
self.name,
)
async def async_will_remove_from_hass(self) -> None:
"""Run when hmip sysvar entity will be removed from hass."""
try:
self._cu.async_remove_hm_sysvar_entity(self.entity_id)
self._async_remove_from_registries()
except KeyError as err:
_LOGGER.debug("Error removing HM sysvar entity from registry: %s", err)
@callback
def _async_remove_from_registries(self) -> None:
"""Remove entity/device from registry."""
# Remove callback from device.
self._hm_sysvar_entity.unregister_update_callback(
update_callback=self._async_sysvar_changed
)
self._hm_sysvar_entity.unregister_remove_callback(
remove_callback=self._async_sysvar_removed
)
if not self.registry_entry:
return
# Remove from entity registry.
# Only relevant for entities that do not belong to a device.
if entity_id := self.registry_entry.entity_id:
entity_registry = er.async_get(self.hass)
if entity_id in entity_registry.entities:
entity_registry.async_remove(entity_id)
@callback
def _async_sysvar_removed(self, *args: Any, **kwargs: Any) -> None:
"""Handle hm sysvar entity removal."""
self.hass.async_create_task(self.async_remove(force_remove=True)) | 0.887802 | 0.09709 |
from PyQt5.QtCore import QObject, pyqtSignal
from multiprocessing import cpu_count, Pool
import time
from drawDataProcessUtils import *
class CalDrawData(QObject):
    """Qt worker object that computes plotting data for a dataset in parallel."""

    # Qt signals: status-bar text, progress-bar percentage, text-browser
    # output, and end-of-run notification.
    sbar = pyqtSignal(str)
    pbar = pyqtSignal(int)
    tbw = pyqtSignal(str)
    run_end = pyqtSignal()
    # Class-level logger shared by all instances; also usable from worker
    # processes through the classmethod get_draw_data.
    logger = MyLog("CalDrawData", BASEDIR)

    def __init__(self, key_para, dataset):
        """Store the drawing parameters (key_para) and the raw dataset."""
        super().__init__()
        self.key_para = key_para
        self.dataset = dataset
def run(self):
key_para = self.key_para
dataset = self.dataset
args = []
for data in dataset:
args.append((data, key_para))
cpu_N = cpu_count()
pool = Pool(cpu_N - 1)
t1 = time.perf_counter()
self.pbar.emit(40)
self.draw_dataset = pool.starmap_async(self.get_draw_data, args).get()
pool.close()
pool.join()
t2 = time.perf_counter()
self.logger.debug(f"Computes the parallel time of the data required for drawing: {int(t2 - t1)}")
self.pbar.emit(60)
self.run_end.emit()
@classmethod
def get_draw_data(cls,data, key_para):
cls.logger.debug(f"Drawing process pid:{os.getpid()},Start time of drawing calculation process:{time.perf_counter()}")
SELECT_OPTION = key_para["SELECT_OPTION"]
distance, conductance, length, distance_draw, conductance_draw, ALL_TRACE_NUM, SELECT_TRACE_NUM = None, None, None, None, None, None, None,
try:
if key_para["PROCESS"] == 0:
if not SELECT_OPTION:
distance, conductance, length, distance_draw, conductance_draw, ALL_TRACE_NUM, SELECT_TRACE_NUM = DrawDataProcessUtils.calculate_draw_data(
data, key_para)
else:
distance, conductance, length, distance_draw, conductance_draw, ALL_TRACE_NUM, SELECT_TRACE_NUM = DrawDataProcessUtils.calculate_draw_data_with_select(
data, key_para)
else:
# 添加close过程的处理
if not SELECT_OPTION:
distance, conductance, length, distance_draw, conductance_draw, ALL_TRACE_NUM, SELECT_TRACE_NUM = DrawDataProcessUtils.calculate_draw_data_close(
data, key_para)
else:
distance, conductance, length, distance_draw, conductance_draw, ALL_TRACE_NUM, SELECT_TRACE_NUM = DrawDataProcessUtils.calculate_draw_data_close_with_select(
data, key_para)
except Exception as e:
errMsg = f"CALCULATE DRAW DATA ERROR: {e}"
cls.logger.error(errMsg)
return None
else:
return [distance, conductance, length, distance_draw, conductance_draw, ALL_TRACE_NUM, SELECT_TRACE_NUM] | basic_analysis_module/calDrawData.py | from PyQt5.QtCore import QObject, pyqtSignal
from multiprocessing import cpu_count, Pool
import time
from drawDataProcessUtils import *
class CalDrawData(QObject):
sbar = pyqtSignal(str)
pbar = pyqtSignal(int)
tbw = pyqtSignal(str)
run_end = pyqtSignal()
logger = MyLog("CalDrawData", BASEDIR)
def __init__(self, key_para, dataset):
super().__init__()
self.key_para = key_para
self.dataset = dataset
def run(self):
key_para = self.key_para
dataset = self.dataset
args = []
for data in dataset:
args.append((data, key_para))
cpu_N = cpu_count()
pool = Pool(cpu_N - 1)
t1 = time.perf_counter()
self.pbar.emit(40)
self.draw_dataset = pool.starmap_async(self.get_draw_data, args).get()
pool.close()
pool.join()
t2 = time.perf_counter()
self.logger.debug(f"Computes the parallel time of the data required for drawing: {int(t2 - t1)}")
self.pbar.emit(60)
self.run_end.emit()
@classmethod
def get_draw_data(cls,data, key_para):
cls.logger.debug(f"Drawing process pid:{os.getpid()},Start time of drawing calculation process:{time.perf_counter()}")
SELECT_OPTION = key_para["SELECT_OPTION"]
distance, conductance, length, distance_draw, conductance_draw, ALL_TRACE_NUM, SELECT_TRACE_NUM = None, None, None, None, None, None, None,
try:
if key_para["PROCESS"] == 0:
if not SELECT_OPTION:
distance, conductance, length, distance_draw, conductance_draw, ALL_TRACE_NUM, SELECT_TRACE_NUM = DrawDataProcessUtils.calculate_draw_data(
data, key_para)
else:
distance, conductance, length, distance_draw, conductance_draw, ALL_TRACE_NUM, SELECT_TRACE_NUM = DrawDataProcessUtils.calculate_draw_data_with_select(
data, key_para)
else:
# 添加close过程的处理
if not SELECT_OPTION:
distance, conductance, length, distance_draw, conductance_draw, ALL_TRACE_NUM, SELECT_TRACE_NUM = DrawDataProcessUtils.calculate_draw_data_close(
data, key_para)
else:
distance, conductance, length, distance_draw, conductance_draw, ALL_TRACE_NUM, SELECT_TRACE_NUM = DrawDataProcessUtils.calculate_draw_data_close_with_select(
data, key_para)
except Exception as e:
errMsg = f"CALCULATE DRAW DATA ERROR: {e}"
cls.logger.error(errMsg)
return None
else:
return [distance, conductance, length, distance_draw, conductance_draw, ALL_TRACE_NUM, SELECT_TRACE_NUM] | 0.453262 | 0.18717 |
import voluptuous as vol
from esphome import pins
from esphome.components import light
from esphome.components.light import AddressableLight
from esphome.components.power_supply import PowerSupplyComponent
import esphome.config_validation as cv
from esphome.const import CONF_CLOCK_PIN, CONF_COLOR_CORRECT, CONF_DATA_PIN, \
CONF_DEFAULT_TRANSITION_LENGTH, CONF_EFFECTS, CONF_GAMMA_CORRECT, CONF_MAKE_ID, CONF_METHOD, \
CONF_NAME, CONF_NUM_LEDS, CONF_PIN, CONF_POWER_SUPPLY, CONF_TYPE, CONF_VARIANT
from esphome.core import CORE
from esphome.cpp_generator import TemplateArguments, add, get_variable, variable
from esphome.cpp_helpers import setup_component
from esphome.cpp_types import App, Application, Component, global_ns
NeoPixelBusLightOutputBase = light.light_ns.class_('NeoPixelBusLightOutputBase', Component,
AddressableLight)
ESPNeoPixelOrder = light.light_ns.namespace('ESPNeoPixelOrder')
def validate_type(value):
    """Validate a NeoPixel color-order string (must contain R, G, B; optional W)."""
    value = cv.string(value).upper()
    # Each of the three base channels must appear in the order string.
    for channel in 'RGB':
        if channel not in value:
            raise vol.Invalid("Must have {} in type".format(channel))
    unknown = set(value) - set('RGBW')
    if unknown:
        raise vol.Invalid("Type has invalid color: {}".format(', '.join(unknown)))
    # No channel may be listed twice.
    if len(set(value)) != len(value):
        raise vol.Invalid("Type has duplicate color!")
    return value
def validate_variant(value):
    """Normalize chip-variant aliases, then validate against VARIANTS."""
    value = cv.string(value).upper()
    # Marketing names that map onto an existing protocol variant.
    aliases = {
        'WS2813': 'WS2812X',
        'WS2812': '800KBPS',
        'LC8812': 'SK6812',
    }
    value = aliases.get(value, value)
    return cv.one_of(*VARIANTS)(value)
def validate_method(value):
    """Return the platform default when unset, else validate against the platform's method table."""
    if CORE.is_esp32:
        if value is None:
            return 'ESP32_I2S_1'
        return cv.one_of(*ESP32_METHODS, upper=True, space='_')(value)
    if CORE.is_esp8266:
        if value is None:
            return 'ESP8266_DMA'
        return cv.one_of(*ESP8266_METHODS, upper=True, space='_')(value)
    # Neither supported platform is active.
    raise NotImplementedError
def validate_method_pin(value):
    """Cross-check that the chosen transmission method supports the configured pin(s)."""
    method = value[CONF_METHOD]
    # Hardware-fixed pins per method: DMA is tied to GPIO3, UART0/1 to
    # GPIO1/GPIO2; the ESP32 I2S methods accept any of GPIO0-31.
    method_pins = {
        'ESP8266_DMA': [3],
        'ESP8266_UART0': [1],
        'ESP8266_ASYNC_UART0': [1],
        'ESP8266_UART1': [2],
        'ESP8266_ASYNC_UART1': [2],
        'ESP32_I2S_0': list(range(0, 32)),
        'ESP32_I2S_1': list(range(0, 32)),
    }
    # Bit-banging can use any regular GPIO; the usable range differs per platform.
    if CORE.is_esp8266:
        method_pins['BIT_BANG'] = list(range(0, 16))
    elif CORE.is_esp32:
        method_pins['BIT_BANG'] = list(range(0, 32))
    pins_ = method_pins[method]
    # Every configured pin option must be one the method supports.
    for opt in (CONF_PIN, CONF_CLOCK_PIN, CONF_DATA_PIN):
        if opt in value and value[opt] not in pins_:
            raise vol.Invalid("Method {} only supports pin(s) {}".format(
                method, ', '.join('GPIO{}'.format(x) for x in pins_)
            ), path=[CONF_METHOD])
    return value
# Config value -> NeoPixelBus C++ variant name fragment.
VARIANTS = {
    'WS2812X': 'Ws2812x',
    'SK6812': 'Sk6812',
    '800KBPS': '800Kbps',
    '400KBPS': '400Kbps',
}
# Method name -> C++ method class template; '{}' is filled with the
# variant fragment by format_method().
ESP8266_METHODS = {
    'ESP8266_DMA': 'NeoEsp8266Dma{}Method',
    'ESP8266_UART0': 'NeoEsp8266Uart0{}Method',
    'ESP8266_UART1': 'NeoEsp8266Uart1{}Method',
    'ESP8266_ASYNC_UART0': 'NeoEsp8266AsyncUart0{}Method',
    'ESP8266_ASYNC_UART1': 'NeoEsp8266AsyncUart1{}Method',
    'BIT_BANG': 'NeoEsp8266BitBang{}Method',
}
ESP32_METHODS = {
    'ESP32_I2S_0': 'NeoEsp32I2s0{}Method',
    'ESP32_I2S_1': 'NeoEsp32I2s1{}Method',
    'BIT_BANG': 'NeoEsp32BitBang{}Method',
}
def format_method(config):
    """Return the NeoPixelBus C++ method class name for the configured method and variant."""
    variant = VARIANTS[config[CONF_VARIANT]]
    method = config[CONF_METHOD]
    # Select the method table for the active platform.
    if CORE.is_esp8266:
        table = ESP8266_METHODS
    elif CORE.is_esp32:
        table = ESP32_METHODS
    else:
        raise NotImplementedError
    return table[method].format(variant)
def validate(config):
    """Ensure exactly one wiring style is configured: 'pin', or 'clock_pin' plus 'data_pin'."""
    has_pin = CONF_PIN in config
    has_clock = CONF_CLOCK_PIN in config
    has_data = CONF_DATA_PIN in config
    if has_pin:
        # Single-wire chips: the two-wire options must be absent.
        if has_clock or has_data:
            raise vol.Invalid("Cannot specify both 'pin' and 'clock_pin'+'data_pin'")
        return config
    if has_clock:
        # Two-wire chips need both lines.
        if not has_data:
            raise vol.Invalid("If you give clock_pin, you must also specify data_pin")
        return config
    raise vol.Invalid("Must specify at least one of 'pin' or 'clock_pin'+'data_pin'")
MakeNeoPixelBusLight = Application.struct('MakeNeoPixelBusLight')

# Platform schema. Either 'pin' (single-wire chips) or 'clock_pin'+'data_pin'
# (two-wire chips) must be given; the cross-field constraints are enforced by
# validate() and validate_method_pin() appended after the schema.
PLATFORM_SCHEMA = cv.nameable(light.LIGHT_PLATFORM_SCHEMA.extend({
    cv.GenerateID(): cv.declare_variable_id(light.AddressableLightState),
    cv.GenerateID(CONF_MAKE_ID): cv.declare_variable_id(MakeNeoPixelBusLight),
    vol.Optional(CONF_TYPE, default='GRB'): validate_type,
    vol.Optional(CONF_VARIANT, default='800KBPS'): validate_variant,
    vol.Optional(CONF_METHOD, default=None): validate_method,
    vol.Optional(CONF_PIN): pins.output_pin,
    vol.Optional(CONF_CLOCK_PIN): pins.output_pin,
    vol.Optional(CONF_DATA_PIN): pins.output_pin,
    vol.Required(CONF_NUM_LEDS): cv.positive_not_null_int,
    vol.Optional(CONF_GAMMA_CORRECT): cv.positive_float,
    vol.Optional(CONF_COLOR_CORRECT): vol.All([cv.percentage], vol.Length(min=3, max=4)),
    vol.Optional(CONF_DEFAULT_TRANSITION_LENGTH): cv.positive_time_period_milliseconds,
    vol.Optional(CONF_POWER_SUPPLY): cv.use_variable_id(PowerSupplyComponent),
    vol.Optional(CONF_EFFECTS): light.validate_effects(light.ADDRESSABLE_EFFECTS),
}).extend(cv.COMPONENT_SCHEMA.schema), validate, validate_method_pin)
def to_code(config):
    """Generate the C++ setup code for a NeoPixelBus light (esphome coroutine)."""
    type_ = config[CONF_TYPE]
    has_white = 'W' in type_
    # RGBW chips use the white-channel feature and factory function.
    if has_white:
        func = App.make_neo_pixel_bus_rgbw_light
        color_feat = global_ns.NeoRgbwFeature
    else:
        func = App.make_neo_pixel_bus_rgb_light
        color_feat = global_ns.NeoRgbFeature
    template = TemplateArguments(getattr(global_ns, format_method(config)), color_feat)
    rhs = func(template, config[CONF_NAME])
    make = variable(config[CONF_MAKE_ID], rhs, type=MakeNeoPixelBusLight.template(template))
    output = make.Poutput
    # Single-wire vs clock+data wiring.
    if CONF_PIN in config:
        add(output.add_leds(config[CONF_NUM_LEDS], config[CONF_PIN]))
    else:
        add(output.add_leds(config[CONF_NUM_LEDS], config[CONF_CLOCK_PIN], config[CONF_DATA_PIN]))
    add(output.set_pixel_order(getattr(ESPNeoPixelOrder, type_)))
    if CONF_POWER_SUPPLY in config:
        # get_variable is an esphome generator protocol; yield until resolved.
        for power_supply in get_variable(config[CONF_POWER_SUPPLY]):
            yield
        add(output.set_power_supply(power_supply))
    if CONF_COLOR_CORRECT in config:
        add(output.set_correction(*config[CONF_COLOR_CORRECT]))
    light.setup_light(make.Pstate, config)
    setup_component(output, config)
REQUIRED_BUILD_FLAGS = '-DUSE_NEO_PIXEL_BUS_LIGHT'
LIB_DEPS = 'NeoPixelBus@2.4.1' | esphome/components/light/neopixelbus.py | import voluptuous as vol
from esphome import pins
from esphome.components import light
from esphome.components.light import AddressableLight
from esphome.components.power_supply import PowerSupplyComponent
import esphome.config_validation as cv
from esphome.const import CONF_CLOCK_PIN, CONF_COLOR_CORRECT, CONF_DATA_PIN, \
CONF_DEFAULT_TRANSITION_LENGTH, CONF_EFFECTS, CONF_GAMMA_CORRECT, CONF_MAKE_ID, CONF_METHOD, \
CONF_NAME, CONF_NUM_LEDS, CONF_PIN, CONF_POWER_SUPPLY, CONF_TYPE, CONF_VARIANT
from esphome.core import CORE
from esphome.cpp_generator import TemplateArguments, add, get_variable, variable
from esphome.cpp_helpers import setup_component
from esphome.cpp_types import App, Application, Component, global_ns
NeoPixelBusLightOutputBase = light.light_ns.class_('NeoPixelBusLightOutputBase', Component,
AddressableLight)
ESPNeoPixelOrder = light.light_ns.namespace('ESPNeoPixelOrder')
def validate_type(value):
value = cv.string(value).upper()
if 'R' not in value:
raise vol.Invalid("Must have R in type")
if 'G' not in value:
raise vol.Invalid("Must have G in type")
if 'B' not in value:
raise vol.Invalid("Must have B in type")
rest = set(value) - set('RGBW')
if rest:
raise vol.Invalid("Type has invalid color: {}".format(', '.join(rest)))
if len(set(value)) != len(value):
raise vol.Invalid("Type has duplicate color!")
return value
def validate_variant(value):
value = cv.string(value).upper()
if value == 'WS2813':
value = 'WS2812X'
if value == 'WS2812':
value = '800KBPS'
if value == 'LC8812':
value = 'SK6812'
return cv.one_of(*VARIANTS)(value)
def validate_method(value):
if value is None:
if CORE.is_esp32:
return 'ESP32_I2S_1'
if CORE.is_esp8266:
return 'ESP8266_DMA'
raise NotImplementedError
if CORE.is_esp32:
return cv.one_of(*ESP32_METHODS, upper=True, space='_')(value)
if CORE.is_esp8266:
return cv.one_of(*ESP8266_METHODS, upper=True, space='_')(value)
raise NotImplementedError
def validate_method_pin(value):
method = value[CONF_METHOD]
method_pins = {
'ESP8266_DMA': [3],
'ESP8266_UART0': [1],
'ESP8266_ASYNC_UART0': [1],
'ESP8266_UART1': [2],
'ESP8266_ASYNC_UART1': [2],
'ESP32_I2S_0': list(range(0, 32)),
'ESP32_I2S_1': list(range(0, 32)),
}
if CORE.is_esp8266:
method_pins['BIT_BANG'] = list(range(0, 16))
elif CORE.is_esp32:
method_pins['BIT_BANG'] = list(range(0, 32))
pins_ = method_pins[method]
for opt in (CONF_PIN, CONF_CLOCK_PIN, CONF_DATA_PIN):
if opt in value and value[opt] not in pins_:
raise vol.Invalid("Method {} only supports pin(s) {}".format(
method, ', '.join('GPIO{}'.format(x) for x in pins_)
), path=[CONF_METHOD])
return value
VARIANTS = {
'WS2812X': 'Ws2812x',
'SK6812': 'Sk6812',
'800KBPS': '800Kbps',
'400KBPS': '400Kbps',
}
ESP8266_METHODS = {
'ESP8266_DMA': 'NeoEsp8266Dma{}Method',
'ESP8266_UART0': 'NeoEsp8266Uart0{}Method',
'ESP8266_UART1': 'NeoEsp8266Uart1{}Method',
'ESP8266_ASYNC_UART0': 'NeoEsp8266AsyncUart0{}Method',
'ESP8266_ASYNC_UART1': 'NeoEsp8266AsyncUart1{}Method',
'BIT_BANG': 'NeoEsp8266BitBang{}Method',
}
ESP32_METHODS = {
'ESP32_I2S_0': 'NeoEsp32I2s0{}Method',
'ESP32_I2S_1': 'NeoEsp32I2s1{}Method',
'BIT_BANG': 'NeoEsp32BitBang{}Method',
}
def format_method(config):
variant = VARIANTS[config[CONF_VARIANT]]
method = config[CONF_METHOD]
if CORE.is_esp8266:
return ESP8266_METHODS[method].format(variant)
if CORE.is_esp32:
return ESP32_METHODS[method].format(variant)
raise NotImplementedError
def validate(config):
if CONF_PIN in config:
if CONF_CLOCK_PIN in config or CONF_DATA_PIN in config:
raise vol.Invalid("Cannot specify both 'pin' and 'clock_pin'+'data_pin'")
return config
if CONF_CLOCK_PIN in config:
if CONF_DATA_PIN not in config:
raise vol.Invalid("If you give clock_pin, you must also specify data_pin")
return config
raise vol.Invalid("Must specify at least one of 'pin' or 'clock_pin'+'data_pin'")
MakeNeoPixelBusLight = Application.struct('MakeNeoPixelBusLight')
PLATFORM_SCHEMA = cv.nameable(light.LIGHT_PLATFORM_SCHEMA.extend({
cv.GenerateID(): cv.declare_variable_id(light.AddressableLightState),
cv.GenerateID(CONF_MAKE_ID): cv.declare_variable_id(MakeNeoPixelBusLight),
vol.Optional(CONF_TYPE, default='GRB'): validate_type,
vol.Optional(CONF_VARIANT, default='800KBPS'): validate_variant,
vol.Optional(CONF_METHOD, default=None): validate_method,
vol.Optional(CONF_PIN): pins.output_pin,
vol.Optional(CONF_CLOCK_PIN): pins.output_pin,
vol.Optional(CONF_DATA_PIN): pins.output_pin,
vol.Required(CONF_NUM_LEDS): cv.positive_not_null_int,
vol.Optional(CONF_GAMMA_CORRECT): cv.positive_float,
vol.Optional(CONF_COLOR_CORRECT): vol.All([cv.percentage], vol.Length(min=3, max=4)),
vol.Optional(CONF_DEFAULT_TRANSITION_LENGTH): cv.positive_time_period_milliseconds,
vol.Optional(CONF_POWER_SUPPLY): cv.use_variable_id(PowerSupplyComponent),
vol.Optional(CONF_EFFECTS): light.validate_effects(light.ADDRESSABLE_EFFECTS),
}).extend(cv.COMPONENT_SCHEMA.schema), validate, validate_method_pin)
def to_code(config):
    """Generate the C++ setup code for the NeoPixelBus light.

    This appears to be an esphomeyaml-style codegen coroutine: the bare
    ``yield`` suspends generation until the referenced variable (here the
    power supply) has been resolved — TODO confirm against the framework.
    """
    type_ = config[CONF_TYPE]
    # An RGBW strip is selected by a 'W' channel in the colour-order string.
    has_white = 'W' in type_
    if has_white:
        func = App.make_neo_pixel_bus_rgbw_light
        color_feat = global_ns.NeoRgbwFeature
    else:
        func = App.make_neo_pixel_bus_rgb_light
        color_feat = global_ns.NeoRgbFeature
    # Template = platform driver method class + colour feature.
    template = TemplateArguments(getattr(global_ns, format_method(config)), color_feat)
    rhs = func(template, config[CONF_NAME])
    make = variable(config[CONF_MAKE_ID], rhs, type=MakeNeoPixelBusLight.template(template))
    output = make.Poutput
    # One-wire strips take a single data pin; two-wire ones take clock+data.
    if CONF_PIN in config:
        add(output.add_leds(config[CONF_NUM_LEDS], config[CONF_PIN]))
    else:
        add(output.add_leds(config[CONF_NUM_LEDS], config[CONF_CLOCK_PIN], config[CONF_DATA_PIN]))
    add(output.set_pixel_order(getattr(ESPNeoPixelOrder, type_)))
    if CONF_POWER_SUPPLY in config:
        for power_supply in get_variable(config[CONF_POWER_SUPPLY]):
            yield
        add(output.set_power_supply(power_supply))
    if CONF_COLOR_CORRECT in config:
        add(output.set_correction(*config[CONF_COLOR_CORRECT]))
    light.setup_light(make.Pstate, config)
    setup_component(output, config)
# Build flag and Arduino library dependency required by the generated firmware.
REQUIRED_BUILD_FLAGS = '-DUSE_NEO_PIXEL_BUS_LIGHT'
LIB_DEPS = 'NeoPixelBus@2.4.1'
import urllib.request
import urllib.parse
import re
import pathlib
from html.parser import HTMLParser
from html.entities import name2codepoint
# Root of the Georgia DPH COVID-19 dashboard that serves the data.
BASE_URL = "https://ga-covid19.ondemand.sas.com/"
# Captures the payload of every JSON.parse('...') call in the JS bundle.
# NOTE(review): the dots are unescaped, so e.g. "JSONxparse" would also
# match — harmless in practice, but r"JSON\.parse\('(.*?)'\)" is tighter.
PATTERN = re.compile(r"JSON.parse\('(.*?)'\)")
# Output file names, in the order the JSON payloads appear in the bundle.
FILENAME = [
    "timestamp",
    "county_history",
    "county_totals",
    "individual_death",
    "race_ethnicity",
    "lab_testing_history",
    "lab_testing_totals",
    "lab_testing_summary",
    "current_status",
    "current_status_changes",
    "demographics_history",
    "age_history",
    "ethnicity_history",
    "ethnicity_totals",
    "comorbidities_totals",
    "comorbidities_ethnicy_totals",
    "comorbidity_summary",
    "testing_summary",
]
# Directory (relative to the CWD) where the JSON artifacts are written.
SAVE_DIR = "artifacts"
class MyHTMLParser(HTMLParser):
    """HTML parser that records the absolute URL of the first <script>
    tag whose src attribute contains "main" (the app's main.js bundle).

    The URL is stored as ``self.url``. It defaults to ``None`` when no
    matching tag is seen; previously the attribute was only created on a
    match, so ``parser.url`` raised AttributeError for pages without one.
    """

    # Absolute URL of the main.js bundle, or None if not found.
    url = None

    def handle_starttag(self, tag, attrs):
        if tag.lower() != "script":
            return
        for name, value in attrs:
            # Valueless attributes arrive as value=None; guard against that.
            if name.lower() == "src" and value:
                rel = value.lower()
                if "main" in rel:
                    self.url = urllib.parse.urljoin(BASE_URL, rel)
def extract_json(text: str):
    """Yield every payload captured from JSON.parse('<payload>') in *text*."""
    for found in PATTERN.finditer(text):
        yield found.group(1)
def extract_js_url(text: str):
    """Parse *text* as HTML and return the main.js script URL found in it."""
    html_parser = MyHTMLParser()
    html_parser.feed(text)
    return html_parser.url
def run(url=BASE_URL, prefix=""):
    """Download the dashboard page, locate its main.js bundle, and dump every
    embedded JSON.parse(...) payload to SAVE_DIR/<prefix><name>.json.

    The i-th payload is named after FILENAME[i]; payloads beyond the known
    list fall back to a zero-padded index.
    """
    path_root = pathlib.Path(SAVE_DIR)
    path_root.mkdir(exist_ok=True)
    # Fetch the landing page and find the (hash-named) main.js URL inside it.
    with urllib.request.urlopen(url) as response:
        html = response.read().decode("utf-8")
    url_js = extract_js_url(html)
    # The datasets are inlined in the JS bundle as JSON.parse('...') calls.
    with urllib.request.urlopen(url_js) as response:
        js_text = response.read().decode("utf-8")
    gen = extract_json(js_text)
    for i, s in enumerate(gen):
        name = FILENAME[i] if i < len(FILENAME) else f"{i:02d}"
        path = path_root / f"{prefix}{name}.json"
        with open(path, "w") as f:
            print(s, file=f)
# Script entry point: fetch and save all datasets with default settings.
if __name__ == "__main__":
    run()
import urllib.parse
import re
import pathlib
from html.parser import HTMLParser
from html.entities import name2codepoint
# Root of the Georgia DPH COVID-19 dashboard that serves the data.
BASE_URL = "https://ga-covid19.ondemand.sas.com/"
# Captures the payload of every JSON.parse('...') call in the JS bundle.
# NOTE(review): the dots are unescaped, so e.g. "JSONxparse" would also
# match — harmless in practice, but r"JSON\.parse\('(.*?)'\)" is tighter.
PATTERN = re.compile(r"JSON.parse\('(.*?)'\)")
# Output file names, in the order the JSON payloads appear in the bundle.
FILENAME = [
    "timestamp",
    "county_history",
    "county_totals",
    "individual_death",
    "race_ethnicity",
    "lab_testing_history",
    "lab_testing_totals",
    "lab_testing_summary",
    "current_status",
    "current_status_changes",
    "demographics_history",
    "age_history",
    "ethnicity_history",
    "ethnicity_totals",
    "comorbidities_totals",
    "comorbidities_ethnicy_totals",
    "comorbidity_summary",
    "testing_summary",
]
# Directory (relative to the CWD) where the JSON artifacts are written.
SAVE_DIR = "artifacts"
class MyHTMLParser(HTMLParser):
    """HTML parser that records the absolute URL of the first <script>
    tag whose src attribute contains "main" (the app's main.js bundle).

    The URL is stored as ``self.url``. It defaults to ``None`` when no
    matching tag is seen; previously the attribute was only created on a
    match, so ``parser.url`` raised AttributeError for pages without one.
    """

    # Absolute URL of the main.js bundle, or None if not found.
    url = None

    def handle_starttag(self, tag, attrs):
        if tag.lower() != "script":
            return
        for name, value in attrs:
            # Valueless attributes arrive as value=None; guard against that.
            if name.lower() == "src" and value:
                rel = value.lower()
                if "main" in rel:
                    self.url = urllib.parse.urljoin(BASE_URL, rel)
def extract_json(text: str):
    """Yield every payload captured from JSON.parse('<payload>') in *text*."""
    for found in PATTERN.finditer(text):
        yield found.group(1)
def extract_js_url(text: str):
    """Parse *text* as HTML and return the main.js script URL found in it."""
    html_parser = MyHTMLParser()
    html_parser.feed(text)
    return html_parser.url
def run(url=BASE_URL, prefix=""):
    """Download the dashboard page, locate its main.js bundle, and dump every
    embedded JSON.parse(...) payload to SAVE_DIR/<prefix><name>.json.

    The i-th payload is named after FILENAME[i]; payloads beyond the known
    list fall back to a zero-padded index.
    """
    path_root = pathlib.Path(SAVE_DIR)
    path_root.mkdir(exist_ok=True)
    # Fetch the landing page and find the (hash-named) main.js URL inside it.
    with urllib.request.urlopen(url) as response:
        html = response.read().decode("utf-8")
    url_js = extract_js_url(html)
    # The datasets are inlined in the JS bundle as JSON.parse('...') calls.
    with urllib.request.urlopen(url_js) as response:
        js_text = response.read().decode("utf-8")
    gen = extract_json(js_text)
    for i, s in enumerate(gen):
        name = FILENAME[i] if i < len(FILENAME) else f"{i:02d}"
        path = path_root / f"{prefix}{name}.json"
        with open(path, "w") as f:
            print(s, file=f)
# Script entry point: fetch and save all datasets with default settings.
if __name__ == "__main__":
    run()
import logging
from datetime import datetime, timezone
from flask import current_app as app
from flask import Blueprint, flash, render_template, request, redirect, url_for
from flask_login import login_required
from flask_paginate import Pagination
from iso8601 import parse_date
from structlog import wrap_logger
from response_operations_ui.common.mappers import map_ce_response_status, map_region
from response_operations_ui.controllers import case_controller, iac_controller, party_controller, \
reporting_units_controllers
from response_operations_ui.controllers.collection_exercise_controllers import \
get_case_group_status_by_collection_exercise, get_collection_exercise_by_id
from response_operations_ui.controllers.survey_controllers import get_survey_by_id
from response_operations_ui.forms import EditContactDetailsForm, RuSearchForm
# Structlog-wrapped module logger so log calls accept keyword context.
logger = wrap_logger(logging.getLogger(__name__))
# Blueprint grouping all reporting-unit views.
reporting_unit_bp = Blueprint('reporting_unit_bp', __name__, static_folder='static', template_folder='templates')
@reporting_unit_bp.route('/<ru_ref>', methods=['GET'])
@login_required
def view_reporting_unit(ru_ref):
    """Render the reporting-unit detail page for *ru_ref*.

    Aggregates the unit's cases, live collection exercises, surveys and
    respondents into one structure for the template, and flashes an info
    banner when query args indicate a recent change.
    """
    logger.info("Gathering data to view reporting unit", ru_ref=ru_ref)
    # Make some initial calls to retrieve some data we'll need
    reporting_unit = party_controller.get_party_by_ru_ref(ru_ref)
    cases = case_controller.get_cases_by_business_party_id(reporting_unit['id'])
    case_groups = [case['caseGroup'] for case in cases]
    # Get all collection exercises for retrieved case groups
    collection_exercise_ids = {case_group['collectionExerciseId'] for case_group in case_groups}
    collection_exercises = [get_collection_exercise_by_id(ce_id) for ce_id in collection_exercise_ids]
    now = datetime.now(timezone.utc)
    # A collection exercise is 'live' once its scheduled start is in the past.
    live_collection_exercises = [ce for ce in collection_exercises if parse_date(ce['scheduledStartDateTime']) < now]
    live_collection_exercises_ids = [ce['id'] for ce in live_collection_exercises]
    # Attributes represent the data for a reporting unit at the time they were enrolled onto each collection exercise.
    all_attributes = party_controller.get_business_attributes_by_party_id(reporting_unit['id'])
    # Copies and uses only the collection exercises that have gone live
    attributes = {k: v for k, v in all_attributes.items() if k in live_collection_exercises_ids}
    refined_live_collection_exercises = [add_collection_exercise_details(ce, attributes[ce['id']], case_groups)
                                         for ce in live_collection_exercises]
    # Get all related surveys for gathered collection exercises
    survey_ids = {collection_exercise['surveyId'] for collection_exercise in refined_live_collection_exercises}
    surveys = [get_survey_by_id(survey_id) for survey_id in survey_ids]
    # Get all respondents for the given ru
    respondent_party_ids = [respondent['partyId'] for respondent in reporting_unit.get('associations')]
    respondents = party_controller.get_respondent_by_party_ids(respondent_party_ids)
    # Link collection exercises and respondents to appropriate surveys
    linked_surveys = [survey_with_respondents_and_exercises(survey, respondents, refined_live_collection_exercises,
                                                           ru_ref)
                      for survey in surveys]
    sorted_linked_surveys = sorted(linked_surveys, key=lambda survey: survey['surveyRef'])
    # Add latest active iac code to surveys
    surveys_with_latest_case = [
        {
            **survey,
            "case": get_latest_case_with_ce(cases, survey['collection_exercises'])
        }
        for survey in sorted_linked_surveys
    ]
    # Generate appropriate info message if necessary
    # TODO Standardise how the info messages are generated
    survey_arg = request.args.get('survey')
    period_arg = request.args.get('period')
    if survey_arg and period_arg:
        # Arriving here after a response-status change: flash the new status.
        survey = next(filter(lambda s: s['shortName'] == survey_arg, sorted_linked_surveys))
        collection_exercise = next(filter(lambda s: s['exerciseRef'] == period_arg, survey['collection_exercises']))
        new_status = collection_exercise['responseStatus']
        flash(f'Response status for {survey["surveyRef"]} {survey["shortName"]}'
              f' period {period_arg} changed to {new_status}')
    info = request.args.get('info')
    if request.args.get('enrolment_changed'):
        flash('Enrolment status changed', 'information')
    if request.args.get('account_status_changed'):
        flash('Account status changed', 'information')
    elif info:
        flash(info, 'information')
    breadcrumbs = [
        {
            "text": "Reporting units",
            "url": "/reporting-units"
        },
        {
            "text": f"{ru_ref}"
        }
    ]
    logger.info("Successfully gathered data to view reporting unit", ru_ref=ru_ref)
    return render_template('reporting-unit.html', ru_ref=ru_ref, ru=reporting_unit,
                           surveys=surveys_with_latest_case, breadcrumbs=breadcrumbs)
def add_collection_exercise_details(collection_exercise, reporting_unit, case_groups):
    """Extend a collection exercise with display fields.

    :param collection_exercise: dict of collection exercise data
    :param reporting_unit: dict of reporting unit attribute data captured
        at enrolment time for that exercise
    :param case_groups: list of case group data used to look up the status
    :return: the collection exercise dict plus formatted display fields
    :rtype: dict
    """
    status = get_case_group_status_by_collection_exercise(case_groups, collection_exercise['id'])
    details = dict(collection_exercise)
    details['responseStatus'] = map_ce_response_status(status)
    details['companyName'] = reporting_unit['name']
    details['companyRegion'] = map_region(reporting_unit['attributes']['region'])
    details['trading_as'] = reporting_unit['trading_as']
    return details
def survey_with_respondents_and_exercises(survey, respondents, collection_exercises, ru_ref):
    """Attach the enrolled respondents and date-sorted exercises to *survey*."""
    survey_id = survey['id']
    enrolled = []
    for respondent in respondents:
        if survey_id in party_controller.survey_ids_for_respondent(respondent, ru_ref):
            enrolled.append(party_controller.add_enrolment_status_to_respondent(respondent, ru_ref, survey_id))
    exercises = [ce for ce in collection_exercises if ce['surveyId'] == survey_id]
    # Newest exercise first.
    exercises.sort(key=lambda ce: ce['scheduledStartDateTime'], reverse=True)
    return {
        **survey,
        'respondents': enrolled,
        'collection_exercises': exercises
    }
def get_latest_case_with_ce(cases, collection_exercises):
    """Return the most recently created case belonging to one of
    *collection_exercises*, with an 'activeIAC' flag added.

    :param cases: list of case dicts for the reporting unit
    :param collection_exercises: list of collection exercise dicts
    :return: the latest matching case with 'activeIAC' set, or None when no
        case belongs to any given collection exercise (previously this path
        raised TypeError by subscripting None)
    """
    ces_ids = {ce['id'] for ce in collection_exercises}
    cases_for_survey = [case
                        for case in cases
                        if case.get('caseGroup', {}).get('collectionExerciseId') in ces_ids]
    # max() with default avoids both the full sort and the None subscript.
    case = max(cases_for_survey, key=lambda c: c['createdDateTime'], default=None)
    if case is None:
        return None
    case['activeIAC'] = iac_controller.is_iac_active(case['iac'])
    return case
@reporting_unit_bp.route('/<ru_ref>/edit-contact-details/<respondent_id>', methods=['GET'])
@login_required
def view_contact_details(ru_ref, respondent_id):
    """Render the edit-contact-details form pre-filled with the
    respondent's current details."""
    respondent_details = party_controller.get_respondent_by_party_id(respondent_id)
    form = EditContactDetailsForm(form=request.form, default_values=respondent_details)
    return render_template('edit-contact-details.html', ru_ref=ru_ref, respondent_details=respondent_details,
                           form=form, tab='reporting_units')
@reporting_unit_bp.route('/<ru_ref>/edit-contact-details/<respondent_id>', methods=['POST'])
@login_required
def edit_contact_details(ru_ref, respondent_id):
    """Validate and apply contact-detail changes for a respondent.

    On validation failure the form is re-rendered with its errors;
    otherwise the change set returned by the party service decides which
    flash message the user sees.
    """
    edit_contact_details_form = EditContactDetailsForm(form=request.form)
    if not edit_contact_details_form.validate():
        contact_details = party_controller.get_respondent_by_party_id(respondent_id)
        return render_template('edit-contact-details.html', form=edit_contact_details_form, tab='reporting_units',
                               ru_ref=ru_ref, respondent_id=respondent_id,
                               errors=edit_contact_details_form.errors, respondent_details=contact_details)
    logger.info('Updating respondent details', ru_ref=ru_ref, respondent_id=respondent_id)
    form = request.form
    contact_details_changed = party_controller.update_contact_details(respondent_id, form, ru_ref)
    # An email change triggers re-verification, so it gets its own message.
    if 'emailAddress' in contact_details_changed:
        flash(f'Contact details changed and verification email sent to {form.get("email")}')
    elif len(contact_details_changed) > 0:
        flash('Contact details changed')
    else:
        flash('No updates were necessary')
    return redirect(url_for('reporting_unit_bp.view_reporting_unit', ru_ref=ru_ref))
@reporting_unit_bp.route('/', methods=['GET'])
@login_required
def search_reporting_unit_home():
    """Render the empty reporting-unit search page."""
    return render_template('reporting-unit-search/reporting-units-search.html',
                           form=RuSearchForm(),
                           breadcrumbs=[{"text": "Reporting units"}])
@reporting_unit_bp.route('/', methods=['POST'])
@login_required
def search_redirect():
    """Validate the search form and redirect to the results page.

    Previously an invalid form made the view fall through and return None,
    which Flask turns into a 500; now the search page is re-rendered with
    the bound (invalid) form so validation errors can be shown.
    """
    form = RuSearchForm(request.form)
    if form.validate_on_submit():
        query = request.form.get('query')
        return redirect(url_for('reporting_unit_bp.search_reporting_units', query=query))
    return render_template('reporting-unit-search/reporting-units-search.html',
                           form=form,
                           breadcrumbs=[{"text": "Reporting units"}])
@reporting_unit_bp.route('/search', methods=['GET'])
@login_required
def search_reporting_units():
    """Render a paginated page of businesses matching the search query."""
    search_key_words = request.values.get('query', '')
    page = request.values.get('page', '1')
    limit = app.config["PARTY_BUSINESS_RESULTS_PER_PAGE"]
    breadcrumbs = [{"text": "Reporting units"}]
    form = RuSearchForm()
    form.query.data = search_key_words
    response_data = reporting_units_controllers.search_reporting_units(search_key_words, limit, page)
    business_list = response_data['businesses']
    total_business_count = response_data['total_business_count']
    offset = (int(page) - 1) * limit
    # BUG FIX: previously `(limit + offset) if total >= limit else total`,
    # which overstated the last index on the final page (e.g. "21-30 of 25");
    # clamp to the total instead.
    last_index = min(limit + offset, total_business_count)
    pagination = Pagination(page=int(page),
                            per_page=limit,
                            total=total_business_count,
                            record_name='Business',
                            prev_label='Previous',
                            next_label='Next',
                            outer_window=0,
                            format_total=True,
                            format_number=True,
                            show_single_page=False)
    return render_template('reporting-unit-search/reporting-units.html',
                           form=form,
                           business_list=business_list,
                           total_business_count=total_business_count,
                           breadcrumbs=breadcrumbs,
                           first_index=1 + offset,
                           last_index=last_index,
                           pagination=pagination,
                           show_pagination=bool(total_business_count > limit))
@reporting_unit_bp.route('/resend_verification/<ru_ref>/<party_id>', methods=['GET'])
@login_required
def view_resend_verification(ru_ref, party_id):
    """Show the confirmation page for re-sending a verification email.

    A pending (not yet verified) email address takes precedence over the
    respondent's current one.
    """
    logger.info("Re-send verification email requested", ru_ref=ru_ref, party_id=party_id)
    respondent = party_controller.get_respondent_by_party_id(party_id)
    if 'pendingEmailAddress' in respondent:
        email = respondent['pendingEmailAddress']
    else:
        email = respondent['emailAddress']
    return render_template('re-send-verification-email.html', ru_ref=ru_ref, email=email, tab='reporting_units')
@reporting_unit_bp.route('/resend_verification/<ru_ref>/<party_id>', methods=['POST'])
@login_required
def resend_verification(ru_ref, party_id):
    """Re-send the account verification email, then return to the RU page."""
    reporting_units_controllers.resend_verification_email(party_id)
    logger.info("Re-sent verification email.", party_id=party_id)
    flash('Verification email re-sent')
    return redirect(url_for('reporting_unit_bp.view_reporting_unit', ru_ref=ru_ref))
@reporting_unit_bp.route('/<ru_ref>/new_enrolment_code', methods=['GET'])
@login_required
def generate_new_enrolment_code(ru_ref):
    """Generate a fresh enrolment (IAC) code for a case and display it."""
    case_id = request.args.get('case_id')
    reporting_units_controllers.generate_new_enrolment_code(case_id)
    # Re-fetch the case so the template shows the newly generated IAC.
    case = case_controller.get_case_by_id(case_id)
    return render_template('new-enrolment-code.html',
                           iac=case['iac'],
                           ru_ref=ru_ref,
                           ru_name=request.args.get('ru_name'),
                           trading_as=request.args.get('trading_as'),
                           survey_name=request.args.get('survey_name'),
                           survey_ref=request.args.get('survey_ref'))
@reporting_unit_bp.route('/<ru_ref>/change-enrolment-status', methods=['GET'])
@login_required
def confirm_change_enrolment_status(ru_ref):
    """Show the confirmation page before changing an enrolment's status.

    All display values are passed through from the referring page's query
    string; `change_flag` carries the requested new status.
    """
    return render_template('confirm-enrolment-change.html', business_id=request.args['business_id'], ru_ref=ru_ref,
                           ru_name=request.args.get('ru_name'),
                           trading_as=request.args['trading_as'], survey_id=request.args['survey_id'],
                           survey_name=request.args['survey_name'], respondent_id=request.args['respondent_id'],
                           first_name=request.args['respondent_first_name'],
                           last_name=request.args['respondent_last_name'],
                           change_flag=request.args['change_flag'],
                           tab=request.args['tab'])
@reporting_unit_bp.route('/<ru_ref>/change-respondent-status', methods=['GET'])
@login_required
def confirm_change_respondent_status(ru_ref):
    """Show the confirmation page before changing a respondent's account
    status; `change_flag` carries the requested new status."""
    respondent = party_controller.get_respondent_by_party_id(request.args['party_id'])
    return render_template('confirm-respondent-status-change.html',
                           ru_ref=ru_ref,
                           respondent_id=respondent['id'],
                           first_name=respondent['firstName'],
                           last_name=respondent['lastName'],
                           email_address=respondent['emailAddress'],
                           change_flag=request.args['change_flag'],
                           tab=request.args['tab'])
@reporting_unit_bp.route('/<ru_ref>/change-enrolment-status', methods=['POST'])
@login_required
def change_enrolment_status(ru_ref):
    """Apply the enrolment status change, then return to the RU page."""
    reporting_units_controllers.change_enrolment_status(business_id=request.args['business_id'],
                                                        respondent_id=request.args['respondent_id'],
                                                        survey_id=request.args['survey_id'],
                                                        change_flag=request.args['change_flag'])
    return redirect(url_for('reporting_unit_bp.view_reporting_unit', ru_ref=ru_ref, enrolment_changed='True'))
@reporting_unit_bp.route('/<ru_ref>/change-respondent-status', methods=['POST'])
@login_required
def change_respondent_status(ru_ref):
    """Apply the respondent account status change, then return to the RU page."""
    reporting_units_controllers.change_respondent_status(respondent_id=request.args['respondent_id'],
                                                         change_flag=request.args['change_flag'])
    return redirect(url_for('reporting_unit_bp.view_reporting_unit', ru_ref=ru_ref, account_status_changed='True'))
from datetime import datetime, timezone
from flask import current_app as app
from flask import Blueprint, flash, render_template, request, redirect, url_for
from flask_login import login_required
from flask_paginate import Pagination
from iso8601 import parse_date
from structlog import wrap_logger
from response_operations_ui.common.mappers import map_ce_response_status, map_region
from response_operations_ui.controllers import case_controller, iac_controller, party_controller, \
reporting_units_controllers
from response_operations_ui.controllers.collection_exercise_controllers import \
get_case_group_status_by_collection_exercise, get_collection_exercise_by_id
from response_operations_ui.controllers.survey_controllers import get_survey_by_id
from response_operations_ui.forms import EditContactDetailsForm, RuSearchForm
# Structlog-wrapped module logger so log calls accept keyword context.
logger = wrap_logger(logging.getLogger(__name__))
# Blueprint grouping all reporting-unit views.
reporting_unit_bp = Blueprint('reporting_unit_bp', __name__, static_folder='static', template_folder='templates')
@reporting_unit_bp.route('/<ru_ref>', methods=['GET'])
@login_required
def view_reporting_unit(ru_ref):
    """Render the reporting-unit detail page for *ru_ref*.

    Aggregates the unit's cases, live collection exercises, surveys and
    respondents into one structure for the template, and flashes an info
    banner when query args indicate a recent change.
    """
    logger.info("Gathering data to view reporting unit", ru_ref=ru_ref)
    # Make some initial calls to retrieve some data we'll need
    reporting_unit = party_controller.get_party_by_ru_ref(ru_ref)
    cases = case_controller.get_cases_by_business_party_id(reporting_unit['id'])
    case_groups = [case['caseGroup'] for case in cases]
    # Get all collection exercises for retrieved case groups
    collection_exercise_ids = {case_group['collectionExerciseId'] for case_group in case_groups}
    collection_exercises = [get_collection_exercise_by_id(ce_id) for ce_id in collection_exercise_ids]
    now = datetime.now(timezone.utc)
    # A collection exercise is 'live' once its scheduled start is in the past.
    live_collection_exercises = [ce for ce in collection_exercises if parse_date(ce['scheduledStartDateTime']) < now]
    live_collection_exercises_ids = [ce['id'] for ce in live_collection_exercises]
    # Attributes represent the data for a reporting unit at the time they were enrolled onto each collection exercise.
    all_attributes = party_controller.get_business_attributes_by_party_id(reporting_unit['id'])
    # Copies and uses only the collection exercises that have gone live
    attributes = {k: v for k, v in all_attributes.items() if k in live_collection_exercises_ids}
    refined_live_collection_exercises = [add_collection_exercise_details(ce, attributes[ce['id']], case_groups)
                                         for ce in live_collection_exercises]
    # Get all related surveys for gathered collection exercises
    survey_ids = {collection_exercise['surveyId'] for collection_exercise in refined_live_collection_exercises}
    surveys = [get_survey_by_id(survey_id) for survey_id in survey_ids]
    # Get all respondents for the given ru
    respondent_party_ids = [respondent['partyId'] for respondent in reporting_unit.get('associations')]
    respondents = party_controller.get_respondent_by_party_ids(respondent_party_ids)
    # Link collection exercises and respondents to appropriate surveys
    linked_surveys = [survey_with_respondents_and_exercises(survey, respondents, refined_live_collection_exercises,
                                                           ru_ref)
                      for survey in surveys]
    sorted_linked_surveys = sorted(linked_surveys, key=lambda survey: survey['surveyRef'])
    # Add latest active iac code to surveys
    surveys_with_latest_case = [
        {
            **survey,
            "case": get_latest_case_with_ce(cases, survey['collection_exercises'])
        }
        for survey in sorted_linked_surveys
    ]
    # Generate appropriate info message if necessary
    # TODO Standardise how the info messages are generated
    survey_arg = request.args.get('survey')
    period_arg = request.args.get('period')
    if survey_arg and period_arg:
        # Arriving here after a response-status change: flash the new status.
        survey = next(filter(lambda s: s['shortName'] == survey_arg, sorted_linked_surveys))
        collection_exercise = next(filter(lambda s: s['exerciseRef'] == period_arg, survey['collection_exercises']))
        new_status = collection_exercise['responseStatus']
        flash(f'Response status for {survey["surveyRef"]} {survey["shortName"]}'
              f' period {period_arg} changed to {new_status}')
    info = request.args.get('info')
    if request.args.get('enrolment_changed'):
        flash('Enrolment status changed', 'information')
    if request.args.get('account_status_changed'):
        flash('Account status changed', 'information')
    elif info:
        flash(info, 'information')
    breadcrumbs = [
        {
            "text": "Reporting units",
            "url": "/reporting-units"
        },
        {
            "text": f"{ru_ref}"
        }
    ]
    logger.info("Successfully gathered data to view reporting unit", ru_ref=ru_ref)
    return render_template('reporting-unit.html', ru_ref=ru_ref, ru=reporting_unit,
                           surveys=surveys_with_latest_case, breadcrumbs=breadcrumbs)
def add_collection_exercise_details(collection_exercise, reporting_unit, case_groups):
    """Extend a collection exercise with display fields.

    :param collection_exercise: dict of collection exercise data
    :param reporting_unit: dict of reporting unit attribute data captured
        at enrolment time for that exercise
    :param case_groups: list of case group data used to look up the status
    :return: the collection exercise dict plus formatted display fields
    :rtype: dict
    """
    status = get_case_group_status_by_collection_exercise(case_groups, collection_exercise['id'])
    details = dict(collection_exercise)
    details['responseStatus'] = map_ce_response_status(status)
    details['companyName'] = reporting_unit['name']
    details['companyRegion'] = map_region(reporting_unit['attributes']['region'])
    details['trading_as'] = reporting_unit['trading_as']
    return details
def survey_with_respondents_and_exercises(survey, respondents, collection_exercises, ru_ref):
    """Attach the enrolled respondents and date-sorted exercises to *survey*."""
    survey_id = survey['id']
    enrolled = []
    for respondent in respondents:
        if survey_id in party_controller.survey_ids_for_respondent(respondent, ru_ref):
            enrolled.append(party_controller.add_enrolment_status_to_respondent(respondent, ru_ref, survey_id))
    exercises = [ce for ce in collection_exercises if ce['surveyId'] == survey_id]
    # Newest exercise first.
    exercises.sort(key=lambda ce: ce['scheduledStartDateTime'], reverse=True)
    return {
        **survey,
        'respondents': enrolled,
        'collection_exercises': exercises
    }
def get_latest_case_with_ce(cases, collection_exercises):
    """Return the most recently created case belonging to one of
    *collection_exercises*, with an 'activeIAC' flag added.

    :param cases: list of case dicts for the reporting unit
    :param collection_exercises: list of collection exercise dicts
    :return: the latest matching case with 'activeIAC' set, or None when no
        case belongs to any given collection exercise (previously this path
        raised TypeError by subscripting None)
    """
    ces_ids = {ce['id'] for ce in collection_exercises}
    cases_for_survey = [case
                        for case in cases
                        if case.get('caseGroup', {}).get('collectionExerciseId') in ces_ids]
    # max() with default avoids both the full sort and the None subscript.
    case = max(cases_for_survey, key=lambda c: c['createdDateTime'], default=None)
    if case is None:
        return None
    case['activeIAC'] = iac_controller.is_iac_active(case['iac'])
    return case
@reporting_unit_bp.route('/<ru_ref>/edit-contact-details/<respondent_id>', methods=['GET'])
@login_required
def view_contact_details(ru_ref, respondent_id):
    """Render the edit-contact-details form pre-filled with the
    respondent's current details."""
    respondent_details = party_controller.get_respondent_by_party_id(respondent_id)
    form = EditContactDetailsForm(form=request.form, default_values=respondent_details)
    return render_template('edit-contact-details.html', ru_ref=ru_ref, respondent_details=respondent_details,
                           form=form, tab='reporting_units')
@reporting_unit_bp.route('/<ru_ref>/edit-contact-details/<respondent_id>', methods=['POST'])
@login_required
def edit_contact_details(ru_ref, respondent_id):
    """Validate and apply contact-detail changes for a respondent.

    On validation failure the form is re-rendered with its errors;
    otherwise the change set returned by the party service decides which
    flash message the user sees.
    """
    edit_contact_details_form = EditContactDetailsForm(form=request.form)
    if not edit_contact_details_form.validate():
        contact_details = party_controller.get_respondent_by_party_id(respondent_id)
        return render_template('edit-contact-details.html', form=edit_contact_details_form, tab='reporting_units',
                               ru_ref=ru_ref, respondent_id=respondent_id,
                               errors=edit_contact_details_form.errors, respondent_details=contact_details)
    logger.info('Updating respondent details', ru_ref=ru_ref, respondent_id=respondent_id)
    form = request.form
    contact_details_changed = party_controller.update_contact_details(respondent_id, form, ru_ref)
    # An email change triggers re-verification, so it gets its own message.
    if 'emailAddress' in contact_details_changed:
        flash(f'Contact details changed and verification email sent to {form.get("email")}')
    elif len(contact_details_changed) > 0:
        flash('Contact details changed')
    else:
        flash('No updates were necessary')
    return redirect(url_for('reporting_unit_bp.view_reporting_unit', ru_ref=ru_ref))
@reporting_unit_bp.route('/', methods=['GET'])
@login_required
def search_reporting_unit_home():
    """Render the empty reporting-unit search page."""
    return render_template('reporting-unit-search/reporting-units-search.html',
                           form=RuSearchForm(),
                           breadcrumbs=[{"text": "Reporting units"}])
@reporting_unit_bp.route('/', methods=['POST'])
@login_required
def search_redirect():
    """Validate the search form and redirect to the results page.

    Previously an invalid form made the view fall through and return None,
    which Flask turns into a 500; now the search page is re-rendered with
    the bound (invalid) form so validation errors can be shown.
    """
    form = RuSearchForm(request.form)
    if form.validate_on_submit():
        query = request.form.get('query')
        return redirect(url_for('reporting_unit_bp.search_reporting_units', query=query))
    return render_template('reporting-unit-search/reporting-units-search.html',
                           form=form,
                           breadcrumbs=[{"text": "Reporting units"}])
@reporting_unit_bp.route('/search', methods=['GET'])
@login_required
def search_reporting_units():
    """Render a paginated page of businesses matching the search query."""
    search_key_words = request.values.get('query', '')
    page = request.values.get('page', '1')
    limit = app.config["PARTY_BUSINESS_RESULTS_PER_PAGE"]
    breadcrumbs = [{"text": "Reporting units"}]
    form = RuSearchForm()
    form.query.data = search_key_words
    response_data = reporting_units_controllers.search_reporting_units(search_key_words, limit, page)
    business_list = response_data['businesses']
    total_business_count = response_data['total_business_count']
    offset = (int(page) - 1) * limit
    # BUG FIX: previously `(limit + offset) if total >= limit else total`,
    # which overstated the last index on the final page (e.g. "21-30 of 25");
    # clamp to the total instead.
    last_index = min(limit + offset, total_business_count)
    pagination = Pagination(page=int(page),
                            per_page=limit,
                            total=total_business_count,
                            record_name='Business',
                            prev_label='Previous',
                            next_label='Next',
                            outer_window=0,
                            format_total=True,
                            format_number=True,
                            show_single_page=False)
    return render_template('reporting-unit-search/reporting-units.html',
                           form=form,
                           business_list=business_list,
                           total_business_count=total_business_count,
                           breadcrumbs=breadcrumbs,
                           first_index=1 + offset,
                           last_index=last_index,
                           pagination=pagination,
                           show_pagination=bool(total_business_count > limit))
@reporting_unit_bp.route('/resend_verification/<ru_ref>/<party_id>', methods=['GET'])
@login_required
def view_resend_verification(ru_ref, party_id):
    """Show the confirmation page for re-sending a verification email.

    A pending (not yet verified) email address takes precedence over the
    respondent's current one.
    """
    logger.info("Re-send verification email requested", ru_ref=ru_ref, party_id=party_id)
    respondent = party_controller.get_respondent_by_party_id(party_id)
    if 'pendingEmailAddress' in respondent:
        email = respondent['pendingEmailAddress']
    else:
        email = respondent['emailAddress']
    return render_template('re-send-verification-email.html', ru_ref=ru_ref, email=email, tab='reporting_units')
@reporting_unit_bp.route('/resend_verification/<ru_ref>/<party_id>', methods=['POST'])
@login_required
def resend_verification(ru_ref, party_id):
    """Re-send the account verification email, then return to the RU page."""
    reporting_units_controllers.resend_verification_email(party_id)
    logger.info("Re-sent verification email.", party_id=party_id)
    flash('Verification email re-sent')
    return redirect(url_for('reporting_unit_bp.view_reporting_unit', ru_ref=ru_ref))
@reporting_unit_bp.route('/<ru_ref>/new_enrolment_code', methods=['GET'])
@login_required
def generate_new_enrolment_code(ru_ref):
    """Generate a fresh enrolment (IAC) code for a case and display it."""
    case_id = request.args.get('case_id')
    reporting_units_controllers.generate_new_enrolment_code(case_id)
    # Re-fetch the case so the template shows the newly generated IAC.
    case = case_controller.get_case_by_id(case_id)
    return render_template('new-enrolment-code.html',
                           iac=case['iac'],
                           ru_ref=ru_ref,
                           ru_name=request.args.get('ru_name'),
                           trading_as=request.args.get('trading_as'),
                           survey_name=request.args.get('survey_name'),
                           survey_ref=request.args.get('survey_ref'))
@reporting_unit_bp.route('/<ru_ref>/change-enrolment-status', methods=['GET'])
@login_required
def confirm_change_enrolment_status(ru_ref):
    """Show the confirmation page before changing an enrolment's status.

    All display values are passed through from the referring page's query
    string; `change_flag` carries the requested new status.
    """
    return render_template('confirm-enrolment-change.html', business_id=request.args['business_id'], ru_ref=ru_ref,
                           ru_name=request.args.get('ru_name'),
                           trading_as=request.args['trading_as'], survey_id=request.args['survey_id'],
                           survey_name=request.args['survey_name'], respondent_id=request.args['respondent_id'],
                           first_name=request.args['respondent_first_name'],
                           last_name=request.args['respondent_last_name'],
                           change_flag=request.args['change_flag'],
                           tab=request.args['tab'])
@reporting_unit_bp.route('/<ru_ref>/change-respondent-status', methods=['GET'])
@login_required
def confirm_change_respondent_status(ru_ref):
    """Show the confirmation page before changing a respondent's account
    status; `change_flag` carries the requested new status."""
    respondent = party_controller.get_respondent_by_party_id(request.args['party_id'])
    return render_template('confirm-respondent-status-change.html',
                           ru_ref=ru_ref,
                           respondent_id=respondent['id'],
                           first_name=respondent['firstName'],
                           last_name=respondent['lastName'],
                           email_address=respondent['emailAddress'],
                           change_flag=request.args['change_flag'],
                           tab=request.args['tab'])
@reporting_unit_bp.route('/<ru_ref>/change-enrolment-status', methods=['POST'])
@login_required
def change_enrolment_status(ru_ref):
    """Apply the enrolment status change, then return to the RU page."""
    reporting_units_controllers.change_enrolment_status(business_id=request.args['business_id'],
                                                        respondent_id=request.args['respondent_id'],
                                                        survey_id=request.args['survey_id'],
                                                        change_flag=request.args['change_flag'])
    return redirect(url_for('reporting_unit_bp.view_reporting_unit', ru_ref=ru_ref, enrolment_changed='True'))
@reporting_unit_bp.route('/<ru_ref>/change-respondent-status', methods=['POST'])
@login_required
def change_respondent_status(ru_ref):
    """Apply the respondent account status change, then return to the RU page."""
    reporting_units_controllers.change_respondent_status(respondent_id=request.args['respondent_id'],
                                                         change_flag=request.args['change_flag'])
    return redirect(url_for('reporting_unit_bp.view_reporting_unit', ru_ref=ru_ref, account_status_changed='True'))
import argparse
import logging
import plistlib
import psycopg2
# Defaults for the CLI arguments defined in parse_args().
DEFAULT_LIBRARY_FILE_LOCATION = '/Users/stephan/Music/iTunes/iTunes Music Library.xml'
DEFAULT_DATABASE_NAME = 'music'
DEFAULT_SCHEMA_NAME = 'public'
DEFAULT_USER_NAME = 'postgres'
# NOTE(review): '<PASSWORD>' looks like a redaction placeholder, not a
# usable default — confirm and replace (e.g. with None) before shipping.
DEFAULT_PASSWORD = '<PASSWORD>'
DEFAULT_PORT = 5432
# Staging table/schema that receive the raw iTunes data before normalisation.
TEMPORARY_TABLE_NAME = 'itunes'
TEMPORARY_SCHEMA_NAME = 'itunes'
def main(arg_list=None):
args = parse_args(arg_list)
library_xml = args.library_xml
db_name = args.database_name
schema_name = args.schema_name
port = args.port
username = args.username
password = <PASSWORD>
logging.warning("Connecting to database %s on port %s with username %s. Importing data to schema %s",
db_name, port, username, schema_name)
logging.warning("Parsing library file at location: %s", library_xml.name)
library = plistlib.load(library_xml)
logging.warning("Extracting track data from library file...")
tracks_table, tracks = process_tracks(library)
# Import data 'as-is' to postgres
conn, cur = open_db(db_name, port, username, password)
logging.warning("Importing data into temp schema...")
import_itunes_data(cur, tracks, tracks_table)
close_db(conn, cur)
# Create normalised data structure
conn, cur = open_db(db_name, port, username, password)
logging.warning("Creating the new tables...")
create_normalised_tables(cur, schema_name)
close_db(conn, cur)
# Migrate data over to new structure
conn, cur = open_db(db_name, port, username, password)
logging.warning("Migrating data to new tables...")
normalise_data(cur, schema_name)
close_db(conn, cur)
def parse_args(arg_list):
parser = argparse.ArgumentParser()
parser.add_argument('--library',
help='Path to XML library file',
dest='library_xml',
type=argparse.FileType('rb'),
default=DEFAULT_LIBRARY_FILE_LOCATION)
parser.add_argument('--db', '-d',
help='Name of postgres database [%(default)s]',
dest='database_name',
default=DEFAULT_DATABASE_NAME)
parser.add_argument('--port', '-p',
help='Local database port [%(default)s]',
dest='port',
default=DEFAULT_PORT)
parser.add_argument('--schema', '-s',
help='Name of database schema [%(default)s]',
dest='schema_name',
default=DEFAULT_SCHEMA_NAME)
parser.add_argument('--user', '-u',
help='Postgres username [%(default)s]',
dest='username',
default=DEFAULT_USER_NAME)
parser.add_argument('--pass', '-x',
help='Postgres password [%(default)s]',
dest='password',
default=DEFAULT_PASSWORD)
args = parser.parse_args(arg_list)
return args
def import_itunes_data(db, tracks, tracks_table):
db.execute("DROP SCHEMA IF EXISTS itunes CASCADE")
db.execute("CREATE SCHEMA itunes")
db.execute("DROP TABLE IF EXISTS {0}.{1}".format(TEMPORARY_SCHEMA_NAME, TEMPORARY_TABLE_NAME))
db.execute(tracks_table)
db.execute("CREATE UNIQUE INDEX idx_itunes_itunes_id ON itunes.itunes (persistent_id);")
for query in tracks:
db.execute(query[0], list(query[1]))
def create_normalised_tables(db, schema_name):
db.execute("CREATE SCHEMA IF NOT EXISTS {0}".format(schema_name))
db.execute("CREATE TABLE IF NOT EXISTS {0}.artist ("
"artist_id BIGINT GENERATED BY DEFAULT AS IDENTITY,"
"artist_name TEXT NOT NULL,"
"CONSTRAINT pk_artist PRIMARY KEY (artist_id),"
"CONSTRAINT uk_artist_name UNIQUE (artist_name)"
"); "
.format(schema_name))
db.execute("CREATE TABLE IF NOT EXISTS {0}.album ("
"album_id BIGINT GENERATED BY DEFAULT AS IDENTITY,"
"album_name TEXT NOT NULL,"
"artist_id BIGINT NOT NULL,"
"release_year INT,"
"CONSTRAINT pk_album PRIMARY KEY (album_id),"
"CONSTRAINT fk_album_artist_id FOREIGN KEY (artist_id) REFERENCES {0}.artist (artist_id),"
"CONSTRAINT ck_album_release_year CHECK (release_year BETWEEN 1900 AND 2050),"
"CONSTRAINT uk_album_artist UNIQUE (album_name, artist_id)"
"); "
.format(schema_name))
db.execute("CREATE TABLE IF NOT EXISTS {0}.track ("
"track_id BIGINT GENERATED BY DEFAULT AS IDENTITY, "
"track_name TEXT NOT NULL, "
"length BIGINT NOT NULL, "
"album_id BIGINT NOT NULL, "
"artist_id BIGINT NOT NULL, "
"play_count INT NOT NULL, "
"last_played TIMESTAMP, "
"date_added TIMESTAMP, "
"track_number INT NOT NULL, "
"bpm INT, "
"loved BOOLEAN NOT NULL, "
"itunes_id VARCHAR(16) NOT NULL, "
"CONSTRAINT pk_track PRIMARY KEY (track_id), "
"CONSTRAINT fk_track_artist_id FOREIGN KEY (artist_id) REFERENCES {0}.artist (artist_id), "
"CONSTRAINT fk_track_album_id FOREIGN KEY (album_id) REFERENCES {0}.album (album_id), "
"CONSTRAINT ck_track_date_added CHECK (date_added <= dbrent_timestamp :: TIMESTAMP), "
"CONSTRAINT ck_track_play_count CHECK (play_count >= 0), "
"CONSTRAINT uk_track_artist_album UNIQUE (track_name, album_id, artist_id, track_number), "
"CONSTRAINT uk_track_itunes_id UNIQUE (itunes_id));"
.format(schema_name))
db.execute("CREATE TABLE IF NOT EXISTS {0}.play ("
"play_id BIGINT GENERATED BY DEFAULT AS IDENTITY,"
"track_id BIGINT,"
"played_at TIMESTAMP NOT NULL,"
"CONSTRAINT pk_play PRIMARY KEY (play_id),"
"CONSTRAINT fk_play_track_id FOREIGN KEY (track_id) REFERENCES {0}.track (track_id),"
"CONSTRAINT uk_play_track_play_at UNIQUE (track_id, played_at)"
");"
.format(schema_name))
db.execute("CREATE UNIQUE INDEX IF NOT EXISTS idx_track_itunes_id ON {0}.track (itunes_id);".format(schema_name))
def normalise_data(db, schema_name):
db.execute("INSERT INTO {0}.artist (artist_name) "
"SELECT artist "
" FROM itunes.itunes "
" WHERE artist IS NOT NULL "
" GROUP BY artist "
" ON CONFLICT (artist_name) "
" DO NOTHING;"
.format(schema_name))
db.execute("INSERT INTO {0}.album (album_name, artist_id, release_year) "
"SELECT album, "
" (SELECT artist_id "
" FROM {0}.artist "
" WHERE artist_name = artist), "
" MAX(year :: INT) "
" FROM itunes.itunes "
" WHERE album IS NOT NULL "
" AND year IS NOT NULL "
" GROUP BY album, artist "
" ON CONFLICT (album_name, artist_id)"
" DO NOTHING;"
.format(schema_name))
db.execute("UPDATE {0}.track t "
" SET track_name = i.name, "
" length = i.total_time, "
" album_id = i.album_id, "
" artist_id = i.artist_id, "
" play_count = i.play_count, "
" last_played = i.play_date_utc, "
" date_added = i.date_added, "
" track_number = i.track_number, "
" itunes_id = i.persistent_id, "
" bpm = i.bpm, "
" loved = i.loved "
" FROM (SELECT name, "
" total_time :: BIGINT, "
" al.album_id, "
" ar.artist_id, "
" COALESCE(play_count :: INT, 0) AS play_count, "
" play_date_utc :: TIMESTAMP, "
" date_added :: TIMESTAMP, "
" COALESCE(track_number :: INT, 1) AS track_number, "
" bpm :: INT, "
" COALESCE(loved :: BOOLEAN, FALSE) AS loved, "
" persistent_id "
" FROM itunes.itunes t "
" JOIN {0}.artist ar ON (artist_name = artist) "
" JOIN {0}.album al ON (album_name = album "
" AND release_year = year :: INT "
" AND al.artist_id = ar.artist_id) "
") AS i "
"WHERE t.itunes_id = i.persistent_id "
" OR (t.track_name, t.album_id, t.artist_id, t.track_number) "
" = (i.name, i.album_id, i.artist_id, i.track_number); "
.format(schema_name))
db.execute("INSERT INTO {0}.track (track_name, "
" length, "
" album_id, "
" artist_id, "
" play_count, "
" last_played, "
" date_added, "
" track_number, "
" bpm, "
" loved, "
" itunes_id) "
" SELECT name, "
" total_time :: BIGINT, "
" al.album_id, "
" ar.artist_id, "
" COALESCE(play_count :: INT, 0), "
" play_date_utc :: TIMESTAMP, "
" date_added :: TIMESTAMP, "
" COALESCE(track_number :: INT, 1), "
" bpm :: INT, "
" COALESCE(loved :: BOOLEAN, FALSE) AS loved, "
" persistent_id "
" FROM itunes.itunes i "
" JOIN {0}.artist ar "
" ON (artist_name = artist) "
" JOIN {0}.album al "
" ON (album_name = album "
" AND release_year = year :: int "
" AND al.artist_id = ar.artist_id) "
" WHERE NOT EXISTS (SELECT "
" FROM {0}.track t "
" WHERE t.itunes_id = i.persistent_id "
" OR (t.track_name, t.album_id, t.artist_id, t.track_number) "
" = (i.name, al.album_id, ar.artist_id, COALESCE(track_number :: INT, 1))); "
.format(schema_name))
db.execute("INSERT INTO {0}.play (track_id, played_at)"
"SELECT track_id,"
" last_played"
" FROM {0}.track"
" WHERE last_played IS NOT NULL"
" AND NOT EXISTS (SELECT "
" FROM {0}.play"
" WHERE track_id = track_id"
" AND played_at = last_played);"
.format(schema_name))
def process_tracks(library):
all_keys = set()
inserts = []
for track_id in library['Tracks'].keys():
track = library['Tracks'][track_id]
track_keys = list(map(slugify, track.keys()))
if 'Podcast' in track and track['Podcast']:
track.pop('Podcast')
continue
if 'Music Video' in track:
track.pop('Music Video')
continue
if 'Artist' not in track:
continue
if 'Album' not in track:
continue
if 'Year' not in track:
continue
track_values = track.values()
all_keys = all_keys.union(set(track_keys))
inserts.append(get_parameterized(track_keys, track_values))
all_keys = list(map(slugify, all_keys))
all_keys_with_type = (key + " TEXT" for key in all_keys)
return "CREATE TABLE IF NOT EXISTS {0}.{1} ({2})".format(TEMPORARY_SCHEMA_NAME, TEMPORARY_TABLE_NAME,
', '.join(all_keys_with_type)), inserts
def get_parameterized(keys, values):
return (
"INSERT INTO {0}.{1} ({2}) VALUES ({3})".format(
TEMPORARY_SCHEMA_NAME,
TEMPORARY_TABLE_NAME,
', '.join(map(str, keys)),
', '.join(['%s'] * len(values))
),
[value for value in values]
)
def slugify(name):
return name.lower().replace(' ', '_')
def open_db(name, port, user, password):
conn = psycopg2.connect(host="localhost", port=port, database=name, user=user, password=password)
cur = conn.cursor()
return conn, cur
def close_db(conn, cur):
conn.commit()
cur.close()
conn.close()
if __name__ == '__main__':
main() | import-to-postgres.py |
import argparse
import logging
import plistlib
import psycopg2
DEFAULT_LIBRARY_FILE_LOCATION = '/Users/stephan/Music/iTunes/iTunes Music Library.xml'
DEFAULT_DATABASE_NAME = 'music'
DEFAULT_SCHEMA_NAME = 'public'
DEFAULT_USER_NAME = 'postgres'
DEFAULT_PASSWORD = '<PASSWORD>'
DEFAULT_PORT = 5432
TEMPORARY_TABLE_NAME = 'itunes'
TEMPORARY_SCHEMA_NAME = 'itunes'
def main(arg_list=None):
args = parse_args(arg_list)
library_xml = args.library_xml
db_name = args.database_name
schema_name = args.schema_name
port = args.port
username = args.username
password = <PASSWORD>
logging.warning("Connecting to database %s on port %s with username %s. Importing data to schema %s",
db_name, port, username, schema_name)
logging.warning("Parsing library file at location: %s", library_xml.name)
library = plistlib.load(library_xml)
logging.warning("Extracting track data from library file...")
tracks_table, tracks = process_tracks(library)
# Import data 'as-is' to postgres
conn, cur = open_db(db_name, port, username, password)
logging.warning("Importing data into temp schema...")
import_itunes_data(cur, tracks, tracks_table)
close_db(conn, cur)
# Create normalised data structure
conn, cur = open_db(db_name, port, username, password)
logging.warning("Creating the new tables...")
create_normalised_tables(cur, schema_name)
close_db(conn, cur)
# Migrate data over to new structure
conn, cur = open_db(db_name, port, username, password)
logging.warning("Migrating data to new tables...")
normalise_data(cur, schema_name)
close_db(conn, cur)
def parse_args(arg_list):
parser = argparse.ArgumentParser()
parser.add_argument('--library',
help='Path to XML library file',
dest='library_xml',
type=argparse.FileType('rb'),
default=DEFAULT_LIBRARY_FILE_LOCATION)
parser.add_argument('--db', '-d',
help='Name of postgres database [%(default)s]',
dest='database_name',
default=DEFAULT_DATABASE_NAME)
parser.add_argument('--port', '-p',
help='Local database port [%(default)s]',
dest='port',
default=DEFAULT_PORT)
parser.add_argument('--schema', '-s',
help='Name of database schema [%(default)s]',
dest='schema_name',
default=DEFAULT_SCHEMA_NAME)
parser.add_argument('--user', '-u',
help='Postgres username [%(default)s]',
dest='username',
default=DEFAULT_USER_NAME)
parser.add_argument('--pass', '-x',
help='Postgres password [%(default)s]',
dest='password',
default=DEFAULT_PASSWORD)
args = parser.parse_args(arg_list)
return args
def import_itunes_data(db, tracks, tracks_table):
db.execute("DROP SCHEMA IF EXISTS itunes CASCADE")
db.execute("CREATE SCHEMA itunes")
db.execute("DROP TABLE IF EXISTS {0}.{1}".format(TEMPORARY_SCHEMA_NAME, TEMPORARY_TABLE_NAME))
db.execute(tracks_table)
db.execute("CREATE UNIQUE INDEX idx_itunes_itunes_id ON itunes.itunes (persistent_id);")
for query in tracks:
db.execute(query[0], list(query[1]))
def create_normalised_tables(db, schema_name):
db.execute("CREATE SCHEMA IF NOT EXISTS {0}".format(schema_name))
db.execute("CREATE TABLE IF NOT EXISTS {0}.artist ("
"artist_id BIGINT GENERATED BY DEFAULT AS IDENTITY,"
"artist_name TEXT NOT NULL,"
"CONSTRAINT pk_artist PRIMARY KEY (artist_id),"
"CONSTRAINT uk_artist_name UNIQUE (artist_name)"
"); "
.format(schema_name))
db.execute("CREATE TABLE IF NOT EXISTS {0}.album ("
"album_id BIGINT GENERATED BY DEFAULT AS IDENTITY,"
"album_name TEXT NOT NULL,"
"artist_id BIGINT NOT NULL,"
"release_year INT,"
"CONSTRAINT pk_album PRIMARY KEY (album_id),"
"CONSTRAINT fk_album_artist_id FOREIGN KEY (artist_id) REFERENCES {0}.artist (artist_id),"
"CONSTRAINT ck_album_release_year CHECK (release_year BETWEEN 1900 AND 2050),"
"CONSTRAINT uk_album_artist UNIQUE (album_name, artist_id)"
"); "
.format(schema_name))
db.execute("CREATE TABLE IF NOT EXISTS {0}.track ("
"track_id BIGINT GENERATED BY DEFAULT AS IDENTITY, "
"track_name TEXT NOT NULL, "
"length BIGINT NOT NULL, "
"album_id BIGINT NOT NULL, "
"artist_id BIGINT NOT NULL, "
"play_count INT NOT NULL, "
"last_played TIMESTAMP, "
"date_added TIMESTAMP, "
"track_number INT NOT NULL, "
"bpm INT, "
"loved BOOLEAN NOT NULL, "
"itunes_id VARCHAR(16) NOT NULL, "
"CONSTRAINT pk_track PRIMARY KEY (track_id), "
"CONSTRAINT fk_track_artist_id FOREIGN KEY (artist_id) REFERENCES {0}.artist (artist_id), "
"CONSTRAINT fk_track_album_id FOREIGN KEY (album_id) REFERENCES {0}.album (album_id), "
"CONSTRAINT ck_track_date_added CHECK (date_added <= dbrent_timestamp :: TIMESTAMP), "
"CONSTRAINT ck_track_play_count CHECK (play_count >= 0), "
"CONSTRAINT uk_track_artist_album UNIQUE (track_name, album_id, artist_id, track_number), "
"CONSTRAINT uk_track_itunes_id UNIQUE (itunes_id));"
.format(schema_name))
db.execute("CREATE TABLE IF NOT EXISTS {0}.play ("
"play_id BIGINT GENERATED BY DEFAULT AS IDENTITY,"
"track_id BIGINT,"
"played_at TIMESTAMP NOT NULL,"
"CONSTRAINT pk_play PRIMARY KEY (play_id),"
"CONSTRAINT fk_play_track_id FOREIGN KEY (track_id) REFERENCES {0}.track (track_id),"
"CONSTRAINT uk_play_track_play_at UNIQUE (track_id, played_at)"
");"
.format(schema_name))
db.execute("CREATE UNIQUE INDEX IF NOT EXISTS idx_track_itunes_id ON {0}.track (itunes_id);".format(schema_name))
def normalise_data(db, schema_name):
db.execute("INSERT INTO {0}.artist (artist_name) "
"SELECT artist "
" FROM itunes.itunes "
" WHERE artist IS NOT NULL "
" GROUP BY artist "
" ON CONFLICT (artist_name) "
" DO NOTHING;"
.format(schema_name))
db.execute("INSERT INTO {0}.album (album_name, artist_id, release_year) "
"SELECT album, "
" (SELECT artist_id "
" FROM {0}.artist "
" WHERE artist_name = artist), "
" MAX(year :: INT) "
" FROM itunes.itunes "
" WHERE album IS NOT NULL "
" AND year IS NOT NULL "
" GROUP BY album, artist "
" ON CONFLICT (album_name, artist_id)"
" DO NOTHING;"
.format(schema_name))
db.execute("UPDATE {0}.track t "
" SET track_name = i.name, "
" length = i.total_time, "
" album_id = i.album_id, "
" artist_id = i.artist_id, "
" play_count = i.play_count, "
" last_played = i.play_date_utc, "
" date_added = i.date_added, "
" track_number = i.track_number, "
" itunes_id = i.persistent_id, "
" bpm = i.bpm, "
" loved = i.loved "
" FROM (SELECT name, "
" total_time :: BIGINT, "
" al.album_id, "
" ar.artist_id, "
" COALESCE(play_count :: INT, 0) AS play_count, "
" play_date_utc :: TIMESTAMP, "
" date_added :: TIMESTAMP, "
" COALESCE(track_number :: INT, 1) AS track_number, "
" bpm :: INT, "
" COALESCE(loved :: BOOLEAN, FALSE) AS loved, "
" persistent_id "
" FROM itunes.itunes t "
" JOIN {0}.artist ar ON (artist_name = artist) "
" JOIN {0}.album al ON (album_name = album "
" AND release_year = year :: INT "
" AND al.artist_id = ar.artist_id) "
") AS i "
"WHERE t.itunes_id = i.persistent_id "
" OR (t.track_name, t.album_id, t.artist_id, t.track_number) "
" = (i.name, i.album_id, i.artist_id, i.track_number); "
.format(schema_name))
db.execute("INSERT INTO {0}.track (track_name, "
" length, "
" album_id, "
" artist_id, "
" play_count, "
" last_played, "
" date_added, "
" track_number, "
" bpm, "
" loved, "
" itunes_id) "
" SELECT name, "
" total_time :: BIGINT, "
" al.album_id, "
" ar.artist_id, "
" COALESCE(play_count :: INT, 0), "
" play_date_utc :: TIMESTAMP, "
" date_added :: TIMESTAMP, "
" COALESCE(track_number :: INT, 1), "
" bpm :: INT, "
" COALESCE(loved :: BOOLEAN, FALSE) AS loved, "
" persistent_id "
" FROM itunes.itunes i "
" JOIN {0}.artist ar "
" ON (artist_name = artist) "
" JOIN {0}.album al "
" ON (album_name = album "
" AND release_year = year :: int "
" AND al.artist_id = ar.artist_id) "
" WHERE NOT EXISTS (SELECT "
" FROM {0}.track t "
" WHERE t.itunes_id = i.persistent_id "
" OR (t.track_name, t.album_id, t.artist_id, t.track_number) "
" = (i.name, al.album_id, ar.artist_id, COALESCE(track_number :: INT, 1))); "
.format(schema_name))
db.execute("INSERT INTO {0}.play (track_id, played_at)"
"SELECT track_id,"
" last_played"
" FROM {0}.track"
" WHERE last_played IS NOT NULL"
" AND NOT EXISTS (SELECT "
" FROM {0}.play"
" WHERE track_id = track_id"
" AND played_at = last_played);"
.format(schema_name))
def process_tracks(library):
all_keys = set()
inserts = []
for track_id in library['Tracks'].keys():
track = library['Tracks'][track_id]
track_keys = list(map(slugify, track.keys()))
if 'Podcast' in track and track['Podcast']:
track.pop('Podcast')
continue
if 'Music Video' in track:
track.pop('Music Video')
continue
if 'Artist' not in track:
continue
if 'Album' not in track:
continue
if 'Year' not in track:
continue
track_values = track.values()
all_keys = all_keys.union(set(track_keys))
inserts.append(get_parameterized(track_keys, track_values))
all_keys = list(map(slugify, all_keys))
all_keys_with_type = (key + " TEXT" for key in all_keys)
return "CREATE TABLE IF NOT EXISTS {0}.{1} ({2})".format(TEMPORARY_SCHEMA_NAME, TEMPORARY_TABLE_NAME,
', '.join(all_keys_with_type)), inserts
def get_parameterized(keys, values):
return (
"INSERT INTO {0}.{1} ({2}) VALUES ({3})".format(
TEMPORARY_SCHEMA_NAME,
TEMPORARY_TABLE_NAME,
', '.join(map(str, keys)),
', '.join(['%s'] * len(values))
),
[value for value in values]
)
def slugify(name):
return name.lower().replace(' ', '_')
def open_db(name, port, user, password):
conn = psycopg2.connect(host="localhost", port=port, database=name, user=user, password=password)
cur = conn.cursor()
return conn, cur
def close_db(conn, cur):
conn.commit()
cur.close()
conn.close()
if __name__ == '__main__':
main() | 0.310694 | 0.068413 |
import os, sys
from sqlalchemy import func
from codes.upbit.recorder.upbit_price_collector import get_coin_price_class, db_session, Unit, local_fmt, get_price
idx = os.getcwd().index("trade")
PROJECT_HOME = os.getcwd()[:idx] + "trade"
sys.path.append(PROJECT_HOME)
from codes.upbit.upbit_api import Upbit
from common.global_variables import CLIENT_ID_UPBIT, CLIENT_SECRET_UPBIT, fmt
import warnings
warnings.filterwarnings("ignore")
upbit = Upbit(CLIENT_ID_UPBIT, CLIENT_SECRET_UPBIT, fmt)
if __name__ == "__main__":
count = 200
for unit in Unit:
num_of_total_collect = 0
while True:
for idx, coin_name in enumerate(upbit.get_all_coin_names()):
print(unit, idx, coin_name, end=": ")
coin_price_class = get_coin_price_class(coin_name, unit)
last_utc_date_time = db_session.query(func.min(coin_price_class.datetime_utc)).one()[0]
last_utc_date_time = last_utc_date_time.strftime(local_fmt)
price_list, _ = get_price(coin_name, last_utc_date_time, unit, count)
for price in price_list:
datetime_utc = price[1].split("+")[0].replace("T", " ")
datetime_krw = price[2].split("+")[0].replace("T", " ")
q = db_session.query(coin_price_class).filter(coin_price_class.datetime_utc == datetime_utc)
coin_price = q.first()
if coin_price is None:
coin_price = coin_price_class()
coin_price.datetime_utc = datetime_utc
coin_price.datetime_krw = datetime_krw
coin_price.open = float(price[3])
coin_price.high = float(price[4])
coin_price.low = float(price[5])
coin_price.final = float(price[6])
coin_price.volume = float(price[7])
db_session.add(coin_price)
db_session.commit()
num_of_total_collect += len(price_list)
print(last_utc_date_time, len(price_list))
if num_of_total_collect == 0:
break | codes/upbit/recorder/upbit_price_past_collector.py | import os, sys
from sqlalchemy import func
from codes.upbit.recorder.upbit_price_collector import get_coin_price_class, db_session, Unit, local_fmt, get_price
idx = os.getcwd().index("trade")
PROJECT_HOME = os.getcwd()[:idx] + "trade"
sys.path.append(PROJECT_HOME)
from codes.upbit.upbit_api import Upbit
from common.global_variables import CLIENT_ID_UPBIT, CLIENT_SECRET_UPBIT, fmt
import warnings
warnings.filterwarnings("ignore")
upbit = Upbit(CLIENT_ID_UPBIT, CLIENT_SECRET_UPBIT, fmt)
if __name__ == "__main__":
count = 200
for unit in Unit:
num_of_total_collect = 0
while True:
for idx, coin_name in enumerate(upbit.get_all_coin_names()):
print(unit, idx, coin_name, end=": ")
coin_price_class = get_coin_price_class(coin_name, unit)
last_utc_date_time = db_session.query(func.min(coin_price_class.datetime_utc)).one()[0]
last_utc_date_time = last_utc_date_time.strftime(local_fmt)
price_list, _ = get_price(coin_name, last_utc_date_time, unit, count)
for price in price_list:
datetime_utc = price[1].split("+")[0].replace("T", " ")
datetime_krw = price[2].split("+")[0].replace("T", " ")
q = db_session.query(coin_price_class).filter(coin_price_class.datetime_utc == datetime_utc)
coin_price = q.first()
if coin_price is None:
coin_price = coin_price_class()
coin_price.datetime_utc = datetime_utc
coin_price.datetime_krw = datetime_krw
coin_price.open = float(price[3])
coin_price.high = float(price[4])
coin_price.low = float(price[5])
coin_price.final = float(price[6])
coin_price.volume = float(price[7])
db_session.add(coin_price)
db_session.commit()
num_of_total_collect += len(price_list)
print(last_utc_date_time, len(price_list))
if num_of_total_collect == 0:
break | 0.187318 | 0.139983 |
import tkinter as tk
from tkinter import filedialog
from tkinter.simpledialog import askinteger
from PIL import Image, ImageTk
import numpy as np
import matplotlib.pyplot as plt
import random
import math
from scipy import misc
import os
root = tk.Tk()
root.title('AIP 60947061s')
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
root.geometry(str(screen_width) + 'x' + str(screen_height))
top_frame = tk.Frame(root)
top_frame.pack()
scrollbar = tk.Scrollbar(root)
canvas = tk.Canvas(root, yscrollcommand = scrollbar.set)
scrollbar.config(command = canvas.yview)
scrollbar.pack(side = tk.LEFT, fill = tk.Y)
bottom = tk.Frame(canvas)
canvas.pack(fill = "both", expand = True)
canvas.create_window(0, 0, window = bottom, anchor = 'nw')
bottom_frame = tk.Frame(bottom)
bottom_frame.pack()
bottom_frame_2 = tk.Frame(bottom)
bottom_frame_2.pack()
tk.Label(bottom_frame_2, width = screen_height // 4).pack()
def new_image():
for widget in bottom_frame.winfo_children():
widget.destroy()
for widget in bottom_frame_2.winfo_children():
widget.destroy()
plt.clf()
canvas.config(scrollregion = (0, 0, 0, 0))
global file_path, img_gray, new_img, new_img_gray, img_gray_histogram, flag
resized = False
file_path = filedialog.askopenfilename()
try:
img = Image.open(file_path)
except:
root.mainloop()
img_message = img.format + ' ' + str(img.size)
img_width, img_height = img.size
if img_width > screen_width / 2 or img_height > screen_height / 2:
resized = True
if img_width > img_height:
img = img.resize((int(img_width * (screen_width / img_width) / 2), int(img_height * (screen_width / img_width) / 2)))
else:
img = img.resize((int(img_width * (screen_height / img_width) / 2), int(img_height * (screen_height / img_width) / 2)))
img_gray = img.convert('L')
new_img = ImageTk.PhotoImage(img)
new_img_gray = ImageTk.PhotoImage(img_gray)
tk.Label(bottom_frame, image = new_img).pack()
if resized:
tk.messagebox.showinfo('圖片資訊', img_message + "\n重新縮放尺寸")
else:
tk.messagebox.showinfo('圖片資訊', img_message + "\n圖片大小未改變")
flag = True
plt.hist(np.array(Image.open(file_path).convert('L')).ravel(), 256, [0, 256])
plt.savefig("histogram.png")
img_gray_histogram = ImageTk.PhotoImage(Image.open("histogram.png").convert('L'))
os.remove("histogram.png")
root.mainloop()
def gray_image():
for widget in bottom_frame.winfo_children():
widget.destroy()
for widget in bottom_frame_2.winfo_children():
widget.destroy()
canvas.config(scrollregion = (0, 0, 0, 0))
global new_img, new_img_gray, flag
if flag == False:
tk.messagebox.showinfo("操作錯誤", "請先新增圖片")
root.mainloop()
tk.Label(bottom_frame, image = new_img).pack(side = tk.LEFT)
tk.Label(bottom_frame, image = new_img_gray).pack(side = tk.RIGHT)
root.mainloop()
def set_histogram():
for widget in bottom_frame.winfo_children():
widget.destroy()
for widget in bottom_frame_2.winfo_children():
widget.destroy()
canvas.config(scrollregion = (0, 0, 0, 0))
global file_path, new_img_gray, img_gray_histogram, flag
if flag == False:
tk.messagebox.showinfo("操作錯誤", "請先新增圖片")
root.mainloop()
tk.Label(bottom_frame, image = new_img_gray).pack(side = tk.LEFT)
tk.Label(bottom_frame, image = img_gray_histogram).pack(side = tk.RIGHT)
root.mainloop()
def AWGN():
for widget in bottom_frame.winfo_children():
widget.destroy()
for widget in bottom_frame_2.winfo_children():
widget.destroy()
plt.clf()
canvas.config(scrollregion = (0, 0, 0, 0))
global img_gray, flag
if flag == False:
tk.messagebox.showinfo("操作錯誤", "請先新增圖片")
root.mainloop()
AWGN_img = np.array(img_gray)
rows, cols = AWGN_img.shape
n = askinteger("輸入整數", "輸入雜訊值\nGray-level range(0~255)")
if n == None:
root.mainloop()
for i in range(rows):
for j in range(0, cols - 1, 2):
r1 = random.random()
r2 = random.random()
z1 = n * math.cos(2 * math.pi * r2) * math.sqrt( -2 * np.log(r1))
z2 = n * math.sin(2 * math.pi * r2) * math.sqrt( -2 * np.log(r1))
AWGN_img[i, j] = AWGN_img[i, j] + z1
if(AWGN_img[i, j] < 0):
AWGN_img[i, j] = 0
elif(AWGN_img[i, j] > 255):
AWGN_img[i, j] = 255
else:
pass
AWGN_img[i, j + 1] = AWGN_img[i, j + 1] + z2
if(AWGN_img[i, j + 1] < 0):
AWGN_img[i, j + 1] = 0
elif(AWGN_img[i, j + 1] > 255):
AWGN_img[i, j + 1] = 255
else:
pass
misc.imsave("AWGN.png", AWGN_img)
AWGN_histogram_img = np.array(Image.open("AWGN.png"))
plt.hist(AWGN_histogram_img.ravel(), 256, [0, 256])
plt.savefig("histogram.png")
AWGN_img = ImageTk.PhotoImage(Image.open("AWGN.png"))
AWGN_histogram = ImageTk.PhotoImage(Image.open("histogram.png").convert('L'))
tk.Label(bottom_frame, image = AWGN_img).pack(side = tk.LEFT)
tk.Label(bottom_frame, image = AWGN_histogram).pack(side = tk.RIGHT)
os.remove("AWGN.png")
os.remove("histogram.png")
root.mainloop()
def DWT():
for widget in bottom_frame.winfo_children():
widget.destroy()
for widget in bottom_frame_2.winfo_children():
widget.destroy()
canvas.config(scrollregion = (0, 0, 0, 0))
global img_gray, new_img_gray, flag
if flag == False:
tk.messagebox.showinfo("操作錯誤", "請先新增圖片")
root.mainloop()
DWT_img = np.array(img_gray, 'float')
rows, cols = DWT_img.shape
if rows % 2 == 1 or cols % 2 == 1:
tk.messagebox.showinfo("操作錯誤", "圖片長寬 : (" + str(rows) + ", " + str(cols) + ")\n不全為偶數")
root.mainloop()
n = askinteger("輸入整數", "輸入小波轉換層數")
if n == None:
root.mainloop()
DWT_output = np.zeros((rows, cols), 'float')
for levels in range(n):
for i in range(0, rows, 2):
for j in range(0, cols, 2):
DWT_output[0 + i // 2, 0 + j // 2] = (DWT_img[i, j] + DWT_img[i, j + 1] + DWT_img[i + 1, j] + DWT_img[i + 1, j + 1]) / 4 #LL
DWT_output[0 + i // 2, cols // 2 + j // 2] = (DWT_img[i, j] - DWT_img[i, j + 1] + DWT_img[i + 1, j] - DWT_img[i + 1, j + 1]) / 4 #LH
DWT_output[rows // 2 + i // 2, 0 + j // 2] = (DWT_img[i, j] + DWT_img[i, j + 1] - DWT_img[i + 1, j] - DWT_img[i + 1, j + 1]) / 4 #HL
DWT_output[rows // 2 + i // 2, cols // 2 + j // 2] = (DWT_img[i, j] - DWT_img[i, j + 1] - DWT_img[i + 1, j] + DWT_img[i + 1, j + 1]) / 4 #HH
DWT_img[:rows, :cols] = DWT_output[:rows, :cols]
rows = rows // 2
cols = cols // 2
misc.imsave("DWT.png", DWT_img)
DWT_img = ImageTk.PhotoImage(Image.open("DWT.png"))
tk.Label(bottom_frame, image = new_img_gray).pack(side = tk.LEFT)
tk.Label(bottom_frame, image = DWT_img).pack(side = tk.RIGHT)
os.remove("DWT.png")
root.mainloop()
def histogram_qualization():
for widget in bottom_frame.winfo_children():
widget.destroy()
for widget in bottom_frame_2.winfo_children():
widget.destroy()
plt.clf()
global img_gray, new_img_gray, img_gray_histogram, flag
if flag == False:
tk.messagebox.showinfo("操作錯誤", "請先新增圖片")
root.mainloop()
HQ_img = np.array(img_gray)
rows, cols = HQ_img.shape
H = np.zeros(256, int)
Hc = np.zeros(256, int)
T = np.zeros(256, int)
for i in range(rows):
for j in range(cols):
H[HQ_img[i][j]] = H[HQ_img[i][j]] + 1
g_min = np.nonzero(H)[0][0]
for i in range(1, 256):
Hc[i] = Hc[i - 1] + H[i]
H_min = Hc[g_min]
try:
for i in range(256):
T[i] = round(((Hc[i] - H_min) * 255) / ((cols * rows) - H_min))
except:
tk.messagebox.showinfo("計算錯誤", "分母為0")
root.mainloop()
for i in range(rows):
for j in range(cols):
HQ_img[i][j] = T[HQ_img[i][j]]
misc.imsave("HQ.png", HQ_img)
HQ_img = ImageTk.PhotoImage(Image.open("HQ.png"))
plt.hist(np.array(Image.open("HQ.png")).ravel(), 256, [0, 256])
plt.savefig("histogram.png")
HQ_histogram = ImageTk.PhotoImage(Image.open("histogram.png").convert('L'))
imLabel_left = tk.Label(bottom_frame, image = new_img_gray).pack(side = tk.LEFT)
imLabel_right = tk.Label(bottom_frame, image = HQ_img).pack(side = tk.RIGHT)
tk.Label(bottom_frame_2, image = img_gray_histogram).pack(side = tk.LEFT)
tk.Label(bottom_frame_2, image = HQ_histogram).pack(side = tk.RIGHT)
root.update()
canvas.config(scrollregion = canvas.bbox("all"))
os.remove("HQ.png")
os.remove("histogram.png")
root.mainloop()
def ImageSmoothing_And_EdgeDetection():
for widget in bottom_frame.winfo_children():
widget.destroy()
for widget in bottom_frame_2.winfo_children():
widget.destroy()
canvas.config(scrollregion = (0, 0, 0, 0))
global img_gray, flag
if flag == False:
tk.messagebox.showinfo("操作錯誤", "請先新增圖片")
root.mainloop()
ISED_img = np.array(img_gray)
rows, cols = ISED_img.shape
n = askinteger("輸入整數", "輸入convolution masks大小(奇數數字)")
if n == None or n % 2 == 0:
tk.messagebox.showinfo("操作錯誤", "convolution masks不為奇數")
root.mainloop()
elif rows - n + 1 <= 0 or cols - n + 1 <= 0:
tk.messagebox.showinfo("操作錯誤", "convolution masks數字過大")
root.mainloop()
convolution_masks_IS = np.empty((n, n) , dtype = float)
convolution_masks_ED = np.empty((n, n) , dtype = float)
array_window = tk.Toplevel(root)
array_IS = []
array_ED = []
tk.Label(array_window, text = "Image smoothing convolution masks").grid(row = 0, column = 0)
for i in range(n):
for j in range(n):
IS = tk.Entry(array_window, width = 5)
IS.grid(row = i, column = j + 1, pady = 1, padx = 1)
array_IS.append(IS)
tk.Label(array_window, text = "").grid(row = n, column = 0)
tk.Label(array_window, text = "Edge detection convolution masks").grid(row = n + 1, column = 0)
for i in range(n):
for j in range(n):
ED = tk.Entry(array_window, width = 5)
ED.grid(row = i + n + 1, column = j + 1, pady = 1, padx = 1)
array_ED.append(ED)
def calculate():
global new_img_gray
try:
for i in range(len(array_IS)):
convolution_masks_IS[i // n][i % n] = array_IS[i].get()
for i in range(len(array_ED)):
convolution_masks_ED[i // n][i % n] = array_ED[i].get()
except:
tk.messagebox.showinfo("操作錯誤", "Array不全為數字")
array_window.destroy()
root.mainloop()
out_img = np.empty((rows - n + 1, cols - n + 1) , dtype = float)
for i in range(rows - n + 1):
for j in range(cols - n + 1):
out_img[i][j] = (ISED_img[i:i + n, j:j + n] * convolution_masks_IS).sum() / convolution_masks_IS.sum()
for i in range(rows - n + 1):
for j in range(cols - n + 1):
out_img[i][j] = (ISED_img[i:i + n, j:j + n] * convolution_masks_ED).sum()
out_img = misc.imresize(out_img, [rows, cols])
misc.imsave("ISED.png", out_img)
DWT_img = ImageTk.PhotoImage(Image.open("ISED.png"))
tk.Label(bottom_frame, image = new_img_gray).pack(side = tk.LEFT)
tk.Label(bottom_frame, image = DWT_img).pack(side = tk.RIGHT)
os.remove("ISED.png")
array_window.destroy()
root.mainloop()
tk.Button(array_window, text = '確定', fg = 'black', command = calculate).grid(row = 2 * n + 1, column = 0)
tk.Button(top_frame, text = '新增圖片', fg = 'black', command = new_image).pack(side = tk.LEFT)
tk.Button(top_frame, text = '灰階圖片', fg = 'black', command = gray_image).pack(side = tk.LEFT)
tk.Button(top_frame, text = '灰階直方圖', fg = 'black', command = set_histogram).pack(side = tk.LEFT)
tk.Button(top_frame, text = '加性高斯白雜訊', fg = 'black', command = AWGN).pack(side = tk.LEFT)
tk.Button(top_frame, text = '離散小波轉換', fg = 'black', command = DWT).pack(side = tk.LEFT)
tk.Button(top_frame, text = '直方圖均衡化', fg = 'black', command = histogram_qualization).pack(side = tk.LEFT)
tk.Button(top_frame, text = '影像平滑化與邊緣偵測', fg = 'black', command = ImageSmoothing_And_EdgeDetection).pack(side = tk.LEFT)
file_path = None
img_gray = None
new_img = None
new_img_gray = None
img_gray_histogram = None
flag = False
root.mainloop() | Advanced Image Processing/HW6/HW6 60947061s.py | import tkinter as tk
from tkinter import filedialog
from tkinter.simpledialog import askinteger
from PIL import Image, ImageTk
import numpy as np
import matplotlib.pyplot as plt
import random
import math
from scipy import misc
import os
root = tk.Tk()
root.title('AIP 60947061s')
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
root.geometry(str(screen_width) + 'x' + str(screen_height))
top_frame = tk.Frame(root)
top_frame.pack()
scrollbar = tk.Scrollbar(root)
canvas = tk.Canvas(root, yscrollcommand = scrollbar.set)
scrollbar.config(command = canvas.yview)
scrollbar.pack(side = tk.LEFT, fill = tk.Y)
bottom = tk.Frame(canvas)
canvas.pack(fill = "both", expand = True)
canvas.create_window(0, 0, window = bottom, anchor = 'nw')
bottom_frame = tk.Frame(bottom)
bottom_frame.pack()
bottom_frame_2 = tk.Frame(bottom)
bottom_frame_2.pack()
tk.Label(bottom_frame_2, width = screen_height // 4).pack()
def new_image():
for widget in bottom_frame.winfo_children():
widget.destroy()
for widget in bottom_frame_2.winfo_children():
widget.destroy()
plt.clf()
canvas.config(scrollregion = (0, 0, 0, 0))
global file_path, img_gray, new_img, new_img_gray, img_gray_histogram, flag
resized = False
file_path = filedialog.askopenfilename()
try:
img = Image.open(file_path)
except:
root.mainloop()
img_message = img.format + ' ' + str(img.size)
img_width, img_height = img.size
if img_width > screen_width / 2 or img_height > screen_height / 2:
resized = True
if img_width > img_height:
img = img.resize((int(img_width * (screen_width / img_width) / 2), int(img_height * (screen_width / img_width) / 2)))
else:
img = img.resize((int(img_width * (screen_height / img_width) / 2), int(img_height * (screen_height / img_width) / 2)))
img_gray = img.convert('L')
new_img = ImageTk.PhotoImage(img)
new_img_gray = ImageTk.PhotoImage(img_gray)
tk.Label(bottom_frame, image = new_img).pack()
if resized:
tk.messagebox.showinfo('圖片資訊', img_message + "\n重新縮放尺寸")
else:
tk.messagebox.showinfo('圖片資訊', img_message + "\n圖片大小未改變")
flag = True
plt.hist(np.array(Image.open(file_path).convert('L')).ravel(), 256, [0, 256])
plt.savefig("histogram.png")
img_gray_histogram = ImageTk.PhotoImage(Image.open("histogram.png").convert('L'))
os.remove("histogram.png")
root.mainloop()
def gray_image():
for widget in bottom_frame.winfo_children():
widget.destroy()
for widget in bottom_frame_2.winfo_children():
widget.destroy()
canvas.config(scrollregion = (0, 0, 0, 0))
global new_img, new_img_gray, flag
if flag == False:
tk.messagebox.showinfo("操作錯誤", "請先新增圖片")
root.mainloop()
tk.Label(bottom_frame, image = new_img).pack(side = tk.LEFT)
tk.Label(bottom_frame, image = new_img_gray).pack(side = tk.RIGHT)
root.mainloop()
def set_histogram():
for widget in bottom_frame.winfo_children():
widget.destroy()
for widget in bottom_frame_2.winfo_children():
widget.destroy()
canvas.config(scrollregion = (0, 0, 0, 0))
global file_path, new_img_gray, img_gray_histogram, flag
if flag == False:
tk.messagebox.showinfo("操作錯誤", "請先新增圖片")
root.mainloop()
tk.Label(bottom_frame, image = new_img_gray).pack(side = tk.LEFT)
tk.Label(bottom_frame, image = img_gray_histogram).pack(side = tk.RIGHT)
root.mainloop()
def AWGN():
for widget in bottom_frame.winfo_children():
widget.destroy()
for widget in bottom_frame_2.winfo_children():
widget.destroy()
plt.clf()
canvas.config(scrollregion = (0, 0, 0, 0))
global img_gray, flag
if flag == False:
tk.messagebox.showinfo("操作錯誤", "請先新增圖片")
root.mainloop()
AWGN_img = np.array(img_gray)
rows, cols = AWGN_img.shape
n = askinteger("輸入整數", "輸入雜訊值\nGray-level range(0~255)")
if n == None:
root.mainloop()
for i in range(rows):
for j in range(0, cols - 1, 2):
r1 = random.random()
r2 = random.random()
z1 = n * math.cos(2 * math.pi * r2) * math.sqrt( -2 * np.log(r1))
z2 = n * math.sin(2 * math.pi * r2) * math.sqrt( -2 * np.log(r1))
AWGN_img[i, j] = AWGN_img[i, j] + z1
if(AWGN_img[i, j] < 0):
AWGN_img[i, j] = 0
elif(AWGN_img[i, j] > 255):
AWGN_img[i, j] = 255
else:
pass
AWGN_img[i, j + 1] = AWGN_img[i, j + 1] + z2
if(AWGN_img[i, j + 1] < 0):
AWGN_img[i, j + 1] = 0
elif(AWGN_img[i, j + 1] > 255):
AWGN_img[i, j + 1] = 255
else:
pass
misc.imsave("AWGN.png", AWGN_img)
AWGN_histogram_img = np.array(Image.open("AWGN.png"))
plt.hist(AWGN_histogram_img.ravel(), 256, [0, 256])
plt.savefig("histogram.png")
AWGN_img = ImageTk.PhotoImage(Image.open("AWGN.png"))
AWGN_histogram = ImageTk.PhotoImage(Image.open("histogram.png").convert('L'))
tk.Label(bottom_frame, image = AWGN_img).pack(side = tk.LEFT)
tk.Label(bottom_frame, image = AWGN_histogram).pack(side = tk.RIGHT)
os.remove("AWGN.png")
os.remove("histogram.png")
root.mainloop()
def DWT():
for widget in bottom_frame.winfo_children():
widget.destroy()
for widget in bottom_frame_2.winfo_children():
widget.destroy()
canvas.config(scrollregion = (0, 0, 0, 0))
global img_gray, new_img_gray, flag
if flag == False:
tk.messagebox.showinfo("操作錯誤", "請先新增圖片")
root.mainloop()
DWT_img = np.array(img_gray, 'float')
rows, cols = DWT_img.shape
if rows % 2 == 1 or cols % 2 == 1:
tk.messagebox.showinfo("操作錯誤", "圖片長寬 : (" + str(rows) + ", " + str(cols) + ")\n不全為偶數")
root.mainloop()
n = askinteger("輸入整數", "輸入小波轉換層數")
if n == None:
root.mainloop()
DWT_output = np.zeros((rows, cols), 'float')
for levels in range(n):
for i in range(0, rows, 2):
for j in range(0, cols, 2):
DWT_output[0 + i // 2, 0 + j // 2] = (DWT_img[i, j] + DWT_img[i, j + 1] + DWT_img[i + 1, j] + DWT_img[i + 1, j + 1]) / 4 #LL
DWT_output[0 + i // 2, cols // 2 + j // 2] = (DWT_img[i, j] - DWT_img[i, j + 1] + DWT_img[i + 1, j] - DWT_img[i + 1, j + 1]) / 4 #LH
DWT_output[rows // 2 + i // 2, 0 + j // 2] = (DWT_img[i, j] + DWT_img[i, j + 1] - DWT_img[i + 1, j] - DWT_img[i + 1, j + 1]) / 4 #HL
DWT_output[rows // 2 + i // 2, cols // 2 + j // 2] = (DWT_img[i, j] - DWT_img[i, j + 1] - DWT_img[i + 1, j] + DWT_img[i + 1, j + 1]) / 4 #HH
DWT_img[:rows, :cols] = DWT_output[:rows, :cols]
rows = rows // 2
cols = cols // 2
misc.imsave("DWT.png", DWT_img)
DWT_img = ImageTk.PhotoImage(Image.open("DWT.png"))
tk.Label(bottom_frame, image = new_img_gray).pack(side = tk.LEFT)
tk.Label(bottom_frame, image = DWT_img).pack(side = tk.RIGHT)
os.remove("DWT.png")
root.mainloop()
def histogram_qualization():
for widget in bottom_frame.winfo_children():
widget.destroy()
for widget in bottom_frame_2.winfo_children():
widget.destroy()
plt.clf()
global img_gray, new_img_gray, img_gray_histogram, flag
if flag == False:
tk.messagebox.showinfo("操作錯誤", "請先新增圖片")
root.mainloop()
HQ_img = np.array(img_gray)
rows, cols = HQ_img.shape
H = np.zeros(256, int)
Hc = np.zeros(256, int)
T = np.zeros(256, int)
for i in range(rows):
for j in range(cols):
H[HQ_img[i][j]] = H[HQ_img[i][j]] + 1
g_min = np.nonzero(H)[0][0]
for i in range(1, 256):
Hc[i] = Hc[i - 1] + H[i]
H_min = Hc[g_min]
try:
for i in range(256):
T[i] = round(((Hc[i] - H_min) * 255) / ((cols * rows) - H_min))
except:
tk.messagebox.showinfo("計算錯誤", "分母為0")
root.mainloop()
for i in range(rows):
for j in range(cols):
HQ_img[i][j] = T[HQ_img[i][j]]
misc.imsave("HQ.png", HQ_img)
HQ_img = ImageTk.PhotoImage(Image.open("HQ.png"))
plt.hist(np.array(Image.open("HQ.png")).ravel(), 256, [0, 256])
plt.savefig("histogram.png")
HQ_histogram = ImageTk.PhotoImage(Image.open("histogram.png").convert('L'))
imLabel_left = tk.Label(bottom_frame, image = new_img_gray).pack(side = tk.LEFT)
imLabel_right = tk.Label(bottom_frame, image = HQ_img).pack(side = tk.RIGHT)
tk.Label(bottom_frame_2, image = img_gray_histogram).pack(side = tk.LEFT)
tk.Label(bottom_frame_2, image = HQ_histogram).pack(side = tk.RIGHT)
root.update()
canvas.config(scrollregion = canvas.bbox("all"))
os.remove("HQ.png")
os.remove("histogram.png")
root.mainloop()
def ImageSmoothing_And_EdgeDetection():
for widget in bottom_frame.winfo_children():
widget.destroy()
for widget in bottom_frame_2.winfo_children():
widget.destroy()
canvas.config(scrollregion = (0, 0, 0, 0))
global img_gray, flag
if flag == False:
tk.messagebox.showinfo("操作錯誤", "請先新增圖片")
root.mainloop()
ISED_img = np.array(img_gray)
rows, cols = ISED_img.shape
n = askinteger("輸入整數", "輸入convolution masks大小(奇數數字)")
if n == None or n % 2 == 0:
tk.messagebox.showinfo("操作錯誤", "convolution masks不為奇數")
root.mainloop()
elif rows - n + 1 <= 0 or cols - n + 1 <= 0:
tk.messagebox.showinfo("操作錯誤", "convolution masks數字過大")
root.mainloop()
convolution_masks_IS = np.empty((n, n) , dtype = float)
convolution_masks_ED = np.empty((n, n) , dtype = float)
array_window = tk.Toplevel(root)
array_IS = []
array_ED = []
tk.Label(array_window, text = "Image smoothing convolution masks").grid(row = 0, column = 0)
for i in range(n):
for j in range(n):
IS = tk.Entry(array_window, width = 5)
IS.grid(row = i, column = j + 1, pady = 1, padx = 1)
array_IS.append(IS)
tk.Label(array_window, text = "").grid(row = n, column = 0)
tk.Label(array_window, text = "Edge detection convolution masks").grid(row = n + 1, column = 0)
for i in range(n):
for j in range(n):
ED = tk.Entry(array_window, width = 5)
ED.grid(row = i + n + 1, column = j + 1, pady = 1, padx = 1)
array_ED.append(ED)
def calculate():
global new_img_gray
try:
for i in range(len(array_IS)):
convolution_masks_IS[i // n][i % n] = array_IS[i].get()
for i in range(len(array_ED)):
convolution_masks_ED[i // n][i % n] = array_ED[i].get()
except:
tk.messagebox.showinfo("操作錯誤", "Array不全為數字")
array_window.destroy()
root.mainloop()
out_img = np.empty((rows - n + 1, cols - n + 1) , dtype = float)
for i in range(rows - n + 1):
for j in range(cols - n + 1):
out_img[i][j] = (ISED_img[i:i + n, j:j + n] * convolution_masks_IS).sum() / convolution_masks_IS.sum()
for i in range(rows - n + 1):
for j in range(cols - n + 1):
out_img[i][j] = (ISED_img[i:i + n, j:j + n] * convolution_masks_ED).sum()
out_img = misc.imresize(out_img, [rows, cols])
misc.imsave("ISED.png", out_img)
DWT_img = ImageTk.PhotoImage(Image.open("ISED.png"))
tk.Label(bottom_frame, image = new_img_gray).pack(side = tk.LEFT)
tk.Label(bottom_frame, image = DWT_img).pack(side = tk.RIGHT)
os.remove("ISED.png")
array_window.destroy()
root.mainloop()
tk.Button(array_window, text = '確定', fg = 'black', command = calculate).grid(row = 2 * n + 1, column = 0)
tk.Button(top_frame, text = '新增圖片', fg = 'black', command = new_image).pack(side = tk.LEFT)
tk.Button(top_frame, text = '灰階圖片', fg = 'black', command = gray_image).pack(side = tk.LEFT)
tk.Button(top_frame, text = '灰階直方圖', fg = 'black', command = set_histogram).pack(side = tk.LEFT)
tk.Button(top_frame, text = '加性高斯白雜訊', fg = 'black', command = AWGN).pack(side = tk.LEFT)
tk.Button(top_frame, text = '離散小波轉換', fg = 'black', command = DWT).pack(side = tk.LEFT)
tk.Button(top_frame, text = '直方圖均衡化', fg = 'black', command = histogram_qualization).pack(side = tk.LEFT)
tk.Button(top_frame, text = '影像平滑化與邊緣偵測', fg = 'black', command = ImageSmoothing_And_EdgeDetection).pack(side = tk.LEFT)
file_path = None
img_gray = None
new_img = None
new_img_gray = None
img_gray_histogram = None
flag = False
root.mainloop() | 0.147863 | 0.150528 |
import pytest
from pytest import approx
import numpy as np
import quimb as qu
import quimb.tensor as qtn
from quimb.tensor.tensor_1d_tebd import OTOC_local
class TestTEBD:
def test_setup_and_sweep(self):
n = 10
H_int = qu.ham_heis(n=2, cyclic=False)
psi0 = qtn.MPS_neel_state(n, dtype=complex)
tebd = qtn.TEBD(psi0, H_int, dt=0.05)
assert tebd.pt.bond_size(0, 1) == 1
tebd.sweep('right', 1 / 2)
assert tebd.pt.count_canonized() == (n - 1, 0)
tebd.sweep('left', 1 / 2)
assert tebd.pt.count_canonized() == (0, n - 1)
assert tebd.pt.bond_size(0, 1) > 1
assert not tebd._queued_sweep
@pytest.mark.parametrize('cyclic', [False, True])
@pytest.mark.parametrize('order', [2, 4])
@pytest.mark.parametrize('dt,tol', [
(0.0759283, None),
(None, 1e-4),
(0.0759283, 1e-4),
(None, None),
])
@pytest.mark.parametrize('n', [5, 6])
def test_evolve_obc_pbc(self, n, order, dt, tol, cyclic):
tf = 1.0 if cyclic else 2
psi0 = qtn.MPS_neel_state(n, cyclic=cyclic)
H_int = qu.ham_heis(2, cyclic=False) # this is just the interaction
if dt and tol:
with pytest.raises(ValueError):
qtn.TEBD(psi0, H_int, dt=dt, tol=tol)
return
tebd = qtn.TEBD(psi0, H_int, dt=dt, tol=tol)
assert tebd.cyclic == cyclic
tebd.split_opts['cutoff'] = 1e-10
if (dt is None and tol is None):
with pytest.raises(ValueError):
tebd.update_to(tf, order=order)
return
tebd.update_to(tf, order=order)
assert tebd.t == approx(tf)
assert not tebd._queued_sweep
dpsi0 = psi0.to_dense()
dham = qu.ham_heis(n=n, sparse=True, cyclic=cyclic)
evo = qu.Evolution(dpsi0, dham)
evo.update_to(tf)
assert qu.expec(evo.pt, tebd.pt.to_dense()) == approx(1, rel=1e-3 if
cyclic else 1e-5)
@pytest.mark.parametrize('cyclic', [False, True])
@pytest.mark.parametrize('order', [2, 4])
@pytest.mark.parametrize('dt,tol', [
(0.0759283, None),
(None, 1e-4),
(0.0759283, 1e-4),
(None, None),
])
@pytest.mark.parametrize('n', [5, 6])
def test_imag_evolve_obc_pbc(self, n, order, dt, tol, cyclic):
tf = 2
ground = qtn.MPS_computational_state('0' * n, cyclic=cyclic)
excited = qtn.MPS_computational_state(('01' * n)[:n], cyclic=cyclic)
psi0 = (ground + excited) / 2**0.5
H = qtn.ham_1d_ising(n, j=-1, cyclic=cyclic)
if dt and tol:
with pytest.raises(ValueError):
qtn.TEBD(psi0, H, dt=dt, tol=tol, imag=True)
return
tebd = qtn.TEBD(psi0, H, dt=dt, tol=tol, imag=True)
assert tebd.cyclic == cyclic
tebd.split_opts['cutoff'] = 1e-10
if (dt is None and tol is None):
with pytest.raises(ValueError):
tebd.update_to(tf, order=order)
return
tebd.update_to(tf, order=order)
assert tebd.t == approx(tf)
assert not tebd._queued_sweep
H_mpo = qtn.MPO_ham_ising(n, j=-1, cyclic=cyclic)
E_ground = qtn.expec_TN_1D(ground.H, H_mpo, ground)
E_excited = qtn.expec_TN_1D(excited.H, H_mpo, excited)
psi1 = (np.exp(-tf * E_ground) * ground +
np.exp(-tf * E_excited) * excited)
psi1 /= np.sqrt(np.exp(-2 * tf * E_ground) +
np.exp(-2 * tf * E_excited))
assert qtn.expec_TN_1D(psi1.H, tebd.pt) == approx(1, rel=1e-5)
@pytest.mark.parametrize('cyclic', [False, True])
@pytest.mark.parametrize('dt,tol', [
(0.0659283, None),
(None, 1e-5),
])
def test_at_times(self, dt, tol, cyclic):
n = 10
psi0 = qtn.MPS_neel_state(n, cyclic=cyclic)
H_int = qu.ham_heis(2, cyclic=False)
tebd = qtn.TEBD(psi0, H_int, dt=dt, tol=tol)
for pt in tebd.at_times([0.1, 0.2, 0.3, 0.4, 0.5]):
assert pt.H @ pt == approx(1, rel=1e-5)
assert tebd.err <= 1e-5
def test_local_ham_1d_and_single_site_terms(self):
n = 10
psi0 = qtn.MPS_neel_state(n)
lham_1d = qtn.ham_1d_XY(n, bz=0.9)
tebd = qtn.TEBD(psi0, lham_1d)
tebd.update_to(1.0, tol=1e-5)
assert abs(psi0.H @ tebd.pt) < 1.0
assert tebd.pt.entropy(5) > 0.0
psi0_dns = qu.neel_state(n)
H_dns = qu.ham_XY(10, jxy=1.0, bz=0.9, cyclic=False)
evo = qu.Evolution(psi0_dns, H_dns)
evo.update_to(1.0)
assert qu.expec(tebd.pt.to_dense(), evo.pt) == pytest.approx(1.0)
def test_local_ham_1d_and_single_site_terms_heis(self):
n = 10
psi0 = qtn.MPS_neel_state(n)
lham_1d = qtn.ham_1d_heis(n, j=(0.7, 0.8, 0.9), bz=0.337)
tebd = qtn.TEBD(psi0, lham_1d)
tebd.update_to(1.0, tol=1e-5)
assert abs(psi0.H @ tebd.pt) < 1.0
assert tebd.pt.entropy(5) > 0.0
psi0_dns = qu.neel_state(n)
H_dns = qu.ham_heis(10, j=(0.7, 0.8, 0.9), b=0.337, cyclic=False)
evo = qu.Evolution(psi0_dns, H_dns)
evo.update_to(1.0)
assert qu.expec(tebd.pt.to_dense(), evo.pt) == pytest.approx(1.0)
def test_non_trans_invar(self):
n = 10
tf = 1.0
p0 = qtn.MPS_rand_state(n, bond_dim=1)
H = qtn.ham_1d_mbl(n, dh=1.7, cyclic=False, seed=42)
print(H)
tebd = qtn.TEBD(p0, H)
tebd.update_to(tf, tol=1e-3)
p0d = p0.to_dense()
Hd = qu.ham_mbl(n, dh=1.7, cyclic=False, seed=42, sparse=True)
evo = qu.Evolution(p0d, Hd)
evo.update_to(tf)
assert qu.expec(tebd.pt.to_dense(), evo.pt) == pytest.approx(1.0)
@pytest.mark.parametrize('cyclic', [False, True])
def test_ising_model_with_field(self, cyclic):
p = qtn.MPS_computational_state('0000100000', cyclic=cyclic)
pd = p.to_dense()
lham_1d = qtn.ham_1d_ising(10, j=4, bx=1, cyclic=cyclic)
H_mpo = qtn.MPO_ham_ising(10, j=4, bx=1, cyclic=cyclic)
H = qu.ham_ising(10, jz=4, bx=1, cyclic=cyclic)
tebd = qtn.TEBD(p, lham_1d, tol=1e-6)
tebd.split_opts['cutoff'] = 1e-9
tebd.split_opts['cutoff_mode'] = 'rel'
evo = qu.Evolution(pd, H)
e0 = qu.expec(pd, H)
e0_mpo = qtn.expec_TN_1D(p.H, H_mpo, p)
assert e0_mpo == pytest.approx(e0)
tf = 2
ts = np.linspace(0, tf, 21)
evo.update_to(tf)
for pt in tebd.at_times(ts):
assert isinstance(pt, qtn.MatrixProductState)
assert (pt.H @ pt) == pytest.approx(1.0, rel=1e-5)
assert (qu.expec(tebd.pt.to_dense(), evo.pt) ==
pytest.approx(1.0, rel=1e-5))
ef_mpo = qtn.expec_TN_1D(tebd.pt.H, H_mpo, tebd.pt)
assert ef_mpo == pytest.approx(e0, 1e-5)
def test_OTOC_local():
L = 10
psi0 = qtn.MPS_computational_state('0' * L, cyclic=True)
H1 = qtn.ham_1d_ising(L, j=4, bx=0, cyclic=True)
H_back1 = qtn.ham_1d_ising(L, j=-4, bx=0, cyclic=True)
H2 = qtn.ham_1d_ising(L, j=4, bx=1, cyclic=True)
H_back2 = qtn.ham_1d_ising(L, j=-4, bx=-1, cyclic=True)
A = qu.pauli('z')
ts = np.linspace(1, 2, 2)
OTOC_t = []
for OTOC in OTOC_local(psi0, H1, H_back1, ts, 5, A, tol=1e-5,
split_opts={'cutoff': 1e-5, 'cutoff_mode': 'rel'},
initial_eigenstate='check'):
OTOC_t += [OTOC]
assert OTOC_t[0] == pytest.approx(1.0)
assert OTOC_t[1] == pytest.approx(1.0)
x_t = []
for x in OTOC_local(psi0, H2, H_back2, ts, 5, A, tol=1e-5,
split_opts={'cutoff': 1e-5, 'cutoff_mode': 'rel'},
initial_eigenstate='check'):
x_t += [x]
assert x_t[0] == pytest.approx(0.52745, rel=1e-4, abs=1e-9)
assert x_t[1] == pytest.approx(0.70440, rel=1e-4, abs=1e-9) | tests/test_tensor/test_tensor_tebd.py | import pytest
from pytest import approx
import numpy as np
import quimb as qu
import quimb.tensor as qtn
from quimb.tensor.tensor_1d_tebd import OTOC_local
class TestTEBD:
def test_setup_and_sweep(self):
n = 10
H_int = qu.ham_heis(n=2, cyclic=False)
psi0 = qtn.MPS_neel_state(n, dtype=complex)
tebd = qtn.TEBD(psi0, H_int, dt=0.05)
assert tebd.pt.bond_size(0, 1) == 1
tebd.sweep('right', 1 / 2)
assert tebd.pt.count_canonized() == (n - 1, 0)
tebd.sweep('left', 1 / 2)
assert tebd.pt.count_canonized() == (0, n - 1)
assert tebd.pt.bond_size(0, 1) > 1
assert not tebd._queued_sweep
@pytest.mark.parametrize('cyclic', [False, True])
@pytest.mark.parametrize('order', [2, 4])
@pytest.mark.parametrize('dt,tol', [
(0.0759283, None),
(None, 1e-4),
(0.0759283, 1e-4),
(None, None),
])
@pytest.mark.parametrize('n', [5, 6])
def test_evolve_obc_pbc(self, n, order, dt, tol, cyclic):
tf = 1.0 if cyclic else 2
psi0 = qtn.MPS_neel_state(n, cyclic=cyclic)
H_int = qu.ham_heis(2, cyclic=False) # this is just the interaction
if dt and tol:
with pytest.raises(ValueError):
qtn.TEBD(psi0, H_int, dt=dt, tol=tol)
return
tebd = qtn.TEBD(psi0, H_int, dt=dt, tol=tol)
assert tebd.cyclic == cyclic
tebd.split_opts['cutoff'] = 1e-10
if (dt is None and tol is None):
with pytest.raises(ValueError):
tebd.update_to(tf, order=order)
return
tebd.update_to(tf, order=order)
assert tebd.t == approx(tf)
assert not tebd._queued_sweep
dpsi0 = psi0.to_dense()
dham = qu.ham_heis(n=n, sparse=True, cyclic=cyclic)
evo = qu.Evolution(dpsi0, dham)
evo.update_to(tf)
assert qu.expec(evo.pt, tebd.pt.to_dense()) == approx(1, rel=1e-3 if
cyclic else 1e-5)
@pytest.mark.parametrize('cyclic', [False, True])
@pytest.mark.parametrize('order', [2, 4])
@pytest.mark.parametrize('dt,tol', [
(0.0759283, None),
(None, 1e-4),
(0.0759283, 1e-4),
(None, None),
])
@pytest.mark.parametrize('n', [5, 6])
def test_imag_evolve_obc_pbc(self, n, order, dt, tol, cyclic):
tf = 2
ground = qtn.MPS_computational_state('0' * n, cyclic=cyclic)
excited = qtn.MPS_computational_state(('01' * n)[:n], cyclic=cyclic)
psi0 = (ground + excited) / 2**0.5
H = qtn.ham_1d_ising(n, j=-1, cyclic=cyclic)
if dt and tol:
with pytest.raises(ValueError):
qtn.TEBD(psi0, H, dt=dt, tol=tol, imag=True)
return
tebd = qtn.TEBD(psi0, H, dt=dt, tol=tol, imag=True)
assert tebd.cyclic == cyclic
tebd.split_opts['cutoff'] = 1e-10
if (dt is None and tol is None):
with pytest.raises(ValueError):
tebd.update_to(tf, order=order)
return
tebd.update_to(tf, order=order)
assert tebd.t == approx(tf)
assert not tebd._queued_sweep
H_mpo = qtn.MPO_ham_ising(n, j=-1, cyclic=cyclic)
E_ground = qtn.expec_TN_1D(ground.H, H_mpo, ground)
E_excited = qtn.expec_TN_1D(excited.H, H_mpo, excited)
psi1 = (np.exp(-tf * E_ground) * ground +
np.exp(-tf * E_excited) * excited)
psi1 /= np.sqrt(np.exp(-2 * tf * E_ground) +
np.exp(-2 * tf * E_excited))
assert qtn.expec_TN_1D(psi1.H, tebd.pt) == approx(1, rel=1e-5)
@pytest.mark.parametrize('cyclic', [False, True])
@pytest.mark.parametrize('dt,tol', [
(0.0659283, None),
(None, 1e-5),
])
def test_at_times(self, dt, tol, cyclic):
n = 10
psi0 = qtn.MPS_neel_state(n, cyclic=cyclic)
H_int = qu.ham_heis(2, cyclic=False)
tebd = qtn.TEBD(psi0, H_int, dt=dt, tol=tol)
for pt in tebd.at_times([0.1, 0.2, 0.3, 0.4, 0.5]):
assert pt.H @ pt == approx(1, rel=1e-5)
assert tebd.err <= 1e-5
def test_local_ham_1d_and_single_site_terms(self):
n = 10
psi0 = qtn.MPS_neel_state(n)
lham_1d = qtn.ham_1d_XY(n, bz=0.9)
tebd = qtn.TEBD(psi0, lham_1d)
tebd.update_to(1.0, tol=1e-5)
assert abs(psi0.H @ tebd.pt) < 1.0
assert tebd.pt.entropy(5) > 0.0
psi0_dns = qu.neel_state(n)
H_dns = qu.ham_XY(10, jxy=1.0, bz=0.9, cyclic=False)
evo = qu.Evolution(psi0_dns, H_dns)
evo.update_to(1.0)
assert qu.expec(tebd.pt.to_dense(), evo.pt) == pytest.approx(1.0)
def test_local_ham_1d_and_single_site_terms_heis(self):
n = 10
psi0 = qtn.MPS_neel_state(n)
lham_1d = qtn.ham_1d_heis(n, j=(0.7, 0.8, 0.9), bz=0.337)
tebd = qtn.TEBD(psi0, lham_1d)
tebd.update_to(1.0, tol=1e-5)
assert abs(psi0.H @ tebd.pt) < 1.0
assert tebd.pt.entropy(5) > 0.0
psi0_dns = qu.neel_state(n)
H_dns = qu.ham_heis(10, j=(0.7, 0.8, 0.9), b=0.337, cyclic=False)
evo = qu.Evolution(psi0_dns, H_dns)
evo.update_to(1.0)
assert qu.expec(tebd.pt.to_dense(), evo.pt) == pytest.approx(1.0)
def test_non_trans_invar(self):
n = 10
tf = 1.0
p0 = qtn.MPS_rand_state(n, bond_dim=1)
H = qtn.ham_1d_mbl(n, dh=1.7, cyclic=False, seed=42)
print(H)
tebd = qtn.TEBD(p0, H)
tebd.update_to(tf, tol=1e-3)
p0d = p0.to_dense()
Hd = qu.ham_mbl(n, dh=1.7, cyclic=False, seed=42, sparse=True)
evo = qu.Evolution(p0d, Hd)
evo.update_to(tf)
assert qu.expec(tebd.pt.to_dense(), evo.pt) == pytest.approx(1.0)
@pytest.mark.parametrize('cyclic', [False, True])
def test_ising_model_with_field(self, cyclic):
p = qtn.MPS_computational_state('0000100000', cyclic=cyclic)
pd = p.to_dense()
lham_1d = qtn.ham_1d_ising(10, j=4, bx=1, cyclic=cyclic)
H_mpo = qtn.MPO_ham_ising(10, j=4, bx=1, cyclic=cyclic)
H = qu.ham_ising(10, jz=4, bx=1, cyclic=cyclic)
tebd = qtn.TEBD(p, lham_1d, tol=1e-6)
tebd.split_opts['cutoff'] = 1e-9
tebd.split_opts['cutoff_mode'] = 'rel'
evo = qu.Evolution(pd, H)
e0 = qu.expec(pd, H)
e0_mpo = qtn.expec_TN_1D(p.H, H_mpo, p)
assert e0_mpo == pytest.approx(e0)
tf = 2
ts = np.linspace(0, tf, 21)
evo.update_to(tf)
for pt in tebd.at_times(ts):
assert isinstance(pt, qtn.MatrixProductState)
assert (pt.H @ pt) == pytest.approx(1.0, rel=1e-5)
assert (qu.expec(tebd.pt.to_dense(), evo.pt) ==
pytest.approx(1.0, rel=1e-5))
ef_mpo = qtn.expec_TN_1D(tebd.pt.H, H_mpo, tebd.pt)
assert ef_mpo == pytest.approx(e0, 1e-5)
def test_OTOC_local():
L = 10
psi0 = qtn.MPS_computational_state('0' * L, cyclic=True)
H1 = qtn.ham_1d_ising(L, j=4, bx=0, cyclic=True)
H_back1 = qtn.ham_1d_ising(L, j=-4, bx=0, cyclic=True)
H2 = qtn.ham_1d_ising(L, j=4, bx=1, cyclic=True)
H_back2 = qtn.ham_1d_ising(L, j=-4, bx=-1, cyclic=True)
A = qu.pauli('z')
ts = np.linspace(1, 2, 2)
OTOC_t = []
for OTOC in OTOC_local(psi0, H1, H_back1, ts, 5, A, tol=1e-5,
split_opts={'cutoff': 1e-5, 'cutoff_mode': 'rel'},
initial_eigenstate='check'):
OTOC_t += [OTOC]
assert OTOC_t[0] == pytest.approx(1.0)
assert OTOC_t[1] == pytest.approx(1.0)
x_t = []
for x in OTOC_local(psi0, H2, H_back2, ts, 5, A, tol=1e-5,
split_opts={'cutoff': 1e-5, 'cutoff_mode': 'rel'},
initial_eigenstate='check'):
x_t += [x]
assert x_t[0] == pytest.approx(0.52745, rel=1e-4, abs=1e-9)
assert x_t[1] == pytest.approx(0.70440, rel=1e-4, abs=1e-9) | 0.641198 | 0.671504 |
from asgiref.sync import async_to_sync
from channels.generic.websocket import WebsocketConsumer
import json
class LobbyConsumer(WebsocketConsumer):
def connect(self):
self.room_name = 'lobby'
self.room_group_name = 'chat_%s' % self.room_name
# Join room group
async_to_sync(self.channel_layer.group_add)(
self.room_group_name,
self.channel_name
)
self.accept()
def disconnect(self, close_code):
# Leave room group
async_to_sync(self.channel_layer.group_discard)(
self.room_group_name,
self.channel_name
)
# Receive message from WebSocket
def receive(self, text_data):
text_data_json = json.loads(text_data)
message = text_data_json['message']
name = text_data_json['name']
colour = text_data_json['colour']
prev_name = text_data_json['prev']
# Send message to room group
async_to_sync(self.channel_layer.group_send)(
self.room_group_name,
{
'type': 'chat_message',
'message': message,
'name': name,
'colour': colour,
'prev': prev_name
}
)
# Receive message from room group
def chat_message(self, event):
message = event['message']
name = event['name']
colour = event['colour']
prev_name = event['prev']
# Send message to WebSocket
self.send(text_data=json.dumps({
'name': name,
'colour': colour,
'message': message,
'prev': prev_name
}))
class ChatConsumer(WebsocketConsumer):
def connect(self):
self.room_name = self.scope['url_route']['kwargs']['channel_id']
self.room_group_name = 'chat_%s' % self.room_name
# Join room group
async_to_sync(self.channel_layer.group_add)(
self.room_group_name,
self.channel_name
)
self.accept()
def disconnect(self, close_code):
# Leave room group
async_to_sync(self.channel_layer.group_discard)(
self.room_group_name,
self.channel_name
)
# Receive message from WebSocket
def receive(self, text_data):
text_data_json = json.loads(text_data)
message = text_data_json['message']
name = text_data_json['name']
colour = text_data_json['colour']
prev_name = text_data_json['prev']
# Send message to room group
async_to_sync(self.channel_layer.group_send)(
self.room_group_name,
{
'type': 'chat_message',
'message': message,
'name': name,
'colour': colour,
'prev': prev_name
}
)
# Receive message from room group
def chat_message(self, event):
message = event['message']
name = event['name']
colour = event['colour']
prev_name = event['prev']
# Send message to WebSocket
self.send(text_data=json.dumps({
'name': name,
'colour': colour,
'message': message,
'prev': prev_name
})) | lobby/consumers.py | from asgiref.sync import async_to_sync
from channels.generic.websocket import WebsocketConsumer
import json
class LobbyConsumer(WebsocketConsumer):
def connect(self):
self.room_name = 'lobby'
self.room_group_name = 'chat_%s' % self.room_name
# Join room group
async_to_sync(self.channel_layer.group_add)(
self.room_group_name,
self.channel_name
)
self.accept()
def disconnect(self, close_code):
# Leave room group
async_to_sync(self.channel_layer.group_discard)(
self.room_group_name,
self.channel_name
)
# Receive message from WebSocket
def receive(self, text_data):
text_data_json = json.loads(text_data)
message = text_data_json['message']
name = text_data_json['name']
colour = text_data_json['colour']
prev_name = text_data_json['prev']
# Send message to room group
async_to_sync(self.channel_layer.group_send)(
self.room_group_name,
{
'type': 'chat_message',
'message': message,
'name': name,
'colour': colour,
'prev': prev_name
}
)
# Receive message from room group
def chat_message(self, event):
message = event['message']
name = event['name']
colour = event['colour']
prev_name = event['prev']
# Send message to WebSocket
self.send(text_data=json.dumps({
'name': name,
'colour': colour,
'message': message,
'prev': prev_name
}))
class ChatConsumer(WebsocketConsumer):
def connect(self):
self.room_name = self.scope['url_route']['kwargs']['channel_id']
self.room_group_name = 'chat_%s' % self.room_name
# Join room group
async_to_sync(self.channel_layer.group_add)(
self.room_group_name,
self.channel_name
)
self.accept()
def disconnect(self, close_code):
# Leave room group
async_to_sync(self.channel_layer.group_discard)(
self.room_group_name,
self.channel_name
)
# Receive message from WebSocket
def receive(self, text_data):
text_data_json = json.loads(text_data)
message = text_data_json['message']
name = text_data_json['name']
colour = text_data_json['colour']
prev_name = text_data_json['prev']
# Send message to room group
async_to_sync(self.channel_layer.group_send)(
self.room_group_name,
{
'type': 'chat_message',
'message': message,
'name': name,
'colour': colour,
'prev': prev_name
}
)
# Receive message from room group
def chat_message(self, event):
message = event['message']
name = event['name']
colour = event['colour']
prev_name = event['prev']
# Send message to WebSocket
self.send(text_data=json.dumps({
'name': name,
'colour': colour,
'message': message,
'prev': prev_name
})) | 0.3863 | 0.065965 |
from copy import deepcopy
from random import sample, choice
from BucketLib.bucket import Bucket
from cb_tools.cb_cli import CbCli
from constants.sdk_constants.java_client import SDKConstants
from couchbase_helper.documentgenerator import doc_generator
from couchbase_helper.durability_helper import BucketDurability
from epengine.durability_base import BucketDurabilityBase
from error_simulation.cb_error import CouchbaseError
from sdk_client3 import SDKClient
from sdk_exceptions import SDKException
class CreateBucketTests(BucketDurabilityBase):
def setUp(self):
super(CreateBucketTests, self).setUp()
def tearDown(self):
super(CreateBucketTests, self).tearDown()
def test_create_bucket_using_cli(self):
"""
Create Bucket with all possible durability_levels and make sure
durability levels are honored for document CRUDs
- Will test for all bucket types (Couchbase, Ephemeral, Memcached)
- With all possible d_levels for bucket_durability
- Perform doc insert for each bucket to validate the sync_writes
"""
# Create cb_cli session object
shell = self.vbs_in_node[self.cluster.master]["shell"]
cb_cli = CbCli(shell)
err_for_three_replicas = "ERROR: durability_min_level - Durability " \
"minimum level cannot be specified with 3"
for d_level in self.bucket_util.get_supported_durability_levels():
create_failed = False
test_step = "Creating %s bucket with level %s" \
% (self.bucket_type, d_level)
bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
# Remove unsupported replica string in case if MC bucket
if self.bucket_type == Bucket.Type.MEMCACHED:
del bucket_dict[Bucket.replicaNumber]
# Object to support performing CRUDs
bucket_obj = Bucket(bucket_dict)
output = cb_cli.create_bucket(bucket_dict, wait=True)
self.log.info(output)
if self.num_replicas == Bucket.ReplicaNum.THREE \
and d_level != Bucket.DurabilityLevel.NONE:
if err_for_three_replicas not in str(output):
self.log_failure("Bucket created with replica=3")
else:
create_failed = True
elif "SUCCESS: Bucket created" not in str(output):
create_failed = True
if d_level in self.possible_d_levels[self.bucket_type]:
self.log_failure("Create failed for %s bucket "
"with min_durability_level %s"
% (self.bucket_type, d_level))
else:
# Wait for bucket warm_up to complete
while not self.bucket_util.is_warmup_complete(self.cluster,
[bucket_obj]):
pass
self.get_vbucket_type_mapping(bucket_obj.name)
self.cluster.buckets = [bucket_obj]
self.bucket_util.print_bucket_stats(self.cluster)
self.summary.add_step(test_step)
# Perform CRUDs to validate bucket_creation with durability
if not create_failed:
verification_dict = self.get_cb_stat_verification_dict()
self.validate_durability_with_crud(bucket_obj, d_level,
verification_dict)
self.summary.add_step("Validate_CRUD_operation")
# Cbstats vbucket-details validation
self.cb_stat_verify(verification_dict)
output = cb_cli.delete_bucket(bucket_obj.name)
if create_failed:
if "ERROR: Bucket not found" not in str(output):
self.log_failure("Mismatch in bucket-delete output")
elif "SUCCESS: Bucket deleted" not in str(output):
self.log_failure("Mismatch in bucket-delete output")
self.summary.add_step("Delete bucket")
def test_create_bucket_using_rest(self):
    """Create buckets over REST for every supported durability level.

    Validates that replica=3 only permits durability 'None', that
    levels unsupported by the bucket type are rejected, and that each
    successfully created bucket honors its durability for doc CRUDs.
    """
    log_failure_msg = "Bucket creation succeeded for replica=3"
    for d_level in self.bucket_util.get_supported_durability_levels():
        step_desc = "Creating %s bucket with level %s" \
                    % (self.bucket_type, d_level)
        # Bucket object used for both creation and the later CRUDs
        new_bucket = Bucket(self.get_bucket_dict(self.bucket_type,
                                                 d_level))
        creation_failed = False
        try:
            self.bucket_util.create_bucket(self.cluster, new_bucket,
                                           wait_for_warmup=True)
            self.get_vbucket_type_mapping(new_bucket.name)
            if self.num_replicas == Bucket.ReplicaNum.THREE:
                # With 3 replicas only durability 'None' may succeed
                if d_level != Bucket.DurabilityLevel.NONE:
                    self.log_failure(log_failure_msg)
            elif d_level not in self.possible_d_levels[self.bucket_type]:
                self.log_failure("Create succeeded for %s bucket for "
                                 "unsupported durability %s"
                                 % (self.bucket_type, d_level))
        except Exception as create_err:
            creation_failed = True
            self.log.debug(create_err)

        self.bucket_util.print_bucket_stats(self.cluster)
        self.summary.add_step(step_desc)

        if not creation_failed:
            # Successful creation => bucket durability must hold for CRUDs
            stat_expectations = self.get_cb_stat_verification_dict()
            self.validate_durability_with_crud(new_bucket, d_level,
                                               stat_expectations)
            self.summary.add_step("Validate CRUD operation")
            # Cbstats vbucket-details validation
            self.cb_stat_verify(stat_expectations)

        self.bucket_util.delete_bucket(self.cluster, new_bucket)
        self.summary.add_step("Bucket deletion")
class BucketDurabilityTests(BucketDurabilityBase):
def setUp(self):
    """Delegate to the shared BucketDurabilityBase setup."""
    super(BucketDurabilityTests, self).setUp()
def tearDown(self):
    """Delegate to the shared BucketDurabilityBase teardown."""
    super(BucketDurabilityTests, self).tearDown()
def test_durability_with_bucket_level_none(self):
    """Bucket durability 'None' must not restrict client durability.

    Creates one bucket with durability level 'None', then runs CRUDs
    with every supported client-side durability level and verifies
    each level is honored by the server.
    """
    create_desc = "Creating %s bucket with level 'None'" % self.bucket_type
    bucket_d_level = Bucket.DurabilityLevel.NONE
    verification_dict = self.get_cb_stat_verification_dict()
    bucket_dict = self.get_bucket_dict(self.bucket_type, bucket_d_level)

    self.log.info(create_desc)
    # Bucket object used both for creation and the later CRUDs
    target_bucket = Bucket(bucket_dict)
    self.bucket_util.create_bucket(self.cluster, target_bucket,
                                   wait_for_warmup=True)
    self.get_vbucket_type_mapping(target_bucket.name)
    self.summary.add_step(create_desc)

    # A distinct doc_start_index per level avoids creating/deleting the
    # same docs across different client durability levels
    for level_num, d_level in \
            enumerate(self.get_supported_durability_for_bucket()):
        self.validate_durability_with_crud(
            target_bucket, bucket_d_level, verification_dict,
            doc_durability=d_level,
            doc_start_index=level_num * 10)
        self.summary.add_step("CRUD with doc_durability %s" % d_level)
        # Cbstats vbucket-details validation
        self.cb_stat_verify(verification_dict)
def test_ops_only_with_bucket_level_durability(self):
    """Bucket-level durability alone must upgrade plain client writes.

    For every non-'None' level: create a bucket carrying that level,
    run CRUDs WITHOUT explicit client durability, and verify the server
    still applies the bucket's durability to the operations.
    """
    for d_level in self.get_supported_durability_for_bucket():
        if d_level == Bucket.DurabilityLevel.NONE:
            # A 'None' bucket cannot demonstrate server-side durability
            continue

        step_desc = "Creating %s bucket with level '%s'" \
                    % (self.bucket_type, d_level)
        verification_dict = self.get_cb_stat_verification_dict()
        self.log.info(step_desc)

        # Create the bucket that carries the bucket-level durability
        durable_bucket = Bucket(self.get_bucket_dict(self.bucket_type,
                                                     d_level))
        self.bucket_util.create_bucket(self.cluster, durable_bucket,
                                       wait_for_warmup=True)
        self.get_vbucket_type_mapping(durable_bucket.name)
        self.summary.add_step(step_desc)

        self.validate_durability_with_crud(durable_bucket, d_level,
                                           verification_dict)
        self.summary.add_step("Async write with bucket durability %s"
                              % d_level)
        # Cbstats vbucket-details validation
        self.cb_stat_verify(verification_dict)

        # Delete the bucket on server
        self.bucket_util.delete_bucket(self.cluster, durable_bucket)
        self.summary.add_step("Delete %s bucket" % self.bucket_type)
def test_sub_doc_op_with_bucket_level_durability(self):
    """
    Create Buckets with durability_levels set and perform
    Sub_doc CRUDs from client without durability settings and
    validate the ops to make sure respective durability is honored
    """
    # Single base document under which all sub_doc ops are performed
    key, value = doc_generator("test_key", 0, 1).next()
    sub_doc_key = "sub_doc_key"
    sub_doc_vals = ["val_1", "val_2", "val_3", "val_4", "val_5"]
    for d_level in self.get_supported_durability_for_bucket():
        # Avoid creating bucket with durability=None
        if d_level == Bucket.DurabilityLevel.NONE:
            continue

        step_desc = "Creating %s bucket with level '%s'" \
                    % (self.bucket_type, d_level)
        verification_dict = self.get_cb_stat_verification_dict()
        self.log.info(step_desc)

        # Object to support performing CRUDs and create Bucket
        bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
        bucket_obj = Bucket(bucket_dict)
        self.bucket_util.create_bucket(self.cluster, bucket_obj,
                                       wait_for_warmup=True)
        # NOTE(review): unlike the sibling tests, get_vbucket_type_mapping()
        # is not called here -- confirm cb_stat_verify() does not need it.
        self.summary.add_step(step_desc)

        # SDK client to perform sub_doc ops
        client = SDKClient([self.cluster.master], bucket_obj)

        result = client.crud("create", key, value)
        # Bucket-level durability turns this plain create into a sync_write
        verification_dict["ops_create"] += 1
        verification_dict["sync_write_committed_count"] += 1
        if result["status"] is False:
            self.log_failure("Doc insert failed for key: %s" % key)

        # Perform sub_doc CRUD
        for sub_doc_op in ["subdoc_insert", "subdoc_upsert",
                           "subdoc_replace"]:
            sub_doc_val = choice(sub_doc_vals)
            _, fail = client.crud(sub_doc_op, key,
                                  [sub_doc_key, sub_doc_val])
            if fail:
                # NOTE(review): message interpolates 'result' (from the
                # initial create), not 'fail' -- presumably intended to
                # show the failure detail; verify.
                self.log_failure("%s failure. Key %s, sub_doc (%s, %s): %s"
                                 % (sub_doc_op, key,
                                    sub_doc_key, sub_doc_val, result))
            else:
                # Sub-doc mutations count as updates on the full doc
                verification_dict["ops_update"] += 1
                verification_dict["sync_write_committed_count"] += 1

            # Read back the path and compare against the written value
            success, fail = client.crud("subdoc_read", key, sub_doc_key)
            if fail or str(success[key]["value"].get(0)) != sub_doc_val:
                self.log_failure("%s failed. Expected: %s, Actual: %s"
                                 % (sub_doc_op, sub_doc_val,
                                    success[key]["value"].get(0)))
            self.summary.add_step("%s for key %s" % (sub_doc_op, key))

        # Subdoc_delete and verify
        sub_doc_op = "subdoc_delete"
        _, fail = client.crud(sub_doc_op, key, sub_doc_key)
        if fail:
            self.log_failure("%s failure. Key %s, sub_doc (%s, %s): %s"
                             % (sub_doc_op, key,
                                sub_doc_key, sub_doc_val, result))
        verification_dict["ops_update"] += 1
        verification_dict["sync_write_committed_count"] += 1

        # Second delete on the same path must report PathNotFound
        _, fail = client.crud(sub_doc_op, key, sub_doc_key)
        if SDKException.PathNotFoundException \
                not in str(fail[key]["error"]):
            self.log_failure("Invalid error after sub_doc_delete")
        self.summary.add_step("%s for key %s" % (sub_doc_op, key))

        # Validate doc_count (only the single base doc should exist)
        self.bucket_util._wait_for_stats_all_buckets(self.cluster,
                                                     self.cluster.buckets)
        self.bucket_util.verify_stats_all_buckets(self.cluster, 1)

        # Cbstats vbucket-details validation
        self.cb_stat_verify(verification_dict)

        # Close SDK client
        client.close()

        # Delete the bucket on server
        self.bucket_util.delete_bucket(self.cluster, bucket_obj)
        self.summary.add_step("Delete %s bucket" % self.bucket_type)
def test_higher_durability_level_from_client(self):
    """
    Create bucket with durability_levels set and perform CRUDs using
    durability_level > the bucket's durability_level and validate
    """
    d_level_order_len = len(self.d_level_order)
    supported_d_levels = self.get_supported_durability_for_bucket()
    for d_level in supported_d_levels:
        create_desc = "Creating %s bucket with level '%s'" \
                      % (self.bucket_type, d_level)
        verification_dict = self.get_cb_stat_verification_dict()
        self.log.info(create_desc)

        bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
        # Object to support performing CRUDs and create Bucket
        bucket_obj = Bucket(bucket_dict)
        self.bucket_util.create_bucket(self.cluster, bucket_obj,
                                       wait_for_warmup=True)
        self.get_vbucket_type_mapping(bucket_obj.name)
        self.summary.add_step(create_desc)

        # Perform doc_ops using all possible higher durability levels
        index = 0
        op_type = "create"
        # Start from the level just above the bucket's own level
        durability_index = self.d_level_order.index(d_level) + 1
        while durability_index < d_level_order_len:
            # Ephemeral case: skip levels this bucket type cannot honor
            if self.d_level_order[durability_index] \
                    not in supported_d_levels:
                durability_index += 1
                continue
            self.validate_durability_with_crud(
                bucket_obj,
                d_level,
                verification_dict,
                op_type=op_type,
                doc_durability=self.d_level_order[durability_index],
                doc_start_index=index)
            self.summary.add_step("%s with doc_level_durability %s"
                                  % (op_type,
                                     self.d_level_order[durability_index]))
            durability_index += 1
            # Shift key range so each level creates fresh docs
            index += 10

        # Cbstats vbucket-details validation
        self.cb_stat_verify(verification_dict)

        # Delete the bucket on server
        self.bucket_util.delete_bucket(self.cluster, bucket_obj)
        self.summary.add_step("Delete %s bucket" % self.bucket_type)
def test_lower_durability_level_from_client(self):
    """
    Create bucket with durability_levels set and perform CRUDs using
    durability_level < the bucket's d_level and validate
    """
    for d_level in self.get_supported_durability_for_bucket():
        create_desc = "Creating %s bucket with level '%s'" \
                      % (self.bucket_type, d_level)
        verification_dict = self.get_cb_stat_verification_dict()
        self.log.info(create_desc)

        bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
        # Object to support performing CRUDs and create Bucket
        bucket_obj = Bucket(bucket_dict)
        self.bucket_util.create_bucket(self.cluster, bucket_obj,
                                       wait_for_warmup=True)
        self.get_vbucket_type_mapping(bucket_obj.name)
        self.summary.add_step(create_desc)

        # Perform doc_ops using all possible lower durability levels
        index = 0
        op_type = "create"
        # Start from the level just below the bucket's own level and
        # walk down to index 0
        durability_index = self.d_level_order.index(d_level) - 1
        while durability_index >= 0:
            self.validate_durability_with_crud(
                bucket_obj,
                d_level,
                verification_dict,
                op_type=op_type,
                doc_durability=self.d_level_order[durability_index],
                doc_start_index=index)
            self.summary.add_step("%s with doc_level_durability %s"
                                  % (op_type,
                                     self.d_level_order[durability_index]))
            durability_index -= 1
            # Shift key range so each level creates fresh docs
            index += 10

        # Cbstats vbucket-details validation
        self.cb_stat_verify(verification_dict)

        # Delete the bucket on server
        self.bucket_util.delete_bucket(self.cluster, bucket_obj)
        self.summary.add_step("Delete %s bucket" % self.bucket_type)
def test_update_durability_level(self):
    """
    Create buckets with None durability levels and perform doc_ops.
    Update bucket_durability using diag-eval with/without doc_ops in
    parallel and validate the doc_ops results.
    """
    # If True, bucket durability is updated while the doc_op task runs;
    # otherwise the task is awaited before the update
    update_during_ops = self.input.param("update_during_ops", False)
    supported_d_levels = self.get_supported_durability_for_bucket()
    supported_bucket_d_levels = self.possible_d_levels[self.bucket_type]

    # Generators: initial load, extra creates, and update/delete over
    # disjoint halves of the initial key range
    create_gen_1 = doc_generator(self.key, 0, self.num_items)
    create_gen_2 = doc_generator("random_keys", self.num_items,
                                 self.num_items*2)
    update_gen = doc_generator(self.key, 0, self.num_items/2)
    delete_gen = doc_generator(self.key, self.num_items/2, self.num_items)

    # Override sdk_timeout to max value to avoid TimeoutExceptions
    self.sdk_timeout = 60

    # Iterate all bucket durability levels in a random order
    for bucket_durability in sample(supported_bucket_d_levels,
                                    len(supported_bucket_d_levels)):
        # NOTE(review): set(bucket_durability) builds a set of the
        # string's CHARACTERS, not {bucket_durability}, so the
        # subtraction removes nothing and this list keeps all supported
        # levels. The per-task .pop() below relies on the list holding
        # at least len(tasks_to_run) entries -- confirm before "fixing".
        b_durability_to_update = list(set(supported_bucket_d_levels)
                                      - set(bucket_durability))
        create_desc = "Create %s bucket with durability level '%s'" \
                      % (self.bucket_type, bucket_durability)

        self.log.info(create_desc)
        bucket_dict = self.get_bucket_dict(self.bucket_type,
                                           bucket_durability)
        # Object to support performing CRUDs and create Bucket
        bucket_obj = Bucket(bucket_dict)
        self.bucket_util.create_bucket(self.cluster, bucket_obj,
                                       wait_for_warmup=True)
        self.get_vbucket_type_mapping(bucket_obj.name)
        self.summary.add_step(create_desc)
        self.bucket_util.print_bucket_stats(self.cluster)

        # Load basic docs to support other CRUDs
        self.log.info("Performing initial doc_load")
        create_task = self.task.async_load_gen_docs(
            self.cluster, bucket_obj, create_gen_1, "create",
            exp=self.maxttl,
            compression=self.sdk_compression,
            timeout_secs=self.sdk_timeout,
            process_concurrency=8,
            batch_size=200,
            sdk_client_pool=self.sdk_client_pool)
        self.task_manager.get_task_result(create_task)
        if create_task.fail:
            self.log_failure("Failures seen during initial creates")
        self.summary.add_step("Initial doc_loading")

        # Initiate CRUD task objects (start_task=False: started one by
        # one in the loop below, each paired with a durability update)
        create_task = self.task.async_load_gen_docs(
            self.cluster, bucket_obj, create_gen_2, "create",
            exp=self.maxttl,
            durability=choice(supported_d_levels),
            compression=self.sdk_compression,
            timeout_secs=self.sdk_timeout,
            process_concurrency=2,
            batch_size=100,
            start_task=False,
            print_ops_rate=False,
            sdk_client_pool=self.sdk_client_pool)
        update_task = self.task.async_load_gen_docs(
            self.cluster, bucket_obj, update_gen, "update",
            exp=self.maxttl,
            durability=choice(supported_d_levels),
            compression=self.sdk_compression,
            timeout_secs=self.sdk_timeout,
            process_concurrency=2,
            batch_size=100,
            start_task=False,
            print_ops_rate=False,
            sdk_client_pool=self.sdk_client_pool)
        read_task = self.task.async_load_gen_docs(
            self.cluster, bucket_obj, update_gen, "read",
            compression=self.sdk_compression,
            timeout_secs=self.sdk_timeout,
            process_concurrency=2,
            batch_size=100,
            start_task=False,
            print_ops_rate=False,
            sdk_client_pool=self.sdk_client_pool)
        delete_task = self.task.async_load_gen_docs(
            self.cluster, bucket_obj, delete_gen, "delete",
            exp=self.maxttl,
            durability=choice(supported_d_levels),
            compression=self.sdk_compression,
            timeout_secs=self.sdk_timeout,
            process_concurrency=2,
            batch_size=100,
            start_task=False,
            print_ops_rate=False,
            sdk_client_pool=self.sdk_client_pool)

        # Start CRUD and update bucket-durability as specified
        # by config param 'update_during_ops'
        tasks_to_run = [create_task, update_task,
                        read_task, delete_task]
        if self.bucket_type == Bucket.Type.EPHEMERAL:
            # Ephemeral runs fewer tasks: create + one of update/delete
            tasks_to_run = [create_task,
                            choice([update_task, delete_task])]
            clients = read_task.clients
            # Close clients in unused tasks
            if tasks_to_run[1].op_type == "delete":
                clients += update_task.clients
            else:
                clients += delete_task.clients
            for client in clients:
                client.close()

        for task in tasks_to_run:
            # One new durability level consumed per task (see NOTE above)
            new_d_level = BucketDurability[b_durability_to_update.pop()]
            self.log.info("Starting %s task" % task.op_type)
            self.task_manager.add_new_task(task)
            if update_during_ops:
                self.sleep(5, "Wait for load_task to start before "
                              "setting durability=%s" % new_d_level)
            else:
                # Let the doc_op finish before updating durability
                self.task_manager.get_task_result(task)

            # Update bucket durability
            self.bucket_util.update_bucket_property(
                self.cluster.master,
                bucket_obj,
                bucket_durability=new_d_level)
            buckets = self.bucket_util.get_all_buckets(self.cluster)
            if buckets[0].durability_level != new_d_level:
                self.log_failure("Failed to update bucket_d_level to %s"
                                 % new_d_level)
            self.summary.add_step("Set bucket-durability=%s"
                                  % new_d_level)

            self.bucket_util.print_bucket_stats(self.cluster)

            if update_during_ops:
                # Task was left running; await it now
                self.task_manager.get_task_result(task)
            if task.fail:
                self.log_failure("Failures seen during %s"
                                 % task.op_type)
            self.summary.add_step("Doc op %s during bucket durability"
                                  % task.op_type)

        # Delete the bucket on server
        self.bucket_util.delete_bucket(self.cluster, bucket_obj)
        self.summary.add_step("Delete %s bucket" % self.bucket_type)
def test_update_durability_between_doc_op(self):
    """
    1. Create Bucket with durability level set.
    2. Bring down a node such that durability CRUD will wait
    3. Perform doc_op and update bucket_level_durability
    4. Revert scenario induced in step#2, such that doc_op will complete
    5. Make sure doc_ops in step#3 went through using prev. d-level
    """
    # Starting from max_durability levels because to iterate
    # all lower levels for doc_ops with level update
    supported_d_levels = deepcopy(self.d_level_order)
    if self.bucket_type == Bucket.Type.EPHEMERAL:
        # Ephemeral supports only the first two levels of the order
        supported_d_levels = supported_d_levels[0:2]

    # Highest level first; repeating the first element at the end makes
    # the loop finish by updating back to that highest level
    supported_d_levels.reverse()
    supported_d_levels += [supported_d_levels[0]]

    create_desc = "Creating %s bucket with level '%s'" \
                  % (self.bucket_type, supported_d_levels[0])
    self.log.info(create_desc)
    bucket_dict = self.get_bucket_dict(self.bucket_type,
                                       supported_d_levels[0])
    # Object to support performing CRUDs and create Bucket
    bucket_obj = Bucket(bucket_dict)
    self.bucket_util.create_bucket(self.cluster, bucket_obj,
                                   wait_for_warmup=True)
    self.get_vbucket_type_mapping(bucket_obj.name)
    self.summary.add_step(create_desc)

    self.bucket_util.print_bucket_stats(self.cluster)

    # Loop to update all other durability levels
    prev_d_level = supported_d_levels[0]
    for bucket_durability in supported_d_levels[1:]:
        # Error scenario that makes a sync_write at this level hang
        target_vb_type, simulate_error = \
            self.durability_helper.get_vb_and_error_type(bucket_durability)

        # Pick a random node to perform error sim and load
        random_node = choice(self.vbs_in_node.keys())
        error_sim = CouchbaseError(
            self.log,
            self.vbs_in_node[random_node]["shell"])

        # Single doc targeted at a vbucket affected by the error
        target_vbs = self.vbs_in_node[random_node][target_vb_type]
        doc_gen = doc_generator(self.key, 0, 1,
                                target_vbucket=target_vbs)

        doc_load_task = self.task.async_load_gen_docs(
            self.cluster, bucket_obj, doc_gen, "update",
            durability=Bucket.DurabilityLevel.NONE,
            timeout_secs=60,
            start_task=False,
            sdk_client_pool=self.sdk_client_pool)

        # Simulate target error condition
        error_sim.create(simulate_error)
        self.sleep(5, "Wait before starting doc_op")
        self.task_manager.add_new_task(doc_load_task)

        # Update durability while the doc_op is (possibly) blocked
        new_d_level = BucketDurability[bucket_durability]
        self.sleep(5, "Wait before updating bucket level "
                      "durability=%s" % new_d_level)

        self.bucket_util.update_bucket_property(
            self.cluster.master,
            bucket_obj,
            bucket_durability=new_d_level)
        self.bucket_util.print_bucket_stats(self.cluster)

        buckets = self.bucket_util.get_all_buckets(self.cluster)
        if buckets[0].durability_level != new_d_level:
            self.log_failure("Failed to update bucket_d_level to %s"
                             % new_d_level)
        self.summary.add_step("Set bucket-durability=%s" % new_d_level)

        # The doc_op must have used the PREVIOUS level: under 'NONE' it
        # completes despite the error; otherwise it must still be blocked
        if prev_d_level == Bucket.DurabilityLevel.NONE:
            if not doc_load_task.completed:
                self.log_failure("Doc-op still pending for d_level 'NONE'")
        elif doc_load_task.completed:
            self.log_failure("Doc-op completed before reverting the "
                             "error condition: %s" % simulate_error)

        # Revert the induced error condition
        error_sim.revert(simulate_error)
        self.task_manager.get_task_result(doc_load_task)
        if doc_load_task.fail:
            self.log_failure("Doc_op failed")
        self.summary.add_step("Doc_op with previous d_level %s"
                              % prev_d_level)
        prev_d_level = bucket_durability

    # Delete the bucket on server
    self.bucket_util.delete_bucket(self.cluster, bucket_obj)
    self.summary.add_step("Delete %s bucket" % self.bucket_type)
def test_sync_write_in_progress(self):
    """
    Test to simulate sync_write_in_progress error and validate the behavior
    This will validate failure in majority of nodes, where durability will
    surely fail for all CRUDs

    1. Select nodes to simulate the error which will affect the durability
    2. Enable the specified error_scenario on the selected nodes
    3. Perform individual CRUDs and verify sync_write_in_progress errors
    4. Validate the end results

    Fix: the inner test_scenario() previously referenced the OUTER loop
    variable 'doc_op' instead of its own 'doc_ops' parameter; it worked
    only because the caller always passed that same object. It now uses
    the parameter consistently.
    """
    def test_scenario(bucket, doc_ops,
                      with_sync_write_val=None):
        """Run one [op_1, op_2, init_flag] variation against 'bucket'.

        op_1 is blocked mid-sync_write via error simulation; op_2 is
        then attempted on the same keys and must fail with a
        sync_write_in_progress retry reason.
        """
        # Set crud_batch_size
        crud_batch_size = 4
        simulate_error = CouchbaseError.STOP_MEMCACHED

        # Fetch target_vbs for CRUDs: replica vbuckets common to ALL
        # target nodes, so the error affects every sync_write
        node_vb_info = self.vbs_in_node
        target_vbuckets = node_vb_info[target_nodes[0]]["replica"]
        if len(target_nodes) > 1:
            index = 1
            while index < len(target_nodes):
                target_vbuckets = list(
                    set(target_vbuckets).intersection(
                        set(node_vb_info[target_nodes[index]]["replica"]))
                )
                index += 1

        # Variable to hold one of the doc_generator objects
        gen_loader_1 = None
        gen_loader_2 = None

        # Initialize doc_generators to use for testing
        self.log.info("Creating doc_generators")
        gen_create = doc_generator(
            self.key, self.num_items, crud_batch_size,
            vbuckets=self.cluster.vbuckets,
            target_vbucket=target_vbuckets)
        gen_update = doc_generator(
            self.key, 0, crud_batch_size,
            vbuckets=self.cluster.vbuckets,
            target_vbucket=target_vbuckets, mutate=1)
        gen_delete = doc_generator(
            self.key, 0, crud_batch_size,
            vbuckets=self.cluster.vbuckets,
            target_vbucket=target_vbuckets)
        self.log.info("Done creating doc_generators")

        # Start CRUD operation based on the given 'doc_ops' type
        if doc_ops[0] == "create":
            self.num_items += crud_batch_size
            gen_loader_1 = gen_create
        elif doc_ops[0] in ["update", "replace", "touch"]:
            gen_loader_1 = gen_update
        elif doc_ops[0] == "delete":
            gen_loader_1 = gen_delete
            self.num_items -= crud_batch_size

        if doc_ops[1] == "create":
            gen_loader_2 = gen_create
        elif doc_ops[1] in ["update", "replace", "touch"]:
            gen_loader_2 = gen_update
        elif doc_ops[1] == "delete":
            gen_loader_2 = gen_delete

        # Load required docs for doc_op_1 in case of type != create
        if doc_ops[2] == "load_initial_docs":
            doc_loading_task = self.task.async_load_gen_docs(
                self.cluster, bucket, gen_loader_1, "create", 0,
                batch_size=crud_batch_size, process_concurrency=1,
                timeout_secs=10,
                print_ops_rate=False,
                sdk_client_pool=self.sdk_client_pool)
            self.task_manager.get_task_result(doc_loading_task)
            if doc_loading_task.fail:
                self.log_failure("Failure while loading initial docs")
            self.summary.add_step("Create docs for %s" % doc_ops[0])
            # Initial load is also a sync_write due to bucket durability
            verification_dict["ops_create"] += crud_batch_size
            verification_dict["sync_write_committed_count"] \
                += crud_batch_size

        # Initialize tasks and store the task objects
        doc_loader_task = self.task.async_load_gen_docs(
            self.cluster, bucket, gen_loader_1, doc_ops[0], 0,
            batch_size=crud_batch_size, process_concurrency=8,
            timeout_secs=60,
            print_ops_rate=False,
            start_task=False,
            sdk_client_pool=self.sdk_client_pool)

        # SDK client for performing individual ops
        client = SDKClient([self.cluster.master], bucket)

        # Perform specified action
        for node in target_nodes:
            error_sim = CouchbaseError(self.log,
                                       self.vbs_in_node[node]["shell"])
            error_sim.create(simulate_error,
                             bucket_name=bucket.name)
        self.sleep(5, "Wait for error simulation to take effect")

        self.task_manager.add_new_task(doc_loader_task)
        self.sleep(5, "Wait for task_1 CRUDs to reach server")

        # Perform specified CRUD operation on sync_write docs
        tem_gen = deepcopy(gen_loader_2)
        while tem_gen.has_next():
            key, value = tem_gen.next()
            for retry_strategy in [
                    SDKConstants.RetryStrategy.FAIL_FAST,
                    SDKConstants.RetryStrategy.BEST_EFFORT]:
                if with_sync_write_val:
                    fail = client.crud(doc_ops[1], key, value=value,
                                       exp=0,
                                       durability=with_sync_write_val,
                                       timeout=3, time_unit="seconds",
                                       sdk_retry_strategy=retry_strategy)
                else:
                    fail = client.crud(doc_ops[1], key, value=value,
                                       exp=0,
                                       timeout=3, time_unit="seconds",
                                       sdk_retry_strategy=retry_strategy)
                expected_exception = SDKException.AmbiguousTimeoutException
                retry_reason = \
                    SDKException.RetryReason.KV_SYNC_WRITE_IN_PROGRESS
                if retry_strategy == SDKConstants.RetryStrategy.FAIL_FAST:
                    expected_exception = \
                        SDKException.RequestCanceledException
                    retry_reason = \
                        SDKException.RetryReason \
                        .KV_SYNC_WRITE_IN_PROGRESS_NO_MORE_RETRIES

                # Validate the returned error from the SDK
                if expected_exception not in str(fail["error"]):
                    self.log_failure("Invalid exception for {0}: {1}"
                                     .format(key, fail["error"]))
                if retry_reason not in str(fail["error"]):
                    self.log_failure("Invalid retry reason for {0}: {1}"
                                     .format(key, fail["error"]))

                # Try reading the value in SyncWrite in-progress state
                fail = client.crud("read", key)
                if doc_ops[0] == "create":
                    # Expected KeyNotFound in case of CREATE operation
                    if fail["status"] is True:
                        self.log_failure(
                            "%s returned value during SyncWrite state: %s"
                            % (key, fail))
                else:
                    # Expects prev value in case of other operations
                    if fail["status"] is False:
                        self.log_failure(
                            "Key %s read failed for previous value: %s"
                            % (key, fail))

        # Revert the introduced error condition
        for node in target_nodes:
            error_sim = CouchbaseError(self.log,
                                       self.vbs_in_node[node]["shell"])
            error_sim.revert(simulate_error,
                             bucket_name=bucket.name)

        # Wait for doc_loader_task to complete
        self.task.jython_task_manager.get_task_result(doc_loader_task)
        verification_dict["ops_%s" % doc_ops[0]] += crud_batch_size
        verification_dict["sync_write_committed_count"] \
            += crud_batch_size

        # Disconnect the client
        client.close()

    # [op_1, op_2, initial-load flag] variations to exercise
    crud_variations = [
        ["create", "create", ""],
        ["update", "update", "load_initial_docs"],
        ["update", "delete", ""],
        ["update", "touch", ""],
        ["update", "replace", ""],
        ["delete", "delete", ""],
        ["delete", "update", "load_initial_docs"],
        ["delete", "touch", "load_initial_docs"],
        ["delete", "replace", "load_initial_docs"]
    ]

    # Select nodes to affect and open required shell_connections
    target_nodes = self.getTargetNodes()

    for b_d_level in self.possible_d_levels[self.bucket_type]:
        # Skip bucket durability level 'None'
        if b_d_level == Bucket.DurabilityLevel.NONE:
            continue

        verification_dict = self.get_cb_stat_verification_dict()

        create_desc = "Creating %s bucket with level '%s'" \
                      % (self.bucket_type, b_d_level)
        self.log.info(create_desc)
        bucket_dict = self.get_bucket_dict(self.bucket_type, b_d_level)
        # Object to support performing CRUDs and create Bucket
        bucket_obj = Bucket(bucket_dict)
        self.bucket_util.create_bucket(self.cluster, bucket_obj,
                                       wait_for_warmup=True)
        self.get_vbucket_type_mapping(bucket_obj.name)
        self.summary.add_step(create_desc)

        for doc_op in crud_variations:
            test_scenario(bucket_obj, doc_op)
            self.summary.add_step("SyncWriteInProgress for [%s, %s]"
                                  % (doc_op[0], doc_op[1]))

        # Cbstats vbucket-details validation
        self.cb_stat_verify(verification_dict)

        # Bucket deletion
        self.bucket_util.delete_bucket(self.cluster, bucket_obj)
        self.summary.add_step("Delete %s bucket" % self.bucket_type)
def test_observe_scenario(self):
    """
    Creates bucket with bucket level durability.
    Perform CRUD operations and make sure all the operations are
    done as sync_write in server.

    Note: Passing persistTo/replicateTo will test the observe scenarios
    """
    def perform_crud_ops():
        """Run a fixed CRUD sequence on 'key' and validate CAS movement:
        every mutation must change the CAS, a read must not."""
        old_cas = 0
        client = SDKClient([self.cluster.master], bucket_obj)

        for op_type in ["create", "update", "read", "replace", "delete"]:
            crud_desc = "Key %s, doc_op: %s" % (key, op_type)
            self.log.info(crud_desc)
            result = client.crud(op_type, key, value,
                                 replicate_to=self.replicate_to,
                                 persist_to=self.persist_to)

            if op_type != "read":
                # 'replace' is counted under ops_update in cbstats
                if op_type != "replace":
                    dict_key = "ops_%s" % op_type
                else:
                    dict_key = "ops_update"
                verification_dict[dict_key] += 1
                verification_dict["sync_write_committed_count"] += 1
                if result["cas"] == old_cas:
                    self.log_failure("CAS didn't get updated: %s"
                                     % result["cas"])
            elif op_type == "read":
                if result["cas"] != old_cas:
                    self.log_failure("CAS updated for read operation: %s"
                                     % result["cas"])

            self.summary.add_step(crud_desc)
            old_cas = result["cas"]

        client.close()

    # Single doc used for all CRUDs
    doc_gen = doc_generator("test_key", 0, 1, mutate=0)
    key, value = doc_gen.next()
    for d_level in self.possible_d_levels[self.bucket_type]:
        # Durability 'None' cannot prove server-side sync_writes
        if d_level == Bucket.DurabilityLevel.NONE:
            continue

        create_desc = "Create bucket with durability %s" % d_level
        self.log.info(create_desc)
        bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
        # Object to support performing CRUDs
        bucket_obj = Bucket(bucket_dict)
        self.bucket_util.create_bucket(self.cluster, bucket_obj,
                                       wait_for_warmup=True)
        self.summary.add_step(create_desc)

        verification_dict = self.get_cb_stat_verification_dict()

        # Test CRUD operations
        perform_crud_ops()

        # Validate doc_count (sequence ends with a delete => 0 docs)
        self.bucket_util._wait_for_stats_all_buckets(self.cluster,
                                                     self.cluster.buckets)
        self.bucket_util.verify_stats_all_buckets(self.cluster, 0)

        # Cbstats vbucket-details validation
        self.cb_stat_verify(verification_dict)

        # Delete the created bucket
        self.bucket_util.delete_bucket(self.cluster, bucket_obj)
        self.summary.add_step("Delete bucket with d_level %s" % d_level)
def test_durability_impossible(self):
    """
    Create bucket with replica > num_kv_nodes.
    Perform doc insert to make sure we get TimeoutException due to
    durability_impossible from the server.

    Fix: the final add_step line carried dataset-extraction junk
    ("| pytests/... | from copy import deepcopy") appended to it,
    making the statement invalid; the clean statement is restored.
    """
    verification_dict = self.get_cb_stat_verification_dict()
    key, value = doc_generator("test_key", 0, 1).next()
    for d_level in self.possible_d_levels[self.bucket_type]:
        # Durability 'None' cannot raise DurabilityImpossible
        if d_level == Bucket.DurabilityLevel.NONE:
            continue

        bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
        # Object to support performing CRUDs
        bucket_obj = Bucket(bucket_dict)
        self.bucket_util.create_bucket(self.cluster, bucket_obj,
                                       wait_for_warmup=True)
        self.summary.add_step("Create bucket with durability %s"
                              % d_level)

        # The create must fail with DurabilityImpossibleException since
        # the cluster cannot satisfy the replica requirement
        client = SDKClient([self.cluster.master], bucket_obj)
        result = client.crud("create", key, value, timeout=3)
        if result["status"] is True \
                or SDKException.DurabilityImpossibleException \
                not in result["error"]:
            self.log_failure("Indirect sync_write succeeded "
                             "without enough nodes")
        client.close()

        # Cbstats vbucket-details validation (no op should have landed)
        self.cb_stat_verify(verification_dict)

        # Delete the created bucket
        self.bucket_util.delete_bucket(self.cluster, bucket_obj)
        self.summary.add_step("Delete bucket with d_level %s" % d_level)
from random import sample, choice
from BucketLib.bucket import Bucket
from cb_tools.cb_cli import CbCli
from constants.sdk_constants.java_client import SDKConstants
from couchbase_helper.documentgenerator import doc_generator
from couchbase_helper.durability_helper import BucketDurability
from epengine.durability_base import BucketDurabilityBase
from error_simulation.cb_error import CouchbaseError
from sdk_client3 import SDKClient
from sdk_exceptions import SDKException
class CreateBucketTests(BucketDurabilityBase):
def setUp(self):
    """Delegate to the shared BucketDurabilityBase setup."""
    super(CreateBucketTests, self).setUp()
def tearDown(self):
    """Delegate to the shared BucketDurabilityBase teardown."""
    super(CreateBucketTests, self).tearDown()
def test_create_bucket_using_cli(self):
    """
    Create Bucket with all possible durability_levels and make sure
    durability levels are honored for document CRUDs
    - Will test for all bucket types (Couchbase, Ephemeral, Memcached)
    - With all possible d_levels for bucket_durability
    - Perform doc insert for each bucket to validate the sync_writes
    """
    # Create cb_cli session object
    shell = self.vbs_in_node[self.cluster.master]["shell"]
    cb_cli = CbCli(shell)

    # Substring of the couchbase-cli error expected when a durability
    # level is combined with replicas=3
    err_for_three_replicas = "ERROR: durability_min_level - Durability " \
                             "minimum level cannot be specified with 3"

    for d_level in self.bucket_util.get_supported_durability_levels():
        create_failed = False
        test_step = "Creating %s bucket with level %s" \
                    % (self.bucket_type, d_level)

        bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
        # Remove unsupported replica string in case if MC bucket
        if self.bucket_type == Bucket.Type.MEMCACHED:
            del bucket_dict[Bucket.replicaNumber]

        # Object to support performing CRUDs
        bucket_obj = Bucket(bucket_dict)
        output = cb_cli.create_bucket(bucket_dict, wait=True)
        self.log.info(output)
        if self.num_replicas == Bucket.ReplicaNum.THREE \
                and d_level != Bucket.DurabilityLevel.NONE:
            # replica=3 + durability must be rejected by the CLI
            if err_for_three_replicas not in str(output):
                self.log_failure("Bucket created with replica=3")
            else:
                create_failed = True
        elif "SUCCESS: Bucket created" not in str(output):
            create_failed = True
            if d_level in self.possible_d_levels[self.bucket_type]:
                self.log_failure("Create failed for %s bucket "
                                 "with min_durability_level %s"
                                 % (self.bucket_type, d_level))
        else:
            # Wait for bucket warm_up to complete
            # NOTE(review): busy-wait with no timeout -- hangs forever
            # if warm-up never completes
            while not self.bucket_util.is_warmup_complete(self.cluster,
                                                          [bucket_obj]):
                pass
            self.get_vbucket_type_mapping(bucket_obj.name)
            self.cluster.buckets = [bucket_obj]
        self.bucket_util.print_bucket_stats(self.cluster)
        self.summary.add_step(test_step)

        # Perform CRUDs to validate bucket_creation with durability
        if not create_failed:
            verification_dict = self.get_cb_stat_verification_dict()
            self.validate_durability_with_crud(bucket_obj, d_level,
                                               verification_dict)
            self.summary.add_step("Validate_CRUD_operation")

            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)

        # Delete must fail with 'not found' when creation had failed
        output = cb_cli.delete_bucket(bucket_obj.name)
        if create_failed:
            if "ERROR: Bucket not found" not in str(output):
                self.log_failure("Mismatch in bucket-delete output")
        elif "SUCCESS: Bucket deleted" not in str(output):
            self.log_failure("Mismatch in bucket-delete output")
        self.summary.add_step("Delete bucket")
    def test_create_bucket_using_rest(self):
        """
        Create buckets through REST for every supported durability level.

        Creation is expected to raise when replicas=3 is combined with a
        non-NONE durability level, or when the level is unsupported for
        the current bucket type. Successful creations are validated via
        CRUDs + cbstats, then the bucket is deleted.
        """
        log_failure_msg = "Bucket creation succeeded for replica=3"
        for d_level in self.bucket_util.get_supported_durability_levels():
            create_failed = False
            test_step = "Creating %s bucket with level %s" \
                        % (self.bucket_type, d_level)
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            # Object to support performing CRUDs
            bucket_obj = Bucket(bucket_dict)
            try:
                self.bucket_util.create_bucket(self.cluster, bucket_obj,
                                               wait_for_warmup=True)
                self.get_vbucket_type_mapping(bucket_obj.name)
                # Reaching here means creation succeeded; fail the test if
                # this combination should have been rejected by the server
                if self.num_replicas == Bucket.ReplicaNum.THREE:
                    if d_level != Bucket.DurabilityLevel.NONE:
                        self.log_failure(log_failure_msg)
                elif d_level not in self.possible_d_levels[self.bucket_type]:
                    self.log_failure("Create succeeded for %s bucket for "
                                     "unsupported durability %s"
                                     % (self.bucket_type, d_level))
            except Exception as rest_exception:
                # Expected path for unsupported level / replica combos
                create_failed = True
                self.log.debug(rest_exception)
            self.bucket_util.print_bucket_stats(self.cluster)
            self.summary.add_step(test_step)
            # Perform CRUDs to validate bucket_creation with durability
            if not create_failed:
                verification_dict = self.get_cb_stat_verification_dict()
                self.validate_durability_with_crud(bucket_obj, d_level,
                                                   verification_dict)
                self.summary.add_step("Validate CRUD operation")
                # Cbstats vbucket-details validation
                self.cb_stat_verify(verification_dict)
            # NOTE(review): delete is attempted even when create failed —
            # presumably bucket_util tolerates deleting a missing bucket;
            # confirm against its implementation
            self.bucket_util.delete_bucket(self.cluster, bucket_obj)
            self.summary.add_step("Bucket deletion")
class BucketDurabilityTests(BucketDurabilityBase):
    """Functional tests for bucket-level (minimum) durability behavior."""

    def setUp(self):
        # No extra fixtures; base class performs all cluster/bucket setup
        super(BucketDurabilityTests, self).setUp()

    def tearDown(self):
        # Base class handles cluster cleanup and result validation
        super(BucketDurabilityTests, self).tearDown()
    def test_durability_with_bucket_level_none(self):
        """
        Create Buckets with NONE durability level.
        Attempts sync_write with different durability_levels and validate
        CRUDs are honored with respective durability_levels set from clients
        """
        create_desc = "Creating %s bucket with level 'None'" % self.bucket_type
        b_durability = Bucket.DurabilityLevel.NONE
        verification_dict = self.get_cb_stat_verification_dict()
        bucket_dict = self.get_bucket_dict(self.bucket_type, b_durability)
        self.log.info(create_desc)
        # Object to support performing CRUDs and create Bucket
        bucket_obj = Bucket(bucket_dict)
        self.bucket_util.create_bucket(self.cluster, bucket_obj,
                                       wait_for_warmup=True)
        self.get_vbucket_type_mapping(bucket_obj.name)
        self.summary.add_step(create_desc)
        # Index for doc_gen to avoid creating/deleting same docs across d_level
        index = 0
        for d_level in self.get_supported_durability_for_bucket():
            # Bucket stays at durability NONE; only the per-document
            # durability (doc_durability) varies each iteration
            self.validate_durability_with_crud(bucket_obj, b_durability,
                                               verification_dict,
                                               doc_durability=d_level,
                                               doc_start_index=index)
            self.summary.add_step("CRUD with doc_durability %s" % d_level)
            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)
            index += 10
def test_ops_only_with_bucket_level_durability(self):
"""
Create Buckets with durability_levels set and perform
CRUDs from client without explicitly setting the durability and
validate the ops to make sure respective durability is honored
"""
for d_level in self.get_supported_durability_for_bucket():
# Avoid creating bucket with durability=None
if d_level == Bucket.DurabilityLevel.NONE:
continue
step_desc = "Creating %s bucket with level '%s'" \
% (self.bucket_type, d_level)
verification_dict = self.get_cb_stat_verification_dict()
self.log.info(step_desc)
# Object to support performing CRUDs and create Bucket
bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
bucket_obj = Bucket(bucket_dict)
self.bucket_util.create_bucket(self.cluster, bucket_obj,
wait_for_warmup=True)
self.get_vbucket_type_mapping(bucket_obj.name)
self.summary.add_step(step_desc)
self.validate_durability_with_crud(bucket_obj, d_level,
verification_dict)
self.summary.add_step("Async write with bucket durability %s"
% d_level)
# Cbstats vbucket-details validation
self.cb_stat_verify(verification_dict)
# Delete the bucket on server
self.bucket_util.delete_bucket(self.cluster, bucket_obj)
self.summary.add_step("Delete %s bucket" % self.bucket_type)
    def test_sub_doc_op_with_bucket_level_durability(self):
        """
        Create Buckets with durability_levels set and perform
        Sub_doc CRUDs from client without durability settings and
        validate the ops to make sure respective durability is honored
        """
        key, value = doc_generator("test_key", 0, 1).next()
        sub_doc_key = "sub_doc_key"
        sub_doc_vals = ["val_1", "val_2", "val_3", "val_4", "val_5"]
        for d_level in self.get_supported_durability_for_bucket():
            # Avoid creating bucket with durability=None
            if d_level == Bucket.DurabilityLevel.NONE:
                continue
            step_desc = "Creating %s bucket with level '%s'" \
                        % (self.bucket_type, d_level)
            verification_dict = self.get_cb_stat_verification_dict()
            self.log.info(step_desc)
            # Object to support performing CRUDs and create Bucket
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(self.cluster, bucket_obj,
                                           wait_for_warmup=True)
            self.summary.add_step(step_desc)
            # SDK client to perform sub_doc ops
            client = SDKClient([self.cluster.master], bucket_obj)
            # Parent doc must exist before sub_doc mutations
            result = client.crud("create", key, value)
            verification_dict["ops_create"] += 1
            verification_dict["sync_write_committed_count"] += 1
            if result["status"] is False:
                self.log_failure("Doc insert failed for key: %s" % key)
            # Perform sub_doc CRUD
            for sub_doc_op in ["subdoc_insert", "subdoc_upsert",
                               "subdoc_replace"]:
                sub_doc_val = choice(sub_doc_vals)
                _, fail = client.crud(sub_doc_op, key,
                                      [sub_doc_key, sub_doc_val])
                if fail:
                    self.log_failure("%s failure. Key %s, sub_doc (%s, %s): %s"
                                     % (sub_doc_op, key,
                                        sub_doc_key, sub_doc_val, result))
                else:
                    # Every sub_doc mutation counts as an update and is
                    # expected to be committed as a sync_write
                    verification_dict["ops_update"] += 1
                    verification_dict["sync_write_committed_count"] += 1
                    # Read back and confirm the sub_doc value was stored
                    success, fail = client.crud("subdoc_read", key, sub_doc_key)
                    if fail or str(success[key]["value"].get(0)) != sub_doc_val:
                        self.log_failure("%s failed. Expected: %s, Actual: %s"
                                         % (sub_doc_op, sub_doc_val,
                                            success[key]["value"].get(0)))
                self.summary.add_step("%s for key %s" % (sub_doc_op, key))
            # Subdoc_delete and verify
            sub_doc_op = "subdoc_delete"
            _, fail = client.crud(sub_doc_op, key, sub_doc_key)
            if fail:
                self.log_failure("%s failure. Key %s, sub_doc (%s, %s): %s"
                                 % (sub_doc_op, key,
                                    sub_doc_key, sub_doc_val, result))
            verification_dict["ops_update"] += 1
            verification_dict["sync_write_committed_count"] += 1
            # A second delete of the same path must fail with PathNotFound
            _, fail = client.crud(sub_doc_op, key, sub_doc_key)
            if SDKException.PathNotFoundException \
                    not in str(fail[key]["error"]):
                self.log_failure("Invalid error after sub_doc_delete")
            self.summary.add_step("%s for key %s" % (sub_doc_op, key))
            # Validate doc_count
            self.bucket_util._wait_for_stats_all_buckets(self.cluster,
                                                         self.cluster.buckets)
            self.bucket_util.verify_stats_all_buckets(self.cluster, 1)
            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)
            # Close SDK client
            client.close()
            # Delete the bucket on server
            self.bucket_util.delete_bucket(self.cluster, bucket_obj)
            self.summary.add_step("Delete %s bucket" % self.bucket_type)
    def test_higher_durability_level_from_client(self):
        """
        Create bucket with durability_levels set and perform CRUDs using
        durability_level > the bucket's durability_level and validate
        """
        d_level_order_len = len(self.d_level_order)
        supported_d_levels = self.get_supported_durability_for_bucket()
        for d_level in supported_d_levels:
            create_desc = "Creating %s bucket with level '%s'" \
                          % (self.bucket_type, d_level)
            verification_dict = self.get_cb_stat_verification_dict()
            self.log.info(create_desc)
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            # Object to support performing CRUDs and create Bucket
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(self.cluster, bucket_obj,
                                           wait_for_warmup=True)
            self.get_vbucket_type_mapping(bucket_obj.name)
            self.summary.add_step(create_desc)
            # Perform doc_ops using all possible higher durability levels
            index = 0
            op_type = "create"
            # Start from the level just above the bucket's own level
            durability_index = self.d_level_order.index(d_level) + 1
            while durability_index < d_level_order_len:
                # Ephemeral case: skip levels this bucket type can't use
                if self.d_level_order[durability_index] \
                        not in supported_d_levels:
                    durability_index += 1
                    continue
                self.validate_durability_with_crud(
                    bucket_obj,
                    d_level,
                    verification_dict,
                    op_type=op_type,
                    doc_durability=self.d_level_order[durability_index],
                    doc_start_index=index)
                self.summary.add_step("%s with doc_level_durability %s"
                                      % (op_type,
                                         self.d_level_order[durability_index]))
                durability_index += 1
                # Shift the key range so levels don't reuse the same docs
                index += 10
            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)
            # Delete the bucket on server
            self.bucket_util.delete_bucket(self.cluster, bucket_obj)
            self.summary.add_step("Delete %s bucket" % self.bucket_type)
    def test_lower_durability_level_from_client(self):
        """
        Create bucket with durability_levels set and perform CRUDs using
        durability_level < the bucket's d_level and validate
        """
        for d_level in self.get_supported_durability_for_bucket():
            create_desc = "Creating %s bucket with level '%s'" \
                          % (self.bucket_type, d_level)
            verification_dict = self.get_cb_stat_verification_dict()
            self.log.info(create_desc)
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            # Object to support performing CRUDs and create Bucket
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(self.cluster, bucket_obj,
                                           wait_for_warmup=True)
            self.get_vbucket_type_mapping(bucket_obj.name)
            self.summary.add_step(create_desc)
            # Perform doc_ops using all possible lower durability levels
            index = 0
            op_type = "create"
            # Walk downwards from the level just below the bucket's own
            durability_index = self.d_level_order.index(d_level) - 1
            while durability_index >= 0:
                self.validate_durability_with_crud(
                    bucket_obj,
                    d_level,
                    verification_dict,
                    op_type=op_type,
                    doc_durability=self.d_level_order[durability_index],
                    doc_start_index=index)
                self.summary.add_step("%s with doc_level_durability %s"
                                      % (op_type,
                                         self.d_level_order[durability_index]))
                durability_index -= 1
                # Shift the key range so levels don't reuse the same docs
                index += 10
            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)
            # Delete the bucket on server
            self.bucket_util.delete_bucket(self.cluster, bucket_obj)
            self.summary.add_step("Delete %s bucket" % self.bucket_type)
    def test_update_durability_level(self):
        """
        Create buckets with None durability levels and perform doc_ops.
        Update bucket_durability using diag-eval with/without doc_ops in
        parallel and validate the doc_ops results.
        """
        update_during_ops = self.input.param("update_during_ops", False)
        supported_d_levels = self.get_supported_durability_for_bucket()
        supported_bucket_d_levels = self.possible_d_levels[self.bucket_type]
        create_gen_1 = doc_generator(self.key, 0, self.num_items)
        create_gen_2 = doc_generator("random_keys", self.num_items,
                                     self.num_items*2)
        update_gen = doc_generator(self.key, 0, self.num_items/2)
        delete_gen = doc_generator(self.key, self.num_items/2, self.num_items)
        # Override sdk_timeout to max value to avoid TimeoutExceptions
        self.sdk_timeout = 60
        # sample(xs, len(xs)) == shuffled copy of the supported levels
        for bucket_durability in sample(supported_bucket_d_levels,
                                        len(supported_bucket_d_levels)):
            # NOTE(review): set(bucket_durability) builds a set of the
            # string's CHARACTERS, so this subtraction removes no level and
            # the list keeps all entries. That appears accidental, but the
            # .pop() per task below relies on the full-length list — confirm
            # intent before "fixing".
            b_durability_to_update = list(set(supported_bucket_d_levels)
                                          - set(bucket_durability))
            create_desc = "Create %s bucket with durability level '%s'" \
                          % (self.bucket_type, bucket_durability)
            self.log.info(create_desc)
            bucket_dict = self.get_bucket_dict(self.bucket_type,
                                               bucket_durability)
            # Object to support performing CRUDs and create Bucket
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(self.cluster, bucket_obj,
                                           wait_for_warmup=True)
            self.get_vbucket_type_mapping(bucket_obj.name)
            self.summary.add_step(create_desc)
            self.bucket_util.print_bucket_stats(self.cluster)
            # Load basic docs to support other CRUDs
            self.log.info("Performing initial doc_load")
            create_task = self.task.async_load_gen_docs(
                self.cluster, bucket_obj, create_gen_1, "create",
                exp=self.maxttl,
                compression=self.sdk_compression,
                timeout_secs=self.sdk_timeout,
                process_concurrency=8,
                batch_size=200,
                sdk_client_pool=self.sdk_client_pool)
            self.task_manager.get_task_result(create_task)
            if create_task.fail:
                self.log_failure("Failures seen during initial creates")
            self.summary.add_step("Initial doc_loading")
            # Initiate CRUD task objects (start_task=False: started later,
            # one at a time, in the loop below)
            create_task = self.task.async_load_gen_docs(
                self.cluster, bucket_obj, create_gen_2, "create",
                exp=self.maxttl,
                durability=choice(supported_d_levels),
                compression=self.sdk_compression,
                timeout_secs=self.sdk_timeout,
                process_concurrency=2,
                batch_size=100,
                start_task=False,
                print_ops_rate=False,
                sdk_client_pool=self.sdk_client_pool)
            update_task = self.task.async_load_gen_docs(
                self.cluster, bucket_obj, update_gen, "update",
                exp=self.maxttl,
                durability=choice(supported_d_levels),
                compression=self.sdk_compression,
                timeout_secs=self.sdk_timeout,
                process_concurrency=2,
                batch_size=100,
                start_task=False,
                print_ops_rate=False,
                sdk_client_pool=self.sdk_client_pool)
            read_task = self.task.async_load_gen_docs(
                self.cluster, bucket_obj, update_gen, "read",
                compression=self.sdk_compression,
                timeout_secs=self.sdk_timeout,
                process_concurrency=2,
                batch_size=100,
                start_task=False,
                print_ops_rate=False,
                sdk_client_pool=self.sdk_client_pool)
            delete_task = self.task.async_load_gen_docs(
                self.cluster, bucket_obj, delete_gen, "delete",
                exp=self.maxttl,
                durability=choice(supported_d_levels),
                compression=self.sdk_compression,
                timeout_secs=self.sdk_timeout,
                process_concurrency=2,
                batch_size=100,
                start_task=False,
                print_ops_rate=False,
                sdk_client_pool=self.sdk_client_pool)
            # Start CRUD and update bucket-durability as specified
            # by config param 'update_during_ops'
            tasks_to_run = [create_task, update_task,
                            read_task, delete_task]
            if self.bucket_type == Bucket.Type.EPHEMERAL:
                # Ephemeral runs only create + one of update/delete
                tasks_to_run = [create_task,
                                choice([update_task, delete_task])]
                clients = read_task.clients
                # Close clients in unused tasks
                if tasks_to_run[1].op_type == "delete":
                    clients += update_task.clients
                else:
                    clients += delete_task.clients
                for client in clients:
                    client.close()
            for task in tasks_to_run:
                # One fresh durability level per task
                new_d_level = BucketDurability[b_durability_to_update.pop()]
                self.log.info("Starting %s task" % task.op_type)
                self.task_manager.add_new_task(task)
                if update_during_ops:
                    # Update the level while the load is in flight
                    self.sleep(5, "Wait for load_task to start before "
                                  "setting durability=%s" % new_d_level)
                else:
                    # Or wait for the load to finish, then update
                    self.task_manager.get_task_result(task)
                # Update bucket durability
                self.bucket_util.update_bucket_property(
                    self.cluster.master,
                    bucket_obj,
                    bucket_durability=new_d_level)
                buckets = self.bucket_util.get_all_buckets(self.cluster)
                if buckets[0].durability_level != new_d_level:
                    self.log_failure("Failed to update bucket_d_level to %s"
                                     % new_d_level)
                self.summary.add_step("Set bucket-durability=%s"
                                      % new_d_level)
                self.bucket_util.print_bucket_stats(self.cluster)
                if update_during_ops:
                    self.task_manager.get_task_result(task)
                if task.fail:
                    self.log_failure("Failures seen during %s"
                                     % task.op_type)
                self.summary.add_step("Doc op %s during bucket durability"
                                      % task.op_type)
            # Delete the bucket on server
            self.bucket_util.delete_bucket(self.cluster, bucket_obj)
            self.summary.add_step("Delete %s bucket" % self.bucket_type)
    def test_update_durability_between_doc_op(self):
        """
        1. Create Bucket with durability level set.
        2. Bring down a node such that durability CRUD will wait
        3. Perform doc_op and update bucket_level_durability
        4. Revert scenario induced in step#2, such that doc_op will complete
        5. Make sure doc_ops in step#3 went through using prev. d-level
        """
        # Starting from max_durability levels because to iterate
        # all lower levels for doc_ops with level update
        supported_d_levels = deepcopy(self.d_level_order)
        if self.bucket_type == Bucket.Type.EPHEMERAL:
            supported_d_levels = supported_d_levels[0:2]
        supported_d_levels.reverse()
        # Append the strongest level again so the final iteration wraps
        # back around to it
        supported_d_levels += [supported_d_levels[0]]
        create_desc = "Creating %s bucket with level '%s'" \
                      % (self.bucket_type, supported_d_levels[0])
        self.log.info(create_desc)
        bucket_dict = self.get_bucket_dict(self.bucket_type,
                                           supported_d_levels[0])
        # Object to support performing CRUDs and create Bucket
        bucket_obj = Bucket(bucket_dict)
        self.bucket_util.create_bucket(self.cluster, bucket_obj,
                                       wait_for_warmup=True)
        self.get_vbucket_type_mapping(bucket_obj.name)
        self.summary.add_step(create_desc)
        self.bucket_util.print_bucket_stats(self.cluster)
        # Loop to update all other durability levels
        prev_d_level = supported_d_levels[0]
        for bucket_durability in supported_d_levels[1:]:
            # Which vbucket type to target and which error to inject so
            # that a write at this durability level stalls
            target_vb_type, simulate_error = \
                self.durability_helper.get_vb_and_error_type(bucket_durability)
            # Pick a random node to perform error sim and load
            random_node = choice(self.vbs_in_node.keys())
            error_sim = CouchbaseError(
                self.log,
                self.vbs_in_node[random_node]["shell"])
            target_vbs = self.vbs_in_node[random_node][target_vb_type]
            # Single doc targeted at the affected vbuckets
            doc_gen = doc_generator(self.key, 0, 1,
                                    target_vbucket=target_vbs)
            doc_load_task = self.task.async_load_gen_docs(
                self.cluster, bucket_obj, doc_gen, "update",
                durability=Bucket.DurabilityLevel.NONE,
                timeout_secs=60,
                start_task=False,
                sdk_client_pool=self.sdk_client_pool)
            # Simulate target error condition
            error_sim.create(simulate_error)
            self.sleep(5, "Wait before starting doc_op")
            self.task_manager.add_new_task(doc_load_task)
            new_d_level = BucketDurability[bucket_durability]
            self.sleep(5, "Wait before updating bucket level "
                          "durability=%s" % new_d_level)
            self.bucket_util.update_bucket_property(
                self.cluster.master,
                bucket_obj,
                bucket_durability=new_d_level)
            self.bucket_util.print_bucket_stats(self.cluster)
            buckets = self.bucket_util.get_all_buckets(self.cluster)
            if buckets[0].durability_level != new_d_level:
                self.log_failure("Failed to update bucket_d_level to %s"
                                 % new_d_level)
            self.summary.add_step("Set bucket-durability=%s" % new_d_level)
            # With prev. level NONE the op needs no durability ack and must
            # already be done; any other level must still be blocked by the
            # injected error
            if prev_d_level == Bucket.DurabilityLevel.NONE:
                if not doc_load_task.completed:
                    self.log_failure("Doc-op still pending for d_level 'NONE'")
            elif doc_load_task.completed:
                self.log_failure("Doc-op completed before reverting the "
                                 "error condition: %s" % simulate_error)
            # Revert the induced error condition
            error_sim.revert(simulate_error)
            self.task_manager.get_task_result(doc_load_task)
            if doc_load_task.fail:
                self.log_failure("Doc_op failed")
            self.summary.add_step("Doc_op with previous d_level %s"
                                  % prev_d_level)
            prev_d_level = bucket_durability
        # Delete the bucket on server
        self.bucket_util.delete_bucket(self.cluster, bucket_obj)
        self.summary.add_step("Delete %s bucket" % self.bucket_type)
    def test_sync_write_in_progress(self):
        """
        Test to simulate sync_write_in_progress error and validate the behavior
        This will validate failure in majority of nodes, where durability will
        surely fail for all CRUDs
        1. Select nodes to simulate the error which will affect the durability
        2. Enable the specified error_scenario on the selected nodes
        3. Perform individual CRUDs and verify sync_write_in_progress errors
        4. Validate the end results
        """
        def test_scenario(bucket, doc_ops,
                          with_sync_write_val=None):
            # NOTE(review): this closure also reads the loop variable
            # 'doc_op' from the enclosing scope (see the load_initial_docs
            # branch and the final stat bookkeeping). It is always invoked
            # with doc_ops=doc_op, so the two are the same object, but using
            # doc_ops[...] consistently would be clearer.
            # Set crud_batch_size
            crud_batch_size = 4
            simulate_error = CouchbaseError.STOP_MEMCACHED
            # Fetch target_vbs for CRUDs
            node_vb_info = self.vbs_in_node
            target_vbuckets = node_vb_info[target_nodes[0]]["replica"]
            if len(target_nodes) > 1:
                index = 1
                while index < len(target_nodes):
                    # Keep only replica vbuckets common to all target nodes
                    target_vbuckets = list(
                        set(target_vbuckets).intersection(
                            set(node_vb_info[target_nodes[index]]["replica"]))
                    )
                    index += 1
            # Variable to hold one of the doc_generator objects
            gen_loader_1 = None
            gen_loader_2 = None
            # Initialize doc_generators to use for testing
            self.log.info("Creating doc_generators")
            gen_create = doc_generator(
                self.key, self.num_items, crud_batch_size,
                vbuckets=self.cluster.vbuckets,
                target_vbucket=target_vbuckets)
            gen_update = doc_generator(
                self.key, 0, crud_batch_size,
                vbuckets=self.cluster.vbuckets,
                target_vbucket=target_vbuckets, mutate=1)
            gen_delete = doc_generator(
                self.key, 0, crud_batch_size,
                vbuckets=self.cluster.vbuckets,
                target_vbucket=target_vbuckets)
            self.log.info("Done creating doc_generators")
            # Start CRUD operation based on the given 'doc_op' type
            if doc_ops[0] == "create":
                self.num_items += crud_batch_size
                gen_loader_1 = gen_create
            elif doc_ops[0] in ["update", "replace", "touch"]:
                gen_loader_1 = gen_update
            elif doc_ops[0] == "delete":
                gen_loader_1 = gen_delete
                self.num_items -= crud_batch_size
            if doc_ops[1] == "create":
                gen_loader_2 = gen_create
            elif doc_ops[1] in ["update", "replace", "touch"]:
                gen_loader_2 = gen_update
            elif doc_ops[1] == "delete":
                gen_loader_2 = gen_delete
            # Load required docs for doc_op_1 in case of type != create
            if doc_op[2] == "load_initial_docs":
                doc_loading_task = self.task.async_load_gen_docs(
                    self.cluster, bucket, gen_loader_1, "create", 0,
                    batch_size=crud_batch_size, process_concurrency=1,
                    timeout_secs=10,
                    print_ops_rate=False,
                    sdk_client_pool=self.sdk_client_pool)
                self.task_manager.get_task_result(doc_loading_task)
                if doc_loading_task.fail:
                    self.log_failure("Failure while loading initial docs")
                self.summary.add_step("Create docs for %s" % doc_op[0])
                verification_dict["ops_create"] += crud_batch_size
                verification_dict["sync_write_committed_count"] \
                    += crud_batch_size
            # Initialize tasks and store the task objects
            doc_loader_task = self.task.async_load_gen_docs(
                self.cluster, bucket, gen_loader_1, doc_ops[0], 0,
                batch_size=crud_batch_size, process_concurrency=8,
                timeout_secs=60,
                print_ops_rate=False,
                start_task=False,
                sdk_client_pool=self.sdk_client_pool)
            # SDK client for performing individual ops
            client = SDKClient([self.cluster.master], bucket)
            # Perform specified action
            for node in target_nodes:
                error_sim = CouchbaseError(self.log,
                                           self.vbs_in_node[node]["shell"])
                error_sim.create(simulate_error,
                                 bucket_name=bucket.name)
            self.sleep(5, "Wait for error simulation to take effect")
            self.task_manager.add_new_task(doc_loader_task)
            self.sleep(5, "Wait for task_1 CRUDs to reach server")
            # Perform specified CRUD operation on sync_write docs
            tem_gen = deepcopy(gen_loader_2)
            while tem_gen.has_next():
                key, value = tem_gen.next()
                for retry_strategy in [
                        SDKConstants.RetryStrategy.FAIL_FAST,
                        SDKConstants.RetryStrategy.BEST_EFFORT]:
                    # Second op on a doc with a pending sync_write must be
                    # rejected; run with and without explicit durability
                    if with_sync_write_val:
                        fail = client.crud(doc_ops[1], key, value=value,
                                           exp=0,
                                           durability=with_sync_write_val,
                                           timeout=3, time_unit="seconds",
                                           sdk_retry_strategy=retry_strategy)
                    else:
                        fail = client.crud(doc_ops[1], key, value=value,
                                           exp=0,
                                           timeout=3, time_unit="seconds",
                                           sdk_retry_strategy=retry_strategy)
                    expected_exception = SDKException.AmbiguousTimeoutException
                    retry_reason = \
                        SDKException.RetryReason.KV_SYNC_WRITE_IN_PROGRESS
                    if retry_strategy == SDKConstants.RetryStrategy.FAIL_FAST:
                        expected_exception = \
                            SDKException.RequestCanceledException
                        retry_reason = \
                            SDKException.RetryReason \
                            .KV_SYNC_WRITE_IN_PROGRESS_NO_MORE_RETRIES
                    # Validate the returned error from the SDK
                    if expected_exception not in str(fail["error"]):
                        self.log_failure("Invalid exception for {0}: {1}"
                                         .format(key, fail["error"]))
                    if retry_reason not in str(fail["error"]):
                        self.log_failure("Invalid retry reason for {0}: {1}"
                                         .format(key, fail["error"]))
                    # Try reading the value in SyncWrite in-progress state
                    fail = client.crud("read", key)
                    if doc_ops[0] == "create":
                        # Expected KeyNotFound in case of CREATE operation
                        if fail["status"] is True:
                            self.log_failure(
                                "%s returned value during SyncWrite state: %s"
                                % (key, fail))
                    else:
                        # Expects prev value in case of other operations
                        if fail["status"] is False:
                            self.log_failure(
                                "Key %s read failed for previous value: %s"
                                % (key, fail))
            # Revert the introduced error condition
            for node in target_nodes:
                error_sim = CouchbaseError(self.log,
                                           self.vbs_in_node[node]["shell"])
                error_sim.revert(simulate_error,
                                 bucket_name=bucket.name)
            # Wait for doc_loader_task to complete
            self.task.jython_task_manager.get_task_result(doc_loader_task)
            verification_dict["ops_%s" % doc_op[0]] += crud_batch_size
            verification_dict["sync_write_committed_count"] \
                += crud_batch_size
            # Disconnect the client
            client.close()

        # [op_1 (pending sync_write), op_2 (competing op), initial-load flag]
        crud_variations = [
            ["create", "create", ""],
            ["update", "update", "load_initial_docs"],
            ["update", "delete", ""],
            ["update", "touch", ""],
            ["update", "replace", ""],
            ["delete", "delete", ""],
            ["delete", "update", "load_initial_docs"],
            ["delete", "touch", "load_initial_docs"],
            ["delete", "replace", "load_initial_docs"]
        ]
        # Select nodes to affect and open required shell_connections
        target_nodes = self.getTargetNodes()
        for b_d_level in self.possible_d_levels[self.bucket_type]:
            # Skip Bucket durability level 'None'
            if b_d_level == Bucket.DurabilityLevel.NONE:
                continue
            verification_dict = self.get_cb_stat_verification_dict()
            create_desc = "Creating %s bucket with level '%s'" \
                          % (self.bucket_type, b_d_level)
            self.log.info(create_desc)
            bucket_dict = self.get_bucket_dict(self.bucket_type, b_d_level)
            # Object to support performing CRUDs and create Bucket
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(self.cluster, bucket_obj,
                                           wait_for_warmup=True)
            self.get_vbucket_type_mapping(bucket_obj.name)
            self.summary.add_step(create_desc)
            for doc_op in crud_variations:
                test_scenario(bucket_obj, doc_op)
                self.summary.add_step("SyncWriteInProgress for [%s, %s]"
                                      % (doc_op[0], doc_op[1]))
            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)
            # Bucket deletion
            self.bucket_util.delete_bucket(self.cluster, bucket_obj)
            self.summary.add_step("Delete %s bucket" % self.bucket_type)
    def test_observe_scenario(self):
        """
        Creates bucket with bucket level durability.
        Perform CRUD operations and make sure all the operations are
        done as sync_write in server.
        Note: Passing persistTo/replicateTo will test the observe scenarios
        """
        def perform_crud_ops():
            # Run one create/update/read/replace/delete cycle on 'key',
            # validating CAS transitions and stat-counter bookkeeping
            old_cas = 0
            client = SDKClient([self.cluster.master], bucket_obj)
            for op_type in ["create", "update", "read", "replace", "delete"]:
                crud_desc = "Key %s, doc_op: %s" % (key, op_type)
                self.log.info(crud_desc)
                result = client.crud(op_type, key, value,
                                     replicate_to=self.replicate_to,
                                     persist_to=self.persist_to)
                if op_type != "read":
                    # 'replace' is accounted under ops_update in cbstats
                    if op_type != "replace":
                        dict_key = "ops_%s" % op_type
                    else:
                        dict_key = "ops_update"
                    verification_dict[dict_key] += 1
                    verification_dict["sync_write_committed_count"] += 1
                    # Every mutation must bump the CAS
                    if result["cas"] == old_cas:
                        self.log_failure("CAS didn't get updated: %s"
                                         % result["cas"])
                elif op_type == "read":
                    # Reads must leave the CAS untouched
                    if result["cas"] != old_cas:
                        self.log_failure("CAS updated for read operation: %s"
                                         % result["cas"])
                self.summary.add_step(crud_desc)
                old_cas = result["cas"]
            client.close()

        doc_gen = doc_generator("test_key", 0, 1, mutate=0)
        key, value = doc_gen.next()
        for d_level in self.possible_d_levels[self.bucket_type]:
            # Durability NONE is not an observe/sync_write scenario
            if d_level == Bucket.DurabilityLevel.NONE:
                continue
            create_desc = "Create bucket with durability %s" % d_level
            self.log.info(create_desc)
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            # Object to support performing CRUDs
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(self.cluster, bucket_obj,
                                           wait_for_warmup=True)
            self.summary.add_step(create_desc)
            verification_dict = self.get_cb_stat_verification_dict()
            # Test CRUD operations
            perform_crud_ops()
            # Validate doc_count (0 expected: the cycle ends with a delete)
            self.bucket_util._wait_for_stats_all_buckets(self.cluster,
                                                         self.cluster.buckets)
            self.bucket_util.verify_stats_all_buckets(self.cluster, 0)
            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)
            # Delete the created bucket
            self.bucket_util.delete_bucket(self.cluster, bucket_obj)
            self.summary.add_step("Delete bucket with d_level %s" % d_level)
    def test_durability_impossible(self):
        """
        Create bucket with replica > num_kv_nodes.
        Perform doc insert to make sure we get TimeoutException due to
        durability_impossible from the server.
        """
        verification_dict = self.get_cb_stat_verification_dict()
        key, value = doc_generator("test_key", 0, 1).next()
        for d_level in self.possible_d_levels[self.bucket_type]:
            # Durability NONE can never hit durability_impossible
            if d_level == Bucket.DurabilityLevel.NONE:
                continue
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            # Object to support performing CRUDs
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(self.cluster, bucket_obj,
                                           wait_for_warmup=True)
            self.summary.add_step("Create bucket with durability %s"
                                  % d_level)
            client = SDKClient([self.cluster.master], bucket_obj)
            result = client.crud("create", key, value, timeout=3)
            # The write must fail with DurabilityImpossibleException since
            # the bucket-level durability cannot be met
            if result["status"] is True \
                    or SDKException.DurabilityImpossibleException \
                    not in result["error"]:
                self.log_failure("Indirect sync_write succeeded "
                                 "without enough nodes")
            client.close()
            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)
            # Delete the created bucket
            self.bucket_util.delete_bucket(self.cluster, bucket_obj)
            self.summary.add_step("Delete bucket with d_level %s" % d_level)
import json
from django.test import TestCase
from unittest.mock import Mock
from silk.model_factory import RequestModelFactory, ResponseModelFactory
# Key under which Django exposes the request's content type in request.META
DJANGO_META_CONTENT_TYPE = 'CONTENT_TYPE'
# Canonical HTTP header name for the content type
HTTP_CONTENT_TYPE = 'Content-Type'
# Shorthand for the string silk substitutes in place of masked credentials
CLEANSED = RequestModelFactory.CLEANSED_SUBSTITUTE
class MaskCredentialsInFormsTest(TestCase):
    """Verify credential masking over form-encoded (key=value&...) bodies."""

    def _mask(self, value):
        """Run RequestModelFactory's credential masking over *value*."""
        factory = RequestModelFactory(None)
        return factory._mask_credentials(value)

    def test_mask_credentials_preserves_single_insensitive_values(self):
        self.assertEqual("foo=public", self._mask("foo=public"))

    def test_mask_credentials_masks_sensitive_values(self):
        self.assertEqual("password={}".format(CLEANSED),
                         self._mask("password=<PASSWORD>"))

    def test_mask_credentials_masks_multiple_sensitive_values(self):
        self.assertEqual("password={}&secret={}".format(CLEANSED, CLEANSED),
                         self._mask("password=<PASSWORD>&secret=mysecret"))

    def test_mask_credentials_masks_sensitive_values_between_insensitive_values(self):
        self.assertEqual(
            "public1=foo&password={}&public2=bar".format(CLEANSED),
            self._mask("public1=foo&password=<PASSWORD>&public2=bar"))

    def test_mask_credentials_preserves_insensitive_values_between_sensitive_values(self):
        self.assertEqual(
            "password={}&foo=public&secret={}".format(CLEANSED, CLEANSED),
            self._mask("password=1&foo=public&secret=2"))

    def test_mask_credentials_is_case_insensitive(self):
        self.assertEqual("UsErNaMe={}".format(CLEANSED),
                         self._mask("UsErNaMe=secret"))

    def test_mask_credentials_handles_prefixes(self):
        self.assertEqual("prefixed-username={}".format(CLEANSED),
                         self._mask("prefixed-username=secret"))

    def test_mask_credentials_handles_suffixes(self):
        self.assertEqual("username-with-suffix={}".format(CLEANSED),
                         self._mask("username-with-suffix=secret"))

    def test_mask_credentials_handles_regex_characters(self):
        # Value contains '+' which would break a naive regex-based mask
        self.assertEqual("password={}".format(CLEANSED),
                         self._mask("password=<PASSWORD>++"))

    def test_mask_credentials_handles_complex_cases(self):
        self.assertEqual(
            "foo=public&prefixed-uSeRname-with-suffix={}&bar=public"
            .format(CLEANSED),
            self._mask("foo=public&prefixed-uSeRname-with-suffix=secret&bar=public"))
class MaskCredentialsInJsonTest(TestCase):
    """Verify credential masking inside JSON-encoded request bodies."""

    def _mask(self, value):
        """Serialize *value* to JSON and run credential masking over it."""
        payload = json.dumps(value)
        return RequestModelFactory(None)._mask_credentials(payload)

    def test_mask_credentials_preserves_single_insensitive_values(self):
        masked = self._mask({"foo": "public"})
        self.assertIn("public", masked)

    def test_mask_credentials_preserves_insensitive_values_in_presence_of_sensitive(self):
        masked = self._mask({"password": "<PASSWORD>", "foo": "public"})
        self.assertIn("public", masked)

    def test_mask_credentials_masks_sensitive_values(self):
        masked = self._mask({"password": "<PASSWORD>"})
        self.assertNotIn("secret", masked)

    def test_mask_credentials_masks_sensitive_values_in_presence_of_regular(self):
        masked = self._mask({"foo": "public", "password": "<PASSWORD>"})
        self.assertNotIn("secret", masked)

    def test_mask_credentials_is_case_insensitive(self):
        masked = self._mask({"UsErNaMe": "secret"})
        self.assertNotIn("secret", masked)

    def test_mask_credentials_handles_prefixes(self):
        masked = self._mask({"prefixed-username": "secret"})
        self.assertNotIn("secret", masked)

    def test_mask_credentials_handles_suffixes(self):
        masked = self._mask({"username-with-suffix": "secret"})
        self.assertNotIn("secret", masked)

    def test_mask_credentials_handles_complex_cases(self):
        masked = self._mask({
            "foo": "public",
            "prefixed-uSeRname-with-suffix": "secret"
        })
        self.assertNotIn("secret", masked)

    def test_mask_credentials_in_nested_data_structures(self):
        # Masking must recurse into nested objects
        masked = self._mask({
            "foo": "public",
            "nested": {
                "prefixed-uSeRname-with-suffix": "secret",
            },
        })
        self.assertNotIn("secret", masked)
class TestEncodingForRequests(TestCase):
"""
Check that the RequestModelFactory masks sensitive data
"""
def test_password_in_body(self):
mock_request = Mock()
mock_request.META = {DJANGO_META_CONTENT_TYPE: 'text/plain'}
mock_request.body = 'username=test_username&unmasked=testunmasked&password=<PASSWORD>'
mock_request.get = mock_request.META.get
factory = RequestModelFactory(mock_request)
body, raw_body = factory.body()
self.assertIn('testunmasked', raw_body)
self.assertNotIn('test_username', raw_body)
self.assertNotIn('testpassword', raw_body)
self.assertNotIn('test_username', body)
self.assertNotIn('testpassword', body)
def test_password_in_json(self):
mock_request = Mock()
mock_request.META = {DJANGO_META_CONTENT_TYPE: 'application/json; charset=UTF-8'}
d = {'x': 'testunmasked', 'username': 'test_username', 'password': '<PASSWORD>',
'prefixed-secret': 'testsecret'}
mock_request.body = json.dumps(d)
mock_request.get = mock_request.META.get
factory = RequestModelFactory(mock_request)
body, raw_body = factory.body()
self.assertIn('testunmasked', raw_body)
self.assertNotIn('test_username', raw_body)
self.assertNotIn('testpassword', raw_body)
self.assertNotIn('testsecret', raw_body)
self.assertNotIn('test_username', body)
self.assertNotIn('testpassword', body)
self.assertNotIn('testsecret', body)
for datum in [json.loads(body), json.loads(raw_body)]:
self.assertEqual(datum['username'], RequestModelFactory.CLEANSED_SUBSTITUTE)
self.assertEqual(datum['password'], RequestModelFactory.CLEANSED_SUBSTITUTE)
self.assertEqual(datum['prefixed-secret'], RequestModelFactory.CLEANSED_SUBSTITUTE)
self.assertEqual(datum['x'], 'testunmasked')
def test_password_in_batched_json(self):
mock_request = Mock()
mock_request.META = {DJANGO_META_CONTENT_TYPE: 'application/json; charset=UTF-8'}
d = [
{'x': 'testunmasked', 'username': 'test_username', 'password': '<PASSWORD>'},
{'x': 'testunmasked', 'username': 'test_username', 'password': '<PASSWORD>'}
]
mock_request.body = json.dumps(d)
mock_request.get = mock_request.META.get
factory = RequestModelFactory(mock_request)
body, raw_body = factory.body()
self.assertIn('testunmasked', raw_body)
self.assertNotIn('test_username', raw_body)
self.assertNotIn('testpassword', raw_body)
self.assertNotIn('test_username', body[0])
self.assertNotIn('testpassword', body[0])
self.assertNotIn('test_username', body[1])
self.assertNotIn('testpassword', body[1])
for data in [json.loads(body), json.loads(raw_body)]:
for datum in data:
self.assertEqual(datum['username'], RequestModelFactory.CLEANSED_SUBSTITUTE)
self.assertEqual(datum['password'], RequestModelFactory.CLEANSED_SUBSTITUTE)
self.assertEqual(datum['x'], 'testunmasked')
def test_authorization_header(self):
mock_request = Mock()
mock_request.META = {'HTTP_AUTHORIZATION': 'secret'}
mock_request.body = ''
mock_request.get = mock_request.META.get
factory = RequestModelFactory(mock_request)
headers = factory.encoded_headers()
json_headers = json.loads(headers)
self.assertIn('AUTHORIZATION', json_headers)
self.assertEqual(json_headers['AUTHORIZATION'], RequestModelFactory.CLEANSED_SUBSTITUTE) | project/tests/test_sensitive_data_in_request.py |
import json
from django.test import TestCase
from unittest.mock import Mock
from silk.model_factory import RequestModelFactory, ResponseModelFactory
DJANGO_META_CONTENT_TYPE = 'CONTENT_TYPE'
HTTP_CONTENT_TYPE = 'Content-Type'
CLEANSED = RequestModelFactory.CLEANSED_SUBSTITUTE
class MaskCredentialsInFormsTest(TestCase):
def _mask(self, value):
return RequestModelFactory(None)._mask_credentials(value)
def test_mask_credentials_preserves_single_insensitive_values(self):
body = "foo=public"
expected = "foo=public"
self.assertEqual(expected, self._mask(body))
def test_mask_credentials_masks_sensitive_values(self):
body = "password=<PASSWORD>"
expected = "password={}".format(CLEANSED)
self.assertEqual(expected, self._mask(body))
def test_mask_credentials_masks_multiple_sensitive_values(self):
body = "password=<PASSWORD>&secret=mysecret"
expected = "password={}&secret={}".format(CLEANSED, CLEANSED)
self.assertEqual(expected, self._mask(body))
def test_mask_credentials_masks_sensitive_values_between_insensitive_values(self):
body = "public1=foo&password=<PASSWORD>&public2=bar"
expected = "public1=foo&password={}&public2=bar".format(CLEANSED)
self.assertEqual(expected, self._mask(body))
def test_mask_credentials_preserves_insensitive_values_between_sensitive_values(self):
body = "password=1&foo=public&secret=2"
expected = "password={}&foo=public&secret={}".format(CLEANSED, CLEANSED)
self.assertEqual(expected, self._mask(body))
def test_mask_credentials_is_case_insensitive(self):
body = "UsErNaMe=secret"
expected = "UsErNaMe={}".format(CLEANSED)
self.assertEqual(expected, self._mask(body))
def test_mask_credentials_handles_prefixes(self):
body = "prefixed-username=secret"
expected = "prefixed-username={}".format(CLEANSED)
self.assertEqual(expected, self._mask(body))
def test_mask_credentials_handles_suffixes(self):
body = "username-with-suffix=secret"
expected = "username-with-suffix={}".format(CLEANSED)
self.assertEqual(expected, self._mask(body))
def test_mask_credentials_handles_regex_characters(self):
body = "password=<PASSWORD>++"
expected = "password={}".format(CLEANSED)
self.assertEqual(expected, self._mask(body))
def test_mask_credentials_handles_complex_cases(self):
body = "foo=public&prefixed-uSeRname-with-suffix=secret&bar=public"
expected = "foo=public&prefixed-uSeRname-with-suffix={}&bar=public".format(CLEANSED)
self.assertEqual(expected, self._mask(body))
class MaskCredentialsInJsonTest(TestCase):
def _mask(self, value):
return RequestModelFactory(None)._mask_credentials(json.dumps(value))
def test_mask_credentials_preserves_single_insensitive_values(self):
self.assertIn("public", self._mask({"foo": "public"}))
def test_mask_credentials_preserves_insensitive_values_in_presence_of_sensitive(self):
self.assertIn("public", self._mask({"password": "<PASSWORD>", "foo": "public"}))
def test_mask_credentials_masks_sensitive_values(self):
self.assertNotIn("secret", self._mask({"password": "<PASSWORD>"}))
def test_mask_credentials_masks_sensitive_values_in_presence_of_regular(self):
self.assertNotIn("secret", self._mask({"foo": "public", "password": "<PASSWORD>"}))
def test_mask_credentials_is_case_insensitive(self):
self.assertNotIn("secret", self._mask({"UsErNaMe": "secret"}))
def test_mask_credentials_handles_prefixes(self):
self.assertNotIn("secret", self._mask({"prefixed-username": "secret"}))
def test_mask_credentials_handles_suffixes(self):
self.assertNotIn("secret", self._mask({"username-with-suffix": "secret"}))
def test_mask_credentials_handles_complex_cases(self):
self.assertNotIn("secret", self._mask({
"foo": "public",
"prefixed-uSeRname-with-suffix": "secret"
}))
def test_mask_credentials_in_nested_data_structures(self):
self.assertNotIn("secret", self._mask({
"foo": "public",
"nested": {
"prefixed-uSeRname-with-suffix": "secret",
},
}))
class TestEncodingForRequests(TestCase):
"""
Check that the RequestModelFactory masks sensitive data
"""
def test_password_in_body(self):
mock_request = Mock()
mock_request.META = {DJANGO_META_CONTENT_TYPE: 'text/plain'}
mock_request.body = 'username=test_username&unmasked=testunmasked&password=<PASSWORD>'
mock_request.get = mock_request.META.get
factory = RequestModelFactory(mock_request)
body, raw_body = factory.body()
self.assertIn('testunmasked', raw_body)
self.assertNotIn('test_username', raw_body)
self.assertNotIn('testpassword', raw_body)
self.assertNotIn('test_username', body)
self.assertNotIn('testpassword', body)
def test_password_in_json(self):
mock_request = Mock()
mock_request.META = {DJANGO_META_CONTENT_TYPE: 'application/json; charset=UTF-8'}
d = {'x': 'testunmasked', 'username': 'test_username', 'password': '<PASSWORD>',
'prefixed-secret': 'testsecret'}
mock_request.body = json.dumps(d)
mock_request.get = mock_request.META.get
factory = RequestModelFactory(mock_request)
body, raw_body = factory.body()
self.assertIn('testunmasked', raw_body)
self.assertNotIn('test_username', raw_body)
self.assertNotIn('testpassword', raw_body)
self.assertNotIn('testsecret', raw_body)
self.assertNotIn('test_username', body)
self.assertNotIn('testpassword', body)
self.assertNotIn('testsecret', body)
for datum in [json.loads(body), json.loads(raw_body)]:
self.assertEqual(datum['username'], RequestModelFactory.CLEANSED_SUBSTITUTE)
self.assertEqual(datum['password'], RequestModelFactory.CLEANSED_SUBSTITUTE)
self.assertEqual(datum['prefixed-secret'], RequestModelFactory.CLEANSED_SUBSTITUTE)
self.assertEqual(datum['x'], 'testunmasked')
def test_password_in_batched_json(self):
mock_request = Mock()
mock_request.META = {DJANGO_META_CONTENT_TYPE: 'application/json; charset=UTF-8'}
d = [
{'x': 'testunmasked', 'username': 'test_username', 'password': '<PASSWORD>'},
{'x': 'testunmasked', 'username': 'test_username', 'password': '<PASSWORD>'}
]
mock_request.body = json.dumps(d)
mock_request.get = mock_request.META.get
factory = RequestModelFactory(mock_request)
body, raw_body = factory.body()
self.assertIn('testunmasked', raw_body)
self.assertNotIn('test_username', raw_body)
self.assertNotIn('testpassword', raw_body)
self.assertNotIn('test_username', body[0])
self.assertNotIn('testpassword', body[0])
self.assertNotIn('test_username', body[1])
self.assertNotIn('testpassword', body[1])
for data in [json.loads(body), json.loads(raw_body)]:
for datum in data:
self.assertEqual(datum['username'], RequestModelFactory.CLEANSED_SUBSTITUTE)
self.assertEqual(datum['password'], RequestModelFactory.CLEANSED_SUBSTITUTE)
self.assertEqual(datum['x'], 'testunmasked')
def test_authorization_header(self):
mock_request = Mock()
mock_request.META = {'HTTP_AUTHORIZATION': 'secret'}
mock_request.body = ''
mock_request.get = mock_request.META.get
factory = RequestModelFactory(mock_request)
headers = factory.encoded_headers()
json_headers = json.loads(headers)
self.assertIn('AUTHORIZATION', json_headers)
self.assertEqual(json_headers['AUTHORIZATION'], RequestModelFactory.CLEANSED_SUBSTITUTE) | 0.562177 | 0.304998 |
import gc
import os
import statistics
import sys
import textwrap
import time
from argparse import Namespace
from operator import attrgetter
import click
MAX_DAG_RUNS_ALLOWED = 1
class ShortCircuitExecutorMixin:
"""
Mixin class to manage the scheduler state during the performance test run.
"""
def __init__(self, dag_ids_to_watch, num_runs):
super().__init__()
self.num_runs_per_dag = num_runs
self.reset(dag_ids_to_watch)
def reset(self, dag_ids_to_watch):
"""
Capture the value that will determine when the scheduler is reset.
"""
self.dags_to_watch = {
dag_id: Namespace(
waiting_for=self.num_runs_per_dag,
# A "cache" of DagRun row, so we don't have to look it up each
# time. This is to try and reduce the impact of our
# benchmarking code on runtime,
runs={}
) for dag_id in dag_ids_to_watch
}
def change_state(self, key, state, info=None):
"""
Change the state of scheduler by waiting till the tasks is complete
and then shut down the scheduler after the task is complete
"""
from airflow.utils.state import State
super().change_state(key, state, info=info)
dag_id, _, execution_date, __ = key
if dag_id not in self.dags_to_watch:
return
# This fn is called before the DagRun state is updated, so we can't
# check the DR.state - so instead we need to check the state of the
# tasks in that run
run = self.dags_to_watch[dag_id].runs.get(execution_date)
if not run:
import airflow.models
# odd `list()` is to work across Airflow versions.
run = list(airflow.models.DagRun.find(dag_id=dag_id, execution_date=execution_date))[0]
self.dags_to_watch[dag_id].runs[execution_date] = run
if run and all(t.state == State.SUCCESS for t in run.get_task_instances()):
self.dags_to_watch[dag_id].runs.pop(execution_date)
self.dags_to_watch[dag_id].waiting_for -= 1
if self.dags_to_watch[dag_id].waiting_for == 0:
self.dags_to_watch.pop(dag_id)
if not self.dags_to_watch:
self.log.warning("STOPPING SCHEDULER -- all runs complete")
self.scheduler_job.processor_agent._done = True # pylint: disable=protected-access
return
self.log.warning("WAITING ON %d RUNS",
sum(map(attrgetter('waiting_for'), self.dags_to_watch.values())))
def get_executor_under_test(dotted_path):
"""
Create and return a MockExecutor
"""
from airflow.executors.executor_loader import ExecutorLoader
if dotted_path == "MockExecutor":
try:
# Run against master and 1.10.x releases
from tests.test_utils.mock_executor import MockExecutor as Executor
except ImportError:
from tests.executors.test_executor import TestExecutor as Executor
else:
Executor = ExecutorLoader.load_executor(dotted_path)
# Change this to try other executors
class ShortCircuitExecutor(ShortCircuitExecutorMixin, Executor):
"""
Placeholder class that implements the inheritance hierarchy
"""
scheduler_job = None
return ShortCircuitExecutor
def reset_dag(dag, session):
"""
Delete all dag and task instances and then un_pause the Dag.
"""
import airflow.models
DR = airflow.models.DagRun
DM = airflow.models.DagModel
TI = airflow.models.TaskInstance
TF = airflow.models.TaskFail
dag_id = dag.dag_id
session.query(DM).filter(DM.dag_id == dag_id).update({'is_paused': False})
session.query(DR).filter(DR.dag_id == dag_id).delete()
session.query(TI).filter(TI.dag_id == dag_id).delete()
session.query(TF).filter(TF.dag_id == dag_id).delete()
def pause_all_dags(session):
"""
Pause all Dags
"""
from airflow.models.dag import DagModel
session.query(DagModel).update({'is_paused': True})
def create_dag_runs(dag, num_runs, session):
"""
Create `num_runs` of dag runs for sub-sequent schedules
"""
from airflow.utils import timezone
from airflow.utils.state import State
try:
from airflow.utils.types import DagRunType
ID_PREFIX = f'{DagRunType.SCHEDULED.value}__'
except ImportError:
from airflow.models.dagrun import DagRun
ID_PREFIX = DagRun.ID_PREFIX
next_run_date = dag.normalize_schedule(dag.start_date or min(t.start_date for t in dag.tasks))
for _ in range(num_runs):
dag.create_dagrun(
run_id=ID_PREFIX + next_run_date.isoformat(),
execution_date=next_run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False,
session=session,
)
next_run_date = dag.following_schedule(next_run_date)
@click.command()
@click.option('--num-runs', default=1, help='number of DagRun, to run for each DAG')
@click.option('--repeat', default=3, help='number of times to run test, to reduce variance')
@click.option('--pre-create-dag-runs', is_flag=True, default=False,
help='''Pre-create the dag runs and stop the scheduler creating more.
Warning: this makes the scheduler do (slightly) less work so may skew your numbers. Use sparingly!
''')
@click.option('--executor-class', default='MockExecutor',
help=textwrap.dedent('''
Dotted path Executor class to test, for example
'airflow.executors.local_executor.LocalExecutor'. Defaults to MockExcutor which doesn't run tasks.
'''))
@click.argument('dag_ids', required=True, nargs=-1)
def main(num_runs, repeat, pre_create_dag_runs, executor_class, dag_ids): # pylint: disable=too-many-locals
"""
This script can be used to measure the total "scheduler overhead" of Airflow.
By overhead we mean if the tasks executed instantly as soon as they are
executed (i.e. they do nothing) how quickly could we schedule them.
It will monitor the task completion of the Mock/stub executor (no actual
tasks are run) and after the required number of dag runs for all the
specified dags have completed all their tasks, it will cleanly shut down
the scheduler.
The dags you run with need to have an early enough start_date to create the
desired number of runs.
Care should be taken that other limits (DAG concurrency, pool size etc) are
not the bottleneck. This script doesn't help you in that regard.
It is recommended to repeat the test at least 3 times (`--repeat=3`, the
default) so that you can get somewhat-accurate variance on the reported
timing numbers, but this can be disabled for longer runs if needed.
"""
# Turn on unit test mode so that we don't do any sleep() in the scheduler
# loop - not needed on master, but this script can run against older
# releases too!
os.environ['AIRFLOW__CORE__UNIT_TEST_MODE'] = 'True'
os.environ['AIRFLOW__CORE__DAG_CONCURRENCY'] = '500'
# Set this so that dags can dynamically configure their end_date
os.environ['AIRFLOW_BENCHMARK_MAX_DAG_RUNS'] = str(num_runs)
os.environ['PERF_MAX_RUNS'] = str(num_runs)
if pre_create_dag_runs:
os.environ['AIRFLOW__SCHEDULER__USE_JOB_SCHEDULE'] = 'False'
from airflow.jobs.scheduler_job import SchedulerJob
from airflow.models.dagbag import DagBag
from airflow.utils import db
dagbag = DagBag()
dags = []
with db.create_session() as session:
pause_all_dags(session)
for dag_id in dag_ids:
dag = dagbag.get_dag(dag_id)
dag.sync_to_db(session=session)
dags.append(dag)
reset_dag(dag, session)
next_run_date = dag.normalize_schedule(dag.start_date or min(t.start_date for t in dag.tasks))
for _ in range(num_runs - 1):
next_run_date = dag.following_schedule(next_run_date)
end_date = dag.end_date or dag.default_args.get('end_date')
if end_date != next_run_date:
message = (
f"DAG {dag_id} has incorrect end_date ({end_date}) for number of runs! "
f"It should be "
f" {next_run_date}")
sys.exit(message)
if pre_create_dag_runs:
create_dag_runs(dag, num_runs, session)
ShortCircutExecutor = get_executor_under_test(executor_class)
executor = ShortCircutExecutor(dag_ids_to_watch=dag_ids, num_runs=num_runs)
scheduler_job = SchedulerJob(dag_ids=dag_ids, do_pickle=False, executor=executor)
executor.scheduler_job = scheduler_job
total_tasks = sum(len(dag.tasks) for dag in dags)
if 'PYSPY' in os.environ:
pid = str(os.getpid())
filename = os.environ.get('PYSPY_O', 'flame-' + pid + '.html')
os.spawnlp(os.P_NOWAIT, 'sudo', 'sudo', 'py-spy', 'record', '-o', filename, '-p', pid, '--idle')
times = []
# Need a lambda to refer to the _latest_ value fo scheduler_job, not just
# the initial one
code_to_test = lambda: scheduler_job.run() # pylint: disable=unnecessary-lambda
for count in range(repeat):
gc.disable()
start = time.perf_counter()
code_to_test()
times.append(time.perf_counter() - start)
gc.enable()
print("Run %d time: %.5f" % (count + 1, times[-1]))
if count + 1 != repeat:
with db.create_session() as session:
for dag in dags:
reset_dag(dag, session)
executor.reset(dag_ids)
scheduler_job = SchedulerJob(dag_ids=dag_ids, do_pickle=False, executor=executor)
executor.scheduler_job = scheduler_job
print()
print()
msg = "Time for %d dag runs of %d dags with %d total tasks: %.4fs"
if len(times) > 1:
print((msg + " (±%.3fs)") % (
num_runs,
len(dags),
total_tasks,
statistics.mean(times),
statistics.stdev(times)
))
else:
print(msg % (num_runs, len(dags), total_tasks, times[0]))
print()
print()
if __name__ == "__main__":
main() # pylint: disable=no-value-for-parameter | scripts/perf/scheduler_dag_execution_timing.py | import gc
import os
import statistics
import sys
import textwrap
import time
from argparse import Namespace
from operator import attrgetter
import click
MAX_DAG_RUNS_ALLOWED = 1
class ShortCircuitExecutorMixin:
"""
Mixin class to manage the scheduler state during the performance test run.
"""
def __init__(self, dag_ids_to_watch, num_runs):
super().__init__()
self.num_runs_per_dag = num_runs
self.reset(dag_ids_to_watch)
def reset(self, dag_ids_to_watch):
"""
Capture the value that will determine when the scheduler is reset.
"""
self.dags_to_watch = {
dag_id: Namespace(
waiting_for=self.num_runs_per_dag,
# A "cache" of DagRun row, so we don't have to look it up each
# time. This is to try and reduce the impact of our
# benchmarking code on runtime,
runs={}
) for dag_id in dag_ids_to_watch
}
def change_state(self, key, state, info=None):
"""
Change the state of scheduler by waiting till the tasks is complete
and then shut down the scheduler after the task is complete
"""
from airflow.utils.state import State
super().change_state(key, state, info=info)
dag_id, _, execution_date, __ = key
if dag_id not in self.dags_to_watch:
return
# This fn is called before the DagRun state is updated, so we can't
# check the DR.state - so instead we need to check the state of the
# tasks in that run
run = self.dags_to_watch[dag_id].runs.get(execution_date)
if not run:
import airflow.models
# odd `list()` is to work across Airflow versions.
run = list(airflow.models.DagRun.find(dag_id=dag_id, execution_date=execution_date))[0]
self.dags_to_watch[dag_id].runs[execution_date] = run
if run and all(t.state == State.SUCCESS for t in run.get_task_instances()):
self.dags_to_watch[dag_id].runs.pop(execution_date)
self.dags_to_watch[dag_id].waiting_for -= 1
if self.dags_to_watch[dag_id].waiting_for == 0:
self.dags_to_watch.pop(dag_id)
if not self.dags_to_watch:
self.log.warning("STOPPING SCHEDULER -- all runs complete")
self.scheduler_job.processor_agent._done = True # pylint: disable=protected-access
return
self.log.warning("WAITING ON %d RUNS",
sum(map(attrgetter('waiting_for'), self.dags_to_watch.values())))
def get_executor_under_test(dotted_path):
"""
Create and return a MockExecutor
"""
from airflow.executors.executor_loader import ExecutorLoader
if dotted_path == "MockExecutor":
try:
# Run against master and 1.10.x releases
from tests.test_utils.mock_executor import MockExecutor as Executor
except ImportError:
from tests.executors.test_executor import TestExecutor as Executor
else:
Executor = ExecutorLoader.load_executor(dotted_path)
# Change this to try other executors
class ShortCircuitExecutor(ShortCircuitExecutorMixin, Executor):
"""
Placeholder class that implements the inheritance hierarchy
"""
scheduler_job = None
return ShortCircuitExecutor
def reset_dag(dag, session):
"""
Delete all dag and task instances and then un_pause the Dag.
"""
import airflow.models
DR = airflow.models.DagRun
DM = airflow.models.DagModel
TI = airflow.models.TaskInstance
TF = airflow.models.TaskFail
dag_id = dag.dag_id
session.query(DM).filter(DM.dag_id == dag_id).update({'is_paused': False})
session.query(DR).filter(DR.dag_id == dag_id).delete()
session.query(TI).filter(TI.dag_id == dag_id).delete()
session.query(TF).filter(TF.dag_id == dag_id).delete()
def pause_all_dags(session):
"""
Pause all Dags
"""
from airflow.models.dag import DagModel
session.query(DagModel).update({'is_paused': True})
def create_dag_runs(dag, num_runs, session):
"""
Create `num_runs` of dag runs for sub-sequent schedules
"""
from airflow.utils import timezone
from airflow.utils.state import State
try:
from airflow.utils.types import DagRunType
ID_PREFIX = f'{DagRunType.SCHEDULED.value}__'
except ImportError:
from airflow.models.dagrun import DagRun
ID_PREFIX = DagRun.ID_PREFIX
next_run_date = dag.normalize_schedule(dag.start_date or min(t.start_date for t in dag.tasks))
for _ in range(num_runs):
dag.create_dagrun(
run_id=ID_PREFIX + next_run_date.isoformat(),
execution_date=next_run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False,
session=session,
)
next_run_date = dag.following_schedule(next_run_date)
@click.command()
@click.option('--num-runs', default=1, help='number of DagRun, to run for each DAG')
@click.option('--repeat', default=3, help='number of times to run test, to reduce variance')
@click.option('--pre-create-dag-runs', is_flag=True, default=False,
help='''Pre-create the dag runs and stop the scheduler creating more.
Warning: this makes the scheduler do (slightly) less work so may skew your numbers. Use sparingly!
''')
@click.option('--executor-class', default='MockExecutor',
help=textwrap.dedent('''
Dotted path Executor class to test, for example
'airflow.executors.local_executor.LocalExecutor'. Defaults to MockExcutor which doesn't run tasks.
'''))
@click.argument('dag_ids', required=True, nargs=-1)
def main(num_runs, repeat, pre_create_dag_runs, executor_class, dag_ids): # pylint: disable=too-many-locals
"""
This script can be used to measure the total "scheduler overhead" of Airflow.
By overhead we mean if the tasks executed instantly as soon as they are
executed (i.e. they do nothing) how quickly could we schedule them.
It will monitor the task completion of the Mock/stub executor (no actual
tasks are run) and after the required number of dag runs for all the
specified dags have completed all their tasks, it will cleanly shut down
the scheduler.
The dags you run with need to have an early enough start_date to create the
desired number of runs.
Care should be taken that other limits (DAG concurrency, pool size etc) are
not the bottleneck. This script doesn't help you in that regard.
It is recommended to repeat the test at least 3 times (`--repeat=3`, the
default) so that you can get somewhat-accurate variance on the reported
timing numbers, but this can be disabled for longer runs if needed.
"""
# Turn on unit test mode so that we don't do any sleep() in the scheduler
# loop - not needed on master, but this script can run against older
# releases too!
os.environ['AIRFLOW__CORE__UNIT_TEST_MODE'] = 'True'
os.environ['AIRFLOW__CORE__DAG_CONCURRENCY'] = '500'
# Set this so that dags can dynamically configure their end_date
os.environ['AIRFLOW_BENCHMARK_MAX_DAG_RUNS'] = str(num_runs)
os.environ['PERF_MAX_RUNS'] = str(num_runs)
if pre_create_dag_runs:
os.environ['AIRFLOW__SCHEDULER__USE_JOB_SCHEDULE'] = 'False'
from airflow.jobs.scheduler_job import SchedulerJob
from airflow.models.dagbag import DagBag
from airflow.utils import db
dagbag = DagBag()
dags = []
with db.create_session() as session:
pause_all_dags(session)
for dag_id in dag_ids:
dag = dagbag.get_dag(dag_id)
dag.sync_to_db(session=session)
dags.append(dag)
reset_dag(dag, session)
next_run_date = dag.normalize_schedule(dag.start_date or min(t.start_date for t in dag.tasks))
for _ in range(num_runs - 1):
next_run_date = dag.following_schedule(next_run_date)
end_date = dag.end_date or dag.default_args.get('end_date')
if end_date != next_run_date:
message = (
f"DAG {dag_id} has incorrect end_date ({end_date}) for number of runs! "
f"It should be "
f" {next_run_date}")
sys.exit(message)
if pre_create_dag_runs:
create_dag_runs(dag, num_runs, session)
ShortCircutExecutor = get_executor_under_test(executor_class)
executor = ShortCircutExecutor(dag_ids_to_watch=dag_ids, num_runs=num_runs)
scheduler_job = SchedulerJob(dag_ids=dag_ids, do_pickle=False, executor=executor)
executor.scheduler_job = scheduler_job
total_tasks = sum(len(dag.tasks) for dag in dags)
if 'PYSPY' in os.environ:
pid = str(os.getpid())
filename = os.environ.get('PYSPY_O', 'flame-' + pid + '.html')
os.spawnlp(os.P_NOWAIT, 'sudo', 'sudo', 'py-spy', 'record', '-o', filename, '-p', pid, '--idle')
times = []
# Need a lambda to refer to the _latest_ value fo scheduler_job, not just
# the initial one
code_to_test = lambda: scheduler_job.run() # pylint: disable=unnecessary-lambda
for count in range(repeat):
gc.disable()
start = time.perf_counter()
code_to_test()
times.append(time.perf_counter() - start)
gc.enable()
print("Run %d time: %.5f" % (count + 1, times[-1]))
if count + 1 != repeat:
with db.create_session() as session:
for dag in dags:
reset_dag(dag, session)
executor.reset(dag_ids)
scheduler_job = SchedulerJob(dag_ids=dag_ids, do_pickle=False, executor=executor)
executor.scheduler_job = scheduler_job
print()
print()
msg = "Time for %d dag runs of %d dags with %d total tasks: %.4fs"
if len(times) > 1:
print((msg + " (±%.3fs)") % (
num_runs,
len(dags),
total_tasks,
statistics.mean(times),
statistics.stdev(times)
))
else:
print(msg % (num_runs, len(dags), total_tasks, times[0]))
print()
print()
if __name__ == "__main__":
main() # pylint: disable=no-value-for-parameter | 0.563138 | 0.364042 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djstripe', '0024_auto_20170308_0757'),
]
operations = [
migrations.AlterField(
model_name='account',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='account',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='charge',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='charge',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='customer',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='customer',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='event',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='event',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='eventprocessingexception',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='eventprocessingexception',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='invoice',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='invoice',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='invoiceitem',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='invoiceitem',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='plan',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='plan',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='stripesource',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='stripesource',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='subscription',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='subscription',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='transfer',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='transfer',
name='modified',
field=models.DateTimeField(auto_now=True),
),
] | djstripe/migrations/0025_auto_20170322_0428.py | from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djstripe', '0024_auto_20170308_0757'),
]
operations = [
migrations.AlterField(
model_name='account',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='account',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='charge',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='charge',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='customer',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='customer',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='event',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='event',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='eventprocessingexception',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='eventprocessingexception',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='invoice',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='invoice',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='invoiceitem',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='invoiceitem',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='plan',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='plan',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='stripesource',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='stripesource',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='subscription',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='subscription',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='transfer',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='transfer',
name='modified',
field=models.DateTimeField(auto_now=True),
),
] | 0.729423 | 0.158207 |
__author__ = 'VMware, Inc.'
__copyright__ = 'Copyright 2017 VMware, Inc. All rights reserved.' # pylint: disable=line-too-long
import requests
from com.vmware.cis_client import Session
from vmware.vapi.bindings.stub import ApiClient
from vmware.vapi.bindings.stub import StubFactoryBase
from vmware.vapi.lib.connect import get_requests_connector
from vmware.vapi.security.client.security_context_filter import \
LegacySecurityContextFilter
from vmware.vapi.security.session import create_session_security_context
from vmware.vapi.security.sso import create_saml_bearer_security_context
from vmware.vapi.security.sso import create_saml_security_context
from vmware.vapi.security.user_password import \
create_user_password_security_context
from vmware.vapi.stdlib.client.factories import StubConfigurationFactory
from com.vmware.vcenter.hvc_client import StubFactory as hvc_factory
from com.vmware.vcenter.compute_client import StubFactory as compute_factory
from com.vmware.vcenter.vm.compute_client import StubFactory as vm_compute_factory
from com.vmware.vcenter.inventory_client import StubFactory as inventory_factory
from com.vmware.vcenter.iso_client import StubFactory as iso_factory
from com.vmware.vcenter.ovf_client import StubFactory as ovf_factory
from com.vmware.vcenter.vm_template_client import StubFactory as vm_template_factory
from com.vmware.appliance.recovery_client import StubFactory as appliance_recovery_factory
from com.vmware.appliance.infraprofile_client import StubFactory as appliance_infraprofile_factory
from com.vmware.appliance.vmon_client import StubFactory as appliance_vmon_factory
JSON_RPC_ENDPOINT = '/api'
class StubFactory(StubFactoryBase):
def __init__(self, stub_config):
StubFactoryBase.__init__(self, stub_config)
self.vcenter.hvc = hvc_factory(stub_config)
self.vcenter.compute = compute_factory(stub_config)
self.vcenter.vm.compute = vm_compute_factory(stub_config)
self.vcenter.inventory = inventory_factory(stub_config)
self.vcenter.iso = iso_factory(stub_config)
self.vcenter.ovf = ovf_factory(stub_config)
self.vcenter.vm_template = vm_template_factory(stub_config)
self.appliance.recovery = appliance_recovery_factory(stub_config)
self.appliance.infraprofile = appliance_infraprofile_factory(stub_config)
self.appliance.vmon = appliance_vmon_factory(stub_config)
_attrs = {
'vcenter': 'com.vmware.vcenter_client.StubFactory',
'appliance': 'com.vmware.appliance_client.StubFactory',
'content': 'com.vmware.content_client.StubFactory',
'tagging': 'com.vmware.cis.tagging_client.StubFactory',
}
class VsphereClient(ApiClient):
"""
vSphere Client class that provides access to stubs for all services in the
vSphere API
"""
def __init__(self, session, server, username, password, bearer_token,
hok_token, private_key):
"""
Initialize VsphereClient by creating a parent stub factory instance
of all vSphere components.
:type session: :class:`requests.Session`
:param session: Requests HTTP session instance. If not specified,
then one is automatically created and used
:type server: :class:`str`
:param server: vCenter host name or IP address
:type username: :class:`str`
:param username: Name of the user
:type password: :class:`str`
:param password: <PASSWORD>
:type bearer_token: :class:`str`
:param bearer_token: SAML Bearer Token
:type hok_token: :class:`str`
:param hok_token: SAML Hok Token
:type private_key: :class:`str`
:param private_key: Absolute file path of the private key of the user
"""
if not session:
self.session = session = requests.Session()
host_url = "https://" + server + JSON_RPC_ENDPOINT
if username is not None and password is not None and \
not bearer_token and not hok_token:
sec_ctx = create_user_password_security_context(username, password)
elif bearer_token and not username and not hok_token:
sec_ctx = create_saml_bearer_security_context(bearer_token)
elif hok_token and private_key and not bearer_token and not username:
sec_ctx = create_saml_security_context(hok_token, private_key)
else:
raise ValueError('Please provide exactly one of the following '
'authentication scheme: username/password, '
'bearer_token or hok_token/private_key')
session_svc = Session(
StubConfigurationFactory.new_std_configuration(
get_requests_connector(
session=session, url=host_url,
provider_filter_chain=[
LegacySecurityContextFilter(
security_context=sec_ctx)])))
session_id = session_svc.create()
sec_ctx = create_session_security_context(session_id)
stub_config = StubConfigurationFactory.new_std_configuration(
get_requests_connector(
session=session, url=host_url,
provider_filter_chain=[
LegacySecurityContextFilter(
security_context=sec_ctx)]))
self.session_svc = Session(stub_config)
stub_factory = StubFactory(stub_config)
ApiClient.__init__(self, stub_factory)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.__del__()
def __del__(self):
try:
self.session_svc.delete()
except Exception:
# Catch exceptions when terminating the vSphere session and go ahead
# close the requests session.
pass
if hasattr(self, 'session'):
self.session.close()
def create_vsphere_client(server, username=None, password=<PASSWORD>,
bearer_token=None, hok_token=None, private_key=None,
session=None):
"""
Helper method to create an instance of the vSphere API client.
Please provide one of the following options to authenticate:
* username and password,
* bearer_token,
* hok_token and private_key
:type server: :class:`str`
:param server: vCenter host name or IP address
:type username: :class:`str`
:param username: Name of the user
:type password: :class:`str`
:param password: Password of the user
:type bearer_token: :class:`str`
:param bearer_token: SAML Bearer Token
:type hok_token: :class:`str`
:param hok_token: SAML Hok Token
:type private_key: :class:`str`
:param private_key: Absolute file path of the private key of the user
:type session: :class:`requests.Session` or ``None``
:param session: Requests HTTP session instance. If not specified, then one
is automatically created and used
:rtype: :class:`vmware.vapi.vmc.client.VsphereClient`
:return: Vsphere Client instance
"""
return VsphereClient(session=session, server=server, username=username,
password=password, bearer_token=bearer_token,
hok_token=hok_token, private_key=private_key) | vmware/vapi/vsphere/client.py | __author__ = 'VMware, Inc.'
__copyright__ = 'Copyright 2017 VMware, Inc. All rights reserved.' # pylint: disable=line-too-long
import requests
from com.vmware.cis_client import Session
from vmware.vapi.bindings.stub import ApiClient
from vmware.vapi.bindings.stub import StubFactoryBase
from vmware.vapi.lib.connect import get_requests_connector
from vmware.vapi.security.client.security_context_filter import \
LegacySecurityContextFilter
from vmware.vapi.security.session import create_session_security_context
from vmware.vapi.security.sso import create_saml_bearer_security_context
from vmware.vapi.security.sso import create_saml_security_context
from vmware.vapi.security.user_password import \
create_user_password_security_context
from vmware.vapi.stdlib.client.factories import StubConfigurationFactory
from com.vmware.vcenter.hvc_client import StubFactory as hvc_factory
from com.vmware.vcenter.compute_client import StubFactory as compute_factory
from com.vmware.vcenter.vm.compute_client import StubFactory as vm_compute_factory
from com.vmware.vcenter.inventory_client import StubFactory as inventory_factory
from com.vmware.vcenter.iso_client import StubFactory as iso_factory
from com.vmware.vcenter.ovf_client import StubFactory as ovf_factory
from com.vmware.vcenter.vm_template_client import StubFactory as vm_template_factory
from com.vmware.appliance.recovery_client import StubFactory as appliance_recovery_factory
from com.vmware.appliance.infraprofile_client import StubFactory as appliance_infraprofile_factory
from com.vmware.appliance.vmon_client import StubFactory as appliance_vmon_factory
JSON_RPC_ENDPOINT = '/api'
class StubFactory(StubFactoryBase):
def __init__(self, stub_config):
StubFactoryBase.__init__(self, stub_config)
self.vcenter.hvc = hvc_factory(stub_config)
self.vcenter.compute = compute_factory(stub_config)
self.vcenter.vm.compute = vm_compute_factory(stub_config)
self.vcenter.inventory = inventory_factory(stub_config)
self.vcenter.iso = iso_factory(stub_config)
self.vcenter.ovf = ovf_factory(stub_config)
self.vcenter.vm_template = vm_template_factory(stub_config)
self.appliance.recovery = appliance_recovery_factory(stub_config)
self.appliance.infraprofile = appliance_infraprofile_factory(stub_config)
self.appliance.vmon = appliance_vmon_factory(stub_config)
_attrs = {
'vcenter': 'com.vmware.vcenter_client.StubFactory',
'appliance': 'com.vmware.appliance_client.StubFactory',
'content': 'com.vmware.content_client.StubFactory',
'tagging': 'com.vmware.cis.tagging_client.StubFactory',
}
class VsphereClient(ApiClient):
"""
vSphere Client class that provides access to stubs for all services in the
vSphere API
"""
def __init__(self, session, server, username, password, bearer_token,
hok_token, private_key):
"""
Initialize VsphereClient by creating a parent stub factory instance
of all vSphere components.
:type session: :class:`requests.Session`
:param session: Requests HTTP session instance. If not specified,
then one is automatically created and used
:type server: :class:`str`
:param server: vCenter host name or IP address
:type username: :class:`str`
:param username: Name of the user
:type password: :class:`str`
:param password: <PASSWORD>
:type bearer_token: :class:`str`
:param bearer_token: SAML Bearer Token
:type hok_token: :class:`str`
:param hok_token: SAML Hok Token
:type private_key: :class:`str`
:param private_key: Absolute file path of the private key of the user
"""
if not session:
self.session = session = requests.Session()
host_url = "https://" + server + JSON_RPC_ENDPOINT
if username is not None and password is not None and \
not bearer_token and not hok_token:
sec_ctx = create_user_password_security_context(username, password)
elif bearer_token and not username and not hok_token:
sec_ctx = create_saml_bearer_security_context(bearer_token)
elif hok_token and private_key and not bearer_token and not username:
sec_ctx = create_saml_security_context(hok_token, private_key)
else:
raise ValueError('Please provide exactly one of the following '
'authentication scheme: username/password, '
'bearer_token or hok_token/private_key')
session_svc = Session(
StubConfigurationFactory.new_std_configuration(
get_requests_connector(
session=session, url=host_url,
provider_filter_chain=[
LegacySecurityContextFilter(
security_context=sec_ctx)])))
session_id = session_svc.create()
sec_ctx = create_session_security_context(session_id)
stub_config = StubConfigurationFactory.new_std_configuration(
get_requests_connector(
session=session, url=host_url,
provider_filter_chain=[
LegacySecurityContextFilter(
security_context=sec_ctx)]))
self.session_svc = Session(stub_config)
stub_factory = StubFactory(stub_config)
ApiClient.__init__(self, stub_factory)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.__del__()
def __del__(self):
try:
self.session_svc.delete()
except Exception:
# Catch exceptions when terminating the vSphere session and go ahead
# close the requests session.
pass
if hasattr(self, 'session'):
self.session.close()
def create_vsphere_client(server, username=None, password=<PASSWORD>,
bearer_token=None, hok_token=None, private_key=None,
session=None):
"""
Helper method to create an instance of the vSphere API client.
Please provide one of the following options to authenticate:
* username and password,
* bearer_token,
* hok_token and private_key
:type server: :class:`str`
:param server: vCenter host name or IP address
:type username: :class:`str`
:param username: Name of the user
:type password: :class:`str`
:param password: Password of the user
:type bearer_token: :class:`str`
:param bearer_token: SAML Bearer Token
:type hok_token: :class:`str`
:param hok_token: SAML Hok Token
:type private_key: :class:`str`
:param private_key: Absolute file path of the private key of the user
:type session: :class:`requests.Session` or ``None``
:param session: Requests HTTP session instance. If not specified, then one
is automatically created and used
:rtype: :class:`vmware.vapi.vmc.client.VsphereClient`
:return: Vsphere Client instance
"""
return VsphereClient(session=session, server=server, username=username,
password=password, bearer_token=bearer_token,
hok_token=hok_token, private_key=private_key) | 0.653238 | 0.057812 |
from commands.autoformat import AutoPep8
from handlers.autoformat_handler import AutoFormatHandler
_code = '''import math, sys;
def example1():
####This is a long comment. This should be wrapped to fit within 72 characters.
some_tuple=( 1,2, 3,'a' );
some_variable={'long':'Long code lines should be wrapped within 79 characters.',
'other':[math.pi, 100,200,300,9876543210,'This is a long string that goes on'],
'more':{'inner':'This whole logical line should be wrapped.',some_tuple:[1,
20,300,40000,500000000,60000000000000000]}}
return (some_tuple, some_variable)
def example2(): return {'has_key() is deprecated':True}.has_key({'f':2}.has_key(''));
class Example3( object ):
def __init__ ( self, bar ):
#Comments should have a space after the hash.
if bar : bar+=1; bar=bar* bar ; return bar
else:
some_string = """
Indentation in multiline strings should not be touched.
Only actual code should be reindented.
"""
return (sys.path, some_string)'''
_fixed_code_select = '''import math, sys;
def example1():
# This is a long comment. This should be wrapped to fit within 72
# characters.
some_tuple=( 1,2, 3,'a' );
some_variable={
'long':'Long code lines should be wrapped within 79 characters.',
'other':[
math.pi,
100,
200,
300,
9876543210,
'This is a long string that goes on'],
'more':{
'inner':'This whole logical line should be wrapped.',
some_tuple:[
1,
20,
300,
40000,
500000000,
60000000000000000]}}
return (some_tuple, some_variable)
def example2(): return {'has_key() is deprecated':True}.has_key(
{'f':2}.has_key(''));
class Example3( object ):
def __init__ ( self, bar ):
#Comments should have a space after the hash.
if bar : bar+=1; bar=bar* bar ; return bar
else:
some_string = """
Indentation in multiline strings should not be touched.
Only actual code should be reindented.
"""
return (sys.path, some_string)
'''
_fixed_code = '''import math
import sys
def example1():
# This is a long comment. This should be wrapped to fit within 72
# characters.
some_tuple = (1, 2, 3, 'a')
some_variable = {
'long': 'Long code lines should be wrapped within 79 characters.',
'other': [
math.pi,
100,
200,
300,
9876543210,
'This is a long string that goes on'],
'more': {
'inner': 'This whole logical line should be wrapped.',
some_tuple: [
1,
20,
300,
40000,
500000000,
60000000000000000]}}
return (some_tuple, some_variable)
def example2(): return ('' in {'f': 2}) in {'has_key() is deprecated': True}
class Example3(object):
def __init__(self, bar):
# Comments should have a space after the hash.
if bar:
bar += 1
bar = bar * bar
return bar
else:
some_string = """
Indentation in multiline strings should not be touched.
Only actual code should be reindented.
"""
return (sys.path, some_string)
'''
class TestAutoformat(object):
"""AutoPEP8 formatting tests suite
"""
def setUp(self):
self.settings = {
'aggressive': 2,
'list-fixes': False,
'autoformat_ignore': [],
'autoformat_select': [],
'pep8_max_line_length': 79
}
def test_autoformat_command(self):
AutoPep8(self._check_autoformat, 0, 0, _code, self.settings)
def test_autoformat_ignore(self):
self.settings['autoformat_ignore'] = ['E501']
AutoPep8(self._check_max_line, 0, 0, _code, self.settings)
def test_autoformat_select(self):
self.settings['autoformat_select'] = ['E501']
AutoPep8(self._check_autoformat_select, 0, 0, _code, self.settings)
def test_autoformat_max_line_length(self):
self.settings['pep8_max_line_length'] = 120
AutoPep8(self._check_max_line, 0, 0, _code, self.settings)
def test_autoformat_handler(self):
data = {'code': _code, 'settings': self.settings}
handler = AutoFormatHandler('pep8', data, 0, 0, self._check_autoformat) # noqa
handler.run()
def _check_autoformat(self, result):
assert result['success'] is True
assert result['buffer'] == _fixed_code
assert result['uid'] == 0
assert result['vid'] == 0
def _check_autoformat_select(self, result):
assert result['success'] is True
assert result['buffer'] == _fixed_code_select
assert result['uid'] == 0
assert result['vid'] == 0
def _check_max_line(self, result):
assert result['success'] is True
assert result['buffer'].splitlines()[5] == ' # This is a long comment. This should be wrapped to fit within 72 characters.' # noqa
assert result['uid'] == 0
assert result['vid'] == 0 | sublime-packages/Anaconda/test/test_autoformat.py |
from commands.autoformat import AutoPep8
from handlers.autoformat_handler import AutoFormatHandler
_code = '''import math, sys;
def example1():
####This is a long comment. This should be wrapped to fit within 72 characters.
some_tuple=( 1,2, 3,'a' );
some_variable={'long':'Long code lines should be wrapped within 79 characters.',
'other':[math.pi, 100,200,300,9876543210,'This is a long string that goes on'],
'more':{'inner':'This whole logical line should be wrapped.',some_tuple:[1,
20,300,40000,500000000,60000000000000000]}}
return (some_tuple, some_variable)
def example2(): return {'has_key() is deprecated':True}.has_key({'f':2}.has_key(''));
class Example3( object ):
def __init__ ( self, bar ):
#Comments should have a space after the hash.
if bar : bar+=1; bar=bar* bar ; return bar
else:
some_string = """
Indentation in multiline strings should not be touched.
Only actual code should be reindented.
"""
return (sys.path, some_string)'''
_fixed_code_select = '''import math, sys;
def example1():
# This is a long comment. This should be wrapped to fit within 72
# characters.
some_tuple=( 1,2, 3,'a' );
some_variable={
'long':'Long code lines should be wrapped within 79 characters.',
'other':[
math.pi,
100,
200,
300,
9876543210,
'This is a long string that goes on'],
'more':{
'inner':'This whole logical line should be wrapped.',
some_tuple:[
1,
20,
300,
40000,
500000000,
60000000000000000]}}
return (some_tuple, some_variable)
def example2(): return {'has_key() is deprecated':True}.has_key(
{'f':2}.has_key(''));
class Example3( object ):
def __init__ ( self, bar ):
#Comments should have a space after the hash.
if bar : bar+=1; bar=bar* bar ; return bar
else:
some_string = """
Indentation in multiline strings should not be touched.
Only actual code should be reindented.
"""
return (sys.path, some_string)
'''
_fixed_code = '''import math
import sys
def example1():
# This is a long comment. This should be wrapped to fit within 72
# characters.
some_tuple = (1, 2, 3, 'a')
some_variable = {
'long': 'Long code lines should be wrapped within 79 characters.',
'other': [
math.pi,
100,
200,
300,
9876543210,
'This is a long string that goes on'],
'more': {
'inner': 'This whole logical line should be wrapped.',
some_tuple: [
1,
20,
300,
40000,
500000000,
60000000000000000]}}
return (some_tuple, some_variable)
def example2(): return ('' in {'f': 2}) in {'has_key() is deprecated': True}
class Example3(object):
def __init__(self, bar):
# Comments should have a space after the hash.
if bar:
bar += 1
bar = bar * bar
return bar
else:
some_string = """
Indentation in multiline strings should not be touched.
Only actual code should be reindented.
"""
return (sys.path, some_string)
'''
class TestAutoformat(object):
"""AutoPEP8 formatting tests suite
"""
def setUp(self):
self.settings = {
'aggressive': 2,
'list-fixes': False,
'autoformat_ignore': [],
'autoformat_select': [],
'pep8_max_line_length': 79
}
def test_autoformat_command(self):
AutoPep8(self._check_autoformat, 0, 0, _code, self.settings)
def test_autoformat_ignore(self):
self.settings['autoformat_ignore'] = ['E501']
AutoPep8(self._check_max_line, 0, 0, _code, self.settings)
def test_autoformat_select(self):
self.settings['autoformat_select'] = ['E501']
AutoPep8(self._check_autoformat_select, 0, 0, _code, self.settings)
def test_autoformat_max_line_length(self):
self.settings['pep8_max_line_length'] = 120
AutoPep8(self._check_max_line, 0, 0, _code, self.settings)
def test_autoformat_handler(self):
data = {'code': _code, 'settings': self.settings}
handler = AutoFormatHandler('pep8', data, 0, 0, self._check_autoformat) # noqa
handler.run()
def _check_autoformat(self, result):
assert result['success'] is True
assert result['buffer'] == _fixed_code
assert result['uid'] == 0
assert result['vid'] == 0
def _check_autoformat_select(self, result):
assert result['success'] is True
assert result['buffer'] == _fixed_code_select
assert result['uid'] == 0
assert result['vid'] == 0
def _check_max_line(self, result):
assert result['success'] is True
assert result['buffer'].splitlines()[5] == ' # This is a long comment. This should be wrapped to fit within 72 characters.' # noqa
assert result['uid'] == 0
assert result['vid'] == 0 | 0.407451 | 0.262381 |
from collections import OrderedDict
import warnings
import numpy as np
from jax import random
import jax.numpy as jnp
import numpyro
from numpyro.distributions.distribution import COERCIONS
from numpyro.primitives import (
_PYRO_STACK,
CondIndepStackFrame,
Messenger,
apply_stack,
plate,
)
from numpyro.util import find_stack_level, not_jax_tracer
# Public API of this module: the effect handlers re-exported via
# `from numpyro.handlers import *`.
__all__ = [
    "block",
    "collapse",
    "condition",
    "infer_config",
    "lift",
    "mask",
    "reparam",
    "replay",
    "scale",
    "scope",
    "seed",
    "substitute",
    "trace",
    "do",
]
class trace(Messenger):
    """
    Effect handler that records the inputs and outputs of every primitive
    call made while executing `fn`.

    **Example:**

    .. doctest::

       >>> from jax import random
       >>> import numpyro
       >>> import numpyro.distributions as dist
       >>> from numpyro.handlers import seed, trace
       >>> import pprint as pp

       >>> def model():
       ...     numpyro.sample('a', dist.Normal(0., 1.))

       >>> exec_trace = trace(seed(model, random.PRNGKey(0))).get_trace()
       >>> pp.pprint(exec_trace)  # doctest: +SKIP
       OrderedDict([('a',
                     {'args': (),
                      'fn': <numpyro.distributions.continuous.Normal object at 0x7f9e689b1eb8>,
                      'is_observed': False,
                      'kwargs': {'rng_key': DeviceArray([0, 0], dtype=uint32)},
                      'name': 'a',
                      'type': 'sample',
                      'value': DeviceArray(-0.20584235, dtype=float32)})])
    """

    def __enter__(self):
        super(trace, self).__enter__()
        # Fresh recording for each context entry; insertion order preserved.
        self.trace = OrderedDict()
        return self.trace

    def postprocess_message(self, msg):
        # Helper messages (e.g. `control_flow`, `to_data`, `to_funsor`)
        # carry no name and are not recorded.
        if "name" not in msg:
            return
        site_name = msg["name"]
        assert not (
            msg["type"] == "sample" and site_name in self.trace
        ), "all sites must have unique names but got `{}` duplicated".format(site_name)
        # Store a shallow copy so later handlers cannot mutate the record.
        self.trace[site_name] = msg.copy()

    def get_trace(self, *args, **kwargs):
        """
        Run the wrapped callable and return the recorded trace.

        :param `*args`: arguments to the callable.
        :param `**kwargs`: keyword arguments to the callable.
        :return: `OrderedDict` containing the execution trace.
        """
        self(*args, **kwargs)
        return self.trace
class replay(Messenger):
    """
    Given a callable `fn` and an execution trace `trace`,
    return a callable which substitutes `sample` calls in `fn` with
    values from the corresponding site names in `trace`.

    :param fn: Python callable with NumPyro primitives.
    :param trace: an OrderedDict containing execution metadata.
    :param guide_trace: deprecated alias for `trace`.

    **Example:**

    .. doctest::

       >>> from jax import random
       >>> import numpyro
       >>> import numpyro.distributions as dist
       >>> from numpyro.handlers import replay, seed, trace

       >>> def model():
       ...     numpyro.sample('a', dist.Normal(0., 1.))

       >>> exec_trace = trace(seed(model, random.PRNGKey(0))).get_trace()
       >>> print(exec_trace['a']['value'])  # doctest: +SKIP
       -0.20584235
       >>> replayed_trace = trace(replay(model, exec_trace)).get_trace()
       >>> print(exec_trace['a']['value'])  # doctest: +SKIP
       -0.20584235
       >>> assert replayed_trace['a']['value'] == exec_trace['a']['value']
    """

    def __init__(self, fn=None, trace=None, guide_trace=None):
        # Single deprecation path: the original tested `guide_trace is not
        # None` twice in a row; warn and alias in one branch instead.
        if guide_trace is not None:
            warnings.warn(
                "`guide_trace` argument is deprecated. Please replace it by `trace`.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )
            trace = guide_trace
        assert trace is not None
        self.trace = trace
        super(replay, self).__init__(fn)

    def process_message(self, msg):
        # Overwrite values only at sample/plate sites recorded in the trace;
        # all other sites pass through untouched.
        if msg["type"] in ("sample", "plate") and msg["name"] in self.trace:
            msg["value"] = self.trace[msg["name"]]["value"]
class block(Messenger):
    """
    Given a callable `fn`, return another callable that selectively hides
    primitive sites where `hide_fn` returns True from other effect handlers
    on the stack.

    :param callable fn: Python callable with NumPyro primitives.
    :param callable hide_fn: function which when given a dictionary containing
        site-level metadata returns whether it should be blocked.
    :param list hide: list of site names to hide.

    **Example:**

    .. doctest::

       >>> from jax import random
       >>> import numpyro
       >>> from numpyro.handlers import block, seed, trace
       >>> import numpyro.distributions as dist

       >>> def model():
       ...     a = numpyro.sample('a', dist.Normal(0., 1.))
       ...     return numpyro.sample('b', dist.Normal(a, 1.))

       >>> model = seed(model, random.PRNGKey(0))
       >>> block_all = block(model)
       >>> block_a = block(model, lambda site: site['name'] == 'a')
       >>> trace_block_all = trace(block_all).get_trace()
       >>> assert not {'a', 'b'}.intersection(trace_block_all.keys())
       >>> trace_block_a = trace(block_a).get_trace()
       >>> assert 'a' not in trace_block_a
       >>> assert 'b' in trace_block_a
    """

    def __init__(self, fn=None, hide_fn=None, hide=None):
        # Resolve the blocking predicate: an explicit callable wins, then a
        # name list, and with neither argument every site is hidden.
        if hide_fn is not None:
            predicate = hide_fn
        elif hide is not None:
            predicate = lambda msg: msg.get("name") in hide  # noqa: E731
        else:
            predicate = lambda msg: True  # noqa: E731
        self.hide_fn = predicate
        super(block, self).__init__(fn)

    def process_message(self, msg):
        # Setting "stop" prevents handlers further up the stack from
        # seeing this site.
        if self.hide_fn(msg):
            msg["stop"] = True
class collapse(trace):
    """
    EXPERIMENTAL Collapses all sites in the context by lazily sampling and
    attempting to use conjugacy relations. If no conjugacy is known this will
    fail. Code using the results of sample sites must be written to accept
    Funsors rather than Tensors. This requires ``funsor`` to be installed.
    """

    # Class-level cache for the distribution->funsor coercion; initialized
    # lazily on first instantiation so `funsor` is only imported when used.
    _coerce = None

    def __init__(self, *args, **kwargs):
        if collapse._coerce is None:
            import funsor
            from funsor.distribution import CoerceDistributionToFunsor

            funsor.set_backend("jax")
            collapse._coerce = CoerceDistributionToFunsor("jax")
        super().__init__(*args, **kwargs)

    def process_message(self, msg):
        from funsor.terms import Funsor

        if msg["type"] == "sample":
            # Lazy sampling: an unobserved site's "value" becomes its name
            # (a free variable) instead of a concrete draw.
            if msg["value"] is None:
                msg["value"] = msg["name"]

            # Delayed (funsor-valued or symbolic) sites are hidden from the
            # rest of the handler stack; they are resolved in __exit__.
            if isinstance(msg["fn"], Funsor) or isinstance(msg["value"], (str, Funsor)):
                msg["stop"] = True

    def __enter__(self):
        # Plates already active outside this context must not be eliminated
        # when summing out the collapsed variables.
        self.preserved_plates = frozenset(
            h.name for h in _PYRO_STACK if isinstance(h, plate)
        )
        COERCIONS.append(self._coerce)
        return super().__enter__()

    def __exit__(self, exc_type, exc_value, traceback):
        import funsor

        # Must pop exactly the coercion pushed in __enter__.
        _coerce = COERCIONS.pop()
        assert _coerce is self._coerce
        super().__exit__(exc_type, exc_value, traceback)

        # On error, skip the collapse computation entirely.
        if exc_type is not None:
            return

        # Convert delayed statements to pyro.factor()
        reduced_vars = []
        log_prob_terms = []
        plates = frozenset()
        for name, site in self.trace.items():
            if site["type"] != "sample":
                continue
            # Unobserved sites are the variables to be summed out.
            if not site["is_observed"]:
                reduced_vars.append(name)
            dim_to_name = {f.dim: f.name for f in site["cond_indep_stack"]}
            fn = funsor.to_funsor(site["fn"], funsor.Real, dim_to_name)
            value = site["value"]
            # A string value is a free-variable name and is passed as-is.
            if not isinstance(value, str):
                value = funsor.to_funsor(site["value"], fn.inputs["value"], dim_to_name)
            log_prob_terms.append(fn(value=value))
            plates |= frozenset(f.name for f in site["cond_indep_stack"])
        assert log_prob_terms, "nothing to collapse"
        # Only plates introduced inside this context may be eliminated.
        reduced_plates = plates - self.preserved_plates
        # Sum-product contraction in log space over the collapsed variables.
        log_prob = funsor.sum_product.sum_product(
            funsor.ops.logaddexp,
            funsor.ops.add,
            log_prob_terms,
            eliminate=frozenset(reduced_vars) | reduced_plates,
            plates=plates,
        )
        # Register the collapsed log-density under the first reduced site's name.
        name = reduced_vars[0]
        numpyro.factor(name, log_prob.data)
class condition(Messenger):
    """
    Conditions unobserved sample sites to values from `data` or `condition_fn`.
    Similar to :class:`~numpyro.handlers.substitute` except that it only affects
    `sample` sites and changes the `is_observed` property to `True`.

    :param fn: Python callable with NumPyro primitives.
    :param dict data: dictionary of `numpy.ndarray` values keyed by
        site names.
    :param condition_fn: callable that takes in a site dict and returns
        a numpy array or `None` (in which case the handler has no side
        effect).

    **Example:**

    .. doctest::

       >>> from jax import random
       >>> import numpyro
       >>> from numpyro.handlers import condition, seed, substitute, trace
       >>> import numpyro.distributions as dist

       >>> def model():
       ...     numpyro.sample('a', dist.Normal(0., 1.))

       >>> model = seed(model, random.PRNGKey(0))
       >>> exec_trace = trace(condition(model, {'a': -1})).get_trace()
       >>> assert exec_trace['a']['value'] == -1
       >>> assert exec_trace['a']['is_observed']
    """

    def __init__(self, fn=None, data=None, condition_fn=None):
        self.condition_fn = condition_fn
        self.data = data
        # Exactly one source of conditioning values must be given.
        if (data is None) == (condition_fn is None):
            raise ValueError(
                "Only one of `data` or `condition_fn` should be provided."
            )
        super(condition, self).__init__(fn)

    def process_message(self, msg):
        is_plain_sample = msg["type"] == "sample" and not msg.get(
            "_control_flow_done", False
        )
        if not is_plain_sample:
            # Control-flow primitives receive the conditioning source on
            # their substitute stack and apply it inside the body.
            if msg["type"] == "control_flow":
                for source in (self.data, self.condition_fn):
                    if source is not None:
                        msg["kwargs"]["substitute_stack"].append(("condition", source))
            return
        if self.data is not None:
            value = self.data.get(msg["name"])
        else:
            value = self.condition_fn(msg)
        # A None value means "no side effect" for this site.
        if value is not None:
            msg["value"] = value
            msg["is_observed"] = True
class infer_config(Messenger):
    """
    Given a callable `fn` that contains NumPyro primitive calls
    and a callable `config_fn` taking a trace site and returning a dictionary,
    updates the value of the infer kwarg at a sample site to config_fn(site).

    :param fn: a stochastic function (callable containing NumPyro primitive calls)
    :param config_fn: a callable taking a site and returning an infer dict
    """

    def __init__(self, fn=None, config_fn=None):
        super().__init__(fn)
        self.config_fn = config_fn

    def process_message(self, msg):
        # Only sample sites carry an "infer" configuration dict.
        if msg["type"] == "sample":
            msg["infer"].update(self.config_fn(msg))
class lift(Messenger):
    """
    Given a stochastic function with ``param`` calls and a prior distribution,
    create a stochastic function where all param calls are replaced by sampling from prior.
    Prior should be a distribution or a dict of names to distributions.

    Consider the following NumPyro program:

        >>> import numpyro
        >>> import numpyro.distributions as dist
        >>> from numpyro.handlers import lift
        >>>
        >>> def model(x):
        ...     s = numpyro.param("s", 0.5)
        ...     z = numpyro.sample("z", dist.Normal(x, s))
        ...     return z ** 2
        >>> lifted_model = lift(model, prior={"s": dist.Exponential(0.3)})

    ``lift`` makes ``param`` statements behave like ``sample`` statements
    using the distributions in ``prior``. In this example, site `s` will now behave
    as if it was replaced with ``s = numpyro.sample("s", dist.Exponential(0.3))``.

    :param fn: function whose parameters will be lifted to random values
    :param prior: prior function in the form of a Distribution or a dict of Distributions
    """

    def __init__(self, fn=None, prior=None):
        super().__init__(fn)
        self.prior = prior
        self._samples_cache = {}

    def __enter__(self):
        # Reset the per-context cache of lifted draws.
        self._samples_cache = {}
        return super().__enter__()

    def __exit__(self, *args, **kwargs):
        self._samples_cache = {}
        return super().__exit__(*args, **kwargs)

    def process_message(self, msg):
        if msg["type"] != "param":
            return
        site_name = msg["name"]
        if isinstance(self.prior, dict):
            prior_dist = self.prior.get(site_name)
        else:
            prior_dist = self.prior
        if not isinstance(prior_dist, numpyro.distributions.Distribution):
            # Site not covered by the prior: leave the param statement as is.
            return
        # Rewrite the param message in place so it behaves as a sample site.
        original_kwargs = msg["kwargs"]
        msg["type"] = "sample"
        msg["fn"] = prior_dist
        msg["args"] = ()
        msg["kwargs"] = {
            "rng_key": original_kwargs.get("rng_key", None),
            "sample_shape": original_kwargs.get("sample_shape", ()),
        }
        msg["intermediates"] = []
        msg["infer"] = msg.get("infer", {})
        if site_name in self._samples_cache:
            # Multiple pyro.param statements with the same
            # name. Block the site and fix the value.
            msg["value"] = self._samples_cache[site_name]["value"]
            msg["is_observed"] = True
            msg["stop"] = True
        else:
            self._samples_cache[site_name] = msg
            msg["is_observed"] = False
class mask(Messenger):
    """
    This messenger masks out some of the sample statements elementwise.

    :param mask: a boolean or a boolean-valued array for masking elementwise log
        probability of sample sites (`True` includes a site, `False` excludes a site).
    """

    def __init__(self, fn=None, mask=True):
        # Validate eagerly: a non-bool mask would silently scale log-probs
        # instead of selecting them.
        if jnp.result_type(mask) != "bool":
            raise ValueError("`mask` should be a bool array.")
        self.mask = mask
        super().__init__(fn)

    def process_message(self, msg):
        if msg["type"] != "sample":
            if msg["type"] == "inspect":
                # Combine (logical AND) with any mask set by an outer handler.
                msg["mask"] = (
                    self.mask if msg["mask"] is None else (self.mask & msg["mask"])
                )
            return
        # Wrap the distribution so its log_prob is masked elementwise.
        msg["fn"] = msg["fn"].mask(self.mask)
class reparam(Messenger):
    """
    Reparametrizes each affected sample site into one or more auxiliary sample
    sites followed by a deterministic transformation [1].

    To specify reparameterizers, pass a ``config`` dict or callable to the
    constructor. See the :mod:`numpyro.infer.reparam` module for available
    reparameterizers.

    Note some reparameterizers can examine the ``*args,**kwargs`` inputs of
    functions they affect; these reparameterizers require using
    ``handlers.reparam`` as a decorator rather than as a context manager.

    [1] Maria I. Gorinova, Dave Moore, Matthew D. Hoffman (2019)
        "Automatic Reparameterisation of Probabilistic Programs"
        https://arxiv.org/pdf/1906.03028.pdf

    :param config: Configuration, either a dict mapping site name to
        :class:`~numpyro.infer.reparam.Reparam`,
        or a function mapping site to
        :class:`~numpyro.infer.reparam.Reparam` or None.
    :type config: dict or callable
    """

    def __init__(self, fn=None, config=None):
        assert isinstance(config, dict) or callable(config)
        self.config = config
        super().__init__(fn)

    def process_message(self, msg):
        if msg["type"] != "sample":
            return
        # Look up the reparameterizer: by site name for dict configs, by full
        # site metadata for callable configs.
        if isinstance(self.config, dict):
            reparam = self.config.get(msg["name"])
        else:
            reparam = self.config(msg)
        if reparam is None:
            return
        new_fn, value = reparam(msg["name"], msg["fn"], msg["value"])
        if value is not None:
            if new_fn is None:
                # Fully reparameterized: the site collapses to a deterministic
                # value; strip every key a deterministic message does not carry.
                msg["type"] = "deterministic"
                msg["value"] = value
                for key in list(msg.keys()):
                    if key not in ("type", "name", "value"):
                        del msg[key]
                return
            if msg["value"] is None:
                # The reparameterizer supplied the value, so downstream
                # handlers must treat this site as observed.
                msg["is_observed"] = True
            msg["value"] = value
        msg["fn"] = new_fn
class scale(Messenger):
    """
    This messenger rescales the log probability score.

    This is typically used for data subsampling or for stratified sampling of data
    (e.g. in fraud detection where negatives vastly outnumber positives).

    :param scale: a positive scaling factor that is broadcastable to the shape
        of log probability.
    :type scale: float or numpy.ndarray
    """

    def __init__(self, fn=None, scale=1.0):
        # Only concrete (non-traced) values can be validated eagerly.
        if not_jax_tracer(scale):
            if np.any(np.less_equal(scale, 0)):
                raise ValueError("'scale' argument should be positive.")
        self.scale = scale
        super().__init__(fn)

    def process_message(self, msg):
        if msg["type"] not in ("param", "sample", "plate"):
            return
        # Compose multiplicatively with any scale set by an outer handler.
        msg["scale"] = (
            self.scale if msg.get("scale") is None else self.scale * msg["scale"]
        )
class scope(Messenger):
    """
    This handler prepend a prefix followed by a divider to the name of sample sites.

    **Example:**

    .. doctest::

       >>> import numpyro
       >>> import numpyro.distributions as dist
       >>> from numpyro.handlers import scope, seed, trace

       >>> def model():
       ...     with scope(prefix="a"):
       ...         with scope(prefix="b", divider="."):
       ...             return numpyro.sample("x", dist.Bernoulli(0.5))
       ...
       >>> assert "a/b.x" in trace(seed(model, 0)).get_trace()

    :param fn: Python callable with NumPyro primitives.
    :param str prefix: a string to prepend to sample names
    :param str divider: a string to join the prefix and sample name; default to `'/'`
    """

    def __init__(self, fn=None, prefix="", divider="/"):
        self.prefix = prefix
        self.divider = divider
        super().__init__(fn)

    def process_message(self, msg):
        # Only named sites are prefixed; helper messages without a name pass through.
        if msg.get("name"):
            msg["name"] = f"{self.prefix}{self.divider}{msg['name']}"
            if msg.get("cond_indep_stack"):
                # Keep plate frame names consistent with the renamed site.
                msg["cond_indep_stack"] = [
                    CondIndepStackFrame(
                        f"{self.prefix}{self.divider}{i.name}", i.dim, i.size
                    )
                    for i in msg["cond_indep_stack"]
                ]
class seed(Messenger):
    """
    JAX uses a functional pseudo random number generator that requires passing
    in a seed :func:`~jax.random.PRNGKey` to every stochastic function. The
    `seed` handler allows us to initially seed a stochastic function with a
    :func:`~jax.random.PRNGKey`. Every call to the :func:`~numpyro.handlers.sample`
    primitive inside the function results in a splitting of this initial seed
    so that we use a fresh seed for each subsequent call without having to
    explicitly pass in a `PRNGKey` to each `sample` call.

    :param fn: Python callable with NumPyro primitives.
    :param rng_seed: a random number generator seed.
    :type rng_seed: int, jnp.ndarray scalar, or jax.random.PRNGKey

    .. note::

        Unlike in Pyro, `numpyro.sample` primitive cannot be used without wrapping
        it in seed handler since there is no global random state. As such,
        users need to use `seed` as a contextmanager to generate samples from
        distributions or as a decorator for their model callable (See below).

    **Example:**

    .. doctest::

       >>> from jax import random
       >>> import numpyro
       >>> import numpyro.handlers
       >>> import numpyro.distributions as dist

       >>> # as context manager
       >>> with handlers.seed(rng_seed=1):
       ...     x = numpyro.sample('x', dist.Normal(0., 1.))

       >>> def model():
       ...     return numpyro.sample('y', dist.Normal(0., 1.))

       >>> # as function decorator (/modifier)
       >>> y = handlers.seed(model, rng_seed=1)()
       >>> assert x == y
    """

    def __init__(self, fn=None, rng_seed=None):
        # Accept a raw int / scalar ndarray seed (converted to a key here) or a
        # ready-made PRNG key, which must be a uint32 array of shape (2,).
        if isinstance(rng_seed, int) or (
            isinstance(rng_seed, (np.ndarray, jnp.ndarray)) and not jnp.shape(rng_seed)
        ):
            rng_seed = random.PRNGKey(rng_seed)
        if not (
            isinstance(rng_seed, (np.ndarray, jnp.ndarray))
            and rng_seed.dtype == jnp.uint32
            and rng_seed.shape == (2,)
        ):
            raise TypeError("Incorrect type for rng_seed: {}".format(type(rng_seed)))
        self.rng_key = rng_seed
        super(seed, self).__init__(fn)

    def process_message(self, msg):
        # Supply a key to unobserved sample sites that don't already have one,
        # and to the helper message types that request randomness.
        if (
            msg["type"] == "sample"
            and not msg["is_observed"]
            and msg["kwargs"]["rng_key"] is None
        ) or msg["type"] in ["prng_key", "plate", "control_flow"]:
            if msg["value"] is not None:
                # no need to create a new key when value is available
                return
            # Split so this handler retains a fresh key for later sites.
            self.rng_key, rng_key_sample = random.split(self.rng_key)
            msg["kwargs"]["rng_key"] = rng_key_sample
class substitute(Messenger):
    """
    Given a callable `fn` and a dict `data` keyed by site names
    (alternatively, a callable `substitute_fn`), return a callable
    which substitutes all primitive calls in `fn` with values from
    `data` whose key matches the site name. If the site name
    is not present in `data`, there is no side effect.

    If a `substitute_fn` is provided, then the value at the site is
    replaced by the value returned from the call to `substitute_fn`
    for the given site.

    .. note:: This handler is mainly used for internal algorithms.
        For conditioning a generative model on observed data, please
        using the :class:`condition` handler.

    :param fn: Python callable with NumPyro primitives.
    :param dict data: dictionary of `numpy.ndarray` values keyed by
        site names.
    :param substitute_fn: callable that takes in a site dict and returns
        a numpy array or `None` (in which case the handler has no side
        effect).

    **Example:**

    .. doctest::

       >>> from jax import random
       >>> import numpyro
       >>> from numpyro.handlers import seed, substitute, trace
       >>> import numpyro.distributions as dist

       >>> def model():
       ...     numpyro.sample('a', dist.Normal(0., 1.))

       >>> model = seed(model, random.PRNGKey(0))
       >>> exec_trace = trace(substitute(model, {'a': -1})).get_trace()
       >>> assert exec_trace['a']['value'] == -1
    """

    def __init__(self, fn=None, data=None, substitute_fn=None):
        self.substitute_fn = substitute_fn
        self.data = data
        # Exactly one source of substitution values must be given.
        if sum((x is not None for x in (data, substitute_fn))) != 1:
            raise ValueError(
                "Only one of `data` or `substitute_fn` " "should be provided."
            )
        super(substitute, self).__init__(fn)

    def process_message(self, msg):
        if (msg["type"] not in ("sample", "param", "mutable", "plate")) or msg.get(
            "_control_flow_done", False
        ):
            if msg["type"] == "control_flow":
                # Defer to the control-flow primitive: push our data/fn onto
                # its substitute stack so sites inside the body are handled.
                if self.data is not None:
                    msg["kwargs"]["substitute_stack"].append(("substitute", self.data))
                if self.substitute_fn is not None:
                    msg["kwargs"]["substitute_stack"].append(
                        ("substitute", self.substitute_fn)
                    )
            return
        if self.data is not None:
            value = self.data.get(msg["name"])
        else:
            value = self.substitute_fn(msg)
        if value is not None:
            msg["value"] = value
class do(Messenger):
    """
    Given a stochastic function with some sample statements and a dictionary
    of values at names, set the return values of those sites equal to the values
    as if they were hard-coded to those values and introduce fresh sample sites
    with the same names whose values do not propagate.

    Composes freely with :func:`~numpyro.handlers.condition` to represent
    counterfactual distributions over potential outcomes. See Single World
    Intervention Graphs [1] for additional details and theory.

    This is equivalent to replacing `z = numpyro.sample("z", ...)` with `z = 1.`
    and introducing a fresh sample site `numpyro.sample("z", ...)` whose value is
    not used elsewhere.

    **References:**

    1. *Single World Intervention Graphs: A Primer*,
       Thomas Richardson, James Robins

    :param fn: a stochastic function (callable containing Pyro primitive calls)
    :param data: a ``dict`` mapping sample site names to interventions

    **Example:**

    .. doctest::

       >>> import jax.numpy as jnp
       >>> import numpyro
       >>> from numpyro.handlers import do, trace, seed
       >>> import numpyro.distributions as dist

       >>> def model(x):
       ...     s = numpyro.sample("s", dist.LogNormal())
       ...     z = numpyro.sample("z", dist.Normal(x, s))
       ...     return z ** 2

       >>> intervened_model = handlers.do(model, data={"z": 1.})
       >>> with trace() as exec_trace:
       ...     z_square = seed(intervened_model, 0)(1)
       >>> assert exec_trace['z']['value'] != 1.
       >>> assert not exec_trace['z']['is_observed']
       >>> assert not exec_trace['z'].get('stop', None)
       >>> assert z_square == 1
    """

    def __init__(self, fn=None, data=None):
        self.data = data
        # Identity tag used to detect when a handler revisits a site that was
        # already intervened on (e.g. two stacked `do` handlers).
        self._intervener_id = str(id(self))
        super(do, self).__init__(fn)

    def process_message(self, msg):
        if msg["type"] != "sample":
            return
        if (
            msg.get("_intervener_id", None) != self._intervener_id
            and self.data.get(msg["name"]) is not None
        ):
            if msg.get("_intervener_id", None) is not None:
                # A different do-handler already intervened on this site.
                warnings.warn(
                    "Attempting to intervene on variable {} multiple times,"
                    "this is almost certainly incorrect behavior".format(msg["name"]),
                    RuntimeWarning,
                    stacklevel=find_stack_level(),
                )
            msg["_intervener_id"] = self._intervener_id

            # split node, avoid reapplying self recursively to new node
            new_msg = msg.copy()
            apply_stack(new_msg)

            # Fix the original message to the intervention value and mangle its
            # name so it does not clash with the freshly created sample site.
            intervention = self.data.get(msg["name"])
            msg["name"] = msg["name"] + "__CF"  # mangle old name
            msg["value"] = intervention
            msg["is_observed"] = True
            msg["stop"] = True
import warnings
import numpy as np
from jax import random
import jax.numpy as jnp
import numpyro
from numpyro.distributions.distribution import COERCIONS
from numpyro.primitives import (
_PYRO_STACK,
CondIndepStackFrame,
Messenger,
apply_stack,
plate,
)
from numpyro.util import find_stack_level, not_jax_tracer
# Names exported by ``from numpyro.handlers import *``.
__all__ = [
    "block",
    "collapse",
    "condition",
    "infer_config",
    "lift",
    "mask",
    "reparam",
    "replay",
    "scale",
    "scope",
    "seed",
    "substitute",
    "trace",
    "do",
]
class trace(Messenger):
    """
    Returns a handler that records the inputs and outputs at primitive calls
    inside `fn`.

    **Example:**

    .. doctest::

       >>> from jax import random
       >>> import numpyro
       >>> import numpyro.distributions as dist
       >>> from numpyro.handlers import seed, trace
       >>> import pprint as pp

       >>> def model():
       ...     numpyro.sample('a', dist.Normal(0., 1.))

       >>> exec_trace = trace(seed(model, random.PRNGKey(0))).get_trace()
       >>> pp.pprint(exec_trace)  # doctest: +SKIP
       OrderedDict([('a',
                     {'args': (),
                      'fn': <numpyro.distributions.continuous.Normal object at 0x7f9e689b1eb8>,
                      'is_observed': False,
                      'kwargs': {'rng_key': DeviceArray([0, 0], dtype=uint32)},
                      'name': 'a',
                      'type': 'sample',
                      'value': DeviceArray(-0.20584235, dtype=float32)})])
    """

    def __enter__(self):
        super(trace, self).__enter__()
        # Fresh dict per activation; sites are recorded in execution order.
        self.trace = OrderedDict()
        return self.trace

    def postprocess_message(self, msg):
        if "name" not in msg:
            # skip recording helper messages e.g. `control_flow`, `to_data`, `to_funsor`
            # which has no name
            return
        # Only sample sites are required to be unique; other site types may repeat.
        assert not (
            msg["type"] == "sample" and msg["name"] in self.trace
        ), "all sites must have unique names but got `{}` duplicated".format(
            msg["name"]
        )
        # Copy so later mutations of the message don't alter the recorded site.
        self.trace[msg["name"]] = msg.copy()

    def get_trace(self, *args, **kwargs):
        """
        Run the wrapped callable and return the recorded trace.

        :param `*args`: arguments to the callable.
        :param `**kwargs`: keyword arguments to the callable.
        :return: `OrderedDict` containing the execution trace.
        """
        self(*args, **kwargs)
        return self.trace
class replay(Messenger):
    """
    Given a callable `fn` and an execution trace `trace`,
    return a callable which substitutes `sample` calls in `fn` with
    values from the corresponding site names in `trace`.

    :param fn: Python callable with NumPyro primitives.
    :param trace: an OrderedDict containing execution metadata.
    :param guide_trace: deprecated alias for ``trace``.

    **Example:**

    .. doctest::

       >>> from jax import random
       >>> import numpyro
       >>> import numpyro.distributions as dist
       >>> from numpyro.handlers import replay, seed, trace

       >>> def model():
       ...     numpyro.sample('a', dist.Normal(0., 1.))

       >>> exec_trace = trace(seed(model, random.PRNGKey(0))).get_trace()
       >>> print(exec_trace['a']['value'])  # doctest: +SKIP
       -0.20584235
       >>> replayed_trace = trace(replay(model, exec_trace)).get_trace()
       >>> print(exec_trace['a']['value'])  # doctest: +SKIP
       -0.20584235
       >>> assert replayed_trace['a']['value'] == exec_trace['a']['value']
    """

    def __init__(self, fn=None, trace=None, guide_trace=None):
        # `guide_trace` is the deprecated alias of `trace`: warn and fall back
        # in a single guard (the condition was previously tested twice in a row).
        if guide_trace is not None:
            warnings.warn(
                "`guide_trace` argument is deprecated. Please replace it by `trace`.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )
            trace = guide_trace
        assert trace is not None
        self.trace = trace
        super(replay, self).__init__(fn)

    def process_message(self, msg):
        # Only sites recorded in the given trace are replayed; others pass through.
        if msg["type"] in ("sample", "plate") and msg["name"] in self.trace:
            msg["value"] = self.trace[msg["name"]]["value"]
class block(Messenger):
    """
    Selectively hides primitive sites from effect handlers higher on the stack.

    Given a callable `fn`, returns another callable whose primitive sites are
    blocked (not propagated to outer handlers) whenever `hide_fn` returns True
    for the site's metadata dict.

    :param callable fn: Python callable with NumPyro primitives.
    :param callable hide_fn: function which when given a dictionary containing
        site-level metadata returns whether it should be blocked.
    :param list hide: list of site names to hide.

    **Example:**

    .. doctest::

       >>> from jax import random
       >>> import numpyro
       >>> from numpyro.handlers import block, seed, trace
       >>> import numpyro.distributions as dist

       >>> def model():
       ...     a = numpyro.sample('a', dist.Normal(0., 1.))
       ...     return numpyro.sample('b', dist.Normal(a, 1.))

       >>> model = seed(model, random.PRNGKey(0))
       >>> block_all = block(model)
       >>> block_a = block(model, lambda site: site['name'] == 'a')
       >>> trace_block_all = trace(block_all).get_trace()
       >>> assert not {'a', 'b'}.intersection(trace_block_all.keys())
       >>> trace_block_a = trace(block_a).get_trace()
       >>> assert 'a' not in trace_block_a
       >>> assert 'b' in trace_block_a
    """

    def __init__(self, fn=None, hide_fn=None, hide=None):
        # Precedence: explicit predicate, then a name list, then "hide everything".
        if hide_fn is not None:
            predicate = hide_fn
        elif hide is not None:
            predicate = lambda msg: msg.get("name") in hide
        else:
            predicate = lambda msg: True
        self.hide_fn = predicate
        super(block, self).__init__(fn)

    def process_message(self, msg):
        # Mark the message so it stops propagating up the handler stack.
        if self.hide_fn(msg):
            msg["stop"] = True
class collapse(trace):
    """
    EXPERIMENTAL Collapses all sites in the context by lazily sampling and
    attempting to use conjugacy relations. If no conjugacy is known this will
    fail. Code using the results of sample sites must be written to accept
    Funsors rather than Tensors. This requires ``funsor`` to be installed.
    """

    # Lazily-created coercion from distributions to funsor terms; shared by
    # every instance of this handler.
    _coerce = None

    def __init__(self, *args, **kwargs):
        if collapse._coerce is None:
            # Import funsor lazily so the dependency stays optional.
            import funsor
            from funsor.distribution import CoerceDistributionToFunsor

            funsor.set_backend("jax")
            collapse._coerce = CoerceDistributionToFunsor("jax")
        super().__init__(*args, **kwargs)

    def process_message(self, msg):
        from funsor.terms import Funsor

        if msg["type"] == "sample":
            if msg["value"] is None:
                # Defer sampling: use the site name as a symbolic placeholder.
                msg["value"] = msg["name"]
            if isinstance(msg["fn"], Funsor) or isinstance(msg["value"], (str, Funsor)):
                # Symbolic/lazy sites are handled here; hide them from outer handlers.
                msg["stop"] = True

    def __enter__(self):
        # Plates already open outside this context must be preserved (not
        # eliminated by the sum-product in __exit__).
        self.preserved_plates = frozenset(
            h.name for h in _PYRO_STACK if isinstance(h, plate)
        )
        COERCIONS.append(self._coerce)
        return super().__enter__()

    def __exit__(self, exc_type, exc_value, traceback):
        import funsor

        _coerce = COERCIONS.pop()
        assert _coerce is self._coerce
        super().__exit__(exc_type, exc_value, traceback)
        if exc_type is not None:
            # Don't emit the collapsed factor if the body raised.
            return

        # Convert delayed statements to pyro.factor()
        reduced_vars = []
        log_prob_terms = []
        plates = frozenset()
        for name, site in self.trace.items():
            if site["type"] != "sample":
                continue
            if not site["is_observed"]:
                reduced_vars.append(name)
            dim_to_name = {f.dim: f.name for f in site["cond_indep_stack"]}
            fn = funsor.to_funsor(site["fn"], funsor.Real, dim_to_name)
            value = site["value"]
            if not isinstance(value, str):
                value = funsor.to_funsor(site["value"], fn.inputs["value"], dim_to_name)
            log_prob_terms.append(fn(value=value))
            plates |= frozenset(f.name for f in site["cond_indep_stack"])
        assert log_prob_terms, "nothing to collapse"
        reduced_plates = plates - self.preserved_plates
        # Eliminate latent variables with logaddexp and local plates with add.
        log_prob = funsor.sum_product.sum_product(
            funsor.ops.logaddexp,
            funsor.ops.add,
            log_prob_terms,
            eliminate=frozenset(reduced_vars) | reduced_plates,
            plates=plates,
        )
        name = reduced_vars[0]
        numpyro.factor(name, log_prob.data)
class condition(Messenger):
    """
    Conditions unobserved sample sites to values from `data` or `condition_fn`.
    Similar to :class:`~numpyro.handlers.substitute` except that it only affects
    `sample` sites and changes the `is_observed` property to `True`.

    :param fn: Python callable with NumPyro primitives.
    :param dict data: dictionary of `numpy.ndarray` values keyed by
        site names.
    :param condition_fn: callable that takes in a site dict and returns
        a numpy array or `None` (in which case the handler has no side
        effect).

    **Example:**

    .. doctest::

       >>> from jax import random
       >>> import numpyro
       >>> from numpyro.handlers import condition, seed, substitute, trace
       >>> import numpyro.distributions as dist

       >>> def model():
       ...     numpyro.sample('a', dist.Normal(0., 1.))

       >>> model = seed(model, random.PRNGKey(0))
       >>> exec_trace = trace(condition(model, {'a': -1})).get_trace()
       >>> assert exec_trace['a']['value'] == -1
       >>> assert exec_trace['a']['is_observed']
    """

    def __init__(self, fn=None, data=None, condition_fn=None):
        self.condition_fn = condition_fn
        self.data = data
        # Exactly one source of conditioning values must be given.
        if sum((x is not None for x in (data, condition_fn))) != 1:
            # Fixed copy-paste error: the message previously referred to
            # `substitute_fn`, which is not a parameter of this handler.
            raise ValueError(
                "Only one of `data` or `condition_fn` should be provided."
            )
        super(condition, self).__init__(fn)

    def process_message(self, msg):
        if (msg["type"] != "sample") or msg.get("_control_flow_done", False):
            if msg["type"] == "control_flow":
                # Defer to the control-flow primitive: push our data/fn onto
                # its substitute stack so sites inside the body are conditioned.
                if self.data is not None:
                    msg["kwargs"]["substitute_stack"].append(("condition", self.data))
                if self.condition_fn is not None:
                    msg["kwargs"]["substitute_stack"].append(
                        ("condition", self.condition_fn)
                    )
            return
        if self.data is not None:
            value = self.data.get(msg["name"])
        else:
            value = self.condition_fn(msg)
        if value is not None:
            msg["value"] = value
            msg["is_observed"] = True
class infer_config(Messenger):
    """
    Updates the ``infer`` dict of each sample site with ``config_fn(site)``.

    Given a callable `fn` that contains NumPyro primitive calls and a callable
    `config_fn` taking a trace site and returning a dictionary, the returned
    entries are merged into the site's ``infer`` kwarg.

    :param fn: a stochastic function (callable containing NumPyro primitive calls)
    :param config_fn: a callable taking a site and returning an infer dict
    """

    def __init__(self, fn=None, config_fn=None):
        super().__init__(fn)
        self.config_fn = config_fn

    def process_message(self, msg):
        # Guard clause: only sample sites carry an ``infer`` configuration.
        if msg["type"] != "sample":
            return
        msg["infer"].update(self.config_fn(msg))
class lift(Messenger):
    """
    Given a stochastic function with ``param`` calls and a prior distribution,
    create a stochastic function where all param calls are replaced by sampling from prior.
    Prior should be a distribution or a dict of names to distributions.

    Consider the following NumPyro program:

        >>> import numpyro
        >>> import numpyro.distributions as dist
        >>> from numpyro.handlers import lift
        >>>
        >>> def model(x):
        ...     s = numpyro.param("s", 0.5)
        ...     z = numpyro.sample("z", dist.Normal(x, s))
        ...     return z ** 2
        >>> lifted_model = lift(model, prior={"s": dist.Exponential(0.3)})

    ``lift`` makes ``param`` statements behave like ``sample`` statements
    using the distributions in ``prior``. In this example, site `s` will now behave
    as if it was replaced with ``s = numpyro.sample("s", dist.Exponential(0.3))``.

    :param fn: function whose parameters will be lifted to random values
    :param prior: prior function in the form of a Distribution or a dict of Distributions
    """

    def __init__(self, fn=None, prior=None):
        super().__init__(fn)
        self.prior = prior
        # site name -> recorded sample message; lets repeated ``param`` calls
        # with the same name share one sampled value.
        self._samples_cache = {}

    def __enter__(self):
        # Fresh cache per handler activation.
        self._samples_cache = {}
        return super().__enter__()

    def __exit__(self, *args, **kwargs):
        # Drop cached samples on exit so nothing leaks across runs.
        self._samples_cache = {}
        return super().__exit__(*args, **kwargs)

    def process_message(self, msg):
        """Turn a ``param`` message into a ``sample`` message drawn from the prior."""
        if msg["type"] != "param":
            return
        name = msg["name"]
        # A dict prior is keyed by site name; a bare Distribution applies to all params.
        fn = self.prior.get(name) if isinstance(self.prior, dict) else self.prior
        if isinstance(fn, numpyro.distributions.Distribution):
            # Convert in place: drop param init args, keep sample-site kwargs.
            msg["type"] = "sample"
            msg["fn"] = fn
            msg["args"] = ()
            msg["kwargs"] = {
                "rng_key": msg["kwargs"].get("rng_key", None),
                "sample_shape": msg["kwargs"].get("sample_shape", ()),
            }
            msg["intermediates"] = []
            msg["infer"] = msg.get("infer", {})
        else:
            # otherwise leave as is
            return
        if name in self._samples_cache:
            # Multiple pyro.param statements with the same
            # name. Block the site and fix the value.
            msg["value"] = self._samples_cache[name]["value"]
            msg["is_observed"] = True
            msg["stop"] = True
        else:
            self._samples_cache[name] = msg
            msg["is_observed"] = False
class mask(Messenger):
    """
    This messenger masks out some of the sample statements elementwise.

    :param mask: a boolean or a boolean-valued array for masking elementwise log
        probability of sample sites (`True` includes a site, `False` excludes a site).
    """

    def __init__(self, fn=None, mask=True):
        # Fail fast on non-boolean masks, which would scale rather than select.
        if jnp.result_type(mask) != "bool":
            raise ValueError("`mask` should be a bool array.")
        self.mask = mask
        super().__init__(fn)

    def process_message(self, msg):
        if msg["type"] != "sample":
            if msg["type"] == "inspect":
                # AND with any mask already set by an outer handler.
                msg["mask"] = (
                    self.mask if msg["mask"] is None else (self.mask & msg["mask"])
                )
            return
        # Wrap the distribution so its log_prob is masked elementwise.
        msg["fn"] = msg["fn"].mask(self.mask)
class reparam(Messenger):
    """
    Reparametrizes each affected sample site into one or more auxiliary sample
    sites followed by a deterministic transformation [1].

    To specify reparameterizers, pass a ``config`` dict or callable to the
    constructor. See the :mod:`numpyro.infer.reparam` module for available
    reparameterizers.

    Note some reparameterizers can examine the ``*args,**kwargs`` inputs of
    functions they affect; these reparameterizers require using
    ``handlers.reparam`` as a decorator rather than as a context manager.

    [1] Maria I. Gorinova, Dave Moore, Matthew D. Hoffman (2019)
        "Automatic Reparameterisation of Probabilistic Programs"
        https://arxiv.org/pdf/1906.03028.pdf

    :param config: Configuration, either a dict mapping site name to
        :class:`~numpyro.infer.reparam.Reparam`,
        or a function mapping site to
        :class:`~numpyro.infer.reparam.Reparam` or None.
    :type config: dict or callable
    """

    def __init__(self, fn=None, config=None):
        assert isinstance(config, dict) or callable(config)
        self.config = config
        super().__init__(fn)

    def process_message(self, msg):
        if msg["type"] != "sample":
            return
        # Dict configs are keyed by site name; callable configs see the full site.
        if isinstance(self.config, dict):
            reparam = self.config.get(msg["name"])
        else:
            reparam = self.config(msg)
        if reparam is None:
            return
        new_fn, value = reparam(msg["name"], msg["fn"], msg["value"])
        if value is not None:
            if new_fn is None:
                # Fully reparameterized: collapse the site to a deterministic
                # value and strip keys a deterministic message does not carry.
                msg["type"] = "deterministic"
                msg["value"] = value
                for key in list(msg.keys()):
                    if key not in ("type", "name", "value"):
                        del msg[key]
                return
            if msg["value"] is None:
                # Value was supplied by the reparameterizer: mark observed so
                # downstream handlers don't resample it.
                msg["is_observed"] = True
            msg["value"] = value
        msg["fn"] = new_fn
class scale(Messenger):
    """
    Rescales the log probability score of the handled sites.

    Typical uses are data subsampling and stratified sampling of data
    (e.g. in fraud detection where negatives vastly outnumber positives).

    :param scale: a positive scaling factor that is broadcastable to the shape
        of log probability.
    :type scale: float or numpy.ndarray
    """

    def __init__(self, fn=None, scale=1.0):
        # Concrete (non-traced) scales are validated eagerly; traced values
        # cannot be inspected here.
        if not_jax_tracer(scale) and np.any(np.less_equal(scale, 0)):
            raise ValueError("'scale' argument should be positive.")
        self.scale = scale
        super().__init__(fn)

    def process_message(self, msg):
        if msg["type"] not in ("param", "sample", "plate"):
            return
        # Compose multiplicatively with any scale set by an outer handler.
        previous = msg.get("scale")
        msg["scale"] = self.scale if previous is None else self.scale * previous
class scope(Messenger):
    """
    This handler prepend a prefix followed by a divider to the name of sample sites.

    **Example:**

    .. doctest::

       >>> import numpyro
       >>> import numpyro.distributions as dist
       >>> from numpyro.handlers import scope, seed, trace

       >>> def model():
       ...     with scope(prefix="a"):
       ...         with scope(prefix="b", divider="."):
       ...             return numpyro.sample("x", dist.Bernoulli(0.5))
       ...
       >>> assert "a/b.x" in trace(seed(model, 0)).get_trace()

    :param fn: Python callable with NumPyro primitives.
    :param str prefix: a string to prepend to sample names
    :param str divider: a string to join the prefix and sample name; default to `'/'`
    """

    def __init__(self, fn=None, prefix="", divider="/"):
        self.prefix = prefix
        self.divider = divider
        super().__init__(fn)

    def process_message(self, msg):
        # Nameless helper messages pass through unchanged.
        if msg.get("name"):
            msg["name"] = f"{self.prefix}{self.divider}{msg['name']}"
            if msg.get("cond_indep_stack"):
                # Rename plate frames so they stay consistent with the site name.
                msg["cond_indep_stack"] = [
                    CondIndepStackFrame(
                        f"{self.prefix}{self.divider}{i.name}", i.dim, i.size
                    )
                    for i in msg["cond_indep_stack"]
                ]
class seed(Messenger):
    """
    JAX uses a functional pseudo random number generator that requires passing
    in a seed :func:`~jax.random.PRNGKey` to every stochastic function. The
    `seed` handler allows us to initially seed a stochastic function with a
    :func:`~jax.random.PRNGKey`. Every call to the :func:`~numpyro.handlers.sample`
    primitive inside the function results in a splitting of this initial seed
    so that we use a fresh seed for each subsequent call without having to
    explicitly pass in a `PRNGKey` to each `sample` call.

    :param fn: Python callable with NumPyro primitives.
    :param rng_seed: a random number generator seed.
    :type rng_seed: int, jnp.ndarray scalar, or jax.random.PRNGKey

    .. note::

        Unlike in Pyro, `numpyro.sample` primitive cannot be used without wrapping
        it in seed handler since there is no global random state. As such,
        users need to use `seed` as a contextmanager to generate samples from
        distributions or as a decorator for their model callable (See below).

    **Example:**

    .. doctest::

       >>> from jax import random
       >>> import numpyro
       >>> import numpyro.handlers
       >>> import numpyro.distributions as dist

       >>> # as context manager
       >>> with handlers.seed(rng_seed=1):
       ...     x = numpyro.sample('x', dist.Normal(0., 1.))

       >>> def model():
       ...     return numpyro.sample('y', dist.Normal(0., 1.))

       >>> # as function decorator (/modifier)
       >>> y = handlers.seed(model, rng_seed=1)()
       >>> assert x == y
    """

    def __init__(self, fn=None, rng_seed=None):
        # Accept a raw int / scalar ndarray seed (converted to a key) or a
        # ready-made PRNG key, which must be a uint32 array of shape (2,).
        if isinstance(rng_seed, int) or (
            isinstance(rng_seed, (np.ndarray, jnp.ndarray)) and not jnp.shape(rng_seed)
        ):
            rng_seed = random.PRNGKey(rng_seed)
        if not (
            isinstance(rng_seed, (np.ndarray, jnp.ndarray))
            and rng_seed.dtype == jnp.uint32
            and rng_seed.shape == (2,)
        ):
            raise TypeError("Incorrect type for rng_seed: {}".format(type(rng_seed)))
        self.rng_key = rng_seed
        super(seed, self).__init__(fn)

    def process_message(self, msg):
        # Supply a key to unobserved sample sites that lack one, and to the
        # helper message types that request randomness.
        if (
            msg["type"] == "sample"
            and not msg["is_observed"]
            and msg["kwargs"]["rng_key"] is None
        ) or msg["type"] in ["prng_key", "plate", "control_flow"]:
            if msg["value"] is not None:
                # no need to create a new key when value is available
                return
            # Split so this handler keeps a fresh key for subsequent sites.
            self.rng_key, rng_key_sample = random.split(self.rng_key)
            msg["kwargs"]["rng_key"] = rng_key_sample
class substitute(Messenger):
    """
    Given a callable `fn` and a dict `data` keyed by site names
    (alternatively, a callable `substitute_fn`), return a callable
    which substitutes all primitive calls in `fn` with values from
    `data` whose key matches the site name. If the site name
    is not present in `data`, there is no side effect.

    If a `substitute_fn` is provided, then the value at the site is
    replaced by the value returned from the call to `substitute_fn`
    for the given site.

    .. note:: This handler is mainly used for internal algorithms.
        For conditioning a generative model on observed data, please
        using the :class:`condition` handler.

    :param fn: Python callable with NumPyro primitives.
    :param dict data: dictionary of `numpy.ndarray` values keyed by
        site names.
    :param substitute_fn: callable that takes in a site dict and returns
        a numpy array or `None` (in which case the handler has no side
        effect).

    **Example:**

    .. doctest::

       >>> from jax import random
       >>> import numpyro
       >>> from numpyro.handlers import seed, substitute, trace
       >>> import numpyro.distributions as dist

       >>> def model():
       ...     numpyro.sample('a', dist.Normal(0., 1.))

       >>> model = seed(model, random.PRNGKey(0))
       >>> exec_trace = trace(substitute(model, {'a': -1})).get_trace()
       >>> assert exec_trace['a']['value'] == -1
    """

    def __init__(self, fn=None, data=None, substitute_fn=None):
        self.substitute_fn = substitute_fn
        self.data = data
        # Exactly one source of substitution values must be provided.
        if sum((x is not None for x in (data, substitute_fn))) != 1:
            raise ValueError(
                "Only one of `data` or `substitute_fn` " "should be provided."
            )
        super(substitute, self).__init__(fn)

    def process_message(self, msg):
        if (msg["type"] not in ("sample", "param", "mutable", "plate")) or msg.get(
            "_control_flow_done", False
        ):
            if msg["type"] == "control_flow":
                # Defer to the control-flow primitive: push our data/fn onto
                # its substitute stack so sites inside the body are handled.
                if self.data is not None:
                    msg["kwargs"]["substitute_stack"].append(("substitute", self.data))
                if self.substitute_fn is not None:
                    msg["kwargs"]["substitute_stack"].append(
                        ("substitute", self.substitute_fn)
                    )
            return
        if self.data is not None:
            value = self.data.get(msg["name"])
        else:
            value = self.substitute_fn(msg)
        if value is not None:
            msg["value"] = value
class do(Messenger):
    """
    Given a stochastic function with some sample statements and a dictionary
    of values at names, set the return values of those sites equal to the values
    as if they were hard-coded to those values and introduce fresh sample sites
    with the same names whose values do not propagate.

    Composes freely with :func:`~numpyro.handlers.condition` to represent
    counterfactual distributions over potential outcomes. See Single World
    Intervention Graphs [1] for additional details and theory.

    This is equivalent to replacing `z = numpyro.sample("z", ...)` with `z = 1.`
    and introducing a fresh sample site `numpyro.sample("z", ...)` whose value is
    not used elsewhere.

    **References:**

    1. *Single World Intervention Graphs: A Primer*,
       Thomas Richardson, James Robins

    :param fn: a stochastic function (callable containing Pyro primitive calls)
    :param data: a ``dict`` mapping sample site names to interventions

    **Example:**

    .. doctest::

       >>> import jax.numpy as jnp
       >>> import numpyro
       >>> from numpyro.handlers import do, trace, seed
       >>> import numpyro.distributions as dist

       >>> def model(x):
       ...     s = numpyro.sample("s", dist.LogNormal())
       ...     z = numpyro.sample("z", dist.Normal(x, s))
       ...     return z ** 2

       >>> intervened_model = handlers.do(model, data={"z": 1.})
       >>> with trace() as exec_trace:
       ...     z_square = seed(intervened_model, 0)(1)
       >>> assert exec_trace['z']['value'] != 1.
       >>> assert not exec_trace['z']['is_observed']
       >>> assert not exec_trace['z'].get('stop', None)
       >>> assert z_square == 1
    """

    def __init__(self, fn=None, data=None):
        self.data = data
        # Identity tag so a site already intervened on by this handler (or a
        # different `do` handler) can be recognized on revisit.
        self._intervener_id = str(id(self))
        super(do, self).__init__(fn)

    def process_message(self, msg):
        if msg["type"] != "sample":
            return
        if (
            msg.get("_intervener_id", None) != self._intervener_id
            and self.data.get(msg["name"]) is not None
        ):
            if msg.get("_intervener_id", None) is not None:
                # A different do-handler already intervened on this site.
                warnings.warn(
                    "Attempting to intervene on variable {} multiple times,"
                    "this is almost certainly incorrect behavior".format(msg["name"]),
                    RuntimeWarning,
                    stacklevel=find_stack_level(),
                )
            msg["_intervener_id"] = self._intervener_id

            # split node, avoid reapplying self recursively to new node
            new_msg = msg.copy()
            apply_stack(new_msg)

            # Fix the original message to the intervention value and mangle its
            # name so it cannot clash with the freshly created sample site.
            intervention = self.data.get(msg["name"])
            msg["name"] = msg["name"] + "__CF"  # mangle old name
            msg["value"] = intervention
            msg["is_observed"] = True
            msg["stop"] = True
import time
import os
from cooka.common import util, dataset_util
from cooka.common.consts import BATCH_PREDICTION_COL
from cooka.common.log import log_core as logger
from cooka.common import client
from cooka.common import consts
from cooka.common.model import AnalyzeStep, JobStep, PredictStepType, Model, Feature, ModelFeature, FrameworkType
import pandas as pd
from deeptables.models import DeepTable
from os import path as P
import argparse
parser = argparse.ArgumentParser(description='Analyze dataset.', add_help=True)
parser.add_argument("--input_file_path", help="input_file_path", default=None, required=True)
parser.add_argument("--reserved_cols", help="reserved_cols", default=None, required=False)
parser.add_argument("--model_name", help="model_name", default=None, required=True)
parser.add_argument("--dataset_name", help="dataset_name", default=None, required=True)
parser.add_argument("--job_name", help="job_name", default=None, required=True)
parser.add_argument("--has_header", help="has_header", default=None, required=True)
parser.add_argument("--default_headers", help="default_headers", default=None, required=False)
parser.add_argument("--server_portal", help="server_portal", default=None, required=True)
# [1]. read config
args_namespace = parser.parse_args()
input_file_path = args_namespace.input_file_path
reserved_cols = args_namespace.reserved_cols
model_name = args_namespace.model_name
dataset_name = args_namespace.dataset_name
job_name = args_namespace.job_name
has_header = eval(args_namespace.has_header)
default_headers = args_namespace.default_headers
server_portal = args_namespace.server_portal
print("========Batch Predict Config========")
print(f"input_file_path: {input_file_path}")
print(f"reserved_cols: {reserved_cols}")
print(f"model_name: {model_name}")
print(f"dataset_name: {dataset_name}")
print(f"job_name: {job_name}")
print(f"has_header: {has_header}")
print(f"default_headers: {default_headers}")
print(f"server_portal: {server_portal}")
print("====================================")
# [2]. load data
t_load_start = time.time()
load_extension = None
load_status = JobStep.Status.Succeed
try:
# May has no header
if default_headers is not None:
default_headers = default_headers.split(",")
df = util.read_csv(input_file_path, has_header, default_headers)
df_origin = df.copy()
load_extension = {
"n_cols": df.shape[0],
"n_rows": df.shape[1]
}
except Exception as e:
load_status = JobStep.Status.Failed
raise e
finally:
client. batch_predict_callback(portal=server_portal,
dataset_name=dataset_name,
model_name=model_name,
batch_predict_job_name=job_name,
type=PredictStepType.Load,
status=load_status,
took=time.time()-t_load_start,
extension=load_extension)
logger.info("Load dataset finished. ")
# [3]. load model
t_load_model_start = time.time()
load_model_status = JobStep.Status.Succeed
load_model_extension = None
try:
model_dict = client.retrieve_model(portal=server_portal, dataset_name=dataset_name, model_name=model_name)
features = model_dict['inputs']
logger.info("Before cast type: ")
logger.info(df.dtypes)
X = dataset_util.cast_df(df, features, remove_unnecessary_cols=True)
logger.info("After cast type: ")
logger.info(X.dtypes)
abs_model_path = P.join(consts.DATA_DIR, model_dict['model_path'])
framework = model_dict['framework']
if framework == FrameworkType.DeepTables:
model = DeepTable.load(P.join(abs_model_path, 'model'))
elif framework == FrameworkType.GBM:
model = util.load_pkl(P.join(abs_model_path, 'model.pkl'))
else:
raise ValueError(f"Unseen model framework: {framework}")
load_model_extension = {
"model_size": model_dict['model_file_size']
}
except Exception as e:
evaluate_status = JobStep.Status.Failed
raise e
finally:
client.batch_predict_callback(portal=server_portal,
dataset_name=dataset_name,
model_name=model_name,
batch_predict_job_name=job_name,
type=PredictStepType.LoadModel,
status=load_model_status,
took=time.time()-t_load_model_start,
extension=load_model_extension)
logger.info("Load model finished. ")
# [4]. evaluate dataset
t_evaluate_start = time.time()
evaluate_status = JobStep.Status.Succeed
try:
y_pred = model.predict(X)
# proba = dt_model.predict_proba(X)
except Exception as e:
evaluate_status = JobStep.Status.Failed
raise e
finally:
client.batch_predict_callback(portal=server_portal,
dataset_name=dataset_name,
model_name=model_name,
batch_predict_job_name=job_name,
type=PredictStepType.Evaluate,
status=evaluate_status,
took=time.time()-t_evaluate_start,
extension=None)
logger.info("Evaluate input file finished. ")
# [4]. write result
write_result_extension = {}
t_write_result_start = time.time()
write_result_status = AnalyzeStep.Status.Succeed
try:
df = X # remained cols
if reserved_cols is not None and len(reserved_cols) > 0:
result_df = df_origin[reserved_cols]
result_df[BATCH_PREDICTION_COL] = y_pred
else:
result_df = pd.DataFrame(data={BATCH_PREDICTION_COL: y_pred})
output_path = P.join(consts.PATH_TMP_PREDICT, f"{model_name}_{util.human_datetime()}.csv")
if not P.exists(consts.PATH_TMP_PREDICT):
os.makedirs(consts.PATH_TMP_PREDICT)
result_df.to_csv(output_path, index=False)
logger.info(f"Write result finished at: {output_path}")
write_result_extension = {
"output_path": util.relative_path(output_path)
}
except Exception as e:
write_result_status = AnalyzeStep.Status.Failed
raise e
finally:
client.batch_predict_callback(portal=server_portal,
dataset_name=dataset_name,
model_name=model_name,
batch_predict_job_name=job_name,
type=PredictStepType.WriteResult,
status=write_result_status,
took=time.time()-t_write_result_start,
extension=write_result_extension) | cooka/core/batch_predict_job.py | import time
import os
from cooka.common import util, dataset_util
from cooka.common.consts import BATCH_PREDICTION_COL
from cooka.common.log import log_core as logger
from cooka.common import client
from cooka.common import consts
from cooka.common.model import AnalyzeStep, JobStep, PredictStepType, Model, Feature, ModelFeature, FrameworkType
import pandas as pd
from deeptables.models import DeepTable
from os import path as P
import argparse
parser = argparse.ArgumentParser(description='Analyze dataset.', add_help=True)
parser.add_argument("--input_file_path", help="input_file_path", default=None, required=True)
parser.add_argument("--reserved_cols", help="reserved_cols", default=None, required=False)
parser.add_argument("--model_name", help="model_name", default=None, required=True)
parser.add_argument("--dataset_name", help="dataset_name", default=None, required=True)
parser.add_argument("--job_name", help="job_name", default=None, required=True)
parser.add_argument("--has_header", help="has_header", default=None, required=True)
parser.add_argument("--default_headers", help="default_headers", default=None, required=False)
parser.add_argument("--server_portal", help="server_portal", default=None, required=True)
# [1]. read config
args_namespace = parser.parse_args()
input_file_path = args_namespace.input_file_path
reserved_cols = args_namespace.reserved_cols
model_name = args_namespace.model_name
dataset_name = args_namespace.dataset_name
job_name = args_namespace.job_name
has_header = eval(args_namespace.has_header)
default_headers = args_namespace.default_headers
server_portal = args_namespace.server_portal
print("========Batch Predict Config========")
print(f"input_file_path: {input_file_path}")
print(f"reserved_cols: {reserved_cols}")
print(f"model_name: {model_name}")
print(f"dataset_name: {dataset_name}")
print(f"job_name: {job_name}")
print(f"has_header: {has_header}")
print(f"default_headers: {default_headers}")
print(f"server_portal: {server_portal}")
print("====================================")
# [2]. load data
t_load_start = time.time()
load_extension = None
load_status = JobStep.Status.Succeed
try:
# May has no header
if default_headers is not None:
default_headers = default_headers.split(",")
df = util.read_csv(input_file_path, has_header, default_headers)
df_origin = df.copy()
load_extension = {
"n_cols": df.shape[0],
"n_rows": df.shape[1]
}
except Exception as e:
load_status = JobStep.Status.Failed
raise e
finally:
client. batch_predict_callback(portal=server_portal,
dataset_name=dataset_name,
model_name=model_name,
batch_predict_job_name=job_name,
type=PredictStepType.Load,
status=load_status,
took=time.time()-t_load_start,
extension=load_extension)
logger.info("Load dataset finished. ")
# [3]. load model
t_load_model_start = time.time()
load_model_status = JobStep.Status.Succeed
load_model_extension = None
try:
model_dict = client.retrieve_model(portal=server_portal, dataset_name=dataset_name, model_name=model_name)
features = model_dict['inputs']
logger.info("Before cast type: ")
logger.info(df.dtypes)
X = dataset_util.cast_df(df, features, remove_unnecessary_cols=True)
logger.info("After cast type: ")
logger.info(X.dtypes)
abs_model_path = P.join(consts.DATA_DIR, model_dict['model_path'])
framework = model_dict['framework']
if framework == FrameworkType.DeepTables:
model = DeepTable.load(P.join(abs_model_path, 'model'))
elif framework == FrameworkType.GBM:
model = util.load_pkl(P.join(abs_model_path, 'model.pkl'))
else:
raise ValueError(f"Unseen model framework: {framework}")
load_model_extension = {
"model_size": model_dict['model_file_size']
}
except Exception as e:
evaluate_status = JobStep.Status.Failed
raise e
finally:
client.batch_predict_callback(portal=server_portal,
dataset_name=dataset_name,
model_name=model_name,
batch_predict_job_name=job_name,
type=PredictStepType.LoadModel,
status=load_model_status,
took=time.time()-t_load_model_start,
extension=load_model_extension)
logger.info("Load model finished. ")
# [4]. evaluate dataset
t_evaluate_start = time.time()
evaluate_status = JobStep.Status.Succeed
try:
y_pred = model.predict(X)
# proba = dt_model.predict_proba(X)
except Exception as e:
evaluate_status = JobStep.Status.Failed
raise e
finally:
client.batch_predict_callback(portal=server_portal,
dataset_name=dataset_name,
model_name=model_name,
batch_predict_job_name=job_name,
type=PredictStepType.Evaluate,
status=evaluate_status,
took=time.time()-t_evaluate_start,
extension=None)
logger.info("Evaluate input file finished. ")
# [4]. write result
write_result_extension = {}
t_write_result_start = time.time()
write_result_status = AnalyzeStep.Status.Succeed
try:
df = X # remained cols
if reserved_cols is not None and len(reserved_cols) > 0:
result_df = df_origin[reserved_cols]
result_df[BATCH_PREDICTION_COL] = y_pred
else:
result_df = pd.DataFrame(data={BATCH_PREDICTION_COL: y_pred})
output_path = P.join(consts.PATH_TMP_PREDICT, f"{model_name}_{util.human_datetime()}.csv")
if not P.exists(consts.PATH_TMP_PREDICT):
os.makedirs(consts.PATH_TMP_PREDICT)
result_df.to_csv(output_path, index=False)
logger.info(f"Write result finished at: {output_path}")
write_result_extension = {
"output_path": util.relative_path(output_path)
}
except Exception as e:
write_result_status = AnalyzeStep.Status.Failed
raise e
finally:
client.batch_predict_callback(portal=server_portal,
dataset_name=dataset_name,
model_name=model_name,
batch_predict_job_name=job_name,
type=PredictStepType.WriteResult,
status=write_result_status,
took=time.time()-t_write_result_start,
extension=write_result_extension) | 0.282295 | 0.151404 |
import math
import torch
class Graft(torch.optim.Optimizer):
"""Grafted meta-optimizer for disentanglement of optimizers and
implicit step size schedules. Takes black-box optimizers M and D,
and grafts the norm of M's update with the normalized step
direction of D's update, with in-place operations.
Also known as AdaGraft.
Paper: <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>. Disentangling Adaptive Gradient Methods from
Learning Rates. https://arxiv.org/abs/2002.11803
Arguments:
params (iterable): iterable of parameters to optimize or dicts
defining parameter groups; should match child optimizers'
params for intended usage
magnitude_optimizer (torch.nn.Optimizer): child optimizer to
inherit step sizes
direction_optimizer (torch.nn.Optimizer): child optimizer to
inherit step directions
lr (float, optional): global lr multiplier (default: 1.0)
eps (float, optional): term added to D normalization
denominator for numerical stability (default: 1e-16)
use_global_norm (bool, optional): graft global l2 norms rather
than per-layer (default: False)
"""
# pylint: disable-msg=too-many-arguments
def __init__(self, params, magnitude_optimizer, direction_optimizer,
lr=1.0, eps=1e-16, use_global_norm=False):
self.magnitude_optimizer = magnitude_optimizer
self.direction_optimizer = direction_optimizer
self.use_global_norm = use_global_norm
self.global_M_norm = self.global_D_norm = None
defaults = dict(lr=lr, eps=eps)
super(Graft, self).__init__(params, defaults)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step. In-place implementation
of grafting has a 1x model dimension overhead; trades numerical
stability, speed, and memory consumption for full generality.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
self._increment_step()
# grafting can be cheaper/stabler when M & D are SGD-like
self._save_scratch_copy()
self._step_M_inplace()
self._step_D_inplace()
return loss
def _increment_step(self):
# possibly initialize step state, and increment it
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0 or 'step' not in state:
state['step'] = 0
state['step'] += 1
def _save_scratch_copy(self):
# possibly initialize scratch space, and copy current model weights to it
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0 or 'scratch' not in state:
state['scratch'] = torch.zeros_like(p, memory_format=torch.preserve_format)
state['scratch'].copy_(p)
# pylint: disable-msg=invalid-name
def _step_M_inplace(self):
# execute M, measure per-layer step norms, then revert step
# call M in-place
self.magnitude_optimizer.step()
# measure M's step norms, then undo M's step
squared_step_norm = 0.
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
# stingy/trashy: use p to store step
p -= state['scratch']
state['m_norm'] = torch.linalg.norm(p)
if self.use_global_norm:
squared_step_norm += state['m_norm'].item() ** 2
# revert p to old weights for D
p.copy_(state['scratch'])
if self.use_global_norm:
self.global_M_norm = math.sqrt(squared_step_norm)
# pylint: disable-msg=invalid-name
def _step_D_inplace(self):
# execute D, then rescale step norms measured from M
# call D in-place
self.direction_optimizer.step()
# measure D's step norms
squared_step_norm = 0.
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
# use p to store step again
p -= state['scratch']
state['d_norm'] = torch.linalg.norm(p)
if self.use_global_norm:
squared_step_norm += state['d_norm'].item() ** 2
if self.use_global_norm:
self.global_D_norm = math.sqrt(squared_step_norm)
# rescale D's step by M's norms
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
# rescale p, which is currently step
if self.use_global_norm:
rescale_factor = group['lr'] \
* self.global_M_norm / (self.global_D_norm + group['eps'])
else:
rescale_factor = group['lr'] \
* state['m_norm'] / (state['d_norm'] + group['eps'])
# new weights = rescaled step + old copy
p *= rescale_factor
p += state['scratch'] | optimetry/graft.py | import math
import torch
class Graft(torch.optim.Optimizer):
"""Grafted meta-optimizer for disentanglement of optimizers and
implicit step size schedules. Takes black-box optimizers M and D,
and grafts the norm of M's update with the normalized step
direction of D's update, with in-place operations.
Also known as AdaGraft.
Paper: <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>. Disentangling Adaptive Gradient Methods from
Learning Rates. https://arxiv.org/abs/2002.11803
Arguments:
params (iterable): iterable of parameters to optimize or dicts
defining parameter groups; should match child optimizers'
params for intended usage
magnitude_optimizer (torch.nn.Optimizer): child optimizer to
inherit step sizes
direction_optimizer (torch.nn.Optimizer): child optimizer to
inherit step directions
lr (float, optional): global lr multiplier (default: 1.0)
eps (float, optional): term added to D normalization
denominator for numerical stability (default: 1e-16)
use_global_norm (bool, optional): graft global l2 norms rather
than per-layer (default: False)
"""
# pylint: disable-msg=too-many-arguments
def __init__(self, params, magnitude_optimizer, direction_optimizer,
lr=1.0, eps=1e-16, use_global_norm=False):
self.magnitude_optimizer = magnitude_optimizer
self.direction_optimizer = direction_optimizer
self.use_global_norm = use_global_norm
self.global_M_norm = self.global_D_norm = None
defaults = dict(lr=lr, eps=eps)
super(Graft, self).__init__(params, defaults)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step. In-place implementation
of grafting has a 1x model dimension overhead; trades numerical
stability, speed, and memory consumption for full generality.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
self._increment_step()
# grafting can be cheaper/stabler when M & D are SGD-like
self._save_scratch_copy()
self._step_M_inplace()
self._step_D_inplace()
return loss
def _increment_step(self):
# possibly initialize step state, and increment it
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0 or 'step' not in state:
state['step'] = 0
state['step'] += 1
def _save_scratch_copy(self):
# possibly initialize scratch space, and copy current model weights to it
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0 or 'scratch' not in state:
state['scratch'] = torch.zeros_like(p, memory_format=torch.preserve_format)
state['scratch'].copy_(p)
# pylint: disable-msg=invalid-name
def _step_M_inplace(self):
# execute M, measure per-layer step norms, then revert step
# call M in-place
self.magnitude_optimizer.step()
# measure M's step norms, then undo M's step
squared_step_norm = 0.
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
# stingy/trashy: use p to store step
p -= state['scratch']
state['m_norm'] = torch.linalg.norm(p)
if self.use_global_norm:
squared_step_norm += state['m_norm'].item() ** 2
# revert p to old weights for D
p.copy_(state['scratch'])
if self.use_global_norm:
self.global_M_norm = math.sqrt(squared_step_norm)
# pylint: disable-msg=invalid-name
def _step_D_inplace(self):
# execute D, then rescale step norms measured from M
# call D in-place
self.direction_optimizer.step()
# measure D's step norms
squared_step_norm = 0.
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
# use p to store step again
p -= state['scratch']
state['d_norm'] = torch.linalg.norm(p)
if self.use_global_norm:
squared_step_norm += state['d_norm'].item() ** 2
if self.use_global_norm:
self.global_D_norm = math.sqrt(squared_step_norm)
# rescale D's step by M's norms
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
# rescale p, which is currently step
if self.use_global_norm:
rescale_factor = group['lr'] \
* self.global_M_norm / (self.global_D_norm + group['eps'])
else:
rescale_factor = group['lr'] \
* state['m_norm'] / (state['d_norm'] + group['eps'])
# new weights = rescaled step + old copy
p *= rescale_factor
p += state['scratch'] | 0.864968 | 0.646851 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_io.core.python.ops import core_ops
def decode_dicom_image(
    contents,
    color_dim=False,
    on_error='skip',
    scale='preserve',
    dtype=tf.uint16,
    name=None):
  """Decode the pixel data of a DICOM file into an image tensor.

  This is a thin Python wrapper that forwards all arguments, by keyword,
  to the `DCMTK`-backed kernel `core_ops.io_decode_dicom_image`; all
  parsing and validation happens inside that op. Its sibling,
  `decode_dicom_data`, extracts tag information instead of pixels.

  Useful tag constants (e.g. ``dicom_tags.PatientsName``) follow the same
  notation as the [`pydicom`](https://pydicom.github.io/) package. See the
  [DICOM tutorial](https://www.tensorflow.org/io/tutorials/dicom) for
  end-to-end usage.

  If this package helped, please kindly cite the below:

  ```
  @misc{marcelo_lerendegui_2019_3337331,
    author = {<NAME> and <NAME>},
    title = {Tensorflow Dicom Decoder},
    month = jul,
    year = 2019,
    doi = {10.5281/zenodo.3337331},
    url = {https://doi.org/10.5281/zenodo.3337331}
  }
  ```

  Args:
    contents: A Tensor of type string. 0-D. The byte string encoded DICOM
      file.
    color_dim: An optional `bool`. Defaults to `False`. If `True`, a third
      channel is appended to every image, forming a 3-D tensor; a
      1024 x 1024 grayscale image becomes 1024 x 1024 x 1.
    on_error: Defaults to `skip`. Controls what happens when the image
      cannot be opened or the output type cannot accommodate all possible
      input values (for example, a `tf.uint8` output requested for a DICOM
      image stored as `tf.uint16`). `strict` raises an error; `skip`
      returns a 1-D empty tensor; `lossy` proceeds, scaling values
      according to the `scale` attribute.
    scale: Defaults to `preserve`. Determines how input values are scaled.
      `auto` autoscales: for integer outputs it stretches to the full
      output range (e.g. `uint8` values in [0, 255] are linearly mapped
      onto a `uint16` range of [0, 65535]); for float outputs it scales to
      [0, 1]. `preserve` keeps values as-is, clipping any input above the
      maximum representable output.
    dtype: An optional `tf.DType` from: `tf.uint8`, `tf.uint16`,
      `tf.uint32`, `tf.uint64`, `tf.float16`, `tf.float32`, `tf.float64`.
      Defaults to `tf.uint16`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype` whose shape is determined by the DICOM file.
  """
  # Collect the op arguments once, then forward them all by keyword.
  op_kwargs = dict(
      contents=contents,
      color_dim=color_dim,
      on_error=on_error,
      scale=scale,
      dtype=dtype,
      name=name,
  )
  return core_ops.io_decode_dicom_image(**op_kwargs)
def decode_dicom_data(
    contents,
    tags=None,
    name=None):
  """Decode DICOM tag values from a DICOM file as strings.

  This is a thin Python wrapper that forwards its arguments, by keyword,
  to the `DCMTK`-backed kernel `core_ops.io_decode_dicom_data`; its
  sibling, `decode_dicom_image`, extracts pixel data instead of tags.

  Useful tag constants (e.g. ``dicom_tags.PatientsName``) follow the same
  notation as the [`pydicom`](https://pydicom.github.io/) package. See the
  [DICOM tutorial](https://www.tensorflow.org/io/tutorials/dicom) for
  end-to-end usage.

  If this package helped, please kindly cite the below:

  ```
  @misc{marcelo_lerendegui_2019_3337331,
    author = {<NAME> and <NAME>},
    title = {Tensorflow Dicom Decoder},
    month = jul,
    year = 2019,
    doi = {10.5281/zenodo.3337331},
    url = {https://doi.org/10.5281/zenodo.3337331}
  }
  ```

  Args:
    contents: A Tensor of type string. 0-D. The byte string encoded DICOM
      file.
    tags: A Tensor of type `tf.uint32` of any dimension. These `uint32`
      numbers map directly to DICOM tags.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `tf.string` with the same shape as `tags`. If a
    DICOM tag is a list of strings, they are combined into one string and
    separated by a double backslash `\\`. Note a bug in
    [DCMTK](https://support.dcmtk.org/docs/): if the tag is a list of
    numbers, only the zeroth element is returned as a string.
  """
  # All parsing happens in the compiled kernel; this wrapper only
  # forwards the arguments by keyword.
  return core_ops.io_decode_dicom_data(
      contents=contents,
      tags=tags,
      name=name,
  )
class dicom_tags(object): # pylint: disable=invalid-name
"""dicom_tags"""
def __init__(self):
pass
# pylint: disable=invalid-name
FileMetaInformationGroupLength = int('00020000', 16)
FileMetaInformationVersion = int('00020001', 16)
MediaStorageSOPClassUID = int('00020002', 16)
MediaStorageSOPInstanceUID = int('00020003', 16)
TransferSyntaxUID = int('00020010', 16)
ImplementationClassUID = int('00020012', 16)
ImplementationVersionName = int('00020013', 16)
SourceApplicationEntityTitle = int('00020016', 16)
SendingApplicationEntityTitle = int('00020017', 16)
ReceivingApplicationEntityTitle = int('00020018', 16)
PrivateInformationCreatorUID = int('00020100', 16)
PrivateInformation = int('00020102', 16)
FilesetID = int('00041130', 16)
FilesetDescriptorFileID = int('00041141', 16)
SpecificCharacterSetofFilesetDescriptorFile = int('00041142', 16)
OffsetoftheFirstDirectoryRecordoftheRootDirectoryEntity = int(
'00041200', 16)
OffsetoftheLastDirectoryRecordoftheRootDirectoryEntity = int(
'00041202', 16)
FilesetConsistencyFlag = int('00041212', 16)
DirectoryRecordSequence = int('00041220', 16)
OffsetoftheNextDirectoryRecord = int('00041400', 16)
RecordInuseFlag = int('00041410', 16)
OffsetofReferencedLowerLevelDirectoryEntity = int('00041420', 16)
DirectoryRecordType = int('00041430', 16)
PrivateRecordUID = int('00041432', 16)
ReferencedFileID = int('00041500', 16)
MRDRDirectoryRecordOffset = int('00041504', 16)
ReferencedSOPClassUIDinFile = int('00041510', 16)
ReferencedSOPInstanceUIDinFile = int('00041511', 16)
ReferencedTransferSyntaxUIDinFile = int('00041512', 16)
ReferencedRelatedGeneralSOPClassUIDinFile = int('0004151A', 16)
NumberofReferences = int('00041600', 16)
LengthtoEnd = int('00080001', 16)
SpecificCharacterSet = int('00080005', 16)
LanguageCodeSequence = int('00080006', 16)
ImageType = int('00080008', 16)
RecognitionCode = int('00080010', 16)
InstanceCreationDate = int('00080012', 16)
InstanceCreationTime = int('00080013', 16)
InstanceCreatorUID = int('00080014', 16)
InstanceCoercionDateTime = int('00080015', 16)
SOPClassUID = int('00080016', 16)
SOPInstanceUID = int('00080018', 16)
RelatedGeneralSOPClassUID = int('0008001A', 16)
OriginalSpecializedSOPClassUID = int('0008001B', 16)
StudyDate = int('00080020', 16)
SeriesDate = int('00080021', 16)
AcquisitionDate = int('00080022', 16)
ContentDate = int('00080023', 16)
OverlayDate = int('00080024', 16)
CurveDate = int('00080025', 16)
AcquisitionDateTime = int('0008002A', 16)
StudyTime = int('00080030', 16)
SeriesTime = int('00080031', 16)
AcquisitionTime = int('00080032', 16)
ContentTime = int('00080033', 16)
OverlayTime = int('00080034', 16)
CurveTime = int('00080035', 16)
DataSetType = int('00080040', 16)
DataSetSubtype = int('00080041', 16)
NuclearMedicineSeriesType = int('00080042', 16)
AccessionNumber = int('00080050', 16)
IssuerofAccessionNumberSequence = int('00080051', 16)
QueryRetrieveLevel = int('00080052', 16)
QueryRetrieveView = int('00080053', 16)
RetrieveAETitle = int('00080054', 16)
StationAETitle = int('00080055', 16)
InstanceAvailability = int('00080056', 16)
FailedSOPInstanceUIDList = int('00080058', 16)
Modality = int('00080060', 16)
ModalitiesinStudy = int('00080061', 16)
SOPClassesinStudy = int('00080062', 16)
ConversionType = int('00080064', 16)
PresentationIntentType = int('00080068', 16)
Manufacturer = int('00080070', 16)
InstitutionName = int('00080080', 16)
InstitutionAddress = int('00080081', 16)
InstitutionCodeSequence = int('00080082', 16)
ReferringPhysiciansName = int('00080090', 16)
ReferringPhysiciansAddress = int('00080092', 16)
ReferringPhysiciansTelephoneNumbers = int('00080094', 16)
ReferringPhysicianIdentificationSequence = int('00080096', 16)
ConsultingPhysiciansName = int('0008009C', 16)
ConsultingPhysicianIdentificationSequence = int('0008009D', 16)
CodeValue = int('00080100', 16)
ExtendedCodeValue = int('00080101', 16)
CodingSchemeDesignator = int('00080102', 16)
CodingSchemeVersion = int('00080103', 16)
CodeMeaning = int('00080104', 16)
MappingResource = int('00080105', 16)
ContextGroupVersion = int('00080106', 16)
ContextGroupLocalVersion = int('00080107', 16)
ExtendedCodeMeaning = int('00080108', 16)
ContextGroupExtensionFlag = int('0008010B', 16)
CodingSchemeUID = int('0008010C', 16)
ContextGroupExtensionCreatorUID = int('0008010D', 16)
ContextIdentifier = int('0008010F', 16)
CodingSchemeIdentificationSequence = int('00080110', 16)
CodingSchemeRegistry = int('00080112', 16)
CodingSchemeExternalID = int('00080114', 16)
CodingSchemeName = int('00080115', 16)
CodingSchemeResponsibleOrganization = int('00080116', 16)
ContextUID = int('00080117', 16)
MappingResourceUID = int('00080118', 16)
LongCodeValue = int('00080119', 16)
URNCodeValue = int('00080120', 16)
EquivalentCodeSequence = int('00080121', 16)
MappingResourceName = int('00080122', 16)
ContextGroupIdentificationSequence = int('00080123', 16)
MappingResourceIdentificationSequence = int('00080124', 16)
TimezoneOffsetFromUTC = int('00080201', 16)
PrivateDataElementCharacteristicsSequence = int('00080300', 16)
PrivateGroupReference = int('00080301', 16)
PrivateCreatorReference = int('00080302', 16)
BlockIdentifyingInformationStatus = int('00080303', 16)
NonidentifyingPrivateElements = int('00080304', 16)
IdentifyingPrivateElements = int('00080306', 16)
DeidentificationActionSequence = int('00080305', 16)
DeidentificationAction = int('00080307', 16)
NetworkID = int('00081000', 16)
StationName = int('00081010', 16)
StudyDescription = int('00081030', 16)
ProcedureCodeSequence = int('00081032', 16)
SeriesDescription = int('0008103E', 16)
SeriesDescriptionCodeSequence = int('0008103F', 16)
InstitutionalDepartmentName = int('00081040', 16)
PhysiciansofRecord = int('00081048', 16)
PhysiciansofRecordIdentificationSequence = int('00081049', 16)
PerformingPhysiciansName = int('00081050', 16)
PerformingPhysicianIdentificationSequence = int('00081052', 16)
NameofPhysiciansReadingStudy = int('00081060', 16)
PhysiciansReadingStudyIdentificationSequence = int('00081062', 16)
OperatorsName = int('00081070', 16)
OperatorIdentificationSequence = int('00081072', 16)
AdmittingDiagnosesDescription = int('00081080', 16)
AdmittingDiagnosesCodeSequence = int('00081084', 16)
ManufacturersModelName = int('00081090', 16)
ReferencedResultsSequence = int('00081100', 16)
ReferencedStudySequence = int('00081110', 16)
ReferencedPerformedProcedureStepSequence = int('00081111', 16)
ReferencedSeriesSequence = int('00081115', 16)
ReferencedPatientSequence = int('00081120', 16)
ReferencedVisitSequence = int('00081125', 16)
ReferencedOverlaySequence = int('00081130', 16)
ReferencedStereometricInstanceSequence = int('00081134', 16)
ReferencedWaveformSequence = int('0008113A', 16)
ReferencedImageSequence = int('00081140', 16)
ReferencedCurveSequence = int('00081145', 16)
ReferencedInstanceSequence = int('0008114A', 16)
ReferencedRealWorldValueMappingInstanceSequence = int('0008114B', 16)
ReferencedSOPClassUID = int('00081150', 16)
ReferencedSOPInstanceUID = int('00081155', 16)
SOPClassesSupported = int('0008115A', 16)
ReferencedFrameNumber = int('00081160', 16)
SimpleFrameList = int('00081161', 16)
CalculatedFrameList = int('00081162', 16)
TimeRange = int('00081163', 16)
FrameExtractionSequence = int('00081164', 16)
MultiframeSourceSOPInstanceUID = int('00081167', 16)
RetrieveURL = int('00081190', 16)
TransactionUID = int('00081195', 16)
WarningReason = int('00081196', 16)
FailureReason = int('00081197', 16)
FailedSOPSequence = int('00081198', 16)
ReferencedSOPSequence = int('00081199', 16)
OtherFailuresSequence = int('0008119A', 16)
StudiesContainingOtherReferencedInstancesSequence = int('00081200', 16)
RelatedSeriesSequence = int('00081250', 16)
LossyImageCompressionRetired = int('00082110', 16)
DerivationDescription = int('00082111', 16)
SourceImageSequence = int('00082112', 16)
StageName = int('00082120', 16)
StageNumber = int('00082122', 16)
NumberofStages = int('00082124', 16)
ViewName = int('00082127', 16)
ViewNumber = int('00082128', 16)
NumberofEventTimers = int('00082129', 16)
NumberofViewsinStage = int('0008212A', 16)
EventElapsedTimes = int('00082130', 16)
EventTimerNames = int('00082132', 16)
EventTimerSequence = int('00082133', 16)
EventTimeOffset = int('00082134', 16)
EventCodeSequence = int('00082135', 16)
StartTrim = int('00082142', 16)
StopTrim = int('00082143', 16)
RecommendedDisplayFrameRate = int('00082144', 16)
TransducerPosition = int('00082200', 16)
TransducerOrientation = int('00082204', 16)
AnatomicStructure = int('00082208', 16)
AnatomicRegionSequence = int('00082218', 16)
AnatomicRegionModifierSequence = int('00082220', 16)
PrimaryAnatomicStructureSequence = int('00082228', 16)
AnatomicStructureSpaceorRegionSequence = int('00082229', 16)
PrimaryAnatomicStructureModifierSequence = int('00082230', 16)
TransducerPositionSequence = int('00082240', 16)
TransducerPositionModifierSequence = int('00082242', 16)
TransducerOrientationSequence = int('00082244', 16)
TransducerOrientationModifierSequence = int('00082246', 16)
AnatomicStructureSpaceOrRegionCodeSequenceTrial = int('00082251', 16)
AnatomicPortalOfEntranceCodeSequenceTrial = int('00082253', 16)
AnatomicApproachDirectionCodeSequenceTrial = int('00082255', 16)
AnatomicPerspectiveDescriptionTrial = int('00082256', 16)
AnatomicPerspectiveCodeSequenceTrial = int('00082257', 16)
AnatomicLocationOfExaminingInstrumentDescriptionTrial = int('00082258', 16)
AnatomicLocationOfExaminingInstrumentCodeSequenceTrial = int(
'00082259', 16)
AnatomicStructureSpaceOrRegionModifierCodeSequenceTrial = int(
'0008225A', 16)
OnAxisBackgroundAnatomicStructureCodeSequenceTrial = int('0008225C', 16)
AlternateRepresentationSequence = int('00083001', 16)
IrradiationEventUID = int('00083010', 16)
SourceIrradiationEventSequence = int('00083011', 16)
RadiopharmaceuticalAdministrationEventUID = int('00083012', 16)
IdentifyingComments = int('00084000', 16)
FrameType = int('00089007', 16)
ReferencedImageEvidenceSequence = int('00089092', 16)
ReferencedRawDataSequence = int('00089121', 16)
CreatorVersionUID = int('00089123', 16)
DerivationImageSequence = int('00089124', 16)
SourceImageEvidenceSequence = int('00089154', 16)
PixelPresentation = int('00089205', 16)
VolumetricProperties = int('00089206', 16)
VolumeBasedCalculationTechnique = int('00089207', 16)
ComplexImageComponent = int('00089208', 16)
AcquisitionContrast = int('00089209', 16)
DerivationCodeSequence = int('00089215', 16)
ReferencedPresentationStateSequence = int('00089237', 16)
ReferencedOtherPlaneSequence = int('00089410', 16)
FrameDisplaySequence = int('00089458', 16)
RecommendedDisplayFrameRateinFloat = int('00089459', 16)
SkipFrameRangeFlag = int('00089460', 16)
PatientsName = int('00100010', 16)
PatientID = int('00100020', 16)
IssuerofPatientID = int('00100021', 16)
TypeofPatientID = int('00100022', 16)
IssuerofPatientIDQualifiersSequence = int('00100024', 16)
SourcePatientGroupIdentificationSequence = int('00100026', 16)
GroupofPatientsIdentificationSequence = int('00100027', 16)
SubjectRelativePositioninImage = int('00100028', 16)
PatientsBirthDate = int('00100030', 16)
PatientsBirthTime = int('00100032', 16)
PatientsBirthDateinAlternativeCalendar = int('00100033', 16)
PatientsDeathDateinAlternativeCalendar = int('00100034', 16)
PatientsAlternativeCalendar = int('00100035', 16)
PatientsSex = int('00100040', 16)
PatientsInsurancePlanCodeSequence = int('00100050', 16)
PatientsPrimaryLanguageCodeSequence = int('00100101', 16)
PatientsPrimaryLanguageModifierCodeSequence = int('00100102', 16)
QualityControlSubject = int('00100200', 16)
QualityControlSubjectTypeCodeSequence = int('00100201', 16)
StrainDescription = int('00100212', 16)
StrainNomenclature = int('00100213', 16)
StrainStockNumber = int('00100214', 16)
StrainSourceRegistryCodeSequence = int('00100215', 16)
StrainStockSequence = int('00100216', 16)
StrainSource = int('00100217', 16)
StrainAdditionalInformation = int('00100218', 16)
StrainCodeSequence = int('00100219', 16)
OtherPatientIDs = int('00101000', 16)
OtherPatientNames = int('00101001', 16)
OtherPatientIDsSequence = int('00101002', 16)
PatientsBirthName = int('00101005', 16)
PatientsAge = int('00101010', 16)
PatientsSize = int('00101020', 16)
PatientsSizeCodeSequence = int('00101021', 16)
PatientsWeight = int('00101030', 16)
PatientsAddress = int('00101040', 16)
InsurancePlanIdentification = int('00101050', 16)
PatientsMothersBirthName = int('00101060', 16)
MilitaryRank = int('00101080', 16)
BranchofService = int('00101081', 16)
MedicalRecordLocator = int('00101090', 16)
ReferencedPatientPhotoSequence = int('00101100', 16)
MedicalAlerts = int('00102000', 16)
Allergies = int('00102110', 16)
CountryofResidence = int('00102150', 16)
RegionofResidence = int('00102152', 16)
PatientsTelephoneNumbers = int('00102154', 16)
PatientsTelecomInformation = int('00102155', 16)
EthnicGroup = int('00102160', 16)
Occupation = int('00102180', 16)
SmokingStatus = int('001021A0', 16)
AdditionalPatientHistory = int('001021B0', 16)
PregnancyStatus = int('001021C0', 16)
LastMenstrualDate = int('001021D0', 16)
PatientsReligiousPreference = int('001021F0', 16)
PatientSpeciesDescription = int('00102201', 16)
PatientSpeciesCodeSequence = int('00102202', 16)
PatientsSexNeutered = int('00102203', 16)
AnatomicalOrientationType = int('00102210', 16)
PatientBreedDescription = int('00102292', 16)
PatientBreedCodeSequence = int('00102293', 16)
BreedRegistrationSequence = int('00102294', 16)
BreedRegistrationNumber = int('00102295', 16)
BreedRegistryCodeSequence = int('00102296', 16)
ResponsiblePerson = int('00102297', 16)
ResponsiblePersonRole = int('00102298', 16)
ResponsibleOrganization = int('00102299', 16)
PatientComments = int('00104000', 16)
ExaminedBodyThickness = int('00109431', 16)
ClinicalTrialSponsorName = int('00120010', 16)
ClinicalTrialProtocolID = int('00120020', 16)
ClinicalTrialProtocolName = int('00120021', 16)
ClinicalTrialSiteID = int('00120030', 16)
ClinicalTrialSiteName = int('00120031', 16)
ClinicalTrialSubjectID = int('00120040', 16)
ClinicalTrialSubjectReadingID = int('00120042', 16)
ClinicalTrialTimePointID = int('00120050', 16)
ClinicalTrialTimePointDescription = int('00120051', 16)
ClinicalTrialCoordinatingCenterName = int('00120060', 16)
PatientIdentityRemoved = int('00120062', 16)
DeidentificationMethod = int('00120063', 16)
DeidentificationMethodCodeSequence = int('00120064', 16)
ClinicalTrialSeriesID = int('00120071', 16)
ClinicalTrialSeriesDescription = int('00120072', 16)
ClinicalTrialProtocolEthicsCommitteeName = int('00120081', 16)
ClinicalTrialProtocolEthicsCommitteeApprovalNumber = int('00120082', 16)
ConsentforClinicalTrialUseSequence = int('00120083', 16)
DistributionType = int('00120084', 16)
ConsentforDistributionFlag = int('00120085', 16)
CADFileFormat = int('00140023', 16)
ComponentReferenceSystem = int('00140024', 16)
ComponentManufacturingProcedure = int('00140025', 16)
ComponentManufacturer = int('00140028', 16)
MaterialThickness = int('00140030', 16)
MaterialPipeDiameter = int('00140032', 16)
MaterialIsolationDiameter = int('00140034', 16)
MaterialGrade = int('00140042', 16)
MaterialPropertiesDescription = int('00140044', 16)
MaterialPropertiesFileFormatRetired = int('00140045', 16)
MaterialNotes = int('00140046', 16)
ComponentShape = int('00140050', 16)
CurvatureType = int('00140052', 16)
OuterDiameter = int('00140054', 16)
InnerDiameter = int('00140056', 16)
ComponentWelderIDs = int('00140100', 16)
SecondaryApprovalStatus = int('00140101', 16)
SecondaryReviewDate = int('00140102', 16)
SecondaryReviewTime = int('00140103', 16)
SecondaryReviewerName = int('00140104', 16)
RepairID = int('00140105', 16)
MultipleComponentApprovalSequence = int('00140106', 16)
OtherApprovalStatus = int('00140107', 16)
OtherSecondaryApprovalStatus = int('00140108', 16)
ActualEnvironmentalConditions = int('00141010', 16)
ExpiryDate = int('00141020', 16)
EnvironmentalConditions = int('00141040', 16)
EvaluatorSequence = int('00142002', 16)
EvaluatorNumber = int('00142004', 16)
EvaluatorName = int('00142006', 16)
EvaluationAttempt = int('00142008', 16)
IndicationSequence = int('00142012', 16)
IndicationNumber = int('00142014', 16)
IndicationLabel = int('00142016', 16)
IndicationDescription = int('00142018', 16)
IndicationType = int('0014201A', 16)
IndicationDisposition = int('0014201C', 16)
IndicationROISequence = int('0014201E', 16)
IndicationPhysicalPropertySequence = int('00142030', 16)
PropertyLabel = int('00142032', 16)
CoordinateSystemNumberofAxes = int('00142202', 16)
CoordinateSystemAxesSequence = int('00142204', 16)
CoordinateSystemAxisDescription = int('00142206', 16)
CoordinateSystemDataSetMapping = int('00142208', 16)
CoordinateSystemAxisNumber = int('0014220A', 16)
CoordinateSystemAxisType = int('0014220C', 16)
CoordinateSystemAxisUnits = int('0014220E', 16)
CoordinateSystemAxisValues = int('00142210', 16)
CoordinateSystemTransformSequence = int('00142220', 16)
TransformDescription = int('00142222', 16)
TransformNumberofAxes = int('00142224', 16)
TransformOrderofAxes = int('00142226', 16)
TransformedAxisUnits = int('00142228', 16)
CoordinateSystemTransformRotationandScaleMatrix = int('0014222A', 16)
CoordinateSystemTransformTranslationMatrix = int('0014222C', 16)
InternalDetectorFrameTime = int('00143011', 16)
NumberofFramesIntegrated = int('00143012', 16)
DetectorTemperatureSequence = int('00143020', 16)
SensorName = int('00143022', 16)
HorizontalOffsetofSensor = int('00143024', 16)
VerticalOffsetofSensor = int('00143026', 16)
SensorTemperature = int('00143028', 16)
DarkCurrentSequence = int('00143040', 16)
DarkCurrentCounts = int('00143050', 16)
GainCorrectionReferenceSequence = int('00143060', 16)
AirCounts = int('00143070', 16)
KVUsedinGainCalibration = int('00143071', 16)
MAUsedinGainCalibration = int('00143072', 16)
NumberofFramesUsedforIntegration = int('00143073', 16)
FilterMaterialUsedinGainCalibration = int('00143074', 16)
FilterThicknessUsedinGainCalibration = int('00143075', 16)
DateofGainCalibration = int('00143076', 16)
TimeofGainCalibration = int('00143077', 16)
BadPixelImage = int('00143080', 16)
CalibrationNotes = int('00143099', 16)
PulserEquipmentSequence = int('00144002', 16)
PulserType = int('00144004', 16)
PulserNotes = int('00144006', 16)
ReceiverEquipmentSequence = int('00144008', 16)
AmplifierType = int('0014400A', 16)
ReceiverNotes = int('0014400C', 16)
PreAmplifierEquipmentSequence = int('0014400E', 16)
PreAmplifierNotes = int('0014400F', 16)
TransmitTransducerSequence = int('00144010', 16)
ReceiveTransducerSequence = int('00144011', 16)
NumberofElements = int('00144012', 16)
ElementShape = int('00144013', 16)
ElementDimensionA = int('00144014', 16)
ElementDimensionB = int('00144015', 16)
ElementPitchA = int('00144016', 16)
MeasuredBeamDimensionA = int('00144017', 16)
MeasuredBeamDimensionB = int('00144018', 16)
LocationofMeasuredBeamDiameter = int('00144019', 16)
NominalFrequency = int('0014401A', 16)
MeasuredCenterFrequency = int('0014401B', 16)
MeasuredBandwidth = int('0014401C', 16)
ElementPitchB = int('0014401D', 16)
PulserSettingsSequence = int('00144020', 16)
PulseWidth = int('00144022', 16)
ExcitationFrequency = int('00144024', 16)
ModulationType = int('00144026', 16)
Damping = int('00144028', 16)
ReceiverSettingsSequence = int('00144030', 16)
AcquiredSoundpathLength = int('00144031', 16)
AcquisitionCompressionType = int('00144032', 16)
AcquisitionSampleSize = int('00144033', 16)
RectifierSmoothing = int('00144034', 16)
DACSequence = int('00144035', 16)
DACType = int('00144036', 16)
DACGainPoints = int('00144038', 16)
DACTimePoints = int('0014403A', 16)
DACAmplitude = int('0014403C', 16)
PreAmplifierSettingsSequence = int('00144040', 16)
TransmitTransducerSettingsSequence = int('00144050', 16)
ReceiveTransducerSettingsSequence = int('00144051', 16)
IncidentAngle = int('00144052', 16)
CouplingTechnique = int('00144054', 16)
CouplingMedium = int('00144056', 16)
CouplingVelocity = int('00144057', 16)
ProbeCenterLocationX = int('00144058', 16)
ProbeCenterLocationZ = int('00144059', 16)
SoundPathLength = int('0014405A', 16)
DelayLawIdentifier = int('0014405C', 16)
GateSettingsSequence = int('00144060', 16)
GateThreshold = int('00144062', 16)
VelocityofSound = int('00144064', 16)
CalibrationSettingsSequence = int('00144070', 16)
CalibrationProcedure = int('00144072', 16)
ProcedureVersion = int('00144074', 16)
ProcedureCreationDate = int('00144076', 16)
ProcedureExpirationDate = int('00144078', 16)
ProcedureLastModifiedDate = int('0014407A', 16)
CalibrationTime = int('0014407C', 16)
CalibrationDate = int('0014407E', 16)
ProbeDriveEquipmentSequence = int('00144080', 16)
DriveType = int('00144081', 16)
ProbeDriveNotes = int('00144082', 16)
DriveProbeSequence = int('00144083', 16)
ProbeInductance = int('00144084', 16)
ProbeResistance = int('00144085', 16)
ReceiveProbeSequence = int('00144086', 16)
ProbeDriveSettingsSequence = int('00144087', 16)
BridgeResistors = int('00144088', 16)
ProbeOrientationAngle = int('00144089', 16)
UserSelectedGainY = int('0014408B', 16)
UserSelectedPhase = int('0014408C', 16)
UserSelectedOffsetX = int('0014408D', 16)
UserSelectedOffsetY = int('0014408E', 16)
ChannelSettingsSequence = int('00144091', 16)
ChannelThreshold = int('00144092', 16)
ScannerSettingsSequence = int('0014409A', 16)
ScanProcedure = int('0014409B', 16)
TranslationRateX = int('0014409C', 16)
TranslationRateY = int('0014409D', 16)
ChannelOverlap = int('0014409F', 16)
ImageQualityIndicatorType = int('001440A0', 16)
ImageQualityIndicatorMaterial = int('001440A1', 16)
ImageQualityIndicatorSize = int('001440A2', 16)
LINACEnergy = int('00145002', 16)
LINACOutput = int('00145004', 16)
ActiveAperture = int('00145100', 16)
TotalAperture = int('00145101', 16)
ApertureElevation = int('00145102', 16)
MainLobeAngle = int('00145103', 16)
MainRoofAngle = int('00145104', 16)
ConnectorType = int('00145105', 16)
WedgeModelNumber = int('00145106', 16)
WedgeAngleFloat = int('00145107', 16)
WedgeRoofAngle = int('00145108', 16)
WedgeElement1Position = int('00145109', 16)
WedgeMaterialVelocity = int('0014510A', 16)
WedgeMaterial = int('0014510B', 16)
WedgeOffsetZ = int('0014510C', 16)
WedgeOriginOffsetX = int('0014510D', 16)
WedgeTimeDelay = int('0014510E', 16)
WedgeName = int('0014510F', 16)
WedgeManufacturerName = int('00145110', 16)
WedgeDescription = int('00145111', 16)
NominalBeamAngle = int('00145112', 16)
WedgeOffsetX = int('00145113', 16)
WedgeOffsetY = int('00145114', 16)
WedgeTotalLength = int('00145115', 16)
WedgeInContactLength = int('00145116', 16)
WedgeFrontGap = int('00145117', 16)
WedgeTotalHeight = int('00145118', 16)
WedgeFrontHeight = int('00145119', 16)
WedgeRearHeight = int('0014511A', 16)
WedgeTotalWidth = int('0014511B', 16)
WedgeInContactWidth = int('0014511C', 16)
WedgeChamferHeight = int('0014511D', 16)
WedgeCurve = int('0014511E', 16)
RadiusAlongtheWedge = int('0014511F', 16)
ContrastBolusAgent = int('00180010', 16)
ContrastBolusAgentSequence = int('00180012', 16)
ContrastBolusT1Relaxivity = int('00180013', 16)
ContrastBolusAdministrationRouteSequence = int('00180014', 16)
BodyPartExamined = int('00180015', 16)
ScanningSequence = int('00180020', 16)
SequenceVariant = int('00180021', 16)
ScanOptions = int('00180022', 16)
MRAcquisitionType = int('00180023', 16)
SequenceName = int('00180024', 16)
AngioFlag = int('00180025', 16)
InterventionDrugInformationSequence = int('00180026', 16)
InterventionDrugStopTime = int('00180027', 16)
InterventionDrugDose = int('00180028', 16)
InterventionDrugCodeSequence = int('00180029', 16)
AdditionalDrugSequence = int('0018002A', 16)
Radionuclide = int('00180030', 16)
Radiopharmaceutical = int('00180031', 16)
EnergyWindowCenterline = int('00180032', 16)
EnergyWindowTotalWidth = int('00180033', 16)
InterventionDrugName = int('00180034', 16)
InterventionDrugStartTime = int('00180035', 16)
InterventionSequence = int('00180036', 16)
TherapyType = int('00180037', 16)
InterventionStatus = int('00180038', 16)
TherapyDescription = int('00180039', 16)
InterventionDescription = int('0018003A', 16)
CineRate = int('00180040', 16)
InitialCineRunState = int('00180042', 16)
SliceThickness = int('00180050', 16)
KVP = int('00180060', 16)
CountsAccumulated = int('00180070', 16)
AcquisitionTerminationCondition = int('00180071', 16)
EffectiveDuration = int('00180072', 16)
AcquisitionStartCondition = int('00180073', 16)
AcquisitionStartConditionData = int('00180074', 16)
AcquisitionTerminationConditionData = int('00180075', 16)
RepetitionTime = int('00180080', 16)
EchoTime = int('00180081', 16)
InversionTime = int('00180082', 16)
NumberofAverages = int('00180083', 16)
ImagingFrequency = int('00180084', 16)
ImagedNucleus = int('00180085', 16)
EchoNumbers = int('00180086', 16)
MagneticFieldStrength = int('00180087', 16)
SpacingBetweenSlices = int('00180088', 16)
NumberofPhaseEncodingSteps = int('00180089', 16)
DataCollectionDiameter = int('00180090', 16)
EchoTrainLength = int('00180091', 16)
PercentSampling = int('00180093', 16)
PercentPhaseFieldofView = int('00180094', 16)
PixelBandwidth = int('00180095', 16)
DeviceSerialNumber = int('00181000', 16)
DeviceUID = int('00181002', 16)
DeviceID = int('00181003', 16)
PlateID = int('00181004', 16)
GeneratorID = int('00181005', 16)
GridID = int('00181006', 16)
CassetteID = int('00181007', 16)
GantryID = int('00181008', 16)
SecondaryCaptureDeviceID = int('00181010', 16)
HardcopyCreationDeviceID = int('00181011', 16)
DateofSecondaryCapture = int('00181012', 16)
TimeofSecondaryCapture = int('00181014', 16)
SecondaryCaptureDeviceManufacturer = int('00181016', 16)
HardcopyDeviceManufacturer = int('00181017', 16)
SecondaryCaptureDeviceManufacturersModelName = int('00181018', 16)
SecondaryCaptureDeviceSoftwareVersions = int('00181019', 16)
HardcopyDeviceSoftwareVersion = int('0018101A', 16)
HardcopyDeviceManufacturersModelName = int('0018101B', 16)
SoftwareVersions = int('00181020', 16)
VideoImageFormatAcquired = int('00181022', 16)
DigitalImageFormatAcquired = int('00181023', 16)
ProtocolName = int('00181030', 16)
ContrastBolusRoute = int('00181040', 16)
ContrastBolusVolume = int('00181041', 16)
ContrastBolusStartTime = int('00181042', 16)
ContrastBolusStopTime = int('00181043', 16)
ContrastBolusTotalDose = int('00181044', 16)
SyringeCounts = int('00181045', 16)
ContrastFlowRate = int('00181046', 16)
ContrastFlowDuration = int('00181047', 16)
ContrastBolusIngredient = int('00181048', 16)
ContrastBolusIngredientConcentration = int('00181049', 16)
SpatialResolution = int('00181050', 16)
TriggerTime = int('00181060', 16)
TriggerSourceorType = int('00181061', 16)
NominalInterval = int('00181062', 16)
FrameTime = int('00181063', 16)
CardiacFramingType = int('00181064', 16)
FrameTimeVector = int('00181065', 16)
FrameDelay = int('00181066', 16)
ImageTriggerDelay = int('00181067', 16)
MultiplexGroupTimeOffset = int('00181068', 16)
TriggerTimeOffset = int('00181069', 16)
SynchronizationTrigger = int('0018106A', 16)
SynchronizationChannel = int('0018106C', 16)
TriggerSamplePosition = int('0018106E', 16)
RadiopharmaceuticalRoute = int('00181070', 16)
RadiopharmaceuticalVolume = int('00181071', 16)
RadiopharmaceuticalStartTime = int('00181072', 16)
RadiopharmaceuticalStopTime = int('00181073', 16)
RadionuclideTotalDose = int('00181074', 16)
RadionuclideHalfLife = int('00181075', 16)
RadionuclidePositronFraction = int('00181076', 16)
RadiopharmaceuticalSpecificActivity = int('00181077', 16)
RadiopharmaceuticalStartDateTime = int('00181078', 16)
RadiopharmaceuticalStopDateTime = int('00181079', 16)
BeatRejectionFlag = int('00181080', 16)
LowRRValue = int('00181081', 16)
HighRRValue = int('00181082', 16)
IntervalsAcquired = int('00181083', 16)
IntervalsRejected = int('00181084', 16)
PVCRejection = int('00181085', 16)
SkipBeats = int('00181086', 16)
HeartRate = int('00181088', 16)
CardiacNumberofImages = int('00181090', 16)
TriggerWindow = int('00181094', 16)
ReconstructionDiameter = int('00181100', 16)
DistanceSourcetoDetector = int('00181110', 16)
DistanceSourcetoPatient = int('00181111', 16)
EstimatedRadiographicMagnificationFactor = int('00181114', 16)
GantryDetectorTilt = int('00181120', 16)
GantryDetectorSlew = int('00181121', 16)
TableHeight = int('00181130', 16)
TableTraverse = int('00181131', 16)
TableMotion = int('00181134', 16)
TableVerticalIncrement = int('00181135', 16)
TableLateralIncrement = int('00181136', 16)
TableLongitudinalIncrement = int('00181137', 16)
TableAngle = int('00181138', 16)
TableType = int('0018113A', 16)
RotationDirection = int('00181140', 16)
AngularPosition = int('00181141', 16)
RadialPosition = int('00181142', 16)
ScanArc = int('00181143', 16)
AngularStep = int('00181144', 16)
CenterofRotationOffset = int('00181145', 16)
RotationOffset = int('00181146', 16)
FieldofViewShape = int('00181147', 16)
FieldofViewDimensions = int('00181149', 16)
ExposureTime = int('00181150', 16)
XRayTubeCurrent = int('00181151', 16)
Exposure = int('00181152', 16)
ExposureinAs = int('00181153', 16)
AveragePulseWidth = int('00181154', 16)
RadiationSetting = int('00181155', 16)
RectificationType = int('00181156', 16)
RadiationMode = int('0018115A', 16)
ImageandFluoroscopyAreaDoseProduct = int('0018115E', 16)
FilterType = int('00181160', 16)
TypeofFilters = int('00181161', 16)
IntensifierSize = int('00181162', 16)
ImagerPixelSpacing = int('00181164', 16)
Grid = int('00181166', 16)
GeneratorPower = int('00181170', 16)
CollimatorgridName = int('00181180', 16)
CollimatorType = int('00181181', 16)
FocalDistance = int('00181182', 16)
XFocusCenter = int('00181183', 16)
YFocusCenter = int('00181184', 16)
FocalSpots = int('00181190', 16)
AnodeTargetMaterial = int('00181191', 16)
BodyPartThickness = int('001811A0', 16)
CompressionForce = int('001811A2', 16)
PaddleDescription = int('001811A4', 16)
DateofLastCalibration = int('00181200', 16)
TimeofLastCalibration = int('00181201', 16)
DateTimeofLastCalibration = int('00181202', 16)
ConvolutionKernel = int('00181210', 16)
UpperLowerPixelValues = int('00181240', 16)
ActualFrameDuration = int('00181242', 16)
CountRate = int('00181243', 16)
PreferredPlaybackSequencing = int('00181244', 16)
# DICOM group 0018 (acquisition) tag values.
# Each constant holds the 32-bit (group << 16 | element) tag number,
# written directly as a hexadecimal literal.
ReceiveCoilName = 0x00181250
TransmitCoilName = 0x00181251
PlateType = 0x00181260
PhosphorType = 0x00181261
WaterEquivalentDiameter = 0x00181271
WaterEquivalentDiameterCalculationMethodCodeSequence = 0x00181272
ScanVelocity = 0x00181300
WholeBodyTechnique = 0x00181301
ScanLength = 0x00181302
AcquisitionMatrix = 0x00181310
InplanePhaseEncodingDirection = 0x00181312
FlipAngle = 0x00181314
VariableFlipAngleFlag = 0x00181315
SAR = 0x00181316
dBdt = 0x00181318
B1rms = 0x00181320
AcquisitionDeviceProcessingDescription = 0x00181400
AcquisitionDeviceProcessingCode = 0x00181401
CassetteOrientation = 0x00181402
CassetteSize = 0x00181403
ExposuresonPlate = 0x00181404
RelativeXRayExposure = 0x00181405
ExposureIndex = 0x00181411
TargetExposureIndex = 0x00181412
DeviationIndex = 0x00181413
ColumnAngulation = 0x00181450
TomoLayerHeight = 0x00181460
TomoAngle = 0x00181470
TomoTime = 0x00181480
TomoType = 0x00181490
TomoClass = 0x00181491
NumberofTomosynthesisSourceImages = 0x00181495
PositionerMotion = 0x00181500
PositionerType = 0x00181508
PositionerPrimaryAngle = 0x00181510
PositionerSecondaryAngle = 0x00181511
PositionerPrimaryAngleIncrement = 0x00181520
PositionerSecondaryAngleIncrement = 0x00181521
DetectorPrimaryAngle = 0x00181530
DetectorSecondaryAngle = 0x00181531
ShutterShape = 0x00181600
ShutterLeftVerticalEdge = 0x00181602
ShutterRightVerticalEdge = 0x00181604
ShutterUpperHorizontalEdge = 0x00181606
ShutterLowerHorizontalEdge = 0x00181608
CenterofCircularShutter = 0x00181610
RadiusofCircularShutter = 0x00181612
VerticesofthePolygonalShutter = 0x00181620
ShutterPresentationValue = 0x00181622
ShutterOverlayGroup = 0x00181623
ShutterPresentationColorCIELabValue = 0x00181624
CollimatorShape = 0x00181700
CollimatorLeftVerticalEdge = 0x00181702
CollimatorRightVerticalEdge = 0x00181704
CollimatorUpperHorizontalEdge = 0x00181706
CollimatorLowerHorizontalEdge = 0x00181708
CenterofCircularCollimator = 0x00181710
RadiusofCircularCollimator = 0x00181712
VerticesofthePolygonalCollimator = 0x00181720
AcquisitionTimeSynchronized = 0x00181800
TimeSource = 0x00181801
TimeDistributionProtocol = 0x00181802
NTPSourceAddress = 0x00181803
PageNumberVector = 0x00182001
FrameLabelVector = 0x00182002
FramePrimaryAngleVector = 0x00182003
FrameSecondaryAngleVector = 0x00182004
SliceLocationVector = 0x00182005
DisplayWindowLabelVector = 0x00182006
NominalScannedPixelSpacing = 0x00182010
DigitizingDeviceTransportDirection = 0x00182020
RotationofScannedFilm = 0x00182030
BiopsyTargetSequence = 0x00182041
TargetUID = 0x00182042
LocalizingCursorPosition = 0x00182043
CalculatedTargetPosition = 0x00182044
TargetLabel = 0x00182045
DisplayedZValue = 0x00182046
IVUSAcquisition = 0x00183100
IVUSPullbackRate = 0x00183101
IVUSGatedRate = 0x00183102
IVUSPullbackStartFrameNumber = 0x00183103
IVUSPullbackStopFrameNumber = 0x00183104
LesionNumber = 0x00183105
AcquisitionComments = 0x00184000
OutputPower = 0x00185000
TransducerData = 0x00185010
FocusDepth = 0x00185012
ProcessingFunction = 0x00185020
PostprocessingFunction = 0x00185021
MechanicalIndex = 0x00185022
BoneThermalIndex = 0x00185024
CranialThermalIndex = 0x00185026
SoftTissueThermalIndex = 0x00185027
SoftTissuefocusThermalIndex = 0x00185028
SoftTissuesurfaceThermalIndex = 0x00185029
DynamicRange = 0x00185030
TotalGain = 0x00185040
DepthofScanField = 0x00185050
PatientPosition = 0x00185100
ViewPosition = 0x00185101
ProjectionEponymousNameCodeSequence = 0x00185104
ImageTransformationMatrix = 0x00185210
ImageTranslationVector = 0x00185212
Sensitivity = 0x00186000
SequenceofUltrasoundRegions = 0x00186011
RegionSpatialFormat = 0x00186012
RegionDataType = 0x00186014
RegionFlags = 0x00186016
RegionLocationMinX0 = 0x00186018
RegionLocationMinY0 = 0x0018601A
RegionLocationMaxX1 = 0x0018601C
RegionLocationMaxY1 = 0x0018601E
ReferencePixelX0 = 0x00186020
ReferencePixelY0 = 0x00186022
PhysicalUnitsXDirection = 0x00186024
PhysicalUnitsYDirection = 0x00186026
ReferencePixelPhysicalValueX = 0x00186028
ReferencePixelPhysicalValueY = 0x0018602A
PhysicalDeltaX = 0x0018602C
PhysicalDeltaY = 0x0018602E
TransducerFrequency = 0x00186030
TransducerType = 0x00186031
PulseRepetitionFrequency = 0x00186032
DopplerCorrectionAngle = 0x00186034
SteeringAngle = 0x00186036
DopplerSampleVolumeXPositionRetired = 0x00186038
DopplerSampleVolumeXPosition = 0x00186039
DopplerSampleVolumeYPositionRetired = 0x0018603A
DopplerSampleVolumeYPosition = 0x0018603B
TMLinePositionX0Retired = 0x0018603C
TMLinePositionX0 = 0x0018603D
TMLinePositionY0Retired = 0x0018603E
TMLinePositionY0 = 0x0018603F
TMLinePositionX1Retired = 0x00186040
TMLinePositionX1 = 0x00186041
TMLinePositionY1Retired = 0x00186042
TMLinePositionY1 = 0x00186043
PixelComponentOrganization = 0x00186044
PixelComponentMask = 0x00186046
PixelComponentRangeStart = 0x00186048
PixelComponentRangeStop = 0x0018604A
PixelComponentPhysicalUnits = 0x0018604C
PixelComponentDataType = 0x0018604E
NumberofTableBreakPoints = 0x00186050
TableofXBreakPoints = 0x00186052
TableofYBreakPoints = 0x00186054
NumberofTableEntries = 0x00186056
TableofPixelValues = 0x00186058
TableofParameterValues = 0x0018605A
RWaveTimeVector = 0x00186060
DetectorConditionsNominalFlag = 0x00187000
DetectorTemperature = 0x00187001
DetectorType = 0x00187004
DetectorConfiguration = 0x00187005
DetectorDescription = 0x00187006
DetectorMode = 0x00187008
DetectorID = 0x0018700A
DateofLastDetectorCalibration = 0x0018700C
TimeofLastDetectorCalibration = 0x0018700E
ExposuresonDetectorSinceLastCalibration = 0x00187010
ExposuresonDetectorSinceManufactured = 0x00187011
DetectorTimeSinceLastExposure = 0x00187012
DetectorActiveTime = 0x00187014
DetectorActivationOffsetFromExposure = 0x00187016
DetectorBinning = 0x0018701A
DetectorElementPhysicalSize = 0x00187020
DetectorElementSpacing = 0x00187022
DetectorActiveShape = 0x00187024
DetectorActiveDimensions = 0x00187026
DetectorActiveOrigin = 0x00187028
DetectorManufacturerName = 0x0018702A
DetectorManufacturersModelName = 0x0018702B
FieldofViewOrigin = 0x00187030
FieldofViewRotation = 0x00187032
FieldofViewHorizontalFlip = 0x00187034
PixelDataAreaOriginRelativeToFOV = 0x00187036
PixelDataAreaRotationAngleRelativeToFOV = 0x00187038
GridAbsorbingMaterial = 0x00187040
GridSpacingMaterial = 0x00187041
GridThickness = 0x00187042
GridPitch = 0x00187044
GridAspectRatio = 0x00187046
GridPeriod = 0x00187048
GridFocalDistance = 0x0018704C
FilterMaterial = 0x00187050
FilterThicknessMinimum = 0x00187052
FilterThicknessMaximum = 0x00187054
FilterBeamPathLengthMinimum = 0x00187056
FilterBeamPathLengthMaximum = 0x00187058
ExposureControlMode = 0x00187060
ExposureControlModeDescription = 0x00187062
ExposureStatus = 0x00187064
PhototimerSetting = 0x00187065
ExposureTimeinS = 0x00188150
XRayTubeCurrentinA = 0x00188151
ContentQualification = 0x00189004
PulseSequenceName = 0x00189005
MRImagingModifierSequence = 0x00189006
EchoPulseSequence = 0x00189008
InversionRecovery = 0x00189009
FlowCompensation = 0x00189010
MultipleSpinEcho = 0x00189011
MultiplanarExcitation = 0x00189012
PhaseContrast = 0x00189014
TimeofFlightContrast = 0x00189015
Spoiling = 0x00189016
SteadyStatePulseSequence = 0x00189017
EchoPlanarPulseSequence = 0x00189018
TagAngleFirstAxis = 0x00189019
MagnetizationTransfer = 0x00189020
T2Preparation = 0x00189021
BloodSignalNulling = 0x00189022
SaturationRecovery = 0x00189024
SpectrallySelectedSuppression = 0x00189025
SpectrallySelectedExcitation = 0x00189026
SpatialPresaturation = 0x00189027
Tagging = 0x00189028
OversamplingPhase = 0x00189029
TagSpacingFirstDimension = 0x00189030
GeometryofkSpaceTraversal = 0x00189032
SegmentedkSpaceTraversal = 0x00189033
RectilinearPhaseEncodeReordering = 0x00189034
TagThickness = 0x00189035
PartialFourierDirection = 0x00189036
CardiacSynchronizationTechnique = 0x00189037
ReceiveCoilManufacturerName = 0x00189041
MRReceiveCoilSequence = 0x00189042
ReceiveCoilType = 0x00189043
QuadratureReceiveCoil = 0x00189044
MultiCoilDefinitionSequence = 0x00189045
MultiCoilConfiguration = 0x00189046
MultiCoilElementName = 0x00189047
MultiCoilElementUsed = 0x00189048
MRTransmitCoilSequence = 0x00189049
TransmitCoilManufacturerName = 0x00189050
TransmitCoilType = 0x00189051
SpectralWidth = 0x00189052
ChemicalShiftReference = 0x00189053
VolumeLocalizationTechnique = 0x00189054
MRAcquisitionFrequencyEncodingSteps = 0x00189058
Decoupling = 0x00189059
DecoupledNucleus = 0x00189060
DecouplingFrequency = 0x00189061
DecouplingMethod = 0x00189062
DecouplingChemicalShiftReference = 0x00189063
kspaceFiltering = 0x00189064
TimeDomainFiltering = 0x00189065
NumberofZeroFills = 0x00189066
BaselineCorrection = 0x00189067
ParallelReductionFactorInplane = 0x00189069
CardiacRRIntervalSpecified = 0x00189070
AcquisitionDuration = 0x00189073
FrameAcquisitionDateTime = 0x00189074
DiffusionDirectionality = 0x00189075
DiffusionGradientDirectionSequence = 0x00189076
ParallelAcquisition = 0x00189077
ParallelAcquisitionTechnique = 0x00189078
InversionTimes = 0x00189079
MetaboliteMapDescription = 0x00189080
PartialFourier = 0x00189081
EffectiveEchoTime = 0x00189082
MetaboliteMapCodeSequence = 0x00189083
ChemicalShiftSequence = 0x00189084
CardiacSignalSource = 0x00189085
Diffusionbvalue = 0x00189087
DiffusionGradientOrientation = 0x00189089
VelocityEncodingDirection = 0x00189090
VelocityEncodingMinimumValue = 0x00189091
VelocityEncodingAcquisitionSequence = 0x00189092
NumberofkSpaceTrajectories = 0x00189093
CoverageofkSpace = 0x00189094
SpectroscopyAcquisitionPhaseRows = 0x00189095
ParallelReductionFactorInplaneRetired = 0x00189096
TransmitterFrequency = 0x00189098
ResonantNucleus = 0x00189100
FrequencyCorrection = 0x00189101
MRSpectroscopyFOVGeometrySequence = 0x00189103
SlabThickness = 0x00189104
SlabOrientation = 0x00189105
MidSlabPosition = 0x00189106
MRSpatialSaturationSequence = 0x00189107
MRTimingandRelatedParametersSequence = 0x00189112
MREchoSequence = 0x00189114
MRModifierSequence = 0x00189115
MRDiffusionSequence = 0x00189117
CardiacSynchronizationSequence = 0x00189118
MRAveragesSequence = 0x00189119
MRFOVGeometrySequence = 0x00189125
VolumeLocalizationSequence = 0x00189126
SpectroscopyAcquisitionDataColumns = 0x00189127
DiffusionAnisotropyType = 0x00189147
FrameReferenceDateTime = 0x00189151
MRMetaboliteMapSequence = 0x00189152
ParallelReductionFactoroutofplane = 0x00189155
SpectroscopyAcquisitionOutofplanePhaseSteps = 0x00189159
BulkMotionStatus = 0x00189166
ParallelReductionFactorSecondInplane = 0x00189168
CardiacBeatRejectionTechnique = 0x00189169
RespiratoryMotionCompensationTechnique = 0x00189170
RespiratorySignalSource = 0x00189171
BulkMotionCompensationTechnique = 0x00189172
BulkMotionSignalSource = 0x00189173
ApplicableSafetyStandardAgency = 0x00189174
ApplicableSafetyStandardDescription = 0x00189175
OperatingModeSequence = 0x00189176
OperatingModeType = 0x00189177
OperatingMode = 0x00189178
SpecificAbsorptionRateDefinition = 0x00189179
GradientOutputType = 0x00189180
SpecificAbsorptionRateValue = 0x00189181
GradientOutput = 0x00189182
FlowCompensationDirection = 0x00189183
TaggingDelay = 0x00189184
RespiratoryMotionCompensationTechniqueDescription = 0x00189185
RespiratorySignalSourceID = 0x00189186
ChemicalShiftMinimumIntegrationLimitinHz = 0x00189195
ChemicalShiftMaximumIntegrationLimitinHz = 0x00189196
MRVelocityEncodingSequence = 0x00189197
FirstOrderPhaseCorrection = 0x00189198
WaterReferencedPhaseCorrection = 0x00189199
MRSpectroscopyAcquisitionType = 0x00189200
RespiratoryCyclePosition = 0x00189214
VelocityEncodingMaximumValue = 0x00189217
TagSpacingSecondDimension = 0x00189218
TagAngleSecondAxis = 0x00189219
FrameAcquisitionDuration = 0x00189220
MRImageFrameTypeSequence = 0x00189226
MRSpectroscopyFrameTypeSequence = 0x00189227
MRAcquisitionPhaseEncodingStepsinplane = 0x00189231
MRAcquisitionPhaseEncodingStepsoutofplane = 0x00189232
SpectroscopyAcquisitionPhaseColumns = 0x00189234
CardiacCyclePosition = 0x00189236
SpecificAbsorptionRateSequence = 0x00189239
RFEchoTrainLength = 0x00189240
GradientEchoTrainLength = 0x00189241
ArterialSpinLabelingContrast = 0x00189250
MRArterialSpinLabelingSequence = 0x00189251
ASLTechniqueDescription = 0x00189252
ASLSlabNumber = 0x00189253
ASLSlabThickness = 0x00189254
ASLSlabOrientation = 0x00189255
ASLMidSlabPosition = 0x00189256
ASLContext = 0x00189257
ASLPulseTrainDuration = 0x00189258
ASLCrusherFlag = 0x00189259
ASLCrusherFlowLimit = 0x0018925A
ASLCrusherDescription = 0x0018925B
ASLBolusCutoffFlag = 0x0018925C
ASLBolusCutoffTimingSequence = 0x0018925D
ASLBolusCutoffTechnique = 0x0018925E
ASLBolusCutoffDelayTime = 0x0018925F
ASLSlabSequence = 0x00189260
ChemicalShiftMinimumIntegrationLimitinppm = 0x00189295
ChemicalShiftMaximumIntegrationLimitinppm = 0x00189296
WaterReferenceAcquisition = 0x00189297
EchoPeakPosition = 0x00189298
CTAcquisitionTypeSequence = 0x00189301
AcquisitionType = 0x00189302
TubeAngle = 0x00189303
CTAcquisitionDetailsSequence = 0x00189304
RevolutionTime = 0x00189305
SingleCollimationWidth = 0x00189306
TotalCollimationWidth = 0x00189307
CTTableDynamicsSequence = 0x00189308
TableSpeed = 0x00189309
TableFeedperRotation = 0x00189310
SpiralPitchFactor = 0x00189311
CTGeometrySequence = 0x00189312
DataCollectionCenterPatient = 0x00189313
CTReconstructionSequence = 0x00189314
ReconstructionAlgorithm = 0x00189315
ConvolutionKernelGroup = 0x00189316
ReconstructionFieldofView = 0x00189317
ReconstructionTargetCenterPatient = 0x00189318
ReconstructionAngle = 0x00189319
ImageFilter = 0x00189320
CTExposureSequence = 0x00189321
ReconstructionPixelSpacing = 0x00189322
ExposureModulationType = 0x00189323
EstimatedDoseSaving = 0x00189324
CTXRayDetailsSequence = 0x00189325
CTPositionSequence = 0x00189326
TablePosition = 0x00189327
ExposureTimeinms = 0x00189328
CTImageFrameTypeSequence = 0x00189329
XRayTubeCurrentinmA = 0x00189330
ExposureinmAs = 0x00189332
ConstantVolumeFlag = 0x00189333
FluoroscopyFlag = 0x00189334
DistanceSourcetoDataCollectionCenter = 0x00189335
ContrastBolusAgentNumber = 0x00189337
ContrastBolusIngredientCodeSequence = 0x00189338
ContrastAdministrationProfileSequence = 0x00189340
ContrastBolusUsageSequence = 0x00189341
ContrastBolusAgentAdministered = 0x00189342
ContrastBolusAgentDetected = 0x00189343
ContrastBolusAgentPhase = 0x00189344
CTDIvol = 0x00189345
CTDIPhantomTypeCodeSequence = 0x00189346
CalciumScoringMassFactorPatient = 0x00189351
CalciumScoringMassFactorDevice = 0x00189352
EnergyWeightingFactor = 0x00189353
CTAdditionalXRaySourceSequence = 0x00189360
ProjectionPixelCalibrationSequence = 0x00189401
DistanceSourcetoIsocenter = 0x00189402
DistanceObjecttoTableTop = 0x00189403
ObjectPixelSpacinginCenterofBeam = 0x00189404
PositionerPositionSequence = 0x00189405
TablePositionSequence = 0x00189406
CollimatorShapeSequence = 0x00189407
PlanesinAcquisition = 0x00189410
XAXRFFrameCharacteristicsSequence = 0x00189412
FrameAcquisitionSequence = 0x00189417
XRayReceptorType = 0x00189420
AcquisitionProtocolName = 0x00189423
AcquisitionProtocolDescription = 0x00189424
ContrastBolusIngredientOpaque = 0x00189425
DistanceReceptorPlanetoDetectorHousing = 0x00189426
IntensifierActiveShape = 0x00189427
IntensifierActiveDimensions = 0x00189428
PhysicalDetectorSize = 0x00189429
PositionofIsocenterProjection = 0x00189430
FieldofViewSequence = 0x00189432
FieldofViewDescription = 0x00189433
ExposureControlSensingRegionsSequence = 0x00189434
ExposureControlSensingRegionShape = 0x00189435
ExposureControlSensingRegionLeftVerticalEdge = 0x00189436
ExposureControlSensingRegionRightVerticalEdge = 0x00189437
ExposureControlSensingRegionUpperHorizontalEdge = 0x00189438
ExposureControlSensingRegionLowerHorizontalEdge = 0x00189439
CenterofCircularExposureControlSensingRegion = 0x00189440
RadiusofCircularExposureControlSensingRegion = 0x00189441
VerticesofthePolygonalExposureControlSensingRegion = 0x00189442
ColumnAngulationPatient = 0x00189447
BeamAngle = 0x00189449
FrameDetectorParametersSequence = 0x00189451
CalculatedAnatomyThickness = 0x00189452
CalibrationSequence = 0x00189455
ObjectThicknessSequence = 0x00189456
PlaneIdentification = 0x00189457
FieldofViewDimensionsinFloat = 0x00189461
IsocenterReferenceSystemSequence = 0x00189462
PositionerIsocenterPrimaryAngle = 0x00189463
PositionerIsocenterSecondaryAngle = 0x00189464
PositionerIsocenterDetectorRotationAngle = 0x00189465
TableXPositiontoIsocenter = 0x00189466
TableYPositiontoIsocenter = 0x00189467
TableZPositiontoIsocenter = 0x00189468
TableHorizontalRotationAngle = 0x00189469
TableHeadTiltAngle = 0x00189470
TableCradleTiltAngle = 0x00189471
FrameDisplayShutterSequence = 0x00189472
AcquiredImageAreaDoseProduct = 0x00189473
CarmPositionerTabletopRelationship = 0x00189474
XRayGeometrySequence = 0x00189476
IrradiationEventIdentificationSequence = 0x00189477
XRay3DFrameTypeSequence = 0x00189504
ContributingSourcesSequence = 0x00189506
XRay3DAcquisitionSequence = 0x00189507
PrimaryPositionerScanArc = 0x00189508
SecondaryPositionerScanArc = 0x00189509
PrimaryPositionerScanStartAngle = 0x00189510
SecondaryPositionerScanStartAngle = 0x00189511
PrimaryPositionerIncrement = 0x00189514
SecondaryPositionerIncrement = 0x00189515
StartAcquisitionDateTime = 0x00189516
EndAcquisitionDateTime = 0x00189517
PrimaryPositionerIncrementSign = 0x00189518
SecondaryPositionerIncrementSign = 0x00189519
ApplicationName = 0x00189524
ApplicationVersion = 0x00189525
ApplicationManufacturer = 0x00189526
AlgorithmType = 0x00189527
AlgorithmDescription = 0x00189528
XRay3DReconstructionSequence = 0x00189530
ReconstructionDescription = 0x00189531
PerProjectionAcquisitionSequence = 0x00189538
DetectorPositionSequence = 0x00189541
XRayAcquisitionDoseSequence = 0x00189542
XRaySourceIsocenterPrimaryAngle = 0x00189543
XRaySourceIsocenterSecondaryAngle = 0x00189544
BreastSupportIsocenterPrimaryAngle = 0x00189545
BreastSupportIsocenterSecondaryAngle = 0x00189546
BreastSupportXPositiontoIsocenter = 0x00189547
BreastSupportYPositiontoIsocenter = 0x00189548
BreastSupportZPositiontoIsocenter = 0x00189549
DetectorIsocenterPrimaryAngle = 0x00189550
DetectorIsocenterSecondaryAngle = 0x00189551
DetectorXPositiontoIsocenter = 0x00189552
DetectorYPositiontoIsocenter = 0x00189553
DetectorZPositiontoIsocenter = 0x00189554
XRayGridSequence = 0x00189555
XRayFilterSequence = 0x00189556
DetectorActiveAreaTLHCPosition = 0x00189557
DetectorActiveAreaOrientation = 0x00189558
PositionerPrimaryAngleDirection = 0x00189559
DiffusionbmatrixSequence = 0x00189601
DiffusionbvalueXX = 0x00189602
DiffusionbvalueXY = 0x00189603
DiffusionbvalueXZ = 0x00189604
DiffusionbvalueYY = 0x00189605
DiffusionbvalueYZ = 0x00189606
DiffusionbvalueZZ = 0x00189607
FunctionalMRSequence = 0x00189621
FunctionalSettlingPhaseFramesPresent = 0x00189622
FunctionalSyncPulse = 0x00189623
SettlingPhaseFrame = 0x00189624
DecayCorrectionDateTime = 0x00189701
StartDensityThreshold = 0x00189715
StartRelativeDensityDifferenceThreshold = 0x00189716
StartCardiacTriggerCountThreshold = 0x00189717
StartRespiratoryTriggerCountThreshold = 0x00189718
TerminationCountsThreshold = 0x00189719
TerminationDensityThreshold = 0x00189720
TerminationRelativeDensityThreshold = 0x00189721
TerminationTimeThreshold = 0x00189722
TerminationCardiacTriggerCountThreshold = 0x00189723
TerminationRespiratoryTriggerCountThreshold = 0x00189724
DetectorGeometry = 0x00189725
TransverseDetectorSeparation = 0x00189726
AxialDetectorDimension = 0x00189727
RadiopharmaceuticalAgentNumber = 0x00189729
PETFrameAcquisitionSequence = 0x00189732
PETDetectorMotionDetailsSequence = 0x00189733
PETTableDynamicsSequence = 0x00189734
PETPositionSequence = 0x00189735
PETFrameCorrectionFactorsSequence = 0x00189736
RadiopharmaceuticalUsageSequence = 0x00189737
AttenuationCorrectionSource = 0x00189738
NumberofIterations = 0x00189739
NumberofSubsets = 0x00189740
PETReconstructionSequence = 0x00189749
PETFrameTypeSequence = 0x00189751
TimeofFlightInformationUsed = 0x00189755
ReconstructionType = 0x00189756
DecayCorrected = 0x00189758
AttenuationCorrected = 0x00189759
ScatterCorrected = 0x00189760
DeadTimeCorrected = 0x00189761
GantryMotionCorrected = 0x00189762
PatientMotionCorrected = 0x00189763
CountLossNormalizationCorrected = 0x00189764
RandomsCorrected = 0x00189765
NonuniformRadialSamplingCorrected = 0x00189766
SensitivityCalibrated = 0x00189767
DetectorNormalizationCorrection = 0x00189768
IterativeReconstructionMethod = 0x00189769
AttenuationCorrectionTemporalRelationship = 0x00189770
PatientPhysiologicalStateSequence = 0x00189771
PatientPhysiologicalStateCodeSequence = 0x00189772
DepthsofFocus = 0x00189801
ExcludedIntervalsSequence = 0x00189803
ExclusionStartDateTime = 0x00189804
ExclusionDuration = 0x00189805
USImageDescriptionSequence = 0x00189806
ImageDataTypeSequence = 0x00189807
DataType = 0x00189808
TransducerScanPatternCodeSequence = 0x00189809
AliasedDataType = 0x0018980B
PositionMeasuringDeviceUsed = 0x0018980C
TransducerGeometryCodeSequence = 0x0018980D
TransducerBeamSteeringCodeSequence = 0x0018980E
TransducerApplicationCodeSequence = 0x0018980F
ZeroVelocityPixelValue = 0x00189810
ContributingEquipmentSequence = 0x0018A001
ContributionDateTime = 0x0018A002
ContributionDescription = 0x0018A003
# DICOM group 0020 (image/relationship) tag values.
# Each constant holds the 32-bit (group << 16 | element) tag number,
# written directly as a hexadecimal literal.
StudyInstanceUID = 0x0020000D
SeriesInstanceUID = 0x0020000E
StudyID = 0x00200010
SeriesNumber = 0x00200011
AcquisitionNumber = 0x00200012
InstanceNumber = 0x00200013
IsotopeNumber = 0x00200014
PhaseNumber = 0x00200015
IntervalNumber = 0x00200016
TimeSlotNumber = 0x00200017
AngleNumber = 0x00200018
ItemNumber = 0x00200019
PatientOrientation = 0x00200020
OverlayNumber = 0x00200022
CurveNumber = 0x00200024
LUTNumber = 0x00200026
ImagePosition = 0x00200030
ImagePositionPatient = 0x00200032
ImageOrientation = 0x00200035
ImageOrientationPatient = 0x00200037
Location = 0x00200050
FrameofReferenceUID = 0x00200052
Laterality = 0x00200060
ImageLaterality = 0x00200062
ImageGeometryType = 0x00200070
MaskingImage = 0x00200080
ReportNumber = 0x002000AA
TemporalPositionIdentifier = 0x00200100
NumberofTemporalPositions = 0x00200105
TemporalResolution = 0x00200110
SynchronizationFrameofReferenceUID = 0x00200200
SOPInstanceUIDofConcatenationSource = 0x00200242
SeriesinStudy = 0x00201000
AcquisitionsinSeries = 0x00201001
ImagesinAcquisition = 0x00201002
ImagesinSeries = 0x00201003
AcquisitionsinStudy = 0x00201004
ImagesinStudy = 0x00201005
Reference = 0x00201020
PositionReferenceIndicator = 0x00201040
SliceLocation = 0x00201041
OtherStudyNumbers = 0x00201070
NumberofPatientRelatedStudies = 0x00201200
NumberofPatientRelatedSeries = 0x00201202
NumberofPatientRelatedInstances = 0x00201204
NumberofStudyRelatedSeries = 0x00201206
NumberofStudyRelatedInstances = 0x00201208
NumberofSeriesRelatedInstances = 0x00201209
ModifyingDeviceID = 0x00203401
ModifiedImageID = 0x00203402
ModifiedImageDate = 0x00203403
ModifyingDeviceManufacturer = 0x00203404
ModifiedImageTime = 0x00203405
ModifiedImageDescription = 0x00203406
ImageComments = 0x00204000
OriginalImageIdentification = 0x00205000
OriginalImageIdentificationNomenclature = 0x00205002
StackID = 0x00209056
InStackPositionNumber = 0x00209057
FrameAnatomySequence = 0x00209071
FrameLaterality = 0x00209072
FrameContentSequence = 0x00209111
PlanePositionSequence = 0x00209113
PlaneOrientationSequence = 0x00209116
TemporalPositionIndex = 0x00209128
NominalCardiacTriggerDelayTime = 0x00209153
NominalCardiacTriggerTimePriorToRPeak = 0x00209154
ActualCardiacTriggerTimePriorToRPeak = 0x00209155
FrameAcquisitionNumber = 0x00209156
DimensionIndexValues = 0x00209157
FrameComments = 0x00209158
ConcatenationUID = 0x00209161
InconcatenationNumber = 0x00209162
InconcatenationTotalNumber = 0x00209163
DimensionOrganizationUID = 0x00209164
DimensionIndexPointer = 0x00209165
FunctionalGroupPointer = 0x00209167
UnassignedSharedConvertedAttributesSequence = 0x00209170
UnassignedPerFrameConvertedAttributesSequence = 0x00209171
ConversionSourceAttributesSequence = 0x00209172
DimensionIndexPrivateCreator = 0x00209213
DimensionOrganizationSequence = 0x00209221
DimensionIndexSequence = 0x00209222
ConcatenationFrameOffsetNumber = 0x00209228
FunctionalGroupPrivateCreator = 0x00209238
NominalPercentageofCardiacPhase = 0x00209241
NominalPercentageofRespiratoryPhase = 0x00209245
StartingRespiratoryAmplitude = 0x00209246
StartingRespiratoryPhase = 0x00209247
EndingRespiratoryAmplitude = 0x00209248
EndingRespiratoryPhase = 0x00209249
RespiratoryTriggerType = 0x00209250
RRIntervalTimeNominal = 0x00209251
ActualCardiacTriggerDelayTime = 0x00209252
RespiratorySynchronizationSequence = 0x00209253
RespiratoryIntervalTime = 0x00209254
NominalRespiratoryTriggerDelayTime = 0x00209255
RespiratoryTriggerDelayThreshold = 0x00209256
ActualRespiratoryTriggerDelayTime = 0x00209257
ImagePositionVolume = 0x00209301
ImageOrientationVolume = 0x00209302
UltrasoundAcquisitionGeometry = 0x00209307
ApexPosition = 0x00209308
VolumetoTransducerMappingMatrix = 0x00209309
VolumetoTableMappingMatrix = 0x0020930A
VolumetoTransducerRelationship = 0x0020930B
PatientFrameofReferenceSource = 0x0020930C
TemporalPositionTimeOffset = 0x0020930D
PlanePositionVolumeSequence = 0x0020930E
PlaneOrientationVolumeSequence = 0x0020930F
TemporalPositionSequence = 0x00209310
DimensionOrganizationType = 0x00209311
VolumeFrameofReferenceUID = 0x00209312
TableFrameofReferenceUID = 0x00209313
DimensionDescriptionLabel = 0x00209421
PatientOrientationinFrameSequence = 0x00209450
FrameLabel = 0x00209453
AcquisitionIndex = 0x00209518
ContributingSOPInstancesReferenceSequence = 0x00209529
ReconstructionIndex = 0x00209536
# DICOM group 0022 (ophthalmic imaging) tag values.
# Each constant holds the 32-bit (group << 16 | element) tag number,
# written directly as a hexadecimal literal.
LightPathFilterPassThroughWavelength = 0x00220001
LightPathFilterPassBand = 0x00220002
ImagePathFilterPassThroughWavelength = 0x00220003
ImagePathFilterPassBand = 0x00220004
PatientEyeMovementCommanded = 0x00220005
PatientEyeMovementCommandCodeSequence = 0x00220006
SphericalLensPower = 0x00220007
CylinderLensPower = 0x00220008
CylinderAxis = 0x00220009
EmmetropicMagnification = 0x0022000A
IntraOcularPressure = 0x0022000B
HorizontalFieldofView = 0x0022000C
PupilDilated = 0x0022000D
DegreeofDilation = 0x0022000E
StereoBaselineAngle = 0x00220010
StereoBaselineDisplacement = 0x00220011
StereoHorizontalPixelOffset = 0x00220012
StereoVerticalPixelOffset = 0x00220013
StereoRotation = 0x00220014
AcquisitionDeviceTypeCodeSequence = 0x00220015
IlluminationTypeCodeSequence = 0x00220016
LightPathFilterTypeStackCodeSequence = 0x00220017
ImagePathFilterTypeStackCodeSequence = 0x00220018
LensesCodeSequence = 0x00220019
ChannelDescriptionCodeSequence = 0x0022001A
RefractiveStateSequence = 0x0022001B
MydriaticAgentCodeSequence = 0x0022001C
RelativeImagePositionCodeSequence = 0x0022001D
CameraAngleofView = 0x0022001E
StereoPairsSequence = 0x00220020
LeftImageSequence = 0x00220021
RightImageSequence = 0x00220022
StereoPairsPresent = 0x00220028
AxialLengthoftheEye = 0x00220030
OphthalmicFrameLocationSequence = 0x00220031
ReferenceCoordinates = 0x00220032
DepthSpatialResolution = 0x00220035
MaximumDepthDistortion = 0x00220036
AlongscanSpatialResolution = 0x00220037
MaximumAlongscanDistortion = 0x00220038
OphthalmicImageOrientation = 0x00220039
DepthofTransverseImage = 0x00220041
MydriaticAgentConcentrationUnitsSequence = 0x00220042
AcrossscanSpatialResolution = 0x00220048
MaximumAcrossscanDistortion = 0x00220049
MydriaticAgentConcentration = 0x0022004E
IlluminationWaveLength = 0x00220055
IlluminationPower = 0x00220056
IlluminationBandwidth = 0x00220057
MydriaticAgentSequence = 0x00220058
OphthalmicAxialMeasurementsRightEyeSequence = 0x00221007
OphthalmicAxialMeasurementsLeftEyeSequence = 0x00221008
OphthalmicAxialMeasurementsDeviceType = 0x00221009
OphthalmicAxialLengthMeasurementsType = 0x00221010
OphthalmicAxialLengthSequence = 0x00221012
OphthalmicAxialLength = 0x00221019
LensStatusCodeSequence = 0x00221024
VitreousStatusCodeSequence = 0x00221025
IOLFormulaCodeSequence = 0x00221028
IOLFormulaDetail = 0x00221029
KeratometerIndex = 0x00221033
SourceofOphthalmicAxialLengthCodeSequence = 0x00221035
TargetRefraction = 0x00221037
RefractiveProcedureOccurred = 0x00221039
RefractiveSurgeryTypeCodeSequence = 0x00221040
OphthalmicUltrasoundMethodCodeSequence = 0x00221044
OphthalmicAxialLengthMeasurementsSequence = 0x00221050
IOLPower = 0x00221053
PredictedRefractiveError = 0x00221054
OphthalmicAxialLengthVelocity = 0x00221059
LensStatusDescription = 0x00221065
VitreousStatusDescription = 0x00221066
IOLPowerSequence = 0x00221090
LensConstantSequence = 0x00221092
IOLManufacturer = 0x00221093
LensConstantDescription = 0x00221094
ImplantName = 0x00221095
KeratometryMeasurementTypeCodeSequence = 0x00221096
ImplantPartNumber = 0x00221097
ReferencedOphthalmicAxialMeasurementsSequence = 0x00221100
OphthalmicAxialLengthMeasurementsSegmentNameCodeSequence = 0x00221101
RefractiveErrorBeforeRefractiveSurgeryCodeSequence = 0x00221103
IOLPowerForExactEmmetropia = 0x00221121
IOLPowerForExactTargetRefraction = 0x00221122
AnteriorChamberDepthDefinitionCodeSequence = 0x00221125
LensThicknessSequence = 0x00221127
AnteriorChamberDepthSequence = 0x00221128
LensThickness = 0x00221130
AnteriorChamberDepth = 0x00221131
SourceofLensThicknessDataCodeSequence = 0x00221132
SourceofAnteriorChamberDepthDataCodeSequence = 0x00221133
SourceofRefractiveMeasurementsSequence = int('00221134', 16)
SourceofRefractiveMeasurementsCodeSequence = int('00221135', 16)
OphthalmicAxialLengthMeasurementModified = int('00221140', 16)
OphthalmicAxialLengthDataSourceCodeSequence = int('00221150', 16)
OphthalmicAxialLengthAcquisitionMethodCodeSequence = int('00221153', 16)
SignaltoNoiseRatio = int('00221155', 16)
OphthalmicAxialLengthDataSourceDescription = int('00221159', 16)
OphthalmicAxialLengthMeasurementsTotalLengthSequence = int('00221210', 16)
OphthalmicAxialLengthMeasurementsSegmentalLengthSequence = int(
'00221211', 16)
OphthalmicAxialLengthMeasurementsLengthSummationSequence = int(
'00221212', 16)
UltrasoundOphthalmicAxialLengthMeasurementsSequence = int('00221220', 16)
OpticalOphthalmicAxialLengthMeasurementsSequence = int('00221225', 16)
UltrasoundSelectedOphthalmicAxialLengthSequence = int('00221230', 16)
OphthalmicAxialLengthSelectionMethodCodeSequence = int('00221250', 16)
OpticalSelectedOphthalmicAxialLengthSequence = int('00221255', 16)
SelectedSegmentalOphthalmicAxialLengthSequence = int('00221257', 16)
SelectedTotalOphthalmicAxialLengthSequence = int('00221260', 16)
OphthalmicAxialLengthQualityMetricSequence = int('00221262', 16)
OphthalmicAxialLengthQualityMetricTypeCodeSequence = int('00221265', 16)
OphthalmicAxialLengthQualityMetricTypeDescription = int('00221273', 16)
IntraocularLensCalculationsRightEyeSequence = int('00221300', 16)
IntraocularLensCalculationsLeftEyeSequence = int('00221310', 16)
ReferencedOphthalmicAxialLengthMeasurementQCImageSequence = int(
'00221330', 16)
OphthalmicMappingDeviceType = int('00221415', 16)
AcquisitionMethodCodeSequence = int('00221420', 16)
AcquisitionMethodAlgorithmSequence = int('00221423', 16)
OphthalmicThicknessMapTypeCodeSequence = int('00221436', 16)
OphthalmicThicknessMappingNormalsSequence = int('00221443', 16)
RetinalThicknessDefinitionCodeSequence = int('00221445', 16)
PixelValueMappingtoCodedConceptSequence = int('00221450', 16)
MappedPixelValue = int('00221452', 16)
PixelValueMappingExplanation = int('00221454', 16)
OphthalmicThicknessMapQualityThresholdSequence = int('00221458', 16)
OphthalmicThicknessMapThresholdQualityRating = int('00221460', 16)
AnatomicStructureReferencePoint = int('00221463', 16)
RegistrationtoLocalizerSequence = int('00221465', 16)
RegisteredLocalizerUnits = int('00221466', 16)
RegisteredLocalizerTopLeftHandCorner = int('00221467', 16)
RegisteredLocalizerBottomRightHandCorner = int('00221468', 16)
OphthalmicThicknessMapQualityRatingSequence = int('00221470', 16)
RelevantOPTAttributesSequence = int('00221472', 16)
TransformationMethodCodeSequence = int('00221512', 16)
TransformationAlgorithmSequence = int('00221513', 16)
OphthalmicAxialLengthMethod = int('00221515', 16)
OphthalmicFOV = int('00221517', 16)
TwoDimensionaltoThreeDimensionalMapSequence = int('00221518', 16)
WideFieldOphthalmicPhotographyQualityRatingSequence = int('00221525', 16)
WideFieldOphthalmicPhotographyQualityThresholdSequence = int(
'00221526', 16)
WideFieldOphthalmicPhotographyThresholdQualityRating = int('00221527', 16)
XCoordinatesCenterPixelViewAngle = int('00221528', 16)
YCoordinatesCenterPixelViewAngle = int('00221529', 16)
NumberofMapPoints = int('00221530', 16)
TwoDimensionaltoThreeDimensionalMapData = int('00221531', 16)
VisualFieldHorizontalExtent = int('00240010', 16)
VisualFieldVerticalExtent = int('00240011', 16)
VisualFieldShape = int('00240012', 16)
ScreeningTestModeCodeSequence = int('00240016', 16)
MaximumStimulusLuminance = int('00240018', 16)
BackgroundLuminance = int('00240020', 16)
StimulusColorCodeSequence = int('00240021', 16)
BackgroundIlluminationColorCodeSequence = int('00240024', 16)
StimulusArea = int('00240025', 16)
StimulusPresentationTime = int('00240028', 16)
FixationSequence = int('00240032', 16)
FixationMonitoringCodeSequence = int('00240033', 16)
VisualFieldCatchTrialSequence = int('00240034', 16)
FixationCheckedQuantity = int('00240035', 16)
PatientNotProperlyFixatedQuantity = int('00240036', 16)
PresentedVisualStimuliDataFlag = int('00240037', 16)
NumberofVisualStimuli = int('00240038', 16)
ExcessiveFixationLossesDataFlag = int('00240039', 16)
ExcessiveFixationLosses = int('00240040', 16)
StimuliRetestingQuantity = int('00240042', 16)
CommentsonPatientsPerformanceofVisualField = int('00240044', 16)
FalseNegativesEstimateFlag = int('00240045', 16)
FalseNegativesEstimate = int('00240046', 16)
NegativeCatchTrialsQuantity = int('00240048', 16)
FalseNegativesQuantity = int('00240050', 16)
ExcessiveFalseNegativesDataFlag = int('00240051', 16)
ExcessiveFalseNegatives = int('00240052', 16)
FalsePositivesEstimateFlag = int('00240053', 16)
FalsePositivesEstimate = int('00240054', 16)
CatchTrialsDataFlag = int('00240055', 16)
PositiveCatchTrialsQuantity = int('00240056', 16)
TestPointNormalsDataFlag = int('00240057', 16)
TestPointNormalsSequence = int('00240058', 16)
GlobalDeviationProbabilityNormalsFlag = int('00240059', 16)
FalsePositivesQuantity = int('00240060', 16)
ExcessiveFalsePositivesDataFlag = int('00240061', 16)
ExcessiveFalsePositives = int('00240062', 16)
VisualFieldTestNormalsFlag = int('00240063', 16)
ResultsNormalsSequence = int('00240064', 16)
AgeCorrectedSensitivityDeviationAlgorithmSequence = int('00240065', 16)
GlobalDeviationFromNormal = int('00240066', 16)
GeneralizedDefectSensitivityDeviationAlgorithmSequence = int(
'00240067', 16)
LocalizedDeviationFromNormal = int('00240068', 16)
PatientReliabilityIndicator = int('00240069', 16)
VisualFieldMeanSensitivity = int('00240070', 16)
GlobalDeviationProbability = int('00240071', 16)
LocalDeviationProbabilityNormalsFlag = int('00240072', 16)
LocalizedDeviationProbability = int('00240073', 16)
ShortTermFluctuationCalculated = int('00240074', 16)
ShortTermFluctuation = int('00240075', 16)
ShortTermFluctuationProbabilityCalculated = int('00240076', 16)
ShortTermFluctuationProbability = int('00240077', 16)
CorrectedLocalizedDeviationFromNormalCalculated = int('00240078', 16)
CorrectedLocalizedDeviationFromNormal = int('00240079', 16)
CorrectedLocalizedDeviationFromNormalProbabilityCalculated = int(
'00240080', 16)
CorrectedLocalizedDeviationFromNormalProbability = int('00240081', 16)
GlobalDeviationProbabilitySequence = int('00240083', 16)
LocalizedDeviationProbabilitySequence = int('00240085', 16)
FovealSensitivityMeasured = int('00240086', 16)
FovealSensitivity = int('00240087', 16)
VisualFieldTestDuration = int('00240088', 16)
VisualFieldTestPointSequence = int('00240089', 16)
VisualFieldTestPointXCoordinate = int('00240090', 16)
VisualFieldTestPointYCoordinate = int('00240091', 16)
AgeCorrectedSensitivityDeviationValue = int('00240092', 16)
StimulusResults = int('00240093', 16)
SensitivityValue = int('00240094', 16)
RetestStimulusSeen = int('00240095', 16)
RetestSensitivityValue = int('00240096', 16)
VisualFieldTestPointNormalsSequence = int('00240097', 16)
QuantifiedDefect = int('00240098', 16)
AgeCorrectedSensitivityDeviationProbabilityValue = int('00240100', 16)
GeneralizedDefectCorrectedSensitivityDeviationFlag = int('00240102', 16)
GeneralizedDefectCorrectedSensitivityDeviationValue = int('00240103', 16)
GeneralizedDefectCorrectedSensitivityDeviationProbabilityValue = int(
'00240104', 16)
MinimumSensitivityValue = int('00240105', 16)
BlindSpotLocalized = int('00240106', 16)
BlindSpotXCoordinate = int('00240107', 16)
BlindSpotYCoordinate = int('00240108', 16)
VisualAcuityMeasurementSequence = int('00240110', 16)
RefractiveParametersUsedonPatientSequence = int('00240112', 16)
MeasurementLaterality = int('00240113', 16)
OphthalmicPatientClinicalInformationLeftEyeSequence = int('00240114', 16)
OphthalmicPatientClinicalInformationRightEyeSequence = int('00240115', 16)
FovealPointNormativeDataFlag = int('00240117', 16)
FovealPointProbabilityValue = int('00240118', 16)
ScreeningBaselineMeasured = int('00240120', 16)
ScreeningBaselineMeasuredSequence = int('00240122', 16)
ScreeningBaselineType = int('00240124', 16)
ScreeningBaselineValue = int('00240126', 16)
AlgorithmSource = int('00240202', 16)
DataSetName = int('00240306', 16)
DataSetVersion = int('00240307', 16)
DataSetSource = int('00240308', 16)
DataSetDescription = int('00240309', 16)
VisualFieldTestReliabilityGlobalIndexSequence = int('00240317', 16)
VisualFieldGlobalResultsIndexSequence = int('00240320', 16)
DataObservationSequence = int('00240325', 16)
IndexNormalsFlag = int('00240338', 16)
IndexProbability = int('00240341', 16)
IndexProbabilitySequence = int('00240344', 16)
SamplesperPixel = int('00280002', 16)
SamplesperPixelUsed = int('00280003', 16)
PhotometricInterpretation = int('00280004', 16)
ImageDimensions = int('00280005', 16)
PlanarConfiguration = int('00280006', 16)
NumberofFrames = int('00280008', 16)
FrameIncrementPointer = int('00280009', 16)
FrameDimensionPointer = int('0028000A', 16)
Rows = int('00280010', 16)
Columns = int('00280011', 16)
Planes = int('00280012', 16)
UltrasoundColorDataPresent = int('00280014', 16)
PixelSpacing = int('00280030', 16)
ZoomFactor = int('00280031', 16)
ZoomCenter = int('00280032', 16)
PixelAspectRatio = int('00280034', 16)
ImageFormat = int('00280040', 16)
ManipulatedImage = int('00280050', 16)
CorrectedImage = int('00280051', 16)
CompressionRecognitionCode = int('0028005F', 16)
CompressionCode = int('00280060', 16)
CompressionOriginator = int('00280061', 16)
CompressionLabel = int('00280062', 16)
CompressionDescription = int('00280063', 16)
CompressionSequence = int('00280065', 16)
CompressionStepPointers = int('00280066', 16)
RepeatInterval = int('00280068', 16)
BitsGrouped = int('00280069', 16)
PerimeterTable = int('00280070', 16)
PerimeterValue = int('00280071', 16)
PredictorRows = int('00280080', 16)
PredictorColumns = int('00280081', 16)
PredictorConstants = int('00280082', 16)
BlockedPixels = int('00280090', 16)
BlockRows = int('00280091', 16)
BlockColumns = int('00280092', 16)
RowOverlap = int('00280093', 16)
ColumnOverlap = int('00280094', 16)
BitsAllocated = int('00280100', 16)
BitsStored = int('00280101', 16)
HighBit = int('00280102', 16)
PixelRepresentation = int('00280103', 16)
SmallestValidPixelValue = int('00280104', 16)
LargestValidPixelValue = int('00280105', 16)
SmallestImagePixelValue = int('00280106', 16)
LargestImagePixelValue = int('00280107', 16)
SmallestPixelValueinSeries = int('00280108', 16)
LargestPixelValueinSeries = int('00280109', 16)
SmallestImagePixelValueinPlane = int('00280110', 16)
LargestImagePixelValueinPlane = int('00280111', 16)
PixelPaddingValue = int('00280120', 16)
PixelPaddingRangeLimit = int('00280121', 16)
FloatPixelPaddingValue = int('00280122', 16)
DoubleFloatPixelPaddingValue = int('00280123', 16)
FloatPixelPaddingRangeLimit = int('00280124', 16)
DoubleFloatPixelPaddingRangeLimit = int('00280125', 16)
ImageLocation = int('00280200', 16)
QualityControlImage = int('00280300', 16)
BurnedInAnnotation = int('00280301', 16)
RecognizableVisualFeatures = int('00280302', 16)
LongitudinalTemporalInformationModified = int('00280303', 16)
ReferencedColorPaletteInstanceUID = int('00280304', 16)
TransformLabel = int('00280400', 16)
TransformVersionNumber = int('00280401', 16)
NumberofTransformSteps = int('00280402', 16)
SequenceofCompressedData = int('00280403', 16)
DetailsofCoefficients = int('00280404', 16)
DCTLabel = int('00280700', 16)
DataBlockDescription = int('00280701', 16)
DataBlock = int('00280702', 16)
NormalizationFactorFormat = int('00280710', 16)
ZonalMapNumberFormat = int('00280720', 16)
ZonalMapLocation = int('00280721', 16)
ZonalMapFormat = int('00280722', 16)
AdaptiveMapFormat = int('00280730', 16)
CodeNumberFormat = int('00280740', 16)
PixelSpacingCalibrationType = int('00280A02', 16)
PixelSpacingCalibrationDescription = int('00280A04', 16)
PixelIntensityRelationship = int('00281040', 16)
PixelIntensityRelationshipSign = int('00281041', 16)
WindowCenter = int('00281050', 16)
WindowWidth = int('00281051', 16)
RescaleIntercept = int('00281052', 16)
RescaleSlope = int('00281053', 16)
RescaleType = int('00281054', 16)
WindowCenterWidthExplanation = int('00281055', 16)
VOILUTFunction = int('00281056', 16)
GrayScale = int('00281080', 16)
RecommendedViewingMode = int('00281090', 16)
GrayLookupTableDescriptor = int('00281100', 16)
RedPaletteColorLookupTableDescriptor = int('00281101', 16)
GreenPaletteColorLookupTableDescriptor = int('00281102', 16)
BluePaletteColorLookupTableDescriptor = int('00281103', 16)
AlphaPaletteColorLookupTableDescriptor = int('00281104', 16)
LargeRedPaletteColorLookupTableDescriptor = int('00281111', 16)
LargeGreenPaletteColorLookupTableDescriptor = int('00281112', 16)
LargeBluePaletteColorLookupTableDescriptor = int('00281113', 16)
PaletteColorLookupTableUID = int('00281199', 16)
GrayLookupTableData = int('00281200', 16)
RedPaletteColorLookupTableData = int('00281201', 16)
GreenPaletteColorLookupTableData = int('00281202', 16)
BluePaletteColorLookupTableData = int('00281203', 16)
AlphaPaletteColorLookupTableData = int('00281204', 16)
LargeRedPaletteColorLookupTableData = int('00281211', 16)
LargeGreenPaletteColorLookupTableData = int('00281212', 16)
LargeBluePaletteColorLookupTableData = int('00281213', 16)
LargePaletteColorLookupTableUID = int('00281214', 16)
SegmentedRedPaletteColorLookupTableData = int('00281221', 16)
SegmentedGreenPaletteColorLookupTableData = int('00281222', 16)
SegmentedBluePaletteColorLookupTableData = int('00281223', 16)
SegmentedAlphaPaletteColorLookupTableData = int('00281224', 16)
BreastImplantPresent = int('00281300', 16)
PartialView = int('00281350', 16)
PartialViewDescription = int('00281351', 16)
PartialViewCodeSequence = int('00281352', 16)
SpatialLocationsPreserved = int('0028135A', 16)
DataFrameAssignmentSequence = int('00281401', 16)
DataPathAssignment = int('00281402', 16)
BitsMappedtoColorLookupTable = int('00281403', 16)
BlendingLUT1Sequence = int('00281404', 16)
BlendingLUT1TransferFunction = int('00281405', 16)
BlendingWeightConstant = int('00281406', 16)
BlendingLookupTableDescriptor = int('00281407', 16)
BlendingLookupTableData = int('00281408', 16)
EnhancedPaletteColorLookupTableSequence = int('0028140B', 16)
BlendingLUT2Sequence = int('0028140C', 16)
BlendingLUT2TransferFunction = int('0028140D', 16)
DataPathID = int('0028140E', 16)
RGBLUTTransferFunction = int('0028140F', 16)
AlphaLUTTransferFunction = int('00281410', 16)
ICCProfile = int('00282000', 16)
ColorSpace = int('00282002', 16)
LossyImageCompression = int('00282110', 16)
LossyImageCompressionRatio = int('00282112', 16)
LossyImageCompressionMethod = int('00282114', 16)
ModalityLUTSequence = int('00283000', 16)
LUTDescriptor = int('00283002', 16)
LUTExplanation = int('00283003', 16)
ModalityLUTType = int('00283004', 16)
LUTData = int('00283006', 16)
VOILUTSequence = int('00283010', 16)
SoftcopyVOILUTSequence = int('00283110', 16)
ImagePresentationComments = int('00284000', 16)
BiPlaneAcquisitionSequence = int('00285000', 16)
RepresentativeFrameNumber = int('00286010', 16)
FrameNumbersofInterestFOI = int('00286020', 16)
FrameofInterestDescription = int('00286022', 16)
FrameofInterestType = int('00286023', 16)
MaskPointers = int('00286030', 16)
RWavePointer = int('00286040', 16)
MaskSubtractionSequence = int('00286100', 16)
MaskOperation = int('00286101', 16)
ApplicableFrameRange = int('00286102', 16)
MaskFrameNumbers = int('00286110', 16)
ContrastFrameAveraging = int('00286112', 16)
MaskSubpixelShift = int('00286114', 16)
TIDOffset = int('00286120', 16)
MaskOperationExplanation = int('00286190', 16)
EquipmentAdministratorSequence = int('00287000', 16)
NumberofDisplaySubsystems = int('00287001', 16)
CurrentConfigurationID = int('00287002', 16)
DisplaySubsystemID = int('00287003', 16)
DisplaySubsystemName = int('00287004', 16)
DisplaySubsystemDescription = int('00287005', 16)
SystemStatus = int('00287006', 16)
SystemStatusComment = int('00287007', 16)
TargetLuminanceCharacteristicsSequence = int('00287008', 16)
LuminanceCharacteristicsID = int('00287009', 16)
DisplaySubsystemConfigurationSequence = int('0028700A', 16)
ConfigurationID = int('0028700B', 16)
ConfigurationName = int('0028700C', 16)
ConfigurationDescription = int('0028700D', 16)
ReferencedTargetLuminanceCharacteristicsID = int('0028700E', 16)
QAResultsSequence = int('0028700F', 16)
DisplaySubsystemQAResultsSequence = int('00287010', 16)
ConfigurationQAResultsSequence = int('00287011', 16)
MeasurementEquipmentSequence = int('00287012', 16)
MeasurementFunctions = int('00287013', 16)
MeasurementEquipmentType = int('00287014', 16)
VisualEvaluationResultSequence = int('00287015', 16)
DisplayCalibrationResultSequence = int('00287016', 16)
DDLValue = int('00287017', 16)
CIExyWhitePoint = int('00287018', 16)
DisplayFunctionType = int('00287019', 16)
GammaValue = int('0028701A', 16)
NumberofLuminancePoints = int('0028701B', 16)
LuminanceResponseSequence = int('0028701C', 16)
TargetMinimumLuminance = int('0028701D', 16)
TargetMaximumLuminance = int('0028701E', 16)
LuminanceValue = int('0028701F', 16)
LuminanceResponseDescription = int('00287020', 16)
WhitePointFlag = int('00287021', 16)
DisplayDeviceTypeCodeSequence = int('00287022', 16)
DisplaySubsystemSequence = int('00287023', 16)
LuminanceResultSequence = int('00287024', 16)
AmbientLightValueSource = int('00287025', 16)
MeasuredCharacteristics = int('00287026', 16)
LuminanceUniformityResultSequence = int('00287027', 16)
VisualEvaluationTestSequence = int('00287028', 16)
TestResult = int('00287029', 16)
TestResultComment = int('0028702A', 16)
TestImageValidation = int('0028702B', 16)
TestPatternCodeSequence = int('0028702C', 16)
MeasurementPatternCodeSequence = int('0028702D', 16)
VisualEvaluationMethodCodeSequence = int('0028702E', 16)
PixelDataProviderURL = int('00287FE0', 16)
DataPointRows = int('00289001', 16)
DataPointColumns = int('00289002', 16)
SignalDomainColumns = int('00289003', 16)
LargestMonochromePixelValue = int('00289099', 16)
DataRepresentation = int('00289108', 16)
PixelMeasuresSequence = int('00289110', 16)
FrameVOILUTSequence = int('00289132', 16)
PixelValueTransformationSequence = int('00289145', 16)
SignalDomainRows = int('00289235', 16)
DisplayFilterPercentage = int('00289411', 16)
FramePixelShiftSequence = int('00289415', 16)
SubtractionItemID = int('00289416', 16)
PixelIntensityRelationshipLUTSequence = int('00289422', 16)
FramePixelDataPropertiesSequence = int('00289443', 16)
GeometricalProperties = int('00289444', 16)
GeometricMaximumDistortion = int('00289445', 16)
ImageProcessingApplied = int('00289446', 16)
MaskSelectionMode = int('00289454', 16)
LUTFunction = int('00289474', 16)
MaskVisibilityPercentage = int('00289478', 16)
PixelShiftSequence = int('00289501', 16)
RegionPixelShiftSequence = int('00289502', 16)
VerticesoftheRegion = int('00289503', 16)
MultiframePresentationSequence = int('00289505', 16)
PixelShiftFrameRange = int('00289506', 16)
LUTFrameRange = int('00289507', 16)
ImagetoEquipmentMappingMatrix = int('00289520', 16)
EquipmentCoordinateSystemIdentification = int('00289537', 16)
StudyStatusID = int('0032000A', 16)
StudyPriorityID = int('0032000C', 16)
StudyIDIssuer = int('00320012', 16)
StudyVerifiedDate = int('00320032', 16)
StudyVerifiedTime = int('00320033', 16)
StudyReadDate = int('00320034', 16)
StudyReadTime = int('00320035', 16)
ScheduledStudyStartDate = int('00321000', 16)
ScheduledStudyStartTime = int('00321001', 16)
ScheduledStudyStopDate = int('00321010', 16)
ScheduledStudyStopTime = int('00321011', 16)
ScheduledStudyLocation = int('00321020', 16)
ScheduledStudyLocationAETitle = int('00321021', 16)
ReasonforStudy = int('00321030', 16)
RequestingPhysicianIdentificationSequence = int('00321031', 16)
RequestingPhysician = int('00321032', 16)
RequestingService = int('00321033', 16)
RequestingServiceCodeSequence = int('00321034', 16)
StudyArrivalDate = int('00321040', 16)
StudyArrivalTime = int('00321041', 16)
StudyCompletionDate = int('00321050', 16)
StudyCompletionTime = int('00321051', 16)
StudyComponentStatusID = int('00321055', 16)
RequestedProcedureDescription = int('00321060', 16)
RequestedProcedureCodeSequence = int('00321064', 16)
RequestedContrastAgent = int('00321070', 16)
StudyComments = int('00324000', 16)
ReferencedPatientAliasSequence = int('00380004', 16)
VisitStatusID = int('00380008', 16)
AdmissionID = int('00380010', 16)
IssuerofAdmissionID = int('00380011', 16)
IssuerofAdmissionIDSequence = int('00380014', 16)
RouteofAdmissions = int('00380016', 16)
ScheduledAdmissionDate = int('0038001A', 16)
ScheduledAdmissionTime = int('0038001B', 16)
ScheduledDischargeDate = int('0038001C', 16)
ScheduledDischargeTime = int('0038001D', 16)
ScheduledPatientInstitutionResidence = int('0038001E', 16)
AdmittingDate = int('00380020', 16)
AdmittingTime = int('00380021', 16)
DischargeDate = int('00380030', 16)
DischargeTime = int('00380032', 16)
DischargeDiagnosisDescription = int('00380040', 16)
DischargeDiagnosisCodeSequence = int('00380044', 16)
SpecialNeeds = int('00380050', 16)
ServiceEpisodeID = int('00380060', 16)
IssuerofServiceEpisodeID = int('00380061', 16)
ServiceEpisodeDescription = int('00380062', 16)
IssuerofServiceEpisodeIDSequence = int('00380064', 16)
PertinentDocumentsSequence = int('00380100', 16)
PertinentResourcesSequence = int('00380101', 16)
ResourceDescription = int('00380102', 16)
CurrentPatientLocation = int('00380300', 16)
PatientsInstitutionResidence = int('00380400', 16)
PatientState = int('00380500', 16)
PatientClinicalTrialParticipationSequence = int('00380502', 16)
VisitComments = int('00384000', 16)
WaveformOriginality = int('003A0004', 16)
NumberofWaveformChannels = int('003A0005', 16)
NumberofWaveformSamples = int('003A0010', 16)
SamplingFrequency = int('003A001A', 16)
MultiplexGroupLabel = int('003A0020', 16)
ChannelDefinitionSequence = int('003A0200', 16)
WaveformChannelNumber = int('003A0202', 16)
ChannelLabel = int('003A0203', 16)
ChannelStatus = int('003A0205', 16)
ChannelSourceSequence = int('003A0208', 16)
ChannelSourceModifiersSequence = int('003A0209', 16)
SourceWaveformSequence = int('003A020A', 16)
ChannelDerivationDescription = int('003A020C', 16)
ChannelSensitivity = int('003A0210', 16)
ChannelSensitivityUnitsSequence = int('003A0211', 16)
ChannelSensitivityCorrectionFactor = int('003A0212', 16)
ChannelBaseline = int('003A0213', 16)
ChannelTimeSkew = int('003A0214', 16)
ChannelSampleSkew = int('003A0215', 16)
ChannelOffset = int('003A0218', 16)
WaveformBitsStored = int('003A021A', 16)
FilterLowFrequency = int('003A0220', 16)
FilterHighFrequency = int('003A0221', 16)
NotchFilterFrequency = int('003A0222', 16)
NotchFilterBandwidth = int('003A0223', 16)
WaveformDataDisplayScale = int('003A0230', 16)
WaveformDisplayBackgroundCIELabValue = int('003A0231', 16)
WaveformPresentationGroupSequence = int('003A0240', 16)
PresentationGroupNumber = int('003A0241', 16)
ChannelDisplaySequence = int('003A0242', 16)
ChannelRecommendedDisplayCIELabValue = int('003A0244', 16)
ChannelPosition = int('003A0245', 16)
DisplayShadingFlag = int('003A0246', 16)
FractionalChannelDisplayScale = int('003A0247', 16)
AbsoluteChannelDisplayScale = int('003A0248', 16)
MultiplexedAudioChannelsDescriptionCodeSequence = int('003A0300', 16)
ChannelIdentificationCode = int('003A0301', 16)
ChannelMode = int('003A0302', 16)
ScheduledStationAETitle = int('00400001', 16)
ScheduledProcedureStepStartDate = int('00400002', 16)
ScheduledProcedureStepStartTime = int('00400003', 16)
ScheduledProcedureStepEndDate = int('00400004', 16)
ScheduledProcedureStepEndTime = int('00400005', 16)
ScheduledPerformingPhysiciansName = int('00400006', 16)
ScheduledProcedureStepDescription = int('00400007', 16)
ScheduledProtocolCodeSequence = int('00400008', 16)
ScheduledProcedureStepID = int('00400009', 16)
StageCodeSequence = int('0040000A', 16)
ScheduledPerformingPhysicianIdentificationSequence = int('0040000B', 16)
ScheduledStationName = int('00400010', 16)
ScheduledProcedureStepLocation = int('00400011', 16)
PreMedication = int('00400012', 16)
ScheduledProcedureStepStatus = int('00400020', 16)
OrderPlacerIdentifierSequence = int('00400026', 16)
OrderFillerIdentifierSequence = int('00400027', 16)
LocalNamespaceEntityID = int('00400031', 16)
UniversalEntityID = int('00400032', 16)
UniversalEntityIDType = int('00400033', 16)
IdentifierTypeCode = int('00400035', 16)
AssigningFacilitySequence = int('00400036', 16)
AssigningJurisdictionCodeSequence = int('00400039', 16)
AssigningAgencyorDepartmentCodeSequence = int('0040003A', 16)
ScheduledProcedureStepSequence = int('00400100', 16)
ReferencedNonImageCompositeSOPInstanceSequence = int('00400220', 16)
PerformedStationAETitle = int('00400241', 16)
PerformedStationName = int('00400242', 16)
PerformedLocation = int('00400243', 16)
PerformedProcedureStepStartDate = int('00400244', 16)
PerformedProcedureStepStartTime = int('00400245', 16)
PerformedProcedureStepEndDate = int('00400250', 16)
PerformedProcedureStepEndTime = int('00400251', 16)
PerformedProcedureStepStatus = int('00400252', 16)
PerformedProcedureStepID = int('00400253', 16)
PerformedProcedureStepDescription = int('00400254', 16)
PerformedProcedureTypeDescription = int('00400255', 16)
PerformedProtocolCodeSequence = int('00400260', 16)
PerformedProtocolType = int('00400261', 16)
ScheduledStepAttributesSequence = int('00400270', 16)
RequestAttributesSequence = int('00400275', 16)
CommentsonthePerformedProcedureStep = int('00400280', 16)
PerformedProcedureStepDiscontinuationReasonCodeSequence = int(
'00400281', 16)
QuantitySequence = int('00400293', 16)
Quantity = int('00400294', 16)
MeasuringUnitsSequence = int('00400295', 16)
BillingItemSequence = int('00400296', 16)
TotalTimeofFluoroscopy = int('00400300', 16)
TotalNumberofExposures = int('00400301', 16)
EntranceDose = int('00400302', 16)
ExposedArea = int('00400303', 16)
DistanceSourcetoEntrance = int('00400306', 16)
DistanceSourcetoSupport = int('00400307', 16)
ExposureDoseSequence = int('0040030E', 16)
CommentsonRadiationDose = int('00400310', 16)
XRayOutput = int('00400312', 16)
HalfValueLayer = int('00400314', 16)
OrganDose = int('00400316', 16)
OrganExposed = int('00400318', 16)
BillingProcedureStepSequence = int('00400320', 16)
FilmConsumptionSequence = int('00400321', 16)
BillingSuppliesandDevicesSequence = int('00400324', 16)
ReferencedProcedureStepSequence = int('00400330', 16)
PerformedSeriesSequence = int('00400340', 16)
CommentsontheScheduledProcedureStep = int('00400400', 16)
ProtocolContextSequence = int('00400440', 16)
ContentItemModifierSequence = int('00400441', 16)
ScheduledSpecimenSequence = int('00400500', 16)
SpecimenAccessionNumber = int('0040050A', 16)
ContainerIdentifier = int('00400512', 16)
IssueroftheContainerIdentifierSequence = int('00400513', 16)
AlternateContainerIdentifierSequence = int('00400515', 16)
ContainerTypeCodeSequence = int('00400518', 16)
ContainerDescription = int('0040051A', 16)
ContainerComponentSequence = int('00400520', 16)
SpecimenSequence = int('00400550', 16)
SpecimenIdentifier = int('00400551', 16)
SpecimenDescriptionSequenceTrial = int('00400552', 16)
SpecimenDescriptionTrial = int('00400553', 16)
SpecimenUID = int('00400554', 16)
AcquisitionContextSequence = int('00400555', 16)
AcquisitionContextDescription = int('00400556', 16)
SpecimenTypeCodeSequence = int('0040059A', 16)
SpecimenDescriptionSequence = int('00400560', 16)
IssueroftheSpecimenIdentifierSequence = int('00400562', 16)
SpecimenShortDescription = int('00400600', 16)
SpecimenDetailedDescription = int('00400602', 16)
SpecimenPreparationSequence = int('00400610', 16)
SpecimenPreparationStepContentItemSequence = int('00400612', 16)
SpecimenLocalizationContentItemSequence = int('00400620', 16)
SlideIdentifier = int('004006FA', 16)
ImageCenterPointCoordinatesSequence = int('0040071A', 16)
XOffsetinSlideCoordinateSystem = int('0040072A', 16)
YOffsetinSlideCoordinateSystem = int('0040073A', 16)
ZOffsetinSlideCoordinateSystem = int('0040074A', 16)
PixelSpacingSequence = int('004008D8', 16)
# DICOM data-element tag constants.
#
# Each name maps to a 32-bit integer packing a DICOM (group, element) tag
# pair as 0xGGGGEEEE — e.g. 0x004008DA is tag (0040,08DA).  The values are
# written as hex literals rather than the original ``int('004008DA', 16)``
# form: the literal is equivalent byte-for-byte in value, reads as a tag
# directly, and avoids a runtime string parse per constant at import time.
#
# NOTE(review): this table appears to be generated from the DICOM data
# dictionary (PS3.6); names mirror the standard's element keywords with
# punctuation stripped — confirm against the generator before hand-editing.
CoordinateSystemAxisCodeSequence = 0x004008DA
MeasurementUnitsCodeSequence = 0x004008EA
VitalStainCodeSequenceTrial = 0x004009F8
RequestedProcedureID = 0x00401001
ReasonfortheRequestedProcedure = 0x00401002
RequestedProcedurePriority = 0x00401003
PatientTransportArrangements = 0x00401004
RequestedProcedureLocation = 0x00401005
PlacerOrderNumberProcedure = 0x00401006
FillerOrderNumberProcedure = 0x00401007
ConfidentialityCode = 0x00401008
ReportingPriority = 0x00401009
ReasonforRequestedProcedureCodeSequence = 0x0040100A
NamesofIntendedRecipientsofResults = 0x00401010
IntendedRecipientsofResultsIdentificationSequence = 0x00401011
ReasonForPerformedProcedureCodeSequence = 0x00401012
RequestedProcedureDescriptionTrial = 0x00401060
PersonIdentificationCodeSequence = 0x00401101
PersonsAddress = 0x00401102
PersonsTelephoneNumbers = 0x00401103
PersonsTelecomInformation = 0x00401104
RequestedProcedureComments = 0x00401400
ReasonfortheImagingServiceRequest = 0x00402001
IssueDateofImagingServiceRequest = 0x00402004
IssueTimeofImagingServiceRequest = 0x00402005
PlacerOrderNumberImagingServiceRequestRetired = 0x00402006
FillerOrderNumberImagingServiceRequestRetired = 0x00402007
OrderEnteredBy = 0x00402008
OrderEnterersLocation = 0x00402009
OrderCallbackPhoneNumber = 0x00402010
OrderCallbackTelecomInformation = 0x00402011
PlacerOrderNumberImagingServiceRequest = 0x00402016
FillerOrderNumberImagingServiceRequest = 0x00402017
ImagingServiceRequestComments = 0x00402400
ConfidentialityConstraintonPatientDataDescription = 0x00403001
GeneralPurposeScheduledProcedureStepStatus = 0x00404001
GeneralPurposePerformedProcedureStepStatus = 0x00404002
GeneralPurposeScheduledProcedureStepPriority = 0x00404003
ScheduledProcessingApplicationsCodeSequence = 0x00404004
ScheduledProcedureStepStartDateTime = 0x00404005
MultipleCopiesFlag = 0x00404006
PerformedProcessingApplicationsCodeSequence = 0x00404007
HumanPerformerCodeSequence = 0x00404009
ScheduledProcedureStepModificationDateTime = 0x00404010
ExpectedCompletionDateTime = 0x00404011
ResultingGeneralPurposePerformedProcedureStepsSequence = 0x00404015
ReferencedGeneralPurposeScheduledProcedureStepSequence = 0x00404016
ScheduledWorkitemCodeSequence = 0x00404018
PerformedWorkitemCodeSequence = 0x00404019
InputAvailabilityFlag = 0x00404020
InputInformationSequence = 0x00404021
RelevantInformationSequence = 0x00404022
ReferencedGeneralPurposeScheduledProcedureStepTransactionUID = 0x00404023
ScheduledStationNameCodeSequence = 0x00404025
ScheduledStationClassCodeSequence = 0x00404026
ScheduledStationGeographicLocationCodeSequence = 0x00404027
PerformedStationNameCodeSequence = 0x00404028
PerformedStationClassCodeSequence = 0x00404029
PerformedStationGeographicLocationCodeSequence = 0x00404030
RequestedSubsequentWorkitemCodeSequence = 0x00404031
NonDICOMOutputCodeSequence = 0x00404032
OutputInformationSequence = 0x00404033
ScheduledHumanPerformersSequence = 0x00404034
ActualHumanPerformersSequence = 0x00404035
HumanPerformersOrganization = 0x00404036
HumanPerformersName = 0x00404037
RawDataHandling = 0x00404040
InputReadinessState = 0x00404041
PerformedProcedureStepStartDateTime = 0x00404050
PerformedProcedureStepEndDateTime = 0x00404051
ProcedureStepCancellationDateTime = 0x00404052
OutputDestinationSequence = 0x00404070
DICOMStorageSequence = 0x00404071
STOWRSStorageSequence = 0x00404072
StorageURL = 0x00404073
XDSStorageSequence = 0x00404074
EntranceDoseinmGy = 0x00408302
ParametricMapFrameTypeSequence = 0x00409092
ReferencedImageRealWorldValueMappingSequence = 0x00409094
RealWorldValueMappingSequence = 0x00409096
PixelValueMappingCodeSequence = 0x00409098
LUTLabel = 0x00409210
RealWorldValueLastValueMapped = 0x00409211
RealWorldValueLUTData = 0x00409212
DoubleFloatRealWorldValueLastValueMapped = 0x00409213
DoubleFloatRealWorldValueFirstValueMapped = 0x00409214
RealWorldValueFirstValueMapped = 0x00409216
QuantityDefinitionSequence = 0x00409220
RealWorldValueIntercept = 0x00409224
RealWorldValueSlope = 0x00409225
FindingsFlagTrial = 0x0040A007
RelationshipType = 0x0040A010
FindingsSequenceTrial = 0x0040A020
FindingsGroupUIDTrial = 0x0040A021
ReferencedFindingsGroupUIDTrial = 0x0040A022
FindingsGroupRecordingDateTrial = 0x0040A023
FindingsGroupRecordingTimeTrial = 0x0040A024
FindingsSourceCategoryCodeSequenceTrial = 0x0040A026
VerifyingOrganization = 0x0040A027
DocumentingOrganizationIdentifierCodeSequenceTrial = 0x0040A028
VerificationDateTime = 0x0040A030
ObservationDateTime = 0x0040A032
ValueType = 0x0040A040
ConceptNameCodeSequence = 0x0040A043
MeasurementPrecisionDescriptionTrial = 0x0040A047
ContinuityOfContent = 0x0040A050
UrgencyorPriorityAlertsTrial = 0x0040A057
SequencingIndicatorTrial = 0x0040A060
DocumentIdentifierCodeSequenceTrial = 0x0040A066
DocumentAuthorTrial = 0x0040A067
DocumentAuthorIdentifierCodeSequenceTrial = 0x0040A068
IdentifierCodeSequenceTrial = 0x0040A070
VerifyingObserverSequence = 0x0040A073
ObjectBinaryIdentifierTrial = 0x0040A074
VerifyingObserverName = 0x0040A075
DocumentingObserverIdentifierCodeSequenceTrial = 0x0040A076
AuthorObserverSequence = 0x0040A078
ParticipantSequence = 0x0040A07A
CustodialOrganizationSequence = 0x0040A07C
ParticipationType = 0x0040A080
ParticipationDateTime = 0x0040A082
ObserverType = 0x0040A084
ProcedureIdentifierCodeSequenceTrial = 0x0040A085
VerifyingObserverIdentificationCodeSequence = 0x0040A088
ObjectDirectoryBinaryIdentifierTrial = 0x0040A089
EquivalentCDADocumentSequence = 0x0040A090
ReferencedWaveformChannels = 0x0040A0B0
DateofDocumentorVerbalTransactionTrial = 0x0040A110
TimeofDocumentCreationorVerbalTransactionTrial = 0x0040A112
DateTime = 0x0040A120
Date = 0x0040A121
Time = 0x0040A122
PersonName = 0x0040A123
UID = 0x0040A124
ReportStatusIDTrial = 0x0040A125
TemporalRangeType = 0x0040A130
ReferencedSamplePositions = 0x0040A132
ReferencedFrameNumbers = 0x0040A136
ReferencedTimeOffsets = 0x0040A138
ReferencedDateTime = 0x0040A13A
TextValue = 0x0040A160
FloatingPointValue = 0x0040A161
RationalNumeratorValue = 0x0040A162
RationalDenominatorValue = 0x0040A163
ObservationCategoryCodeSequenceTrial = 0x0040A167
ConceptCodeSequence = 0x0040A168
BibliographicCitationTrial = 0x0040A16A
PurposeofReferenceCodeSequence = 0x0040A170
ObservationUID = 0x0040A171
ReferencedObservationUIDTrial = 0x0040A172
ReferencedObservationClassTrial = 0x0040A173
ReferencedObjectObservationClassTrial = 0x0040A174
AnnotationGroupNumber = 0x0040A180
ObservationDateTrial = 0x0040A192
ObservationTimeTrial = 0x0040A193
MeasurementAutomationTrial = 0x0040A194
ModifierCodeSequence = 0x0040A195
IdentificationDescriptionTrial = 0x0040A224
CoordinatesSetGeometricTypeTrial = 0x0040A290
AlgorithmCodeSequenceTrial = 0x0040A296
AlgorithmDescriptionTrial = 0x0040A297
PixelCoordinatesSetTrial = 0x0040A29A
MeasuredValueSequence = 0x0040A300
NumericValueQualifierCodeSequence = 0x0040A301
CurrentObserverTrial = 0x0040A307
NumericValue = 0x0040A30A
ReferencedAccessionSequenceTrial = 0x0040A313
ReportStatusCommentTrial = 0x0040A33A
ProcedureContextSequenceTrial = 0x0040A340
VerbalSourceTrial = 0x0040A352
AddressTrial = 0x0040A353
TelephoneNumberTrial = 0x0040A354
VerbalSourceIdentifierCodeSequenceTrial = 0x0040A358
PredecessorDocumentsSequence = 0x0040A360
ReferencedRequestSequence = 0x0040A370
PerformedProcedureCodeSequence = 0x0040A372
CurrentRequestedProcedureEvidenceSequence = 0x0040A375
ReportDetailSequenceTrial = 0x0040A380
PertinentOtherEvidenceSequence = 0x0040A385
HL7StructuredDocumentReferenceSequence = 0x0040A390
ObservationSubjectUIDTrial = 0x0040A402
ObservationSubjectClassTrial = 0x0040A403
ObservationSubjectTypeCodeSequenceTrial = 0x0040A404
CompletionFlag = 0x0040A491
CompletionFlagDescription = 0x0040A492
VerificationFlag = 0x0040A493
ArchiveRequested = 0x0040A494
PreliminaryFlag = 0x0040A496
ContentTemplateSequence = 0x0040A504
IdenticalDocumentsSequence = 0x0040A525
ObservationSubjectContextFlagTrial = 0x0040A600
ObserverContextFlagTrial = 0x0040A601
ProcedureContextFlagTrial = 0x0040A603
ContentSequence = 0x0040A730
RelationshipSequenceTrial = 0x0040A731
RelationshipTypeCodeSequenceTrial = 0x0040A732
LanguageCodeSequenceTrial = 0x0040A744
UniformResourceLocatorTrial = 0x0040A992
WaveformAnnotationSequence = 0x0040B020
TemplateIdentifier = 0x0040DB00
TemplateVersion = 0x0040DB06
TemplateLocalVersion = 0x0040DB07
TemplateExtensionFlag = 0x0040DB0B
TemplateExtensionOrganizationUID = 0x0040DB0C
TemplateExtensionCreatorUID = 0x0040DB0D
ReferencedContentItemIdentifier = 0x0040DB73
HL7InstanceIdentifier = 0x0040E001
HL7DocumentEffectiveTime = 0x0040E004
HL7DocumentTypeCodeSequence = 0x0040E006
DocumentClassCodeSequence = 0x0040E008
RetrieveURI = 0x0040E010
RetrieveLocationUID = 0x0040E011
TypeofInstances = 0x0040E020
DICOMRetrievalSequence = 0x0040E021
DICOMMediaRetrievalSequence = 0x0040E022
WADORetrievalSequence = 0x0040E023
XDSRetrievalSequence = 0x0040E024
WADORSRetrievalSequence = 0x0040E025
RepositoryUniqueID = 0x0040E030
HomeCommunityID = 0x0040E031
DocumentTitle = 0x00420010
EncapsulatedDocument = 0x00420011
MIMETypeofEncapsulatedDocument = 0x00420012
SourceInstanceSequence = 0x00420013
ListofMIMETypes = 0x00420014
ProductPackageIdentifier = 0x00440001
SubstanceAdministrationApproval = 0x00440002
ApprovalStatusFurtherDescription = 0x00440003
ApprovalStatusDateTime = 0x00440004
ProductTypeCodeSequence = 0x00440007
ProductName = 0x00440008
ProductDescription = 0x00440009
ProductLotIdentifier = 0x0044000A
ProductExpirationDateTime = 0x0044000B
SubstanceAdministrationDateTime = 0x00440010
SubstanceAdministrationNotes = 0x00440011
SubstanceAdministrationDeviceID = 0x00440012
ProductParameterSequence = 0x00440013
SubstanceAdministrationParameterSequence = 0x00440019
LensDescription = 0x00460012
RightLensSequence = 0x00460014
LeftLensSequence = 0x00460015
UnspecifiedLateralityLensSequence = 0x00460016
CylinderSequence = 0x00460018
PrismSequence = 0x00460028
HorizontalPrismPower = 0x00460030
HorizontalPrismBase = 0x00460032
VerticalPrismPower = 0x00460034
VerticalPrismBase = 0x00460036
LensSegmentType = 0x00460038
OpticalTransmittance = 0x00460040
ChannelWidth = 0x00460042
PupilSize = 0x00460044
CornealSize = 0x00460046
AutorefractionRightEyeSequence = 0x00460050
AutorefractionLeftEyeSequence = 0x00460052
DistancePupillaryDistance = 0x00460060
NearPupillaryDistance = 0x00460062
IntermediatePupillaryDistance = 0x00460063
OtherPupillaryDistance = 0x00460064
KeratometryRightEyeSequence = 0x00460070
KeratometryLeftEyeSequence = 0x00460071
SteepKeratometricAxisSequence = 0x00460074
RadiusofCurvature = 0x00460075
KeratometricPower = 0x00460076
KeratometricAxis = 0x00460077
FlatKeratometricAxisSequence = 0x00460080
BackgroundColor = 0x00460092
Optotype = 0x00460094
OptotypePresentation = 0x00460095
SubjectiveRefractionRightEyeSequence = 0x00460097
SubjectiveRefractionLeftEyeSequence = 0x00460098
AddNearSequence = 0x00460100
AddIntermediateSequence = 0x00460101
AddOtherSequence = 0x00460102
AddPower = 0x00460104
ViewingDistance = 0x00460106
VisualAcuityTypeCodeSequence = 0x00460121
VisualAcuityRightEyeSequence = 0x00460122
VisualAcuityLeftEyeSequence = 0x00460123
VisualAcuityBothEyesOpenSequence = 0x00460124
ViewingDistanceType = 0x00460125
VisualAcuityModifiers = 0x00460135
DecimalVisualAcuity = 0x00460137
OptotypeDetailedDefinition = 0x00460139
ReferencedRefractiveMeasurementsSequence = 0x00460145
SpherePower = 0x00460146
CylinderPower = 0x00460147
CornealTopographySurface = 0x00460201
CornealVertexLocation = 0x00460202
PupilCentroidXCoordinate = 0x00460203
PupilCentroidYCoordinate = 0x00460204
EquivalentPupilRadius = 0x00460205
CornealTopographyMapTypeCodeSequence = 0x00460207
VerticesoftheOutlineofPupil = 0x00460208
CornealTopographyMappingNormalsSequence = 0x00460210
MaximumCornealCurvatureSequence = 0x00460211
MaximumCornealCurvature = 0x00460212
MaximumCornealCurvatureLocation = 0x00460213
MinimumKeratometricSequence = 0x00460215
SimulatedKeratometricCylinderSequence = 0x00460218
AverageCornealPower = 0x00460220
CornealISValue = 0x00460224
AnalyzedArea = 0x00460227
SurfaceRegularityIndex = 0x00460230
SurfaceAsymmetryIndex = 0x00460232
CornealEccentricityIndex = 0x00460234
KeratoconusPredictionIndex = 0x00460236
DecimalPotentialVisualAcuity = 0x00460238
CornealTopographyMapQualityEvaluation = 0x00460242
SourceImageCornealProcessedDataSequence = 0x00460244
CornealPointLocation = 0x00460247
CornealPointEstimated = 0x00460248
AxialPower = 0x00460249
TangentialPower = 0x00460250
RefractivePower = 0x00460251
RelativeElevation = 0x00460252
CornealWavefront = 0x00460253
ImagedVolumeWidth = 0x00480001
ImagedVolumeHeight = 0x00480002
ImagedVolumeDepth = 0x00480003
TotalPixelMatrixColumns = 0x00480006
TotalPixelMatrixRows = 0x00480007
TotalPixelMatrixOriginSequence = 0x00480008
SpecimenLabelinImage = 0x00480010
FocusMethod = 0x00480011
ExtendedDepthofField = 0x00480012
NumberofFocalPlanes = 0x00480013
DistanceBetweenFocalPlanes = 0x00480014
RecommendedAbsentPixelCIELabValue = 0x00480015
IlluminatorTypeCodeSequence = 0x00480100
ImageOrientationSlide = 0x00480102
OpticalPathSequence = 0x00480105
OpticalPathIdentifier = 0x00480106
OpticalPathDescription = 0x00480107
IlluminationColorCodeSequence = 0x00480108
SpecimenReferenceSequence = 0x00480110
CondenserLensPower = 0x00480111
ObjectiveLensPower = 0x00480112
ObjectiveLensNumericalAperture = 0x00480113
PaletteColorLookupTableSequence = 0x00480120
ReferencedImageNavigationSequence = 0x00480200
TopLeftHandCornerofLocalizerArea = 0x00480201
BottomRightHandCornerofLocalizerArea = 0x00480202
OpticalPathIdentificationSequence = 0x00480207
PlanePositionSlideSequence = 0x0048021A
ColumnPositionInTotalImagePixelMatrix = 0x0048021E
RowPositionInTotalImagePixelMatrix = 0x0048021F
PixelOriginInterpretation = 0x00480301
CalibrationImage = 0x00500004
DeviceSequence = 0x00500010
ContainerComponentTypeCodeSequence = 0x00500012
ContainerComponentThickness = 0x00500013
DeviceLength = 0x00500014
ContainerComponentWidth = 0x00500015
DeviceDiameter = 0x00500016
DeviceDiameterUnits = 0x00500017
DeviceVolume = 0x00500018
InterMarkerDistance = 0x00500019
ContainerComponentMaterial = 0x0050001A
ContainerComponentID = 0x0050001B
ContainerComponentLength = 0x0050001C
ContainerComponentDiameter = 0x0050001D
ContainerComponentDescription = 0x0050001E
DeviceDescription = 0x00500020
ContrastBolusIngredientPercentbyVolume = 0x00520001
OCTFocalDistance = 0x00520002
BeamSpotSize = 0x00520003
EffectiveRefractiveIndex = 0x00520004
OCTAcquisitionDomain = 0x00520006
OCTOpticalCenterWavelength = 0x00520007
AxialResolution = 0x00520008
RangingDepth = 0x00520009
AlineRate = 0x00520011
AlinesPerFrame = 0x00520012
CatheterRotationalRate = 0x00520013
AlinePixelSpacing = 0x00520014
ModeofPercutaneousAccessSequence = 0x00520016
IntravascularOCTFrameTypeSequence = 0x00520025
OCTZOffsetApplied = 0x00520026
IntravascularFrameContentSequence = 0x00520027
IntravascularLongitudinalDistance = 0x00520028
IntravascularOCTFrameContentSequence = 0x00520029
OCTZOffsetCorrection = 0x00520030
CatheterDirectionofRotation = 0x00520031
SeamLineLocation = 0x00520033
FirstAlineLocation = 0x00520034
SeamLineIndex = 0x00520036
NumberofPaddedAlines = 0x00520038
InterpolationType = 0x00520039
RefractiveIndexApplied = 0x0052003A
EnergyWindowVector = 0x00540010
NumberofEnergyWindows = 0x00540011
EnergyWindowInformationSequence = 0x00540012
EnergyWindowRangeSequence = 0x00540013
EnergyWindowLowerLimit = 0x00540014
EnergyWindowUpperLimit = 0x00540015
RadiopharmaceuticalInformationSequence = 0x00540016
ResidualSyringeCounts = 0x00540017
EnergyWindowName = 0x00540018
DetectorVector = 0x00540020
NumberofDetectors = 0x00540021
DetectorInformationSequence = 0x00540022
PhaseVector = 0x00540030
NumberofPhases = 0x00540031
PhaseInformationSequence = 0x00540032
NumberofFramesinPhase = 0x00540033
PhaseDelay = 0x00540036
PauseBetweenFrames = 0x00540038
PhaseDescription = 0x00540039
RotationVector = 0x00540050
NumberofRotations = 0x00540051
RotationInformationSequence = 0x00540052
NumberofFramesinRotation = 0x00540053
RRIntervalVector = 0x00540060
NumberofRRIntervals = 0x00540061
GatedInformationSequence = 0x00540062
DataInformationSequence = 0x00540063
TimeSlotVector = 0x00540070
NumberofTimeSlots = 0x00540071
TimeSlotInformationSequence = 0x00540072
TimeSlotTime = 0x00540073
SliceVector = 0x00540080
NumberofSlices = 0x00540081
AngularViewVector = 0x00540090
TimeSliceVector = 0x00540100
NumberofTimeSlices = 0x00540101
StartAngle = 0x00540200
TypeofDetectorMotion = 0x00540202
TriggerVector = 0x00540210
NumberofTriggersinPhase = 0x00540211
ViewCodeSequence = 0x00540220
ViewModifierCodeSequence = 0x00540222
RadionuclideCodeSequence = 0x00540300
AdministrationRouteCodeSequence = 0x00540302
RadiopharmaceuticalCodeSequence = 0x00540304
CalibrationDataSequence = 0x00540306
EnergyWindowNumber = 0x00540308
ImageID = 0x00540400
PatientOrientationCodeSequence = 0x00540410
PatientOrientationModifierCodeSequence = 0x00540412
PatientGantryRelationshipCodeSequence = 0x00540414
SliceProgressionDirection = 0x00540500
ScanProgressionDirection = 0x00540501
SeriesType = 0x00541000
Units = 0x00541001
CountsSource = 0x00541002
ReprojectionMethod = 0x00541004
SUVType = 0x00541006
RandomsCorrectionMethod = 0x00541100
AttenuationCorrectionMethod = 0x00541101
DecayCorrection = 0x00541102
ReconstructionMethod = 0x00541103
DetectorLinesofResponseUsed = 0x00541104
ScatterCorrectionMethod = 0x00541105
AxialAcceptance = 0x00541200
AxialMash = 0x00541201
TransverseMash = 0x00541202
DetectorElementSize = 0x00541203
CoincidenceWindowWidth = 0x00541210
SecondaryCountsType = 0x00541220
FrameReferenceTime = 0x00541300
PrimaryPromptsCountsAccumulated = 0x00541310
SecondaryCountsAccumulated = 0x00541311
SliceSensitivityFactor = 0x00541320
DecayFactor = 0x00541321
DoseCalibrationFactor = 0x00541322
ScatterFractionFactor = 0x00541323
DeadTimeFactor = 0x00541324
ImageIndex = 0x00541330
CountsIncluded = 0x00541400
DeadTimeCorrectionFlag = 0x00541401
HistogramSequence = 0x00603000
HistogramNumberofBins = 0x00603002
HistogramFirstBinValue = 0x00603004
HistogramLastBinValue = 0x00603006
HistogramBinWidth = 0x00603008
HistogramExplanation = 0x00603010
HistogramData = 0x00603020
SegmentationType = 0x00620001
SegmentSequence = 0x00620002
SegmentedPropertyCategoryCodeSequence = 0x00620003
SegmentNumber = 0x00620004
SegmentLabel = 0x00620005
SegmentDescription = 0x00620006
SegmentAlgorithmType = 0x00620008
SegmentAlgorithmName = 0x00620009
SegmentIdentificationSequence = 0x0062000A
ReferencedSegmentNumber = 0x0062000B
RecommendedDisplayGrayscaleValue = 0x0062000C
RecommendedDisplayCIELabValue = 0x0062000D
MaximumFractionalValue = 0x0062000E
SegmentedPropertyTypeCodeSequence = 0x0062000F
SegmentationFractionalType = 0x00620010
SegmentedPropertyTypeModifierCodeSequence = 0x00620011
UsedSegmentsSequence = 0x00620012
TrackingID = 0x00620020
TrackingUID = 0x00620021
DeformableRegistrationSequence = 0x00640002
SourceFrameofReferenceUID = 0x00640003
DeformableRegistrationGridSequence = 0x00640005
GridDimensions = 0x00640007
GridResolution = 0x00640008
VectorGridData = 0x00640009
PreDeformationMatrixRegistrationSequence = 0x0064000F
PostDeformationMatrixRegistrationSequence = 0x00640010
NumberofSurfaces = 0x00660001
SurfaceSequence = 0x00660002
SurfaceNumber = 0x00660003
SurfaceComments = 0x00660004
SurfaceProcessing = 0x00660009
SurfaceProcessingRatio = 0x0066000A
SurfaceProcessingDescription = 0x0066000B
RecommendedPresentationOpacity = 0x0066000C
RecommendedPresentationType = 0x0066000D
FiniteVolume = 0x0066000E
Manifold = 0x00660010
SurfacePointsSequence = 0x00660011
SurfacePointsNormalsSequence = 0x00660012
SurfaceMeshPrimitivesSequence = 0x00660013
NumberofSurfacePoints = 0x00660015
PointCoordinatesData = 0x00660016
PointPositionAccuracy = 0x00660017
MeanPointDistance = 0x00660018
MaximumPointDistance = 0x00660019
PointsBoundingBoxCoordinates = 0x0066001A
AxisofRotation = 0x0066001B
CenterofRotation = 0x0066001C
NumberofVectors = 0x0066001E
VectorDimensionality = 0x0066001F
VectorAccuracy = 0x00660020
VectorCoordinateData = 0x00660021
TrianglePointIndexList = 0x00660023
EdgePointIndexList = 0x00660024
VertexPointIndexList = 0x00660025
TriangleStripSequence = 0x00660026
TriangleFanSequence = 0x00660027
LineSequence = 0x00660028
PrimitivePointIndexList = 0x00660029
SurfaceCount = 0x0066002A
ReferencedSurfaceSequence = 0x0066002B
ReferencedSurfaceNumber = 0x0066002C
SegmentSurfaceGenerationAlgorithmIdentificationSequence = 0x0066002D
SegmentSurfaceSourceInstanceSequence = 0x0066002E
AlgorithmFamilyCodeSequence = 0x0066002F
AlgorithmNameCodeSequence = 0x00660030
AlgorithmVersion = 0x00660031
AlgorithmParameters = 0x00660032
FacetSequence = 0x00660034
SurfaceProcessingAlgorithmIdentificationSequence = 0x00660035
AlgorithmName = 0x00660036
RecommendedPointRadius = 0x00660037
RecommendedLineThickness = 0x00660038
LongPrimitivePointIndexList = 0x00660040
LongTrianglePointIndexList = 0x00660041
LongEdgePointIndexList = 0x00660042
LongVertexPointIndexList = 0x00660043
TrackSetSequence = 0x00660101
TrackSequence = 0x00660102
RecommendedDisplayCIELabValueList = 0x00660103
TrackingAlgorithmIdentificationSequence = 0x00660104
TrackSetNumber = 0x00660105
TrackSetLabel = 0x00660106
TrackSetDescription = 0x00660107
TrackSetAnatomicalTypeCodeSequence = 0x00660108
MeasurementsSequence = 0x00660121
TrackSetStatisticsSequence = 0x00660124
FloatingPointValues = 0x00660125
TrackPointIndexList = 0x00660129
TrackStatisticsSequence = 0x00660130
MeasurementValuesSequence = 0x00660132
DiffusionAcquisitionCodeSequence = 0x00660133
DiffusionModelCodeSequence = 0x00660134
ImplantSize = 0x00686210
ImplantTemplateVersion = 0x00686221
ReplacedImplantTemplateSequence = 0x00686222
ImplantType = 0x00686223
DerivationImplantTemplateSequence = 0x00686224
OriginalImplantTemplateSequence = 0x00686225
EffectiveDateTime = 0x00686226
ImplantTargetAnatomySequence = 0x00686230
InformationFromManufacturerSequence = 0x00686260
NotificationFromManufacturerSequence = 0x00686265
InformationIssueDateTime = 0x00686270
InformationSummary = 0x00686280
ImplantRegulatoryDisapprovalCodeSequence = 0x006862A0
OverallTemplateSpatialTolerance = 0x006862A5
HPGLDocumentSequence = 0x006862C0
HPGLDocumentID = 0x006862D0
HPGLDocumentLabel = 0x006862D5
ViewOrientationCodeSequence = 0x006862E0
ViewOrientationModifier = 0x006862F0
HPGLDocumentScaling = 0x006862F2
HPGLDocument = 0x00686300
HPGLContourPenNumber = 0x00686310
HPGLPenSequence = 0x00686320
HPGLPenNumber = 0x00686330
HPGLPenLabel = 0x00686340
HPGLPenDescription = 0x00686345
RecommendedRotationPoint = 0x00686346
BoundingRectangle = 0x00686347
ImplantTemplate3DModelSurfaceNumber = 0x00686350
SurfaceModelDescriptionSequence = 0x00686360
SurfaceModelLabel = 0x00686380
SurfaceModelScalingFactor = 0x00686390
MaterialsCodeSequence = 0x006863A0
CoatingMaterialsCodeSequence = 0x006863A4
ImplantTypeCodeSequence = 0x006863A8
FixationMethodCodeSequence = 0x006863AC
MatingFeatureSetsSequence = 0x006863B0
MatingFeatureSetID = 0x006863C0
MatingFeatureSetLabel = 0x006863D0
MatingFeatureSequence = 0x006863E0
MatingFeatureID = 0x006863F0
MatingFeatureDegreeofFreedomSequence = 0x00686400
DegreeofFreedomID = 0x00686410
DegreeofFreedomType = 0x00686420
TwoDMatingFeatureCoordinatesSequence = 0x00686430
ReferencedHPGLDocumentID = 0x00686440
TwoDMatingPoint = 0x00686450
TwoDMatingAxes = 0x00686460
TwoDDegreeofFreedomSequence = 0x00686470
ThreeDDegreeofFreedomAxis = 0x00686490
RangeofFreedom = 0x006864A0
ThreeDMatingPoint = 0x006864C0
ThreeDMatingAxes = 0x006864D0
TwoDDegreeofFreedomAxis = 0x006864F0
PlanningLandmarkPointSequence = 0x00686500
PlanningLandmarkLineSequence = 0x00686510
PlanningLandmarkPlaneSequence = 0x00686520
PlanningLandmarkID = 0x00686530
PlanningLandmarkDescription = 0x00686540
PlanningLandmarkIdentificationCodeSequence = 0x00686545
TwoDPointCoordinatesSequence = 0x00686550
TwoDPointCoordinates = 0x00686560
ThreeDPointCoordinates = 0x00686590
TwoDLineCoordinatesSequence = 0x006865A0
TwoDLineCoordinates = 0x006865B0
ThreeDLineCoordinates = 0x006865D0
TwoDPlaneCoordinatesSequence = 0x006865E0
TwoDPlaneIntersection = 0x006865F0
ThreeDPlaneOrigin = 0x00686610
ThreeDPlaneNormal = 0x00686620
GraphicAnnotationSequence = 0x00700001
GraphicLayer = 0x00700002
BoundingBoxAnnotationUnits = 0x00700003
AnchorPointAnnotationUnits = 0x00700004
GraphicAnnotationUnits = 0x00700005
UnformattedTextValue = 0x00700006
TextObjectSequence = 0x00700008
GraphicObjectSequence = 0x00700009
BoundingBoxTopLeftHandCorner = 0x00700010
BoundingBoxBottomRightHandCorner = 0x00700011
BoundingBoxTextHorizontalJustification = 0x00700012
AnchorPoint = 0x00700014
AnchorPointVisibility = 0x00700015
GraphicDimensions = 0x00700020
NumberofGraphicPoints = 0x00700021
GraphicData = 0x00700022
GraphicType = 0x00700023
GraphicFilled = 0x00700024
ImageRotationRetired = 0x00700040
ImageHorizontalFlip = 0x00700041
ImageRotation = 0x00700042
DisplayedAreaTopLeftHandCornerTrial = 0x00700050
DisplayedAreaBottomRightHandCornerTrial = 0x00700051
DisplayedAreaTopLeftHandCorner = 0x00700052
DisplayedAreaBottomRightHandCorner = 0x00700053
DisplayedAreaSelectionSequence = 0x0070005A
GraphicLayerSequence = 0x00700060
GraphicLayerOrder = 0x00700062
GraphicLayerRecommendedDisplayGrayscaleValue = 0x00700066
GraphicLayerRecommendedDisplayRGBValue = 0x00700067
GraphicLayerDescription = 0x00700068
ContentLabel = 0x00700080
ContentDescription = 0x00700081
PresentationCreationDate = 0x00700082
PresentationCreationTime = 0x00700083
ContentCreatorsName = 0x00700084
ContentCreatorsIdentificationCodeSequence = 0x00700086
AlternateContentDescriptionSequence = 0x00700087
PresentationSizeMode = 0x00700100
PresentationPixelSpacing = 0x00700101
PresentationPixelAspectRatio = 0x00700102
PresentationPixelMagnificationRatio = 0x00700103
GraphicGroupLabel = 0x00700207
GraphicGroupDescription = 0x00700208
CompoundGraphicSequence = 0x00700209
CompoundGraphicInstanceID = 0x00700226
FontName = 0x00700227
FontNameType = 0x00700228
CSSFontName = 0x00700229
RotationAngle = 0x00700230
TextStyleSequence = 0x00700231
LineStyleSequence = 0x00700232
FillStyleSequence = 0x00700233
GraphicGroupSequence = 0x00700234
TextColorCIELabValue = 0x00700241
HorizontalAlignment = 0x00700242
VerticalAlignment = 0x00700243
ShadowStyle = 0x00700244
ShadowOffsetX = 0x00700245
ShadowOffsetY = 0x00700246
ShadowColorCIELabValue = 0x00700247
Underlined = 0x00700248
Bold = 0x00700249
Italic = 0x00700250
PatternOnColorCIELabValue = 0x00700251
PatternOffColorCIELabValue = 0x00700252
LineThickness = 0x00700253
LineDashingStyle = 0x00700254
LinePattern = 0x00700255
FillPattern = 0x00700256
FillMode = 0x00700257
ShadowOpacity = 0x00700258
GapLength = 0x00700261
DiameterofVisibility = 0x00700262
RotationPoint = 0x00700273
TickAlignment = 0x00700274
ShowTickLabel = 0x00700278
TickLabelAlignment = 0x00700279
CompoundGraphicUnits = 0x00700282
PatternOnOpacity = 0x00700284
PatternOffOpacity = 0x00700285
MajorTicksSequence = 0x00700287
TickPosition = 0x00700288
TickLabel = 0x00700289
CompoundGraphicType = 0x00700294
GraphicGroupID = 0x00700295
ShapeType = 0x00700306
RegistrationSequence = 0x00700308
MatrixRegistrationSequence = 0x00700309
MatrixSequence = 0x0070030A
FrameofReferencetoDisplayedCoordinateSystemTransformationMatrix = 0x0070030B
FrameofReferenceTransformationMatrixType = 0x0070030C
RegistrationTypeCodeSequence = 0x0070030D
FiducialDescription = 0x0070030F
FiducialIdentifier = 0x00700310
FiducialIdentifierCodeSequence = 0x00700311
ContourUncertaintyRadius = 0x00700312
UsedFiducialsSequence = 0x00700314
GraphicCoordinatesDataSequence = 0x00700318
FiducialUID = 0x0070031A
FiducialSetSequence = 0x0070031C
FiducialSequence = 0x0070031E
GraphicLayerRecommendedDisplayCIELabValue = 0x00700401
BlendingSequence = 0x00700402
RelativeOpacity = 0x00700403
ReferencedSpatialRegistrationSequence = 0x00700404
BlendingPosition = 0x00700405
PresentationDisplayCollectionUID = 0x00701101
PresentationSequenceCollectionUID = 0x00701102
PresentationSequencePositionIndex = 0x00701103
RenderedImageReferenceSequence = 0x00701104
VolumetricPresentationStateInputSequence = 0x00701201
PresentationInputType = 0x00701202
InputSequencePositionIndex = 0x00701203
Crop = 0x00701204
CroppingSpecificationIndex = 0x00701205
CompositingMethod = 0x00701206
VolumetricPresentationInputNumber = 0x00701207
ImageVolumeGeometry = 0x00701208
VolumeCroppingSequence = 0x00701301
VolumeCroppingMethod = 0x00701302
BoundingBoxCrop = 0x00701303
ObliqueCroppingPlaneSequence = 0x00701304
Plane = 0x00701305
PlaneNormal = 0x00701306
CroppingSpecificationNumber = 0x00701309
MultiPlanarReconstructionStyle = 0x00701501
MPRThicknessType = 0x00701502
MPRSlabThickness = 0x00701503
MPRTopLeftHandCorner = 0x00701505
MPRViewWidthDirection = 0x00701507
MPRViewWidth = 0x00701508
NumberofVolumetricCurvePoints = 0x0070150C
VolumetricCurvePoints = 0x0070150D
MPRViewHeightDirection = 0x00701511
MPRViewHeight = 0x00701512
PresentationStateClassificationComponentSequence = 0x00701801
ComponentType = 0x00701802
ComponentInputSequence = 0x00701803
VolumetricPresentationInputIndex = int('00701804', 16)
PresentationStateCompositorComponentSequence = int('00701805', 16)
WeightingTransferFunctionSequence = int('00701806', 16)
WeightingLookupTableDescriptor = int('00701807', 16)
WeightingLookupTableData = int('00701808', 16)
VolumetricAnnotationSequence = int('00701901', 16)
ReferencedStructuredContextSequence = int('00701903', 16)
ReferencedContentItem = int('00701904', 16)
VolumetricPresentationInputAnnotationSequence = int('00701905', 16)
AnnotationClipping = int('00701907', 16)
PresentationAnimationStyle = int('00701A01', 16)
RecommendedAnimationRate = int('00701A03', 16)
AnimationCurveSequence = int('00701A04', 16)
AnimationStepSize = int('00701A05', 16)
# Group 0072: hanging protocol, image sets / display sets and selector-value
# tags.  Constants are 0xGGGGEEEE (group << 16 | element) hex literals — same
# values as the original int('XXXXXXXX', 16) calls, without the runtime parse.
HangingProtocolName = 0x00720002
HangingProtocolDescription = 0x00720004
HangingProtocolLevel = 0x00720006
HangingProtocolCreator = 0x00720008
HangingProtocolCreationDateTime = 0x0072000A
HangingProtocolDefinitionSequence = 0x0072000C
HangingProtocolUserIdentificationCodeSequence = 0x0072000E
HangingProtocolUserGroupName = 0x00720010
SourceHangingProtocolSequence = 0x00720012
NumberofPriorsReferenced = 0x00720014
ImageSetsSequence = 0x00720020
ImageSetSelectorSequence = 0x00720022
ImageSetSelectorUsageFlag = 0x00720024
SelectorAttribute = 0x00720026
SelectorValueNumber = 0x00720028
TimeBasedImageSetsSequence = 0x00720030
ImageSetNumber = 0x00720032
ImageSetSelectorCategory = 0x00720034
RelativeTime = 0x00720038
RelativeTimeUnits = 0x0072003A
AbstractPriorValue = 0x0072003C
AbstractPriorCodeSequence = 0x0072003E
ImageSetLabel = 0x00720040
SelectorAttributeVR = 0x00720050
SelectorSequencePointer = 0x00720052
SelectorSequencePointerPrivateCreator = 0x00720054
SelectorAttributePrivateCreator = 0x00720056
# One Selector<VR>Value tag per DICOM value representation.
SelectorAEValue = 0x0072005E
SelectorASValue = 0x0072005F
SelectorATValue = 0x00720060
SelectorDAValue = 0x00720061
SelectorCSValue = 0x00720062
SelectorDTValue = 0x00720063
SelectorISValue = 0x00720064
SelectorOBValue = 0x00720065
SelectorLOValue = 0x00720066
SelectorOFValue = 0x00720067
SelectorLTValue = 0x00720068
SelectorOWValue = 0x00720069
SelectorPNValue = 0x0072006A
SelectorTMValue = 0x0072006B
SelectorSHValue = 0x0072006C
SelectorUNValue = 0x0072006D
SelectorSTValue = 0x0072006E
SelectorUCValue = 0x0072006F
SelectorUTValue = 0x00720070
SelectorURValue = 0x00720071
SelectorDSValue = 0x00720072
SelectorODValue = 0x00720073
SelectorFDValue = 0x00720074
SelectorOLValue = 0x00720075
SelectorFLValue = 0x00720076
SelectorULValue = 0x00720078
SelectorUSValue = 0x0072007A
SelectorSLValue = 0x0072007C
SelectorSSValue = 0x0072007E
SelectorUIValue = 0x0072007F
SelectorCodeSequenceValue = 0x00720080
NumberofScreens = 0x00720100
NominalScreenDefinitionSequence = 0x00720102
NumberofVerticalPixels = 0x00720104
NumberofHorizontalPixels = 0x00720106
DisplayEnvironmentSpatialPosition = 0x00720108
ScreenMinimumGrayscaleBitDepth = 0x0072010A
ScreenMinimumColorBitDepth = 0x0072010C
ApplicationMaximumRepaintTime = 0x0072010E
DisplaySetsSequence = 0x00720200
DisplaySetNumber = 0x00720202
DisplaySetLabel = 0x00720203
DisplaySetPresentationGroup = 0x00720204
DisplaySetPresentationGroupDescription = 0x00720206
PartialDataDisplayHandling = 0x00720208
SynchronizedScrollingSequence = 0x00720210
DisplaySetScrollingGroup = 0x00720212
NavigationIndicatorSequence = 0x00720214
NavigationDisplaySet = 0x00720216
ReferenceDisplaySets = 0x00720218
ImageBoxesSequence = 0x00720300
ImageBoxNumber = 0x00720302
ImageBoxLayoutType = 0x00720304
ImageBoxTileHorizontalDimension = 0x00720306
ImageBoxTileVerticalDimension = 0x00720308
ImageBoxScrollDirection = 0x00720310
ImageBoxSmallScrollType = 0x00720312
ImageBoxSmallScrollAmount = 0x00720314
ImageBoxLargeScrollType = 0x00720316
ImageBoxLargeScrollAmount = 0x00720318
ImageBoxOverlapPriority = 0x00720320
CineRelativetoRealTime = 0x00720330
FilterOperationsSequence = 0x00720400
FilterbyCategory = 0x00720402
FilterbyAttributePresence = 0x00720404
FilterbyOperator = 0x00720406
StructuredDisplayBackgroundCIELabValue = 0x00720420
EmptyImageBoxCIELabValue = 0x00720421
StructuredDisplayImageBoxSequence = 0x00720422
StructuredDisplayTextBoxSequence = 0x00720424
ReferencedFirstFrameSequence = 0x00720427
ImageBoxSynchronizationSequence = 0x00720430
SynchronizedImageBoxList = 0x00720432
TypeofSynchronization = 0x00720434
BlendingOperationType = 0x00720500
ReformattingOperationType = 0x00720510
ReformattingThickness = 0x00720512
ReformattingInterval = 0x00720514
ReformattingOperationInitialViewDirection = 0x00720516
ThreeDRenderingType = 0x00720520
SortingOperationsSequence = 0x00720600
SortbyCategory = 0x00720602
SortingDirection = 0x00720604
DisplaySetPatientOrientation = 0x00720700
VOIType = 0x00720702
PseudoColorType = 0x00720704
PseudoColorPaletteInstanceReferenceSequence = 0x00720705
ShowGrayscaleInverted = 0x00720706
ShowImageTrueSizeFlag = 0x00720710
ShowGraphicAnnotationFlag = 0x00720712
ShowPatientDemographicsFlag = 0x00720714
ShowAcquisitionTechniquesFlag = 0x00720716
DisplaySetHorizontalJustification = 0x00720717
DisplaySetVerticalJustification = 0x00720718
# Group 0074: unified procedure step / RT treatment-delivery task tags.
# Values are 0xGGGGEEEE hex literals, identical to the original
# int('XXXXXXXX', 16) results but without the import-time string parse.
ContinuationStartMeterset = 0x00740120
ContinuationEndMeterset = 0x00740121
ProcedureStepState = 0x00741000
ProcedureStepProgressInformationSequence = 0x00741002
ProcedureStepProgress = 0x00741004
ProcedureStepProgressDescription = 0x00741006
ProcedureStepCommunicationsURISequence = 0x00741008
ContactURI = 0x0074100A
ContactDisplayName = 0x0074100C
ProcedureStepDiscontinuationReasonCodeSequence = 0x0074100E
BeamTaskSequence = 0x00741020
BeamTaskType = 0x00741022
BeamOrderIndexTrial = 0x00741024
AutosequenceFlag = 0x00741025
TableTopVerticalAdjustedPosition = 0x00741026
TableTopLongitudinalAdjustedPosition = 0x00741027
TableTopLateralAdjustedPosition = 0x00741028
PatientSupportAdjustedAngle = 0x0074102A
TableTopEccentricAdjustedAngle = 0x0074102B
TableTopPitchAdjustedAngle = 0x0074102C
TableTopRollAdjustedAngle = 0x0074102D
DeliveryVerificationImageSequence = 0x00741030
VerificationImageTiming = 0x00741032
DoubleExposureFlag = 0x00741034
DoubleExposureOrdering = 0x00741036
DoubleExposureMetersetTrial = 0x00741038
DoubleExposureFieldDeltaTrial = 0x0074103A
RelatedReferenceRTImageSequence = 0x00741040
GeneralMachineVerificationSequence = 0x00741042
ConventionalMachineVerificationSequence = 0x00741044
IonMachineVerificationSequence = 0x00741046
FailedAttributesSequence = 0x00741048
OverriddenAttributesSequence = 0x0074104A
ConventionalControlPointVerificationSequence = 0x0074104C
IonControlPointVerificationSequence = 0x0074104E
AttributeOccurrenceSequence = 0x00741050
AttributeOccurrencePointer = 0x00741052
AttributeItemSelector = 0x00741054
AttributeOccurrencePrivateCreator = 0x00741056
SelectorSequencePointerItems = 0x00741057
ScheduledProcedureStepPriority = 0x00741200
WorklistLabel = 0x00741202
ProcedureStepLabel = 0x00741204
ScheduledProcessingParametersSequence = 0x00741210
PerformedProcessingParametersSequence = 0x00741212
UnifiedProcedureStepPerformedProcedureSequence = 0x00741216
RelatedProcedureStepSequence = 0x00741220
ProcedureStepRelationshipType = 0x00741222
ReplacedProcedureStepSequence = 0x00741224
DeletionLock = 0x00741230
ReceivingAE = 0x00741234
RequestingAE = 0x00741236
ReasonforCancellation = 0x00741238
SCPStatus = 0x00741242
SubscriptionListStatus = 0x00741244
UnifiedProcedureStepListStatus = 0x00741246
BeamOrderIndex = 0x00741324
DoubleExposureMeterset = 0x00741338
DoubleExposureFieldDelta = 0x0074133A
BrachyTaskSequence = 0x00741401
ContinuationStartTotalReferenceAirKerma = 0x00741402
ContinuationEndTotalReferenceAirKerma = 0x00741403
ContinuationPulseNumber = 0x00741404
ChannelDeliveryOrderSequence = 0x00741405
ReferencedChannelNumber = 0x00741406
StartCumulativeTimeWeight = 0x00741407
EndCumulativeTimeWeight = 0x00741408
OmittedChannelSequence = 0x00741409
ReasonforChannelOmission = 0x0074140A
ReasonforChannelOmissionDescription = 0x0074140B
ChannelDeliveryOrderIndex = 0x0074140C
ChannelDeliveryContinuationSequence = 0x0074140D
OmittedApplicationSetupSequence = 0x0074140E
# Groups 0076 / 0078: implant assembly template and implant template group
# tags, packed as 0xGGGGEEEE.  Hex literals replace the original
# int('XXXXXXXX', 16) calls with identical values.
ImplantAssemblyTemplateName = 0x00760001
ImplantAssemblyTemplateIssuer = 0x00760003
ImplantAssemblyTemplateVersion = 0x00760006
ReplacedImplantAssemblyTemplateSequence = 0x00760008
ImplantAssemblyTemplateType = 0x0076000A
OriginalImplantAssemblyTemplateSequence = 0x0076000C
DerivationImplantAssemblyTemplateSequence = 0x0076000E
ImplantAssemblyTemplateTargetAnatomySequence = 0x00760010
ProcedureTypeCodeSequence = 0x00760020
SurgicalTechnique = 0x00760030
ComponentTypesSequence = 0x00760032
ComponentTypeCodeSequence = 0x00760034
ExclusiveComponentType = 0x00760036
MandatoryComponentType = 0x00760038
ComponentSequence = 0x00760040
ComponentID = 0x00760055
ComponentAssemblySequence = 0x00760060
Component1ReferencedID = 0x00760070
Component1ReferencedMatingFeatureSetID = 0x00760080
Component1ReferencedMatingFeatureID = 0x00760090
Component2ReferencedID = 0x007600A0
Component2ReferencedMatingFeatureSetID = 0x007600B0
Component2ReferencedMatingFeatureID = 0x007600C0
ImplantTemplateGroupName = 0x00780001
ImplantTemplateGroupDescription = 0x00780010
ImplantTemplateGroupIssuer = 0x00780020
ImplantTemplateGroupVersion = 0x00780024
ReplacedImplantTemplateGroupSequence = 0x00780026
ImplantTemplateGroupTargetAnatomySequence = 0x00780028
ImplantTemplateGroupMembersSequence = 0x0078002A
ImplantTemplateGroupMemberID = 0x0078002E
ThreeDImplantTemplateGroupMemberMatchingPoint = 0x00780050
ThreeDImplantTemplateGroupMemberMatchingAxes = 0x00780060
# Single line now that the value is a literal (no wrapped int() call needed).
ImplantTemplateGroupMemberMatching2DCoordinatesSequence = 0x00780070
TwoDImplantTemplateGroupMemberMatchingPoint = 0x00780090
TwoDImplantTemplateGroupMemberMatchingAxes = 0x007800A0
ImplantTemplateGroupVariationDimensionSequence = 0x007800B0
ImplantTemplateGroupVariationDimensionName = 0x007800B2
ImplantTemplateGroupVariationDimensionRankSequence = 0x007800B4
ReferencedImplantTemplateGroupMemberID = 0x007800B6
ImplantTemplateGroupVariationDimensionRank = 0x007800B8
# Groups 0080 (surface scan), 0082 (assessment), 0088 (storage media / icon),
# 0100 (SOP authorization) tags, packed as 0xGGGGEEEE.  Hex literals replace
# the original int('XXXXXXXX', 16) calls with identical values.
SurfaceScanAcquisitionTypeCodeSequence = 0x00800001
SurfaceScanModeCodeSequence = 0x00800002
RegistrationMethodCodeSequence = 0x00800003
ShotDurationTime = 0x00800004
ShotOffsetTime = 0x00800005
SurfacePointPresentationValueData = 0x00800006
SurfacePointColorCIELabValueData = 0x00800007
UVMappingSequence = 0x00800008
TextureLabel = 0x00800009
UValueData = 0x00800010
VValueData = 0x00800011
ReferencedTextureSequence = 0x00800012
ReferencedSurfaceDataSequence = 0x00800013
AssessmentSummary = 0x00820001
AssessmentSummaryDescription = 0x00820003
AssessedSOPInstanceSequence = 0x00820004
ReferencedComparisonSOPInstanceSequence = 0x00820005
NumberofAssessmentObservations = 0x00820006
AssessmentObservationsSequence = 0x00820007
ObservationSignificance = 0x00820008
ObservationDescription = 0x0082000A
StructuredConstraintObservationSequence = 0x0082000C
AssessedAttributeValueSequence = 0x00820010
AssessmentSetID = 0x00820016
AssessmentRequesterSequence = 0x00820017
SelectorAttributeName = 0x00820018
SelectorAttributeKeyword = 0x00820019
AssessmentTypeCodeSequence = 0x00820021
ObservationBasisCodeSequence = 0x00820022
AssessmentLabel = 0x00820023
ConstraintType = 0x00820032
SpecificationSelectionGuidance = 0x00820033
ConstraintValueSequence = 0x00820034
RecommendedDefaultValueSequence = 0x00820035
ConstraintViolationSignificance = 0x00820036
ConstraintViolationCondition = 0x00820037
StorageMediaFilesetID = 0x00880130
StorageMediaFilesetUID = 0x00880140
IconImageSequence = 0x00880200
TopicTitle = 0x00880904
TopicSubject = 0x00880906
TopicAuthor = 0x00880910
TopicKeywords = 0x00880912
SOPInstanceStatus = 0x01000410
SOPAuthorizationDateTime = 0x01000420
SOPAuthorizationComment = 0x01000424
AuthorizationEquipmentCertificationNumber = 0x01000426
# Group 0400 (digital signatures / attribute modification) and groups
# 2000-2200 (print management, media creation) tags, packed as 0xGGGGEEEE.
# Hex literals replace the original int('XXXXXXXX', 16) calls with
# identical values.
MACIDNumber = 0x04000005
MACCalculationTransferSyntaxUID = 0x04000010
MACAlgorithm = 0x04000015
DataElementsSigned = 0x04000020
DigitalSignatureUID = 0x04000100
DigitalSignatureDateTime = 0x04000105
CertificateType = 0x04000110
CertificateofSigner = 0x04000115
Signature = 0x04000120
CertifiedTimestampType = 0x04000305
CertifiedTimestamp = 0x04000310
DigitalSignaturePurposeCodeSequence = 0x04000401
ReferencedDigitalSignatureSequence = 0x04000402
ReferencedSOPInstanceMACSequence = 0x04000403
MAC = 0x04000404
EncryptedAttributesSequence = 0x04000500
EncryptedContentTransferSyntaxUID = 0x04000510
EncryptedContent = 0x04000520
ModifiedAttributesSequence = 0x04000550
OriginalAttributesSequence = 0x04000561
AttributeModificationDateTime = 0x04000562
ModifyingSystem = 0x04000563
SourceofPreviousValues = 0x04000564
ReasonfortheAttributeModification = 0x04000565
# Group 2000: film session.
NumberofCopies = 0x20000010
PrinterConfigurationSequence = 0x2000001E
PrintPriority = 0x20000020
MediumType = 0x20000030
FilmDestination = 0x20000040
FilmSessionLabel = 0x20000050
MemoryAllocation = 0x20000060
MaximumMemoryAllocation = 0x20000061
ColorImagePrintingFlag = 0x20000062
CollationFlag = 0x20000063
AnnotationFlag = 0x20000065
ImageOverlayFlag = 0x20000067
PresentationLUTFlag = 0x20000069
ImageBoxPresentationLUTFlag = 0x2000006A
MemoryBitDepth = 0x200000A0
PrintingBitDepth = 0x200000A1
MediaInstalledSequence = 0x200000A2
OtherMediaAvailableSequence = 0x200000A4
SupportedImageDisplayFormatsSequence = 0x200000A8
ReferencedFilmBoxSequence = 0x20000500
ReferencedStoredPrintSequence = 0x20000510
# Group 2010: film box.
ImageDisplayFormat = 0x20100010
AnnotationDisplayFormatID = 0x20100030
FilmOrientation = 0x20100040
FilmSizeID = 0x20100050
PrinterResolutionID = 0x20100052
DefaultPrinterResolutionID = 0x20100054
MagnificationType = 0x20100060
SmoothingType = 0x20100080
DefaultMagnificationType = 0x201000A6
OtherMagnificationTypesAvailable = 0x201000A7
DefaultSmoothingType = 0x201000A8
OtherSmoothingTypesAvailable = 0x201000A9
BorderDensity = 0x20100100
EmptyImageDensity = 0x20100110
MinDensity = 0x20100120
MaxDensity = 0x20100130
Trim = 0x20100140
ConfigurationInformation = 0x20100150
ConfigurationInformationDescription = 0x20100152
MaximumCollatedFilms = 0x20100154
Illumination = 0x2010015E
ReflectedAmbientLight = 0x20100160
PrinterPixelSpacing = 0x20100376
ReferencedFilmSessionSequence = 0x20100500
ReferencedImageBoxSequence = 0x20100510
ReferencedBasicAnnotationBoxSequence = 0x20100520
# Group 2020: image box.
ImageBoxPosition = 0x20200010
Polarity = 0x20200020
RequestedImageSize = 0x20200030
RequestedDecimateCropBehavior = 0x20200040
RequestedResolutionID = 0x20200050
RequestedImageSizeFlag = 0x202000A0
DecimateCropResult = 0x202000A2
BasicGrayscaleImageSequence = 0x20200110
BasicColorImageSequence = 0x20200111
ReferencedImageOverlayBoxSequence = 0x20200130
ReferencedVOILUTBoxSequence = 0x20200140
# Group 2030: annotation box.
AnnotationPosition = 0x20300010
TextString = 0x20300020
# Group 2040: overlay box.
ReferencedOverlayPlaneSequence = 0x20400010
ReferencedOverlayPlaneGroups = 0x20400011
OverlayPixelDataSequence = 0x20400020
OverlayMagnificationType = 0x20400060
OverlaySmoothingType = 0x20400070
OverlayorImageMagnification = 0x20400072
MagnifytoNumberofColumns = 0x20400074
OverlayForegroundDensity = 0x20400080
OverlayBackgroundDensity = 0x20400082
OverlayMode = 0x20400090
ThresholdDensity = 0x20400100
ReferencedImageBoxSequenceRetired = 0x20400500
# Group 2050: presentation LUT.
PresentationLUTSequence = 0x20500010
PresentationLUTShape = 0x20500020
ReferencedPresentationLUTSequence = 0x20500500
# Group 2100: print job.
PrintJobID = 0x21000010
ExecutionStatus = 0x21000020
ExecutionStatusInfo = 0x21000030
CreationDate = 0x21000040
CreationTime = 0x21000050
Originator = 0x21000070
DestinationAE = 0x21000140
OwnerID = 0x21000160
NumberofFilms = 0x21000170
ReferencedPrintJobSequencePullStoredPrint = 0x21000500
# Group 2110: printer status.
PrinterStatus = 0x21100010
PrinterStatusInfo = 0x21100020
PrinterName = 0x21100030
PrintQueueID = 0x21100099
# Group 2120: print queue.
QueueStatus = 0x21200010
PrintJobDescriptionSequence = 0x21200050
ReferencedPrintJobSequence = 0x21200070
# Group 2130: print content.
PrintManagementCapabilitiesSequence = 0x21300010
PrinterCharacteristicsSequence = 0x21300015
FilmBoxContentSequence = 0x21300030
ImageBoxContentSequence = 0x21300040
AnnotationContentSequence = 0x21300050
ImageOverlayBoxContentSequence = 0x21300060
PresentationLUTContentSequence = 0x21300080
ProposedStudySequence = 0x213000A0
OriginalImageSequence = 0x213000C0
# Group 2200: media creation.
LabelUsingInformationExtractedFromInstances = 0x22000001
LabelText = 0x22000002
LabelStyleSelection = 0x22000003
MediaDisposition = 0x22000004
BarcodeValue = 0x22000005
BarcodeSymbology = 0x22000006
AllowMediaSplitting = 0x22000007
IncludeNonDICOMObjects = 0x22000008
IncludeDisplayApplication = 0x22000009
PreserveCompositeInstancesAfterMediaCreation = 0x2200000A
TotalNumberofPiecesofMediaCreated = 0x2200000B
RequestedMediaApplicationProfile = 0x2200000C
ReferencedStorageMediaSequence = 0x2200000D
FailureAttributes = 0x2200000E
AllowLossyCompression = 0x2200000F
RequestPriority = 0x22000020
# Groups 3002 (RT image), 3004 (RT dose / DVH), 3006 (RT structure set)
# tags, packed as 0xGGGGEEEE.  Hex literals replace the original
# int('XXXXXXXX', 16) calls with identical values.
RTImageLabel = 0x30020002
RTImageName = 0x30020003
RTImageDescription = 0x30020004
ReportedValuesOrigin = 0x3002000A
RTImagePlane = 0x3002000C
XRayImageReceptorTranslation = 0x3002000D
XRayImageReceptorAngle = 0x3002000E
RTImageOrientation = 0x30020010
ImagePlanePixelSpacing = 0x30020011
RTImagePosition = 0x30020012
RadiationMachineName = 0x30020020
RadiationMachineSAD = 0x30020022
RadiationMachineSSD = 0x30020024
RTImageSID = 0x30020026
SourcetoReferenceObjectDistance = 0x30020028
FractionNumber = 0x30020029
ExposureSequence = 0x30020030
MetersetExposure = 0x30020032
DiaphragmPosition = 0x30020034
FluenceMapSequence = 0x30020040
FluenceDataSource = 0x30020041
FluenceDataScale = 0x30020042
PrimaryFluenceModeSequence = 0x30020050
FluenceMode = 0x30020051
FluenceModeID = 0x30020052
# Group 3004: RT dose.
DVHType = 0x30040001
DoseUnits = 0x30040002
DoseType = 0x30040004
SpatialTransformofDose = 0x30040005
DoseComment = 0x30040006
NormalizationPoint = 0x30040008
DoseSummationType = 0x3004000A
GridFrameOffsetVector = 0x3004000C
DoseGridScaling = 0x3004000E
RTDoseROISequence = 0x30040010
DoseValue = 0x30040012
TissueHeterogeneityCorrection = 0x30040014
DVHNormalizationPoint = 0x30040040
DVHNormalizationDoseValue = 0x30040042
DVHSequence = 0x30040050
DVHDoseScaling = 0x30040052
DVHVolumeUnits = 0x30040054
DVHNumberofBins = 0x30040056
DVHData = 0x30040058
DVHReferencedROISequence = 0x30040060
DVHROIContributionType = 0x30040062
DVHMinimumDose = 0x30040070
DVHMaximumDose = 0x30040072
DVHMeanDose = 0x30040074
# Group 3006: RT structure set.
StructureSetLabel = 0x30060002
StructureSetName = 0x30060004
StructureSetDescription = 0x30060006
StructureSetDate = 0x30060008
StructureSetTime = 0x30060009
ReferencedFrameofReferenceSequence = 0x30060010
RTReferencedStudySequence = 0x30060012
RTReferencedSeriesSequence = 0x30060014
ContourImageSequence = 0x30060016
PredecessorStructureSetSequence = 0x30060018
StructureSetROISequence = 0x30060020
ROINumber = 0x30060022
ReferencedFrameofReferenceUID = 0x30060024
ROIName = 0x30060026
ROIDescription = 0x30060028
ROIDisplayColor = 0x3006002A
ROIVolume = 0x3006002C
RTRelatedROISequence = 0x30060030
RTROIRelationship = 0x30060033
ROIGenerationAlgorithm = 0x30060036
ROIGenerationDescription = 0x30060038
ROIContourSequence = 0x30060039
ContourSequence = 0x30060040
ContourGeometricType = 0x30060042
ContourSlabThickness = 0x30060044
ContourOffsetVector = 0x30060045
NumberofContourPoints = 0x30060046
ContourNumber = 0x30060048
AttachedContours = 0x30060049
ContourData = 0x30060050
RTROIObservationsSequence = 0x30060080
ObservationNumber = 0x30060082
ReferencedROINumber = 0x30060084
ROIObservationLabel = 0x30060085
RTROIIdentificationCodeSequence = 0x30060086
ROIObservationDescription = 0x30060088
RelatedRTROIObservationsSequence = 0x300600A0
RTROIInterpretedType = 0x300600A4
ROIInterpreter = 0x300600A6
ROIPhysicalPropertiesSequence = 0x300600B0
ROIPhysicalProperty = 0x300600B2
ROIPhysicalPropertyValue = 0x300600B4
ROIElementalCompositionSequence = 0x300600B6
ROIElementalCompositionAtomicNumber = 0x300600B7
ROIElementalCompositionAtomicMassFraction = 0x300600B8
AdditionalRTROIIdentificationCodeSequence = 0x300600B9
FrameofReferenceRelationshipSequence = 0x300600C0
RelatedFrameofReferenceUID = 0x300600C2
FrameofReferenceTransformationType = 0x300600C4
FrameofReferenceTransformationMatrix = 0x300600C6
FrameofReferenceTransformationComment = 0x300600C8
# Groups 3008 (RT beams treatment record) and 300A (RT plan) tags, packed
# as 0xGGGGEEEE.  Hex literals replace the original int('XXXXXXXX', 16)
# calls with identical values.
MeasuredDoseReferenceSequence = 0x30080010
MeasuredDoseDescription = 0x30080012
MeasuredDoseType = 0x30080014
MeasuredDoseValue = 0x30080016
TreatmentSessionBeamSequence = 0x30080020
TreatmentSessionIonBeamSequence = 0x30080021
CurrentFractionNumber = 0x30080022
TreatmentControlPointDate = 0x30080024
TreatmentControlPointTime = 0x30080025
TreatmentTerminationStatus = 0x3008002A
TreatmentTerminationCode = 0x3008002B
TreatmentVerificationStatus = 0x3008002C
ReferencedTreatmentRecordSequence = 0x30080030
SpecifiedPrimaryMeterset = 0x30080032
SpecifiedSecondaryMeterset = 0x30080033
DeliveredPrimaryMeterset = 0x30080036
DeliveredSecondaryMeterset = 0x30080037
SpecifiedTreatmentTime = 0x3008003A
DeliveredTreatmentTime = 0x3008003B
ControlPointDeliverySequence = 0x30080040
IonControlPointDeliverySequence = 0x30080041
SpecifiedMeterset = 0x30080042
DeliveredMeterset = 0x30080044
MetersetRateSet = 0x30080045
MetersetRateDelivered = 0x30080046
ScanSpotMetersetsDelivered = 0x30080047
DoseRateDelivered = 0x30080048
TreatmentSummaryCalculatedDoseReferenceSequence = 0x30080050
CumulativeDosetoDoseReference = 0x30080052
FirstTreatmentDate = 0x30080054
MostRecentTreatmentDate = 0x30080056
NumberofFractionsDelivered = 0x3008005A
OverrideSequence = 0x30080060
ParameterSequencePointer = 0x30080061
OverrideParameterPointer = 0x30080062
ParameterItemIndex = 0x30080063
MeasuredDoseReferenceNumber = 0x30080064
ParameterPointer = 0x30080065
OverrideReason = 0x30080066
CorrectedParameterSequence = 0x30080068
CorrectionValue = 0x3008006A
CalculatedDoseReferenceSequence = 0x30080070
CalculatedDoseReferenceNumber = 0x30080072
CalculatedDoseReferenceDescription = 0x30080074
CalculatedDoseReferenceDoseValue = 0x30080076
StartMeterset = 0x30080078
EndMeterset = 0x3008007A
ReferencedMeasuredDoseReferenceSequence = 0x30080080
ReferencedMeasuredDoseReferenceNumber = 0x30080082
ReferencedCalculatedDoseReferenceSequence = 0x30080090
ReferencedCalculatedDoseReferenceNumber = 0x30080092
BeamLimitingDeviceLeafPairsSequence = 0x300800A0
RecordedWedgeSequence = 0x300800B0
RecordedCompensatorSequence = 0x300800C0
RecordedBlockSequence = 0x300800D0
TreatmentSummaryMeasuredDoseReferenceSequence = 0x300800E0
RecordedSnoutSequence = 0x300800F0
RecordedRangeShifterSequence = 0x300800F2
RecordedLateralSpreadingDeviceSequence = 0x300800F4
RecordedRangeModulatorSequence = 0x300800F6
RecordedSourceSequence = 0x30080100
SourceSerialNumber = 0x30080105
TreatmentSessionApplicationSetupSequence = 0x30080110
ApplicationSetupCheck = 0x30080116
RecordedBrachyAccessoryDeviceSequence = 0x30080120
ReferencedBrachyAccessoryDeviceNumber = 0x30080122
RecordedChannelSequence = 0x30080130
SpecifiedChannelTotalTime = 0x30080132
DeliveredChannelTotalTime = 0x30080134
SpecifiedNumberofPulses = 0x30080136
DeliveredNumberofPulses = 0x30080138
SpecifiedPulseRepetitionInterval = 0x3008013A
DeliveredPulseRepetitionInterval = 0x3008013C
RecordedSourceApplicatorSequence = 0x30080140
ReferencedSourceApplicatorNumber = 0x30080142
RecordedChannelShieldSequence = 0x30080150
ReferencedChannelShieldNumber = 0x30080152
BrachyControlPointDeliveredSequence = 0x30080160
SafePositionExitDate = 0x30080162
SafePositionExitTime = 0x30080164
SafePositionReturnDate = 0x30080166
SafePositionReturnTime = 0x30080168
PulseSpecificBrachyControlPointDeliveredSequence = 0x30080171
PulseNumber = 0x30080172
BrachyPulseControlPointDeliveredSequence = 0x30080173
CurrentTreatmentStatus = 0x30080200
TreatmentStatusComment = 0x30080202
FractionGroupSummarySequence = 0x30080220
ReferencedFractionNumber = 0x30080223
FractionGroupType = 0x30080224
BeamStopperPosition = 0x30080230
FractionStatusSummarySequence = 0x30080240
TreatmentDate = 0x30080250
TreatmentTime = 0x30080251
# Group 300A: RT plan.
RTPlanLabel = 0x300A0002
RTPlanName = 0x300A0003
RTPlanDescription = 0x300A0004
RTPlanDate = 0x300A0006
RTPlanTime = 0x300A0007
TreatmentProtocols = 0x300A0009
PlanIntent = 0x300A000A
TreatmentSites = 0x300A000B
RTPlanGeometry = 0x300A000C
PrescriptionDescription = 0x300A000E
DoseReferenceSequence = 0x300A0010
DoseReferenceNumber = 0x300A0012
DoseReferenceUID = 0x300A0013
DoseReferenceStructureType = 0x300A0014
NominalBeamEnergyUnit = 0x300A0015
DoseReferenceDescription = 0x300A0016
DoseReferencePointCoordinates = 0x300A0018
NominalPriorDose = 0x300A001A
DoseReferenceType = 0x300A0020
ConstraintWeight = 0x300A0021
DeliveryWarningDose = 0x300A0022
DeliveryMaximumDose = 0x300A0023
TargetMinimumDose = 0x300A0025
TargetPrescriptionDose = 0x300A0026
TargetMaximumDose = 0x300A0027
TargetUnderdoseVolumeFraction = 0x300A0028
OrganatRiskFullvolumeDose = 0x300A002A
OrganatRiskLimitDose = 0x300A002B
OrganatRiskMaximumDose = 0x300A002C
OrganatRiskOverdoseVolumeFraction = 0x300A002D
ToleranceTableSequence = 0x300A0040
ToleranceTableNumber = 0x300A0042
ToleranceTableLabel = 0x300A0043
GantryAngleTolerance = 0x300A0044
BeamLimitingDeviceAngleTolerance = 0x300A0046
BeamLimitingDeviceToleranceSequence = 0x300A0048
BeamLimitingDevicePositionTolerance = 0x300A004A
SnoutPositionTolerance = 0x300A004B
PatientSupportAngleTolerance = 0x300A004C
TableTopEccentricAngleTolerance = 0x300A004E
TableTopPitchAngleTolerance = 0x300A004F
TableTopRollAngleTolerance = 0x300A0050
TableTopVerticalPositionTolerance = 0x300A0051
TableTopLongitudinalPositionTolerance = 0x300A0052
TableTopLateralPositionTolerance = 0x300A0053
RTPlanRelationship = 0x300A0055
FractionGroupSequence = 0x300A0070
FractionGroupNumber = 0x300A0071
FractionGroupDescription = 0x300A0072
NumberofFractionsPlanned = 0x300A0078
NumberofFractionPatternDigitsPerDay = 0x300A0079
RepeatFractionCycleLength = 0x300A007A
FractionPattern = 0x300A007B
NumberofBeams = 0x300A0080
BeamDoseSpecificationPoint = 0x300A0082
BeamDose = 0x300A0084
BeamMeterset = 0x300A0086
BeamDosePointDepth = 0x300A0088
BeamDosePointEquivalentDepth = 0x300A0089
BeamDosePointSSD = 0x300A008A
BeamDoseMeaning = 0x300A008B
BeamDoseVerificationControlPointSequence = int('300A008C', 16)
AverageBeamDosePointDepth = int('300A008D', 16)
AverageBeamDosePointEquivalentDepth = int('300A008E', 16)
AverageBeamDosePointSSD = int('300A008F', 16)
BeamDoseType = int('300A0090', 16)
AlternateBeamDose = int('300A0091', 16)
AlternateBeamDoseType = int('300A0092', 16)
NumberofBrachyApplicationSetups = int('300A00A0', 16)
BrachyApplicationSetupDoseSpecificationPoint = int('300A00A2', 16)
BrachyApplicationSetupDose = int('300A00A4', 16)
BeamSequence = int('300A00B0', 16)
TreatmentMachineName = int('300A00B2', 16)
PrimaryDosimeterUnit = int('300A00B3', 16)
SourceAxisDistance = int('300A00B4', 16)
BeamLimitingDeviceSequence = int('300A00B6', 16)
RTBeamLimitingDeviceType = int('300A00B8', 16)
SourcetoBeamLimitingDeviceDistance = int('300A00BA', 16)
IsocentertoBeamLimitingDeviceDistance = int('300A00BB', 16)
NumberofLeafJawPairs = int('300A00BC', 16)
LeafPositionBoundaries = int('300A00BE', 16)
BeamNumber = int('300A00C0', 16)
BeamName = int('300A00C2', 16)
BeamDescription = int('300A00C3', 16)
BeamType = int('300A00C4', 16)
BeamDeliveryDurationLimit = int('300A00C5', 16)
RadiationType = int('300A00C6', 16)
HighDoseTechniqueType = int('300A00C7', 16)
ReferenceImageNumber = int('300A00C8', 16)
PlannedVerificationImageSequence = int('300A00CA', 16)
ImagingDeviceSpecificAcquisitionParameters = int('300A00CC', 16)
TreatmentDeliveryType = int('300A00CE', 16)
NumberofWedges = int('300A00D0', 16)
WedgeSequence = int('300A00D1', 16)
WedgeNumber = int('300A00D2', 16)
WedgeType = int('300A00D3', 16)
WedgeID = int('300A00D4', 16)
WedgeAngle = int('300A00D5', 16)
WedgeFactor = int('300A00D6', 16)
TotalWedgeTrayWaterEquivalentThickness = int('300A00D7', 16)
WedgeOrientation = int('300A00D8', 16)
IsocentertoWedgeTrayDistance = int('300A00D9', 16)
SourcetoWedgeTrayDistance = int('300A00DA', 16)
WedgeThinEdgePosition = int('300A00DB', 16)
BolusID = int('300A00DC', 16)
BolusDescription = int('300A00DD', 16)
EffectiveWedgeAngle = int('300A00DE', 16)
NumberofCompensators = int('300A00E0', 16)
MaterialID = int('300A00E1', 16)
TotalCompensatorTrayFactor = int('300A00E2', 16)
CompensatorSequence = int('300A00E3', 16)
CompensatorNumber = int('300A00E4', 16)
CompensatorID = int('300A00E5', 16)
SourcetoCompensatorTrayDistance = int('300A00E6', 16)
CompensatorRows = int('300A00E7', 16)
CompensatorColumns = int('300A00E8', 16)
CompensatorPixelSpacing = int('300A00E9', 16)
CompensatorPosition = int('300A00EA', 16)
CompensatorTransmissionData = int('300A00EB', 16)
CompensatorThicknessData = int('300A00EC', 16)
NumberofBoli = int('300A00ED', 16)
CompensatorType = int('300A00EE', 16)
CompensatorTrayID = int('300A00EF', 16)
NumberofBlocks = int('300A00F0', 16)
TotalBlockTrayFactor = int('300A00F2', 16)
TotalBlockTrayWaterEquivalentThickness = int('300A00F3', 16)
BlockSequence = int('300A00F4', 16)
BlockTrayID = int('300A00F5', 16)
SourcetoBlockTrayDistance = int('300A00F6', 16)
IsocentertoBlockTrayDistance = int('300A00F7', 16)
BlockType = int('300A00F8', 16)
AccessoryCode = int('300A00F9', 16)
BlockDivergence = int('300A00FA', 16)
BlockMountingPosition = int('300A00FB', 16)
BlockNumber = int('300A00FC', 16)
BlockName = int('300A00FE', 16)
BlockThickness = int('300A0100', 16)
BlockTransmission = int('300A0102', 16)
BlockNumberofPoints = int('300A0104', 16)
BlockData = int('300A0106', 16)
ApplicatorSequence = int('300A0107', 16)
ApplicatorID = int('300A0108', 16)
ApplicatorType = int('300A0109', 16)
ApplicatorDescription = int('300A010A', 16)
CumulativeDoseReferenceCoefficient = int('300A010C', 16)
FinalCumulativeMetersetWeight = int('300A010E', 16)
NumberofControlPoints = int('300A0110', 16)
ControlPointSequence = int('300A0111', 16)
ControlPointIndex = int('300A0112', 16)
NominalBeamEnergy = int('300A0114', 16)
DoseRateSet = int('300A0115', 16)
WedgePositionSequence = int('300A0116', 16)
WedgePosition = int('300A0118', 16)
BeamLimitingDevicePositionSequence = int('300A011A', 16)
LeafJawPositions = int('300A011C', 16)
GantryAngle = int('300A011E', 16)
GantryRotationDirection = int('300A011F', 16)
BeamLimitingDeviceAngle = int('300A0120', 16)
BeamLimitingDeviceRotationDirection = int('300A0121', 16)
PatientSupportAngle = int('300A0122', 16)
PatientSupportRotationDirection = int('300A0123', 16)
TableTopEccentricAxisDistance = int('300A0124', 16)
TableTopEccentricAngle = int('300A0125', 16)
TableTopEccentricRotationDirection = int('300A0126', 16)
TableTopVerticalPosition = int('300A0128', 16)
TableTopLongitudinalPosition = int('300A0129', 16)
TableTopLateralPosition = int('300A012A', 16)
IsocenterPosition = int('300A012C', 16)
SurfaceEntryPoint = int('300A012E', 16)
SourcetoSurfaceDistance = int('300A0130', 16)
AverageBeamDosePointSourcetoExternalContourDistance = int('300A0131', 16)
SourcetoExternalContourDistance = int('300A0132', 16)
ExternalContourEntryPoint = int('300A0133', 16)
CumulativeMetersetWeight = int('300A0134', 16)
TableTopPitchAngle = int('300A0140', 16)
TableTopPitchRotationDirection = int('300A0142', 16)
TableTopRollAngle = int('300A0144', 16)
TableTopRollRotationDirection = int('300A0146', 16)
HeadFixationAngle = int('300A0148', 16)
GantryPitchAngle = int('300A014A', 16)
GantryPitchRotationDirection = int('300A014C', 16)
GantryPitchAngleTolerance = int('300A014E', 16)
FixationEye = int('300A0150', 16)
ChairHeadFramePosition = int('300A0151', 16)
HeadFixationAngleTolerance = int('300A0152', 16)
ChairHeadFramePositionTolerance = int('300A0153', 16)
FixationLightAzimuthalAngleTolerance = int('300A0154', 16)
FixationLightPolarAngleTolerance = int('300A0155', 16)
PatientSetupSequence = int('300A0180', 16)
PatientSetupNumber = int('300A0182', 16)
PatientSetupLabel = int('300A0183', 16)
PatientAdditionalPosition = int('300A0184', 16)
FixationDeviceSequence = int('300A0190', 16)
FixationDeviceType = int('300A0192', 16)
FixationDeviceLabel = int('300A0194', 16)
FixationDeviceDescription = int('300A0196', 16)
FixationDevicePosition = int('300A0198', 16)
FixationDevicePitchAngle = int('300A0199', 16)
FixationDeviceRollAngle = int('300A019A', 16)
ShieldingDeviceSequence = int('300A01A0', 16)
ShieldingDeviceType = int('300A01A2', 16)
ShieldingDeviceLabel = int('300A01A4', 16)
ShieldingDeviceDescription = int('300A01A6', 16)
ShieldingDevicePosition = int('300A01A8', 16)
SetupTechnique = int('300A01B0', 16)
SetupTechniqueDescription = int('300A01B2', 16)
SetupDeviceSequence = int('300A01B4', 16)
SetupDeviceType = int('300A01B6', 16)
SetupDeviceLabel = int('300A01B8', 16)
SetupDeviceDescription = int('300A01BA', 16)
SetupDeviceParameter = int('300A01BC', 16)
SetupReferenceDescription = int('300A01D0', 16)
TableTopVerticalSetupDisplacement = int('300A01D2', 16)
TableTopLongitudinalSetupDisplacement = int('300A01D4', 16)
TableTopLateralSetupDisplacement = int('300A01D6', 16)
BrachyTreatmentTechnique = int('300A0200', 16)
BrachyTreatmentType = int('300A0202', 16)
TreatmentMachineSequence = int('300A0206', 16)
SourceSequence = int('300A0210', 16)
SourceNumber = int('300A0212', 16)
SourceType = int('300A0214', 16)
SourceManufacturer = int('300A0216', 16)
ActiveSourceDiameter = int('300A0218', 16)
ActiveSourceLength = int('300A021A', 16)
SourceModelID = int('300A021B', 16)
SourceDescription = int('300A021C', 16)
SourceEncapsulationNominalThickness = int('300A0222', 16)
SourceEncapsulationNominalTransmission = int('300A0224', 16)
SourceIsotopeName = int('300A0226', 16)
SourceIsotopeHalfLife = int('300A0228', 16)
SourceStrengthUnits = int('300A0229', 16)
ReferenceAirKermaRate = int('300A022A', 16)
SourceStrength = int('300A022B', 16)
SourceStrengthReferenceDate = int('300A022C', 16)
SourceStrengthReferenceTime = int('300A022E', 16)
ApplicationSetupSequence = int('300A0230', 16)
ApplicationSetupType = int('300A0232', 16)
ApplicationSetupNumber = int('300A0234', 16)
ApplicationSetupName = int('300A0236', 16)
ApplicationSetupManufacturer = int('300A0238', 16)
TemplateNumber = int('300A0240', 16)
TemplateType = int('300A0242', 16)
TemplateName = int('300A0244', 16)
TotalReferenceAirKerma = int('300A0250', 16)
BrachyAccessoryDeviceSequence = int('300A0260', 16)
BrachyAccessoryDeviceNumber = int('300A0262', 16)
BrachyAccessoryDeviceID = int('300A0263', 16)
BrachyAccessoryDeviceType = int('300A0264', 16)
BrachyAccessoryDeviceName = int('300A0266', 16)
BrachyAccessoryDeviceNominalThickness = int('300A026A', 16)
BrachyAccessoryDeviceNominalTransmission = int('300A026C', 16)
ChannelSequence = int('300A0280', 16)
ChannelNumber = int('300A0282', 16)
ChannelLength = int('300A0284', 16)
ChannelTotalTime = int('300A0286', 16)
SourceMovementType = int('300A0288', 16)
NumberofPulses = int('300A028A', 16)
PulseRepetitionInterval = int('300A028C', 16)
SourceApplicatorNumber = int('300A0290', 16)
SourceApplicatorID = int('300A0291', 16)
SourceApplicatorType = int('300A0292', 16)
SourceApplicatorName = int('300A0294', 16)
SourceApplicatorLength = int('300A0296', 16)
SourceApplicatorManufacturer = int('300A0298', 16)
SourceApplicatorWallNominalThickness = int('300A029C', 16)
SourceApplicatorWallNominalTransmission = int('300A029E', 16)
SourceApplicatorStepSize = int('300A02A0', 16)
TransferTubeNumber = int('300A02A2', 16)
TransferTubeLength = int('300A02A4', 16)
ChannelShieldSequence = int('300A02B0', 16)
ChannelShieldNumber = int('300A02B2', 16)
ChannelShieldID = int('300A02B3', 16)
ChannelShieldName = int('300A02B4', 16)
ChannelShieldNominalThickness = int('300A02B8', 16)
ChannelShieldNominalTransmission = int('300A02BA', 16)
FinalCumulativeTimeWeight = int('300A02C8', 16)
BrachyControlPointSequence = int('300A02D0', 16)
ControlPointRelativePosition = int('300A02D2', 16)
ControlPoint3DPosition = int('300A02D4', 16)
CumulativeTimeWeight = int('300A02D6', 16)
CompensatorDivergence = int('300A02E0', 16)
CompensatorMountingPosition = int('300A02E1', 16)
SourcetoCompensatorDistance = int('300A02E2', 16)
TotalCompensatorTrayWaterEquivalentThickness = int('300A02E3', 16)
IsocentertoCompensatorTrayDistance = int('300A02E4', 16)
CompensatorColumnOffset = int('300A02E5', 16)
IsocentertoCompensatorDistances = int('300A02E6', 16)
CompensatorRelativeStoppingPowerRatio = int('300A02E7', 16)
CompensatorMillingToolDiameter = int('300A02E8', 16)
IonRangeCompensatorSequence = int('300A02EA', 16)
CompensatorDescription = int('300A02EB', 16)
RadiationMassNumber = int('300A0302', 16)
RadiationAtomicNumber = int('300A0304', 16)
RadiationChargeState = int('300A0306', 16)
ScanMode = int('300A0308', 16)
ModulatedScanModeType = int('300A0309', 16)
VirtualSourceAxisDistances = int('300A030A', 16)
SnoutSequence = int('300A030C', 16)
SnoutPosition = int('300A030D', 16)
SnoutID = int('300A030F', 16)
NumberofRangeShifters = int('300A0312', 16)
RangeShifterSequence = int('300A0314', 16)
RangeShifterNumber = int('300A0316', 16)
RangeShifterID = int('300A0318', 16)
RangeShifterType = int('300A0320', 16)
RangeShifterDescription = int('300A0322', 16)
NumberofLateralSpreadingDevices = int('300A0330', 16)
LateralSpreadingDeviceSequence = int('300A0332', 16)
LateralSpreadingDeviceNumber = int('300A0334', 16)
LateralSpreadingDeviceID = int('300A0336', 16)
LateralSpreadingDeviceType = int('300A0338', 16)
LateralSpreadingDeviceDescription = int('300A033A', 16)
LateralSpreadingDeviceWaterEquivalentThickness = int('300A033C', 16)
NumberofRangeModulators = int('300A0340', 16)
RangeModulatorSequence = int('300A0342', 16)
RangeModulatorNumber = int('300A0344', 16)
RangeModulatorID = int('300A0346', 16)
RangeModulatorType = int('300A0348', 16)
RangeModulatorDescription = int('300A034A', 16)
BeamCurrentModulationID = int('300A034C', 16)
PatientSupportType = int('300A0350', 16)
PatientSupportID = int('300A0352', 16)
PatientSupportAccessoryCode = int('300A0354', 16)
TrayAccessoryCode = int('300A0355', 16)
FixationLightAzimuthalAngle = int('300A0356', 16)
FixationLightPolarAngle = int('300A0358', 16)
MetersetRate = int('300A035A', 16)
RangeShifterSettingsSequence = int('300A0360', 16)
RangeShifterSetting = int('300A0362', 16)
IsocentertoRangeShifterDistance = int('300A0364', 16)
RangeShifterWaterEquivalentThickness = int('300A0366', 16)
LateralSpreadingDeviceSettingsSequence = int('300A0370', 16)
LateralSpreadingDeviceSetting = int('300A0372', 16)
IsocentertoLateralSpreadingDeviceDistance = int('300A0374', 16)
RangeModulatorSettingsSequence = int('300A0380', 16)
RangeModulatorGatingStartValue = int('300A0382', 16)
RangeModulatorGatingStopValue = int('300A0384', 16)
RangeModulatorGatingStartWaterEquivalentThickness = int('300A0386', 16)
RangeModulatorGatingStopWaterEquivalentThickness = int('300A0388', 16)
IsocentertoRangeModulatorDistance = int('300A038A', 16)
ScanSpotTuneID = int('300A0390', 16)
ScanSpotPrescribedIndices = int('300A0391', 16)
NumberofScanSpotPositions = int('300A0392', 16)
ScanSpotReordered = int('300A0393', 16)
ScanSpotPositionMap = int('300A0394', 16)
ScanSpotReorderingAllowed = int('300A0395', 16)
ScanSpotMetersetWeights = int('300A0396', 16)
ScanningSpotSize = int('300A0398', 16)
NumberofPaintings = int('300A039A', 16)
IonToleranceTableSequence = int('300A03A0', 16)
IonBeamSequence = int('300A03A2', 16)
IonBeamLimitingDeviceSequence = int('300A03A4', 16)
IonBlockSequence = int('300A03A6', 16)
IonControlPointSequence = int('300A03A8', 16)
IonWedgeSequence = int('300A03AA', 16)
IonWedgePositionSequence = int('300A03AC', 16)
ReferencedSetupImageSequence = int('300A0401', 16)
SetupImageComment = int('300A0402', 16)
MotionSynchronizationSequence = int('300A0410', 16)
ControlPointOrientation = int('300A0412', 16)
GeneralAccessorySequence = int('300A0420', 16)
GeneralAccessoryID = int('300A0421', 16)
GeneralAccessoryDescription = int('300A0422', 16)
GeneralAccessoryType = int('300A0423', 16)
GeneralAccessoryNumber = int('300A0424', 16)
SourcetoGeneralAccessoryDistance = int('300A0425', 16)
ApplicatorGeometrySequence = int('300A0431', 16)
ApplicatorApertureShape = int('300A0432', 16)
ApplicatorOpening = int('300A0433', 16)
ApplicatorOpeningX = int('300A0434', 16)
ApplicatorOpeningY = int('300A0435', 16)
SourcetoApplicatorMountingPositionDistance = int('300A0436', 16)
NumberofBlockSlabItems = int('300A0440', 16)
BlockSlabSequence = int('300A0441', 16)
BlockSlabThickness = int('300A0442', 16)
BlockSlabNumber = int('300A0443', 16)
DeviceMotionControlSequence = int('300A0450', 16)
DeviceMotionExecutionMode = int('300A0451', 16)
DeviceMotionObservationMode = int('300A0452', 16)
DeviceMotionParameterCodeSequence = int('300A0453', 16)
DistalDepthFraction = int('300A0501', 16)
DistalDepth = int('300A0502', 16)
NominalRangeModulationFractions = int('300A0503', 16)
NominalRangeModulatedRegionDepths = int('300A0504', 16)
DepthDoseParametersSequence = int('300A0505', 16)
DeliveredDepthDoseParametersSequence = int('300A0506', 16)
DeliveredDistalDepthFraction = int('300A0507', 16)
DeliveredDistalDepth = int('300A0508', 16)
DeliveredNominalRangeModulationFractions = int('300A0509', 16)
DeliveredNominalRangeModulatedRegionDepths = int('300A0510', 16)
DeliveredReferenceDoseDefinition = int('300A0511', 16)
ReferenceDoseDefinition = int('300A0512', 16)
ReferencedRTPlanSequence = int('300C0002', 16)
ReferencedBeamSequence = int('300C0004', 16)
ReferencedBeamNumber = int('300C0006', 16)
ReferencedReferenceImageNumber = int('300C0007', 16)
StartCumulativeMetersetWeight = int('300C0008', 16)
EndCumulativeMetersetWeight = int('300C0009', 16)
ReferencedBrachyApplicationSetupSequence = int('300C000A', 16)
ReferencedBrachyApplicationSetupNumber = int('300C000C', 16)
ReferencedSourceNumber = int('300C000E', 16)
ReferencedFractionGroupSequence = int('300C0020', 16)
ReferencedFractionGroupNumber = int('300C0022', 16)
ReferencedVerificationImageSequence = int('300C0040', 16)
ReferencedReferenceImageSequence = int('300C0042', 16)
ReferencedDoseReferenceSequence = int('300C0050', 16)
ReferencedDoseReferenceNumber = int('300C0051', 16)
BrachyReferencedDoseReferenceSequence = int('300C0055', 16)
ReferencedStructureSetSequence = int('300C0060', 16)
ReferencedPatientSetupNumber = int('300C006A', 16)
ReferencedDoseSequence = int('300C0080', 16)
ReferencedToleranceTableNumber = int('300C00A0', 16)
ReferencedBolusSequence = int('300C00B0', 16)
ReferencedWedgeNumber = int('300C00C0', 16)
ReferencedCompensatorNumber = int('300C00D0', 16)
ReferencedBlockNumber = int('300C00E0', 16)
ReferencedControlPointIndex = int('300C00F0', 16)
ReferencedControlPointSequence = int('300C00F2', 16)
ReferencedStartControlPointIndex = int('300C00F4', 16)
ReferencedStopControlPointIndex = int('300C00F6', 16)
ReferencedRangeShifterNumber = int('300C0100', 16)
ReferencedLateralSpreadingDeviceNumber = int('300C0102', 16)
ReferencedRangeModulatorNumber = int('300C0104', 16)
OmittedBeamTaskSequence = int('300C0111', 16)
ReasonforOmission = int('300C0112', 16)
ReasonforOmissionDescription = int('300C0113', 16)
ApprovalStatus = int('300E0002', 16)
ReviewDate = int('300E0004', 16)
ReviewTime = int('300E0005', 16)
ReviewerName = int('300E0008', 16)
Arbitrary = int('40000010', 16)
TextComments = int('40004000', 16)
ResultsID = int('40080040', 16)
ResultsIDIssuer = int('40080042', 16)
ReferencedInterpretationSequence = int('40080050', 16)
ReportProductionStatusTrial = int('400800FF', 16)
InterpretationRecordedDate = int('40080100', 16)
InterpretationRecordedTime = int('40080101', 16)
InterpretationRecorder = int('40080102', 16)
ReferencetoRecordedSound = int('40080103', 16)
InterpretationTranscriptionDate = int('40080108', 16)
InterpretationTranscriptionTime = int('40080109', 16)
InterpretationTranscriber = int('4008010A', 16)
InterpretationText = int('4008010B', 16)
InterpretationAuthor = int('4008010C', 16)
InterpretationApproverSequence = int('40080111', 16)
InterpretationApprovalDate = int('40080112', 16)
InterpretationApprovalTime = int('40080113', 16)
PhysicianApprovingInterpretation = int('40080114', 16)
InterpretationDiagnosisDescription = int('40080115', 16)
InterpretationDiagnosisCodeSequence = int('40080117', 16)
ResultsDistributionListSequence = int('40080118', 16)
DistributionName = int('40080119', 16)
DistributionAddress = int('4008011A', 16)
InterpretationID = int('40080200', 16)
InterpretationIDIssuer = int('40080202', 16)
InterpretationTypeID = int('40080210', 16)
InterpretationStatusID = int('40080212', 16)
Impressions = int('40080300', 16)
ResultsComments = int('40084000', 16)
LowEnergyDetectors = int('40100001', 16)
HighEnergyDetectors = int('40100002', 16)
DetectorGeometrySequence = int('40100004', 16)
ThreatROIVoxelSequence = int('40101001', 16)
ThreatROIBase = int('40101004', 16)
ThreatROIExtents = int('40101005', 16)
ThreatROIBitmap = int('40101006', 16)
RouteSegmentID = int('40101007', 16)
GantryType = int('40101008', 16)
OOIOwnerType = int('40101009', 16)
RouteSegmentSequence = int('4010100A', 16)
PotentialThreatObjectID = int('40101010', 16)
ThreatSequence = int('40101011', 16)
ThreatCategory = int('40101012', 16)
ThreatCategoryDescription = int('40101013', 16)
ATDAbilityAssessment = int('40101014', 16)
ATDAssessmentFlag = int('40101015', 16)
ATDAssessmentProbability = int('40101016', 16)
Mass = int('40101017', 16)
Density = int('40101018', 16)
ZEffective = int('40101019', 16)
BoardingPassID = int('4010101A', 16)
CenterofMass = int('4010101B', 16)
CenterofPTO = int('4010101C', 16)
BoundingPolygon = int('4010101D', 16)
RouteSegmentStartLocationID = int('4010101E', 16)
RouteSegmentEndLocationID = int('4010101F', 16)
RouteSegmentLocationIDType = int('40101020', 16)
AbortReason = int('40101021', 16)
VolumeofPTO = int('40101023', 16)
AbortFlag = int('40101024', 16)
RouteSegmentStartTime = int('40101025', 16)
RouteSegmentEndTime = int('40101026', 16)
TDRType = int('40101027', 16)
InternationalRouteSegment = int('40101028', 16)
ThreatDetectionAlgorithmandVersion = int('40101029', 16)
AssignedLocation = int('4010102A', 16)
AlarmDecisionTime = int('4010102B', 16)
AlarmDecision = int('40101031', 16)
NumberofTotalObjects = int('40101033', 16)
NumberofAlarmObjects = int('40101034', 16)
PTORepresentationSequence = int('40101037', 16)
ATDAssessmentSequence = int('40101038', 16)
TIPType = int('40101039', 16)
Version = int('4010103A', 16)
OOIOwnerCreationTime = int('40101041', 16)
OOIType = int('40101042', 16)
OOISize = int('40101043', 16)
AcquisitionStatus = int('40101044', 16)
BasisMaterialsCodeSequence = int('40101045', 16)
PhantomType = int('40101046', 16)
OOIOwnerSequence = int('40101047', 16)
ScanType = int('40101048', 16)
ItineraryID = int('40101051', 16)
ItineraryIDType = int('40101052', 16)
ItineraryIDAssigningAuthority = int('40101053', 16)
RouteID = int('40101054', 16)
RouteIDAssigningAuthority = int('40101055', 16)
InboundArrivalType = int('40101056', 16)
CarrierID = int('40101058', 16)
CarrierIDAssigningAuthority = int('40101059', 16)
SourceOrientation = int('40101060', 16)
SourcePosition = int('40101061', 16)
BeltHeight = int('40101062', 16)
AlgorithmRoutingCodeSequence = int('40101064', 16)
TransportClassification = int('40101067', 16)
OOITypeDescriptor = int('40101068', 16)
TotalProcessingTime = int('40101069', 16)
DetectorCalibrationData = int('4010106C', 16)
AdditionalScreeningPerformed = int('4010106D', 16)
AdditionalInspectionSelectionCriteria = int('4010106E', 16)
AdditionalInspectionMethodSequence = int('4010106F', 16)
AITDeviceType = int('40101070', 16)
QRMeasurementsSequence = int('40101071', 16)
TargetMaterialSequence = int('40101072', 16)
SNRThreshold = int('40101073', 16)
ImageScaleRepresentation = int('40101075', 16)
ReferencedPTOSequence = int('40101076', 16)
ReferencedTDRInstanceSequence = int('40101077', 16)
PTOLocationDescription = int('40101078', 16)
AnomalyLocatorIndicatorSequence = int('40101079', 16)
AnomalyLocatorIndicator = int('4010107A', 16)
PTORegionSequence = int('4010107B', 16)
InspectionSelectionCriteria = int('4010107C', 16)
SecondaryInspectionMethodSequence = int('4010107D', 16)
PRCStoRCSOrientation = int('4010107E', 16)
MACParametersSequence = int('4FFE0001', 16)
SharedFunctionalGroupsSequence = int('52009229', 16)
PerframeFunctionalGroupsSequence = int('52009230', 16)
WaveformSequence = int('54000100', 16)
ChannelMinimumValue = int('54000110', 16)
ChannelMaximumValue = int('54000112', 16)
WaveformBitsAllocated = int('54001004', 16)
WaveformSampleInterpretation = int('54001006', 16)
WaveformPaddingValue = int('5400100A', 16)
WaveformData = int('54001010', 16)
FirstOrderPhaseCorrectionAngle = int('56000010', 16)
SpectroscopyData = int('56000020', 16)
FloatPixelData = int('7FE00008', 16)
DoubleFloatPixelData = int('7FE00009', 16)
PixelData = int('7FE00010', 16)
CoefficientsSDVN = int('7FE00020', 16)
CoefficientsSDHN = int('7FE00030', 16)
CoefficientsSDDN = int('7FE00040', 16)
DigitalSignaturesSequence = int('FFFAFFFA', 16)
DataSetTrailingPadding = int('FFFCFFFC', 16)
Item = int('FFFEE000', 16)
ItemDelimitationItem = int('FFFEE00D', 16)
SequenceDelimitationItem = int('FFFEE0DD', 16) | tensorflow_io/core/python/ops/dicom_ops.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_io.core.python.ops import core_ops
def decode_dicom_image(
        contents,
        color_dim=False,
        on_error='skip',
        scale='preserve',
        dtype=tf.uint16,
        name=None):
    """Decode the pixel data of a DICOM file into a tensor.

    This is a thin wrapper around the DCMTK-backed
    `core_ops.io_decode_dicom_image` kernel. Its sibling,
    `decode_dicom_data`, reads tag values instead of pixels; useful tag
    constants (e.g. `dicom_tags.PatientsName`, following the
    [`pydicom`](https://pydicom.github.io/) notation) live in `dicom_tags`.
    Detailed usage is covered in the
    [tutorial](https://www.tensorflow.org/io/tutorials/dicom).
    If this package helped, please kindly cite
    doi:10.5281/zenodo.3337331 (https://doi.org/10.5281/zenodo.3337331).

    Args:
      contents: A 0-D `Tensor` of type string: the byte-encoded DICOM file.
      color_dim: An optional `bool`, defaults to `False`. If `True`, a third
        channel is appended to every image, forming a 3-D tensor; e.g. a
        1024 x 1024 grayscale image becomes 1024 x 1024 x 1.
      on_error: Defaults to `skip`. Behavior when the image cannot be opened
        or the output type cannot accommodate all possible input values
        (for example an output dtype of `tf.uint8` for a DICOM image stored
        as `tf.uint16`): `strict` throws an error, `skip` returns an empty
        1-D tensor, and `lossy` continues, scaling values via the `scale`
        attribute.
      scale: Defaults to `preserve`. What to do with the scale of the input
        values: `auto` autoscales the input — for integer outputs it
        stretches to the full output range (e.g. `uint8` values in
        [0, 255] linearly stretched to fill a `uint16` in [0, 65535]),
        and for float outputs it scales to [0, 1]; `preserve` keeps values
        as they are, clipping any input above the maximum possible output.
      dtype: An optional `tf.DType` from: `tf.uint8`, `tf.uint16`,
        `tf.uint32`, `tf.uint64`, `tf.float16`, `tf.float32`, `tf.float64`.
        Defaults to `tf.uint16`.
      name: A name for the operation (optional).

    Returns:
      A `Tensor` of type `dtype`; its shape is determined by the DICOM file.
    """
    # Forward everything to the C++ kernel by keyword.
    op_kwargs = dict(
        contents=contents,
        color_dim=color_dim,
        on_error=on_error,
        scale=scale,
        dtype=dtype,
        name=name)
    return core_ops.io_decode_dicom_image(**op_kwargs)
def decode_dicom_data(
        contents,
        tags=None,
        name=None):
    """Decode tag values from a DICOM file into string tensors.

    This is a thin wrapper around the DCMTK-backed
    `core_ops.io_decode_dicom_data` kernel. Its sibling,
    `decode_dicom_image`, decodes the pixel data instead; useful tag
    constants (e.g. `dicom_tags.PatientsName`, following the
    [`pydicom`](https://pydicom.github.io/) notation) live in `dicom_tags`.
    Detailed usage is covered in the
    [tutorial](https://www.tensorflow.org/io/tutorials/dicom).
    If this package helped, please kindly cite
    doi:10.5281/zenodo.3337331 (https://doi.org/10.5281/zenodo.3337331).

    Args:
      contents: A 0-D `Tensor` of type string: the byte-encoded DICOM file.
      tags: A `Tensor` of type `tf.uint32` of any dimension; the `uint32`
        numbers map directly to DICOM tags.
      name: A name for the operation (optional).

    Returns:
      A `Tensor` of type `tf.string` with the same shape as `tags`. When a
      DICOM tag holds a list of strings, they are joined into one string
      separated by a double backslash `\\`. NOTE: due to a bug in
      [DCMTK](https://support.dcmtk.org/docs/), when a tag holds a list of
      numbers only the zeroth element is returned as a string.
    """
    # Forward everything to the C++ kernel by keyword.
    op_kwargs = dict(
        contents=contents,
        tags=tags,
        name=name)
    return core_ops.io_decode_dicom_data(**op_kwargs)
class dicom_tags(object): # pylint: disable=invalid-name
"""dicom_tags"""
def __init__(self):
pass
# pylint: disable=invalid-name
FileMetaInformationGroupLength = int('00020000', 16)
FileMetaInformationVersion = int('00020001', 16)
MediaStorageSOPClassUID = int('00020002', 16)
MediaStorageSOPInstanceUID = int('00020003', 16)
TransferSyntaxUID = int('00020010', 16)
ImplementationClassUID = int('00020012', 16)
ImplementationVersionName = int('00020013', 16)
SourceApplicationEntityTitle = int('00020016', 16)
SendingApplicationEntityTitle = int('00020017', 16)
ReceivingApplicationEntityTitle = int('00020018', 16)
PrivateInformationCreatorUID = int('00020100', 16)
PrivateInformation = int('00020102', 16)
FilesetID = int('00041130', 16)
FilesetDescriptorFileID = int('00041141', 16)
SpecificCharacterSetofFilesetDescriptorFile = int('00041142', 16)
OffsetoftheFirstDirectoryRecordoftheRootDirectoryEntity = int(
'00041200', 16)
OffsetoftheLastDirectoryRecordoftheRootDirectoryEntity = int(
'00041202', 16)
FilesetConsistencyFlag = int('00041212', 16)
DirectoryRecordSequence = int('00041220', 16)
OffsetoftheNextDirectoryRecord = int('00041400', 16)
RecordInuseFlag = int('00041410', 16)
OffsetofReferencedLowerLevelDirectoryEntity = int('00041420', 16)
DirectoryRecordType = int('00041430', 16)
PrivateRecordUID = int('00041432', 16)
ReferencedFileID = int('00041500', 16)
MRDRDirectoryRecordOffset = int('00041504', 16)
ReferencedSOPClassUIDinFile = int('00041510', 16)
ReferencedSOPInstanceUIDinFile = int('00041511', 16)
ReferencedTransferSyntaxUIDinFile = int('00041512', 16)
ReferencedRelatedGeneralSOPClassUIDinFile = int('0004151A', 16)
NumberofReferences = int('00041600', 16)
LengthtoEnd = int('00080001', 16)
SpecificCharacterSet = int('00080005', 16)
LanguageCodeSequence = int('00080006', 16)
ImageType = int('00080008', 16)
RecognitionCode = int('00080010', 16)
InstanceCreationDate = int('00080012', 16)
InstanceCreationTime = int('00080013', 16)
InstanceCreatorUID = int('00080014', 16)
InstanceCoercionDateTime = int('00080015', 16)
SOPClassUID = int('00080016', 16)
SOPInstanceUID = int('00080018', 16)
RelatedGeneralSOPClassUID = int('0008001A', 16)
OriginalSpecializedSOPClassUID = int('0008001B', 16)
StudyDate = int('00080020', 16)
SeriesDate = int('00080021', 16)
AcquisitionDate = int('00080022', 16)
ContentDate = int('00080023', 16)
OverlayDate = int('00080024', 16)
CurveDate = int('00080025', 16)
AcquisitionDateTime = int('0008002A', 16)
StudyTime = int('00080030', 16)
SeriesTime = int('00080031', 16)
AcquisitionTime = int('00080032', 16)
ContentTime = int('00080033', 16)
OverlayTime = int('00080034', 16)
CurveTime = int('00080035', 16)
DataSetType = int('00080040', 16)
DataSetSubtype = int('00080041', 16)
NuclearMedicineSeriesType = int('00080042', 16)
AccessionNumber = int('00080050', 16)
IssuerofAccessionNumberSequence = int('00080051', 16)
QueryRetrieveLevel = int('00080052', 16)
QueryRetrieveView = int('00080053', 16)
RetrieveAETitle = int('00080054', 16)
StationAETitle = int('00080055', 16)
InstanceAvailability = int('00080056', 16)
FailedSOPInstanceUIDList = int('00080058', 16)
Modality = int('00080060', 16)
ModalitiesinStudy = int('00080061', 16)
SOPClassesinStudy = int('00080062', 16)
ConversionType = int('00080064', 16)
PresentationIntentType = int('00080068', 16)
Manufacturer = int('00080070', 16)
InstitutionName = int('00080080', 16)
InstitutionAddress = int('00080081', 16)
InstitutionCodeSequence = int('00080082', 16)
ReferringPhysiciansName = int('00080090', 16)
ReferringPhysiciansAddress = int('00080092', 16)
ReferringPhysiciansTelephoneNumbers = int('00080094', 16)
ReferringPhysicianIdentificationSequence = int('00080096', 16)
ConsultingPhysiciansName = int('0008009C', 16)
ConsultingPhysicianIdentificationSequence = int('0008009D', 16)
CodeValue = int('00080100', 16)
ExtendedCodeValue = int('00080101', 16)
CodingSchemeDesignator = int('00080102', 16)
CodingSchemeVersion = int('00080103', 16)
CodeMeaning = int('00080104', 16)
MappingResource = int('00080105', 16)
ContextGroupVersion = int('00080106', 16)
ContextGroupLocalVersion = int('00080107', 16)
ExtendedCodeMeaning = int('00080108', 16)
ContextGroupExtensionFlag = int('0008010B', 16)
CodingSchemeUID = int('0008010C', 16)
ContextGroupExtensionCreatorUID = int('0008010D', 16)
ContextIdentifier = int('0008010F', 16)
CodingSchemeIdentificationSequence = int('00080110', 16)
CodingSchemeRegistry = int('00080112', 16)
CodingSchemeExternalID = int('00080114', 16)
CodingSchemeName = int('00080115', 16)
CodingSchemeResponsibleOrganization = int('00080116', 16)
ContextUID = int('00080117', 16)
MappingResourceUID = int('00080118', 16)
LongCodeValue = int('00080119', 16)
URNCodeValue = int('00080120', 16)
EquivalentCodeSequence = int('00080121', 16)
MappingResourceName = int('00080122', 16)
ContextGroupIdentificationSequence = int('00080123', 16)
MappingResourceIdentificationSequence = int('00080124', 16)
TimezoneOffsetFromUTC = int('00080201', 16)
PrivateDataElementCharacteristicsSequence = int('00080300', 16)
PrivateGroupReference = int('00080301', 16)
PrivateCreatorReference = int('00080302', 16)
BlockIdentifyingInformationStatus = int('00080303', 16)
NonidentifyingPrivateElements = int('00080304', 16)
IdentifyingPrivateElements = int('00080306', 16)
DeidentificationActionSequence = int('00080305', 16)
DeidentificationAction = int('00080307', 16)
NetworkID = int('00081000', 16)
StationName = int('00081010', 16)
StudyDescription = int('00081030', 16)
ProcedureCodeSequence = int('00081032', 16)
SeriesDescription = int('0008103E', 16)
SeriesDescriptionCodeSequence = int('0008103F', 16)
InstitutionalDepartmentName = int('00081040', 16)
PhysiciansofRecord = int('00081048', 16)
PhysiciansofRecordIdentificationSequence = int('00081049', 16)
PerformingPhysiciansName = int('00081050', 16)
PerformingPhysicianIdentificationSequence = int('00081052', 16)
NameofPhysiciansReadingStudy = int('00081060', 16)
PhysiciansReadingStudyIdentificationSequence = int('00081062', 16)
OperatorsName = int('00081070', 16)
OperatorIdentificationSequence = int('00081072', 16)
AdmittingDiagnosesDescription = int('00081080', 16)
AdmittingDiagnosesCodeSequence = int('00081084', 16)
ManufacturersModelName = int('00081090', 16)
ReferencedResultsSequence = int('00081100', 16)
ReferencedStudySequence = int('00081110', 16)
ReferencedPerformedProcedureStepSequence = int('00081111', 16)
ReferencedSeriesSequence = int('00081115', 16)
ReferencedPatientSequence = int('00081120', 16)
ReferencedVisitSequence = int('00081125', 16)
ReferencedOverlaySequence = int('00081130', 16)
ReferencedStereometricInstanceSequence = int('00081134', 16)
ReferencedWaveformSequence = int('0008113A', 16)
ReferencedImageSequence = int('00081140', 16)
ReferencedCurveSequence = int('00081145', 16)
ReferencedInstanceSequence = int('0008114A', 16)
ReferencedRealWorldValueMappingInstanceSequence = int('0008114B', 16)
ReferencedSOPClassUID = int('00081150', 16)
ReferencedSOPInstanceUID = int('00081155', 16)
SOPClassesSupported = int('0008115A', 16)
ReferencedFrameNumber = int('00081160', 16)
SimpleFrameList = int('00081161', 16)
CalculatedFrameList = int('00081162', 16)
TimeRange = int('00081163', 16)
FrameExtractionSequence = int('00081164', 16)
MultiframeSourceSOPInstanceUID = int('00081167', 16)
RetrieveURL = int('00081190', 16)
TransactionUID = int('00081195', 16)
WarningReason = int('00081196', 16)
FailureReason = int('00081197', 16)
FailedSOPSequence = int('00081198', 16)
ReferencedSOPSequence = int('00081199', 16)
OtherFailuresSequence = int('0008119A', 16)
StudiesContainingOtherReferencedInstancesSequence = int('00081200', 16)
RelatedSeriesSequence = int('00081250', 16)
LossyImageCompressionRetired = int('00082110', 16)
DerivationDescription = int('00082111', 16)
SourceImageSequence = int('00082112', 16)
StageName = int('00082120', 16)
StageNumber = int('00082122', 16)
NumberofStages = int('00082124', 16)
ViewName = int('00082127', 16)
ViewNumber = int('00082128', 16)
NumberofEventTimers = int('00082129', 16)
NumberofViewsinStage = int('0008212A', 16)
EventElapsedTimes = int('00082130', 16)
EventTimerNames = int('00082132', 16)
EventTimerSequence = int('00082133', 16)
EventTimeOffset = int('00082134', 16)
EventCodeSequence = int('00082135', 16)
StartTrim = int('00082142', 16)
StopTrim = int('00082143', 16)
RecommendedDisplayFrameRate = int('00082144', 16)
TransducerPosition = int('00082200', 16)
TransducerOrientation = int('00082204', 16)
AnatomicStructure = int('00082208', 16)
AnatomicRegionSequence = int('00082218', 16)
AnatomicRegionModifierSequence = int('00082220', 16)
PrimaryAnatomicStructureSequence = int('00082228', 16)
AnatomicStructureSpaceorRegionSequence = int('00082229', 16)
PrimaryAnatomicStructureModifierSequence = int('00082230', 16)
TransducerPositionSequence = int('00082240', 16)
TransducerPositionModifierSequence = int('00082242', 16)
TransducerOrientationSequence = int('00082244', 16)
TransducerOrientationModifierSequence = int('00082246', 16)
AnatomicStructureSpaceOrRegionCodeSequenceTrial = int('00082251', 16)
AnatomicPortalOfEntranceCodeSequenceTrial = int('00082253', 16)
AnatomicApproachDirectionCodeSequenceTrial = int('00082255', 16)
AnatomicPerspectiveDescriptionTrial = int('00082256', 16)
AnatomicPerspectiveCodeSequenceTrial = int('00082257', 16)
AnatomicLocationOfExaminingInstrumentDescriptionTrial = int('00082258', 16)
AnatomicLocationOfExaminingInstrumentCodeSequenceTrial = int(
'00082259', 16)
AnatomicStructureSpaceOrRegionModifierCodeSequenceTrial = int(
'0008225A', 16)
OnAxisBackgroundAnatomicStructureCodeSequenceTrial = int('0008225C', 16)
AlternateRepresentationSequence = int('00083001', 16)
IrradiationEventUID = int('00083010', 16)
SourceIrradiationEventSequence = int('00083011', 16)
RadiopharmaceuticalAdministrationEventUID = int('00083012', 16)
IdentifyingComments = int('00084000', 16)
FrameType = int('00089007', 16)
ReferencedImageEvidenceSequence = int('00089092', 16)
ReferencedRawDataSequence = int('00089121', 16)
CreatorVersionUID = int('00089123', 16)
DerivationImageSequence = int('00089124', 16)
SourceImageEvidenceSequence = int('00089154', 16)
PixelPresentation = int('00089205', 16)
VolumetricProperties = int('00089206', 16)
VolumeBasedCalculationTechnique = int('00089207', 16)
ComplexImageComponent = int('00089208', 16)
AcquisitionContrast = int('00089209', 16)
DerivationCodeSequence = int('00089215', 16)
ReferencedPresentationStateSequence = int('00089237', 16)
ReferencedOtherPlaneSequence = int('00089410', 16)
FrameDisplaySequence = int('00089458', 16)
RecommendedDisplayFrameRateinFloat = int('00089459', 16)
SkipFrameRangeFlag = int('00089460', 16)
PatientsName = int('00100010', 16)
PatientID = int('00100020', 16)
IssuerofPatientID = int('00100021', 16)
TypeofPatientID = int('00100022', 16)
IssuerofPatientIDQualifiersSequence = int('00100024', 16)
SourcePatientGroupIdentificationSequence = int('00100026', 16)
GroupofPatientsIdentificationSequence = int('00100027', 16)
SubjectRelativePositioninImage = int('00100028', 16)
PatientsBirthDate = int('00100030', 16)
PatientsBirthTime = int('00100032', 16)
PatientsBirthDateinAlternativeCalendar = int('00100033', 16)
PatientsDeathDateinAlternativeCalendar = int('00100034', 16)
PatientsAlternativeCalendar = int('00100035', 16)
PatientsSex = int('00100040', 16)
PatientsInsurancePlanCodeSequence = int('00100050', 16)
PatientsPrimaryLanguageCodeSequence = int('00100101', 16)
PatientsPrimaryLanguageModifierCodeSequence = int('00100102', 16)
QualityControlSubject = int('00100200', 16)
QualityControlSubjectTypeCodeSequence = int('00100201', 16)
StrainDescription = int('00100212', 16)
StrainNomenclature = int('00100213', 16)
StrainStockNumber = int('00100214', 16)
StrainSourceRegistryCodeSequence = int('00100215', 16)
StrainStockSequence = int('00100216', 16)
StrainSource = int('00100217', 16)
StrainAdditionalInformation = int('00100218', 16)
StrainCodeSequence = int('00100219', 16)
OtherPatientIDs = int('00101000', 16)
OtherPatientNames = int('00101001', 16)
OtherPatientIDsSequence = int('00101002', 16)
PatientsBirthName = int('00101005', 16)
PatientsAge = int('00101010', 16)
PatientsSize = int('00101020', 16)
PatientsSizeCodeSequence = int('00101021', 16)
PatientsWeight = int('00101030', 16)
PatientsAddress = int('00101040', 16)
InsurancePlanIdentification = int('00101050', 16)
PatientsMothersBirthName = int('00101060', 16)
MilitaryRank = int('00101080', 16)
BranchofService = int('00101081', 16)
MedicalRecordLocator = int('00101090', 16)
ReferencedPatientPhotoSequence = int('00101100', 16)
MedicalAlerts = int('00102000', 16)
Allergies = int('00102110', 16)
CountryofResidence = int('00102150', 16)
RegionofResidence = int('00102152', 16)
PatientsTelephoneNumbers = int('00102154', 16)
PatientsTelecomInformation = int('00102155', 16)
EthnicGroup = int('00102160', 16)
Occupation = int('00102180', 16)
SmokingStatus = int('001021A0', 16)
AdditionalPatientHistory = int('001021B0', 16)
PregnancyStatus = int('001021C0', 16)
LastMenstrualDate = int('001021D0', 16)
PatientsReligiousPreference = int('001021F0', 16)
PatientSpeciesDescription = int('00102201', 16)
PatientSpeciesCodeSequence = int('00102202', 16)
PatientsSexNeutered = int('00102203', 16)
AnatomicalOrientationType = int('00102210', 16)
PatientBreedDescription = int('00102292', 16)
PatientBreedCodeSequence = int('00102293', 16)
BreedRegistrationSequence = int('00102294', 16)
BreedRegistrationNumber = int('00102295', 16)
BreedRegistryCodeSequence = int('00102296', 16)
ResponsiblePerson = int('00102297', 16)
ResponsiblePersonRole = int('00102298', 16)
ResponsibleOrganization = int('00102299', 16)
PatientComments = int('00104000', 16)
ExaminedBodyThickness = int('00109431', 16)
ClinicalTrialSponsorName = int('00120010', 16)
ClinicalTrialProtocolID = int('00120020', 16)
ClinicalTrialProtocolName = int('00120021', 16)
ClinicalTrialSiteID = int('00120030', 16)
ClinicalTrialSiteName = int('00120031', 16)
ClinicalTrialSubjectID = int('00120040', 16)
ClinicalTrialSubjectReadingID = int('00120042', 16)
ClinicalTrialTimePointID = int('00120050', 16)
ClinicalTrialTimePointDescription = int('00120051', 16)
ClinicalTrialCoordinatingCenterName = int('00120060', 16)
PatientIdentityRemoved = int('00120062', 16)
DeidentificationMethod = int('00120063', 16)
DeidentificationMethodCodeSequence = int('00120064', 16)
ClinicalTrialSeriesID = int('00120071', 16)
ClinicalTrialSeriesDescription = int('00120072', 16)
ClinicalTrialProtocolEthicsCommitteeName = int('00120081', 16)
ClinicalTrialProtocolEthicsCommitteeApprovalNumber = int('00120082', 16)
ConsentforClinicalTrialUseSequence = int('00120083', 16)
DistributionType = int('00120084', 16)
ConsentforDistributionFlag = int('00120085', 16)
CADFileFormat = int('00140023', 16)
ComponentReferenceSystem = int('00140024', 16)
ComponentManufacturingProcedure = int('00140025', 16)
ComponentManufacturer = int('00140028', 16)
MaterialThickness = int('00140030', 16)
MaterialPipeDiameter = int('00140032', 16)
MaterialIsolationDiameter = int('00140034', 16)
MaterialGrade = int('00140042', 16)
MaterialPropertiesDescription = int('00140044', 16)
MaterialPropertiesFileFormatRetired = int('00140045', 16)
MaterialNotes = int('00140046', 16)
ComponentShape = int('00140050', 16)
CurvatureType = int('00140052', 16)
OuterDiameter = int('00140054', 16)
InnerDiameter = int('00140056', 16)
ComponentWelderIDs = int('00140100', 16)
SecondaryApprovalStatus = int('00140101', 16)
SecondaryReviewDate = int('00140102', 16)
SecondaryReviewTime = int('00140103', 16)
SecondaryReviewerName = int('00140104', 16)
RepairID = int('00140105', 16)
MultipleComponentApprovalSequence = int('00140106', 16)
OtherApprovalStatus = int('00140107', 16)
OtherSecondaryApprovalStatus = int('00140108', 16)
ActualEnvironmentalConditions = int('00141010', 16)
ExpiryDate = int('00141020', 16)
EnvironmentalConditions = int('00141040', 16)
EvaluatorSequence = int('00142002', 16)
EvaluatorNumber = int('00142004', 16)
EvaluatorName = int('00142006', 16)
EvaluationAttempt = int('00142008', 16)
IndicationSequence = int('00142012', 16)
IndicationNumber = int('00142014', 16)
IndicationLabel = int('00142016', 16)
IndicationDescription = int('00142018', 16)
IndicationType = int('0014201A', 16)
IndicationDisposition = int('0014201C', 16)
IndicationROISequence = int('0014201E', 16)
IndicationPhysicalPropertySequence = int('00142030', 16)
PropertyLabel = int('00142032', 16)
CoordinateSystemNumberofAxes = int('00142202', 16)
CoordinateSystemAxesSequence = int('00142204', 16)
CoordinateSystemAxisDescription = int('00142206', 16)
CoordinateSystemDataSetMapping = int('00142208', 16)
CoordinateSystemAxisNumber = int('0014220A', 16)
CoordinateSystemAxisType = int('0014220C', 16)
CoordinateSystemAxisUnits = int('0014220E', 16)
CoordinateSystemAxisValues = int('00142210', 16)
CoordinateSystemTransformSequence = int('00142220', 16)
TransformDescription = int('00142222', 16)
TransformNumberofAxes = int('00142224', 16)
TransformOrderofAxes = int('00142226', 16)
TransformedAxisUnits = int('00142228', 16)
CoordinateSystemTransformRotationandScaleMatrix = int('0014222A', 16)
CoordinateSystemTransformTranslationMatrix = int('0014222C', 16)
InternalDetectorFrameTime = int('00143011', 16)
NumberofFramesIntegrated = int('00143012', 16)
DetectorTemperatureSequence = int('00143020', 16)
SensorName = int('00143022', 16)
HorizontalOffsetofSensor = int('00143024', 16)
VerticalOffsetofSensor = int('00143026', 16)
SensorTemperature = int('00143028', 16)
DarkCurrentSequence = int('00143040', 16)
DarkCurrentCounts = int('00143050', 16)
GainCorrectionReferenceSequence = int('00143060', 16)
AirCounts = int('00143070', 16)
KVUsedinGainCalibration = int('00143071', 16)
MAUsedinGainCalibration = int('00143072', 16)
NumberofFramesUsedforIntegration = int('00143073', 16)
FilterMaterialUsedinGainCalibration = int('00143074', 16)
FilterThicknessUsedinGainCalibration = int('00143075', 16)
DateofGainCalibration = int('00143076', 16)
TimeofGainCalibration = int('00143077', 16)
BadPixelImage = int('00143080', 16)
CalibrationNotes = int('00143099', 16)
PulserEquipmentSequence = int('00144002', 16)
PulserType = int('00144004', 16)
PulserNotes = int('00144006', 16)
ReceiverEquipmentSequence = int('00144008', 16)
AmplifierType = int('0014400A', 16)
ReceiverNotes = int('0014400C', 16)
PreAmplifierEquipmentSequence = int('0014400E', 16)
PreAmplifierNotes = int('0014400F', 16)
TransmitTransducerSequence = int('00144010', 16)
ReceiveTransducerSequence = int('00144011', 16)
NumberofElements = int('00144012', 16)
ElementShape = int('00144013', 16)
ElementDimensionA = int('00144014', 16)
ElementDimensionB = int('00144015', 16)
ElementPitchA = int('00144016', 16)
MeasuredBeamDimensionA = int('00144017', 16)
MeasuredBeamDimensionB = int('00144018', 16)
LocationofMeasuredBeamDiameter = int('00144019', 16)
NominalFrequency = int('0014401A', 16)
MeasuredCenterFrequency = int('0014401B', 16)
MeasuredBandwidth = int('0014401C', 16)
ElementPitchB = int('0014401D', 16)
PulserSettingsSequence = int('00144020', 16)
PulseWidth = int('00144022', 16)
ExcitationFrequency = int('00144024', 16)
ModulationType = int('00144026', 16)
Damping = int('00144028', 16)
ReceiverSettingsSequence = int('00144030', 16)
AcquiredSoundpathLength = int('00144031', 16)
AcquisitionCompressionType = int('00144032', 16)
AcquisitionSampleSize = int('00144033', 16)
RectifierSmoothing = int('00144034', 16)
DACSequence = int('00144035', 16)
DACType = int('00144036', 16)
DACGainPoints = int('00144038', 16)
DACTimePoints = int('0014403A', 16)
DACAmplitude = int('0014403C', 16)
PreAmplifierSettingsSequence = int('00144040', 16)
TransmitTransducerSettingsSequence = int('00144050', 16)
ReceiveTransducerSettingsSequence = int('00144051', 16)
IncidentAngle = int('00144052', 16)
CouplingTechnique = int('00144054', 16)
CouplingMedium = int('00144056', 16)
CouplingVelocity = int('00144057', 16)
ProbeCenterLocationX = int('00144058', 16)
ProbeCenterLocationZ = int('00144059', 16)
SoundPathLength = int('0014405A', 16)
DelayLawIdentifier = int('0014405C', 16)
GateSettingsSequence = int('00144060', 16)
GateThreshold = int('00144062', 16)
VelocityofSound = int('00144064', 16)
CalibrationSettingsSequence = int('00144070', 16)
CalibrationProcedure = int('00144072', 16)
ProcedureVersion = int('00144074', 16)
ProcedureCreationDate = int('00144076', 16)
ProcedureExpirationDate = int('00144078', 16)
ProcedureLastModifiedDate = int('0014407A', 16)
CalibrationTime = int('0014407C', 16)
CalibrationDate = int('0014407E', 16)
ProbeDriveEquipmentSequence = int('00144080', 16)
DriveType = int('00144081', 16)
ProbeDriveNotes = int('00144082', 16)
DriveProbeSequence = int('00144083', 16)
ProbeInductance = int('00144084', 16)
ProbeResistance = int('00144085', 16)
ReceiveProbeSequence = int('00144086', 16)
ProbeDriveSettingsSequence = int('00144087', 16)
BridgeResistors = int('00144088', 16)
ProbeOrientationAngle = int('00144089', 16)
UserSelectedGainY = int('0014408B', 16)
UserSelectedPhase = int('0014408C', 16)
UserSelectedOffsetX = int('0014408D', 16)
UserSelectedOffsetY = int('0014408E', 16)
ChannelSettingsSequence = int('00144091', 16)
ChannelThreshold = int('00144092', 16)
ScannerSettingsSequence = int('0014409A', 16)
ScanProcedure = int('0014409B', 16)
TranslationRateX = int('0014409C', 16)
TranslationRateY = int('0014409D', 16)
ChannelOverlap = int('0014409F', 16)
ImageQualityIndicatorType = int('001440A0', 16)
ImageQualityIndicatorMaterial = int('001440A1', 16)
ImageQualityIndicatorSize = int('001440A2', 16)
LINACEnergy = int('00145002', 16)
LINACOutput = int('00145004', 16)
ActiveAperture = int('00145100', 16)
TotalAperture = int('00145101', 16)
ApertureElevation = int('00145102', 16)
MainLobeAngle = int('00145103', 16)
MainRoofAngle = int('00145104', 16)
ConnectorType = int('00145105', 16)
WedgeModelNumber = int('00145106', 16)
WedgeAngleFloat = int('00145107', 16)
WedgeRoofAngle = int('00145108', 16)
WedgeElement1Position = int('00145109', 16)
WedgeMaterialVelocity = int('0014510A', 16)
WedgeMaterial = int('0014510B', 16)
WedgeOffsetZ = int('0014510C', 16)
WedgeOriginOffsetX = int('0014510D', 16)
WedgeTimeDelay = int('0014510E', 16)
WedgeName = int('0014510F', 16)
WedgeManufacturerName = int('00145110', 16)
WedgeDescription = int('00145111', 16)
NominalBeamAngle = int('00145112', 16)
WedgeOffsetX = int('00145113', 16)
WedgeOffsetY = int('00145114', 16)
WedgeTotalLength = int('00145115', 16)
WedgeInContactLength = int('00145116', 16)
WedgeFrontGap = int('00145117', 16)
WedgeTotalHeight = int('00145118', 16)
WedgeFrontHeight = int('00145119', 16)
WedgeRearHeight = int('0014511A', 16)
WedgeTotalWidth = int('0014511B', 16)
WedgeInContactWidth = int('0014511C', 16)
WedgeChamferHeight = int('0014511D', 16)
WedgeCurve = int('0014511E', 16)
RadiusAlongtheWedge = int('0014511F', 16)
ContrastBolusAgent = int('00180010', 16)
ContrastBolusAgentSequence = int('00180012', 16)
ContrastBolusT1Relaxivity = int('00180013', 16)
ContrastBolusAdministrationRouteSequence = int('00180014', 16)
BodyPartExamined = int('00180015', 16)
ScanningSequence = int('00180020', 16)
SequenceVariant = int('00180021', 16)
ScanOptions = int('00180022', 16)
MRAcquisitionType = int('00180023', 16)
SequenceName = int('00180024', 16)
AngioFlag = int('00180025', 16)
InterventionDrugInformationSequence = int('00180026', 16)
InterventionDrugStopTime = int('00180027', 16)
InterventionDrugDose = int('00180028', 16)
InterventionDrugCodeSequence = int('00180029', 16)
AdditionalDrugSequence = int('0018002A', 16)
Radionuclide = int('00180030', 16)
Radiopharmaceutical = int('00180031', 16)
EnergyWindowCenterline = int('00180032', 16)
EnergyWindowTotalWidth = int('00180033', 16)
InterventionDrugName = int('00180034', 16)
InterventionDrugStartTime = int('00180035', 16)
InterventionSequence = int('00180036', 16)
TherapyType = int('00180037', 16)
InterventionStatus = int('00180038', 16)
TherapyDescription = int('00180039', 16)
InterventionDescription = int('0018003A', 16)
CineRate = int('00180040', 16)
InitialCineRunState = int('00180042', 16)
SliceThickness = int('00180050', 16)
KVP = int('00180060', 16)
CountsAccumulated = int('00180070', 16)
AcquisitionTerminationCondition = int('00180071', 16)
EffectiveDuration = int('00180072', 16)
AcquisitionStartCondition = int('00180073', 16)
AcquisitionStartConditionData = int('00180074', 16)
AcquisitionTerminationConditionData = int('00180075', 16)
RepetitionTime = int('00180080', 16)
EchoTime = int('00180081', 16)
InversionTime = int('00180082', 16)
NumberofAverages = int('00180083', 16)
ImagingFrequency = int('00180084', 16)
ImagedNucleus = int('00180085', 16)
EchoNumbers = int('00180086', 16)
MagneticFieldStrength = int('00180087', 16)
SpacingBetweenSlices = int('00180088', 16)
NumberofPhaseEncodingSteps = int('00180089', 16)
DataCollectionDiameter = int('00180090', 16)
EchoTrainLength = int('00180091', 16)
PercentSampling = int('00180093', 16)
PercentPhaseFieldofView = int('00180094', 16)
PixelBandwidth = int('00180095', 16)
DeviceSerialNumber = int('00181000', 16)
DeviceUID = int('00181002', 16)
DeviceID = int('00181003', 16)
PlateID = int('00181004', 16)
GeneratorID = int('00181005', 16)
GridID = int('00181006', 16)
CassetteID = int('00181007', 16)
GantryID = int('00181008', 16)
SecondaryCaptureDeviceID = int('00181010', 16)
HardcopyCreationDeviceID = int('00181011', 16)
DateofSecondaryCapture = int('00181012', 16)
TimeofSecondaryCapture = int('00181014', 16)
SecondaryCaptureDeviceManufacturer = int('00181016', 16)
HardcopyDeviceManufacturer = int('00181017', 16)
SecondaryCaptureDeviceManufacturersModelName = int('00181018', 16)
SecondaryCaptureDeviceSoftwareVersions = int('00181019', 16)
HardcopyDeviceSoftwareVersion = int('0018101A', 16)
HardcopyDeviceManufacturersModelName = int('0018101B', 16)
SoftwareVersions = int('00181020', 16)
VideoImageFormatAcquired = int('00181022', 16)
DigitalImageFormatAcquired = int('00181023', 16)
ProtocolName = int('00181030', 16)
ContrastBolusRoute = int('00181040', 16)
ContrastBolusVolume = int('00181041', 16)
ContrastBolusStartTime = int('00181042', 16)
ContrastBolusStopTime = int('00181043', 16)
ContrastBolusTotalDose = int('00181044', 16)
SyringeCounts = int('00181045', 16)
ContrastFlowRate = int('00181046', 16)
ContrastFlowDuration = int('00181047', 16)
ContrastBolusIngredient = int('00181048', 16)
ContrastBolusIngredientConcentration = int('00181049', 16)
SpatialResolution = int('00181050', 16)
TriggerTime = int('00181060', 16)
TriggerSourceorType = int('00181061', 16)
NominalInterval = int('00181062', 16)
FrameTime = int('00181063', 16)
CardiacFramingType = int('00181064', 16)
FrameTimeVector = int('00181065', 16)
FrameDelay = int('00181066', 16)
ImageTriggerDelay = int('00181067', 16)
MultiplexGroupTimeOffset = int('00181068', 16)
TriggerTimeOffset = int('00181069', 16)
SynchronizationTrigger = int('0018106A', 16)
SynchronizationChannel = int('0018106C', 16)
TriggerSamplePosition = int('0018106E', 16)
RadiopharmaceuticalRoute = int('00181070', 16)
RadiopharmaceuticalVolume = int('00181071', 16)
RadiopharmaceuticalStartTime = int('00181072', 16)
RadiopharmaceuticalStopTime = int('00181073', 16)
RadionuclideTotalDose = int('00181074', 16)
RadionuclideHalfLife = int('00181075', 16)
RadionuclidePositronFraction = int('00181076', 16)
RadiopharmaceuticalSpecificActivity = int('00181077', 16)
RadiopharmaceuticalStartDateTime = int('00181078', 16)
RadiopharmaceuticalStopDateTime = int('00181079', 16)
BeatRejectionFlag = int('00181080', 16)
LowRRValue = int('00181081', 16)
HighRRValue = int('00181082', 16)
IntervalsAcquired = int('00181083', 16)
IntervalsRejected = int('00181084', 16)
PVCRejection = int('00181085', 16)
SkipBeats = int('00181086', 16)
HeartRate = int('00181088', 16)
CardiacNumberofImages = int('00181090', 16)
TriggerWindow = int('00181094', 16)
ReconstructionDiameter = int('00181100', 16)
DistanceSourcetoDetector = int('00181110', 16)
DistanceSourcetoPatient = int('00181111', 16)
EstimatedRadiographicMagnificationFactor = int('00181114', 16)
GantryDetectorTilt = int('00181120', 16)
GantryDetectorSlew = int('00181121', 16)
TableHeight = int('00181130', 16)
TableTraverse = int('00181131', 16)
TableMotion = int('00181134', 16)
TableVerticalIncrement = int('00181135', 16)
TableLateralIncrement = int('00181136', 16)
TableLongitudinalIncrement = int('00181137', 16)
TableAngle = int('00181138', 16)
TableType = int('0018113A', 16)
RotationDirection = int('00181140', 16)
AngularPosition = int('00181141', 16)
RadialPosition = int('00181142', 16)
ScanArc = int('00181143', 16)
AngularStep = int('00181144', 16)
CenterofRotationOffset = int('00181145', 16)
RotationOffset = int('00181146', 16)
FieldofViewShape = int('00181147', 16)
FieldofViewDimensions = int('00181149', 16)
ExposureTime = int('00181150', 16)
XRayTubeCurrent = int('00181151', 16)
Exposure = int('00181152', 16)
ExposureinAs = int('00181153', 16)
AveragePulseWidth = int('00181154', 16)
RadiationSetting = int('00181155', 16)
RectificationType = int('00181156', 16)
RadiationMode = int('0018115A', 16)
ImageandFluoroscopyAreaDoseProduct = int('0018115E', 16)
FilterType = int('00181160', 16)
TypeofFilters = int('00181161', 16)
IntensifierSize = int('00181162', 16)
ImagerPixelSpacing = int('00181164', 16)
Grid = int('00181166', 16)
GeneratorPower = int('00181170', 16)
CollimatorgridName = int('00181180', 16)
CollimatorType = int('00181181', 16)
FocalDistance = int('00181182', 16)
XFocusCenter = int('00181183', 16)
YFocusCenter = int('00181184', 16)
FocalSpots = int('00181190', 16)
AnodeTargetMaterial = int('00181191', 16)
BodyPartThickness = int('001811A0', 16)
CompressionForce = int('001811A2', 16)
PaddleDescription = int('001811A4', 16)
DateofLastCalibration = int('00181200', 16)
TimeofLastCalibration = int('00181201', 16)
DateTimeofLastCalibration = int('00181202', 16)
ConvolutionKernel = int('00181210', 16)
UpperLowerPixelValues = int('00181240', 16)
ActualFrameDuration = int('00181242', 16)
CountRate = int('00181243', 16)
PreferredPlaybackSequencing = int('00181244', 16)
# DICOM group 0x0018 (acquisition) data element tags.
# Each constant is the combined (group << 16) | element tag value, written as a
# hexadecimal literal; e.g. ReceiveCoilName is tag (0018,1250).
ReceiveCoilName = 0x00181250
TransmitCoilName = 0x00181251
PlateType = 0x00181260
PhosphorType = 0x00181261
WaterEquivalentDiameter = 0x00181271
WaterEquivalentDiameterCalculationMethodCodeSequence = 0x00181272
ScanVelocity = 0x00181300
WholeBodyTechnique = 0x00181301
ScanLength = 0x00181302
AcquisitionMatrix = 0x00181310
InplanePhaseEncodingDirection = 0x00181312
FlipAngle = 0x00181314
VariableFlipAngleFlag = 0x00181315
SAR = 0x00181316
dBdt = 0x00181318
B1rms = 0x00181320
AcquisitionDeviceProcessingDescription = 0x00181400
AcquisitionDeviceProcessingCode = 0x00181401
CassetteOrientation = 0x00181402
CassetteSize = 0x00181403
ExposuresonPlate = 0x00181404
RelativeXRayExposure = 0x00181405
ExposureIndex = 0x00181411
TargetExposureIndex = 0x00181412
DeviationIndex = 0x00181413
ColumnAngulation = 0x00181450
TomoLayerHeight = 0x00181460
TomoAngle = 0x00181470
TomoTime = 0x00181480
TomoType = 0x00181490
TomoClass = 0x00181491
NumberofTomosynthesisSourceImages = 0x00181495
PositionerMotion = 0x00181500
PositionerType = 0x00181508
PositionerPrimaryAngle = 0x00181510
PositionerSecondaryAngle = 0x00181511
PositionerPrimaryAngleIncrement = 0x00181520
PositionerSecondaryAngleIncrement = 0x00181521
DetectorPrimaryAngle = 0x00181530
DetectorSecondaryAngle = 0x00181531
ShutterShape = 0x00181600
ShutterLeftVerticalEdge = 0x00181602
ShutterRightVerticalEdge = 0x00181604
ShutterUpperHorizontalEdge = 0x00181606
ShutterLowerHorizontalEdge = 0x00181608
CenterofCircularShutter = 0x00181610
RadiusofCircularShutter = 0x00181612
VerticesofthePolygonalShutter = 0x00181620
ShutterPresentationValue = 0x00181622
ShutterOverlayGroup = 0x00181623
ShutterPresentationColorCIELabValue = 0x00181624
CollimatorShape = 0x00181700
CollimatorLeftVerticalEdge = 0x00181702
CollimatorRightVerticalEdge = 0x00181704
CollimatorUpperHorizontalEdge = 0x00181706
CollimatorLowerHorizontalEdge = 0x00181708
CenterofCircularCollimator = 0x00181710
RadiusofCircularCollimator = 0x00181712
VerticesofthePolygonalCollimator = 0x00181720
AcquisitionTimeSynchronized = 0x00181800
TimeSource = 0x00181801
TimeDistributionProtocol = 0x00181802
NTPSourceAddress = 0x00181803
PageNumberVector = 0x00182001
FrameLabelVector = 0x00182002
FramePrimaryAngleVector = 0x00182003
FrameSecondaryAngleVector = 0x00182004
SliceLocationVector = 0x00182005
DisplayWindowLabelVector = 0x00182006
NominalScannedPixelSpacing = 0x00182010
DigitizingDeviceTransportDirection = 0x00182020
RotationofScannedFilm = 0x00182030
BiopsyTargetSequence = 0x00182041
TargetUID = 0x00182042
LocalizingCursorPosition = 0x00182043
CalculatedTargetPosition = 0x00182044
TargetLabel = 0x00182045
DisplayedZValue = 0x00182046
IVUSAcquisition = 0x00183100
IVUSPullbackRate = 0x00183101
IVUSGatedRate = 0x00183102
IVUSPullbackStartFrameNumber = 0x00183103
IVUSPullbackStopFrameNumber = 0x00183104
LesionNumber = 0x00183105
AcquisitionComments = 0x00184000
OutputPower = 0x00185000
TransducerData = 0x00185010
FocusDepth = 0x00185012
ProcessingFunction = 0x00185020
PostprocessingFunction = 0x00185021
MechanicalIndex = 0x00185022
BoneThermalIndex = 0x00185024
CranialThermalIndex = 0x00185026
SoftTissueThermalIndex = 0x00185027
SoftTissuefocusThermalIndex = 0x00185028
SoftTissuesurfaceThermalIndex = 0x00185029
DynamicRange = 0x00185030
TotalGain = 0x00185040
DepthofScanField = 0x00185050
PatientPosition = 0x00185100
ViewPosition = 0x00185101
ProjectionEponymousNameCodeSequence = 0x00185104
ImageTransformationMatrix = 0x00185210
ImageTranslationVector = 0x00185212
Sensitivity = 0x00186000
SequenceofUltrasoundRegions = 0x00186011
RegionSpatialFormat = 0x00186012
RegionDataType = 0x00186014
RegionFlags = 0x00186016
RegionLocationMinX0 = 0x00186018
RegionLocationMinY0 = 0x0018601A
RegionLocationMaxX1 = 0x0018601C
RegionLocationMaxY1 = 0x0018601E
ReferencePixelX0 = 0x00186020
ReferencePixelY0 = 0x00186022
PhysicalUnitsXDirection = 0x00186024
PhysicalUnitsYDirection = 0x00186026
ReferencePixelPhysicalValueX = 0x00186028
ReferencePixelPhysicalValueY = 0x0018602A
PhysicalDeltaX = 0x0018602C
PhysicalDeltaY = 0x0018602E
TransducerFrequency = 0x00186030
TransducerType = 0x00186031
PulseRepetitionFrequency = 0x00186032
DopplerCorrectionAngle = 0x00186034
SteeringAngle = 0x00186036
DopplerSampleVolumeXPositionRetired = 0x00186038
DopplerSampleVolumeXPosition = 0x00186039
DopplerSampleVolumeYPositionRetired = 0x0018603A
DopplerSampleVolumeYPosition = 0x0018603B
TMLinePositionX0Retired = 0x0018603C
TMLinePositionX0 = 0x0018603D
TMLinePositionY0Retired = 0x0018603E
TMLinePositionY0 = 0x0018603F
TMLinePositionX1Retired = 0x00186040
TMLinePositionX1 = 0x00186041
TMLinePositionY1Retired = 0x00186042
TMLinePositionY1 = 0x00186043
PixelComponentOrganization = 0x00186044
PixelComponentMask = 0x00186046
PixelComponentRangeStart = 0x00186048
PixelComponentRangeStop = 0x0018604A
PixelComponentPhysicalUnits = 0x0018604C
PixelComponentDataType = 0x0018604E
NumberofTableBreakPoints = 0x00186050
TableofXBreakPoints = 0x00186052
TableofYBreakPoints = 0x00186054
NumberofTableEntries = 0x00186056
TableofPixelValues = 0x00186058
TableofParameterValues = 0x0018605A
RWaveTimeVector = 0x00186060
DetectorConditionsNominalFlag = 0x00187000
DetectorTemperature = 0x00187001
DetectorType = 0x00187004
DetectorConfiguration = 0x00187005
DetectorDescription = 0x00187006
DetectorMode = 0x00187008
DetectorID = 0x0018700A
DateofLastDetectorCalibration = 0x0018700C
TimeofLastDetectorCalibration = 0x0018700E
ExposuresonDetectorSinceLastCalibration = 0x00187010
ExposuresonDetectorSinceManufactured = 0x00187011
DetectorTimeSinceLastExposure = 0x00187012
DetectorActiveTime = 0x00187014
DetectorActivationOffsetFromExposure = 0x00187016
DetectorBinning = 0x0018701A
DetectorElementPhysicalSize = 0x00187020
DetectorElementSpacing = 0x00187022
DetectorActiveShape = 0x00187024
DetectorActiveDimensions = 0x00187026
DetectorActiveOrigin = 0x00187028
DetectorManufacturerName = 0x0018702A
DetectorManufacturersModelName = 0x0018702B
FieldofViewOrigin = 0x00187030
FieldofViewRotation = 0x00187032
FieldofViewHorizontalFlip = 0x00187034
PixelDataAreaOriginRelativeToFOV = 0x00187036
PixelDataAreaRotationAngleRelativeToFOV = 0x00187038
GridAbsorbingMaterial = 0x00187040
GridSpacingMaterial = 0x00187041
GridThickness = 0x00187042
GridPitch = 0x00187044
GridAspectRatio = 0x00187046
GridPeriod = 0x00187048
GridFocalDistance = 0x0018704C
FilterMaterial = 0x00187050
FilterThicknessMinimum = 0x00187052
FilterThicknessMaximum = 0x00187054
FilterBeamPathLengthMinimum = 0x00187056
FilterBeamPathLengthMaximum = 0x00187058
ExposureControlMode = 0x00187060
ExposureControlModeDescription = 0x00187062
ExposureStatus = 0x00187064
PhototimerSetting = 0x00187065
ExposureTimeinS = 0x00188150
XRayTubeCurrentinA = 0x00188151
ContentQualification = 0x00189004
PulseSequenceName = 0x00189005
MRImagingModifierSequence = 0x00189006
EchoPulseSequence = 0x00189008
InversionRecovery = 0x00189009
FlowCompensation = 0x00189010
MultipleSpinEcho = 0x00189011
MultiplanarExcitation = 0x00189012
PhaseContrast = 0x00189014
TimeofFlightContrast = 0x00189015
Spoiling = 0x00189016
SteadyStatePulseSequence = 0x00189017
EchoPlanarPulseSequence = 0x00189018
TagAngleFirstAxis = 0x00189019
MagnetizationTransfer = 0x00189020
T2Preparation = 0x00189021
BloodSignalNulling = 0x00189022
SaturationRecovery = 0x00189024
SpectrallySelectedSuppression = 0x00189025
SpectrallySelectedExcitation = 0x00189026
SpatialPresaturation = 0x00189027
Tagging = 0x00189028
OversamplingPhase = 0x00189029
TagSpacingFirstDimension = 0x00189030
GeometryofkSpaceTraversal = 0x00189032
SegmentedkSpaceTraversal = 0x00189033
RectilinearPhaseEncodeReordering = 0x00189034
TagThickness = 0x00189035
PartialFourierDirection = 0x00189036
CardiacSynchronizationTechnique = 0x00189037
ReceiveCoilManufacturerName = 0x00189041
MRReceiveCoilSequence = 0x00189042
ReceiveCoilType = 0x00189043
QuadratureReceiveCoil = 0x00189044
MultiCoilDefinitionSequence = 0x00189045
MultiCoilConfiguration = 0x00189046
MultiCoilElementName = 0x00189047
MultiCoilElementUsed = 0x00189048
MRTransmitCoilSequence = 0x00189049
TransmitCoilManufacturerName = 0x00189050
TransmitCoilType = 0x00189051
SpectralWidth = 0x00189052
ChemicalShiftReference = 0x00189053
VolumeLocalizationTechnique = 0x00189054
MRAcquisitionFrequencyEncodingSteps = 0x00189058
Decoupling = 0x00189059
DecoupledNucleus = 0x00189060
DecouplingFrequency = 0x00189061
DecouplingMethod = 0x00189062
DecouplingChemicalShiftReference = 0x00189063
kspaceFiltering = 0x00189064
TimeDomainFiltering = 0x00189065
NumberofZeroFills = 0x00189066
BaselineCorrection = 0x00189067
ParallelReductionFactorInplane = 0x00189069
CardiacRRIntervalSpecified = 0x00189070
AcquisitionDuration = 0x00189073
FrameAcquisitionDateTime = 0x00189074
DiffusionDirectionality = 0x00189075
DiffusionGradientDirectionSequence = 0x00189076
ParallelAcquisition = 0x00189077
ParallelAcquisitionTechnique = 0x00189078
InversionTimes = 0x00189079
MetaboliteMapDescription = 0x00189080
PartialFourier = 0x00189081
EffectiveEchoTime = 0x00189082
MetaboliteMapCodeSequence = 0x00189083
ChemicalShiftSequence = 0x00189084
CardiacSignalSource = 0x00189085
Diffusionbvalue = 0x00189087
DiffusionGradientOrientation = 0x00189089
VelocityEncodingDirection = 0x00189090
VelocityEncodingMinimumValue = 0x00189091
VelocityEncodingAcquisitionSequence = 0x00189092
NumberofkSpaceTrajectories = 0x00189093
CoverageofkSpace = 0x00189094
SpectroscopyAcquisitionPhaseRows = 0x00189095
ParallelReductionFactorInplaneRetired = 0x00189096
TransmitterFrequency = 0x00189098
ResonantNucleus = 0x00189100
FrequencyCorrection = 0x00189101
MRSpectroscopyFOVGeometrySequence = 0x00189103
SlabThickness = 0x00189104
SlabOrientation = 0x00189105
MidSlabPosition = 0x00189106
MRSpatialSaturationSequence = 0x00189107
MRTimingandRelatedParametersSequence = 0x00189112
MREchoSequence = 0x00189114
MRModifierSequence = 0x00189115
MRDiffusionSequence = 0x00189117
CardiacSynchronizationSequence = 0x00189118
MRAveragesSequence = 0x00189119
MRFOVGeometrySequence = 0x00189125
VolumeLocalizationSequence = 0x00189126
SpectroscopyAcquisitionDataColumns = 0x00189127
DiffusionAnisotropyType = 0x00189147
FrameReferenceDateTime = 0x00189151
MRMetaboliteMapSequence = 0x00189152
ParallelReductionFactoroutofplane = 0x00189155
SpectroscopyAcquisitionOutofplanePhaseSteps = 0x00189159
BulkMotionStatus = 0x00189166
ParallelReductionFactorSecondInplane = 0x00189168
CardiacBeatRejectionTechnique = 0x00189169
RespiratoryMotionCompensationTechnique = 0x00189170
RespiratorySignalSource = 0x00189171
BulkMotionCompensationTechnique = 0x00189172
BulkMotionSignalSource = 0x00189173
ApplicableSafetyStandardAgency = 0x00189174
ApplicableSafetyStandardDescription = 0x00189175
OperatingModeSequence = 0x00189176
OperatingModeType = 0x00189177
OperatingMode = 0x00189178
SpecificAbsorptionRateDefinition = 0x00189179
GradientOutputType = 0x00189180
SpecificAbsorptionRateValue = 0x00189181
GradientOutput = 0x00189182
FlowCompensationDirection = 0x00189183
TaggingDelay = 0x00189184
RespiratoryMotionCompensationTechniqueDescription = 0x00189185
RespiratorySignalSourceID = 0x00189186
ChemicalShiftMinimumIntegrationLimitinHz = 0x00189195
ChemicalShiftMaximumIntegrationLimitinHz = 0x00189196
MRVelocityEncodingSequence = 0x00189197
FirstOrderPhaseCorrection = 0x00189198
WaterReferencedPhaseCorrection = 0x00189199
MRSpectroscopyAcquisitionType = 0x00189200
RespiratoryCyclePosition = 0x00189214
VelocityEncodingMaximumValue = 0x00189217
TagSpacingSecondDimension = 0x00189218
TagAngleSecondAxis = 0x00189219
FrameAcquisitionDuration = 0x00189220
MRImageFrameTypeSequence = 0x00189226
MRSpectroscopyFrameTypeSequence = 0x00189227
MRAcquisitionPhaseEncodingStepsinplane = 0x00189231
MRAcquisitionPhaseEncodingStepsoutofplane = 0x00189232
SpectroscopyAcquisitionPhaseColumns = 0x00189234
CardiacCyclePosition = 0x00189236
SpecificAbsorptionRateSequence = 0x00189239
RFEchoTrainLength = 0x00189240
GradientEchoTrainLength = 0x00189241
ArterialSpinLabelingContrast = 0x00189250
MRArterialSpinLabelingSequence = 0x00189251
ASLTechniqueDescription = 0x00189252
ASLSlabNumber = 0x00189253
ASLSlabThickness = 0x00189254
ASLSlabOrientation = 0x00189255
ASLMidSlabPosition = 0x00189256
ASLContext = 0x00189257
ASLPulseTrainDuration = 0x00189258
ASLCrusherFlag = 0x00189259
ASLCrusherFlowLimit = 0x0018925A
ASLCrusherDescription = 0x0018925B
ASLBolusCutoffFlag = 0x0018925C
ASLBolusCutoffTimingSequence = 0x0018925D
ASLBolusCutoffTechnique = 0x0018925E
ASLBolusCutoffDelayTime = 0x0018925F
ASLSlabSequence = 0x00189260
ChemicalShiftMinimumIntegrationLimitinppm = 0x00189295
ChemicalShiftMaximumIntegrationLimitinppm = 0x00189296
WaterReferenceAcquisition = 0x00189297
EchoPeakPosition = 0x00189298
CTAcquisitionTypeSequence = 0x00189301
AcquisitionType = 0x00189302
TubeAngle = 0x00189303
CTAcquisitionDetailsSequence = 0x00189304
RevolutionTime = 0x00189305
SingleCollimationWidth = 0x00189306
TotalCollimationWidth = 0x00189307
CTTableDynamicsSequence = 0x00189308
TableSpeed = 0x00189309
TableFeedperRotation = 0x00189310
SpiralPitchFactor = 0x00189311
CTGeometrySequence = 0x00189312
DataCollectionCenterPatient = 0x00189313
CTReconstructionSequence = 0x00189314
ReconstructionAlgorithm = 0x00189315
ConvolutionKernelGroup = 0x00189316
ReconstructionFieldofView = 0x00189317
ReconstructionTargetCenterPatient = 0x00189318
ReconstructionAngle = 0x00189319
ImageFilter = 0x00189320
CTExposureSequence = 0x00189321
ReconstructionPixelSpacing = 0x00189322
ExposureModulationType = 0x00189323
EstimatedDoseSaving = 0x00189324
CTXRayDetailsSequence = 0x00189325
CTPositionSequence = 0x00189326
TablePosition = 0x00189327
ExposureTimeinms = 0x00189328
CTImageFrameTypeSequence = 0x00189329
XRayTubeCurrentinmA = 0x00189330
ExposureinmAs = 0x00189332
ConstantVolumeFlag = 0x00189333
FluoroscopyFlag = 0x00189334
DistanceSourcetoDataCollectionCenter = 0x00189335
ContrastBolusAgentNumber = 0x00189337
ContrastBolusIngredientCodeSequence = 0x00189338
ContrastAdministrationProfileSequence = 0x00189340
ContrastBolusUsageSequence = 0x00189341
ContrastBolusAgentAdministered = 0x00189342
ContrastBolusAgentDetected = 0x00189343
ContrastBolusAgentPhase = 0x00189344
CTDIvol = 0x00189345
CTDIPhantomTypeCodeSequence = 0x00189346
CalciumScoringMassFactorPatient = 0x00189351
CalciumScoringMassFactorDevice = 0x00189352
EnergyWeightingFactor = 0x00189353
CTAdditionalXRaySourceSequence = 0x00189360
ProjectionPixelCalibrationSequence = 0x00189401
DistanceSourcetoIsocenter = 0x00189402
DistanceObjecttoTableTop = 0x00189403
ObjectPixelSpacinginCenterofBeam = 0x00189404
PositionerPositionSequence = 0x00189405
TablePositionSequence = 0x00189406
CollimatorShapeSequence = 0x00189407
PlanesinAcquisition = 0x00189410
XAXRFFrameCharacteristicsSequence = 0x00189412
FrameAcquisitionSequence = 0x00189417
XRayReceptorType = 0x00189420
AcquisitionProtocolName = 0x00189423
AcquisitionProtocolDescription = 0x00189424
ContrastBolusIngredientOpaque = 0x00189425
DistanceReceptorPlanetoDetectorHousing = 0x00189426
IntensifierActiveShape = 0x00189427
IntensifierActiveDimensions = 0x00189428
PhysicalDetectorSize = 0x00189429
PositionofIsocenterProjection = 0x00189430
FieldofViewSequence = 0x00189432
FieldofViewDescription = 0x00189433
ExposureControlSensingRegionsSequence = 0x00189434
ExposureControlSensingRegionShape = 0x00189435
ExposureControlSensingRegionLeftVerticalEdge = 0x00189436
ExposureControlSensingRegionRightVerticalEdge = 0x00189437
ExposureControlSensingRegionUpperHorizontalEdge = 0x00189438
ExposureControlSensingRegionLowerHorizontalEdge = 0x00189439
CenterofCircularExposureControlSensingRegion = 0x00189440
RadiusofCircularExposureControlSensingRegion = 0x00189441
VerticesofthePolygonalExposureControlSensingRegion = 0x00189442
ColumnAngulationPatient = 0x00189447
BeamAngle = 0x00189449
FrameDetectorParametersSequence = 0x00189451
CalculatedAnatomyThickness = 0x00189452
CalibrationSequence = 0x00189455
ObjectThicknessSequence = 0x00189456
PlaneIdentification = 0x00189457
FieldofViewDimensionsinFloat = 0x00189461
IsocenterReferenceSystemSequence = 0x00189462
PositionerIsocenterPrimaryAngle = 0x00189463
PositionerIsocenterSecondaryAngle = 0x00189464
PositionerIsocenterDetectorRotationAngle = 0x00189465
TableXPositiontoIsocenter = 0x00189466
TableYPositiontoIsocenter = 0x00189467
TableZPositiontoIsocenter = 0x00189468
TableHorizontalRotationAngle = 0x00189469
TableHeadTiltAngle = 0x00189470
TableCradleTiltAngle = 0x00189471
FrameDisplayShutterSequence = 0x00189472
AcquiredImageAreaDoseProduct = 0x00189473
CarmPositionerTabletopRelationship = 0x00189474
XRayGeometrySequence = 0x00189476
IrradiationEventIdentificationSequence = 0x00189477
XRay3DFrameTypeSequence = 0x00189504
ContributingSourcesSequence = 0x00189506
XRay3DAcquisitionSequence = 0x00189507
PrimaryPositionerScanArc = 0x00189508
SecondaryPositionerScanArc = 0x00189509
PrimaryPositionerScanStartAngle = 0x00189510
SecondaryPositionerScanStartAngle = 0x00189511
PrimaryPositionerIncrement = 0x00189514
SecondaryPositionerIncrement = 0x00189515
StartAcquisitionDateTime = 0x00189516
EndAcquisitionDateTime = 0x00189517
PrimaryPositionerIncrementSign = 0x00189518
SecondaryPositionerIncrementSign = 0x00189519
ApplicationName = 0x00189524
ApplicationVersion = 0x00189525
ApplicationManufacturer = 0x00189526
AlgorithmType = 0x00189527
AlgorithmDescription = 0x00189528
XRay3DReconstructionSequence = 0x00189530
ReconstructionDescription = 0x00189531
PerProjectionAcquisitionSequence = 0x00189538
DetectorPositionSequence = 0x00189541
XRayAcquisitionDoseSequence = 0x00189542
XRaySourceIsocenterPrimaryAngle = 0x00189543
XRaySourceIsocenterSecondaryAngle = 0x00189544
BreastSupportIsocenterPrimaryAngle = 0x00189545
BreastSupportIsocenterSecondaryAngle = 0x00189546
BreastSupportXPositiontoIsocenter = 0x00189547
BreastSupportYPositiontoIsocenter = 0x00189548
BreastSupportZPositiontoIsocenter = 0x00189549
DetectorIsocenterPrimaryAngle = 0x00189550
DetectorIsocenterSecondaryAngle = 0x00189551
DetectorXPositiontoIsocenter = 0x00189552
DetectorYPositiontoIsocenter = 0x00189553
DetectorZPositiontoIsocenter = 0x00189554
XRayGridSequence = 0x00189555
XRayFilterSequence = 0x00189556
DetectorActiveAreaTLHCPosition = 0x00189557
DetectorActiveAreaOrientation = 0x00189558
PositionerPrimaryAngleDirection = 0x00189559
DiffusionbmatrixSequence = 0x00189601
DiffusionbvalueXX = 0x00189602
DiffusionbvalueXY = 0x00189603
DiffusionbvalueXZ = 0x00189604
DiffusionbvalueYY = 0x00189605
DiffusionbvalueYZ = 0x00189606
DiffusionbvalueZZ = 0x00189607
FunctionalMRSequence = 0x00189621
FunctionalSettlingPhaseFramesPresent = 0x00189622
FunctionalSyncPulse = 0x00189623
SettlingPhaseFrame = 0x00189624
DecayCorrectionDateTime = 0x00189701
StartDensityThreshold = 0x00189715
StartRelativeDensityDifferenceThreshold = 0x00189716
StartCardiacTriggerCountThreshold = 0x00189717
StartRespiratoryTriggerCountThreshold = 0x00189718
TerminationCountsThreshold = 0x00189719
TerminationDensityThreshold = 0x00189720
TerminationRelativeDensityThreshold = 0x00189721
TerminationTimeThreshold = 0x00189722
TerminationCardiacTriggerCountThreshold = 0x00189723
TerminationRespiratoryTriggerCountThreshold = 0x00189724
DetectorGeometry = 0x00189725
TransverseDetectorSeparation = 0x00189726
AxialDetectorDimension = 0x00189727
RadiopharmaceuticalAgentNumber = 0x00189729
PETFrameAcquisitionSequence = 0x00189732
PETDetectorMotionDetailsSequence = 0x00189733
PETTableDynamicsSequence = 0x00189734
PETPositionSequence = 0x00189735
PETFrameCorrectionFactorsSequence = 0x00189736
RadiopharmaceuticalUsageSequence = 0x00189737
AttenuationCorrectionSource = 0x00189738
NumberofIterations = 0x00189739
NumberofSubsets = 0x00189740
PETReconstructionSequence = 0x00189749
PETFrameTypeSequence = 0x00189751
TimeofFlightInformationUsed = 0x00189755
ReconstructionType = 0x00189756
DecayCorrected = 0x00189758
AttenuationCorrected = 0x00189759
ScatterCorrected = 0x00189760
DeadTimeCorrected = 0x00189761
GantryMotionCorrected = 0x00189762
PatientMotionCorrected = 0x00189763
CountLossNormalizationCorrected = 0x00189764
RandomsCorrected = 0x00189765
NonuniformRadialSamplingCorrected = 0x00189766
SensitivityCalibrated = 0x00189767
DetectorNormalizationCorrection = 0x00189768
IterativeReconstructionMethod = 0x00189769
AttenuationCorrectionTemporalRelationship = 0x00189770
PatientPhysiologicalStateSequence = 0x00189771
PatientPhysiologicalStateCodeSequence = 0x00189772
DepthsofFocus = 0x00189801
ExcludedIntervalsSequence = 0x00189803
ExclusionStartDateTime = 0x00189804
ExclusionDuration = 0x00189805
USImageDescriptionSequence = 0x00189806
ImageDataTypeSequence = 0x00189807
DataType = 0x00189808
TransducerScanPatternCodeSequence = 0x00189809
AliasedDataType = 0x0018980B
PositionMeasuringDeviceUsed = 0x0018980C
TransducerGeometryCodeSequence = 0x0018980D
TransducerBeamSteeringCodeSequence = 0x0018980E
TransducerApplicationCodeSequence = 0x0018980F
ZeroVelocityPixelValue = 0x00189810
ContributingEquipmentSequence = 0x0018A001
ContributionDateTime = 0x0018A002
ContributionDescription = 0x0018A003
# DICOM group 0x0020 (image / relationship) data element tags.
# Each constant is the combined (group << 16) | element tag value, written as a
# hexadecimal literal; e.g. StudyInstanceUID is tag (0020,000D).
StudyInstanceUID = 0x0020000D
SeriesInstanceUID = 0x0020000E
StudyID = 0x00200010
SeriesNumber = 0x00200011
AcquisitionNumber = 0x00200012
InstanceNumber = 0x00200013
IsotopeNumber = 0x00200014
PhaseNumber = 0x00200015
IntervalNumber = 0x00200016
TimeSlotNumber = 0x00200017
AngleNumber = 0x00200018
ItemNumber = 0x00200019
PatientOrientation = 0x00200020
OverlayNumber = 0x00200022
CurveNumber = 0x00200024
LUTNumber = 0x00200026
ImagePosition = 0x00200030
ImagePositionPatient = 0x00200032
ImageOrientation = 0x00200035
ImageOrientationPatient = 0x00200037
Location = 0x00200050
FrameofReferenceUID = 0x00200052
Laterality = 0x00200060
ImageLaterality = 0x00200062
ImageGeometryType = 0x00200070
MaskingImage = 0x00200080
ReportNumber = 0x002000AA
TemporalPositionIdentifier = 0x00200100
NumberofTemporalPositions = 0x00200105
TemporalResolution = 0x00200110
SynchronizationFrameofReferenceUID = 0x00200200
SOPInstanceUIDofConcatenationSource = 0x00200242
SeriesinStudy = 0x00201000
AcquisitionsinSeries = 0x00201001
ImagesinAcquisition = 0x00201002
ImagesinSeries = 0x00201003
AcquisitionsinStudy = 0x00201004
ImagesinStudy = 0x00201005
Reference = 0x00201020
PositionReferenceIndicator = 0x00201040
SliceLocation = 0x00201041
OtherStudyNumbers = 0x00201070
NumberofPatientRelatedStudies = 0x00201200
NumberofPatientRelatedSeries = 0x00201202
NumberofPatientRelatedInstances = 0x00201204
NumberofStudyRelatedSeries = 0x00201206
NumberofStudyRelatedInstances = 0x00201208
NumberofSeriesRelatedInstances = 0x00201209
ModifyingDeviceID = 0x00203401
ModifiedImageID = 0x00203402
ModifiedImageDate = 0x00203403
ModifyingDeviceManufacturer = 0x00203404
ModifiedImageTime = 0x00203405
ModifiedImageDescription = 0x00203406
ImageComments = 0x00204000
OriginalImageIdentification = 0x00205000
OriginalImageIdentificationNomenclature = 0x00205002
StackID = 0x00209056
InStackPositionNumber = 0x00209057
FrameAnatomySequence = 0x00209071
FrameLaterality = 0x00209072
FrameContentSequence = 0x00209111
PlanePositionSequence = 0x00209113
PlaneOrientationSequence = 0x00209116
TemporalPositionIndex = 0x00209128
NominalCardiacTriggerDelayTime = 0x00209153
NominalCardiacTriggerTimePriorToRPeak = 0x00209154
ActualCardiacTriggerTimePriorToRPeak = 0x00209155
FrameAcquisitionNumber = 0x00209156
DimensionIndexValues = 0x00209157
FrameComments = 0x00209158
ConcatenationUID = 0x00209161
InconcatenationNumber = 0x00209162
InconcatenationTotalNumber = 0x00209163
DimensionOrganizationUID = 0x00209164
DimensionIndexPointer = 0x00209165
FunctionalGroupPointer = 0x00209167
UnassignedSharedConvertedAttributesSequence = 0x00209170
UnassignedPerFrameConvertedAttributesSequence = 0x00209171
ConversionSourceAttributesSequence = 0x00209172
DimensionIndexPrivateCreator = 0x00209213
DimensionOrganizationSequence = 0x00209221
DimensionIndexSequence = 0x00209222
ConcatenationFrameOffsetNumber = 0x00209228
FunctionalGroupPrivateCreator = 0x00209238
NominalPercentageofCardiacPhase = 0x00209241
NominalPercentageofRespiratoryPhase = 0x00209245
StartingRespiratoryAmplitude = 0x00209246
StartingRespiratoryPhase = 0x00209247
EndingRespiratoryAmplitude = 0x00209248
EndingRespiratoryPhase = 0x00209249
RespiratoryTriggerType = 0x00209250
RRIntervalTimeNominal = 0x00209251
ActualCardiacTriggerDelayTime = 0x00209252
RespiratorySynchronizationSequence = 0x00209253
RespiratoryIntervalTime = 0x00209254
NominalRespiratoryTriggerDelayTime = 0x00209255
RespiratoryTriggerDelayThreshold = 0x00209256
ActualRespiratoryTriggerDelayTime = 0x00209257
ImagePositionVolume = 0x00209301
ImageOrientationVolume = 0x00209302
UltrasoundAcquisitionGeometry = 0x00209307
ApexPosition = 0x00209308
VolumetoTransducerMappingMatrix = 0x00209309
VolumetoTableMappingMatrix = 0x0020930A
VolumetoTransducerRelationship = 0x0020930B
PatientFrameofReferenceSource = 0x0020930C
TemporalPositionTimeOffset = 0x0020930D
PlanePositionVolumeSequence = 0x0020930E
PlaneOrientationVolumeSequence = 0x0020930F
TemporalPositionSequence = 0x00209310
DimensionOrganizationType = 0x00209311
VolumeFrameofReferenceUID = 0x00209312
TableFrameofReferenceUID = 0x00209313
DimensionDescriptionLabel = 0x00209421
PatientOrientationinFrameSequence = 0x00209450
FrameLabel = 0x00209453
AcquisitionIndex = 0x00209518
ContributingSOPInstancesReferenceSequence = 0x00209529
ReconstructionIndex = 0x00209536
# DICOM group 0x0022 (ophthalmic) data element tags.
# Each constant is the combined (group << 16) | element tag value, written as a
# hexadecimal literal; e.g. LightPathFilterPassThroughWavelength is tag (0022,0001).
LightPathFilterPassThroughWavelength = 0x00220001
LightPathFilterPassBand = 0x00220002
ImagePathFilterPassThroughWavelength = 0x00220003
ImagePathFilterPassBand = 0x00220004
PatientEyeMovementCommanded = 0x00220005
PatientEyeMovementCommandCodeSequence = 0x00220006
SphericalLensPower = 0x00220007
CylinderLensPower = 0x00220008
CylinderAxis = 0x00220009
EmmetropicMagnification = 0x0022000A
IntraOcularPressure = 0x0022000B
HorizontalFieldofView = 0x0022000C
PupilDilated = 0x0022000D
DegreeofDilation = 0x0022000E
StereoBaselineAngle = 0x00220010
StereoBaselineDisplacement = 0x00220011
StereoHorizontalPixelOffset = 0x00220012
StereoVerticalPixelOffset = 0x00220013
StereoRotation = 0x00220014
AcquisitionDeviceTypeCodeSequence = 0x00220015
IlluminationTypeCodeSequence = 0x00220016
LightPathFilterTypeStackCodeSequence = 0x00220017
ImagePathFilterTypeStackCodeSequence = 0x00220018
LensesCodeSequence = 0x00220019
ChannelDescriptionCodeSequence = 0x0022001A
RefractiveStateSequence = 0x0022001B
MydriaticAgentCodeSequence = 0x0022001C
RelativeImagePositionCodeSequence = 0x0022001D
CameraAngleofView = 0x0022001E
StereoPairsSequence = 0x00220020
LeftImageSequence = 0x00220021
RightImageSequence = 0x00220022
StereoPairsPresent = 0x00220028
AxialLengthoftheEye = 0x00220030
OphthalmicFrameLocationSequence = 0x00220031
ReferenceCoordinates = 0x00220032
DepthSpatialResolution = 0x00220035
MaximumDepthDistortion = 0x00220036
AlongscanSpatialResolution = 0x00220037
MaximumAlongscanDistortion = 0x00220038
OphthalmicImageOrientation = 0x00220039
DepthofTransverseImage = 0x00220041
MydriaticAgentConcentrationUnitsSequence = 0x00220042
AcrossscanSpatialResolution = 0x00220048
MaximumAcrossscanDistortion = 0x00220049
MydriaticAgentConcentration = 0x0022004E
IlluminationWaveLength = 0x00220055
IlluminationPower = 0x00220056
IlluminationBandwidth = 0x00220057
MydriaticAgentSequence = 0x00220058
OphthalmicAxialMeasurementsRightEyeSequence = 0x00221007
OphthalmicAxialMeasurementsLeftEyeSequence = 0x00221008
OphthalmicAxialMeasurementsDeviceType = 0x00221009
OphthalmicAxialLengthMeasurementsType = 0x00221010
OphthalmicAxialLengthSequence = 0x00221012
OphthalmicAxialLength = 0x00221019
LensStatusCodeSequence = 0x00221024
VitreousStatusCodeSequence = 0x00221025
IOLFormulaCodeSequence = 0x00221028
IOLFormulaDetail = 0x00221029
KeratometerIndex = 0x00221033
SourceofOphthalmicAxialLengthCodeSequence = 0x00221035
TargetRefraction = 0x00221037
RefractiveProcedureOccurred = 0x00221039
RefractiveSurgeryTypeCodeSequence = 0x00221040
OphthalmicUltrasoundMethodCodeSequence = 0x00221044
OphthalmicAxialLengthMeasurementsSequence = 0x00221050
IOLPower = 0x00221053
PredictedRefractiveError = 0x00221054
OphthalmicAxialLengthVelocity = 0x00221059
LensStatusDescription = 0x00221065
VitreousStatusDescription = 0x00221066
IOLPowerSequence = 0x00221090
LensConstantSequence = 0x00221092
IOLManufacturer = 0x00221093
LensConstantDescription = 0x00221094
ImplantName = 0x00221095
KeratometryMeasurementTypeCodeSequence = 0x00221096
ImplantPartNumber = 0x00221097
ReferencedOphthalmicAxialMeasurementsSequence = 0x00221100
OphthalmicAxialLengthMeasurementsSegmentNameCodeSequence = 0x00221101
RefractiveErrorBeforeRefractiveSurgeryCodeSequence = 0x00221103
IOLPowerForExactEmmetropia = 0x00221121
IOLPowerForExactTargetRefraction = 0x00221122
AnteriorChamberDepthDefinitionCodeSequence = 0x00221125
LensThicknessSequence = 0x00221127
AnteriorChamberDepthSequence = 0x00221128
LensThickness = 0x00221130
AnteriorChamberDepth = 0x00221131
SourceofLensThicknessDataCodeSequence = 0x00221132
SourceofAnteriorChamberDepthDataCodeSequence = 0x00221133
SourceofRefractiveMeasurementsSequence = int('00221134', 16)
SourceofRefractiveMeasurementsCodeSequence = int('00221135', 16)
OphthalmicAxialLengthMeasurementModified = int('00221140', 16)
OphthalmicAxialLengthDataSourceCodeSequence = int('00221150', 16)
OphthalmicAxialLengthAcquisitionMethodCodeSequence = int('00221153', 16)
SignaltoNoiseRatio = int('00221155', 16)
OphthalmicAxialLengthDataSourceDescription = int('00221159', 16)
OphthalmicAxialLengthMeasurementsTotalLengthSequence = int('00221210', 16)
OphthalmicAxialLengthMeasurementsSegmentalLengthSequence = int(
'00221211', 16)
OphthalmicAxialLengthMeasurementsLengthSummationSequence = int(
'00221212', 16)
UltrasoundOphthalmicAxialLengthMeasurementsSequence = int('00221220', 16)
OpticalOphthalmicAxialLengthMeasurementsSequence = int('00221225', 16)
UltrasoundSelectedOphthalmicAxialLengthSequence = int('00221230', 16)
OphthalmicAxialLengthSelectionMethodCodeSequence = int('00221250', 16)
OpticalSelectedOphthalmicAxialLengthSequence = int('00221255', 16)
SelectedSegmentalOphthalmicAxialLengthSequence = int('00221257', 16)
SelectedTotalOphthalmicAxialLengthSequence = int('00221260', 16)
OphthalmicAxialLengthQualityMetricSequence = int('00221262', 16)
OphthalmicAxialLengthQualityMetricTypeCodeSequence = int('00221265', 16)
OphthalmicAxialLengthQualityMetricTypeDescription = int('00221273', 16)
IntraocularLensCalculationsRightEyeSequence = int('00221300', 16)
IntraocularLensCalculationsLeftEyeSequence = int('00221310', 16)
ReferencedOphthalmicAxialLengthMeasurementQCImageSequence = int(
'00221330', 16)
OphthalmicMappingDeviceType = int('00221415', 16)
AcquisitionMethodCodeSequence = int('00221420', 16)
AcquisitionMethodAlgorithmSequence = int('00221423', 16)
OphthalmicThicknessMapTypeCodeSequence = int('00221436', 16)
OphthalmicThicknessMappingNormalsSequence = int('00221443', 16)
RetinalThicknessDefinitionCodeSequence = int('00221445', 16)
PixelValueMappingtoCodedConceptSequence = int('00221450', 16)
MappedPixelValue = int('00221452', 16)
PixelValueMappingExplanation = int('00221454', 16)
OphthalmicThicknessMapQualityThresholdSequence = int('00221458', 16)
OphthalmicThicknessMapThresholdQualityRating = int('00221460', 16)
AnatomicStructureReferencePoint = int('00221463', 16)
RegistrationtoLocalizerSequence = int('00221465', 16)
RegisteredLocalizerUnits = int('00221466', 16)
RegisteredLocalizerTopLeftHandCorner = int('00221467', 16)
RegisteredLocalizerBottomRightHandCorner = int('00221468', 16)
OphthalmicThicknessMapQualityRatingSequence = int('00221470', 16)
RelevantOPTAttributesSequence = int('00221472', 16)
TransformationMethodCodeSequence = int('00221512', 16)
TransformationAlgorithmSequence = int('00221513', 16)
OphthalmicAxialLengthMethod = int('00221515', 16)
OphthalmicFOV = int('00221517', 16)
TwoDimensionaltoThreeDimensionalMapSequence = int('00221518', 16)
WideFieldOphthalmicPhotographyQualityRatingSequence = int('00221525', 16)
WideFieldOphthalmicPhotographyQualityThresholdSequence = int(
'00221526', 16)
WideFieldOphthalmicPhotographyThresholdQualityRating = int('00221527', 16)
XCoordinatesCenterPixelViewAngle = int('00221528', 16)
YCoordinatesCenterPixelViewAngle = int('00221529', 16)
NumberofMapPoints = int('00221530', 16)
TwoDimensionaltoThreeDimensionalMapData = int('00221531', 16)
# DICOM data-element tags, group 0x0024 (visual field static perimetry).
# Hex literals replace the equivalent int('…', 16) calls — same values,
# idiomatic form, no forced line wrapping.
VisualFieldHorizontalExtent = 0x00240010
VisualFieldVerticalExtent = 0x00240011
VisualFieldShape = 0x00240012
ScreeningTestModeCodeSequence = 0x00240016
MaximumStimulusLuminance = 0x00240018
BackgroundLuminance = 0x00240020
StimulusColorCodeSequence = 0x00240021
BackgroundIlluminationColorCodeSequence = 0x00240024
StimulusArea = 0x00240025
StimulusPresentationTime = 0x00240028
FixationSequence = 0x00240032
FixationMonitoringCodeSequence = 0x00240033
VisualFieldCatchTrialSequence = 0x00240034
FixationCheckedQuantity = 0x00240035
PatientNotProperlyFixatedQuantity = 0x00240036
PresentedVisualStimuliDataFlag = 0x00240037
NumberofVisualStimuli = 0x00240038
ExcessiveFixationLossesDataFlag = 0x00240039
ExcessiveFixationLosses = 0x00240040
StimuliRetestingQuantity = 0x00240042
CommentsonPatientsPerformanceofVisualField = 0x00240044
FalseNegativesEstimateFlag = 0x00240045
FalseNegativesEstimate = 0x00240046
NegativeCatchTrialsQuantity = 0x00240048
FalseNegativesQuantity = 0x00240050
ExcessiveFalseNegativesDataFlag = 0x00240051
ExcessiveFalseNegatives = 0x00240052
FalsePositivesEstimateFlag = 0x00240053
FalsePositivesEstimate = 0x00240054
CatchTrialsDataFlag = 0x00240055
PositiveCatchTrialsQuantity = 0x00240056
TestPointNormalsDataFlag = 0x00240057
TestPointNormalsSequence = 0x00240058
GlobalDeviationProbabilityNormalsFlag = 0x00240059
FalsePositivesQuantity = 0x00240060
ExcessiveFalsePositivesDataFlag = 0x00240061
ExcessiveFalsePositives = 0x00240062
VisualFieldTestNormalsFlag = 0x00240063
ResultsNormalsSequence = 0x00240064
AgeCorrectedSensitivityDeviationAlgorithmSequence = 0x00240065
GlobalDeviationFromNormal = 0x00240066
GeneralizedDefectSensitivityDeviationAlgorithmSequence = 0x00240067
LocalizedDeviationFromNormal = 0x00240068
PatientReliabilityIndicator = 0x00240069
VisualFieldMeanSensitivity = 0x00240070
GlobalDeviationProbability = 0x00240071
LocalDeviationProbabilityNormalsFlag = 0x00240072
LocalizedDeviationProbability = 0x00240073
ShortTermFluctuationCalculated = 0x00240074
ShortTermFluctuation = 0x00240075
ShortTermFluctuationProbabilityCalculated = 0x00240076
ShortTermFluctuationProbability = 0x00240077
CorrectedLocalizedDeviationFromNormalCalculated = 0x00240078
CorrectedLocalizedDeviationFromNormal = 0x00240079
CorrectedLocalizedDeviationFromNormalProbabilityCalculated = 0x00240080
CorrectedLocalizedDeviationFromNormalProbability = 0x00240081
GlobalDeviationProbabilitySequence = 0x00240083
LocalizedDeviationProbabilitySequence = 0x00240085
FovealSensitivityMeasured = 0x00240086
FovealSensitivity = 0x00240087
VisualFieldTestDuration = 0x00240088
VisualFieldTestPointSequence = 0x00240089
VisualFieldTestPointXCoordinate = 0x00240090
VisualFieldTestPointYCoordinate = 0x00240091
AgeCorrectedSensitivityDeviationValue = 0x00240092
StimulusResults = 0x00240093
SensitivityValue = 0x00240094
RetestStimulusSeen = 0x00240095
RetestSensitivityValue = 0x00240096
VisualFieldTestPointNormalsSequence = 0x00240097
QuantifiedDefect = 0x00240098
AgeCorrectedSensitivityDeviationProbabilityValue = 0x00240100
GeneralizedDefectCorrectedSensitivityDeviationFlag = 0x00240102
GeneralizedDefectCorrectedSensitivityDeviationValue = 0x00240103
GeneralizedDefectCorrectedSensitivityDeviationProbabilityValue = 0x00240104
MinimumSensitivityValue = 0x00240105
BlindSpotLocalized = 0x00240106
BlindSpotXCoordinate = 0x00240107
BlindSpotYCoordinate = 0x00240108
VisualAcuityMeasurementSequence = 0x00240110
RefractiveParametersUsedonPatientSequence = 0x00240112
MeasurementLaterality = 0x00240113
OphthalmicPatientClinicalInformationLeftEyeSequence = 0x00240114
OphthalmicPatientClinicalInformationRightEyeSequence = 0x00240115
FovealPointNormativeDataFlag = 0x00240117
FovealPointProbabilityValue = 0x00240118
ScreeningBaselineMeasured = 0x00240120
ScreeningBaselineMeasuredSequence = 0x00240122
ScreeningBaselineType = 0x00240124
ScreeningBaselineValue = 0x00240126
AlgorithmSource = 0x00240202
DataSetName = 0x00240306
DataSetVersion = 0x00240307
DataSetSource = 0x00240308
DataSetDescription = 0x00240309
VisualFieldTestReliabilityGlobalIndexSequence = 0x00240317
VisualFieldGlobalResultsIndexSequence = 0x00240320
DataObservationSequence = 0x00240325
IndexNormalsFlag = 0x00240338
IndexProbability = 0x00240341
IndexProbabilitySequence = 0x00240344
# DICOM data-element tags, group 0x0028 (image pixel description, LUTs,
# mask subtraction, display subsystem QA). Hex literals replace the
# equivalent int('…', 16) calls — identical values, idiomatic form.
SamplesperPixel = 0x00280002
SamplesperPixelUsed = 0x00280003
PhotometricInterpretation = 0x00280004
ImageDimensions = 0x00280005
PlanarConfiguration = 0x00280006
NumberofFrames = 0x00280008
FrameIncrementPointer = 0x00280009
FrameDimensionPointer = 0x0028000A
Rows = 0x00280010
Columns = 0x00280011
Planes = 0x00280012
UltrasoundColorDataPresent = 0x00280014
PixelSpacing = 0x00280030
ZoomFactor = 0x00280031
ZoomCenter = 0x00280032
PixelAspectRatio = 0x00280034
ImageFormat = 0x00280040
ManipulatedImage = 0x00280050
CorrectedImage = 0x00280051
CompressionRecognitionCode = 0x0028005F
CompressionCode = 0x00280060
CompressionOriginator = 0x00280061
CompressionLabel = 0x00280062
CompressionDescription = 0x00280063
CompressionSequence = 0x00280065
CompressionStepPointers = 0x00280066
RepeatInterval = 0x00280068
BitsGrouped = 0x00280069
PerimeterTable = 0x00280070
PerimeterValue = 0x00280071
PredictorRows = 0x00280080
PredictorColumns = 0x00280081
PredictorConstants = 0x00280082
BlockedPixels = 0x00280090
BlockRows = 0x00280091
BlockColumns = 0x00280092
RowOverlap = 0x00280093
ColumnOverlap = 0x00280094
BitsAllocated = 0x00280100
BitsStored = 0x00280101
HighBit = 0x00280102
PixelRepresentation = 0x00280103
SmallestValidPixelValue = 0x00280104
LargestValidPixelValue = 0x00280105
SmallestImagePixelValue = 0x00280106
LargestImagePixelValue = 0x00280107
SmallestPixelValueinSeries = 0x00280108
LargestPixelValueinSeries = 0x00280109
SmallestImagePixelValueinPlane = 0x00280110
LargestImagePixelValueinPlane = 0x00280111
PixelPaddingValue = 0x00280120
PixelPaddingRangeLimit = 0x00280121
FloatPixelPaddingValue = 0x00280122
DoubleFloatPixelPaddingValue = 0x00280123
FloatPixelPaddingRangeLimit = 0x00280124
DoubleFloatPixelPaddingRangeLimit = 0x00280125
ImageLocation = 0x00280200
QualityControlImage = 0x00280300
BurnedInAnnotation = 0x00280301
RecognizableVisualFeatures = 0x00280302
LongitudinalTemporalInformationModified = 0x00280303
ReferencedColorPaletteInstanceUID = 0x00280304
TransformLabel = 0x00280400
TransformVersionNumber = 0x00280401
NumberofTransformSteps = 0x00280402
SequenceofCompressedData = 0x00280403
DetailsofCoefficients = 0x00280404
DCTLabel = 0x00280700
DataBlockDescription = 0x00280701
DataBlock = 0x00280702
NormalizationFactorFormat = 0x00280710
ZonalMapNumberFormat = 0x00280720
ZonalMapLocation = 0x00280721
ZonalMapFormat = 0x00280722
AdaptiveMapFormat = 0x00280730
CodeNumberFormat = 0x00280740
PixelSpacingCalibrationType = 0x00280A02
PixelSpacingCalibrationDescription = 0x00280A04
PixelIntensityRelationship = 0x00281040
PixelIntensityRelationshipSign = 0x00281041
WindowCenter = 0x00281050
WindowWidth = 0x00281051
RescaleIntercept = 0x00281052
RescaleSlope = 0x00281053
RescaleType = 0x00281054
WindowCenterWidthExplanation = 0x00281055
VOILUTFunction = 0x00281056
GrayScale = 0x00281080
RecommendedViewingMode = 0x00281090
GrayLookupTableDescriptor = 0x00281100
RedPaletteColorLookupTableDescriptor = 0x00281101
GreenPaletteColorLookupTableDescriptor = 0x00281102
BluePaletteColorLookupTableDescriptor = 0x00281103
AlphaPaletteColorLookupTableDescriptor = 0x00281104
LargeRedPaletteColorLookupTableDescriptor = 0x00281111
LargeGreenPaletteColorLookupTableDescriptor = 0x00281112
LargeBluePaletteColorLookupTableDescriptor = 0x00281113
PaletteColorLookupTableUID = 0x00281199
GrayLookupTableData = 0x00281200
RedPaletteColorLookupTableData = 0x00281201
GreenPaletteColorLookupTableData = 0x00281202
BluePaletteColorLookupTableData = 0x00281203
AlphaPaletteColorLookupTableData = 0x00281204
LargeRedPaletteColorLookupTableData = 0x00281211
LargeGreenPaletteColorLookupTableData = 0x00281212
LargeBluePaletteColorLookupTableData = 0x00281213
LargePaletteColorLookupTableUID = 0x00281214
SegmentedRedPaletteColorLookupTableData = 0x00281221
SegmentedGreenPaletteColorLookupTableData = 0x00281222
SegmentedBluePaletteColorLookupTableData = 0x00281223
SegmentedAlphaPaletteColorLookupTableData = 0x00281224
BreastImplantPresent = 0x00281300
PartialView = 0x00281350
PartialViewDescription = 0x00281351
PartialViewCodeSequence = 0x00281352
SpatialLocationsPreserved = 0x0028135A
DataFrameAssignmentSequence = 0x00281401
DataPathAssignment = 0x00281402
BitsMappedtoColorLookupTable = 0x00281403
BlendingLUT1Sequence = 0x00281404
BlendingLUT1TransferFunction = 0x00281405
BlendingWeightConstant = 0x00281406
BlendingLookupTableDescriptor = 0x00281407
BlendingLookupTableData = 0x00281408
EnhancedPaletteColorLookupTableSequence = 0x0028140B
BlendingLUT2Sequence = 0x0028140C
BlendingLUT2TransferFunction = 0x0028140D
DataPathID = 0x0028140E
RGBLUTTransferFunction = 0x0028140F
AlphaLUTTransferFunction = 0x00281410
ICCProfile = 0x00282000
ColorSpace = 0x00282002
LossyImageCompression = 0x00282110
LossyImageCompressionRatio = 0x00282112
LossyImageCompressionMethod = 0x00282114
ModalityLUTSequence = 0x00283000
LUTDescriptor = 0x00283002
LUTExplanation = 0x00283003
ModalityLUTType = 0x00283004
LUTData = 0x00283006
VOILUTSequence = 0x00283010
SoftcopyVOILUTSequence = 0x00283110
ImagePresentationComments = 0x00284000
BiPlaneAcquisitionSequence = 0x00285000
RepresentativeFrameNumber = 0x00286010
FrameNumbersofInterestFOI = 0x00286020
FrameofInterestDescription = 0x00286022
FrameofInterestType = 0x00286023
MaskPointers = 0x00286030
RWavePointer = 0x00286040
MaskSubtractionSequence = 0x00286100
MaskOperation = 0x00286101
ApplicableFrameRange = 0x00286102
MaskFrameNumbers = 0x00286110
ContrastFrameAveraging = 0x00286112
MaskSubpixelShift = 0x00286114
TIDOffset = 0x00286120
MaskOperationExplanation = 0x00286190
EquipmentAdministratorSequence = 0x00287000
NumberofDisplaySubsystems = 0x00287001
CurrentConfigurationID = 0x00287002
DisplaySubsystemID = 0x00287003
DisplaySubsystemName = 0x00287004
DisplaySubsystemDescription = 0x00287005
SystemStatus = 0x00287006
SystemStatusComment = 0x00287007
TargetLuminanceCharacteristicsSequence = 0x00287008
LuminanceCharacteristicsID = 0x00287009
DisplaySubsystemConfigurationSequence = 0x0028700A
ConfigurationID = 0x0028700B
ConfigurationName = 0x0028700C
ConfigurationDescription = 0x0028700D
ReferencedTargetLuminanceCharacteristicsID = 0x0028700E
QAResultsSequence = 0x0028700F
DisplaySubsystemQAResultsSequence = 0x00287010
ConfigurationQAResultsSequence = 0x00287011
MeasurementEquipmentSequence = 0x00287012
MeasurementFunctions = 0x00287013
MeasurementEquipmentType = 0x00287014
VisualEvaluationResultSequence = 0x00287015
DisplayCalibrationResultSequence = 0x00287016
DDLValue = 0x00287017
CIExyWhitePoint = 0x00287018
DisplayFunctionType = 0x00287019
GammaValue = 0x0028701A
NumberofLuminancePoints = 0x0028701B
LuminanceResponseSequence = 0x0028701C
TargetMinimumLuminance = 0x0028701D
TargetMaximumLuminance = 0x0028701E
LuminanceValue = 0x0028701F
LuminanceResponseDescription = 0x00287020
WhitePointFlag = 0x00287021
DisplayDeviceTypeCodeSequence = 0x00287022
DisplaySubsystemSequence = 0x00287023
LuminanceResultSequence = 0x00287024
AmbientLightValueSource = 0x00287025
MeasuredCharacteristics = 0x00287026
LuminanceUniformityResultSequence = 0x00287027
VisualEvaluationTestSequence = 0x00287028
TestResult = 0x00287029
TestResultComment = 0x0028702A
TestImageValidation = 0x0028702B
TestPatternCodeSequence = 0x0028702C
MeasurementPatternCodeSequence = 0x0028702D
VisualEvaluationMethodCodeSequence = 0x0028702E
PixelDataProviderURL = 0x00287FE0
DataPointRows = 0x00289001
DataPointColumns = 0x00289002
SignalDomainColumns = 0x00289003
LargestMonochromePixelValue = 0x00289099
DataRepresentation = 0x00289108
PixelMeasuresSequence = 0x00289110
FrameVOILUTSequence = 0x00289132
PixelValueTransformationSequence = 0x00289145
SignalDomainRows = 0x00289235
DisplayFilterPercentage = 0x00289411
FramePixelShiftSequence = 0x00289415
SubtractionItemID = 0x00289416
PixelIntensityRelationshipLUTSequence = 0x00289422
FramePixelDataPropertiesSequence = 0x00289443
GeometricalProperties = 0x00289444
GeometricMaximumDistortion = 0x00289445
ImageProcessingApplied = 0x00289446
MaskSelectionMode = 0x00289454
LUTFunction = 0x00289474
MaskVisibilityPercentage = 0x00289478
PixelShiftSequence = 0x00289501
RegionPixelShiftSequence = 0x00289502
VerticesoftheRegion = 0x00289503
MultiframePresentationSequence = 0x00289505
PixelShiftFrameRange = 0x00289506
LUTFrameRange = 0x00289507
ImagetoEquipmentMappingMatrix = 0x00289520
EquipmentCoordinateSystemIdentification = 0x00289537
# DICOM data-element tags, group 0x0032 (study scheduling/status).
# Hex literals replace the equivalent int('…', 16) calls — same values.
StudyStatusID = 0x0032000A
StudyPriorityID = 0x0032000C
StudyIDIssuer = 0x00320012
StudyVerifiedDate = 0x00320032
StudyVerifiedTime = 0x00320033
StudyReadDate = 0x00320034
StudyReadTime = 0x00320035
ScheduledStudyStartDate = 0x00321000
ScheduledStudyStartTime = 0x00321001
ScheduledStudyStopDate = 0x00321010
ScheduledStudyStopTime = 0x00321011
ScheduledStudyLocation = 0x00321020
ScheduledStudyLocationAETitle = 0x00321021
ReasonforStudy = 0x00321030
RequestingPhysicianIdentificationSequence = 0x00321031
RequestingPhysician = 0x00321032
RequestingService = 0x00321033
RequestingServiceCodeSequence = 0x00321034
StudyArrivalDate = 0x00321040
StudyArrivalTime = 0x00321041
StudyCompletionDate = 0x00321050
StudyCompletionTime = 0x00321051
StudyComponentStatusID = 0x00321055
RequestedProcedureDescription = 0x00321060
RequestedProcedureCodeSequence = 0x00321064
RequestedContrastAgent = 0x00321070
StudyComments = 0x00324000
# DICOM data-element tags, group 0x0038 (patient visit/admission).
# Hex literals replace the equivalent int('…', 16) calls — same values.
ReferencedPatientAliasSequence = 0x00380004
VisitStatusID = 0x00380008
AdmissionID = 0x00380010
IssuerofAdmissionID = 0x00380011
IssuerofAdmissionIDSequence = 0x00380014
RouteofAdmissions = 0x00380016
ScheduledAdmissionDate = 0x0038001A
ScheduledAdmissionTime = 0x0038001B
ScheduledDischargeDate = 0x0038001C
ScheduledDischargeTime = 0x0038001D
ScheduledPatientInstitutionResidence = 0x0038001E
AdmittingDate = 0x00380020
AdmittingTime = 0x00380021
DischargeDate = 0x00380030
DischargeTime = 0x00380032
DischargeDiagnosisDescription = 0x00380040
DischargeDiagnosisCodeSequence = 0x00380044
SpecialNeeds = 0x00380050
ServiceEpisodeID = 0x00380060
IssuerofServiceEpisodeID = 0x00380061
ServiceEpisodeDescription = 0x00380062
IssuerofServiceEpisodeIDSequence = 0x00380064
PertinentDocumentsSequence = 0x00380100
PertinentResourcesSequence = 0x00380101
ResourceDescription = 0x00380102
CurrentPatientLocation = 0x00380300
PatientsInstitutionResidence = 0x00380400
PatientState = 0x00380500
PatientClinicalTrialParticipationSequence = 0x00380502
VisitComments = 0x00384000
# DICOM data-element tags, group 0x003A (waveform acquisition/display).
# Hex literals replace the equivalent int('…', 16) calls — same values.
WaveformOriginality = 0x003A0004
NumberofWaveformChannels = 0x003A0005
NumberofWaveformSamples = 0x003A0010
SamplingFrequency = 0x003A001A
MultiplexGroupLabel = 0x003A0020
ChannelDefinitionSequence = 0x003A0200
WaveformChannelNumber = 0x003A0202
ChannelLabel = 0x003A0203
ChannelStatus = 0x003A0205
ChannelSourceSequence = 0x003A0208
ChannelSourceModifiersSequence = 0x003A0209
SourceWaveformSequence = 0x003A020A
ChannelDerivationDescription = 0x003A020C
ChannelSensitivity = 0x003A0210
ChannelSensitivityUnitsSequence = 0x003A0211
ChannelSensitivityCorrectionFactor = 0x003A0212
ChannelBaseline = 0x003A0213
ChannelTimeSkew = 0x003A0214
ChannelSampleSkew = 0x003A0215
ChannelOffset = 0x003A0218
WaveformBitsStored = 0x003A021A
FilterLowFrequency = 0x003A0220
FilterHighFrequency = 0x003A0221
NotchFilterFrequency = 0x003A0222
NotchFilterBandwidth = 0x003A0223
WaveformDataDisplayScale = 0x003A0230
WaveformDisplayBackgroundCIELabValue = 0x003A0231
WaveformPresentationGroupSequence = 0x003A0240
PresentationGroupNumber = 0x003A0241
ChannelDisplaySequence = 0x003A0242
ChannelRecommendedDisplayCIELabValue = 0x003A0244
ChannelPosition = 0x003A0245
DisplayShadingFlag = 0x003A0246
FractionalChannelDisplayScale = 0x003A0247
AbsoluteChannelDisplayScale = 0x003A0248
MultiplexedAudioChannelsDescriptionCodeSequence = 0x003A0300
ChannelIdentificationCode = 0x003A0301
ChannelMode = 0x003A0302
# DICOM data-element tags, group 0x0040 (scheduled/performed procedure
# steps, specimens, imaging service requests, workitems). Hex literals
# replace the equivalent int('…', 16) calls — identical values, and no
# line wrapping forced on the very long keyword names.
ScheduledStationAETitle = 0x00400001
ScheduledProcedureStepStartDate = 0x00400002
ScheduledProcedureStepStartTime = 0x00400003
ScheduledProcedureStepEndDate = 0x00400004
ScheduledProcedureStepEndTime = 0x00400005
ScheduledPerformingPhysiciansName = 0x00400006
ScheduledProcedureStepDescription = 0x00400007
ScheduledProtocolCodeSequence = 0x00400008
ScheduledProcedureStepID = 0x00400009
StageCodeSequence = 0x0040000A
ScheduledPerformingPhysicianIdentificationSequence = 0x0040000B
ScheduledStationName = 0x00400010
ScheduledProcedureStepLocation = 0x00400011
PreMedication = 0x00400012
ScheduledProcedureStepStatus = 0x00400020
OrderPlacerIdentifierSequence = 0x00400026
OrderFillerIdentifierSequence = 0x00400027
LocalNamespaceEntityID = 0x00400031
UniversalEntityID = 0x00400032
UniversalEntityIDType = 0x00400033
IdentifierTypeCode = 0x00400035
AssigningFacilitySequence = 0x00400036
AssigningJurisdictionCodeSequence = 0x00400039
AssigningAgencyorDepartmentCodeSequence = 0x0040003A
ScheduledProcedureStepSequence = 0x00400100
ReferencedNonImageCompositeSOPInstanceSequence = 0x00400220
PerformedStationAETitle = 0x00400241
PerformedStationName = 0x00400242
PerformedLocation = 0x00400243
PerformedProcedureStepStartDate = 0x00400244
PerformedProcedureStepStartTime = 0x00400245
PerformedProcedureStepEndDate = 0x00400250
PerformedProcedureStepEndTime = 0x00400251
PerformedProcedureStepStatus = 0x00400252
PerformedProcedureStepID = 0x00400253
PerformedProcedureStepDescription = 0x00400254
PerformedProcedureTypeDescription = 0x00400255
PerformedProtocolCodeSequence = 0x00400260
PerformedProtocolType = 0x00400261
ScheduledStepAttributesSequence = 0x00400270
RequestAttributesSequence = 0x00400275
CommentsonthePerformedProcedureStep = 0x00400280
PerformedProcedureStepDiscontinuationReasonCodeSequence = 0x00400281
QuantitySequence = 0x00400293
Quantity = 0x00400294
MeasuringUnitsSequence = 0x00400295
BillingItemSequence = 0x00400296
TotalTimeofFluoroscopy = 0x00400300
TotalNumberofExposures = 0x00400301
EntranceDose = 0x00400302
ExposedArea = 0x00400303
DistanceSourcetoEntrance = 0x00400306
DistanceSourcetoSupport = 0x00400307
ExposureDoseSequence = 0x0040030E
CommentsonRadiationDose = 0x00400310
XRayOutput = 0x00400312
HalfValueLayer = 0x00400314
OrganDose = 0x00400316
OrganExposed = 0x00400318
BillingProcedureStepSequence = 0x00400320
FilmConsumptionSequence = 0x00400321
BillingSuppliesandDevicesSequence = 0x00400324
ReferencedProcedureStepSequence = 0x00400330
PerformedSeriesSequence = 0x00400340
CommentsontheScheduledProcedureStep = 0x00400400
ProtocolContextSequence = 0x00400440
ContentItemModifierSequence = 0x00400441
ScheduledSpecimenSequence = 0x00400500
SpecimenAccessionNumber = 0x0040050A
ContainerIdentifier = 0x00400512
IssueroftheContainerIdentifierSequence = 0x00400513
AlternateContainerIdentifierSequence = 0x00400515
ContainerTypeCodeSequence = 0x00400518
ContainerDescription = 0x0040051A
ContainerComponentSequence = 0x00400520
SpecimenSequence = 0x00400550
SpecimenIdentifier = 0x00400551
SpecimenDescriptionSequenceTrial = 0x00400552
SpecimenDescriptionTrial = 0x00400553
SpecimenUID = 0x00400554
AcquisitionContextSequence = 0x00400555
AcquisitionContextDescription = 0x00400556
SpecimenTypeCodeSequence = 0x0040059A
SpecimenDescriptionSequence = 0x00400560
IssueroftheSpecimenIdentifierSequence = 0x00400562
SpecimenShortDescription = 0x00400600
SpecimenDetailedDescription = 0x00400602
SpecimenPreparationSequence = 0x00400610
SpecimenPreparationStepContentItemSequence = 0x00400612
SpecimenLocalizationContentItemSequence = 0x00400620
SlideIdentifier = 0x004006FA
ImageCenterPointCoordinatesSequence = 0x0040071A
XOffsetinSlideCoordinateSystem = 0x0040072A
YOffsetinSlideCoordinateSystem = 0x0040073A
ZOffsetinSlideCoordinateSystem = 0x0040074A
PixelSpacingSequence = 0x004008D8
CoordinateSystemAxisCodeSequence = 0x004008DA
MeasurementUnitsCodeSequence = 0x004008EA
VitalStainCodeSequenceTrial = 0x004009F8
RequestedProcedureID = 0x00401001
ReasonfortheRequestedProcedure = 0x00401002
RequestedProcedurePriority = 0x00401003
PatientTransportArrangements = 0x00401004
RequestedProcedureLocation = 0x00401005
PlacerOrderNumberProcedure = 0x00401006
FillerOrderNumberProcedure = 0x00401007
ConfidentialityCode = 0x00401008
ReportingPriority = 0x00401009
ReasonforRequestedProcedureCodeSequence = 0x0040100A
NamesofIntendedRecipientsofResults = 0x00401010
IntendedRecipientsofResultsIdentificationSequence = 0x00401011
ReasonForPerformedProcedureCodeSequence = 0x00401012
RequestedProcedureDescriptionTrial = 0x00401060
PersonIdentificationCodeSequence = 0x00401101
PersonsAddress = 0x00401102
PersonsTelephoneNumbers = 0x00401103
PersonsTelecomInformation = 0x00401104
RequestedProcedureComments = 0x00401400
ReasonfortheImagingServiceRequest = 0x00402001
IssueDateofImagingServiceRequest = 0x00402004
IssueTimeofImagingServiceRequest = 0x00402005
PlacerOrderNumberImagingServiceRequestRetired = 0x00402006
FillerOrderNumberImagingServiceRequestRetired = 0x00402007
OrderEnteredBy = 0x00402008
OrderEnterersLocation = 0x00402009
OrderCallbackPhoneNumber = 0x00402010
OrderCallbackTelecomInformation = 0x00402011
PlacerOrderNumberImagingServiceRequest = 0x00402016
FillerOrderNumberImagingServiceRequest = 0x00402017
ImagingServiceRequestComments = 0x00402400
ConfidentialityConstraintonPatientDataDescription = 0x00403001
GeneralPurposeScheduledProcedureStepStatus = 0x00404001
GeneralPurposePerformedProcedureStepStatus = 0x00404002
GeneralPurposeScheduledProcedureStepPriority = 0x00404003
ScheduledProcessingApplicationsCodeSequence = 0x00404004
ScheduledProcedureStepStartDateTime = 0x00404005
MultipleCopiesFlag = 0x00404006
PerformedProcessingApplicationsCodeSequence = 0x00404007
HumanPerformerCodeSequence = 0x00404009
ScheduledProcedureStepModificationDateTime = 0x00404010
ExpectedCompletionDateTime = 0x00404011
ResultingGeneralPurposePerformedProcedureStepsSequence = 0x00404015
ReferencedGeneralPurposeScheduledProcedureStepSequence = 0x00404016
ScheduledWorkitemCodeSequence = 0x00404018
PerformedWorkitemCodeSequence = 0x00404019
InputAvailabilityFlag = 0x00404020
InputInformationSequence = 0x00404021
RelevantInformationSequence = 0x00404022
ReferencedGeneralPurposeScheduledProcedureStepTransactionUID = 0x00404023
ScheduledStationNameCodeSequence = 0x00404025
ScheduledStationClassCodeSequence = 0x00404026
ScheduledStationGeographicLocationCodeSequence = 0x00404027
PerformedStationNameCodeSequence = 0x00404028
PerformedStationClassCodeSequence = 0x00404029
PerformedStationGeographicLocationCodeSequence = 0x00404030
RequestedSubsequentWorkitemCodeSequence = 0x00404031
NonDICOMOutputCodeSequence = 0x00404032
OutputInformationSequence = 0x00404033
ScheduledHumanPerformersSequence = 0x00404034
ActualHumanPerformersSequence = 0x00404035
HumanPerformersOrganization = 0x00404036
HumanPerformersName = 0x00404037
RawDataHandling = 0x00404040
InputReadinessState = 0x00404041
PerformedProcedureStepStartDateTime = 0x00404050
PerformedProcedureStepEndDateTime = 0x00404051
ProcedureStepCancellationDateTime = 0x00404052
OutputDestinationSequence = 0x00404070
DICOMStorageSequence = 0x00404071
STOWRSStorageSequence = 0x00404072
StorageURL = 0x00404073
XDSStorageSequence = int('00404074', 16)
EntranceDoseinmGy = int('00408302', 16)
ParametricMapFrameTypeSequence = int('00409092', 16)
ReferencedImageRealWorldValueMappingSequence = int('00409094', 16)
RealWorldValueMappingSequence = int('00409096', 16)
PixelValueMappingCodeSequence = int('00409098', 16)
LUTLabel = int('00409210', 16)
RealWorldValueLastValueMapped = int('00409211', 16)
RealWorldValueLUTData = int('00409212', 16)
DoubleFloatRealWorldValueLastValueMapped = int('00409213', 16)
DoubleFloatRealWorldValueFirstValueMapped = int('00409214', 16)
RealWorldValueFirstValueMapped = int('00409216', 16)
QuantityDefinitionSequence = int('00409220', 16)
RealWorldValueIntercept = int('00409224', 16)
RealWorldValueSlope = int('00409225', 16)
FindingsFlagTrial = int('0040A007', 16)
RelationshipType = int('0040A010', 16)
FindingsSequenceTrial = int('0040A020', 16)
FindingsGroupUIDTrial = int('0040A021', 16)
ReferencedFindingsGroupUIDTrial = int('0040A022', 16)
FindingsGroupRecordingDateTrial = int('0040A023', 16)
FindingsGroupRecordingTimeTrial = int('0040A024', 16)
FindingsSourceCategoryCodeSequenceTrial = int('0040A026', 16)
VerifyingOrganization = int('0040A027', 16)
DocumentingOrganizationIdentifierCodeSequenceTrial = int('0040A028', 16)
VerificationDateTime = int('0040A030', 16)
ObservationDateTime = int('0040A032', 16)
ValueType = int('0040A040', 16)
ConceptNameCodeSequence = int('0040A043', 16)
MeasurementPrecisionDescriptionTrial = int('0040A047', 16)
ContinuityOfContent = int('0040A050', 16)
UrgencyorPriorityAlertsTrial = int('0040A057', 16)
SequencingIndicatorTrial = int('0040A060', 16)
DocumentIdentifierCodeSequenceTrial = int('0040A066', 16)
DocumentAuthorTrial = int('0040A067', 16)
DocumentAuthorIdentifierCodeSequenceTrial = int('0040A068', 16)
IdentifierCodeSequenceTrial = int('0040A070', 16)
VerifyingObserverSequence = int('0040A073', 16)
ObjectBinaryIdentifierTrial = int('0040A074', 16)
VerifyingObserverName = int('0040A075', 16)
DocumentingObserverIdentifierCodeSequenceTrial = int('0040A076', 16)
AuthorObserverSequence = int('0040A078', 16)
ParticipantSequence = int('0040A07A', 16)
CustodialOrganizationSequence = int('0040A07C', 16)
ParticipationType = int('0040A080', 16)
ParticipationDateTime = int('0040A082', 16)
ObserverType = int('0040A084', 16)
ProcedureIdentifierCodeSequenceTrial = int('0040A085', 16)
VerifyingObserverIdentificationCodeSequence = int('0040A088', 16)
ObjectDirectoryBinaryIdentifierTrial = int('0040A089', 16)
EquivalentCDADocumentSequence = int('0040A090', 16)
ReferencedWaveformChannels = int('0040A0B0', 16)
DateofDocumentorVerbalTransactionTrial = int('0040A110', 16)
TimeofDocumentCreationorVerbalTransactionTrial = int('0040A112', 16)
DateTime = int('0040A120', 16)
Date = int('0040A121', 16)
Time = int('0040A122', 16)
PersonName = int('0040A123', 16)
UID = int('0040A124', 16)
ReportStatusIDTrial = int('0040A125', 16)
TemporalRangeType = int('0040A130', 16)
ReferencedSamplePositions = int('0040A132', 16)
ReferencedFrameNumbers = int('0040A136', 16)
ReferencedTimeOffsets = int('0040A138', 16)
ReferencedDateTime = int('0040A13A', 16)
TextValue = int('0040A160', 16)
FloatingPointValue = int('0040A161', 16)
RationalNumeratorValue = int('0040A162', 16)
RationalDenominatorValue = int('0040A163', 16)
ObservationCategoryCodeSequenceTrial = int('0040A167', 16)
ConceptCodeSequence = int('0040A168', 16)
BibliographicCitationTrial = int('0040A16A', 16)
PurposeofReferenceCodeSequence = int('0040A170', 16)
ObservationUID = int('0040A171', 16)
ReferencedObservationUIDTrial = int('0040A172', 16)
ReferencedObservationClassTrial = int('0040A173', 16)
ReferencedObjectObservationClassTrial = int('0040A174', 16)
AnnotationGroupNumber = int('0040A180', 16)
ObservationDateTrial = int('0040A192', 16)
ObservationTimeTrial = int('0040A193', 16)
MeasurementAutomationTrial = int('0040A194', 16)
ModifierCodeSequence = int('0040A195', 16)
IdentificationDescriptionTrial = int('0040A224', 16)
CoordinatesSetGeometricTypeTrial = int('0040A290', 16)
AlgorithmCodeSequenceTrial = int('0040A296', 16)
AlgorithmDescriptionTrial = int('0040A297', 16)
PixelCoordinatesSetTrial = int('0040A29A', 16)
MeasuredValueSequence = int('0040A300', 16)
NumericValueQualifierCodeSequence = int('0040A301', 16)
CurrentObserverTrial = int('0040A307', 16)
NumericValue = int('0040A30A', 16)
ReferencedAccessionSequenceTrial = int('0040A313', 16)
ReportStatusCommentTrial = int('0040A33A', 16)
ProcedureContextSequenceTrial = int('0040A340', 16)
VerbalSourceTrial = int('0040A352', 16)
AddressTrial = int('0040A353', 16)
TelephoneNumberTrial = int('0040A354', 16)
VerbalSourceIdentifierCodeSequenceTrial = int('0040A358', 16)
PredecessorDocumentsSequence = int('0040A360', 16)
ReferencedRequestSequence = int('0040A370', 16)
PerformedProcedureCodeSequence = int('0040A372', 16)
CurrentRequestedProcedureEvidenceSequence = int('0040A375', 16)
ReportDetailSequenceTrial = int('0040A380', 16)
PertinentOtherEvidenceSequence = int('0040A385', 16)
HL7StructuredDocumentReferenceSequence = int('0040A390', 16)
ObservationSubjectUIDTrial = int('0040A402', 16)
ObservationSubjectClassTrial = int('0040A403', 16)
ObservationSubjectTypeCodeSequenceTrial = int('0040A404', 16)
CompletionFlag = int('0040A491', 16)
CompletionFlagDescription = int('0040A492', 16)
VerificationFlag = int('0040A493', 16)
ArchiveRequested = int('0040A494', 16)
PreliminaryFlag = int('0040A496', 16)
ContentTemplateSequence = int('0040A504', 16)
IdenticalDocumentsSequence = int('0040A525', 16)
ObservationSubjectContextFlagTrial = int('0040A600', 16)
ObserverContextFlagTrial = int('0040A601', 16)
ProcedureContextFlagTrial = int('0040A603', 16)
ContentSequence = int('0040A730', 16)
RelationshipSequenceTrial = int('0040A731', 16)
RelationshipTypeCodeSequenceTrial = int('0040A732', 16)
LanguageCodeSequenceTrial = int('0040A744', 16)
UniformResourceLocatorTrial = int('0040A992', 16)
WaveformAnnotationSequence = int('0040B020', 16)
TemplateIdentifier = int('0040DB00', 16)
TemplateVersion = int('0040DB06', 16)
TemplateLocalVersion = int('0040DB07', 16)
TemplateExtensionFlag = int('0040DB0B', 16)
TemplateExtensionOrganizationUID = int('0040DB0C', 16)
TemplateExtensionCreatorUID = int('0040DB0D', 16)
ReferencedContentItemIdentifier = int('0040DB73', 16)
HL7InstanceIdentifier = int('0040E001', 16)
HL7DocumentEffectiveTime = int('0040E004', 16)
HL7DocumentTypeCodeSequence = int('0040E006', 16)
DocumentClassCodeSequence = int('0040E008', 16)
RetrieveURI = int('0040E010', 16)
RetrieveLocationUID = int('0040E011', 16)
TypeofInstances = int('0040E020', 16)
DICOMRetrievalSequence = int('0040E021', 16)
DICOMMediaRetrievalSequence = int('0040E022', 16)
WADORetrievalSequence = int('0040E023', 16)
XDSRetrievalSequence = int('0040E024', 16)
WADORSRetrievalSequence = int('0040E025', 16)
RepositoryUniqueID = int('0040E030', 16)
HomeCommunityID = int('0040E031', 16)
# Tag constants for DICOM group 0x0042 (encapsulated document elements),
# packed as 0xGGGGEEEE hex literals (same values as int('GGGGEEEE', 16)).
DocumentTitle = 0x00420010
EncapsulatedDocument = 0x00420011
MIMETypeofEncapsulatedDocument = 0x00420012
SourceInstanceSequence = 0x00420013
ListofMIMETypes = 0x00420014
# Tag constants for DICOM group 0x0044 (substance administration elements),
# packed as 0xGGGGEEEE hex literals (same values as int('GGGGEEEE', 16)).
ProductPackageIdentifier = 0x00440001
SubstanceAdministrationApproval = 0x00440002
ApprovalStatusFurtherDescription = 0x00440003
ApprovalStatusDateTime = 0x00440004
ProductTypeCodeSequence = 0x00440007
ProductName = 0x00440008
ProductDescription = 0x00440009
ProductLotIdentifier = 0x0044000A
ProductExpirationDateTime = 0x0044000B
SubstanceAdministrationDateTime = 0x00440010
SubstanceAdministrationNotes = 0x00440011
SubstanceAdministrationDeviceID = 0x00440012
ProductParameterSequence = 0x00440013
SubstanceAdministrationParameterSequence = 0x00440019
# Tag constants for DICOM group 0x0046 (ophthalmic refractive measurement
# elements), packed as 0xGGGGEEEE hex literals (same values as
# int('GGGGEEEE', 16)).
LensDescription = 0x00460012
RightLensSequence = 0x00460014
LeftLensSequence = 0x00460015
UnspecifiedLateralityLensSequence = 0x00460016
CylinderSequence = 0x00460018
PrismSequence = 0x00460028
HorizontalPrismPower = 0x00460030
HorizontalPrismBase = 0x00460032
VerticalPrismPower = 0x00460034
VerticalPrismBase = 0x00460036
LensSegmentType = 0x00460038
OpticalTransmittance = 0x00460040
ChannelWidth = 0x00460042
PupilSize = 0x00460044
CornealSize = 0x00460046
AutorefractionRightEyeSequence = 0x00460050
AutorefractionLeftEyeSequence = 0x00460052
DistancePupillaryDistance = 0x00460060
NearPupillaryDistance = 0x00460062
IntermediatePupillaryDistance = 0x00460063
OtherPupillaryDistance = 0x00460064
KeratometryRightEyeSequence = 0x00460070
KeratometryLeftEyeSequence = 0x00460071
SteepKeratometricAxisSequence = 0x00460074
RadiusofCurvature = 0x00460075
KeratometricPower = 0x00460076
KeratometricAxis = 0x00460077
FlatKeratometricAxisSequence = 0x00460080
BackgroundColor = 0x00460092
Optotype = 0x00460094
OptotypePresentation = 0x00460095
SubjectiveRefractionRightEyeSequence = 0x00460097
SubjectiveRefractionLeftEyeSequence = 0x00460098
AddNearSequence = 0x00460100
AddIntermediateSequence = 0x00460101
AddOtherSequence = 0x00460102
AddPower = 0x00460104
ViewingDistance = 0x00460106
VisualAcuityTypeCodeSequence = 0x00460121
VisualAcuityRightEyeSequence = 0x00460122
VisualAcuityLeftEyeSequence = 0x00460123
VisualAcuityBothEyesOpenSequence = 0x00460124
ViewingDistanceType = 0x00460125
VisualAcuityModifiers = 0x00460135
DecimalVisualAcuity = 0x00460137
OptotypeDetailedDefinition = 0x00460139
ReferencedRefractiveMeasurementsSequence = 0x00460145
SpherePower = 0x00460146
CylinderPower = 0x00460147
CornealTopographySurface = 0x00460201
CornealVertexLocation = 0x00460202
PupilCentroidXCoordinate = 0x00460203
PupilCentroidYCoordinate = 0x00460204
EquivalentPupilRadius = 0x00460205
CornealTopographyMapTypeCodeSequence = 0x00460207
VerticesoftheOutlineofPupil = 0x00460208
CornealTopographyMappingNormalsSequence = 0x00460210
MaximumCornealCurvatureSequence = 0x00460211
MaximumCornealCurvature = 0x00460212
MaximumCornealCurvatureLocation = 0x00460213
MinimumKeratometricSequence = 0x00460215
SimulatedKeratometricCylinderSequence = 0x00460218
AverageCornealPower = 0x00460220
CornealISValue = 0x00460224
AnalyzedArea = 0x00460227
SurfaceRegularityIndex = 0x00460230
SurfaceAsymmetryIndex = 0x00460232
CornealEccentricityIndex = 0x00460234
KeratoconusPredictionIndex = 0x00460236
DecimalPotentialVisualAcuity = 0x00460238
CornealTopographyMapQualityEvaluation = 0x00460242
SourceImageCornealProcessedDataSequence = 0x00460244
CornealPointLocation = 0x00460247
CornealPointEstimated = 0x00460248
AxialPower = 0x00460249
TangentialPower = 0x00460250
RefractivePower = 0x00460251
RelativeElevation = 0x00460252
CornealWavefront = 0x00460253
# Tag constants for DICOM group 0x0048 (whole-slide microscopy imaging
# elements), packed as 0xGGGGEEEE hex literals (same values as
# int('GGGGEEEE', 16)).
ImagedVolumeWidth = 0x00480001
ImagedVolumeHeight = 0x00480002
ImagedVolumeDepth = 0x00480003
TotalPixelMatrixColumns = 0x00480006
TotalPixelMatrixRows = 0x00480007
TotalPixelMatrixOriginSequence = 0x00480008
SpecimenLabelinImage = 0x00480010
FocusMethod = 0x00480011
ExtendedDepthofField = 0x00480012
NumberofFocalPlanes = 0x00480013
DistanceBetweenFocalPlanes = 0x00480014
RecommendedAbsentPixelCIELabValue = 0x00480015
IlluminatorTypeCodeSequence = 0x00480100
ImageOrientationSlide = 0x00480102
OpticalPathSequence = 0x00480105
OpticalPathIdentifier = 0x00480106
OpticalPathDescription = 0x00480107
IlluminationColorCodeSequence = 0x00480108
SpecimenReferenceSequence = 0x00480110
CondenserLensPower = 0x00480111
ObjectiveLensPower = 0x00480112
ObjectiveLensNumericalAperture = 0x00480113
PaletteColorLookupTableSequence = 0x00480120
ReferencedImageNavigationSequence = 0x00480200
TopLeftHandCornerofLocalizerArea = 0x00480201
BottomRightHandCornerofLocalizerArea = 0x00480202
OpticalPathIdentificationSequence = 0x00480207
PlanePositionSlideSequence = 0x0048021A
ColumnPositionInTotalImagePixelMatrix = 0x0048021E
RowPositionInTotalImagePixelMatrix = 0x0048021F
PixelOriginInterpretation = 0x00480301
# Tag constants for DICOM group 0x0050 (device and container component
# elements), packed as 0xGGGGEEEE hex literals (same values as
# int('GGGGEEEE', 16)).
CalibrationImage = 0x00500004
DeviceSequence = 0x00500010
ContainerComponentTypeCodeSequence = 0x00500012
ContainerComponentThickness = 0x00500013
DeviceLength = 0x00500014
ContainerComponentWidth = 0x00500015
DeviceDiameter = 0x00500016
DeviceDiameterUnits = 0x00500017
DeviceVolume = 0x00500018
InterMarkerDistance = 0x00500019
ContainerComponentMaterial = 0x0050001A
ContainerComponentID = 0x0050001B
ContainerComponentLength = 0x0050001C
ContainerComponentDiameter = 0x0050001D
ContainerComponentDescription = 0x0050001E
DeviceDescription = 0x00500020
# Tag constants for DICOM group 0x0052 (optical coherence tomography
# elements), packed as 0xGGGGEEEE hex literals (same values as
# int('GGGGEEEE', 16)).
ContrastBolusIngredientPercentbyVolume = 0x00520001
OCTFocalDistance = 0x00520002
BeamSpotSize = 0x00520003
EffectiveRefractiveIndex = 0x00520004
OCTAcquisitionDomain = 0x00520006
OCTOpticalCenterWavelength = 0x00520007
AxialResolution = 0x00520008
RangingDepth = 0x00520009
AlineRate = 0x00520011
AlinesPerFrame = 0x00520012
CatheterRotationalRate = 0x00520013
AlinePixelSpacing = 0x00520014
ModeofPercutaneousAccessSequence = 0x00520016
IntravascularOCTFrameTypeSequence = 0x00520025
OCTZOffsetApplied = 0x00520026
IntravascularFrameContentSequence = 0x00520027
IntravascularLongitudinalDistance = 0x00520028
IntravascularOCTFrameContentSequence = 0x00520029
OCTZOffsetCorrection = 0x00520030
CatheterDirectionofRotation = 0x00520031
SeamLineLocation = 0x00520033
FirstAlineLocation = 0x00520034
SeamLineIndex = 0x00520036
NumberofPaddedAlines = 0x00520038
InterpolationType = 0x00520039
RefractiveIndexApplied = 0x0052003A
# Tag constants for DICOM group 0x0054 (nuclear medicine acquisition
# elements), packed as 0xGGGGEEEE hex literals (same values as
# int('GGGGEEEE', 16)).
EnergyWindowVector = 0x00540010
NumberofEnergyWindows = 0x00540011
EnergyWindowInformationSequence = 0x00540012
EnergyWindowRangeSequence = 0x00540013
EnergyWindowLowerLimit = 0x00540014
EnergyWindowUpperLimit = 0x00540015
RadiopharmaceuticalInformationSequence = 0x00540016
ResidualSyringeCounts = 0x00540017
EnergyWindowName = 0x00540018
DetectorVector = 0x00540020
NumberofDetectors = 0x00540021
DetectorInformationSequence = 0x00540022
PhaseVector = 0x00540030
NumberofPhases = 0x00540031
PhaseInformationSequence = 0x00540032
NumberofFramesinPhase = 0x00540033
PhaseDelay = 0x00540036
PauseBetweenFrames = 0x00540038
PhaseDescription = 0x00540039
RotationVector = 0x00540050
NumberofRotations = 0x00540051
RotationInformationSequence = 0x00540052
NumberofFramesinRotation = 0x00540053
RRIntervalVector = 0x00540060
NumberofRRIntervals = 0x00540061
GatedInformationSequence = 0x00540062
DataInformationSequence = 0x00540063
TimeSlotVector = 0x00540070
NumberofTimeSlots = 0x00540071
TimeSlotInformationSequence = 0x00540072
TimeSlotTime = 0x00540073
SliceVector = 0x00540080
NumberofSlices = 0x00540081
AngularViewVector = 0x00540090
TimeSliceVector = 0x00540100
NumberofTimeSlices = 0x00540101
StartAngle = 0x00540200
TypeofDetectorMotion = 0x00540202
TriggerVector = 0x00540210
NumberofTriggersinPhase = 0x00540211
ViewCodeSequence = 0x00540220
ViewModifierCodeSequence = 0x00540222
RadionuclideCodeSequence = 0x00540300
AdministrationRouteCodeSequence = 0x00540302
RadiopharmaceuticalCodeSequence = 0x00540304
CalibrationDataSequence = 0x00540306
EnergyWindowNumber = 0x00540308
ImageID = 0x00540400
PatientOrientationCodeSequence = 0x00540410
PatientOrientationModifierCodeSequence = 0x00540412
PatientGantryRelationshipCodeSequence = 0x00540414
SliceProgressionDirection = 0x00540500
ScanProgressionDirection = 0x00540501
SeriesType = 0x00541000
Units = 0x00541001
CountsSource = 0x00541002
ReprojectionMethod = 0x00541004
SUVType = 0x00541006
RandomsCorrectionMethod = 0x00541100
AttenuationCorrectionMethod = 0x00541101
DecayCorrection = 0x00541102
ReconstructionMethod = 0x00541103
DetectorLinesofResponseUsed = 0x00541104
ScatterCorrectionMethod = 0x00541105
AxialAcceptance = 0x00541200
AxialMash = 0x00541201
TransverseMash = 0x00541202
DetectorElementSize = 0x00541203
CoincidenceWindowWidth = 0x00541210
SecondaryCountsType = 0x00541220
FrameReferenceTime = 0x00541300
PrimaryPromptsCountsAccumulated = 0x00541310
SecondaryCountsAccumulated = 0x00541311
SliceSensitivityFactor = 0x00541320
DecayFactor = 0x00541321
DoseCalibrationFactor = 0x00541322
ScatterFractionFactor = 0x00541323
DeadTimeFactor = 0x00541324
ImageIndex = 0x00541330
CountsIncluded = 0x00541400
DeadTimeCorrectionFlag = 0x00541401
# Tag constants for DICOM group 0x0060 (histogram elements), packed as
# 0xGGGGEEEE hex literals (same values as int('GGGGEEEE', 16)).
HistogramSequence = 0x00603000
HistogramNumberofBins = 0x00603002
HistogramFirstBinValue = 0x00603004
HistogramLastBinValue = 0x00603006
HistogramBinWidth = 0x00603008
HistogramExplanation = 0x00603010
HistogramData = 0x00603020
# Tag constants for DICOM group 0x0062 (segmentation elements), packed as
# 0xGGGGEEEE hex literals (same values as int('GGGGEEEE', 16)).
SegmentationType = 0x00620001
SegmentSequence = 0x00620002
SegmentedPropertyCategoryCodeSequence = 0x00620003
SegmentNumber = 0x00620004
SegmentLabel = 0x00620005
SegmentDescription = 0x00620006
SegmentAlgorithmType = 0x00620008
SegmentAlgorithmName = 0x00620009
SegmentIdentificationSequence = 0x0062000A
ReferencedSegmentNumber = 0x0062000B
RecommendedDisplayGrayscaleValue = 0x0062000C
RecommendedDisplayCIELabValue = 0x0062000D
MaximumFractionalValue = 0x0062000E
SegmentedPropertyTypeCodeSequence = 0x0062000F
SegmentationFractionalType = 0x00620010
SegmentedPropertyTypeModifierCodeSequence = 0x00620011
UsedSegmentsSequence = 0x00620012
TrackingID = 0x00620020
TrackingUID = 0x00620021
# Tag constants for DICOM group 0x0064 (deformable registration elements),
# packed as 0xGGGGEEEE hex literals (same values as int('GGGGEEEE', 16)).
DeformableRegistrationSequence = 0x00640002
SourceFrameofReferenceUID = 0x00640003
DeformableRegistrationGridSequence = 0x00640005
GridDimensions = 0x00640007
GridResolution = 0x00640008
VectorGridData = 0x00640009
PreDeformationMatrixRegistrationSequence = 0x0064000F
PostDeformationMatrixRegistrationSequence = 0x00640010
# Tag constants for DICOM group 0x0066 (surface mesh and tractography
# elements), packed as 0xGGGGEEEE hex literals (same values as
# int('GGGGEEEE', 16)).
NumberofSurfaces = 0x00660001
SurfaceSequence = 0x00660002
SurfaceNumber = 0x00660003
SurfaceComments = 0x00660004
SurfaceProcessing = 0x00660009
SurfaceProcessingRatio = 0x0066000A
SurfaceProcessingDescription = 0x0066000B
RecommendedPresentationOpacity = 0x0066000C
RecommendedPresentationType = 0x0066000D
FiniteVolume = 0x0066000E
Manifold = 0x00660010
SurfacePointsSequence = 0x00660011
SurfacePointsNormalsSequence = 0x00660012
SurfaceMeshPrimitivesSequence = 0x00660013
NumberofSurfacePoints = 0x00660015
PointCoordinatesData = 0x00660016
PointPositionAccuracy = 0x00660017
MeanPointDistance = 0x00660018
MaximumPointDistance = 0x00660019
PointsBoundingBoxCoordinates = 0x0066001A
AxisofRotation = 0x0066001B
CenterofRotation = 0x0066001C
NumberofVectors = 0x0066001E
VectorDimensionality = 0x0066001F
VectorAccuracy = 0x00660020
VectorCoordinateData = 0x00660021
TrianglePointIndexList = 0x00660023
EdgePointIndexList = 0x00660024
VertexPointIndexList = 0x00660025
TriangleStripSequence = 0x00660026
TriangleFanSequence = 0x00660027
LineSequence = 0x00660028
PrimitivePointIndexList = 0x00660029
SurfaceCount = 0x0066002A
ReferencedSurfaceSequence = 0x0066002B
ReferencedSurfaceNumber = 0x0066002C
SegmentSurfaceGenerationAlgorithmIdentificationSequence = 0x0066002D
SegmentSurfaceSourceInstanceSequence = 0x0066002E
AlgorithmFamilyCodeSequence = 0x0066002F
AlgorithmNameCodeSequence = 0x00660030
AlgorithmVersion = 0x00660031
AlgorithmParameters = 0x00660032
FacetSequence = 0x00660034
SurfaceProcessingAlgorithmIdentificationSequence = 0x00660035
AlgorithmName = 0x00660036
RecommendedPointRadius = 0x00660037
RecommendedLineThickness = 0x00660038
LongPrimitivePointIndexList = 0x00660040
LongTrianglePointIndexList = 0x00660041
LongEdgePointIndexList = 0x00660042
LongVertexPointIndexList = 0x00660043
TrackSetSequence = 0x00660101
TrackSequence = 0x00660102
RecommendedDisplayCIELabValueList = 0x00660103
TrackingAlgorithmIdentificationSequence = 0x00660104
TrackSetNumber = 0x00660105
TrackSetLabel = 0x00660106
TrackSetDescription = 0x00660107
TrackSetAnatomicalTypeCodeSequence = 0x00660108
MeasurementsSequence = 0x00660121
TrackSetStatisticsSequence = 0x00660124
FloatingPointValues = 0x00660125
TrackPointIndexList = 0x00660129
TrackStatisticsSequence = 0x00660130
MeasurementValuesSequence = 0x00660132
DiffusionAcquisitionCodeSequence = 0x00660133
DiffusionModelCodeSequence = 0x00660134
# Tag constants for DICOM group 0x0068 (implant template elements),
# packed as 0xGGGGEEEE hex literals (same values as int('GGGGEEEE', 16)).
ImplantSize = 0x00686210
ImplantTemplateVersion = 0x00686221
ReplacedImplantTemplateSequence = 0x00686222
ImplantType = 0x00686223
DerivationImplantTemplateSequence = 0x00686224
OriginalImplantTemplateSequence = 0x00686225
EffectiveDateTime = 0x00686226
ImplantTargetAnatomySequence = 0x00686230
InformationFromManufacturerSequence = 0x00686260
NotificationFromManufacturerSequence = 0x00686265
InformationIssueDateTime = 0x00686270
InformationSummary = 0x00686280
ImplantRegulatoryDisapprovalCodeSequence = 0x006862A0
OverallTemplateSpatialTolerance = 0x006862A5
HPGLDocumentSequence = 0x006862C0
HPGLDocumentID = 0x006862D0
HPGLDocumentLabel = 0x006862D5
ViewOrientationCodeSequence = 0x006862E0
ViewOrientationModifier = 0x006862F0
HPGLDocumentScaling = 0x006862F2
HPGLDocument = 0x00686300
HPGLContourPenNumber = 0x00686310
HPGLPenSequence = 0x00686320
HPGLPenNumber = 0x00686330
HPGLPenLabel = 0x00686340
HPGLPenDescription = 0x00686345
RecommendedRotationPoint = 0x00686346
BoundingRectangle = 0x00686347
ImplantTemplate3DModelSurfaceNumber = 0x00686350
SurfaceModelDescriptionSequence = 0x00686360
SurfaceModelLabel = 0x00686380
SurfaceModelScalingFactor = 0x00686390
MaterialsCodeSequence = 0x006863A0
CoatingMaterialsCodeSequence = 0x006863A4
ImplantTypeCodeSequence = 0x006863A8
FixationMethodCodeSequence = 0x006863AC
MatingFeatureSetsSequence = 0x006863B0
MatingFeatureSetID = 0x006863C0
MatingFeatureSetLabel = 0x006863D0
MatingFeatureSequence = 0x006863E0
MatingFeatureID = 0x006863F0
MatingFeatureDegreeofFreedomSequence = 0x00686400
DegreeofFreedomID = 0x00686410
DegreeofFreedomType = 0x00686420
TwoDMatingFeatureCoordinatesSequence = 0x00686430
ReferencedHPGLDocumentID = 0x00686440
TwoDMatingPoint = 0x00686450
TwoDMatingAxes = 0x00686460
TwoDDegreeofFreedomSequence = 0x00686470
ThreeDDegreeofFreedomAxis = 0x00686490
RangeofFreedom = 0x006864A0
ThreeDMatingPoint = 0x006864C0
ThreeDMatingAxes = 0x006864D0
TwoDDegreeofFreedomAxis = 0x006864F0
PlanningLandmarkPointSequence = 0x00686500
PlanningLandmarkLineSequence = 0x00686510
PlanningLandmarkPlaneSequence = 0x00686520
PlanningLandmarkID = 0x00686530
PlanningLandmarkDescription = 0x00686540
PlanningLandmarkIdentificationCodeSequence = 0x00686545
TwoDPointCoordinatesSequence = 0x00686550
TwoDPointCoordinates = 0x00686560
ThreeDPointCoordinates = 0x00686590
TwoDLineCoordinatesSequence = 0x006865A0
TwoDLineCoordinates = 0x006865B0
ThreeDLineCoordinates = 0x006865D0
TwoDPlaneCoordinatesSequence = 0x006865E0
TwoDPlaneIntersection = 0x006865F0
ThreeDPlaneOrigin = 0x00686610
ThreeDPlaneNormal = 0x00686620
# Tag constants for DICOM group 0x0070 (presentation state, graphic
# annotation, and registration elements), packed as 0xGGGGEEEE hex
# literals (same values as int('GGGGEEEE', 16)).
GraphicAnnotationSequence = 0x00700001
GraphicLayer = 0x00700002
BoundingBoxAnnotationUnits = 0x00700003
AnchorPointAnnotationUnits = 0x00700004
GraphicAnnotationUnits = 0x00700005
UnformattedTextValue = 0x00700006
TextObjectSequence = 0x00700008
GraphicObjectSequence = 0x00700009
BoundingBoxTopLeftHandCorner = 0x00700010
BoundingBoxBottomRightHandCorner = 0x00700011
BoundingBoxTextHorizontalJustification = 0x00700012
AnchorPoint = 0x00700014
AnchorPointVisibility = 0x00700015
GraphicDimensions = 0x00700020
NumberofGraphicPoints = 0x00700021
GraphicData = 0x00700022
GraphicType = 0x00700023
GraphicFilled = 0x00700024
ImageRotationRetired = 0x00700040
ImageHorizontalFlip = 0x00700041
ImageRotation = 0x00700042
DisplayedAreaTopLeftHandCornerTrial = 0x00700050
DisplayedAreaBottomRightHandCornerTrial = 0x00700051
DisplayedAreaTopLeftHandCorner = 0x00700052
DisplayedAreaBottomRightHandCorner = 0x00700053
DisplayedAreaSelectionSequence = 0x0070005A
GraphicLayerSequence = 0x00700060
GraphicLayerOrder = 0x00700062
GraphicLayerRecommendedDisplayGrayscaleValue = 0x00700066
GraphicLayerRecommendedDisplayRGBValue = 0x00700067
GraphicLayerDescription = 0x00700068
ContentLabel = 0x00700080
ContentDescription = 0x00700081
PresentationCreationDate = 0x00700082
PresentationCreationTime = 0x00700083
ContentCreatorsName = 0x00700084
ContentCreatorsIdentificationCodeSequence = 0x00700086
AlternateContentDescriptionSequence = 0x00700087
PresentationSizeMode = 0x00700100
PresentationPixelSpacing = 0x00700101
PresentationPixelAspectRatio = 0x00700102
PresentationPixelMagnificationRatio = 0x00700103
GraphicGroupLabel = 0x00700207
GraphicGroupDescription = 0x00700208
CompoundGraphicSequence = 0x00700209
CompoundGraphicInstanceID = 0x00700226
FontName = 0x00700227
FontNameType = 0x00700228
CSSFontName = 0x00700229
RotationAngle = 0x00700230
TextStyleSequence = 0x00700231
LineStyleSequence = 0x00700232
FillStyleSequence = 0x00700233
GraphicGroupSequence = 0x00700234
TextColorCIELabValue = 0x00700241
HorizontalAlignment = 0x00700242
VerticalAlignment = 0x00700243
ShadowStyle = 0x00700244
ShadowOffsetX = 0x00700245
ShadowOffsetY = 0x00700246
ShadowColorCIELabValue = 0x00700247
Underlined = 0x00700248
Bold = 0x00700249
Italic = 0x00700250
PatternOnColorCIELabValue = 0x00700251
PatternOffColorCIELabValue = 0x00700252
LineThickness = 0x00700253
LineDashingStyle = 0x00700254
LinePattern = 0x00700255
FillPattern = 0x00700256
FillMode = 0x00700257
ShadowOpacity = 0x00700258
GapLength = 0x00700261
DiameterofVisibility = 0x00700262
RotationPoint = 0x00700273
TickAlignment = 0x00700274
ShowTickLabel = 0x00700278
TickLabelAlignment = 0x00700279
CompoundGraphicUnits = 0x00700282
PatternOnOpacity = 0x00700284
PatternOffOpacity = 0x00700285
MajorTicksSequence = 0x00700287
TickPosition = 0x00700288
TickLabel = 0x00700289
CompoundGraphicType = 0x00700294
GraphicGroupID = 0x00700295
ShapeType = 0x00700306
RegistrationSequence = 0x00700308
MatrixRegistrationSequence = 0x00700309
MatrixSequence = 0x0070030A
FrameofReferencetoDisplayedCoordinateSystemTransformationMatrix = 0x0070030B
FrameofReferenceTransformationMatrixType = 0x0070030C
RegistrationTypeCodeSequence = 0x0070030D
FiducialDescription = 0x0070030F
FiducialIdentifier = 0x00700310
FiducialIdentifierCodeSequence = 0x00700311
ContourUncertaintyRadius = 0x00700312
UsedFiducialsSequence = 0x00700314
GraphicCoordinatesDataSequence = 0x00700318
FiducialUID = 0x0070031A
FiducialSetSequence = 0x0070031C
FiducialSequence = 0x0070031E
GraphicLayerRecommendedDisplayCIELabValue = 0x00700401
BlendingSequence = 0x00700402
RelativeOpacity = 0x00700403
ReferencedSpatialRegistrationSequence = 0x00700404
BlendingPosition = 0x00700405
PresentationDisplayCollectionUID = 0x00701101
PresentationSequenceCollectionUID = 0x00701102
PresentationSequencePositionIndex = 0x00701103
RenderedImageReferenceSequence = 0x00701104
VolumetricPresentationStateInputSequence = 0x00701201
PresentationInputType = 0x00701202
InputSequencePositionIndex = 0x00701203
Crop = 0x00701204
CroppingSpecificationIndex = 0x00701205
CompositingMethod = 0x00701206
VolumetricPresentationInputNumber = 0x00701207
ImageVolumeGeometry = 0x00701208
VolumeCroppingSequence = 0x00701301
VolumeCroppingMethod = 0x00701302
BoundingBoxCrop = 0x00701303
ObliqueCroppingPlaneSequence = 0x00701304
Plane = 0x00701305
PlaneNormal = 0x00701306
CroppingSpecificationNumber = 0x00701309
MultiPlanarReconstructionStyle = 0x00701501
MPRThicknessType = 0x00701502
MPRSlabThickness = 0x00701503
MPRTopLeftHandCorner = 0x00701505
MPRViewWidthDirection = 0x00701507
MPRViewWidth = 0x00701508
NumberofVolumetricCurvePoints = 0x0070150C
VolumetricCurvePoints = 0x0070150D
MPRViewHeightDirection = 0x00701511
MPRViewHeight = 0x00701512
PresentationStateClassificationComponentSequence = 0x00701801
ComponentType = 0x00701802
ComponentInputSequence = 0x00701803
VolumetricPresentationInputIndex = int('00701804', 16)
PresentationStateCompositorComponentSequence = int('00701805', 16)
WeightingTransferFunctionSequence = int('00701806', 16)
WeightingLookupTableDescriptor = int('00701807', 16)
WeightingLookupTableData = int('00701808', 16)
VolumetricAnnotationSequence = int('00701901', 16)
ReferencedStructuredContextSequence = int('00701903', 16)
ReferencedContentItem = int('00701904', 16)
VolumetricPresentationInputAnnotationSequence = int('00701905', 16)
AnnotationClipping = int('00701907', 16)
PresentationAnimationStyle = int('00701A01', 16)
RecommendedAnimationRate = int('00701A03', 16)
AnimationCurveSequence = int('00701A04', 16)
AnimationStepSize = int('00701A05', 16)
HangingProtocolName = int('00720002', 16)
HangingProtocolDescription = int('00720004', 16)
HangingProtocolLevel = int('00720006', 16)
HangingProtocolCreator = int('00720008', 16)
HangingProtocolCreationDateTime = int('0072000A', 16)
HangingProtocolDefinitionSequence = int('0072000C', 16)
HangingProtocolUserIdentificationCodeSequence = int('0072000E', 16)
HangingProtocolUserGroupName = int('00720010', 16)
SourceHangingProtocolSequence = int('00720012', 16)
NumberofPriorsReferenced = int('00720014', 16)
ImageSetsSequence = int('00720020', 16)
ImageSetSelectorSequence = int('00720022', 16)
ImageSetSelectorUsageFlag = int('00720024', 16)
SelectorAttribute = int('00720026', 16)
SelectorValueNumber = int('00720028', 16)
TimeBasedImageSetsSequence = int('00720030', 16)
ImageSetNumber = int('00720032', 16)
ImageSetSelectorCategory = int('00720034', 16)
RelativeTime = int('00720038', 16)
RelativeTimeUnits = int('0072003A', 16)
AbstractPriorValue = int('0072003C', 16)
AbstractPriorCodeSequence = int('0072003E', 16)
ImageSetLabel = int('00720040', 16)
SelectorAttributeVR = int('00720050', 16)
SelectorSequencePointer = int('00720052', 16)
SelectorSequencePointerPrivateCreator = int('00720054', 16)
SelectorAttributePrivateCreator = int('00720056', 16)
SelectorAEValue = int('0072005E', 16)
SelectorASValue = int('0072005F', 16)
SelectorATValue = int('00720060', 16)
SelectorDAValue = int('00720061', 16)
SelectorCSValue = int('00720062', 16)
SelectorDTValue = int('00720063', 16)
SelectorISValue = int('00720064', 16)
SelectorOBValue = int('00720065', 16)
SelectorLOValue = int('00720066', 16)
SelectorOFValue = int('00720067', 16)
SelectorLTValue = int('00720068', 16)
SelectorOWValue = int('00720069', 16)
SelectorPNValue = int('0072006A', 16)
SelectorTMValue = int('0072006B', 16)
SelectorSHValue = int('0072006C', 16)
SelectorUNValue = int('0072006D', 16)
SelectorSTValue = int('0072006E', 16)
SelectorUCValue = int('0072006F', 16)
SelectorUTValue = int('00720070', 16)
SelectorURValue = int('00720071', 16)
SelectorDSValue = int('00720072', 16)
SelectorODValue = int('00720073', 16)
SelectorFDValue = int('00720074', 16)
SelectorOLValue = int('00720075', 16)
SelectorFLValue = int('00720076', 16)
SelectorULValue = int('00720078', 16)
SelectorUSValue = int('0072007A', 16)
SelectorSLValue = int('0072007C', 16)
SelectorSSValue = int('0072007E', 16)
SelectorUIValue = int('0072007F', 16)
SelectorCodeSequenceValue = int('00720080', 16)
NumberofScreens = int('00720100', 16)
NominalScreenDefinitionSequence = int('00720102', 16)
NumberofVerticalPixels = int('00720104', 16)
NumberofHorizontalPixels = int('00720106', 16)
DisplayEnvironmentSpatialPosition = int('00720108', 16)
ScreenMinimumGrayscaleBitDepth = int('0072010A', 16)
ScreenMinimumColorBitDepth = int('0072010C', 16)
ApplicationMaximumRepaintTime = int('0072010E', 16)
DisplaySetsSequence = int('00720200', 16)
DisplaySetNumber = int('00720202', 16)
DisplaySetLabel = int('00720203', 16)
DisplaySetPresentationGroup = int('00720204', 16)
DisplaySetPresentationGroupDescription = int('00720206', 16)
PartialDataDisplayHandling = int('00720208', 16)
SynchronizedScrollingSequence = int('00720210', 16)
DisplaySetScrollingGroup = int('00720212', 16)
NavigationIndicatorSequence = int('00720214', 16)
NavigationDisplaySet = int('00720216', 16)
ReferenceDisplaySets = int('00720218', 16)
ImageBoxesSequence = int('00720300', 16)
ImageBoxNumber = int('00720302', 16)
ImageBoxLayoutType = int('00720304', 16)
ImageBoxTileHorizontalDimension = int('00720306', 16)
ImageBoxTileVerticalDimension = int('00720308', 16)
ImageBoxScrollDirection = int('00720310', 16)
ImageBoxSmallScrollType = int('00720312', 16)
ImageBoxSmallScrollAmount = int('00720314', 16)
ImageBoxLargeScrollType = int('00720316', 16)
ImageBoxLargeScrollAmount = int('00720318', 16)
ImageBoxOverlapPriority = int('00720320', 16)
CineRelativetoRealTime = int('00720330', 16)
FilterOperationsSequence = int('00720400', 16)
FilterbyCategory = int('00720402', 16)
FilterbyAttributePresence = int('00720404', 16)
FilterbyOperator = int('00720406', 16)
StructuredDisplayBackgroundCIELabValue = int('00720420', 16)
EmptyImageBoxCIELabValue = int('00720421', 16)
StructuredDisplayImageBoxSequence = int('00720422', 16)
StructuredDisplayTextBoxSequence = int('00720424', 16)
ReferencedFirstFrameSequence = int('00720427', 16)
ImageBoxSynchronizationSequence = int('00720430', 16)
SynchronizedImageBoxList = int('00720432', 16)
TypeofSynchronization = int('00720434', 16)
BlendingOperationType = int('00720500', 16)
ReformattingOperationType = int('00720510', 16)
ReformattingThickness = int('00720512', 16)
ReformattingInterval = int('00720514', 16)
ReformattingOperationInitialViewDirection = int('00720516', 16)
ThreeDRenderingType = int('00720520', 16)
SortingOperationsSequence = int('00720600', 16)
SortbyCategory = int('00720602', 16)
SortingDirection = int('00720604', 16)
DisplaySetPatientOrientation = int('00720700', 16)
VOIType = int('00720702', 16)
PseudoColorType = int('00720704', 16)
PseudoColorPaletteInstanceReferenceSequence = int('00720705', 16)
ShowGrayscaleInverted = int('00720706', 16)
ShowImageTrueSizeFlag = int('00720710', 16)
ShowGraphicAnnotationFlag = int('00720712', 16)
ShowPatientDemographicsFlag = int('00720714', 16)
ShowAcquisitionTechniquesFlag = int('00720716', 16)
DisplaySetHorizontalJustification = int('00720717', 16)
DisplaySetVerticalJustification = int('00720718', 16)
ContinuationStartMeterset = int('00740120', 16)
ContinuationEndMeterset = int('00740121', 16)
ProcedureStepState = int('00741000', 16)
ProcedureStepProgressInformationSequence = int('00741002', 16)
ProcedureStepProgress = int('00741004', 16)
ProcedureStepProgressDescription = int('00741006', 16)
ProcedureStepCommunicationsURISequence = int('00741008', 16)
ContactURI = int('0074100A', 16)
ContactDisplayName = int('0074100C', 16)
ProcedureStepDiscontinuationReasonCodeSequence = int('0074100E', 16)
BeamTaskSequence = int('00741020', 16)
BeamTaskType = int('00741022', 16)
BeamOrderIndexTrial = int('00741024', 16)
AutosequenceFlag = int('00741025', 16)
TableTopVerticalAdjustedPosition = int('00741026', 16)
TableTopLongitudinalAdjustedPosition = int('00741027', 16)
TableTopLateralAdjustedPosition = int('00741028', 16)
PatientSupportAdjustedAngle = int('0074102A', 16)
TableTopEccentricAdjustedAngle = int('0074102B', 16)
TableTopPitchAdjustedAngle = int('0074102C', 16)
TableTopRollAdjustedAngle = int('0074102D', 16)
DeliveryVerificationImageSequence = int('00741030', 16)
VerificationImageTiming = int('00741032', 16)
DoubleExposureFlag = int('00741034', 16)
DoubleExposureOrdering = int('00741036', 16)
DoubleExposureMetersetTrial = int('00741038', 16)
DoubleExposureFieldDeltaTrial = int('0074103A', 16)
RelatedReferenceRTImageSequence = int('00741040', 16)
GeneralMachineVerificationSequence = int('00741042', 16)
ConventionalMachineVerificationSequence = int('00741044', 16)
IonMachineVerificationSequence = int('00741046', 16)
FailedAttributesSequence = int('00741048', 16)
OverriddenAttributesSequence = int('0074104A', 16)
ConventionalControlPointVerificationSequence = int('0074104C', 16)
IonControlPointVerificationSequence = int('0074104E', 16)
AttributeOccurrenceSequence = int('00741050', 16)
AttributeOccurrencePointer = int('00741052', 16)
AttributeItemSelector = int('00741054', 16)
AttributeOccurrencePrivateCreator = int('00741056', 16)
SelectorSequencePointerItems = int('00741057', 16)
ScheduledProcedureStepPriority = int('00741200', 16)
WorklistLabel = int('00741202', 16)
ProcedureStepLabel = int('00741204', 16)
ScheduledProcessingParametersSequence = int('00741210', 16)
PerformedProcessingParametersSequence = int('00741212', 16)
UnifiedProcedureStepPerformedProcedureSequence = int('00741216', 16)
RelatedProcedureStepSequence = int('00741220', 16)
ProcedureStepRelationshipType = int('00741222', 16)
ReplacedProcedureStepSequence = int('00741224', 16)
DeletionLock = int('00741230', 16)
ReceivingAE = int('00741234', 16)
RequestingAE = int('00741236', 16)
ReasonforCancellation = int('00741238', 16)
SCPStatus = int('00741242', 16)
SubscriptionListStatus = int('00741244', 16)
UnifiedProcedureStepListStatus = int('00741246', 16)
BeamOrderIndex = int('00741324', 16)
DoubleExposureMeterset = int('00741338', 16)
DoubleExposureFieldDelta = int('0074133A', 16)
BrachyTaskSequence = int('00741401', 16)
ContinuationStartTotalReferenceAirKerma = int('00741402', 16)
ContinuationEndTotalReferenceAirKerma = int('00741403', 16)
ContinuationPulseNumber = int('00741404', 16)
ChannelDeliveryOrderSequence = int('00741405', 16)
ReferencedChannelNumber = int('00741406', 16)
StartCumulativeTimeWeight = int('00741407', 16)
EndCumulativeTimeWeight = int('00741408', 16)
OmittedChannelSequence = int('00741409', 16)
ReasonforChannelOmission = int('0074140A', 16)
ReasonforChannelOmissionDescription = int('0074140B', 16)
ChannelDeliveryOrderIndex = int('0074140C', 16)
ChannelDeliveryContinuationSequence = int('0074140D', 16)
OmittedApplicationSetupSequence = int('0074140E', 16)
ImplantAssemblyTemplateName = int('00760001', 16)
ImplantAssemblyTemplateIssuer = int('00760003', 16)
ImplantAssemblyTemplateVersion = int('00760006', 16)
ReplacedImplantAssemblyTemplateSequence = int('00760008', 16)
ImplantAssemblyTemplateType = int('0076000A', 16)
OriginalImplantAssemblyTemplateSequence = int('0076000C', 16)
DerivationImplantAssemblyTemplateSequence = int('0076000E', 16)
ImplantAssemblyTemplateTargetAnatomySequence = int('00760010', 16)
ProcedureTypeCodeSequence = int('00760020', 16)
SurgicalTechnique = int('00760030', 16)
ComponentTypesSequence = int('00760032', 16)
ComponentTypeCodeSequence = int('00760034', 16)
ExclusiveComponentType = int('00760036', 16)
MandatoryComponentType = int('00760038', 16)
ComponentSequence = int('00760040', 16)
ComponentID = int('00760055', 16)
ComponentAssemblySequence = int('00760060', 16)
Component1ReferencedID = int('00760070', 16)
Component1ReferencedMatingFeatureSetID = int('00760080', 16)
Component1ReferencedMatingFeatureID = int('00760090', 16)
Component2ReferencedID = int('007600A0', 16)
Component2ReferencedMatingFeatureSetID = int('007600B0', 16)
Component2ReferencedMatingFeatureID = int('007600C0', 16)
ImplantTemplateGroupName = int('00780001', 16)
ImplantTemplateGroupDescription = int('00780010', 16)
ImplantTemplateGroupIssuer = int('00780020', 16)
ImplantTemplateGroupVersion = int('00780024', 16)
ReplacedImplantTemplateGroupSequence = int('00780026', 16)
ImplantTemplateGroupTargetAnatomySequence = int('00780028', 16)
ImplantTemplateGroupMembersSequence = int('0078002A', 16)
ImplantTemplateGroupMemberID = int('0078002E', 16)
ThreeDImplantTemplateGroupMemberMatchingPoint = int('00780050', 16)
ThreeDImplantTemplateGroupMemberMatchingAxes = int('00780060', 16)
ImplantTemplateGroupMemberMatching2DCoordinatesSequence = int(
'00780070', 16)
TwoDImplantTemplateGroupMemberMatchingPoint = int('00780090', 16)
TwoDImplantTemplateGroupMemberMatchingAxes = int('007800A0', 16)
ImplantTemplateGroupVariationDimensionSequence = int('007800B0', 16)
ImplantTemplateGroupVariationDimensionName = int('007800B2', 16)
ImplantTemplateGroupVariationDimensionRankSequence = int('007800B4', 16)
ReferencedImplantTemplateGroupMemberID = int('007800B6', 16)
ImplantTemplateGroupVariationDimensionRank = int('007800B8', 16)
SurfaceScanAcquisitionTypeCodeSequence = int('00800001', 16)
SurfaceScanModeCodeSequence = int('00800002', 16)
RegistrationMethodCodeSequence = int('00800003', 16)
ShotDurationTime = int('00800004', 16)
ShotOffsetTime = int('00800005', 16)
SurfacePointPresentationValueData = int('00800006', 16)
SurfacePointColorCIELabValueData = int('00800007', 16)
UVMappingSequence = int('00800008', 16)
TextureLabel = int('00800009', 16)
UValueData = int('00800010', 16)
VValueData = int('00800011', 16)
ReferencedTextureSequence = int('00800012', 16)
ReferencedSurfaceDataSequence = int('00800013', 16)
AssessmentSummary = int('00820001', 16)
AssessmentSummaryDescription = int('00820003', 16)
AssessedSOPInstanceSequence = int('00820004', 16)
ReferencedComparisonSOPInstanceSequence = int('00820005', 16)
NumberofAssessmentObservations = int('00820006', 16)
AssessmentObservationsSequence = int('00820007', 16)
ObservationSignificance = int('00820008', 16)
ObservationDescription = int('0082000A', 16)
StructuredConstraintObservationSequence = int('0082000C', 16)
AssessedAttributeValueSequence = int('00820010', 16)
AssessmentSetID = int('00820016', 16)
AssessmentRequesterSequence = int('00820017', 16)
SelectorAttributeName = int('00820018', 16)
SelectorAttributeKeyword = int('00820019', 16)
AssessmentTypeCodeSequence = int('00820021', 16)
ObservationBasisCodeSequence = int('00820022', 16)
AssessmentLabel = int('00820023', 16)
ConstraintType = int('00820032', 16)
SpecificationSelectionGuidance = int('00820033', 16)
ConstraintValueSequence = int('00820034', 16)
RecommendedDefaultValueSequence = int('00820035', 16)
ConstraintViolationSignificance = int('00820036', 16)
ConstraintViolationCondition = int('00820037', 16)
StorageMediaFilesetID = int('00880130', 16)
StorageMediaFilesetUID = int('00880140', 16)
IconImageSequence = int('00880200', 16)
TopicTitle = int('00880904', 16)
TopicSubject = int('00880906', 16)
TopicAuthor = int('00880910', 16)
TopicKeywords = int('00880912', 16)
SOPInstanceStatus = int('01000410', 16)
SOPAuthorizationDateTime = int('01000420', 16)
SOPAuthorizationComment = int('01000424', 16)
AuthorizationEquipmentCertificationNumber = int('01000426', 16)
MACIDNumber = int('04000005', 16)
MACCalculationTransferSyntaxUID = int('04000010', 16)
MACAlgorithm = int('04000015', 16)
DataElementsSigned = int('04000020', 16)
DigitalSignatureUID = int('04000100', 16)
DigitalSignatureDateTime = int('04000105', 16)
CertificateType = int('04000110', 16)
CertificateofSigner = int('04000115', 16)
Signature = int('04000120', 16)
CertifiedTimestampType = int('04000305', 16)
CertifiedTimestamp = int('04000310', 16)
DigitalSignaturePurposeCodeSequence = int('04000401', 16)
ReferencedDigitalSignatureSequence = int('04000402', 16)
ReferencedSOPInstanceMACSequence = int('04000403', 16)
MAC = int('04000404', 16)
EncryptedAttributesSequence = int('04000500', 16)
EncryptedContentTransferSyntaxUID = int('04000510', 16)
EncryptedContent = int('04000520', 16)
ModifiedAttributesSequence = int('04000550', 16)
OriginalAttributesSequence = int('04000561', 16)
AttributeModificationDateTime = int('04000562', 16)
ModifyingSystem = int('04000563', 16)
SourceofPreviousValues = int('04000564', 16)
ReasonfortheAttributeModification = int('04000565', 16)
NumberofCopies = int('20000010', 16)
PrinterConfigurationSequence = int('2000001E', 16)
PrintPriority = int('20000020', 16)
MediumType = int('20000030', 16)
FilmDestination = int('20000040', 16)
FilmSessionLabel = int('20000050', 16)
MemoryAllocation = int('20000060', 16)
MaximumMemoryAllocation = int('20000061', 16)
ColorImagePrintingFlag = int('20000062', 16)
CollationFlag = int('20000063', 16)
AnnotationFlag = int('20000065', 16)
ImageOverlayFlag = int('20000067', 16)
PresentationLUTFlag = int('20000069', 16)
ImageBoxPresentationLUTFlag = int('2000006A', 16)
MemoryBitDepth = int('200000A0', 16)
PrintingBitDepth = int('200000A1', 16)
MediaInstalledSequence = int('200000A2', 16)
OtherMediaAvailableSequence = int('200000A4', 16)
SupportedImageDisplayFormatsSequence = int('200000A8', 16)
ReferencedFilmBoxSequence = int('20000500', 16)
ReferencedStoredPrintSequence = int('20000510', 16)
ImageDisplayFormat = int('20100010', 16)
AnnotationDisplayFormatID = int('20100030', 16)
FilmOrientation = int('20100040', 16)
FilmSizeID = int('20100050', 16)
PrinterResolutionID = int('20100052', 16)
DefaultPrinterResolutionID = int('20100054', 16)
MagnificationType = int('20100060', 16)
SmoothingType = int('20100080', 16)
DefaultMagnificationType = int('201000A6', 16)
OtherMagnificationTypesAvailable = int('201000A7', 16)
DefaultSmoothingType = int('201000A8', 16)
OtherSmoothingTypesAvailable = int('201000A9', 16)
BorderDensity = int('20100100', 16)
EmptyImageDensity = int('20100110', 16)
MinDensity = int('20100120', 16)
MaxDensity = int('20100130', 16)
Trim = int('20100140', 16)
ConfigurationInformation = int('20100150', 16)
ConfigurationInformationDescription = int('20100152', 16)
MaximumCollatedFilms = int('20100154', 16)
Illumination = int('2010015E', 16)
ReflectedAmbientLight = int('20100160', 16)
PrinterPixelSpacing = int('20100376', 16)
ReferencedFilmSessionSequence = int('20100500', 16)
ReferencedImageBoxSequence = int('20100510', 16)
ReferencedBasicAnnotationBoxSequence = int('20100520', 16)
ImageBoxPosition = int('20200010', 16)
Polarity = int('20200020', 16)
RequestedImageSize = int('20200030', 16)
RequestedDecimateCropBehavior = int('20200040', 16)
RequestedResolutionID = int('20200050', 16)
RequestedImageSizeFlag = int('202000A0', 16)
DecimateCropResult = int('202000A2', 16)
BasicGrayscaleImageSequence = int('20200110', 16)
BasicColorImageSequence = int('20200111', 16)
ReferencedImageOverlayBoxSequence = int('20200130', 16)
ReferencedVOILUTBoxSequence = int('20200140', 16)
AnnotationPosition = int('20300010', 16)
TextString = int('20300020', 16)
ReferencedOverlayPlaneSequence = int('20400010', 16)
ReferencedOverlayPlaneGroups = int('20400011', 16)
OverlayPixelDataSequence = int('20400020', 16)
OverlayMagnificationType = int('20400060', 16)
OverlaySmoothingType = int('20400070', 16)
OverlayorImageMagnification = int('20400072', 16)
MagnifytoNumberofColumns = int('20400074', 16)
OverlayForegroundDensity = int('20400080', 16)
OverlayBackgroundDensity = int('20400082', 16)
OverlayMode = int('20400090', 16)
ThresholdDensity = int('20400100', 16)
ReferencedImageBoxSequenceRetired = int('20400500', 16)
PresentationLUTSequence = int('20500010', 16)
PresentationLUTShape = int('20500020', 16)
ReferencedPresentationLUTSequence = int('20500500', 16)
PrintJobID = int('21000010', 16)
ExecutionStatus = int('21000020', 16)
ExecutionStatusInfo = int('21000030', 16)
CreationDate = int('21000040', 16)
CreationTime = int('21000050', 16)
Originator = int('21000070', 16)
DestinationAE = int('21000140', 16)
OwnerID = int('21000160', 16)
NumberofFilms = int('21000170', 16)
ReferencedPrintJobSequencePullStoredPrint = int('21000500', 16)
PrinterStatus = int('21100010', 16)
PrinterStatusInfo = int('21100020', 16)
PrinterName = int('21100030', 16)
PrintQueueID = int('21100099', 16)
QueueStatus = int('21200010', 16)
PrintJobDescriptionSequence = int('21200050', 16)
ReferencedPrintJobSequence = int('21200070', 16)
PrintManagementCapabilitiesSequence = int('21300010', 16)
PrinterCharacteristicsSequence = int('21300015', 16)
FilmBoxContentSequence = int('21300030', 16)
ImageBoxContentSequence = int('21300040', 16)
AnnotationContentSequence = int('21300050', 16)
ImageOverlayBoxContentSequence = int('21300060', 16)
PresentationLUTContentSequence = int('21300080', 16)
ProposedStudySequence = int('213000A0', 16)
OriginalImageSequence = int('213000C0', 16)
LabelUsingInformationExtractedFromInstances = int('22000001', 16)
LabelText = int('22000002', 16)
LabelStyleSelection = int('22000003', 16)
MediaDisposition = int('22000004', 16)
BarcodeValue = int('22000005', 16)
BarcodeSymbology = int('22000006', 16)
AllowMediaSplitting = int('22000007', 16)
IncludeNonDICOMObjects = int('22000008', 16)
IncludeDisplayApplication = int('22000009', 16)
PreserveCompositeInstancesAfterMediaCreation = int('2200000A', 16)
TotalNumberofPiecesofMediaCreated = int('2200000B', 16)
RequestedMediaApplicationProfile = int('2200000C', 16)
ReferencedStorageMediaSequence = int('2200000D', 16)
FailureAttributes = int('2200000E', 16)
AllowLossyCompression = int('2200000F', 16)
RequestPriority = int('22000020', 16)
RTImageLabel = int('30020002', 16)
RTImageName = int('30020003', 16)
RTImageDescription = int('30020004', 16)
ReportedValuesOrigin = int('3002000A', 16)
RTImagePlane = int('3002000C', 16)
XRayImageReceptorTranslation = int('3002000D', 16)
XRayImageReceptorAngle = int('3002000E', 16)
RTImageOrientation = int('30020010', 16)
ImagePlanePixelSpacing = int('30020011', 16)
RTImagePosition = int('30020012', 16)
RadiationMachineName = int('30020020', 16)
RadiationMachineSAD = int('30020022', 16)
RadiationMachineSSD = int('30020024', 16)
RTImageSID = int('30020026', 16)
SourcetoReferenceObjectDistance = int('30020028', 16)
FractionNumber = int('30020029', 16)
ExposureSequence = int('30020030', 16)
MetersetExposure = int('30020032', 16)
DiaphragmPosition = int('30020034', 16)
FluenceMapSequence = int('30020040', 16)
FluenceDataSource = int('30020041', 16)
FluenceDataScale = int('30020042', 16)
PrimaryFluenceModeSequence = int('30020050', 16)
FluenceMode = int('30020051', 16)
FluenceModeID = int('30020052', 16)
DVHType = int('30040001', 16)
DoseUnits = int('30040002', 16)
DoseType = int('30040004', 16)
SpatialTransformofDose = int('30040005', 16)
DoseComment = int('30040006', 16)
NormalizationPoint = int('30040008', 16)
DoseSummationType = int('3004000A', 16)
GridFrameOffsetVector = int('3004000C', 16)
DoseGridScaling = int('3004000E', 16)
RTDoseROISequence = int('30040010', 16)
DoseValue = int('30040012', 16)
TissueHeterogeneityCorrection = int('30040014', 16)
DVHNormalizationPoint = int('30040040', 16)
DVHNormalizationDoseValue = int('30040042', 16)
DVHSequence = int('30040050', 16)
DVHDoseScaling = int('30040052', 16)
DVHVolumeUnits = int('30040054', 16)
DVHNumberofBins = int('30040056', 16)
DVHData = int('30040058', 16)
DVHReferencedROISequence = int('30040060', 16)
DVHROIContributionType = int('30040062', 16)
DVHMinimumDose = int('30040070', 16)
DVHMaximumDose = int('30040072', 16)
DVHMeanDose = int('30040074', 16)
StructureSetLabel = int('30060002', 16)
StructureSetName = int('30060004', 16)
StructureSetDescription = int('30060006', 16)
StructureSetDate = int('30060008', 16)
StructureSetTime = int('30060009', 16)
ReferencedFrameofReferenceSequence = int('30060010', 16)
RTReferencedStudySequence = int('30060012', 16)
RTReferencedSeriesSequence = int('30060014', 16)
ContourImageSequence = int('30060016', 16)
PredecessorStructureSetSequence = int('30060018', 16)
StructureSetROISequence = int('30060020', 16)
ROINumber = int('30060022', 16)
ReferencedFrameofReferenceUID = int('30060024', 16)
ROIName = int('30060026', 16)
ROIDescription = int('30060028', 16)
ROIDisplayColor = int('3006002A', 16)
ROIVolume = int('3006002C', 16)
RTRelatedROISequence = int('30060030', 16)
RTROIRelationship = int('30060033', 16)
ROIGenerationAlgorithm = int('30060036', 16)
ROIGenerationDescription = int('30060038', 16)
ROIContourSequence = int('30060039', 16)
ContourSequence = int('30060040', 16)
ContourGeometricType = int('30060042', 16)
ContourSlabThickness = int('30060044', 16)
ContourOffsetVector = int('30060045', 16)
NumberofContourPoints = int('30060046', 16)
ContourNumber = int('30060048', 16)
AttachedContours = int('30060049', 16)
ContourData = int('30060050', 16)
RTROIObservationsSequence = int('30060080', 16)
ObservationNumber = int('30060082', 16)
ReferencedROINumber = int('30060084', 16)
ROIObservationLabel = int('30060085', 16)
RTROIIdentificationCodeSequence = int('30060086', 16)
ROIObservationDescription = int('30060088', 16)
RelatedRTROIObservationsSequence = int('300600A0', 16)
RTROIInterpretedType = int('300600A4', 16)
ROIInterpreter = int('300600A6', 16)
ROIPhysicalPropertiesSequence = int('300600B0', 16)
ROIPhysicalProperty = int('300600B2', 16)
ROIPhysicalPropertyValue = int('300600B4', 16)
ROIElementalCompositionSequence = int('300600B6', 16)
ROIElementalCompositionAtomicNumber = int('300600B7', 16)
ROIElementalCompositionAtomicMassFraction = int('300600B8', 16)
AdditionalRTROIIdentificationCodeSequence = int('300600B9', 16)
FrameofReferenceRelationshipSequence = int('300600C0', 16)
RelatedFrameofReferenceUID = int('300600C2', 16)
FrameofReferenceTransformationType = int('300600C4', 16)
FrameofReferenceTransformationMatrix = int('300600C6', 16)
FrameofReferenceTransformationComment = int('300600C8', 16)
MeasuredDoseReferenceSequence = int('30080010', 16)
MeasuredDoseDescription = int('30080012', 16)
MeasuredDoseType = int('30080014', 16)
MeasuredDoseValue = int('30080016', 16)
TreatmentSessionBeamSequence = int('30080020', 16)
TreatmentSessionIonBeamSequence = int('30080021', 16)
CurrentFractionNumber = int('30080022', 16)
TreatmentControlPointDate = int('30080024', 16)
TreatmentControlPointTime = int('30080025', 16)
TreatmentTerminationStatus = int('3008002A', 16)
TreatmentTerminationCode = int('3008002B', 16)
TreatmentVerificationStatus = int('3008002C', 16)
ReferencedTreatmentRecordSequence = int('30080030', 16)
SpecifiedPrimaryMeterset = int('30080032', 16)
SpecifiedSecondaryMeterset = int('30080033', 16)
DeliveredPrimaryMeterset = int('30080036', 16)
DeliveredSecondaryMeterset = int('30080037', 16)
SpecifiedTreatmentTime = int('3008003A', 16)
DeliveredTreatmentTime = int('3008003B', 16)
ControlPointDeliverySequence = int('30080040', 16)
IonControlPointDeliverySequence = int('30080041', 16)
SpecifiedMeterset = int('30080042', 16)
DeliveredMeterset = int('30080044', 16)
MetersetRateSet = int('30080045', 16)
MetersetRateDelivered = int('30080046', 16)
ScanSpotMetersetsDelivered = int('30080047', 16)
DoseRateDelivered = int('30080048', 16)
TreatmentSummaryCalculatedDoseReferenceSequence = int('30080050', 16)
CumulativeDosetoDoseReference = int('30080052', 16)
FirstTreatmentDate = int('30080054', 16)
MostRecentTreatmentDate = int('30080056', 16)
NumberofFractionsDelivered = int('3008005A', 16)
OverrideSequence = int('30080060', 16)
ParameterSequencePointer = int('30080061', 16)
OverrideParameterPointer = int('30080062', 16)
ParameterItemIndex = int('30080063', 16)
MeasuredDoseReferenceNumber = int('30080064', 16)
ParameterPointer = int('30080065', 16)
OverrideReason = int('30080066', 16)
CorrectedParameterSequence = int('30080068', 16)
CorrectionValue = int('3008006A', 16)
CalculatedDoseReferenceSequence = int('30080070', 16)
CalculatedDoseReferenceNumber = int('30080072', 16)
CalculatedDoseReferenceDescription = int('30080074', 16)
CalculatedDoseReferenceDoseValue = int('30080076', 16)
StartMeterset = int('30080078', 16)
EndMeterset = int('3008007A', 16)
ReferencedMeasuredDoseReferenceSequence = int('30080080', 16)
ReferencedMeasuredDoseReferenceNumber = int('30080082', 16)
ReferencedCalculatedDoseReferenceSequence = int('30080090', 16)
ReferencedCalculatedDoseReferenceNumber = int('30080092', 16)
BeamLimitingDeviceLeafPairsSequence = int('300800A0', 16)
RecordedWedgeSequence = int('300800B0', 16)
RecordedCompensatorSequence = int('300800C0', 16)
RecordedBlockSequence = int('300800D0', 16)
TreatmentSummaryMeasuredDoseReferenceSequence = int('300800E0', 16)
RecordedSnoutSequence = int('300800F0', 16)
RecordedRangeShifterSequence = int('300800F2', 16)
RecordedLateralSpreadingDeviceSequence = int('300800F4', 16)
RecordedRangeModulatorSequence = int('300800F6', 16)
RecordedSourceSequence = int('30080100', 16)
SourceSerialNumber = int('30080105', 16)
TreatmentSessionApplicationSetupSequence = int('30080110', 16)
ApplicationSetupCheck = int('30080116', 16)
RecordedBrachyAccessoryDeviceSequence = int('30080120', 16)
ReferencedBrachyAccessoryDeviceNumber = int('30080122', 16)
RecordedChannelSequence = int('30080130', 16)
SpecifiedChannelTotalTime = int('30080132', 16)
DeliveredChannelTotalTime = int('30080134', 16)
SpecifiedNumberofPulses = int('30080136', 16)
DeliveredNumberofPulses = int('30080138', 16)
SpecifiedPulseRepetitionInterval = int('3008013A', 16)
DeliveredPulseRepetitionInterval = int('3008013C', 16)
RecordedSourceApplicatorSequence = int('30080140', 16)
ReferencedSourceApplicatorNumber = int('30080142', 16)
RecordedChannelShieldSequence = int('30080150', 16)
ReferencedChannelShieldNumber = int('30080152', 16)
BrachyControlPointDeliveredSequence = int('30080160', 16)
SafePositionExitDate = int('30080162', 16)
SafePositionExitTime = int('30080164', 16)
SafePositionReturnDate = int('30080166', 16)
SafePositionReturnTime = int('30080168', 16)
PulseSpecificBrachyControlPointDeliveredSequence = int('30080171', 16)
PulseNumber = int('30080172', 16)
BrachyPulseControlPointDeliveredSequence = int('30080173', 16)
CurrentTreatmentStatus = int('30080200', 16)
TreatmentStatusComment = int('30080202', 16)
FractionGroupSummarySequence = int('30080220', 16)
ReferencedFractionNumber = int('30080223', 16)
FractionGroupType = int('30080224', 16)
BeamStopperPosition = int('30080230', 16)
FractionStatusSummarySequence = int('30080240', 16)
TreatmentDate = int('30080250', 16)
TreatmentTime = int('30080251', 16)
RTPlanLabel = int('300A0002', 16)
RTPlanName = int('300A0003', 16)
RTPlanDescription = int('300A0004', 16)
RTPlanDate = int('300A0006', 16)
RTPlanTime = int('300A0007', 16)
TreatmentProtocols = int('300A0009', 16)
PlanIntent = int('300A000A', 16)
TreatmentSites = int('300A000B', 16)
RTPlanGeometry = int('300A000C', 16)
PrescriptionDescription = int('300A000E', 16)
DoseReferenceSequence = int('300A0010', 16)
DoseReferenceNumber = int('300A0012', 16)
DoseReferenceUID = int('300A0013', 16)
DoseReferenceStructureType = int('300A0014', 16)
NominalBeamEnergyUnit = int('300A0015', 16)
DoseReferenceDescription = int('300A0016', 16)
DoseReferencePointCoordinates = int('300A0018', 16)
NominalPriorDose = int('300A001A', 16)
DoseReferenceType = int('300A0020', 16)
ConstraintWeight = int('300A0021', 16)
DeliveryWarningDose = int('300A0022', 16)
DeliveryMaximumDose = int('300A0023', 16)
TargetMinimumDose = int('300A0025', 16)
TargetPrescriptionDose = int('300A0026', 16)
TargetMaximumDose = int('300A0027', 16)
TargetUnderdoseVolumeFraction = int('300A0028', 16)
OrganatRiskFullvolumeDose = int('300A002A', 16)
OrganatRiskLimitDose = int('300A002B', 16)
OrganatRiskMaximumDose = int('300A002C', 16)
OrganatRiskOverdoseVolumeFraction = int('300A002D', 16)
ToleranceTableSequence = int('300A0040', 16)
ToleranceTableNumber = int('300A0042', 16)
ToleranceTableLabel = int('300A0043', 16)
GantryAngleTolerance = int('300A0044', 16)
BeamLimitingDeviceAngleTolerance = int('300A0046', 16)
BeamLimitingDeviceToleranceSequence = int('300A0048', 16)
BeamLimitingDevicePositionTolerance = int('300A004A', 16)
SnoutPositionTolerance = int('300A004B', 16)
PatientSupportAngleTolerance = int('300A004C', 16)
TableTopEccentricAngleTolerance = int('300A004E', 16)
TableTopPitchAngleTolerance = int('300A004F', 16)
TableTopRollAngleTolerance = int('300A0050', 16)
TableTopVerticalPositionTolerance = int('300A0051', 16)
TableTopLongitudinalPositionTolerance = int('300A0052', 16)
TableTopLateralPositionTolerance = int('300A0053', 16)
RTPlanRelationship = int('300A0055', 16)
FractionGroupSequence = int('300A0070', 16)
FractionGroupNumber = int('300A0071', 16)
FractionGroupDescription = int('300A0072', 16)
NumberofFractionsPlanned = int('300A0078', 16)
NumberofFractionPatternDigitsPerDay = int('300A0079', 16)
RepeatFractionCycleLength = int('300A007A', 16)
FractionPattern = int('300A007B', 16)
NumberofBeams = int('300A0080', 16)
BeamDoseSpecificationPoint = int('300A0082', 16)
BeamDose = int('300A0084', 16)
BeamMeterset = int('300A0086', 16)
BeamDosePointDepth = int('300A0088', 16)
BeamDosePointEquivalentDepth = int('300A0089', 16)
BeamDosePointSSD = int('300A008A', 16)
BeamDoseMeaning = int('300A008B', 16)
BeamDoseVerificationControlPointSequence = int('300A008C', 16)
AverageBeamDosePointDepth = int('300A008D', 16)
AverageBeamDosePointEquivalentDepth = int('300A008E', 16)
AverageBeamDosePointSSD = int('300A008F', 16)
BeamDoseType = int('300A0090', 16)
AlternateBeamDose = int('300A0091', 16)
AlternateBeamDoseType = int('300A0092', 16)
NumberofBrachyApplicationSetups = int('300A00A0', 16)
BrachyApplicationSetupDoseSpecificationPoint = int('300A00A2', 16)
BrachyApplicationSetupDose = int('300A00A4', 16)
BeamSequence = int('300A00B0', 16)
TreatmentMachineName = int('300A00B2', 16)
PrimaryDosimeterUnit = int('300A00B3', 16)
SourceAxisDistance = int('300A00B4', 16)
BeamLimitingDeviceSequence = int('300A00B6', 16)
RTBeamLimitingDeviceType = int('300A00B8', 16)
SourcetoBeamLimitingDeviceDistance = int('300A00BA', 16)
IsocentertoBeamLimitingDeviceDistance = int('300A00BB', 16)
NumberofLeafJawPairs = int('300A00BC', 16)
LeafPositionBoundaries = int('300A00BE', 16)
BeamNumber = int('300A00C0', 16)
BeamName = int('300A00C2', 16)
BeamDescription = int('300A00C3', 16)
BeamType = int('300A00C4', 16)
BeamDeliveryDurationLimit = int('300A00C5', 16)
RadiationType = int('300A00C6', 16)
HighDoseTechniqueType = int('300A00C7', 16)
ReferenceImageNumber = int('300A00C8', 16)
PlannedVerificationImageSequence = int('300A00CA', 16)
ImagingDeviceSpecificAcquisitionParameters = int('300A00CC', 16)
TreatmentDeliveryType = int('300A00CE', 16)
NumberofWedges = int('300A00D0', 16)
WedgeSequence = int('300A00D1', 16)
WedgeNumber = int('300A00D2', 16)
WedgeType = int('300A00D3', 16)
WedgeID = int('300A00D4', 16)
WedgeAngle = int('300A00D5', 16)
WedgeFactor = int('300A00D6', 16)
TotalWedgeTrayWaterEquivalentThickness = int('300A00D7', 16)
WedgeOrientation = int('300A00D8', 16)
IsocentertoWedgeTrayDistance = int('300A00D9', 16)
SourcetoWedgeTrayDistance = int('300A00DA', 16)
WedgeThinEdgePosition = int('300A00DB', 16)
BolusID = int('300A00DC', 16)
BolusDescription = int('300A00DD', 16)
EffectiveWedgeAngle = int('300A00DE', 16)
NumberofCompensators = int('300A00E0', 16)
MaterialID = int('300A00E1', 16)
TotalCompensatorTrayFactor = int('300A00E2', 16)
CompensatorSequence = int('300A00E3', 16)
CompensatorNumber = int('300A00E4', 16)
CompensatorID = int('300A00E5', 16)
SourcetoCompensatorTrayDistance = int('300A00E6', 16)
CompensatorRows = int('300A00E7', 16)
CompensatorColumns = int('300A00E8', 16)
CompensatorPixelSpacing = int('300A00E9', 16)
CompensatorPosition = int('300A00EA', 16)
CompensatorTransmissionData = int('300A00EB', 16)
CompensatorThicknessData = int('300A00EC', 16)
NumberofBoli = int('300A00ED', 16)
CompensatorType = int('300A00EE', 16)
CompensatorTrayID = int('300A00EF', 16)
NumberofBlocks = int('300A00F0', 16)
TotalBlockTrayFactor = int('300A00F2', 16)
TotalBlockTrayWaterEquivalentThickness = int('300A00F3', 16)
BlockSequence = int('300A00F4', 16)
BlockTrayID = int('300A00F5', 16)
SourcetoBlockTrayDistance = int('300A00F6', 16)
IsocentertoBlockTrayDistance = int('300A00F7', 16)
BlockType = int('300A00F8', 16)
AccessoryCode = int('300A00F9', 16)
BlockDivergence = int('300A00FA', 16)
BlockMountingPosition = int('300A00FB', 16)
BlockNumber = int('300A00FC', 16)
BlockName = int('300A00FE', 16)
BlockThickness = int('300A0100', 16)
BlockTransmission = int('300A0102', 16)
BlockNumberofPoints = int('300A0104', 16)
BlockData = int('300A0106', 16)
ApplicatorSequence = int('300A0107', 16)
ApplicatorID = int('300A0108', 16)
ApplicatorType = int('300A0109', 16)
ApplicatorDescription = int('300A010A', 16)
CumulativeDoseReferenceCoefficient = int('300A010C', 16)
FinalCumulativeMetersetWeight = int('300A010E', 16)
NumberofControlPoints = int('300A0110', 16)
ControlPointSequence = int('300A0111', 16)
ControlPointIndex = int('300A0112', 16)
NominalBeamEnergy = int('300A0114', 16)
DoseRateSet = int('300A0115', 16)
WedgePositionSequence = int('300A0116', 16)
WedgePosition = int('300A0118', 16)
BeamLimitingDevicePositionSequence = int('300A011A', 16)
LeafJawPositions = int('300A011C', 16)
GantryAngle = int('300A011E', 16)
GantryRotationDirection = int('300A011F', 16)
BeamLimitingDeviceAngle = int('300A0120', 16)
BeamLimitingDeviceRotationDirection = int('300A0121', 16)
PatientSupportAngle = int('300A0122', 16)
PatientSupportRotationDirection = int('300A0123', 16)
TableTopEccentricAxisDistance = int('300A0124', 16)
TableTopEccentricAngle = int('300A0125', 16)
TableTopEccentricRotationDirection = int('300A0126', 16)
TableTopVerticalPosition = int('300A0128', 16)
TableTopLongitudinalPosition = int('300A0129', 16)
TableTopLateralPosition = int('300A012A', 16)
IsocenterPosition = int('300A012C', 16)
SurfaceEntryPoint = int('300A012E', 16)
SourcetoSurfaceDistance = int('300A0130', 16)
AverageBeamDosePointSourcetoExternalContourDistance = int('300A0131', 16)
SourcetoExternalContourDistance = int('300A0132', 16)
ExternalContourEntryPoint = int('300A0133', 16)
CumulativeMetersetWeight = int('300A0134', 16)
TableTopPitchAngle = int('300A0140', 16)
TableTopPitchRotationDirection = int('300A0142', 16)
TableTopRollAngle = int('300A0144', 16)
TableTopRollRotationDirection = int('300A0146', 16)
HeadFixationAngle = int('300A0148', 16)
GantryPitchAngle = int('300A014A', 16)
GantryPitchRotationDirection = int('300A014C', 16)
GantryPitchAngleTolerance = int('300A014E', 16)
FixationEye = int('300A0150', 16)
ChairHeadFramePosition = int('300A0151', 16)
HeadFixationAngleTolerance = int('300A0152', 16)
ChairHeadFramePositionTolerance = int('300A0153', 16)
FixationLightAzimuthalAngleTolerance = int('300A0154', 16)
FixationLightPolarAngleTolerance = int('300A0155', 16)
PatientSetupSequence = int('300A0180', 16)
PatientSetupNumber = int('300A0182', 16)
PatientSetupLabel = int('300A0183', 16)
PatientAdditionalPosition = int('300A0184', 16)
FixationDeviceSequence = int('300A0190', 16)
FixationDeviceType = int('300A0192', 16)
FixationDeviceLabel = int('300A0194', 16)
FixationDeviceDescription = int('300A0196', 16)
FixationDevicePosition = int('300A0198', 16)
FixationDevicePitchAngle = int('300A0199', 16)
FixationDeviceRollAngle = int('300A019A', 16)
ShieldingDeviceSequence = int('300A01A0', 16)
ShieldingDeviceType = int('300A01A2', 16)
ShieldingDeviceLabel = int('300A01A4', 16)
ShieldingDeviceDescription = int('300A01A6', 16)
ShieldingDevicePosition = int('300A01A8', 16)
SetupTechnique = int('300A01B0', 16)
SetupTechniqueDescription = int('300A01B2', 16)
SetupDeviceSequence = int('300A01B4', 16)
SetupDeviceType = int('300A01B6', 16)
SetupDeviceLabel = int('300A01B8', 16)
SetupDeviceDescription = int('300A01BA', 16)
SetupDeviceParameter = int('300A01BC', 16)
SetupReferenceDescription = int('300A01D0', 16)
TableTopVerticalSetupDisplacement = int('300A01D2', 16)
TableTopLongitudinalSetupDisplacement = int('300A01D4', 16)
TableTopLateralSetupDisplacement = int('300A01D6', 16)
BrachyTreatmentTechnique = int('300A0200', 16)
BrachyTreatmentType = int('300A0202', 16)
TreatmentMachineSequence = int('300A0206', 16)
SourceSequence = int('300A0210', 16)
SourceNumber = int('300A0212', 16)
SourceType = int('300A0214', 16)
SourceManufacturer = int('300A0216', 16)
ActiveSourceDiameter = int('300A0218', 16)
ActiveSourceLength = int('300A021A', 16)
SourceModelID = int('300A021B', 16)
SourceDescription = int('300A021C', 16)
SourceEncapsulationNominalThickness = int('300A0222', 16)
SourceEncapsulationNominalTransmission = int('300A0224', 16)
SourceIsotopeName = int('300A0226', 16)
SourceIsotopeHalfLife = int('300A0228', 16)
SourceStrengthUnits = int('300A0229', 16)
ReferenceAirKermaRate = int('300A022A', 16)
SourceStrength = int('300A022B', 16)
SourceStrengthReferenceDate = int('300A022C', 16)
SourceStrengthReferenceTime = int('300A022E', 16)
ApplicationSetupSequence = int('300A0230', 16)
ApplicationSetupType = int('300A0232', 16)
ApplicationSetupNumber = int('300A0234', 16)
ApplicationSetupName = int('300A0236', 16)
ApplicationSetupManufacturer = int('300A0238', 16)
TemplateNumber = int('300A0240', 16)
TemplateType = int('300A0242', 16)
TemplateName = int('300A0244', 16)
TotalReferenceAirKerma = int('300A0250', 16)
BrachyAccessoryDeviceSequence = int('300A0260', 16)
BrachyAccessoryDeviceNumber = int('300A0262', 16)
BrachyAccessoryDeviceID = int('300A0263', 16)
BrachyAccessoryDeviceType = int('300A0264', 16)
BrachyAccessoryDeviceName = int('300A0266', 16)
BrachyAccessoryDeviceNominalThickness = int('300A026A', 16)
BrachyAccessoryDeviceNominalTransmission = int('300A026C', 16)
ChannelSequence = int('300A0280', 16)
ChannelNumber = int('300A0282', 16)
ChannelLength = int('300A0284', 16)
ChannelTotalTime = int('300A0286', 16)
SourceMovementType = int('300A0288', 16)
NumberofPulses = int('300A028A', 16)
PulseRepetitionInterval = int('300A028C', 16)
SourceApplicatorNumber = int('300A0290', 16)
SourceApplicatorID = int('300A0291', 16)
SourceApplicatorType = int('300A0292', 16)
SourceApplicatorName = int('300A0294', 16)
SourceApplicatorLength = int('300A0296', 16)
SourceApplicatorManufacturer = int('300A0298', 16)
SourceApplicatorWallNominalThickness = int('300A029C', 16)
SourceApplicatorWallNominalTransmission = int('300A029E', 16)
SourceApplicatorStepSize = int('300A02A0', 16)
TransferTubeNumber = int('300A02A2', 16)
TransferTubeLength = int('300A02A4', 16)
ChannelShieldSequence = int('300A02B0', 16)
ChannelShieldNumber = int('300A02B2', 16)
ChannelShieldID = int('300A02B3', 16)
ChannelShieldName = int('300A02B4', 16)
ChannelShieldNominalThickness = int('300A02B8', 16)
ChannelShieldNominalTransmission = int('300A02BA', 16)
FinalCumulativeTimeWeight = int('300A02C8', 16)
BrachyControlPointSequence = int('300A02D0', 16)
ControlPointRelativePosition = int('300A02D2', 16)
ControlPoint3DPosition = int('300A02D4', 16)
CumulativeTimeWeight = int('300A02D6', 16)
CompensatorDivergence = int('300A02E0', 16)
CompensatorMountingPosition = int('300A02E1', 16)
SourcetoCompensatorDistance = int('300A02E2', 16)
TotalCompensatorTrayWaterEquivalentThickness = int('300A02E3', 16)
IsocentertoCompensatorTrayDistance = int('300A02E4', 16)
CompensatorColumnOffset = int('300A02E5', 16)
IsocentertoCompensatorDistances = int('300A02E6', 16)
CompensatorRelativeStoppingPowerRatio = int('300A02E7', 16)
CompensatorMillingToolDiameter = int('300A02E8', 16)
IonRangeCompensatorSequence = int('300A02EA', 16)
CompensatorDescription = int('300A02EB', 16)
RadiationMassNumber = int('300A0302', 16)
RadiationAtomicNumber = int('300A0304', 16)
RadiationChargeState = int('300A0306', 16)
ScanMode = int('300A0308', 16)
ModulatedScanModeType = int('300A0309', 16)
VirtualSourceAxisDistances = int('300A030A', 16)
SnoutSequence = int('300A030C', 16)
SnoutPosition = int('300A030D', 16)
SnoutID = int('300A030F', 16)
NumberofRangeShifters = int('300A0312', 16)
RangeShifterSequence = int('300A0314', 16)
RangeShifterNumber = int('300A0316', 16)
RangeShifterID = int('300A0318', 16)
RangeShifterType = int('300A0320', 16)
RangeShifterDescription = int('300A0322', 16)
NumberofLateralSpreadingDevices = int('300A0330', 16)
LateralSpreadingDeviceSequence = int('300A0332', 16)
LateralSpreadingDeviceNumber = int('300A0334', 16)
LateralSpreadingDeviceID = int('300A0336', 16)
LateralSpreadingDeviceType = int('300A0338', 16)
LateralSpreadingDeviceDescription = int('300A033A', 16)
LateralSpreadingDeviceWaterEquivalentThickness = int('300A033C', 16)
NumberofRangeModulators = int('300A0340', 16)
RangeModulatorSequence = int('300A0342', 16)
RangeModulatorNumber = int('300A0344', 16)
RangeModulatorID = int('300A0346', 16)
RangeModulatorType = int('300A0348', 16)
RangeModulatorDescription = int('300A034A', 16)
BeamCurrentModulationID = int('300A034C', 16)
PatientSupportType = int('300A0350', 16)
PatientSupportID = int('300A0352', 16)
PatientSupportAccessoryCode = int('300A0354', 16)
TrayAccessoryCode = int('300A0355', 16)
FixationLightAzimuthalAngle = int('300A0356', 16)
FixationLightPolarAngle = int('300A0358', 16)
MetersetRate = int('300A035A', 16)
RangeShifterSettingsSequence = int('300A0360', 16)
RangeShifterSetting = int('300A0362', 16)
IsocentertoRangeShifterDistance = int('300A0364', 16)
RangeShifterWaterEquivalentThickness = int('300A0366', 16)
LateralSpreadingDeviceSettingsSequence = int('300A0370', 16)
LateralSpreadingDeviceSetting = int('300A0372', 16)
IsocentertoLateralSpreadingDeviceDistance = int('300A0374', 16)
RangeModulatorSettingsSequence = int('300A0380', 16)
RangeModulatorGatingStartValue = int('300A0382', 16)
RangeModulatorGatingStopValue = int('300A0384', 16)
RangeModulatorGatingStartWaterEquivalentThickness = int('300A0386', 16)
RangeModulatorGatingStopWaterEquivalentThickness = int('300A0388', 16)
IsocentertoRangeModulatorDistance = int('300A038A', 16)
ScanSpotTuneID = int('300A0390', 16)
ScanSpotPrescribedIndices = int('300A0391', 16)
NumberofScanSpotPositions = int('300A0392', 16)
ScanSpotReordered = int('300A0393', 16)
ScanSpotPositionMap = int('300A0394', 16)
ScanSpotReorderingAllowed = int('300A0395', 16)
ScanSpotMetersetWeights = int('300A0396', 16)
ScanningSpotSize = int('300A0398', 16)
NumberofPaintings = int('300A039A', 16)
IonToleranceTableSequence = int('300A03A0', 16)
IonBeamSequence = int('300A03A2', 16)
IonBeamLimitingDeviceSequence = int('300A03A4', 16)
IonBlockSequence = int('300A03A6', 16)
IonControlPointSequence = int('300A03A8', 16)
IonWedgeSequence = int('300A03AA', 16)
IonWedgePositionSequence = int('300A03AC', 16)
ReferencedSetupImageSequence = int('300A0401', 16)
SetupImageComment = int('300A0402', 16)
MotionSynchronizationSequence = int('300A0410', 16)
ControlPointOrientation = int('300A0412', 16)
GeneralAccessorySequence = int('300A0420', 16)
GeneralAccessoryID = int('300A0421', 16)
GeneralAccessoryDescription = int('300A0422', 16)
GeneralAccessoryType = int('300A0423', 16)
GeneralAccessoryNumber = int('300A0424', 16)
SourcetoGeneralAccessoryDistance = int('300A0425', 16)
ApplicatorGeometrySequence = int('300A0431', 16)
ApplicatorApertureShape = int('300A0432', 16)
ApplicatorOpening = int('300A0433', 16)
ApplicatorOpeningX = int('300A0434', 16)
ApplicatorOpeningY = int('300A0435', 16)
SourcetoApplicatorMountingPositionDistance = int('300A0436', 16)
NumberofBlockSlabItems = int('300A0440', 16)
BlockSlabSequence = int('300A0441', 16)
BlockSlabThickness = int('300A0442', 16)
BlockSlabNumber = int('300A0443', 16)
DeviceMotionControlSequence = int('300A0450', 16)
DeviceMotionExecutionMode = int('300A0451', 16)
DeviceMotionObservationMode = int('300A0452', 16)
DeviceMotionParameterCodeSequence = int('300A0453', 16)
DistalDepthFraction = int('300A0501', 16)
DistalDepth = int('300A0502', 16)
NominalRangeModulationFractions = int('300A0503', 16)
NominalRangeModulatedRegionDepths = int('300A0504', 16)
DepthDoseParametersSequence = int('300A0505', 16)
DeliveredDepthDoseParametersSequence = int('300A0506', 16)
DeliveredDistalDepthFraction = int('300A0507', 16)
DeliveredDistalDepth = int('300A0508', 16)
DeliveredNominalRangeModulationFractions = int('300A0509', 16)
DeliveredNominalRangeModulatedRegionDepths = int('300A0510', 16)
DeliveredReferenceDoseDefinition = int('300A0511', 16)
ReferenceDoseDefinition = int('300A0512', 16)
ReferencedRTPlanSequence = int('300C0002', 16)
ReferencedBeamSequence = int('300C0004', 16)
ReferencedBeamNumber = int('300C0006', 16)
ReferencedReferenceImageNumber = int('300C0007', 16)
StartCumulativeMetersetWeight = int('300C0008', 16)
EndCumulativeMetersetWeight = int('300C0009', 16)
ReferencedBrachyApplicationSetupSequence = int('300C000A', 16)
ReferencedBrachyApplicationSetupNumber = int('300C000C', 16)
ReferencedSourceNumber = int('300C000E', 16)
ReferencedFractionGroupSequence = int('300C0020', 16)
ReferencedFractionGroupNumber = int('300C0022', 16)
ReferencedVerificationImageSequence = int('300C0040', 16)
ReferencedReferenceImageSequence = int('300C0042', 16)
ReferencedDoseReferenceSequence = int('300C0050', 16)
ReferencedDoseReferenceNumber = int('300C0051', 16)
BrachyReferencedDoseReferenceSequence = int('300C0055', 16)
ReferencedStructureSetSequence = int('300C0060', 16)
ReferencedPatientSetupNumber = int('300C006A', 16)
ReferencedDoseSequence = int('300C0080', 16)
ReferencedToleranceTableNumber = int('300C00A0', 16)
ReferencedBolusSequence = int('300C00B0', 16)
ReferencedWedgeNumber = int('300C00C0', 16)
ReferencedCompensatorNumber = int('300C00D0', 16)
ReferencedBlockNumber = int('300C00E0', 16)
ReferencedControlPointIndex = int('300C00F0', 16)
ReferencedControlPointSequence = int('300C00F2', 16)
ReferencedStartControlPointIndex = int('300C00F4', 16)
ReferencedStopControlPointIndex = int('300C00F6', 16)
ReferencedRangeShifterNumber = int('300C0100', 16)
ReferencedLateralSpreadingDeviceNumber = int('300C0102', 16)
ReferencedRangeModulatorNumber = int('300C0104', 16)
OmittedBeamTaskSequence = int('300C0111', 16)
ReasonforOmission = int('300C0112', 16)
ReasonforOmissionDescription = int('300C0113', 16)
ApprovalStatus = int('300E0002', 16)
ReviewDate = int('300E0004', 16)
ReviewTime = int('300E0005', 16)
ReviewerName = int('300E0008', 16)
Arbitrary = int('40000010', 16)
TextComments = int('40004000', 16)
ResultsID = int('40080040', 16)
ResultsIDIssuer = int('40080042', 16)
ReferencedInterpretationSequence = int('40080050', 16)
ReportProductionStatusTrial = int('400800FF', 16)
InterpretationRecordedDate = int('40080100', 16)
InterpretationRecordedTime = int('40080101', 16)
InterpretationRecorder = int('40080102', 16)
ReferencetoRecordedSound = int('40080103', 16)
InterpretationTranscriptionDate = int('40080108', 16)
InterpretationTranscriptionTime = int('40080109', 16)
InterpretationTranscriber = int('4008010A', 16)
InterpretationText = int('4008010B', 16)
InterpretationAuthor = int('4008010C', 16)
InterpretationApproverSequence = int('40080111', 16)
InterpretationApprovalDate = int('40080112', 16)
InterpretationApprovalTime = int('40080113', 16)
PhysicianApprovingInterpretation = int('40080114', 16)
InterpretationDiagnosisDescription = int('40080115', 16)
InterpretationDiagnosisCodeSequence = int('40080117', 16)
ResultsDistributionListSequence = int('40080118', 16)
DistributionName = int('40080119', 16)
DistributionAddress = int('4008011A', 16)
InterpretationID = int('40080200', 16)
InterpretationIDIssuer = int('40080202', 16)
InterpretationTypeID = int('40080210', 16)
InterpretationStatusID = int('40080212', 16)
Impressions = int('40080300', 16)
ResultsComments = int('40084000', 16)
LowEnergyDetectors = int('40100001', 16)
HighEnergyDetectors = int('40100002', 16)
DetectorGeometrySequence = int('40100004', 16)
ThreatROIVoxelSequence = int('40101001', 16)
ThreatROIBase = int('40101004', 16)
ThreatROIExtents = int('40101005', 16)
ThreatROIBitmap = int('40101006', 16)
RouteSegmentID = int('40101007', 16)
GantryType = int('40101008', 16)
OOIOwnerType = int('40101009', 16)
RouteSegmentSequence = int('4010100A', 16)
PotentialThreatObjectID = int('40101010', 16)
ThreatSequence = int('40101011', 16)
ThreatCategory = int('40101012', 16)
ThreatCategoryDescription = int('40101013', 16)
ATDAbilityAssessment = int('40101014', 16)
ATDAssessmentFlag = int('40101015', 16)
ATDAssessmentProbability = int('40101016', 16)
Mass = int('40101017', 16)
Density = int('40101018', 16)
ZEffective = int('40101019', 16)
BoardingPassID = int('4010101A', 16)
CenterofMass = int('4010101B', 16)
CenterofPTO = int('4010101C', 16)
BoundingPolygon = int('4010101D', 16)
RouteSegmentStartLocationID = int('4010101E', 16)
RouteSegmentEndLocationID = int('4010101F', 16)
RouteSegmentLocationIDType = int('40101020', 16)
AbortReason = int('40101021', 16)
VolumeofPTO = int('40101023', 16)
AbortFlag = int('40101024', 16)
RouteSegmentStartTime = int('40101025', 16)
RouteSegmentEndTime = int('40101026', 16)
TDRType = int('40101027', 16)
InternationalRouteSegment = int('40101028', 16)
ThreatDetectionAlgorithmandVersion = int('40101029', 16)
AssignedLocation = int('4010102A', 16)
AlarmDecisionTime = int('4010102B', 16)
AlarmDecision = int('40101031', 16)
NumberofTotalObjects = int('40101033', 16)
NumberofAlarmObjects = int('40101034', 16)
PTORepresentationSequence = int('40101037', 16)
ATDAssessmentSequence = int('40101038', 16)
TIPType = int('40101039', 16)
Version = int('4010103A', 16)
OOIOwnerCreationTime = int('40101041', 16)
OOIType = int('40101042', 16)
OOISize = int('40101043', 16)
AcquisitionStatus = int('40101044', 16)
BasisMaterialsCodeSequence = int('40101045', 16)
PhantomType = int('40101046', 16)
OOIOwnerSequence = int('40101047', 16)
ScanType = int('40101048', 16)
ItineraryID = int('40101051', 16)
ItineraryIDType = int('40101052', 16)
ItineraryIDAssigningAuthority = int('40101053', 16)
RouteID = int('40101054', 16)
RouteIDAssigningAuthority = int('40101055', 16)
InboundArrivalType = int('40101056', 16)
CarrierID = int('40101058', 16)
CarrierIDAssigningAuthority = int('40101059', 16)
SourceOrientation = int('40101060', 16)
SourcePosition = int('40101061', 16)
BeltHeight = int('40101062', 16)
AlgorithmRoutingCodeSequence = int('40101064', 16)
TransportClassification = int('40101067', 16)
OOITypeDescriptor = int('40101068', 16)
TotalProcessingTime = int('40101069', 16)
DetectorCalibrationData = int('4010106C', 16)
AdditionalScreeningPerformed = int('4010106D', 16)
AdditionalInspectionSelectionCriteria = int('4010106E', 16)
AdditionalInspectionMethodSequence = int('4010106F', 16)
AITDeviceType = int('40101070', 16)
QRMeasurementsSequence = int('40101071', 16)
TargetMaterialSequence = int('40101072', 16)
SNRThreshold = int('40101073', 16)
ImageScaleRepresentation = int('40101075', 16)
ReferencedPTOSequence = int('40101076', 16)
ReferencedTDRInstanceSequence = int('40101077', 16)
PTOLocationDescription = int('40101078', 16)
AnomalyLocatorIndicatorSequence = int('40101079', 16)
AnomalyLocatorIndicator = int('4010107A', 16)
PTORegionSequence = int('4010107B', 16)
InspectionSelectionCriteria = int('4010107C', 16)
SecondaryInspectionMethodSequence = int('4010107D', 16)
PRCStoRCSOrientation = int('4010107E', 16)
MACParametersSequence = int('4FFE0001', 16)
SharedFunctionalGroupsSequence = int('52009229', 16)
PerframeFunctionalGroupsSequence = int('52009230', 16)
WaveformSequence = int('54000100', 16)
ChannelMinimumValue = int('54000110', 16)
ChannelMaximumValue = int('54000112', 16)
WaveformBitsAllocated = int('54001004', 16)
WaveformSampleInterpretation = int('54001006', 16)
WaveformPaddingValue = int('5400100A', 16)
WaveformData = int('54001010', 16)
FirstOrderPhaseCorrectionAngle = int('56000010', 16)
SpectroscopyData = int('56000020', 16)
FloatPixelData = int('7FE00008', 16)
DoubleFloatPixelData = int('7FE00009', 16)
PixelData = int('7FE00010', 16)
CoefficientsSDVN = int('7FE00020', 16)
CoefficientsSDHN = int('7FE00030', 16)
CoefficientsSDDN = int('7FE00040', 16)
DigitalSignaturesSequence = int('FFFAFFFA', 16)
DataSetTrailingPadding = int('FFFCFFFC', 16)
Item = int('FFFEE000', 16)
ItemDelimitationItem = int('FFFEE00D', 16)
SequenceDelimitationItem = int('FFFEE0DD', 16) | 0.924688 | 0.880232 |
import frappe
from frappe import _
from chat.utils import get_full_name
import ast
@frappe.whitelist()
def get(email):
"""Get all the rooms for a user
Args:
email (str): Email of user requests all rooms
"""
room_doctype = frappe.qb.DocType('Chat Room')
all_rooms = (
frappe.qb.from_(room_doctype)
.select('name', 'modified', 'last_message', 'is_read', 'room_name', 'members', 'type')
.where((room_doctype.type.like('Guest') | room_doctype.members.like(f'%{email}%')))
).run(as_dict=True)
for room in all_rooms:
if room['type'] == 'Direct':
members = room['members'].split(', ')
room['room_name'] = get_full_name(
members[0]) if email == members[1] else get_full_name(members[1])
room['opposite_person_email'] = members[0] if members[1] == email else members[1]
room['is_read'] = 1 if email in room['is_read'] else 0
all_rooms.sort(key=lambda room: comparator(room))
return all_rooms
@frappe.whitelist()
def create_private(room_name, users, type):
"""Create a new private room
Args:
room_name (str): Room name
users (str): List of users in room
"""
users = ast.literal_eval(users)
users.append(frappe.session.user)
members = ', '.join(users)
if type == 'Direct':
room_doctype = frappe.qb.DocType('Chat Room')
query = (
frappe.qb.from_(room_doctype)
.select('name')
.where(room_doctype.type == 'Direct')
.where(room_doctype.members.like(f'%{users[0]}%'))
.where(room_doctype.members.like(f'%{users[1]}%'))
).run(as_dict=True)
if query:
frappe.throw(
title='Error',
msg=_('Direct Room already exists!')
)
else:
room_doc = get_private_room_doc(room_name, members, type)
room_doc.insert()
else:
room_doc = get_private_room_doc(room_name, members, type)
room_doc.insert()
profile = {
'room_name': room_name,
'last_date': room_doc.modified,
'room': room_doc.name,
'is_read': 0,
'room_type': type,
'members': members,
}
if type == 'Direct':
members_names = [
{
'name': get_full_name(users[0]),
'email': users[0]
},
{
'name': get_full_name(users[1]),
'email': users[1]
}
]
profile['member_names'] = members_names
frappe.publish_realtime(event='private_room_creation',
message=profile, after_commit=True)
def get_private_room_doc(room_name, members, type):
return frappe.get_doc({
'doctype': 'Chat Room',
'room_name': room_name,
'members': members,
'type': type,
})
def comparator(key):
return (
key.is_read,
reversor(key.modified)
)
class reversor:
def __init__(self, obj):
self.obj = obj
def __eq__(self, other):
return other.obj == self.obj
def __gt__(self, other):
return other.obj > self.obj | chat/api/room.py | import frappe
from frappe import _
from chat.utils import get_full_name
import ast
@frappe.whitelist()
def get(email):
"""Get all the rooms for a user
Args:
email (str): Email of user requests all rooms
"""
room_doctype = frappe.qb.DocType('Chat Room')
all_rooms = (
frappe.qb.from_(room_doctype)
.select('name', 'modified', 'last_message', 'is_read', 'room_name', 'members', 'type')
.where((room_doctype.type.like('Guest') | room_doctype.members.like(f'%{email}%')))
).run(as_dict=True)
for room in all_rooms:
if room['type'] == 'Direct':
members = room['members'].split(', ')
room['room_name'] = get_full_name(
members[0]) if email == members[1] else get_full_name(members[1])
room['opposite_person_email'] = members[0] if members[1] == email else members[1]
room['is_read'] = 1 if email in room['is_read'] else 0
all_rooms.sort(key=lambda room: comparator(room))
return all_rooms
@frappe.whitelist()
def create_private(room_name, users, type):
"""Create a new private room
Args:
room_name (str): Room name
users (str): List of users in room
"""
users = ast.literal_eval(users)
users.append(frappe.session.user)
members = ', '.join(users)
if type == 'Direct':
room_doctype = frappe.qb.DocType('Chat Room')
query = (
frappe.qb.from_(room_doctype)
.select('name')
.where(room_doctype.type == 'Direct')
.where(room_doctype.members.like(f'%{users[0]}%'))
.where(room_doctype.members.like(f'%{users[1]}%'))
).run(as_dict=True)
if query:
frappe.throw(
title='Error',
msg=_('Direct Room already exists!')
)
else:
room_doc = get_private_room_doc(room_name, members, type)
room_doc.insert()
else:
room_doc = get_private_room_doc(room_name, members, type)
room_doc.insert()
profile = {
'room_name': room_name,
'last_date': room_doc.modified,
'room': room_doc.name,
'is_read': 0,
'room_type': type,
'members': members,
}
if type == 'Direct':
members_names = [
{
'name': get_full_name(users[0]),
'email': users[0]
},
{
'name': get_full_name(users[1]),
'email': users[1]
}
]
profile['member_names'] = members_names
frappe.publish_realtime(event='private_room_creation',
message=profile, after_commit=True)
def get_private_room_doc(room_name, members, type):
return frappe.get_doc({
'doctype': 'Chat Room',
'room_name': room_name,
'members': members,
'type': type,
})
def comparator(key):
return (
key.is_read,
reversor(key.modified)
)
class reversor:
def __init__(self, obj):
self.obj = obj
def __eq__(self, other):
return other.obj == self.obj
def __gt__(self, other):
return other.obj > self.obj | 0.419053 | 0.159675 |
async def test_event_deletion(container):
"""Check event deletion."""
newsfeed_id = '123'
event_dispatcher_service = container.event_dispatcher_service()
event_processor_service = container.event_processor_service()
event_1 = await _process_new_event(
event_dispatcher_service,
event_processor_service,
newsfeed_id=newsfeed_id,
data={
'event_data': 'some_data_1',
},
)
event_2 = await _process_new_event(
event_dispatcher_service,
event_processor_service,
newsfeed_id=newsfeed_id,
data={
'event_data': 'some_data_2',
},
)
await _process_event_deletion(
event_dispatcher_service,
event_processor_service,
newsfeed_id=newsfeed_id,
event_id=event_1.id,
)
event_repository = container.event_repository()
events = await event_repository.get_by_newsfeed_id(newsfeed_id)
assert len(events) == 1
assert events[0].id == event_2.id
assert events[0].data == event_2.data
async def test_event_deletion_from_subscriber(container):
"""Check event deletion."""
newsfeed_id = '123'
subscriber_newsfeed_id = '124'
subscription_service = container.subscription_service()
await subscription_service.create_subscription(
newsfeed_id=subscriber_newsfeed_id,
to_newsfeed_id=newsfeed_id,
)
event_dispatcher_service = container.event_dispatcher_service()
event_processor_service = container.event_processor_service()
event_1 = await _process_new_event(
event_dispatcher_service,
event_processor_service,
newsfeed_id=newsfeed_id,
data={
'event_data': 'some_data_1',
},
)
event_2 = await _process_new_event(
event_dispatcher_service,
event_processor_service,
newsfeed_id=newsfeed_id,
data={
'event_data': 'some_data_2',
},
)
await _process_event_deletion(
event_dispatcher_service,
event_processor_service,
newsfeed_id=newsfeed_id,
event_id=event_1.id,
)
event_repository = container.event_repository()
events = await event_repository.get_by_newsfeed_id(newsfeed_id)
assert len(events) == 1
assert events[0].id == event_2.id
assert events[0].newsfeed_id == event_2.newsfeed_id
assert events[0].data == event_2.data
subscriber_events = await event_repository.get_by_newsfeed_id(subscriber_newsfeed_id)
assert len(subscriber_events) == 1
assert subscriber_events[0].parent_fqid.newsfeed_id == event_2.newsfeed_id
assert subscriber_events[0].parent_fqid.event_id == event_2.id
assert subscriber_events[0].data == event_2.data
async def _process_new_event(event_dispatcher_service, event_processor_service, newsfeed_id, data):
    """Dispatch a new event into *newsfeed_id*, run one processing cycle, return the event."""
    dispatched = await event_dispatcher_service.dispatch_new_event(
        data=data,
        newsfeed_id=newsfeed_id,
    )
    await event_processor_service.process_event()
    return dispatched
async def _process_event_deletion(event_dispatcher_service, event_processor_service, newsfeed_id,
                                  event_id):
    """Dispatch deletion of *event_id* from *newsfeed_id*, then run one processing cycle."""
    await event_dispatcher_service.dispatch_event_deletion(
        newsfeed_id=newsfeed_id,
        # The dispatcher API expects string ids.
        event_id=str(event_id),
    )
    await event_processor_service.process_event()


# NOTE(review): the original text fused this helper's last statement with the
# next function's header and a stray repository path
# (" | tests/unit/domain/test_event_deletion.py | ") — an extraction artifact
# that was invalid Python. Split back into two definitions; no behavior change.
async def test_event_deletion(container):
    """Check event deletion."""
    newsfeed_id = '123'
    event_dispatcher_service = container.event_dispatcher_service()
    event_processor_service = container.event_processor_service()
    event_1 = await _process_new_event(
        event_dispatcher_service,
        event_processor_service,
        newsfeed_id=newsfeed_id,
        data={
            'event_data': 'some_data_1',
        },
    )
    event_2 = await _process_new_event(
        event_dispatcher_service,
        event_processor_service,
        newsfeed_id=newsfeed_id,
        data={
            'event_data': 'some_data_2',
        },
    )
    await _process_event_deletion(
        event_dispatcher_service,
        event_processor_service,
        newsfeed_id=newsfeed_id,
        event_id=event_1.id,
    )
    # Only the second event should remain after deleting the first.
    event_repository = container.event_repository()
    events = await event_repository.get_by_newsfeed_id(newsfeed_id)
    assert len(events) == 1
    assert events[0].id == event_2.id
    assert events[0].data == event_2.data
async def test_event_deletion_from_subscriber(container):
    """Deleting an event must also delete its fan-out copy in subscriber feeds."""
    newsfeed_id = '123'
    subscriber_newsfeed_id = '124'
    # Feed 124 subscribes to feed 123, so new events in 123 fan out to 124.
    subscription_service = container.subscription_service()
    await subscription_service.create_subscription(
        newsfeed_id=subscriber_newsfeed_id,
        to_newsfeed_id=newsfeed_id,
    )
    event_dispatcher_service = container.event_dispatcher_service()
    event_processor_service = container.event_processor_service()
    event_1 = await _process_new_event(
        event_dispatcher_service,
        event_processor_service,
        newsfeed_id=newsfeed_id,
        data={
            'event_data': 'some_data_1',
        },
    )
    event_2 = await _process_new_event(
        event_dispatcher_service,
        event_processor_service,
        newsfeed_id=newsfeed_id,
        data={
            'event_data': 'some_data_2',
        },
    )
    await _process_event_deletion(
        event_dispatcher_service,
        event_processor_service,
        newsfeed_id=newsfeed_id,
        event_id=event_1.id,
    )
    # Source feed keeps only the surviving event.
    event_repository = container.event_repository()
    events = await event_repository.get_by_newsfeed_id(newsfeed_id)
    assert len(events) == 1
    assert events[0].id == event_2.id
    assert events[0].newsfeed_id == event_2.newsfeed_id
    assert events[0].data == event_2.data
    # Subscriber feed keeps only the copy of the surviving event,
    # linked to its parent via parent_fqid.
    subscriber_events = await event_repository.get_by_newsfeed_id(subscriber_newsfeed_id)
    assert len(subscriber_events) == 1
    assert subscriber_events[0].parent_fqid.newsfeed_id == event_2.newsfeed_id
    assert subscriber_events[0].parent_fqid.event_id == event_2.id
    assert subscriber_events[0].data == event_2.data
async def _process_new_event(event_dispatcher_service, event_processor_service, newsfeed_id, data):
    """Dispatch a new event into *newsfeed_id*, run one processing cycle, return the event."""
    dispatched = await event_dispatcher_service.dispatch_new_event(
        data=data,
        newsfeed_id=newsfeed_id,
    )
    await event_processor_service.process_event()
    return dispatched
async def _process_event_deletion(event_dispatcher_service, event_processor_service, newsfeed_id,
                                  event_id):
    """Dispatch deletion of *event_id* from *newsfeed_id*, then run one processing cycle."""
    await event_dispatcher_service.dispatch_event_deletion(
        event_id=str(event_id),  # the dispatcher API expects string ids
        newsfeed_id=newsfeed_id,
    )
    await event_processor_service.process_event()
import json
import logging
from affine import Affine
import numpy as np
from numpy.testing import assert_almost_equal
import pytest
import rasterio
from rasterio.control import GroundControlPoint
from rasterio.crs import CRS
from rasterio.enums import Resampling
from rasterio.env import GDALVersion
from rasterio.errors import (
CRSError,
GDALVersionError,
TransformError,
)
from rasterio.warp import (
reproject,
transform_geom,
transform,
transform_bounds,
calculate_default_transform,
aligned_target,
SUPPORTED_RESAMPLING,
GDAL2_RESAMPLING,
)
from rasterio import windows
from .conftest import requires_gdal22, requires_gdal3, requires_gdal_lt_3
# Module-level logger for this test module.
log = logging.getLogger(__name__)
# GDAL version detected at runtime; used to gate version-specific tests below.
gdal_version = GDALVersion.runtime()
# Shared destination grid (300 m pixels) used by many reprojection tests.
DST_TRANSFORM = Affine(300.0, 0.0, -8789636.708, 0.0, -300.0, 2943560.235)
def flatten_coords(coordinates):
    """Recursively yield every numeric leaf of a nested coordinate sequence.

    Test helper: lets assertions inspect all coordinate values regardless
    of GeoJSON nesting depth (rings, polygons, multipolygons).
    """
    for item in coordinates:
        if isinstance(item, (float, int)):
            yield item
        else:
            yield from flatten_coords(item)
# Expected nonzero-pixel counts for the nodata tests, with and without
# CHECK_WITH_INVERT_PROJ; both configurations should agree here.
reproj_expected = (
    ({"CHECK_WITH_INVERT_PROJ": False}, 6644), ({"CHECK_WITH_INVERT_PROJ": True}, 6644)
)
class ReprojectParams:
    """Class to assist testing reprojection by encapsulating parameters."""
    def __init__(self, left, bottom, right, top, width, height, src_crs, dst_crs):
        """Build the source grid from bounds and derive the default destination grid.

        left/bottom/right/top: source bounds in src_crs units.
        width/height: source raster dimensions in pixels.
        """
        self.width = width
        self.height = height
        # Square source pixels sized from the horizontal extent only.
        src_res = float(right - left) / float(width)
        self.src_transform = Affine(src_res, 0, left, 0, -src_res, top)
        self.src_crs = src_crs
        self.dst_crs = dst_crs
        # Let rasterio choose the destination grid covering the same bounds.
        dt, dw, dh = calculate_default_transform(
            src_crs, dst_crs, width, height, left, bottom, right, top
        )
        self.dst_transform = dt
        self.dst_width = dw
        self.dst_height = dh
def default_reproject_params():
    """Return ReprojectParams for a well-behaved WGS84 -> EPSG:2163 case."""
    # Args: left, bottom, right, top, width, height, src_crs, dst_crs.
    return ReprojectParams(
        -120, 30, -80, 70, 80, 80,
        CRS.from_epsg(4326),
        CRS.from_epsg(2163),
    )
def uninvertable_reproject_params():
    """Return ReprojectParams whose dst CRS (EPSG:26836) is not invertible everywhere."""
    # Args: left, bottom, right, top, width, height, src_crs, dst_crs.
    return ReprojectParams(
        -120, 30, -80, 70, 80, 80,
        CRS.from_epsg(4326),
        CRS.from_epsg(26836),
    )
# Shared WGS84 CRS instance for the None-CRS guard tests below.
WGS84_crs = CRS.from_epsg(4326)
def test_transform_src_crs_none():
    # A missing source CRS must raise CRSError, not fail later.
    with pytest.raises(CRSError):
        transform(None, WGS84_crs, [1], [1])
def test_transform_dst_crs_none():
    # A missing destination CRS must raise CRSError.
    with pytest.raises(CRSError):
        transform(WGS84_crs, None, [1], [1])
def test_transform_bounds_src_crs_none():
    # Same guard for transform_bounds (source side).
    with pytest.raises(CRSError):
        transform_bounds(None, WGS84_crs, 0, 0, 0, 0)
def test_transform_bounds_dst_crs_none():
    # Same guard for transform_bounds (destination side).
    with pytest.raises(CRSError):
        transform_bounds(WGS84_crs, None, 0, 0, 0, 0)
def test_transform_geom_src_crs_none():
    # Same guard for transform_geom (source side).
    with pytest.raises(CRSError):
        transform_geom(None, WGS84_crs, None)
def test_transform_geom_dst_crs_none():
    # Same guard for transform_geom (destination side).
    with pytest.raises(CRSError):
        transform_geom(WGS84_crs, None, None)
def test_reproject_src_crs_none():
    # reproject with arrays but no src_crs must raise CRSError.
    with pytest.raises(CRSError):
        reproject(
            np.ones((2, 2)),
            np.zeros((2, 2)),
            src_transform=Affine.identity(),
            dst_transform=Affine.identity(),
            dst_crs=WGS84_crs,
        )
def test_reproject_dst_crs_none():
    # reproject with arrays but no dst_crs must raise CRSError.
    with pytest.raises(CRSError):
        reproject(
            np.ones((2, 2)),
            np.zeros((2, 2)),
            src_transform=Affine.identity(),
            dst_transform=Affine.identity(),
            src_crs=WGS84_crs,
        )
def test_transform():
    """2D and 3D."""
    WGS84_crs = CRS.from_epsg(4326)
    WGS84_points = ([12.492269], [41.890169], [48.])
    ECEF_crs = CRS.from_epsg(4978)
    ECEF_points = ([4642610.], [1028584.], [4236562.])
    # 3D: lon/lat/height -> geocentric XYZ.
    ECEF_result = transform(WGS84_crs, ECEF_crs, *WGS84_points)
    assert np.allclose(np.array(ECEF_result), np.array(ECEF_points))
    UTM33_crs = CRS.from_epsg(32633)
    UTM33_points = ([291952], [4640623])
    # 2D: same point with the height coordinate dropped.
    UTM33_result = transform(WGS84_crs, UTM33_crs, *WGS84_points[:2])
    assert np.allclose(np.array(UTM33_result), np.array(UTM33_points))
def test_transform_bounds():
    # Bounds of the test raster reprojected to geographic coordinates.
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        l, b, r, t = src.bounds
        assert np.allclose(
            transform_bounds(src.crs, CRS.from_epsg(4326), l, b, r, t),
            (
                -78.95864996545055,
                23.564991210854686,
                -76.57492370013823,
                25.550873767433984,
            ),
        )
def test_transform_bounds__esri_wkt():
    # Destination CRS given as an ESRI WKT string rather than a CRS object.
    left, bottom, right, top = \
        (-78.95864996545055, 23.564991210854686,
         -76.57492370013823, 25.550873767433984)
    dst_projection_string = (
        'PROJCS["USA_Contiguous_Albers_Equal_Area_Conic_USGS_version",'
        'GEOGCS["GCS_North_American_1983",DATUM["D_North_American_1983",'
        'SPHEROID["GRS_1980",6378137.0,298.257222101]],'
        'PRIMEM["Greenwich",0.0],'
        'UNIT["Degree",0.0174532925199433]],'
        'PROJECTION["Albers"],'
        'PARAMETER["false_easting",0.0],'
        'PARAMETER["false_northing",0.0],'
        'PARAMETER["central_meridian",-96.0],'
        'PARAMETER["standard_parallel_1",29.5],'
        'PARAMETER["standard_parallel_2",45.5],'
        'PARAMETER["latitude_of_origin",23.0],'
        'UNIT["Meter",1.0],'
        'VERTCS["NAVD_1988",'
        'VDATUM["North_American_Vertical_Datum_1988"],'
        'PARAMETER["Vertical_Shift",0.0],'
        'PARAMETER["Direction",1.0],UNIT["Centimeter",0.01]]]')
    assert np.allclose(
        transform_bounds(CRS.from_epsg(4326),
                         dst_projection_string,
                         left,
                         bottom,
                         right,
                         top),
        (
            1721263.7931814701,
            219684.49332178483,
            2002926.56696663,
            479360.16562217404),
    )
@pytest.mark.parametrize(
    "density,expected",
    [
        (0, (-1684649.41338, -350356.81377, 1684649.41338, 2234551.18559)),
        (100, (-1684649.41338, -555777.79210, 1684649.41338, 2234551.18559)),
    ],
)
def test_transform_bounds_densify(density, expected):
    # This transform is non-linear along the edges, so densification produces
    # a different result than otherwise
    src_crs = CRS.from_epsg(4326)
    dst_crs = CRS.from_epsg(2163)
    with rasterio.Env(OSR_USE_NON_DEPRECATED="NO"):
        assert np.allclose(
            expected,
            transform_bounds(src_crs, dst_crs, -120, 40, -80, 64, densify_pts=density),
        )
def test_transform_bounds_no_change():
    """Make sure that going from and to the same crs causes no change."""
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        l, b, r, t = src.bounds
        assert np.allclose(transform_bounds(src.crs, src.crs, l, b, r, t), src.bounds)
def test_transform_bounds_densify_out_of_bounds():
    # A negative densify_pts value is rejected.
    with pytest.raises(ValueError):
        transform_bounds(
            CRS.from_epsg(4326),
            CRS.from_epsg(32610),
            -120,
            40,
            -80,
            64,
            densify_pts=-10,
        )
def test_calculate_default_transform():
    # Default output grid when only bounds and size are given.
    target_transform = Affine(
        0.0028535715391804096,
        0.0,
        -78.95864996545055,
        0.0,
        -0.0028535715391804096,
        25.550873767433984,
    )
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        wgs84_crs = CRS.from_epsg(4326)
        dst_transform, width, height = calculate_default_transform(
            src.crs, wgs84_crs, src.width, src.height, *src.bounds
        )
        assert dst_transform.almost_equals(target_transform)
        assert width == 835
        assert height == 696
def test_calculate_default_transform_single_resolution():
    # A scalar resolution applies to both axes.
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        target_resolution = 0.1
        target_transform = Affine(
            target_resolution,
            0.0,
            -78.95864996545055,
            0.0,
            -target_resolution,
            25.550873767433984,
        )
        dst_transform, width, height = calculate_default_transform(
            src.crs,
            CRS.from_epsg(4326),
            src.width,
            src.height,
            *src.bounds,
            resolution=target_resolution
        )
        assert dst_transform.almost_equals(target_transform)
        assert width == 24
        assert height == 20
def test_calculate_default_transform_multiple_resolutions():
    # An (x, y) resolution tuple sets each axis independently.
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        target_resolution = (0.2, 0.1)
        target_transform = Affine(
            target_resolution[0],
            0.0,
            -78.95864996545055,
            0.0,
            -target_resolution[1],
            25.550873767433984,
        )
        dst_transform, width, height = calculate_default_transform(
            src.crs,
            CRS.from_epsg(4326),
            src.width,
            src.height,
            *src.bounds,
            resolution=target_resolution
        )
        assert dst_transform.almost_equals(target_transform)
        assert width == 12
        assert height == 20
def test_calculate_default_transform_dimensions():
    # Explicit output dimensions force the pixel size instead.
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        dst_width, dst_height = (113, 103)
        target_transform = Affine(
            0.02108612597535966,
            0.0,
            -78.95864996545055,
            0.0,
            -0.0192823863230055,
            25.550873767433984,
        )
        dst_transform, width, height = calculate_default_transform(
            src.crs,
            CRS.from_epsg(4326),
            src.width,
            src.height,
            *src.bounds,
            dst_width=dst_width,
            dst_height=dst_height
        )
        assert dst_transform.almost_equals(target_transform)
        assert width == dst_width
        assert height == dst_height
def test_reproject_ndarray():
    # ndarray -> ndarray with a PROJ4-style dict destination CRS.
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        source = src.read(1)
    dst_crs = dict(
        proj="merc",
        a=6378137,
        b=6378137,
        lat_ts=0.0,
        lon_0=0.0,
        x_0=0.0,
        y_0=0,
        k=1.0,
        units="m",
        nadgrids="@null",
        wktext=True,
        no_defs=True,
    )
    out = np.empty(src.shape, dtype=np.uint8)
    reproject(
        source,
        out,
        src_transform=src.transform,
        src_crs=src.crs,
        dst_transform=DST_TRANSFORM,
        dst_crs=dst_crs,
        resampling=Resampling.nearest,
    )
    # Expected count of valid (nonzero) pixels for this grid.
    assert (out > 0).sum() == 438113
def test_reproject_view():
    """Source views are reprojected properly"""
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        source = src.read(1)
    window = windows.Window(100, 100, 500, 500)
    # window = windows.get_data_window(source)
    reduced_array = source[window.toslices()]
    reduced_transform = windows.transform(window, src.transform)
    # Assert that we're working with a view.
    assert reduced_array.base is source
    dst_crs = dict(
        proj="merc",
        a=6378137,
        b=6378137,
        lat_ts=0.0,
        lon_0=0.0,
        x_0=0.0,
        y_0=0,
        k=1.0,
        units="m",
        nadgrids="@null",
        wktext=True,
        no_defs=True,
    )
    out = np.empty(src.shape, dtype=np.uint8)
    reproject(
        reduced_array,
        out,
        src_transform=reduced_transform,
        src_crs=src.crs,
        dst_transform=DST_TRANSFORM,
        dst_crs=dst_crs,
        resampling=Resampling.nearest,
    )
    assert (out > 0).sum() == 299199
def test_reproject_epsg():
    # Destination CRS given as an {"init": "epsg:..."} mapping.
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        source = src.read(1)
    dst_crs = {"init": "epsg:3857"}
    out = np.empty(src.shape, dtype=np.uint8)
    reproject(
        source,
        out,
        src_transform=src.transform,
        src_crs=src.crs,
        dst_transform=DST_TRANSFORM,
        dst_crs=dst_crs,
        resampling=Resampling.nearest,
    )
    assert (out > 0).sum() == 438113
def test_reproject_epsg__simple_array():
    # No destination array/transform given: reproject computes them.
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        source = src.read(1)
    dst_crs = {"init": "EPSG:3857"}
    out, dst_transform = reproject(
        source,
        src_transform=src.transform,
        src_crs=src.crs,
        dst_crs=dst_crs,
        resampling=Resampling.nearest,
    )
    assert (out > 0).sum() == 383077
    assert_almost_equal(tuple(dst_transform),
                        tuple(Affine(330.2992903555146, 0.0, -8789636.707871985,
                                     0.0, -330.2992903555146, 2943560.2346221623)),
                        decimal=5)
def test_reproject_epsg__simple_array_resolution():
    # dst_resolution pins the output pixel size of the computed grid.
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        source = src.read(1)
    dst_crs = {"init": "EPSG:3857"}
    out, dst_transform = reproject(
        source,
        src_transform=src.transform,
        src_crs=src.crs,
        dst_crs=dst_crs,
        dst_resolution=(300, 300),
        resampling=Resampling.nearest,
    )
    assert (out > 0).sum() == 464503
    assert_almost_equal(tuple(dst_transform),
                        tuple(Affine(300, 0.0, -8789636.707871985,
                                     0.0, -300, 2943560.2346221623)),
                        decimal=5)
def test_reproject_epsg__simple_array_dst():
    # A pre-allocated destination array fixes the output shape; the
    # transform is derived to fit it.
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        source = src.read(1)
    dst_crs = {"init": "EPSG:3857"}
    dst_out = np.empty(src.shape, dtype=np.uint8)
    out, dst_transform = reproject(
        source,
        dst_out,
        src_transform=src.transform,
        src_crs=src.crs,
        dst_crs=dst_crs,
        resampling=Resampling.nearest,
    )
    assert (out > 0).sum() == 368123
    assert_almost_equal(tuple(dst_transform),
                        tuple(Affine(335.3101519032594, 0.0, -8789636.707871985,
                                     0.0, -338.579773957742, 2943560.2346221623)),
                        decimal=5)
def test_reproject_epsg__simple():
    # Band source, computed destination grid.
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        dst_crs = {"init": "EPSG:3857"}
        out, dst_transform = reproject(
            rasterio.band(src, 1),
            dst_crs=dst_crs,
            resampling=Resampling.nearest,
        )
    assert (out > 0).sum() == 383077
    assert_almost_equal(tuple(dst_transform),
                        tuple(Affine(330.2992903555146, 0.0, -8789636.707871985,
                                     0.0, -330.2992903555146, 2943560.2346221623)),
                        decimal=5)
def test_reproject_epsg__simple_resolution():
    # Band source with an explicit output resolution.
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        dst_crs = {"init": "EPSG:3857"}
        out, dst_transform = reproject(
            rasterio.band(src, 1),
            dst_crs=dst_crs,
            dst_resolution=(300, 300),
            resampling=Resampling.nearest,
        )
    assert (out > 0).sum() == 464503
    assert_almost_equal(tuple(dst_transform),
                        tuple(Affine(300.0, 0.0, -8789636.707871985,
                                     0.0, -300.0, 2943560.2346221623)),
                        decimal=5)
def test_reproject_no_destination_with_transform():
    # Supplying dst_transform without a destination array is an error.
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        source = src.read(1)
    dst_crs = {"init": "EPSG:3857"}
    with pytest.raises(ValueError):
        reproject(
            source,
            src_transform=src.transform,
            src_crs=src.crs,
            dst_crs=dst_crs,
            dst_transform=DST_TRANSFORM,
            resampling=Resampling.nearest,
        )
def test_reproject_out_of_bounds():
    """Using EPSG code is not appropriate for the transform.
    Should return blank image.
    """
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        source = src.read(1)
    dst_crs = {"init": "epsg:32619"}
    out = np.zeros(src.shape, dtype=np.uint8)
    reproject(
        source,
        out,
        src_transform=src.transform,
        src_crs=src.crs,
        dst_transform=DST_TRANSFORM,
        dst_crs=dst_crs,
        resampling=Resampling.nearest,
    )
    assert not out.any()
@requires_gdal3
@pytest.mark.parametrize("options, expected", reproj_expected)
def test_reproject_nodata(options, expected):
    # Older combinations of GDAL and PROJ might have got this transformation wrong.
    # Results look better with GDAL 3.
    nodata = 215
    with rasterio.Env(**options):
        params = uninvertable_reproject_params()
        source = np.ones((params.width, params.height), dtype=np.uint8)
        out = np.zeros((params.dst_width, params.dst_height), dtype=source.dtype)
        out.fill(120)  # Fill with arbitrary value
        reproject(
            source,
            out,
            src_transform=params.src_transform,
            src_crs=params.src_crs,
            src_nodata=nodata,
            dst_transform=params.dst_transform,
            dst_crs=params.dst_crs,
            dst_nodata=nodata,
        )
        # Valid pixels keep the source value; everything else is nodata.
        assert (out == 1).sum() == expected
        assert (out == nodata).sum() == (
            params.dst_width * params.dst_height - expected
        )
@requires_gdal3
@pytest.mark.parametrize("options, expected", reproj_expected)
def test_reproject_nodata_nan(options, expected):
    # Same as above but with NaN as the nodata sentinel (float source).
    with rasterio.Env(**options):
        params = uninvertable_reproject_params()
        source = np.ones((params.width, params.height), dtype=np.float32)
        out = np.zeros((params.dst_width, params.dst_height), dtype=source.dtype)
        out.fill(120)  # Fill with arbitrary value
        reproject(
            source,
            out,
            src_transform=params.src_transform,
            src_crs=params.src_crs,
            src_nodata=np.nan,
            dst_transform=params.dst_transform,
            dst_crs=params.dst_crs,
            dst_nodata=np.nan,
        )
        assert (out == 1).sum() == expected
        assert np.isnan(out).sum() == (params.dst_width * params.dst_height - expected)
@requires_gdal3
@pytest.mark.parametrize("options, expected", reproj_expected)
def test_reproject_dst_nodata_default(options, expected):
    """If nodata is not provided, destination will be filled with 0."""
    with rasterio.Env(**options):
        params = uninvertable_reproject_params()
        source = np.ones((params.width, params.height), dtype=np.uint8)
        out = np.zeros((params.dst_width, params.dst_height), dtype=source.dtype)
        out.fill(120)  # Fill with arbitrary value
        reproject(
            source,
            out,
            src_transform=params.src_transform,
            src_crs=params.src_crs,
            dst_transform=params.dst_transform,
            dst_crs=params.dst_crs,
        )
        assert (out == 1).sum() == expected
        assert (out == 0).sum() == (params.dst_width * params.dst_height - expected)
def test_reproject_invalid_dst_nodata():
    """dst_nodata must be in value range of data type."""
    params = default_reproject_params()
    source = np.ones((params.width, params.height), dtype=np.uint8)
    out = source.copy()
    with pytest.raises(ValueError):
        reproject(
            source,
            out,
            src_transform=params.src_transform,
            src_crs=params.src_crs,
            src_nodata=0,
            dst_transform=params.dst_transform,
            dst_crs=params.dst_crs,
            dst_nodata=999999999,  # out of uint8 range
        )
def test_reproject_invalid_src_nodata():
    """src_nodata must be in range for data type."""
    params = default_reproject_params()
    source = np.ones((params.width, params.height), dtype=np.uint8)
    out = source.copy()
    with pytest.raises(ValueError):
        reproject(
            source,
            out,
            src_transform=params.src_transform,
            src_crs=params.src_crs,
            src_nodata=999999999,  # out of uint8 range
            dst_transform=params.dst_transform,
            dst_crs=params.dst_crs,
            dst_nodata=215,
        )
def test_reproject_init_nodata_tofile(tmpdir):
    """Test that nodata is being initialized."""
    params = default_reproject_params()
    tiffname = str(tmpdir.join("foo.tif"))
    source1 = np.zeros((params.width, params.height), dtype=np.uint8)
    source2 = source1.copy()
    # fill both sources w/ arbitrary values
    rows, cols = source1.shape
    source1[:rows // 2, :cols // 2] = 200
    source2[rows // 2:, cols // 2:] = 100
    kwargs = {
        "count": 1,
        "width": params.width,
        "height": params.height,
        "dtype": np.uint8,
        "driver": "GTiff",
        "crs": params.dst_crs,
        "transform": params.dst_transform,
    }
    with rasterio.open(tiffname, "w", **kwargs) as dst:
        reproject(
            source1,
            rasterio.band(dst, 1),
            src_transform=params.src_transform,
            src_crs=params.src_crs,
            src_nodata=0.0,
            dst_transform=params.dst_transform,
            dst_crs=params.dst_crs,
            dst_nodata=0.0,
        )
        # 200s should be overwritten by 100s
        reproject(
            source2,
            rasterio.band(dst, 1),
            src_transform=params.src_transform,
            src_crs=params.src_crs,
            src_nodata=0.0,
            dst_transform=params.dst_transform,
            dst_crs=params.dst_crs,
            dst_nodata=0.0,
        )
    with rasterio.open(tiffname) as src:
        assert src.read().max() == 100
def test_reproject_no_init_nodata_tofile(tmpdir):
    """Test that nodata is not being initialized."""
    params = default_reproject_params()
    tiffname = str(tmpdir.join("foo.tif"))
    source1 = np.zeros((params.width, params.height), dtype=np.uint8)
    source2 = source1.copy()
    # fill both sources w/ arbitrary values
    rows, cols = source1.shape
    source1[:rows // 2, :cols // 2] = 200
    source2[rows // 2:, cols // 2:] = 100
    kwargs = {
        "count": 1,
        "width": params.width,
        "height": params.height,
        "dtype": np.uint8,
        "driver": "GTiff",
        "crs": params.dst_crs,
        "transform": params.dst_transform,
    }
    with rasterio.open(tiffname, "w", **kwargs) as dst:
        reproject(
            source1,
            rasterio.band(dst, 1),
            src_transform=params.src_transform,
            src_crs=params.src_crs,
            src_nodata=0.0,
            dst_transform=params.dst_transform,
            dst_crs=params.dst_crs,
            dst_nodata=0.0,
        )
        # Second pass opts out of re-initializing the destination to nodata.
        reproject(
            source2,
            rasterio.band(dst, 1),
            src_transform=params.src_transform,
            src_crs=params.src_crs,
            src_nodata=0.0,
            dst_transform=params.dst_transform,
            dst_crs=params.dst_crs,
            dst_nodata=0.0,
            init_dest_nodata=False,
        )
    # 200s should remain along with 100s
    with rasterio.open(tiffname) as src:
        data = src.read()
        assert data.max() == 200
def test_reproject_no_init_nodata_toarray():
    """Test that nodata is not re-initialized when init_dest_nodata=False."""
    params = default_reproject_params()
    source1 = np.zeros((params.width, params.height))
    source2 = source1.copy()
    out = source1.copy()
    # fill both sources w/ arbitrary values
    rows, cols = source1.shape
    source1[:rows // 2, :cols // 2] = 200
    source2[rows // 2:, cols // 2:] = 100
    reproject(
        source1,
        out,
        src_transform=params.src_transform,
        src_crs=params.src_crs,
        src_nodata=0.0,
        dst_transform=params.dst_transform,
        dst_crs=params.dst_crs,
        dst_nodata=0.0,
    )
    assert out.max() == 200
    assert out.min() == 0
    reproject(
        source2,
        out,
        src_transform=params.src_transform,
        src_crs=params.src_crs,
        src_nodata=0.0,
        dst_transform=params.dst_transform,
        dst_crs=params.dst_crs,
        dst_nodata=0.0,
        init_dest_nodata=False,
    )
    # 200s should NOT be overwritten by 100s
    assert out.max() == 200
    assert out.min() == 0
def test_reproject_multi():
    """Ndarry to ndarray."""
    # All bands reprojected at once (3-D source and destination).
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        source = src.read()
    dst_crs = dict(
        proj="merc",
        a=6378137,
        b=6378137,
        lat_ts=0.0,
        lon_0=0.0,
        x_0=0.0,
        y_0=0,
        k=1.0,
        units="m",
        nadgrids="@null",
        wktext=True,
        no_defs=True,
    )
    destin = np.empty(source.shape, dtype=np.uint8)
    reproject(
        source,
        destin,
        src_transform=src.transform,
        src_crs=src.crs,
        dst_transform=DST_TRANSFORM,
        dst_crs=dst_crs,
        resampling=Resampling.nearest,
    )
    assert destin.any()
def test_warp_from_file():
    """File to ndarray."""
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        dst_crs = dict(
            proj="merc",
            a=6378137,
            b=6378137,
            lat_ts=0.0,
            lon_0=0.0,
            x_0=0.0,
            y_0=0,
            k=1.0,
            units="m",
            nadgrids="@null",
            wktext=True,
            no_defs=True,
        )
        destin = np.empty(src.shape, dtype=np.uint8)
        reproject(
            rasterio.band(src, 1), destin, dst_transform=DST_TRANSFORM, dst_crs=dst_crs
        )
    assert destin.any()
def test_warp_from_to_file(tmpdir):
    """File to file."""
    tiffname = str(tmpdir.join("foo.tif"))
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        dst_crs = dict(
            proj="merc",
            a=6378137,
            b=6378137,
            lat_ts=0.0,
            lon_0=0.0,
            x_0=0.0,
            y_0=0,
            k=1.0,
            units="m",
            nadgrids="@null",
            wktext=True,
            no_defs=True,
        )
        kwargs = src.meta.copy()
        kwargs.update(transform=DST_TRANSFORM, crs=dst_crs)
        with rasterio.open(tiffname, "w", **kwargs) as dst:
            for i in (1, 2, 3):
                reproject(rasterio.band(src, i), rasterio.band(dst, i))
def test_warp_from_to_file_multi(tmpdir):
    """File to file."""
    # Same as above but exercising the num_threads option.
    tiffname = str(tmpdir.join("foo.tif"))
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        dst_crs = dict(
            proj="merc",
            a=6378137,
            b=6378137,
            lat_ts=0.0,
            lon_0=0.0,
            x_0=0.0,
            y_0=0,
            k=1.0,
            units="m",
            nadgrids="@null",
            wktext=True,
            no_defs=True,
        )
        kwargs = src.meta.copy()
        kwargs.update(transform=DST_TRANSFORM, crs=dst_crs)
        with rasterio.open(tiffname, "w", **kwargs) as dst:
            for i in (1, 2, 3):
                reproject(rasterio.band(src, i), rasterio.band(dst, i), num_threads=2)
@pytest.fixture(scope="function")
def polygon_3373():
    """An EPSG:3373 polygon."""
    # Single-ring GeoJSON polygon that straddles the antimeridian when
    # reprojected to EPSG:4326 (used by the cutting/offset tests).
    return {
        "type": "Polygon",
        "coordinates": (
            (
                (798842.3090855901, 6569056.500655151),
                (756688.2826828464, 6412397.888771972),
                (755571.0617232556, 6408461.009397383),
                (677605.2284582685, 6425600.39266733),
                (677605.2284582683, 6425600.392667332),
                (670873.3791649605, 6427248.603432341),
                (664882.1106069803, 6407585.48425362),
                (663675.8662823177, 6403676.990080649),
                (485120.71963574126, 6449787.167760638),
                (485065.55660851026, 6449802.826920689),
                (485957.03982722526, 6452708.625101285),
                (487541.24541826674, 6457883.292107048),
                (531008.5797472061, 6605816.560367976),
                (530943.7197027118, 6605834.9333479265),
                (531888.5010308184, 6608940.750411527),
                (533299.5981959199, 6613962.642851984),
                (533403.6388841148, 6613933.172096095),
                (576345.6064638699, 6761983.708069147),
                (577649.6721159086, 6766698.137844516),
                (578600.3589008929, 6770143.99782289),
                (578679.4732294685, 6770121.638265098),
                (655836.640492081, 6749376.357102599),
                (659913.0791150068, 6764770.1314677475),
                (661105.8478791204, 6769515.168134831),
                (661929.4670843681, 6772800.8565198565),
                (661929.4670843673, 6772800.856519875),
                (661975.1582566603, 6772983.354777632),
                (662054.7979028501, 6772962.86384242),
                (841909.6014891531, 6731793.200435557),
                (840726.455490463, 6727039.8672589315),
                (798842.3090855901, 6569056.500655151),
            ),
        ),
    }
def test_transform_geom_polygon_cutting(polygon_3373):
    # Antimeridian cutting splits the polygon into two parts.
    geom = polygon_3373
    result = transform_geom("EPSG:3373", "EPSG:4326", geom, antimeridian_cutting=True)
    assert result["type"] == "MultiPolygon"
    assert len(result["coordinates"]) == 2
def test_transform_geom_polygon_offset(polygon_3373):
    # A zero antimeridian offset still produces the two-part result.
    geom = polygon_3373
    result = transform_geom(
        "EPSG:3373", "EPSG:4326", geom, antimeridian_cutting=True, antimeridian_offset=0
    )
    assert result["type"] == "MultiPolygon"
    assert len(result["coordinates"]) == 2
def test_transform_geom_polygon_precision(polygon_3373):
    # precision=1 rounds every output coordinate to one decimal place.
    geom = polygon_3373
    result = transform_geom(
        "EPSG:3373", "EPSG:4326", geom, precision=1, antimeridian_cutting=True
    )
    assert all(round(x, 1) == x for x in flatten_coords(result["coordinates"]))
def test_transform_geom_linestring_precision(polygon_3373):
    ring = polygon_3373["coordinates"][0]
    geom = {"type": "LineString", "coordinates": ring}
    result = transform_geom(
        "EPSG:3373", "EPSG:4326", geom, precision=1, antimeridian_cutting=True
    )
    assert all(round(x, 1) == x for x in flatten_coords(result["coordinates"]))
def test_transform_geom_linestring_precision_iso(polygon_3373):
    # Identity transform (same CRS both sides) with rounding only.
    ring = polygon_3373["coordinates"][0]
    geom = {"type": "LineString", "coordinates": ring}
    result = transform_geom("EPSG:3373", "EPSG:3373", geom, precision=1)
    assert int(result["coordinates"][0][0] * 10) == 7988423
def test_transform_geom_linearring_precision(polygon_3373):
    ring = polygon_3373["coordinates"][0]
    geom = {"type": "LinearRing", "coordinates": ring}
    result = transform_geom(
        "EPSG:3373", "EPSG:4326", geom, precision=1, antimeridian_cutting=True
    )
    assert all(round(x, 1) == x for x in flatten_coords(result["coordinates"]))
def test_transform_geom_linestring_precision_z(polygon_3373):
    # 3D coordinates: the z values are carried through and rounded too.
    ring = polygon_3373["coordinates"][0]
    x, y = zip(*ring)
    ring = list(zip(x, y, [0.0 for i in range(len(x))]))
    geom = {"type": "LineString", "coordinates": ring}
    result = transform_geom("EPSG:3373", "EPSG:3373", geom, precision=1)
    assert int(result["coordinates"][0][0] * 10) == 7988423
    assert int(result["coordinates"][0][2] * 10) == 0
def test_transform_geom_multipolygon(polygon_3373):
    geom = {"type": "MultiPolygon", "coordinates": [polygon_3373["coordinates"]]}
    result = transform_geom("EPSG:3373", "EPSG:4326", geom, precision=1)
    assert all(round(x, 1) == x for x in flatten_coords(result["coordinates"]))
def test_transform_geom_array(polygon_3373):
    # A list of geometries maps to a list of results.
    geom = [polygon_3373 for _ in range(10)]
    result = transform_geom("EPSG:3373", "EPSG:4326", geom, precision=1)
    assert isinstance(result, list)
    assert len(result) == 10
def test_transform_geom__geo_interface(polygon_3373):
    # Objects exposing __geo_interface__ are accepted as geometries.
    class GeoObj:
        @property
        def __geo_interface__(self):
            return polygon_3373
    result = transform_geom("EPSG:3373", "EPSG:4326", GeoObj(), precision=1)
    assert all(round(x, 1) == x for x in flatten_coords(result["coordinates"]))
def test_transform_geom__geo_interface__array(polygon_3373):
    # Lists of __geo_interface__ objects are accepted as well.
    class GeoObj:
        @property
        def __geo_interface__(self):
            return polygon_3373
    geom = [GeoObj() for _ in range(10)]
    results = transform_geom("EPSG:3373", "EPSG:4326", geom, precision=1)
    assert isinstance(results, list)
    assert len(results) == 10
    for result in results:
        assert all(round(x, 1) == x for x in flatten_coords(result["coordinates"]))
@pytest.mark.parametrize("method", SUPPORTED_RESAMPLING)
def test_reproject_resampling(path_rgb_byte_tif, method):
    # Expected count of nonzero pixels for each resampling method, based
    # on running rasterio with each of the following configurations
    expected = {
        Resampling.nearest: [438113],
        Resampling.bilinear: [439280],
        Resampling.cubic: [437888],
        Resampling.cubic_spline: [440475],
        Resampling.lanczos: [436001],
        Resampling.average: [439419, 439172],  # latter value for GDAL 3.1
        Resampling.mode: [437298],
        Resampling.max: [439464],
        Resampling.min: [436397],
        Resampling.med: [437194],
        Resampling.q1: [436397],
        Resampling.q3: [438948],
        Resampling.sum: [439118],
        Resampling.rms: [439385],
    }
    with rasterio.open(path_rgb_byte_tif) as src:
        source = src.read(1)
    out = np.empty(src.shape, dtype=np.uint8)
    reproject(
        source,
        out,
        src_transform=src.transform,
        src_crs=src.crs,
        dst_transform=DST_TRANSFORM,
        dst_crs="EPSG:3857",
        resampling=method,
    )
    # Membership test: some methods have version-dependent counts.
    assert np.count_nonzero(out) in expected[method]
@pytest.mark.parametrize("test3d,count_nonzero", [(True, 1309625), (False, 437686)])
def test_reproject_array_interface(test3d, count_nonzero, path_rgb_byte_tif):
    # Objects exposing __array__ / dtype are accepted as src and dst.
    class DataArray:
        def __init__(self, data):
            self.data = data
        def __array__(self, dtype=None):
            return self.data
        @property
        def dtype(self):
            return self.data.dtype
    with rasterio.open(path_rgb_byte_tif) as src:
        if test3d:
            source = DataArray(src.read())
        else:
            source = DataArray(src.read(1))
    out = DataArray(np.empty(source.data.shape, dtype=np.uint8))
    reproject(
        source,
        out,
        src_transform=src.transform,
        src_crs=src.crs,
        src_nodata=src.nodata,
        dst_transform=DST_TRANSFORM,
        dst_crs="EPSG:3857",
        dst_nodata=99,
    )
    assert isinstance(out, DataArray)
    assert np.count_nonzero(out.data[out.data != 99]) == count_nonzero
@pytest.mark.parametrize("test3d,count_nonzero", [(True, 1309625), (False, 437686)])
def test_reproject_masked(test3d, count_nonzero, path_rgb_byte_tif):
    # Masked source arrays: the mask defines the source nodata region.
    with rasterio.open(path_rgb_byte_tif) as src:
        if test3d:
            source = src.read(masked=True)
        else:
            source = src.read(1, masked=True)
    out = np.empty(source.shape, dtype=np.uint8)
    reproject(
        source,
        out,
        src_transform=src.transform,
        src_crs=src.crs,
        dst_transform=DST_TRANSFORM,
        dst_crs="EPSG:3857",
        dst_nodata=99,
    )
    assert np.ma.is_masked(source)
    assert np.count_nonzero(out[out != 99]) == count_nonzero
@pytest.mark.parametrize("method", SUPPORTED_RESAMPLING)
def test_reproject_resampling_alpha(method):
"""Reprojection of a source with alpha band succeeds"""
# Expected count of nonzero pixels for each resampling method, based
# on running rasterio with each of the following configurations
expected = {
Resampling.nearest: [438113],
Resampling.bilinear: [439280],
Resampling.cubic: [437888],
Resampling.cubic_spline: [440475],
Resampling.lanczos: [436001],
Resampling.average: [439419, 439172], # latter value for GDAL 3.1
Resampling.mode: [437298],
Resampling.max: [439464],
Resampling.min: [436397],
Resampling.med: [437194],
Resampling.q1: [436397],
Resampling.q3: [438948],
Resampling.sum: [439118],
Resampling.rms: [439385],
}
with rasterio.open("tests/data/RGBA.byte.tif") as src:
source = src.read(1)
out = np.empty(src.shape, dtype=np.uint8)
reproject(
source,
out,
src_transform=src.transform,
src_crs=src.crs,
dst_transform=DST_TRANSFORM,
dst_crs="EPSG:3857",
resampling=method,
)
assert np.count_nonzero(out) in expected[method]
@pytest.mark.skipif(
    gdal_version.at_least("2.0"), reason="Tests only applicable to GDAL < 2.0"
)
@pytest.mark.parametrize("method", GDAL2_RESAMPLING)
def test_reproject_not_yet_supported_resampling(method):
    """Resampling methods unavailable in this GDAL raise GDALVersionError."""
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        band = src.read(1)
        destination = np.empty(src.shape, dtype=np.uint8)
        with pytest.raises(GDALVersionError):
            reproject(
                band,
                destination,
                src_transform=src.transform,
                src_crs=src.crs,
                dst_transform=DST_TRANSFORM,
                dst_crs="EPSG:32619",
                resampling=method,
            )
def test_reproject_unsupported_resampling():
    """Integer values outside the Resampling enum are rejected."""
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        band = src.read(1)
        destination = np.empty(src.shape, dtype=np.uint8)
        with pytest.raises(ValueError):
            reproject(
                band,
                destination,
                src_transform=src.transform,
                src_crs=src.crs,
                dst_transform=DST_TRANSFORM,
                dst_crs="EPSG:32619",
                resampling=99,  # not a member of Resampling
            )
def test_reproject_unsupported_resampling_guass():
    """Resampling.gauss is unsupported for reprojection."""
    # NOTE: "guass" in the test name is a typo for "gauss"; kept to
    # preserve the public test id.
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        band = src.read(1)
        destination = np.empty(src.shape, dtype=np.uint8)
        with pytest.raises(ValueError):
            reproject(
                band,
                destination,
                src_transform=src.transform,
                src_crs=src.crs,
                dst_transform=DST_TRANSFORM,
                dst_crs="EPSG:32619",
                resampling=Resampling.gauss,
            )
@pytest.mark.parametrize("method", SUPPORTED_RESAMPLING)
def test_resample_default_invert_proj(method):
"""Nearest and bilinear should produce valid results
with the default Env
"""
with rasterio.open("tests/data/world.rgb.tif") as src:
source = src.read(1)
profile = src.profile
dst_crs = "EPSG:32619"
# Calculate the ideal dimensions and transformation in the new crs
dst_affine, dst_width, dst_height = calculate_default_transform(
src.crs, dst_crs, src.width, src.height, *src.bounds
)
profile["height"] = dst_height
profile["width"] = dst_width
out = np.empty(shape=(dst_height, dst_width), dtype=np.uint8)
# GDAL 1.11 needs to have this config option set on to match the
# default results in later versions.
if gdal_version.major == 1:
options = dict(CHECK_WITH_INVERT_PROJ=True)
else:
options = {}
with rasterio.Env(**options):
reproject(
source,
out,
src_transform=src.transform,
src_crs=src.crs,
dst_transform=dst_affine,
dst_crs=dst_crs,
resampling=method,
)
assert out.mean() > 0
@pytest.mark.xfail(reason="Projection extents have changed with PROJ 8")
def test_target_aligned_pixels():
"""Issue 853 has been resolved"""
with rasterio.open("tests/data/world.rgb.tif") as src:
source = src.read(1)
profile = src.profile
dst_crs = "EPSG:3857"
with rasterio.Env(CHECK_WITH_INVERT_PROJ=False):
# Calculate the ideal dimensions and transformation in the new crs
dst_affine, dst_width, dst_height = calculate_default_transform(
src.crs, dst_crs, src.width, src.height, *src.bounds
)
dst_affine, dst_width, dst_height = aligned_target(
dst_affine, dst_width, dst_height, 10000.0
)
profile["height"] = dst_height
profile["width"] = dst_width
out = np.empty(shape=(dst_height, dst_width), dtype=np.uint8)
reproject(
source,
out,
src_transform=src.transform,
src_crs=src.crs,
dst_transform=dst_affine,
dst_crs=dst_crs,
resampling=Resampling.nearest,
)
# Check that there are no black borders
assert out[:, 0].all()
assert out[:, -1].all()
assert out[0, :].all()
assert out[-1, :].all()
@pytest.mark.parametrize("method", SUPPORTED_RESAMPLING)
def test_resample_no_invert_proj(method):
"""Nearest and bilinear should produce valid results with
CHECK_WITH_INVERT_PROJ = False
"""
if method in (
Resampling.bilinear,
Resampling.cubic,
Resampling.cubic_spline,
Resampling.lanczos,
):
pytest.xfail(
reason="Some resampling methods succeed but produce blank images. "
"See https://github.com/mapbox/rasterio/issues/614"
)
with rasterio.Env(CHECK_WITH_INVERT_PROJ=False):
with rasterio.open("tests/data/world.rgb.tif") as src:
source = src.read(1)
profile = src.profile.copy()
dst_crs = "EPSG:32619"
# Calculate the ideal dimensions and transformation in the new crs
dst_affine, dst_width, dst_height = calculate_default_transform(
src.crs, dst_crs, src.width, src.height, *src.bounds
)
profile["height"] = dst_height
profile["width"] = dst_width
out = np.empty(shape=(dst_height, dst_width), dtype=np.uint8)
# see #614, some resampling methods succeed but produce blank images
out = np.empty(src.shape, dtype=np.uint8)
reproject(
source,
out,
src_transform=src.transform,
src_crs=src.crs,
dst_transform=dst_affine,
dst_crs=dst_crs,
resampling=method,
)
assert out.mean() > 0
def test_reproject_crs_none():
    """Reprojecting with both CRSes None raises instead of segfaulting."""
    src_arr = np.random.random(25).reshape((1, 5, 5))
    dst_arr = np.empty(shape=(1, 11, 11))
    with pytest.raises(ValueError):
        reproject(
            src_arr,
            dst_arr,
            src_transform=Affine(1.1, 0.0, 0.0, 0.0, 1.1, 0.0),
            src_crs=None,
            dst_transform=Affine(0.5, 0.0, 0.0, 0.0, 0.5, 0.0),
            dst_crs=None,
            resampling=Resampling.nearest,
        )
def test_reproject_identity_src():
    """Identity-like source transforms are accepted without error."""
    src_arr = np.random.random(25).reshape((1, 5, 5))
    dst_arr = np.empty(shape=(1, 10, 10))
    dst_affine = Affine(0.5, 0.0, 0.0, 0.0, 0.5, 0.0)
    crs = {"init": "epsg:3857"}
    # Identity with positive and with negative e (north-up) must both work.
    for e_sign in (1.0, -1.0):
        reproject(
            src_arr,
            dst_arr,
            src_transform=Affine(1.0, 0.0, 0.0, 0.0, e_sign, 0.0),
            src_crs=crs,
            dst_transform=dst_affine,
            dst_crs=crs,
            resampling=Resampling.nearest,
        )
def test_reproject_identity_dst():
    """Identity-like destination transforms are accepted without error."""
    src_arr = np.random.random(100).reshape((1, 10, 10))
    src_affine = Affine(0.5, 0.0, 0.0, 0.0, 0.5, 0.0)
    dst_arr = np.empty(shape=(1, 5, 5))
    crs = {"init": "epsg:3857"}
    # Identity with positive and with negative e must both work.
    for e_sign in (1.0, -1.0):
        reproject(
            src_arr,
            dst_arr,
            src_transform=src_affine,
            src_crs=crs,
            dst_transform=Affine(1.0, 0.0, 0.0, 0.0, e_sign, 0.0),
            dst_crs=crs,
            resampling=Resampling.nearest,
        )
@pytest.fixture(scope="function")
def rgb_byte_profile():
    """Profile dict of the RGB.byte.tif test fixture."""
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        profile = src.profile
    return profile
def test_reproject_gcps_transform_exclusivity():
    """Passing both gcps and src_transform is rejected with ValueError."""
    with pytest.raises(ValueError):
        reproject(1, 1, src_transform=[0], gcps=[0])
def test_reproject_gcps(rgb_byte_profile):
    """Reproject using ground control points for the source.

    Fix: the destination array was sized (3, height, height) — the
    profile key "height" was repeated; the second spatial axis should
    be the profile's width so the output matches the reference raster.
    """
    source = np.ones((3, 800, 800), dtype=np.uint8) * 255
    out = np.zeros(
        (3, rgb_byte_profile["height"], rgb_byte_profile["width"]), dtype=np.uint8
    )
    # Four corner GCPs anchoring the 800x800 source in EPSG:32618.
    src_gcps = [
        GroundControlPoint(row=0, col=0, x=156113, y=2818720, z=0),
        GroundControlPoint(row=0, col=800, x=338353, y=2785790, z=0),
        GroundControlPoint(row=800, col=800, x=297939, y=2618518, z=0),
        GroundControlPoint(row=800, col=0, x=115698, y=2651448, z=0),
    ]
    reproject(
        source,
        out,
        src_crs="EPSG:32618",
        gcps=src_gcps,
        dst_transform=rgb_byte_profile["transform"],
        dst_crs=rgb_byte_profile["crs"],
        resampling=Resampling.nearest,
    )
    # The warped footprint does not fill the frame; all corners stay empty.
    assert not out.all()
    assert not out[:, 0, 0].any()
    assert not out[:, 0, -1].any()
    assert not out[:, -1, -1].any()
    assert not out[:, -1, 0].any()
@requires_gdal22(
    reason="GDAL 2.2.0 and newer has different antimeridian cutting behavior."
)
def test_transform_geom_gdal22():
    """Disabling antimeridian cutting raises on GDAL >= 2.2.0.

    There, cutting is always enabled, so honoring the flag silently could
    produce unexpected geometries; an exception is raised instead.
    """
    point = {"type": "Point", "coordinates": [0, 0]}
    with pytest.raises(GDALVersionError):
        transform_geom("EPSG:4326", "EPSG:3857", point, antimeridian_cutting=False)
def test_issue1056():
    """Warping from an upper band of RGB.byte.tif to an array succeeds."""
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        destination = np.zeros(src.shape, dtype=np.uint8)
        reproject(
            rasterio.band(src, 2),
            destination,
            src_transform=src.transform,
            src_crs=src.crs,
            dst_transform=DST_TRANSFORM,
            dst_crs="EPSG:3857",
            resampling=Resampling.nearest,
        )
def test_reproject_dst_nodata():
    """Affirm resolution of issue #1395: NaN works as dst_nodata."""
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        band1 = src.read(1)
        warped = np.empty(src.shape, dtype=np.float32)
        reproject(
            band1,
            warped,
            src_transform=src.transform,
            src_crs=src.crs,
            dst_transform=DST_TRANSFORM,
            dst_crs="EPSG:3857",
            src_nodata=0,
            dst_nodata=np.nan,
            resampling=Resampling.nearest,
        )
        # Valid pixel count is preserved and the corner is NaN nodata.
        assert (warped[~np.isnan(warped)] > 0.0).sum() == 438113
        assert warped[0, 0] != 0
        assert np.isnan(warped[0, 0])
def test_issue1401():
    """The warp_mem_limit keyword argument is accepted and in effect."""
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        destination = np.zeros(src.shape, dtype=np.uint8)
        reproject(
            rasterio.band(src, 2),
            destination,
            src_transform=src.transform,
            src_crs=src.crs,
            dst_transform=DST_TRANSFORM,
            dst_crs="EPSG:3857",
            resampling=Resampling.nearest,
            warp_mem_limit=4000,
        )
def test_reproject_dst_alpha(path_rgb_msk_byte_tif):
    """Materialization of external mask succeeds via dst_alpha."""
    with rasterio.open(path_rgb_msk_byte_tif) as src:
        rows, cols = src.shape
        # One extra band to receive the alpha channel.
        destination = np.zeros((src.count + 1, rows, cols), dtype=np.uint8)
        reproject(
            rasterio.band(src, src.indexes),
            destination,
            src_transform=src.transform,
            src_crs=src.crs,
            dst_transform=DST_TRANSFORM,
            dst_crs="EPSG:3857",
            dst_alpha=4,
        )
        # Band index 3 (the alpha band) must have been populated.
        assert destination[3].any()
@pytest.mark.xfail(
rasterio.__gdal_version__ in ["2.2.0", "2.2.1", "2.2.2", "2.2.3"],
reason=(
"GDAL had regression in 2.2.X series, fixed in 2.2.4,"
" reproject used dst index instead of src index when destination was single band"
),
)
def test_issue1350():
"""Warp bands other than 1 or All"""
with rasterio.open("tests/data/RGB.byte.tif") as src:
dst_crs = "EPSG:3857"
reprojected = []
for dtype, idx in zip(src.dtypes, src.indexes):
out = np.zeros((1,) + src.shape, dtype=dtype)
reproject(
rasterio.band(src, idx),
out,
resampling=Resampling.nearest,
dst_transform=DST_TRANSFORM,
dst_crs=dst_crs,
)
reprojected.append(out)
for i in range(1, len(reprojected)):
assert not (reprojected[0] == reprojected[i]).all()
def test_issue_1446():
    """Confirm resolution of #1446: point transforms to expected UTM coords."""
    point = transform_geom(
        CRS.from_epsg(4326),
        CRS.from_epsg(32610),
        {"type": "Point", "coordinates": (-122.51403808499907, 38.06106733107932)},
    )
    x, y = point["coordinates"]
    assert round(x, 1) == 542630.9
    assert round(y, 1) == 4212702.1
@requires_gdal_lt_3
def test_issue_1446_b():
    """Confirm that lines aren't thrown as reported in #1446.

    Fixes: the GeoJSON fixture was opened with a bare ``open`` and never
    closed — now read inside a context manager. The range comment also
    disagreed with the assertion (-250 vs -150) and has been aligned.
    """
    src_crs = CRS.from_epsg(4326)
    dst_crs = CRS(
        {
            "proj": "sinu",
            "lon_0": 350.85607029556,
            "x_0": 0,
            "y_0": 0,
            "a": 3396190,
            "b": 3396190,
            "units": "m",
            "no_defs": True,
        }
    )
    with open("tests/data/issue1446.geojson") as fp:
        collection = json.load(fp)
    geoms = {f["properties"]["fid"]: f["geometry"] for f in collection["features"]}
    transformed_geoms = {
        k: transform_geom(src_crs, dst_crs, g) for k, g in geoms.items()
    }
    # Before the fix, this geometry was thrown eastward of 0.0. It should
    # lie between -350 and -150 (matching the assertion).
    assert all(-350 < x < -150 for x, y in transformed_geoms[183519]["coordinates"])
def test_reproject_init_dest_nodata():
    """With init_dest_nodata=False, no source nodata pixels transfer over."""
    crs = CRS.from_epsg(4326)
    identity = Affine.identity()
    source = np.zeros((1, 100, 100))
    destination = np.ones((1, 100, 100))
    reproject(
        source,
        destination,
        src_crs=crs,
        src_transform=identity,
        dst_crs=crs,
        dst_transform=identity,
        src_nodata=0,
        init_dest_nodata=False,
    )
    # Every source pixel is nodata and the destination was not
    # re-initialized, so the preexisting ones must survive.
    assert destination.all()
def test_empty_transform_inputs():
    """Check for fix of #1952: empty inputs yield empty outputs."""
    result = rasterio.warp.transform("EPSG:3857", "EPSG:4326", [], [], zs=None)
    assert result == ([], [])
def test_empty_transform_inputs_z():
    """Check for fix of #1952: empty 3-D inputs yield three empty lists."""
    result = rasterio.warp.transform("EPSG:3857", "EPSG:4326", [], [], zs=[])
    assert result == ([], [], [])
def test_empty_transform_inputs_length():
    """Mismatched x/y input lengths raise TransformError."""
    with pytest.raises(TransformError):
        rasterio.warp.transform("EPSG:3857", "EPSG:4326", [1], [1, 2])
def test_empty_transform_inputs_length_z():
    """A z list whose length differs from x/y raises TransformError."""
    with pytest.raises(TransformError):
        rasterio.warp.transform("EPSG:3857", "EPSG:4326", [1, 2], [1, 2], zs=[0])
def test_reproject_rpcs(caplog):
    """Reproject using rational polynomial coefficients for the source"""
    with rasterio.open('tests/data/RGB.byte.rpc.vrt') as src:
        # NOTE(review): numpy image arrays are (bands, rows, cols); passing
        # profile "width" as the row count looks swapped — confirm whether
        # (3, height, width) was intended. Kept as-is because the corner
        # assertions below depend on the produced grid.
        out = np.zeros(
            (3, src.profile["width"], src.profile["height"]), dtype=np.uint8
        )
        src_rpcs = src.rpcs
        # RPCs replace src_transform; src_crs gives the RPC datum (WGS84).
        reproject(
            rasterio.band(src, src.indexes),
            out,
            src_crs="EPSG:4326",
            rpcs=src_rpcs,
            dst_crs="EPSG:3857",
            resampling=Resampling.nearest,
        )
        # Output is only partially filled; all four corners remain empty.
        assert not out.all()
        assert not out[:, 0, 0].any()
        assert not out[:, 0, -1].any()
        assert not out[:, -1, -1].any()
        assert not out[:, -1, 0].any()
def test_reproject_rpcs_with_transformer_options(caplog):
"""Reproject using rational polynomial coefficients and additional transformer options"""
with rasterio.open('tests/data/RGB.byte.rpc.vrt') as src:
with rasterio.MemoryFile(dirname='foo', filename='dem.tif') as mem:
crs = 'COMPD_CS["WGS 84 + EGM96 height",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Latitude",NORTH],AXIS["Longitude",EAST],AUTHORITY["EPSG","4326"]],VERT_CS["EGM96 height",VERT_DATUM["EGM96 geoid",2005,AUTHORITY["EPSG","5171"]],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Gravity-related height",UP],AUTHORITY["EPSG","5773"]]]'
transform = Affine(0.001953396267361111, 0.0, -124.00013888888888, 0.0, -0.001953396267361111, 50.000138888888884)
with mem.open(
driver="GTiff",
width=1024,
height=1024,
count=1,
transform=transform,
dtype="int16",
crs=crs,
) as dem:
# we flush dem dataset before letting GDAL read from vsimem
dem.write_band(1, 500 * np.ones((1024, 1024), dtype='int16'))
out = np.zeros(
(3, src.profile["width"], src.profile["height"]), dtype=np.uint8
)
out2 = out.copy()
src_rpcs = src.rpcs
caplog.set_level(logging.DEBUG)
reproject(
rasterio.band(src, src.indexes),
out,
src_crs="EPSG:4326",
rpcs=src_rpcs,
dst_crs="EPSG:3857",
resampling=Resampling.nearest,
RPC_DEM=dem.name,
)
caplog.set_level(logging.INFO)
reproject(
rasterio.band(src, src.indexes),
out2,
src_crs="EPSG:4326",
rpcs=src_rpcs,
dst_crs="EPSG:3857",
resampling=Resampling.nearest,
)
assert not out.all()
assert not out2.all()
assert "RPC_DEM" in caplog.text
assert not np.array_equal(out, out2)
def test_warp_gcps_compute_dst_transform_automatically_array():
"""Ensure we don't raise an exception when gcps passed without dst_transform, for a source array"""
source = np.ones((3, 800, 800), dtype=np.uint8) * 255
out = np.zeros((3, 512, 512))
src_gcps = [
GroundControlPoint(row=0, col=0, x=156113, y=2818720, z=0),
GroundControlPoint(row=0, col=800, x=338353, y=2785790, z=0),
GroundControlPoint(row=800, col=800, x=297939, y=2618518, z=0),
GroundControlPoint(row=800, col=0, x=115698, y=2651448, z=0),
]
reproject(
source,
out,
src_crs="EPSG:32618",
gcps=src_gcps,
dst_crs="EPSG:32618",
resampling=Resampling.nearest
)
assert not out.all()
assert not out[:, 0, 0].any()
assert not out[:, 0, -1].any()
assert not out[:, -1, -1].any()
assert not out[:, -1, 0].any()
def test_warp_gcps_compute_dst_transform_automatically_reader(tmpdir):
"""Ensure we don't raise an exception when gcps passed without dst_transform, for a source dataset"""
tiffname = str(tmpdir.join('test.tif'))
src_gcps = [
GroundControlPoint(row=0, col=0, x=156113, y=2818720, z=0),
GroundControlPoint(row=0, col=800, x=338353, y=2785790, z=0),
GroundControlPoint(row=800, col=800, x=297939, y=2618518, z=0),
GroundControlPoint(row=800, col=0, x=115698, y=2651448, z=0),
]
out = np.zeros((3, 512, 512))
with rasterio.open(tiffname, mode='w', height=800, width=800, count=3, dtype=np.uint8) as source:
source.gcps = (src_gcps, CRS.from_epsg(32618))
with rasterio.open(tiffname) as source:
reproject(
rasterio.band(source, source.indexes),
out,
dst_crs="EPSG:32618",
resampling=Resampling.nearest
)
assert not out.all()
assert not out[:, 0, 0].any()
assert not out[:, 0, -1].any()
assert not out[:, -1, -1].any()
assert not out[:, -1, 0].any()
def test_reproject_rpcs_exact_transformer(caplog):
"""Reproject using rational polynomial coefficients and DEM, requiring that
we don't try to make an approximate transformer.
"""
with rasterio.open('tests/data/RGB.byte.rpc.vrt') as src:
with rasterio.MemoryFile(dirname='foo', filename='dem.tif') as mem:
crs = 'COMPD_CS["WGS 84 + EGM96 height",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Latitude",NORTH],AXIS["Longitude",EAST],AUTHORITY["EPSG","4326"]],VERT_CS["EGM96 height",VERT_DATUM["EGM96 geoid",2005,AUTHORITY["EPSG","5171"]],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Gravity-related height",UP],AUTHORITY["EPSG","5773"]]]'
transform = Affine(0.001953396267361111, 0.0, -124.00013888888888, 0.0, -0.001953396267361111, 50.000138888888884)
with mem.open(
driver="GTiff",
width=1024,
height=1024,
count=1,
transform=transform,
dtype="int16",
crs=crs,
) as dem:
# we flush dem dataset before letting GDAL read from vsimem
dem.write_band(1, 500 * np.ones((1024, 1024), dtype='int16'))
out = np.zeros(
(3, src.profile["width"], src.profile["height"]), dtype=np.uint8
)
src_rpcs = src.rpcs
caplog.set_level(logging.DEBUG)
reproject(
rasterio.band(src, src.indexes),
out,
src_crs="EPSG:4326",
rpcs=src_rpcs,
dst_crs="EPSG:3857",
resampling=Resampling.nearest,
RPC_DEM=dem.name,
)
assert "Created exact transformer" in caplog.text
def test_reproject_rpcs_approx_transformer(caplog):
    """Reproject using rational polynomial coefficients without a DEM.

    Without an RPC_DEM an approximate transformer is acceptable; the debug
    log confirms one was built.

    Fix: removed a stray dataset-table artifact (``| tests/test_warp.py |``)
    fused onto the final assertion line, which broke the file's syntax.
    """
    with rasterio.open('tests/data/RGB.byte.rpc.vrt') as src:
        # NOTE(review): (3, width, height) ordering looks swapped vs the
        # usual (bands, rows, cols) — confirm; kept to preserve behavior.
        out = np.zeros(
            (3, src.profile["width"], src.profile["height"]), dtype=np.uint8
        )
        src_rpcs = src.rpcs
        caplog.set_level(logging.DEBUG)
        reproject(
            rasterio.band(src, src.indexes),
            out,
            src_crs="EPSG:4326",
            rpcs=src_rpcs,
            dst_crs="EPSG:3857",
            resampling=Resampling.nearest,
        )
        assert "Created approximate transformer" in caplog.text
import json
import logging
from affine import Affine
import numpy as np
from numpy.testing import assert_almost_equal
import pytest
import rasterio
from rasterio.control import GroundControlPoint
from rasterio.crs import CRS
from rasterio.enums import Resampling
from rasterio.env import GDALVersion
from rasterio.errors import (
CRSError,
GDALVersionError,
TransformError,
)
from rasterio.warp import (
reproject,
transform_geom,
transform,
transform_bounds,
calculate_default_transform,
aligned_target,
SUPPORTED_RESAMPLING,
GDAL2_RESAMPLING,
)
from rasterio import windows
from .conftest import requires_gdal22, requires_gdal3, requires_gdal_lt_3
log = logging.getLogger(__name__)
gdal_version = GDALVersion.runtime()
DST_TRANSFORM = Affine(300.0, 0.0, -8789636.708, 0.0, -300.0, 2943560.235)
def flatten_coords(coordinates):
    """Yield a flat sequence of coordinates to help testing.

    Parameters
    ----------
    coordinates : sequence
        Arbitrarily nested sequence of numbers (GeoJSON-style coordinates).

    Yields
    ------
    float or int
        Each leaf number, in depth-first order.
    """
    for elem in coordinates:
        if isinstance(elem, (float, int)):
            yield elem
        else:
            # Delegate to the recursive generator directly instead of the
            # manual inner re-yield loop.
            yield from flatten_coords(elem)
reproj_expected = (
({"CHECK_WITH_INVERT_PROJ": False}, 6644), ({"CHECK_WITH_INVERT_PROJ": True}, 6644)
)
class ReprojectParams:
    """Class to assist testing reprojection by encapsulating parameters."""

    def __init__(self, left, bottom, right, top, width, height, src_crs, dst_crs):
        # Source raster size in pixels.
        self.width = width
        self.height = height
        # Square pixels assumed: resolution derived from horizontal extent.
        src_res = float(right - left) / float(width)
        # North-up source grid anchored at (left, top).
        self.src_transform = Affine(src_res, 0, left, 0, -src_res, top)
        self.src_crs = src_crs
        self.dst_crs = dst_crs
        # Let rasterio choose an output grid covering the source bounds
        # in the destination CRS.
        dt, dw, dh = calculate_default_transform(
            src_crs, dst_crs, width, height, left, bottom, right, top
        )
        self.dst_transform = dt
        self.dst_width = dw
        self.dst_height = dh
def default_reproject_params():
    """ReprojectParams for a well-behaved WGS84 -> EPSG:2163 case."""
    return ReprojectParams(
        left=-120,
        bottom=30,
        right=-80,
        top=70,
        width=80,
        height=80,
        src_crs=CRS.from_epsg(4326),
        dst_crs=CRS.from_epsg(2163),
    )
def uninvertable_reproject_params():
    """ReprojectParams whose destination CRS (EPSG:26836) is hard to invert."""
    return ReprojectParams(
        left=-120,
        bottom=30,
        right=-80,
        top=70,
        width=80,
        height=80,
        src_crs=CRS.from_epsg(4326),
        dst_crs=CRS.from_epsg(26836),
    )
WGS84_crs = CRS.from_epsg(4326)
def test_transform_src_crs_none():
    """A None source CRS is rejected by transform."""
    with pytest.raises(CRSError):
        transform(None, WGS84_crs, [1], [1])
def test_transform_dst_crs_none():
    """A None destination CRS is rejected by transform."""
    with pytest.raises(CRSError):
        transform(WGS84_crs, None, [1], [1])
def test_transform_bounds_src_crs_none():
    """A None source CRS is rejected by transform_bounds."""
    with pytest.raises(CRSError):
        transform_bounds(None, WGS84_crs, 0, 0, 0, 0)
def test_transform_bounds_dst_crs_none():
    """A None destination CRS is rejected by transform_bounds."""
    with pytest.raises(CRSError):
        transform_bounds(WGS84_crs, None, 0, 0, 0, 0)
def test_transform_geom_src_crs_none():
    """A None source CRS is rejected by transform_geom."""
    with pytest.raises(CRSError):
        transform_geom(None, WGS84_crs, None)
def test_transform_geom_dst_crs_none():
    """A None destination CRS is rejected by transform_geom."""
    with pytest.raises(CRSError):
        transform_geom(WGS84_crs, None, None)
def test_reproject_src_crs_none():
    """reproject requires a source CRS."""
    source = np.ones((2, 2))
    destination = np.zeros((2, 2))
    with pytest.raises(CRSError):
        reproject(
            source,
            destination,
            src_transform=Affine.identity(),
            dst_transform=Affine.identity(),
            dst_crs=WGS84_crs,
        )
def test_reproject_dst_crs_none():
    """reproject requires a destination CRS."""
    source = np.ones((2, 2))
    destination = np.zeros((2, 2))
    with pytest.raises(CRSError):
        reproject(
            source,
            destination,
            src_transform=Affine.identity(),
            dst_transform=Affine.identity(),
            src_crs=WGS84_crs,
        )
def test_transform():
    """Point transforms work in both 2D and 3D."""
    wgs84 = CRS.from_epsg(4326)
    lon, lat, z = [12.492269], [41.890169], [48.0]

    # 3D: WGS84 -> earth-centered earth-fixed (EPSG:4978).
    ecef_expected = ([4642610.0], [1028584.0], [4236562.0])
    ecef_result = transform(wgs84, CRS.from_epsg(4978), lon, lat, z)
    assert np.allclose(np.array(ecef_result), np.array(ecef_expected))

    # 2D: WGS84 -> UTM zone 33N (EPSG:32633).
    utm_expected = ([291952], [4640623])
    utm_result = transform(wgs84, CRS.from_epsg(32633), lon, lat)
    assert np.allclose(np.array(utm_result), np.array(utm_expected))
def test_transform_bounds():
with rasterio.open("tests/data/RGB.byte.tif") as src:
l, b, r, t = src.bounds
assert np.allclose(
transform_bounds(src.crs, CRS.from_epsg(4326), l, b, r, t),
(
-78.95864996545055,
23.564991210854686,
-76.57492370013823,
25.550873767433984,
),
)
def test_transform_bounds__esri_wkt():
left, bottom, right, top = \
(-78.95864996545055, 23.564991210854686,
-76.57492370013823, 25.550873767433984)
dst_projection_string = (
'PROJCS["USA_Contiguous_Albers_Equal_Area_Conic_USGS_version",'
'GEOGCS["GCS_North_American_1983",DATUM["D_North_American_1983",'
'SPHEROID["GRS_1980",6378137.0,298.257222101]],'
'PRIMEM["Greenwich",0.0],'
'UNIT["Degree",0.0174532925199433]],'
'PROJECTION["Albers"],'
'PARAMETER["false_easting",0.0],'
'PARAMETER["false_northing",0.0],'
'PARAMETER["central_meridian",-96.0],'
'PARAMETER["standard_parallel_1",29.5],'
'PARAMETER["standard_parallel_2",45.5],'
'PARAMETER["latitude_of_origin",23.0],'
'UNIT["Meter",1.0],'
'VERTCS["NAVD_1988",'
'VDATUM["North_American_Vertical_Datum_1988"],'
'PARAMETER["Vertical_Shift",0.0],'
'PARAMETER["Direction",1.0],UNIT["Centimeter",0.01]]]')
assert np.allclose(
transform_bounds(CRS.from_epsg(4326),
dst_projection_string,
left,
bottom,
right,
top),
(
1721263.7931814701,
219684.49332178483,
2002926.56696663,
479360.16562217404),
)
@pytest.mark.parametrize(
"density,expected",
[
(0, (-1684649.41338, -350356.81377, 1684649.41338, 2234551.18559)),
(100, (-1684649.41338, -555777.79210, 1684649.41338, 2234551.18559)),
],
)
def test_transform_bounds_densify(density, expected):
# This transform is non-linear along the edges, so densification produces
# a different result than otherwise
src_crs = CRS.from_epsg(4326)
dst_crs = CRS.from_epsg(2163)
with rasterio.Env(OSR_USE_NON_DEPRECATED="NO"):
assert np.allclose(
expected,
transform_bounds(src_crs, dst_crs, -120, 40, -80, 64, densify_pts=density),
)
def test_transform_bounds_no_change():
    """Make sure that going from and to the same crs causes no change."""
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        bounds = src.bounds
        assert np.allclose(transform_bounds(src.crs, src.crs, *bounds), bounds)
def test_transform_bounds_densify_out_of_bounds():
    """A negative densify_pts is rejected with ValueError."""
    with pytest.raises(ValueError):
        transform_bounds(
            CRS.from_epsg(4326),
            CRS.from_epsg(32610),
            -120,
            40,
            -80,
            64,
            densify_pts=-10,
        )
def test_calculate_default_transform():
target_transform = Affine(
0.0028535715391804096,
0.0,
-78.95864996545055,
0.0,
-0.0028535715391804096,
25.550873767433984,
)
with rasterio.open("tests/data/RGB.byte.tif") as src:
wgs84_crs = CRS.from_epsg(4326)
dst_transform, width, height = calculate_default_transform(
src.crs, wgs84_crs, src.width, src.height, *src.bounds
)
assert dst_transform.almost_equals(target_transform)
assert width == 835
assert height == 696
def test_calculate_default_transform_single_resolution():
with rasterio.open("tests/data/RGB.byte.tif") as src:
target_resolution = 0.1
target_transform = Affine(
target_resolution,
0.0,
-78.95864996545055,
0.0,
-target_resolution,
25.550873767433984,
)
dst_transform, width, height = calculate_default_transform(
src.crs,
CRS.from_epsg(4326),
src.width,
src.height,
*src.bounds,
resolution=target_resolution
)
assert dst_transform.almost_equals(target_transform)
assert width == 24
assert height == 20
def test_calculate_default_transform_multiple_resolutions():
with rasterio.open("tests/data/RGB.byte.tif") as src:
target_resolution = (0.2, 0.1)
target_transform = Affine(
target_resolution[0],
0.0,
-78.95864996545055,
0.0,
-target_resolution[1],
25.550873767433984,
)
dst_transform, width, height = calculate_default_transform(
src.crs,
CRS.from_epsg(4326),
src.width,
src.height,
*src.bounds,
resolution=target_resolution
)
assert dst_transform.almost_equals(target_transform)
assert width == 12
assert height == 20
def test_calculate_default_transform_dimensions():
with rasterio.open("tests/data/RGB.byte.tif") as src:
dst_width, dst_height = (113, 103)
target_transform = Affine(
0.02108612597535966,
0.0,
-78.95864996545055,
0.0,
-0.0192823863230055,
25.550873767433984,
)
dst_transform, width, height = calculate_default_transform(
src.crs,
CRS.from_epsg(4326),
src.width,
src.height,
*src.bounds,
dst_width=dst_width,
dst_height=dst_height
)
assert dst_transform.almost_equals(target_transform)
assert width == dst_width
assert height == dst_height
def test_reproject_ndarray():
with rasterio.open("tests/data/RGB.byte.tif") as src:
source = src.read(1)
dst_crs = dict(
proj="merc",
a=6378137,
b=6378137,
lat_ts=0.0,
lon_0=0.0,
x_0=0.0,
y_0=0,
k=1.0,
units="m",
nadgrids="@null",
wktext=True,
no_defs=True,
)
out = np.empty(src.shape, dtype=np.uint8)
reproject(
source,
out,
src_transform=src.transform,
src_crs=src.crs,
dst_transform=DST_TRANSFORM,
dst_crs=dst_crs,
resampling=Resampling.nearest,
)
assert (out > 0).sum() == 438113
def test_reproject_view():
"""Source views are reprojected properly"""
with rasterio.open("tests/data/RGB.byte.tif") as src:
source = src.read(1)
window = windows.Window(100, 100, 500, 500)
# window = windows.get_data_window(source)
reduced_array = source[window.toslices()]
reduced_transform = windows.transform(window, src.transform)
# Assert that we're working with a view.
assert reduced_array.base is source
dst_crs = dict(
proj="merc",
a=6378137,
b=6378137,
lat_ts=0.0,
lon_0=0.0,
x_0=0.0,
y_0=0,
k=1.0,
units="m",
nadgrids="@null",
wktext=True,
no_defs=True,
)
out = np.empty(src.shape, dtype=np.uint8)
reproject(
reduced_array,
out,
src_transform=reduced_transform,
src_crs=src.crs,
dst_transform=DST_TRANSFORM,
dst_crs=dst_crs,
resampling=Resampling.nearest,
)
assert (out > 0).sum() == 299199
def test_reproject_epsg():
with rasterio.open("tests/data/RGB.byte.tif") as src:
source = src.read(1)
dst_crs = {"init": "epsg:3857"}
out = np.empty(src.shape, dtype=np.uint8)
reproject(
source,
out,
src_transform=src.transform,
src_crs=src.crs,
dst_transform=DST_TRANSFORM,
dst_crs=dst_crs,
resampling=Resampling.nearest,
)
assert (out > 0).sum() == 438113
def test_reproject_epsg__simple_array():
with rasterio.open("tests/data/RGB.byte.tif") as src:
source = src.read(1)
dst_crs = {"init": "EPSG:3857"}
out, dst_transform = reproject(
source,
src_transform=src.transform,
src_crs=src.crs,
dst_crs=dst_crs,
resampling=Resampling.nearest,
)
assert (out > 0).sum() == 383077
assert_almost_equal(tuple(dst_transform),
tuple(Affine(330.2992903555146, 0.0, -8789636.707871985,
0.0, -330.2992903555146, 2943560.2346221623)),
decimal=5)
def test_reproject_epsg__simple_array_resolution():
with rasterio.open("tests/data/RGB.byte.tif") as src:
source = src.read(1)
dst_crs = {"init": "EPSG:3857"}
out, dst_transform = reproject(
source,
src_transform=src.transform,
src_crs=src.crs,
dst_crs=dst_crs,
dst_resolution=(300, 300),
resampling=Resampling.nearest,
)
assert (out > 0).sum() == 464503
assert_almost_equal(tuple(dst_transform),
tuple(Affine(300, 0.0, -8789636.707871985,
0.0, -300, 2943560.2346221623)),
decimal=5)
def test_reproject_epsg__simple_array_dst():
with rasterio.open("tests/data/RGB.byte.tif") as src:
source = src.read(1)
dst_crs = {"init": "EPSG:3857"}
dst_out = np.empty(src.shape, dtype=np.uint8)
out, dst_transform = reproject(
source,
dst_out,
src_transform=src.transform,
src_crs=src.crs,
dst_crs=dst_crs,
resampling=Resampling.nearest,
)
assert (out > 0).sum() == 368123
assert_almost_equal(tuple(dst_transform),
tuple(Affine(335.3101519032594, 0.0, -8789636.707871985,
0.0, -338.579773957742, 2943560.2346221623)),
decimal=5)
def test_reproject_epsg__simple():
with rasterio.open("tests/data/RGB.byte.tif") as src:
dst_crs = {"init": "EPSG:3857"}
out, dst_transform = reproject(
rasterio.band(src, 1),
dst_crs=dst_crs,
resampling=Resampling.nearest,
)
assert (out > 0).sum() == 383077
assert_almost_equal(tuple(dst_transform),
tuple(Affine(330.2992903555146, 0.0, -8789636.707871985,
0.0, -330.2992903555146, 2943560.2346221623)),
decimal=5)
def test_reproject_epsg__simple_resolution():
with rasterio.open("tests/data/RGB.byte.tif") as src:
dst_crs = {"init": "EPSG:3857"}
out, dst_transform = reproject(
rasterio.band(src, 1),
dst_crs=dst_crs,
dst_resolution=(300, 300),
resampling=Resampling.nearest,
)
assert (out > 0).sum() == 464503
assert_almost_equal(tuple(dst_transform),
tuple(Affine(300.0, 0.0, -8789636.707871985,
0.0, -300.0, 2943560.2346221623)),
decimal=5)
def test_reproject_no_destination_with_transform():
with rasterio.open("tests/data/RGB.byte.tif") as src:
source = src.read(1)
dst_crs = {"init": "EPSG:3857"}
with pytest.raises(ValueError):
reproject(
source,
src_transform=src.transform,
src_crs=src.crs,
dst_crs=dst_crs,
dst_transform=DST_TRANSFORM,
resampling=Resampling.nearest,
)
def test_reproject_out_of_bounds():
"""Using EPSG code is not appropriate for the transform.
Should return blank image.
"""
with rasterio.open("tests/data/RGB.byte.tif") as src:
source = src.read(1)
dst_crs = {"init": "epsg:32619"}
out = np.zeros(src.shape, dtype=np.uint8)
reproject(
source,
out,
src_transform=src.transform,
src_crs=src.crs,
dst_transform=DST_TRANSFORM,
dst_crs=dst_crs,
resampling=Resampling.nearest,
)
assert not out.any()
@requires_gdal3
@pytest.mark.parametrize("options, expected", reproj_expected)
def test_reproject_nodata(options, expected):
# Older combinations of GDAL and PROJ might have got this transformation wrong.
# Results look better with GDAL 3.
nodata = 215
with rasterio.Env(**options):
params = uninvertable_reproject_params()
source = np.ones((params.width, params.height), dtype=np.uint8)
out = np.zeros((params.dst_width, params.dst_height), dtype=source.dtype)
out.fill(120) # Fill with arbitrary value
reproject(
source,
out,
src_transform=params.src_transform,
src_crs=params.src_crs,
src_nodata=nodata,
dst_transform=params.dst_transform,
dst_crs=params.dst_crs,
dst_nodata=nodata,
)
assert (out == 1).sum() == expected
assert (out == nodata).sum() == (
params.dst_width * params.dst_height - expected
)
@requires_gdal3
@pytest.mark.parametrize("options, expected", reproj_expected)
def test_reproject_nodata_nan(options, expected):
    """NaN works as src_nodata/dst_nodata for floating point rasters."""
    with rasterio.Env(**options):
        params = uninvertable_reproject_params()
        source = np.ones((params.width, params.height), dtype=np.float32)
        out = np.zeros((params.dst_width, params.dst_height), dtype=source.dtype)
        out.fill(120)  # Fill with arbitrary value
        reproject(
            source,
            out,
            src_transform=params.src_transform,
            src_crs=params.src_crs,
            src_nodata=np.nan,
            dst_transform=params.dst_transform,
            dst_crs=params.dst_crs,
            dst_nodata=np.nan,
        )
        # All non-data pixels must be NaN (isnan, since NaN != NaN).
        assert (out == 1).sum() == expected
        assert np.isnan(out).sum() == (params.dst_width * params.dst_height - expected)
@requires_gdal3
@pytest.mark.parametrize("options, expected", reproj_expected)
def test_reproject_dst_nodata_default(options, expected):
    """If nodata is not provided, destination will be filled with 0."""
    with rasterio.Env(**options):
        params = uninvertable_reproject_params()
        source = np.ones((params.width, params.height), dtype=np.uint8)
        out = np.zeros((params.dst_width, params.dst_height), dtype=source.dtype)
        out.fill(120)  # Fill with arbitrary value
        reproject(
            source,
            out,
            src_transform=params.src_transform,
            src_crs=params.src_crs,
            dst_transform=params.dst_transform,
            dst_crs=params.dst_crs,
        )
        # With no dst_nodata, the destination is initialized to 0 — the 120
        # fill must not survive anywhere.
        assert (out == 1).sum() == expected
        assert (out == 0).sum() == (params.dst_width * params.dst_height - expected)
def test_reproject_invalid_dst_nodata():
    """dst_nodata must be in value range of data type."""
    params = default_reproject_params()
    data = np.ones((params.width, params.height), dtype=np.uint8)
    result = data.copy()
    # 999999999 is not representable as uint8, so reproject must refuse it.
    with pytest.raises(ValueError):
        reproject(
            data,
            result,
            src_crs=params.src_crs,
            src_transform=params.src_transform,
            src_nodata=0,
            dst_crs=params.dst_crs,
            dst_transform=params.dst_transform,
            dst_nodata=999999999,
        )
def test_reproject_invalid_src_nodata():
    """src_nodata must be in range for data type."""
    params = default_reproject_params()
    data = np.ones((params.width, params.height), dtype=np.uint8)
    result = data.copy()
    # 999999999 is not representable as uint8, so reproject must refuse it.
    with pytest.raises(ValueError):
        reproject(
            data,
            result,
            src_crs=params.src_crs,
            src_transform=params.src_transform,
            src_nodata=999999999,
            dst_crs=params.dst_crs,
            dst_transform=params.dst_transform,
            dst_nodata=215,
        )
def test_reproject_init_nodata_tofile(tmpdir):
    """Test that nodata is being initialized."""
    params = default_reproject_params()
    tiffname = str(tmpdir.join("foo.tif"))
    source1 = np.zeros((params.width, params.height), dtype=np.uint8)
    source2 = source1.copy()
    # fill both sources w/ arbitrary values: each source has data in a
    # different (non-overlapping) quadrant.
    rows, cols = source1.shape
    source1[:rows // 2, :cols // 2] = 200
    source2[rows // 2:, cols // 2:] = 100
    kwargs = {
        "count": 1,
        "width": params.width,
        "height": params.height,
        "dtype": np.uint8,
        "driver": "GTiff",
        "crs": params.dst_crs,
        "transform": params.dst_transform,
    }
    with rasterio.open(tiffname, "w", **kwargs) as dst:
        reproject(
            source1,
            rasterio.band(dst, 1),
            src_transform=params.src_transform,
            src_crs=params.src_crs,
            src_nodata=0.0,
            dst_transform=params.dst_transform,
            dst_crs=params.dst_crs,
            dst_nodata=0.0,
        )
        # 200s should be overwritten by 100s: the second reproject
        # re-initializes the destination band to nodata (the default),
        # wiping the first write before writing source2.
        reproject(
            source2,
            rasterio.band(dst, 1),
            src_transform=params.src_transform,
            src_crs=params.src_crs,
            src_nodata=0.0,
            dst_transform=params.dst_transform,
            dst_crs=params.dst_crs,
            dst_nodata=0.0,
        )
    with rasterio.open(tiffname) as src:
        assert src.read().max() == 100
def test_reproject_no_init_nodata_tofile(tmpdir):
    """Test that nodata is not being initialized."""
    params = default_reproject_params()
    tiffname = str(tmpdir.join("foo.tif"))
    source1 = np.zeros((params.width, params.height), dtype=np.uint8)
    source2 = source1.copy()
    # fill both sources w/ arbitrary values: each source has data in a
    # different (non-overlapping) quadrant.
    rows, cols = source1.shape
    source1[:rows // 2, :cols // 2] = 200
    source2[rows // 2:, cols // 2:] = 100
    kwargs = {
        "count": 1,
        "width": params.width,
        "height": params.height,
        "dtype": np.uint8,
        "driver": "GTiff",
        "crs": params.dst_crs,
        "transform": params.dst_transform,
    }
    with rasterio.open(tiffname, "w", **kwargs) as dst:
        reproject(
            source1,
            rasterio.band(dst, 1),
            src_transform=params.src_transform,
            src_crs=params.src_crs,
            src_nodata=0.0,
            dst_transform=params.dst_transform,
            dst_crs=params.dst_crs,
            dst_nodata=0.0,
        )
        # init_dest_nodata=False skips re-initializing the band, so the
        # 200s written above survive the second reproject.
        reproject(
            source2,
            rasterio.band(dst, 1),
            src_transform=params.src_transform,
            src_crs=params.src_crs,
            src_nodata=0.0,
            dst_transform=params.dst_transform,
            dst_crs=params.dst_crs,
            dst_nodata=0.0,
            init_dest_nodata=False,
        )
    # 200s should remain along with 100s
    with rasterio.open(tiffname) as src:
        data = src.read()
        assert data.max() == 200
def test_reproject_no_init_nodata_toarray():
    """Test that nodata is NOT re-initialized when init_dest_nodata=False."""
    params = default_reproject_params()
    source1 = np.zeros((params.width, params.height))
    source2 = source1.copy()
    out = source1.copy()
    # fill both sources w/ arbitrary values: each source has data in a
    # different (non-overlapping) quadrant.
    rows, cols = source1.shape
    source1[:rows // 2, :cols // 2] = 200
    source2[rows // 2:, cols // 2:] = 100
    reproject(
        source1,
        out,
        src_transform=params.src_transform,
        src_crs=params.src_crs,
        src_nodata=0.0,
        dst_transform=params.dst_transform,
        dst_crs=params.dst_crs,
        dst_nodata=0.0,
    )
    assert out.max() == 200
    assert out.min() == 0
    reproject(
        source2,
        out,
        src_transform=params.src_transform,
        src_crs=params.src_crs,
        src_nodata=0.0,
        dst_transform=params.dst_transform,
        dst_crs=params.dst_crs,
        dst_nodata=0.0,
        init_dest_nodata=False,
    )
    # 200s should NOT be overwritten by 100s
    assert out.max() == 200
    assert out.min() == 0
def test_reproject_multi():
    """Ndarray to ndarray, all bands at once."""
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        source = src.read()  # all 3 bands, shape (3, rows, cols)
        # Spherical-Mercator-style CRS spelled out as PROJ parameters.
        dst_crs = dict(
            proj="merc",
            a=6378137,
            b=6378137,
            lat_ts=0.0,
            lon_0=0.0,
            x_0=0.0,
            y_0=0,
            k=1.0,
            units="m",
            nadgrids="@null",
            wktext=True,
            no_defs=True,
        )
        destin = np.empty(source.shape, dtype=np.uint8)
        reproject(
            source,
            destin,
            src_transform=src.transform,
            src_crs=src.crs,
            dst_transform=DST_TRANSFORM,
            dst_crs=dst_crs,
            resampling=Resampling.nearest,
        )
    # At least some data must have been transferred.
    assert destin.any()
def test_warp_from_file():
    """File to ndarray."""
    # Spherical-Mercator-style CRS spelled out as PROJ parameters.
    mercator = {
        "proj": "merc",
        "a": 6378137,
        "b": 6378137,
        "lat_ts": 0.0,
        "lon_0": 0.0,
        "x_0": 0.0,
        "y_0": 0,
        "k": 1.0,
        "units": "m",
        "nadgrids": "@null",
        "wktext": True,
        "no_defs": True,
    }
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        result = np.empty(src.shape, dtype=np.uint8)
        reproject(
            rasterio.band(src, 1), result, dst_transform=DST_TRANSFORM, dst_crs=mercator
        )
        assert result.any()
def test_warp_from_to_file(tmpdir):
    """File to file."""
    outfile = str(tmpdir.join("foo.tif"))
    # Spherical-Mercator-style CRS spelled out as PROJ parameters.
    mercator = {
        "proj": "merc",
        "a": 6378137,
        "b": 6378137,
        "lat_ts": 0.0,
        "lon_0": 0.0,
        "x_0": 0.0,
        "y_0": 0,
        "k": 1.0,
        "units": "m",
        "nadgrids": "@null",
        "wktext": True,
        "no_defs": True,
    }
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        meta = src.meta.copy()
        meta.update(transform=DST_TRANSFORM, crs=mercator)
        with rasterio.open(outfile, "w", **meta) as dst:
            # Warp each band individually into the output file.
            for band in (1, 2, 3):
                reproject(rasterio.band(src, band), rasterio.band(dst, band))
def test_warp_from_to_file_multi(tmpdir):
    """File to file, multi-threaded."""
    outfile = str(tmpdir.join("foo.tif"))
    # Spherical-Mercator-style CRS spelled out as PROJ parameters.
    mercator = {
        "proj": "merc",
        "a": 6378137,
        "b": 6378137,
        "lat_ts": 0.0,
        "lon_0": 0.0,
        "x_0": 0.0,
        "y_0": 0,
        "k": 1.0,
        "units": "m",
        "nadgrids": "@null",
        "wktext": True,
        "no_defs": True,
    }
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        meta = src.meta.copy()
        meta.update(transform=DST_TRANSFORM, crs=mercator)
        with rasterio.open(outfile, "w", **meta) as dst:
            # Same as test_warp_from_to_file but exercising num_threads.
            for band in (1, 2, 3):
                reproject(rasterio.band(src, band), rasterio.band(dst, band), num_threads=2)
@pytest.fixture(scope="function")
def polygon_3373():
    """An EPSG:3373 polygon.

    GeoJSON-style mapping with a single exterior ring whose first and
    last coordinates coincide. Used by the transform_geom tests below.
    """
    return {
        "type": "Polygon",
        "coordinates": (
            (
                (798842.3090855901, 6569056.500655151),
                (756688.2826828464, 6412397.888771972),
                (755571.0617232556, 6408461.009397383),
                (677605.2284582685, 6425600.39266733),
                (677605.2284582683, 6425600.392667332),
                (670873.3791649605, 6427248.603432341),
                (664882.1106069803, 6407585.48425362),
                (663675.8662823177, 6403676.990080649),
                (485120.71963574126, 6449787.167760638),
                (485065.55660851026, 6449802.826920689),
                (485957.03982722526, 6452708.625101285),
                (487541.24541826674, 6457883.292107048),
                (531008.5797472061, 6605816.560367976),
                (530943.7197027118, 6605834.9333479265),
                (531888.5010308184, 6608940.750411527),
                (533299.5981959199, 6613962.642851984),
                (533403.6388841148, 6613933.172096095),
                (576345.6064638699, 6761983.708069147),
                (577649.6721159086, 6766698.137844516),
                (578600.3589008929, 6770143.99782289),
                (578679.4732294685, 6770121.638265098),
                (655836.640492081, 6749376.357102599),
                (659913.0791150068, 6764770.1314677475),
                (661105.8478791204, 6769515.168134831),
                (661929.4670843681, 6772800.8565198565),
                (661929.4670843673, 6772800.856519875),
                (661975.1582566603, 6772983.354777632),
                (662054.7979028501, 6772962.86384242),
                (841909.6014891531, 6731793.200435557),
                (840726.455490463, 6727039.8672589315),
                (798842.3090855901, 6569056.500655151),
            ),
        ),
    }
def test_transform_geom_polygon_cutting(polygon_3373):
    """Antimeridian cutting splits the polygon into a 2-part MultiPolygon."""
    cut = transform_geom(
        "EPSG:3373", "EPSG:4326", polygon_3373, antimeridian_cutting=True
    )
    assert cut["type"] == "MultiPolygon"
    assert len(cut["coordinates"]) == 2
def test_transform_geom_polygon_offset(polygon_3373):
    """A zero antimeridian offset still produces the two-part cut."""
    cut = transform_geom(
        "EPSG:3373",
        "EPSG:4326",
        polygon_3373,
        antimeridian_cutting=True,
        antimeridian_offset=0,
    )
    assert cut["type"] == "MultiPolygon"
    assert len(cut["coordinates"]) == 2
def test_transform_geom_polygon_precision(polygon_3373):
    """Output coordinates are rounded to the requested precision."""
    out = transform_geom(
        "EPSG:3373", "EPSG:4326", polygon_3373, precision=1, antimeridian_cutting=True
    )
    for value in flatten_coords(out["coordinates"]):
        assert round(value, 1) == value
def test_transform_geom_linestring_precision(polygon_3373):
    """Precision rounding also applies to LineString geometries."""
    line = {"type": "LineString", "coordinates": polygon_3373["coordinates"][0]}
    out = transform_geom(
        "EPSG:3373", "EPSG:4326", line, precision=1, antimeridian_cutting=True
    )
    for value in flatten_coords(out["coordinates"]):
        assert round(value, 1) == value
def test_transform_geom_linestring_precision_iso(polygon_3373):
    """Identity transform (same CRS in and out) still applies precision."""
    line = {"type": "LineString", "coordinates": polygon_3373["coordinates"][0]}
    out = transform_geom("EPSG:3373", "EPSG:3373", line, precision=1)
    assert int(out["coordinates"][0][0] * 10) == 7988423
def test_transform_geom_linearring_precision(polygon_3373):
    """Precision rounding also applies to LinearRing geometries."""
    ring_geom = {"type": "LinearRing", "coordinates": polygon_3373["coordinates"][0]}
    out = transform_geom(
        "EPSG:3373", "EPSG:4326", ring_geom, precision=1, antimeridian_cutting=True
    )
    for value in flatten_coords(out["coordinates"]):
        assert round(value, 1) == value
def test_transform_geom_linestring_precision_z(polygon_3373):
    """Precision rounding applies to the z coordinate as well."""
    # Lift the 2D exterior ring to 3D with z=0 everywhere.
    xs, ys = zip(*polygon_3373["coordinates"][0])
    coords_3d = [(x, y, 0.0) for x, y in zip(xs, ys)]
    line = {"type": "LineString", "coordinates": coords_3d}
    out = transform_geom("EPSG:3373", "EPSG:3373", line, precision=1)
    assert int(out["coordinates"][0][0] * 10) == 7988423
    assert int(out["coordinates"][0][2] * 10) == 0
def test_transform_geom_multipolygon(polygon_3373):
    """Precision rounding applies to MultiPolygon geometries."""
    multi = {"type": "MultiPolygon", "coordinates": [polygon_3373["coordinates"]]}
    out = transform_geom("EPSG:3373", "EPSG:4326", multi, precision=1)
    for value in flatten_coords(out["coordinates"]):
        assert round(value, 1) == value
def test_transform_geom_array(polygon_3373):
    """A sequence of geometries transforms to a list of the same length."""
    batch = [polygon_3373] * 10
    out = transform_geom("EPSG:3373", "EPSG:4326", batch, precision=1)
    assert isinstance(out, list)
    assert len(out) == 10
def test_transform_geom__geo_interface(polygon_3373):
    """Objects exposing __geo_interface__ are accepted as geometries."""

    class Wrapper:
        @property
        def __geo_interface__(self):
            return polygon_3373

    out = transform_geom("EPSG:3373", "EPSG:4326", Wrapper(), precision=1)
    for value in flatten_coords(out["coordinates"]):
        assert round(value, 1) == value
def test_transform_geom__geo_interface__array(polygon_3373):
    """Sequences of __geo_interface__ objects are accepted as geometries."""

    class Wrapper:
        @property
        def __geo_interface__(self):
            return polygon_3373

    batch = [Wrapper() for _ in range(10)]
    outputs = transform_geom("EPSG:3373", "EPSG:4326", batch, precision=1)
    assert isinstance(outputs, list)
    assert len(outputs) == 10
    for out in outputs:
        for value in flatten_coords(out["coordinates"]):
            assert round(value, 1) == value
@pytest.mark.parametrize("method", SUPPORTED_RESAMPLING)
def test_reproject_resampling(path_rgb_byte_tif, method):
    """Each supported resampling method yields its known pixel count."""
    # Expected count of nonzero pixels for each resampling method, based
    # on running rasterio with each of the following configurations.
    # Lists allow more than one accepted value across GDAL versions.
    expected = {
        Resampling.nearest: [438113],
        Resampling.bilinear: [439280],
        Resampling.cubic: [437888],
        Resampling.cubic_spline: [440475],
        Resampling.lanczos: [436001],
        Resampling.average: [439419, 439172],  # latter value for GDAL 3.1
        Resampling.mode: [437298],
        Resampling.max: [439464],
        Resampling.min: [436397],
        Resampling.med: [437194],
        Resampling.q1: [436397],
        Resampling.q3: [438948],
        Resampling.sum: [439118],
        Resampling.rms: [439385],
    }
    with rasterio.open(path_rgb_byte_tif) as src:
        source = src.read(1)
    out = np.empty(src.shape, dtype=np.uint8)
    reproject(
        source,
        out,
        src_transform=src.transform,
        src_crs=src.crs,
        dst_transform=DST_TRANSFORM,
        dst_crs="EPSG:3857",
        resampling=method,
    )
    assert np.count_nonzero(out) in expected[method]
@pytest.mark.parametrize("test3d,count_nonzero", [(True, 1309625), (False, 437686)])
def test_reproject_array_interface(test3d, count_nonzero, path_rgb_byte_tif):
    """Objects implementing __array__ work as reproject source/destination."""

    class DataArray:
        # Minimal duck-typed array wrapper: exposes __array__ and dtype,
        # which is all reproject needs from an array-like.
        def __init__(self, data):
            self.data = data

        def __array__(self, dtype=None):
            return self.data

        @property
        def dtype(self):
            return self.data.dtype

    with rasterio.open(path_rgb_byte_tif) as src:
        # 3D exercises all bands; 2D a single band.
        if test3d:
            source = DataArray(src.read())
        else:
            source = DataArray(src.read(1))
        out = DataArray(np.empty(source.data.shape, dtype=np.uint8))
        reproject(
            source,
            out,
            src_transform=src.transform,
            src_crs=src.crs,
            src_nodata=src.nodata,
            dst_transform=DST_TRANSFORM,
            dst_crs="EPSG:3857",
            dst_nodata=99,
        )
        # The wrapper object itself is returned, filled in place.
        assert isinstance(out, DataArray)
        assert np.count_nonzero(out.data[out.data != 99]) == count_nonzero
@pytest.mark.parametrize("test3d,count_nonzero", [(True, 1309625), (False, 437686)])
def test_reproject_masked(test3d, count_nonzero, path_rgb_byte_tif):
    """Masked arrays reproject with masked pixels filled by dst_nodata."""
    with rasterio.open(path_rgb_byte_tif) as src:
        data = src.read(masked=True) if test3d else src.read(1, masked=True)
        result = np.empty(data.shape, dtype=np.uint8)
        reproject(
            data,
            result,
            src_transform=src.transform,
            src_crs=src.crs,
            dst_transform=DST_TRANSFORM,
            dst_crs="EPSG:3857",
            dst_nodata=99,
        )
    assert np.ma.is_masked(data)
    assert np.count_nonzero(result[result != 99]) == count_nonzero
@pytest.mark.parametrize("method", SUPPORTED_RESAMPLING)
def test_reproject_resampling_alpha(method):
    """Reprojection of a source with alpha band succeeds"""
    # Expected count of nonzero pixels for each resampling method, based
    # on running rasterio with each of the following configurations.
    # Only band 1 is read below, so the counts match the plain RGB test.
    expected = {
        Resampling.nearest: [438113],
        Resampling.bilinear: [439280],
        Resampling.cubic: [437888],
        Resampling.cubic_spline: [440475],
        Resampling.lanczos: [436001],
        Resampling.average: [439419, 439172],  # latter value for GDAL 3.1
        Resampling.mode: [437298],
        Resampling.max: [439464],
        Resampling.min: [436397],
        Resampling.med: [437194],
        Resampling.q1: [436397],
        Resampling.q3: [438948],
        Resampling.sum: [439118],
        Resampling.rms: [439385],
    }
    with rasterio.open("tests/data/RGBA.byte.tif") as src:
        source = src.read(1)
    out = np.empty(src.shape, dtype=np.uint8)
    reproject(
        source,
        out,
        src_transform=src.transform,
        src_crs=src.crs,
        dst_transform=DST_TRANSFORM,
        dst_crs="EPSG:3857",
        resampling=method,
    )
    assert np.count_nonzero(out) in expected[method]
@pytest.mark.skipif(
    gdal_version.at_least("2.0"), reason="Tests only applicable to GDAL < 2.0"
)
@pytest.mark.parametrize("method", GDAL2_RESAMPLING)
def test_reproject_not_yet_supported_resampling(method):
    """Test resampling methods not yet supported by this version of GDAL"""
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        band = src.read(1)
        result = np.empty(src.shape, dtype=np.uint8)
        # GDAL-2-only methods must be rejected with a version error here.
        with pytest.raises(GDALVersionError):
            reproject(
                band,
                result,
                src_crs=src.crs,
                src_transform=src.transform,
                dst_crs="EPSG:32619",
                dst_transform=DST_TRANSFORM,
                resampling=method,
            )
def test_reproject_unsupported_resampling():
    """Values not in enums. Resampling are not supported."""
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        band = src.read(1)
        result = np.empty(src.shape, dtype=np.uint8)
        # 99 is not a member of Resampling and must be rejected.
        with pytest.raises(ValueError):
            reproject(
                band,
                result,
                src_crs=src.crs,
                src_transform=src.transform,
                dst_crs="EPSG:32619",
                dst_transform=DST_TRANSFORM,
                resampling=99,
            )
# NOTE(review): "guass" in the test name is a typo for "gauss"; left as-is
# because renaming would change the pytest test id.
def test_reproject_unsupported_resampling_guass():
    """Resampling.gauss is unsupported."""
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        source = src.read(1)
        dst_crs = "EPSG:32619"
        out = np.empty(src.shape, dtype=np.uint8)
        # gauss resampling exists in the enum but is not valid for warping.
        with pytest.raises(ValueError):
            reproject(
                source,
                out,
                src_transform=src.transform,
                src_crs=src.crs,
                dst_transform=DST_TRANSFORM,
                dst_crs=dst_crs,
                resampling=Resampling.gauss,
            )
@pytest.mark.parametrize("method", SUPPORTED_RESAMPLING)
def test_resample_default_invert_proj(method):
    """Nearest and bilinear should produce valid results
    with the default Env
    """
    with rasterio.open("tests/data/world.rgb.tif") as src:
        source = src.read(1)
        profile = src.profile
    dst_crs = "EPSG:32619"
    # Calculate the ideal dimensions and transformation in the new crs
    dst_affine, dst_width, dst_height = calculate_default_transform(
        src.crs, dst_crs, src.width, src.height, *src.bounds
    )
    profile["height"] = dst_height
    profile["width"] = dst_width
    out = np.empty(shape=(dst_height, dst_width), dtype=np.uint8)
    # GDAL 1.11 needs to have this config option set on to match the
    # default results in later versions.
    if gdal_version.major == 1:
        options = dict(CHECK_WITH_INVERT_PROJ=True)
    else:
        options = {}
    with rasterio.Env(**options):
        reproject(
            source,
            out,
            src_transform=src.transform,
            src_crs=src.crs,
            dst_transform=dst_affine,
            dst_crs=dst_crs,
            resampling=method,
        )
        # A non-blank result: some data transferred for every method.
        assert out.mean() > 0
@pytest.mark.xfail(reason="Projection extents have changed with PROJ 8")
def test_target_aligned_pixels():
    """Issue 853 has been resolved"""
    with rasterio.open("tests/data/world.rgb.tif") as src:
        source = src.read(1)
        profile = src.profile
    dst_crs = "EPSG:3857"
    with rasterio.Env(CHECK_WITH_INVERT_PROJ=False):
        # Calculate the ideal dimensions and transformation in the new crs
        dst_affine, dst_width, dst_height = calculate_default_transform(
            src.crs, dst_crs, src.width, src.height, *src.bounds
        )
        # Snap the grid to multiples of 10 km so output pixels are aligned.
        dst_affine, dst_width, dst_height = aligned_target(
            dst_affine, dst_width, dst_height, 10000.0
        )
        profile["height"] = dst_height
        profile["width"] = dst_width
        out = np.empty(shape=(dst_height, dst_width), dtype=np.uint8)
        reproject(
            source,
            out,
            src_transform=src.transform,
            src_crs=src.crs,
            dst_transform=dst_affine,
            dst_crs=dst_crs,
            resampling=Resampling.nearest,
        )
        # Check that there are no black borders
        assert out[:, 0].all()
        assert out[:, -1].all()
        assert out[0, :].all()
        assert out[-1, :].all()
@pytest.mark.parametrize("method", SUPPORTED_RESAMPLING)
def test_resample_no_invert_proj(method):
    """Nearest and bilinear should produce valid results with
    CHECK_WITH_INVERT_PROJ = False
    """
    if method in (
        Resampling.bilinear,
        Resampling.cubic,
        Resampling.cubic_spline,
        Resampling.lanczos,
    ):
        pytest.xfail(
            reason="Some resampling methods succeed but produce blank images. "
            "See https://github.com/mapbox/rasterio/issues/614"
        )
    with rasterio.Env(CHECK_WITH_INVERT_PROJ=False):
        with rasterio.open("tests/data/world.rgb.tif") as src:
            source = src.read(1)
            profile = src.profile.copy()
            dst_crs = "EPSG:32619"
            # Calculate the ideal dimensions and transformation in the new crs
            dst_affine, dst_width, dst_height = calculate_default_transform(
                src.crs, dst_crs, src.width, src.height, *src.bounds
            )
            profile["height"] = dst_height
            profile["width"] = dst_width
            # see #614, some resampling methods succeed but produce blank
            # images, so the source shape (not the calculated destination
            # shape) is used for the output. The original code allocated a
            # (dst_height, dst_width) array first and immediately threw it
            # away; that dead allocation is removed here.
            out = np.empty(src.shape, dtype=np.uint8)
            reproject(
                source,
                out,
                src_transform=src.transform,
                src_crs=src.crs,
                dst_transform=dst_affine,
                dst_crs=dst_crs,
                resampling=method,
            )
            assert out.mean() > 0
def test_reproject_crs_none():
    """Reproject with crs is None should not cause segfault"""
    source = np.random.random(25).reshape((1, 5, 5))
    destination = np.empty(shape=(1, 11, 11))
    # Missing CRSs must surface as a Python error, never a crash.
    with pytest.raises(ValueError):
        reproject(
            source,
            destination,
            src_transform=Affine(1.1, 0.0, 0.0, 0.0, 1.1, 0.0),
            src_crs=None,
            dst_transform=Affine(0.5, 0.0, 0.0, 0.0, 0.5, 0.0),
            dst_crs=None,
            resampling=Resampling.nearest,
        )
def test_reproject_identity_src():
    """Reproject with an identity like source matrices."""
    source = np.random.random(25).reshape((1, 5, 5))
    destination = np.empty(shape=(1, 10, 10))
    dst_transform = Affine(0.5, 0.0, 0.0, 0.0, 0.5, 0.0)
    crs = {"init": "epsg:3857"}
    # Neither orientation of the identity transform should raise.
    for src_transform in (
        Affine(1.0, 0.0, 0.0, 0.0, 1.0, 0.0),  # Identity both positive
        Affine(1.0, 0.0, 0.0, 0.0, -1.0, 0.0),  # Identity with negative e
    ):
        reproject(
            source,
            destination,
            src_transform=src_transform,
            src_crs=crs,
            dst_transform=dst_transform,
            dst_crs=crs,
            resampling=Resampling.nearest,
        )
def test_reproject_identity_dst():
    """Reproject with an identity like destination matrices."""
    source = np.random.random(100).reshape((1, 10, 10))
    destination = np.empty(shape=(1, 5, 5))
    src_transform = Affine(0.5, 0.0, 0.0, 0.0, 0.5, 0.0)
    crs = {"init": "epsg:3857"}
    # Neither orientation of the identity transform should raise.
    for dst_transform in (
        Affine(1.0, 0.0, 0.0, 0.0, 1.0, 0.0),  # Identity both positive
        Affine(1.0, 0.0, 0.0, 0.0, -1.0, 0.0),  # Identity with negative e
    ):
        reproject(
            source,
            destination,
            src_transform=src_transform,
            src_crs=crs,
            dst_transform=dst_transform,
            dst_crs=crs,
            resampling=Resampling.nearest,
        )
@pytest.fixture(scope="function")
def rgb_byte_profile():
    """Profile (metadata dict) of the canonical RGB.byte.tif test file."""
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        return src.profile
def test_reproject_gcps_transform_exclusivity():
    """gcps and transform can't be used together."""
    # Dummy positional/keyword values: the error is raised before any of
    # them are interpreted.
    with pytest.raises(ValueError):
        reproject(1, 1, gcps=[0], src_transform=[0])
def test_reproject_gcps(rgb_byte_profile):
    """Reproject using ground control points for the source"""
    source = np.ones((3, 800, 800), dtype=np.uint8) * 255
    # NOTE(review): "height" is used for BOTH output axes; presumably the
    # second occurrence was meant to be "width" — verify before changing,
    # as the corner assertions below depend on the current shape.
    out = np.zeros(
        (3, rgb_byte_profile["height"], rgb_byte_profile["height"]), dtype=np.uint8
    )
    # Four GCPs pinning the raster corners to UTM 18N coordinates.
    src_gcps = [
        GroundControlPoint(row=0, col=0, x=156113, y=2818720, z=0),
        GroundControlPoint(row=0, col=800, x=338353, y=2785790, z=0),
        GroundControlPoint(row=800, col=800, x=297939, y=2618518, z=0),
        GroundControlPoint(row=800, col=0, x=115698, y=2651448, z=0),
    ]
    reproject(
        source,
        out,
        src_crs="EPSG:32618",
        gcps=src_gcps,
        dst_transform=rgb_byte_profile["transform"],
        dst_crs=rgb_byte_profile["crs"],
        resampling=Resampling.nearest,
    )
    # Data was warped in (not all zero) but the rotated footprint leaves
    # all four destination corners empty.
    assert not out.all()
    assert not out[:, 0, 0].any()
    assert not out[:, 0, -1].any()
    assert not out[:, -1, -1].any()
    assert not out[:, -1, 0].any()
@requires_gdal22(
    reason="GDAL 2.2.0 and newer has different antimeridian cutting behavior."
)
def test_transform_geom_gdal22():
    """Enabling `antimeridian_cutting` has no effect on GDAL 2.2.0 or newer
    where antimeridian cutting is always enabled. This could produce
    unexpected geometries, so an exception is raised.
    """
    geom = {"type": "Point", "coordinates": [0, 0]}
    # Explicitly disabling cutting is the unsupported combination.
    with pytest.raises(GDALVersionError):
        transform_geom("EPSG:4326", "EPSG:3857", geom, antimeridian_cutting=False)
def test_issue1056():
    """Warp successfully from RGB's upper bands to an array."""
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        destination = np.zeros(src.shape, dtype=np.uint8)
        # Band 2 (not 1) is the point of the regression test.
        reproject(
            rasterio.band(src, 2),
            destination,
            src_crs=src.crs,
            src_transform=src.transform,
            dst_crs="EPSG:3857",
            dst_transform=DST_TRANSFORM,
            resampling=Resampling.nearest,
        )
def test_reproject_dst_nodata():
    """Affirm resolution of issue #1395"""
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        source = src.read(1)
    dst_crs = "EPSG:3857"
    # Float destination so NaN can serve as dst_nodata.
    out = np.empty(src.shape, dtype=np.float32)
    reproject(
        source,
        out,
        src_transform=src.transform,
        src_crs=src.crs,
        dst_transform=DST_TRANSFORM,
        dst_crs=dst_crs,
        src_nodata=0,
        dst_nodata=np.nan,
        resampling=Resampling.nearest,
    )
    # Valid pixels are positive; nodata pixels (e.g. the corner) are NaN,
    # not the source nodata value 0.
    assert (out[~np.isnan(out)] > 0.0).sum() == 438113
    assert out[0, 0] != 0
    assert np.isnan(out[0, 0])
def test_issue1401():
    """The warp_mem_limit keyword argument is in effect"""
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        destination = np.zeros(src.shape, dtype=np.uint8)
        # Passing a memory limit must be accepted and not break the warp.
        reproject(
            rasterio.band(src, 2),
            destination,
            src_crs=src.crs,
            src_transform=src.transform,
            dst_crs="EPSG:3857",
            dst_transform=DST_TRANSFORM,
            resampling=Resampling.nearest,
            warp_mem_limit=4000,
        )
def test_reproject_dst_alpha(path_rgb_msk_byte_tif):
    """Materialization of external mask succeeds"""
    with rasterio.open(path_rgb_msk_byte_tif) as src:
        nrows, ncols = src.shape
        # One extra band to receive the alpha channel (band 4).
        dst_arr = np.zeros((src.count + 1, nrows, ncols), dtype=np.uint8)
        reproject(
            rasterio.band(src, src.indexes),
            dst_arr,
            src_transform=src.transform,
            src_crs=src.crs,
            dst_transform=DST_TRANSFORM,
            dst_crs="EPSG:3857",
            dst_alpha=4,
        )
        # The alpha band (index 3) must have been populated from the mask.
        assert dst_arr[3].any()
@pytest.mark.xfail(
    rasterio.__gdal_version__ in ["2.2.0", "2.2.1", "2.2.2", "2.2.3"],
    reason=(
        "GDAL had regression in 2.2.X series, fixed in 2.2.4,"
        " reproject used dst index instead of src index when destination was single band"
    ),
)
def test_issue1350():
    """Warp bands other than 1 or All"""
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        dst_crs = "EPSG:3857"
        reprojected = []
        # Warp each band separately into its own single-band destination.
        for dtype, idx in zip(src.dtypes, src.indexes):
            out = np.zeros((1,) + src.shape, dtype=dtype)
            reproject(
                rasterio.band(src, idx),
                out,
                resampling=Resampling.nearest,
                dst_transform=DST_TRANSFORM,
                dst_crs=dst_crs,
            )
            reprojected.append(out)
        # Each band's result must differ from band 1's — proving the source
        # band index (not the destination index) was honored.
        for i in range(1, len(reprojected)):
            assert not (reprojected[0] == reprojected[i]).all()
def test_issue_1446():
    """Confirm resolution of #1446"""
    point = transform_geom(
        CRS.from_epsg(4326),
        CRS.from_epsg(32610),
        {"type": "Point", "coordinates": (-122.51403808499907, 38.06106733107932)},
    )
    x, y = point["coordinates"]
    assert (round(x, 1), round(y, 1)) == (542630.9, 4212702.1)
@requires_gdal_lt_3
def test_issue_1446_b():
    """Confirm that lines aren't thrown as reported in #1446"""
    src_crs = CRS.from_epsg(4326)
    # Mars sinusoidal projection from the original report.
    dst_crs = CRS(
        {
            "proj": "sinu",
            "lon_0": 350.85607029556,
            "x_0": 0,
            "y_0": 0,
            "a": 3396190,
            "b": 3396190,
            "units": "m",
            "no_defs": True,
        }
    )
    # Use a context manager: the original `json.load(open(...))` leaked
    # the file handle.
    with open("tests/data/issue1446.geojson") as fp:
        collection = json.load(fp)
    geoms = {f["properties"]["fid"]: f["geometry"] for f in collection["features"]}
    transformed_geoms = {
        k: transform_geom(src_crs, dst_crs, g) for k, g in geoms.items()
    }
    # Before the fix, this geometry was thrown eastward of 0.0. It should be between -350 and -250.
    assert all([-350 < x < -150 for x, y in transformed_geoms[183519]["coordinates"]])
def test_reproject_init_dest_nodata():
    """No pixels should transfer over"""
    crs = CRS.from_epsg(4326)
    identity = Affine.identity()
    source = np.zeros((1, 100, 100))
    destination = np.ones((1, 100, 100))
    reproject(
        source,
        destination,
        src_crs=crs,
        src_transform=identity,
        dst_crs=crs,
        dst_transform=identity,
        src_nodata=0,
        init_dest_nodata=False,
    )
    # Every source pixel is nodata and the destination is not initialized,
    # so all destination pixels keep their original value of 1.
    assert destination.all()
def test_empty_transform_inputs():
    """Check for fix of #1952"""
    # Empty coordinate lists round-trip as empty lists.
    xs, ys = rasterio.warp.transform("EPSG:3857", "EPSG:4326", [], [], zs=None)
    assert xs == []
    assert ys == []
def test_empty_transform_inputs_z():
    """Check for fix of #1952"""
    # With an (empty) z list, three empty lists come back.
    xs, ys, zs = rasterio.warp.transform("EPSG:3857", "EPSG:4326", [], [], zs=[])
    assert (xs, ys, zs) == ([], [], [])
def test_empty_transform_inputs_length():
    """Get an exception if inputs have different lengths"""
    with pytest.raises(TransformError):
        rasterio.warp.transform("EPSG:3857", "EPSG:4326", [1], [1, 2])
def test_empty_transform_inputs_length_z():
    """Get an exception if inputs have different lengths"""
    with pytest.raises(TransformError):
        rasterio.warp.transform("EPSG:3857", "EPSG:4326", [1, 2], [1, 2], zs=[0])
def test_reproject_rpcs(caplog):
    """Reproject using rational polynomial coefficients for the source"""
    with rasterio.open('tests/data/RGB.byte.rpc.vrt') as src:
        # NOTE(review): profile "width" is used for the row axis and
        # "height" for the column axis — presumably swapped; verify before
        # changing, as the corner assertions depend on the current shape.
        out = np.zeros(
            (3, src.profile["width"], src.profile["height"]), dtype=np.uint8
        )
        src_rpcs = src.rpcs
        reproject(
            rasterio.band(src, src.indexes),
            out,
            src_crs="EPSG:4326",
            rpcs=src_rpcs,
            dst_crs="EPSG:3857",
            resampling=Resampling.nearest,
        )
        # Data warped in, but the rotated footprint leaves all four
        # destination corners empty.
        assert not out.all()
        assert not out[:, 0, 0].any()
        assert not out[:, 0, -1].any()
        assert not out[:, -1, -1].any()
        assert not out[:, -1, 0].any()
def test_reproject_rpcs_with_transformer_options(caplog):
    """Reproject using rational polynomial coefficients and additional transformer options"""
    with rasterio.open('tests/data/RGB.byte.rpc.vrt') as src:
        # Build an in-memory DEM (constant 500 m) for the RPC transformer.
        with rasterio.MemoryFile(dirname='foo', filename='dem.tif') as mem:
            crs = 'COMPD_CS["WGS 84 + EGM96 height",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Latitude",NORTH],AXIS["Longitude",EAST],AUTHORITY["EPSG","4326"]],VERT_CS["EGM96 height",VERT_DATUM["EGM96 geoid",2005,AUTHORITY["EPSG","5171"]],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Gravity-related height",UP],AUTHORITY["EPSG","5773"]]]'
            transform = Affine(0.001953396267361111, 0.0, -124.00013888888888, 0.0, -0.001953396267361111, 50.000138888888884)
            with mem.open(
                driver="GTiff",
                width=1024,
                height=1024,
                count=1,
                transform=transform,
                dtype="int16",
                crs=crs,
            ) as dem:
                # we flush dem dataset before letting GDAL read from vsimem
                dem.write_band(1, 500 * np.ones((1024, 1024), dtype='int16'))
                out = np.zeros(
                    (3, src.profile["width"], src.profile["height"]), dtype=np.uint8
                )
                out2 = out.copy()
                src_rpcs = src.rpcs
                # First warp: RPC_DEM passed through as a transformer option.
                caplog.set_level(logging.DEBUG)
                reproject(
                    rasterio.band(src, src.indexes),
                    out,
                    src_crs="EPSG:4326",
                    rpcs=src_rpcs,
                    dst_crs="EPSG:3857",
                    resampling=Resampling.nearest,
                    RPC_DEM=dem.name,
                )
                # Second warp: no DEM, as a baseline for comparison.
                caplog.set_level(logging.INFO)
                reproject(
                    rasterio.band(src, src.indexes),
                    out2,
                    src_crs="EPSG:4326",
                    rpcs=src_rpcs,
                    dst_crs="EPSG:3857",
                    resampling=Resampling.nearest,
                )
                assert not out.all()
                assert not out2.all()
                # The option must have reached GDAL and changed the result.
                assert "RPC_DEM" in caplog.text
                assert not np.array_equal(out, out2)
def test_warp_gcps_compute_dst_transform_automatically_array():
    """Ensure we don't raise an exception when gcps passed without dst_transform, for a source array"""
    source = np.ones((3, 800, 800), dtype=np.uint8) * 255
    out = np.zeros((3, 512, 512))
    # Four GCPs pinning the raster corners to UTM 18N coordinates.
    src_gcps = [
        GroundControlPoint(row=0, col=0, x=156113, y=2818720, z=0),
        GroundControlPoint(row=0, col=800, x=338353, y=2785790, z=0),
        GroundControlPoint(row=800, col=800, x=297939, y=2618518, z=0),
        GroundControlPoint(row=800, col=0, x=115698, y=2651448, z=0),
    ]
    # No dst_transform: reproject must derive one from the GCPs.
    reproject(
        source,
        out,
        src_crs="EPSG:32618",
        gcps=src_gcps,
        dst_crs="EPSG:32618",
        resampling=Resampling.nearest
    )
    # Data warped in, but the rotated footprint leaves the corners empty.
    assert not out.all()
    assert not out[:, 0, 0].any()
    assert not out[:, 0, -1].any()
    assert not out[:, -1, -1].any()
    assert not out[:, -1, 0].any()
def test_warp_gcps_compute_dst_transform_automatically_reader(tmpdir):
    """Ensure we don't raise an exception when gcps passed without dst_transform, for a source dataset"""
    tiffname = str(tmpdir.join('test.tif'))
    # Four GCPs pinning the raster corners to UTM 18N coordinates.
    src_gcps = [
        GroundControlPoint(row=0, col=0, x=156113, y=2818720, z=0),
        GroundControlPoint(row=0, col=800, x=338353, y=2785790, z=0),
        GroundControlPoint(row=800, col=800, x=297939, y=2618518, z=0),
        GroundControlPoint(row=800, col=0, x=115698, y=2651448, z=0),
    ]
    out = np.zeros((3, 512, 512))
    # Write a dataset carrying only GCPs (no transform), then warp from it.
    with rasterio.open(tiffname, mode='w', height=800, width=800, count=3, dtype=np.uint8) as source:
        source.gcps = (src_gcps, CRS.from_epsg(32618))
    with rasterio.open(tiffname) as source:
        reproject(
            rasterio.band(source, source.indexes),
            out,
            dst_crs="EPSG:32618",
            resampling=Resampling.nearest
        )
    # Data warped in, but the rotated footprint leaves the corners empty.
    assert not out.all()
    assert not out[:, 0, 0].any()
    assert not out[:, 0, -1].any()
    assert not out[:, -1, -1].any()
    assert not out[:, -1, 0].any()
def test_reproject_rpcs_exact_transformer(caplog):
    """Reproject using rational polynomial coefficients and DEM, requiring that
    we don't try to make an approximate transformer.
    """
    with rasterio.open('tests/data/RGB.byte.rpc.vrt') as src:
        with rasterio.MemoryFile(dirname='foo', filename='dem.tif') as mem:
            # Compound CRS string: WGS84 horizontal + EGM96 geoid height vertical datum.
            crs = 'COMPD_CS["WGS 84 + EGM96 height",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Latitude",NORTH],AXIS["Longitude",EAST],AUTHORITY["EPSG","4326"]],VERT_CS["EGM96 height",VERT_DATUM["EGM96 geoid",2005,AUTHORITY["EPSG","5171"]],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Gravity-related height",UP],AUTHORITY["EPSG","5773"]]]'
            transform = Affine(0.001953396267361111, 0.0, -124.00013888888888, 0.0, -0.001953396267361111, 50.000138888888884)
            with mem.open(
                driver="GTiff",
                width=1024,
                height=1024,
                count=1,
                transform=transform,
                dtype="int16",
                crs=crs,
            ) as dem:
                # we flush dem dataset before letting GDAL read from vsimem
                dem.write_band(1, 500 * np.ones((1024, 1024), dtype='int16'))
            # NOTE(review): (3, width, height) looks swapped for a
            # (bands, rows, cols) array -- confirm against the fixture size.
            out = np.zeros(
                (3, src.profile["width"], src.profile["height"]), dtype=np.uint8
            )
            src_rpcs = src.rpcs
            # DEBUG level is needed so the transformer-creation message is captured.
            caplog.set_level(logging.DEBUG)
            reproject(
                rasterio.band(src, src.indexes),
                out,
                src_crs="EPSG:4326",
                rpcs=src_rpcs,
                dst_crs="EPSG:3857",
                resampling=Resampling.nearest,
                RPC_DEM=dem.name,
            )
            assert "Created exact transformer" in caplog.text
def test_reproject_rpcs_approx_transformer(caplog):
    """Reproject using rational polynomial coefficients without a DEM, for which it's
    ok to use an approximate transformer.
    """
    with rasterio.open('tests/data/RGB.byte.rpc.vrt') as src:
        # NOTE(review): (3, width, height) looks swapped for a
        # (bands, rows, cols) array -- confirm against the fixture size.
        out = np.zeros(
            (3, src.profile["width"], src.profile["height"]), dtype=np.uint8
        )
        src_rpcs = src.rpcs
        # DEBUG level is needed so the transformer-creation message is captured.
        caplog.set_level(logging.DEBUG)
        reproject(
            rasterio.band(src, src.indexes),
            out,
            src_crs="EPSG:4326",
            rpcs=src_rpcs,
            dst_crs="EPSG:3857",
            resampling=Resampling.nearest,
        )
        # NOTE(review): trailing "| ... |" text below is dataset-extraction residue, not code.
        assert "Created approximate transformer" in caplog.text | 0.698535 | 0.310289 |
import glob
import os
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import Element
import json
# Starting values for the running COCO annotation id and image id counters.
START_BOUNDING_BOX_ID = 0
START_IMAGE_ID = 0
# Fixed category-name -> id mapping; when set to None, convert() derives the
# mapping from the XML files via get_categories() instead.
PRE_DEFINE_CATEGORIES = {"DaoXianYiWu": 0, "DiaoChe": 1, "ShiGongJiXie": 2, "TaDiao": 3, "YanHuo":4}
# PRE_DEFINE_CATEGORIES = {"GanTa": 0}
# PRE_DEFINE_CATEGORIES = {"DaoXianYiWu": 0, "DiaoChe": 1, "ShiGongJiXie": 2, "TaDiao": 3, "YanHuo":4, "GanTa":5}
def get_categories(xml_files):
    """Build a category-name -> integer-id mapping from VOC-style XML files.

    Args:
        xml_files: list of XML annotation file paths.
    Returns:
        dict mapping each distinct object name to its index in sorted-name order.
    """
    names = set()
    for path in xml_files:
        root = ET.parse(path).getroot()
        for obj in root.findall("object"):
            names.add(obj.find('name').text)
    return {name: idx for idx, name in enumerate(sorted(names))}
def get_and_check(root, tag, length):
    """Find child elements of ``root`` named ``tag`` and validate their count.

    Args:
        root: xml.etree.ElementTree.Element to search under.
        tag: child tag name, e.g. "size" or "width".
        length: expected number of matches; 0 (or negative) skips the count check.
    Returns:
        The single matching Element when ``length == 1``, otherwise the list
        of matching Elements.  (Original docstring wrongly said "filename".)
    Raises:
        ValueError: when no match is found or the count differs from ``length``.
    """
    elems = root.findall(tag)  # renamed: the original shadowed the builtin ``vars``
    if len(elems) == 0:
        raise ValueError(f"Can not find {tag} in {root.tag}")
    if length > 0 and len(elems) != length:
        raise ValueError(
            f"The size of {tag} is supposed to be {length}, but is {len(elems)}."
        )
    if length == 1:
        return elems[0]
    return elems
def get(root, tag):
    """Return all direct child elements of ``root`` with the given tag (possibly empty)."""
    return root.findall(tag)
# This helper can also be used on its own, outside the conversion flow.
def add_xml_node(xml_file, tag, img_dir):
    """Set (or create) a child node of the XML root holding the image path.

    The node text becomes ``<img_dir>/<xml basename with a .jpg extension>``
    and the XML file is rewritten in place.

    Args:
        xml_file: path of the XML file to update.
        tag: name of the node to set, e.g. 'path' or 'name'.
        img_dir: directory prepended to the derived image file name.
    Returns:
        None
    """
    tree = ET.parse(xml_file)
    root = tree.getroot()
    # splitext is safer than slicing a fixed number of characters off the name.
    img_name = os.path.splitext(os.path.basename(xml_file))[0] + '.jpg'
    node = root.find(tag)
    if node is not None:
        # Node already present: overwrite its text (original printed here too).
        node.text = os.path.join(img_dir, img_name)
        print(node.text)
    else:
        # Node missing: append a fresh element.
        node = Element(tag)
        node.text = os.path.join(img_dir, img_name)
        root.append(node)
    tree.write(xml_file, encoding='utf-8')
def convert(xml_files, json_file):
    """Convert Pascal-VOC XML annotations to a COCO-format JSON file.

    Args:
        xml_files: list of XML annotation file paths.
        json_file: output JSON file path.
    Returns:
        None
    """
    json_dict = {
        "images": [],
        "type": "instances",
        "annotations": [],
        "categories": []
    }
    if PRE_DEFINE_CATEGORIES is not None:
        categories = PRE_DEFINE_CATEGORIES
    else:
        categories = get_categories(xml_files)
    image_id = START_IMAGE_ID
    bbox_id = START_BOUNDING_BOX_ID
    for xml_file in xml_files:
        tree = ET.parse(xml_file)
        root = tree.getroot()
        path = get_and_check(root, "path", 1).text
        # One XML file corresponds to one image with the same stem, so the
        # image name is derived from the XML name ("a.xml" -> "a.jpg"); the
        # <path> element is assumed to hold a directory, not a full file path.
        filename = os.path.basename(xml_file)[:-3] + 'jpg'
        size = get_and_check(root, "size", 1)
        width = int(get_and_check(size, 'width', 1).text)
        height = int(get_and_check(size, 'height', 1).text)
        json_dict["images"].append({
            "path": path,
            "file_name": filename,
            "height": height,
            "width": width,
            "id": image_id
        })
        for obj in get(root, 'object'):
            category = get_and_check(obj, 'name', 1).text
            # Objects outside the configured category set are skipped rather
            # than being assigned new category ids on the fly.
            if category not in categories:
                continue
            category_id = categories[category]
            bndbox = get_and_check(obj, 'bndbox', 1)
            xmin = int(float(get_and_check(bndbox, 'xmin', 1).text))
            ymin = int(float(get_and_check(bndbox, 'ymin', 1).text))
            xmax = int(float(get_and_check(bndbox, 'xmax', 1).text))
            ymax = int(float(get_and_check(bndbox, 'ymax', 1).text))
            # Degenerate boxes indicate a bad annotation file.
            assert xmax > xmin, f'{xml_file}'
            assert ymax > ymin, f'{xml_file}'
            o_width = abs(xmax - xmin)
            o_height = abs(ymax - ymin)
            json_dict["annotations"].append({
                "area": o_width * o_height,
                "iscrowd": 0,
                "image_id": image_id,
                "bbox": [xmin, ymin, o_width, o_height],
                "category_id": category_id,
                "id": bbox_id,  # running id of the annotation itself
                "ignore": 0,
                "segmentation": []
            })
            bbox_id += 1
        image_id += 1
    for cate, cid in categories.items():
        json_dict['categories'].append({
            "supercategory": "none",
            "id": cid,
            "name": cate
        })
    # BUG FIX: os.path.dirname() is '' for a bare file name and
    # os.makedirs('') raises -- only create the directory when there is one.
    out_dir = os.path.dirname(json_file)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    # BUG FIX: use a context manager so the file is closed even when
    # serialization fails; explicit utf-8 since ensure_ascii=False may emit
    # non-ASCII category names.
    with open(json_file, 'w', encoding='utf-8') as json_fp:
        json.dump(json_dict, json_fp, ensure_ascii=False)
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Convert xml annotations to COCO format!')
    parser.add_argument(
        "--xml-dir",
        default='/shared/xjd/DataSets/transmission_line_detection/self_labeled_xml',
        type=str,
        help='Directory path to xml files.'
    )
    parser.add_argument(
        "--xml-dir2",
        default='/shared/xjd/DataSets/transmission_line_detection/test',
        type=str,
        help='Directory path to xml files.'
    )
    # parser.add_argument(
    #     "--xml-dir3",
    #     default='/shared/xjd/DataSets/transmission_line_detection/test_xml',
    #     type=str,
    #     help='Directory path to xml files.'
    # )
    # parser.add_argument(
    #     "--xml-dir4",
    #     default='/shared/xjd/DataSets/transmission_line_detection/train14000_xml',
    #     type=str,
    #     help='Directory path to xml files.'
    # )
    parser.add_argument(
        "--json-file",
        default='/shared/xjd/DataSets/transmission_line_detection/train_6cates_1280.json',
        type=str,
        help='Output COCO format json file.'
    )
    parser.add_argument(
        "--json-file2",
        default='/shared/xjd/DataSets/transmission_line_detection/test_6cates_3490.json',
        type=str,
        help='Output COCO format json file.'
    )
    args = parser.parse_args()
    xml_files = glob.glob(os.path.join(args.xml_dir, "*.xml"))  # all .xml files in the directory
    # The code below is only needed when combining data from two sources.
    # xml_files2 = glob.glob(os.path.join('/shared/xjd/DataSets/transmission_line_detection/train14000_xml', "*.xml"))
    # xml_files2 = glob.glob(os.path.join(args.xml_dir2, "*.xml"))
    # xml_files.extend(xml_files2[:-200])
    # xml_files3 = glob.glob(os.path.join(args.xml_dir3, "*.xml"))
    # xml_files3.extend(xml_files2[-200:])
    # Only needed for the large dataset.
    # xml_files4 = glob.glob(os.path.join(args.xml_dir4, "*.xml"))
    # xml_files.extend(xml_files4)
    print(f"Number of xml files:{len(xml_files)}")
    convert(xml_files, args.json_file)
    print(f"Success:{args.json_file}")
    # print(f"Number of xml files:{len(xml_files3)}")
    # convert(xml_files3, args.json_file2)
    # NOTE(review): trailing "| ... |" text below is dataset-extraction residue, not code.
    # print(f"Success:{args.json_file2}") | tools/dataset_analysis/xml2coco_new.py | import glob
import os
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import Element
import json
START_BOUNDING_BOX_ID = 0
START_IMAGE_ID = 0
PRE_DEFINE_CATEGORIES = {"DaoXianYiWu": 0, "DiaoChe": 1, "ShiGongJiXie": 2, "TaDiao": 3, "YanHuo":4}
# PRE_DEFINE_CATEGORIES = {"GanTa": 0}
# PRE_DEFINE_CATEGORIES = {"DaoXianYiWu": 0, "DiaoChe": 1, "ShiGongJiXie": 2, "TaDiao": 3, "YanHuo":4, "GanTa":5}
def get_categories(xml_files):
'''
Generte category name to id mapping from a list of xml files.
Args:
xml_files[list]: A list of xml file paths.
Return: dict -- category name to id mapping.
'''
classes_names = []
for xml_file in xml_files:
tree = ET.parse(xml_file)
root = tree.getroot()
for item in root.findall("object"):
classes_names.append(item.find('name').text)
classes_names = list(set(classes_names))
classes_names.sort()
return {name : i for i, name in enumerate(classes_names)}
def get_and_check(root, tag, length):
'''
Args:
root: xml.etree.ElementTree.ElementTree object
tag: xml file tag name. eg:"size","width"
length: default 1
Return: filename
'''
vars = root.findall(tag)
if len(vars) == 0:
raise ValueError(f"Can not find {tag} in {root.tag}")
if length > 0 and len(vars) != length:
raise ValueError(
f"The size of {tag} is supposed to be {length}, but is {len(vars)}."
)
if length == 1:
vars = vars[0]
return vars
def get(root, tag):
vars = root.findall(tag)
return vars
# 这个函数可以单独拎出来
def add_xml_node(xml_file, tag, img_dir):
'''
往xml文件中增加一个节点
Args:
tag: 要增加的节点的名字,比如:'path','name'
xml_file: 要增加节点的xml文件
Returns:None
'''
tree = ET.parse(xml_file)
root = tree.getroot()
vars = root.findall(tag)
if len(vars) != 0: # 如果有path节点的话
path = root.find(tag)
path.text = os.path.join(img_dir, os.path.basename(xml_file)[:-3] + 'jpg')
print(path.text)
tree.write(xml_file, encoding='utf-8')
# print('走的if分支')
else:
element = Element(tag)
element.text = os.path.join(img_dir, os.path.basename(xml_file)[:-3] + 'jpg')
# print(element.text)
root.append(element)
tree.write(xml_file, encoding='utf-8')
# print('走的else分支')
# print('Finished!')
def convert(xml_files, json_file):
'''
Convert xml annotations to COCO format.
Args:
xml_file: xml format file path.
json_file: output to a json file.
Return: None
'''
json_dict = {
"images":[],
"type":"instances",
"annotations":[],
"categories":[]
}
if PRE_DEFINE_CATEGORIES is not None:
categories = PRE_DEFINE_CATEGORIES
else:
categories = get_categories(xml_files)
image_id = START_IMAGE_ID
bbox_id = START_BOUNDING_BOX_ID
for xml_file in xml_files:
tree = ET.parse(xml_file)
root = tree.getroot()
path = get_and_check(root, "path", 1).text
# 这里假设我们的文件是以数字命名的,比如:"1.jpg","2.jpg","1.xml","2.xml"
# filename = os.path.basename(xml_file)[:-3] + 'jpg'
# image_id = int(os.path.basename(xml_file)[:-4])
# TODO:在不将文件名更改的情况下,实现转换。主要依据其实就是一个xml文件对应一张image,除后缀外,名字相同,因此有了下面的代码。
# filename = os.path.basename(path) # 这是path是完整的路径时的用法
filename = os.path.basename(xml_file)[:-3] + 'jpg' # 这是path只是一个文件夹名字的用法
size = get_and_check(root, "size", 1)
width = int(get_and_check(size, 'width', 1).text)
height = int(get_and_check(size, 'height', 1).text)
image = {
"path":path,
"file_name":filename,
"height":height,
"width":width,
"id":image_id
}
json_dict["images"].append(image)
for obj in get(root, 'object'):
category = get_and_check(obj, 'name', 1).text
if category not in categories: # 事实上,如果只想对已经指定的某些类别进行转化,这里只需pass就好,不需要创建新的类别映射
# new_id = len(categories)
# categories[category] = new_id
continue
category_id = categories[category]
bndbox = get_and_check(obj, 'bndbox', 1)
xmin = int(float(get_and_check(bndbox, 'xmin', 1).text))
ymin = int(float(get_and_check(bndbox, 'ymin', 1).text))
xmax = int(float(get_and_check(bndbox, 'xmax', 1).text))
ymax = int(float(get_and_check(bndbox, 'ymax', 1).text))
assert xmax > xmin, f'{xml_file}'
assert ymax > ymin, f'{xml_file}'
o_width = abs(xmax - xmin)
o_height = abs(ymax - ymin)
ann = {
"area":o_width * o_height,
"iscrowd":0,
"image_id":image_id,
"bbox":[xmin, ymin, o_width, o_height],
"category_id":category_id,
"id":bbox_id, # 这个表示object的id
"ignore":0,
"segmentation":[]
}
json_dict["annotations"].append(ann)
bbox_id += 1
image_id += 1
for cate, cid in categories.items():
cat = {
"supercategory":"none",
"id":cid,
"name":cate
}
json_dict['categories'].append(cat)
os.makedirs(os.path.dirname(json_file), exist_ok=True)
json_fp = open(json_file, 'w')
json_str = json.dumps(json_dict, ensure_ascii=False)
json_fp.write(json_str)
json_fp.close()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Convert xml annotations to COCO format!')
parser.add_argument(
"--xml-dir",
default='/shared/xjd/DataSets/transmission_line_detection/self_labeled_xml',
type=str,
help='Directory path to xml files.'
)
parser.add_argument(
"--xml-dir2",
default='/shared/xjd/DataSets/transmission_line_detection/test',
type=str,
help='Directory path to xml files.'
)
# parser.add_argument(
# "--xml-dir3",
# default='/shared/xjd/DataSets/transmission_line_detection/test_xml',
# type=str,
# help='Directory path to xml files.'
# )
# parser.add_argument(
# "--xml-dir4",
# default='/shared/xjd/DataSets/transmission_line_detection/train14000_xml',
# type=str,
# help='Directory path to xml files.'
# )
parser.add_argument(
"--json-file",
default='/shared/xjd/DataSets/transmission_line_detection/train_6cates_1280.json',
type=str,
help='Output COCO format json file.'
)
parser.add_argument(
"--json-file2",
default='/shared/xjd/DataSets/transmission_line_detection/test_6cates_3490.json',
type=str,
help='Output COCO format json file.'
)
args = parser.parse_args()
xml_files = glob.glob(os.path.join(args.xml_dir, "*.xml")) # 返回以.xml结尾的目录及文件列表
# 下面的代码只有在结合两个来源的数据的时候用到
# xml_files2 = glob.glob(os.path.join('/shared/xjd/DataSets/transmission_line_detection/train14000_xml', "*.xml"))
# xml_files2 = glob.glob(os.path.join(args.xml_dir2, "*.xml"))
# xml_files.extend(xml_files2[:-200])
# xml_files3 = glob.glob(os.path.join(args.xml_dir3, "*.xml"))
# xml_files3.extend(xml_files2[-200:])
# 大数据集时才需要下面的代码
# xml_files4 = glob.glob(os.path.join(args.xml_dir4, "*.xml"))
# xml_files.extend(xml_files4)
print(f"Number of xml files:{len(xml_files)}")
convert(xml_files, args.json_file)
print(f"Success:{args.json_file}")
# print(f"Number of xml files:{len(xml_files3)}")
# convert(xml_files3, args.json_file2)
# print(f"Success:{args.json_file2}") | 0.145055 | 0.23092 |
__appname__ = "puls_price_converter"
__version__ = "17.233.1000" #pid locking update, fix coding troubles
#__version__ = "17.229.1140" #update pathes, working with pid-file, code readability
#__version__ = "17.228.1800" #first edition
import os
import sys
import glob
import time
import email
import poplib
import traceback
from email.header import decode_header
work_dir = os.path.dirname(os.path.abspath(sys.argv[0])) #work folder
def main():
    """Entry point: load configuration, download price mails, convert them.

    Always returns True; a missing password file is reported and aborts the
    run before any network activity.
    """
    conf = _pre()
    if conf:
        price_get(conf)
        converter(conf)
    else:
        print('!!! Error -> no password file', flush=True)
    return True
def _pre():
    """Build the runtime configuration dict.

    Reads user/password from ``online365.pass`` next to the script and makes
    sure the temporary and destination folders exist.

    Returns:
        Config dict, or False when the password file is missing.
    """
    pass_path = os.path.join(work_dir, 'online365.pass')  # file with password
    if not os.path.exists(pass_path):
        return False
    with open(pass_path, 'r') as f:
        lines = f.read().strip().split('\n')
    app_conf = {
        'user': lines[0].strip(),
        'password': lines[1].strip(),
        # 28176 = puls76
        's_path': '/tmp/28176_tmp',  # temporary folder
        'd_path': '/home/plexpert/www/puls76/prices',  # destination folder
        'out_file': 'price28176-2901161825.txt',  # output txt-file name
        'mail_host': 'pop.yandex.ru',  # server
        'mail_port': 995,  # port
        # Subject keywords (lower-case) that mark a price mail.
        'params': [
            'пульс',
            #'катрен'
        ],
    }
    for folder in (app_conf['s_path'], app_conf['d_path']):
        if not os.path.exists(folder):
            os.makedirs(folder, mode=0o777)
    return app_conf
def puls_parse(filename):
    """Parse a cp1251, ';'-separated price file into tab-separated text.

    Args:
        filename: path of the downloaded price attachment.
    Returns:
        The converted price table as one string: a fixed header row followed
        by the data rows with expire dates normalized to YYYYMMDD.
    """
    with open(filename, 'rb') as f_obj:
        data = f_obj.read().decode('cp1251').replace(';', '\t')
    # Supplier column mapping kept for reference:
    # ЦЕНА->PRICE1, КОД->CODEPST, ТОВАР->NAME, ЗАВОД->[FIRM, CNTR],
    # УПАК->QNTPACK, ОСТАТОК->QNT, РЕЕСТР->GNVLS, ГОДЕН->GDATE,
    # КРАТЗАК->MULTIPLC, МИНЗАК->MINORD, ШТРИХ->EAN13, НДС->NDS
    # YYYYMMDD - mask for expire date
    header = 'CODEPST\tF2\tNAME\tF4\tPRICE1\tFIRM\tQNT\tQNTPACK\tF9\tF10\tF11\tGNVLS\tF13\tF14\tEAN13\tMULTIPLC\tGDATE\tCNTR'
    # Dead pre-assignment ``ret = []`` removed; also the ``data = None``
    # initializer was never needed.
    ret = _format_expire_date(data, -1)
    ret.insert(0, header)
    return '\n'.join(ret)
def _format_expire_date(data, date_position=-1):
data = data.strip().split('\n')
ret = []
for lin in data:
lin_data = lin.strip().split('\t')
date, _ = lin_data[-1].split()
date = date.strip().split('.')
date.reverse()
date = ''.join(date)
lin_data[-1] = date
lin_data.append(' ')
ret.append('\t'.join(lin_data))
return ret
def converter(app_conf):
    """Convert every downloaded price file in s_path and publish it to d_path.

    Each successfully parsed source file is deleted afterwards; every result
    is written to the single configured out_file name (later files overwrite
    earlier output).
    """
    for filename in get_names(app_conf["s_path"]):
        filename = os.path.abspath(filename)
        data = puls_parse(filename)
        if data:
            # The source attachment is no longer needed once parsed.
            os.remove(filename)
            # Best-effort cleanup of the (possibly now empty) parent folder;
            # rmdir fails harmlessly while other files remain.
            try: os.rmdir(os.path.dirname(filename))
            except: pass
            out_f_name = os.path.join(app_conf["d_path"], app_conf['out_file'])
            with open(out_f_name, 'wb') as out_f_obj:
                out_f_obj.write(data.encode())
            stri = f"-||converting: {os.path.basename(filename)} ->> {out_f_name} - succesfully"
            print(stri, flush=True)
            # presumably paces downstream consumers between files -- TODO confirm
            time.sleep(1.1)
class Mail_POP3_SSL(poplib.POP3_SSL):
    """poplib.POP3_SSL extended with context-manager support.

    ``quit()`` is attempted on exit; an OSError (e.g. a connection that is
    already gone) is deliberately ignored.
    """
    def __enter__(self):
        return self

    def __exit__(self, *args):
        try:
            self.quit()
        except OSError:
            pass
def get_names(path='.'):
    """Yield every entry (file or folder) directly inside *path*."""
    yield from glob.glob(path + '/*')
def msg_str_parse(v, decoder):
    """Decode a MIME header value with *decoder* (e.g. email.header.decode_header).

    Returns '' for a falsy value; bytes chunks are decoded with their declared
    charset, falling back to utf8.
    """
    if not v:
        return ''
    pieces = []
    for word, charset in decoder(v):
        if isinstance(word, bytes):
            word = word.decode(charset or 'utf8')
        pieces.append(word)
    return ''.join(pieces)
def price_get(app_conf):
    """Download price-list attachments from the configured POP3 mailbox.

    Messages whose Subject contains one of app_conf['params'] (matched
    case-insensitively) have their attachments saved into app_conf['s_path'];
    matched messages are then deleted from the server.
    """
    parse_params = app_conf['params']
    user = app_conf["user"]
    password = app_conf["password"]
    mail_host = app_conf["mail_host"]
    mail_port = app_conf["mail_port"]
    s_path = app_conf["s_path"]
    attach = False
    with Mail_POP3_SSL(host=mail_host, port=mail_port) as mail_box:
        uid_list = []
        mail_box.user(user)
        mail_box.pass_(password)
        print(f'-|connecting to mailbox {user}', flush=True)
        # uidl() returns (response, listing, octets); listing rows are b"num uid".
        uid_list = mail_box.uidl()[1]
        while uid_list:
            # Pop from the end of the listing; the first token is the message number.
            uid = uid_list.pop().decode().split()[0]
            msg = b'\r\n'.join(mail_box.retr(uid)[1])
            message = email.message_from_bytes(msg)
            v = message['Subject']
            s = msg_str_parse(v, decode_header)
            for param in parse_params:
                if param in s.lower():
                    for part in message.walk():
                        # Skip containers and inline parts; keep real attachments.
                        if part.get_content_maintype() == 'multipart':
                            continue
                        if not part.get('Content-Disposition'):
                            continue
                        print(f'-|mail id: {uid}, attachment found', flush=True)
                        filename = part.get_filename()
                        # Attachment names may arrive RFC 2047-encoded; best-effort decode.
                        try: filename = decode_header(filename)[0][0].decode()
                        except: pass
                        if not filename:
                            filename = "unknown_name.txt"
                        # NOTE(review): identical attachment names overwrite each other.
                        f_path = os.path.join(s_path, filename)
                        with open(f_path, 'wb') as f:
                            f.write(part.get_payload(decode=1))
                        print(f'-|attachment -> {os.path.basename(f_path)} <- downloaded', flush=True)
                        mail_box.dele(uid) #deleting message, in production
                        attach = True
    if attach:
        print('-|all available prices downloaded', flush=True)
def pid_alive(pid=None, pid_path=None):
    """Take a single-instance lock backed by an SQLite database file.

    A row with our pid is inserted inside an open EXCLUSIVE transaction, so a
    second process connecting to the same file gets "database is locked"
    within the 0.2 s timeout.

    Args:
        pid: process id to record (defaults to os.getpid()).
        pid_path: lock-file path (defaults to <script>.pid next to the script).
    Returns:
        True when another instance already holds the lock, else False.
    Side effects:
        On success stores [pid, pid_path, connection, cursor] in sys.PID so
        pid_close() can release the lock later.
    """
    import sqlite3
    if not pid:
        pid = os.getpid()
    if not pid_path:
        pid_path = os.path.splitext(os.path.abspath(sys.argv[0]))[0] + '.pid'
    isLocked = False
    con = cur = None
    try:
        con = sqlite3.connect(pid_path, timeout=0.2, isolation_level='EXCLUSIVE')
        con.executescript("PRAGMA page_size = 1024; PRAGMA journal_mode = MEMORY; PRAGMA synchronous = OFF;")
        cur = con.cursor()
        # BUG FIX: was "DEFAULT CURRENCY_TIMESTAMP" (not valid SQLite), which
        # made this CREATE fail and the INSERT below crash on a missing table.
        sql = "CREATE TABLE PID (ID INTEGER NOT NULL, DT TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP);"
        cur.execute(sql)
        con.commit()
    except sqlite3.OperationalError as e:
        s = e.args[0].lower()
        if s.find('table pid already exists') > -1:
            pass  # lock file reused from an earlier (cleanly finished) run
        elif s.find('database is locked') > -1:
            isLocked = True  # another live instance holds the lock
        else:
            print(e.__class__, e, flush=True)
    except Exception as e:
        print(e.__class__, e, flush=True)
    finally:
        if isLocked:
            if con:
                con.close()
            con = cur = None
        else:
            # Leave the transaction open: holding the write lock IS the lock.
            cur.execute('INSERT INTO PID(ID)VALUES(?)', (pid,))
            sys.PID = [pid, pid_path, con, cur]
    return isLocked
def pid_close():
    """Release the single-instance lock created by pid_alive().

    Closes the stored cursor/connection and removes the lock file; a missing
    file is silently ignored.
    """
    pid, lock_path, connection, cursor = sys.PID
    sys.PID = []
    if cursor:
        cursor.close()
    if connection:
        connection.close()
    try:
        os.remove(lock_path)
    except:
        pass
if __name__ == '__main__':
    # Re-wrap stdio as line-buffered UTF-8 (presumably to avoid encoding
    # errors when output is redirected -- TODO confirm).
    sys.stdout = open(sys.stdout.fileno(), mode='w', encoding='UTF-8', buffering=1)
    sys.stderr = open(sys.stderr.fileno(), mode='w', encoding='UTF-8', buffering=1)
    _pid_path = '/tmp/' + os.path.splitext(os.path.basename(sys.argv[0]))[0] + '.lock'
    # Single-instance guard: exit quietly when another copy holds the lock.
    if pid_alive(pid_path=_pid_path):
        print('%s is worked' % 'puls price converter', flush=True)
        sys.exit(0)
    import atexit
    atexit.register(pid_close)  # release the lock however we exit
    try:
        main()
    except Exception as Err:
        print(traceback.format_exc(), flush=True)
    finally:
        # NOTE(review): trailing "| ... |" text below is dataset-extraction residue, not code.
        sys.exit(0) | attcdown/puls_price_conv.py |
__appname__ = "puls_price_converter"
__version__ = "17.233.1000" #pid locking update, fix coding troubles
#__version__ = "17.229.1140" #update pathes, working with pid-file, code readability
#__version__ = "17.228.1800" #first edition
import os
import sys
import glob
import time
import email
import poplib
import traceback
from email.header import decode_header
work_dir = os.path.dirname(os.path.abspath(sys.argv[0])) #work folder
def main():
app_conf = _pre()
if not app_conf:
print('!!! Error -> no password file', flush=True)
return True
price_get(app_conf)
converter(app_conf)
return True
def _pre():
app_conf = {}
pass_path = os.path.join(work_dir, 'online365.pass') #file with password
if os.path.exists(pass_path):
with open(pass_path, 'r') as f:
data = f.read().strip().split('\n')
app_conf['user'] = data[0].strip()
app_conf['password'] = data[1].strip()
else:
return False
#28176 = puls76
app_conf['s_path'] = '/tmp/28176_tmp' #tepmorary folder
app_conf['d_path'] = '/home/plexpert/www/puls76/prices' #destination folder
app_conf['out_file'] = 'price28176-2901161825.txt' #output txt-file name
app_conf['mail_host'] = 'pop.yandex.ru' #server
app_conf['mail_port'] = 995 #port
app_conf['params'] = [
'пульс',
#'катрен'
]
if not os.path.exists(app_conf['s_path']):
os.makedirs(app_conf['s_path'], mode=0o777)
if not os.path.exists(app_conf['d_path']):
os.makedirs(app_conf['d_path'], mode=0o777)
return app_conf
def puls_parse(filename):
data = None
with open(filename, 'rb') as f_obj:
data = f_obj.read().decode('cp1251').replace(';', '\t')
# self._pHead = {u'ЦЕНА': u'PRICE1', u'КОД': u'CODEPST', u'ТОВАР': u'NAME', u'ЗАВОД': [u'FIRM', u'CNTR'], # [завод, страна]
# u'УПАК': u'QNTPACK', u'ОСТАТОК': u'QNT', u'РЕЕСТР': u'GNVLS',
# u'ГОДЕН': u'GDATE', u'КРАТЗАК': u'MULTIPLC', u'МИНЗАК': u'MINORD', u'ШТРИХ': u'EAN13', u'НДС': u'NDS'}
#YYYYMMDD - mask for expire date
header = 'CODEPST\tF2\tNAME\tF4\tPRICE1\tFIRM\tQNT\tQNTPACK\tF9\tF10\tF11\tGNVLS\tF13\tF14\tEAN13\tMULTIPLC\tGDATE\tCNTR'
ret = []
ret = _format_expire_date(data, -1)
ret.insert(0, header)
return '\n'.join(ret)
def _format_expire_date(data, date_position=-1):
data = data.strip().split('\n')
ret = []
for lin in data:
lin_data = lin.strip().split('\t')
date, _ = lin_data[-1].split()
date = date.strip().split('.')
date.reverse()
date = ''.join(date)
lin_data[-1] = date
lin_data.append(' ')
ret.append('\t'.join(lin_data))
return ret
def converter(app_conf):
for filename in get_names(app_conf["s_path"]):
filename = os.path.abspath(filename)
data = puls_parse(filename)
if data:
os.remove(filename)
try: os.rmdir(os.path.dirname(filename))
except: pass
out_f_name = os.path.join(app_conf["d_path"], app_conf['out_file'])
with open(out_f_name, 'wb') as out_f_obj:
out_f_obj.write(data.encode())
stri = f"-||converting: {os.path.basename(filename)} ->> {out_f_name} - succesfully"
print(stri, flush=True)
time.sleep(1.1)
class Mail_POP3_SSL(poplib.POP3_SSL):
def __enter__(self):
return self
def __exit__(self, *args):
try:
self.quit()
except OSError:
pass
def get_names(path='.'):
ddd = glob.glob(path+'/*')
for f_name in ddd:
yield f_name
def msg_str_parse(v, decoder):
return ''.join(w.decode(cp or 'utf8') if isinstance(w, bytes) else w for w, cp in decoder(v)) if v else ''
def price_get(app_conf):
parse_params = app_conf['params']
user = app_conf["user"]
password = app_conf["password"]
mail_host = app_conf["mail_host"]
mail_port = app_conf["mail_port"]
s_path = app_conf["s_path"]
attach = False
with Mail_POP3_SSL(host=mail_host, port=mail_port) as mail_box:
uid_list = []
mail_box.user(user)
mail_box.pass_(password)
print(f'-|connecting to mailbox {user}', flush=True)
uid_list = mail_box.uidl()[1]
while uid_list:
uid = uid_list.pop().decode().split()[0]
msg = b'\r\n'.join(mail_box.retr(uid)[1])
message = email.message_from_bytes(msg)
v = message['Subject']
s = msg_str_parse(v, decode_header)
for param in parse_params:
if param in s.lower():
for part in message.walk():
if part.get_content_maintype() == 'multipart':
continue
if not part.get('Content-Disposition'):
continue
print(f'-|mail id: {uid}, attachment found', flush=True)
filename = part.get_filename()
try: filename = decode_header(filename)[0][0].decode()
except: pass
if not filename:
filename = "unknown_name.txt"
f_path = os.path.join(s_path, filename)
with open(f_path, 'wb') as f:
f.write(part.get_payload(decode=1))
print(f'-|attachment -> {os.path.basename(f_path)} <- downloaded', flush=True)
mail_box.dele(uid) #deleting message, in production
attach = True
if attach:
print('-|all available prices downloaded', flush=True)
def pid_alive(pid=None, pid_path=None):
import sqlite3
if not pid:
pid = os.getpid()
if not pid_path:
pid_path = os.path.splitext(os.path.abspath(sys.argv[0]))[0] + '.pid'
isLocked = False
con=cur=None
try:
con = sqlite3.connect(pid_path, timeout=0.2, isolation_level='EXCLUSIVE')
con.executescript("PRAGMA page_size = 1024; PRAGMA journal_mode = MEMORY; PRAGMA synchronous = OFF;")
cur = con.cursor()
sql = "CREATE TABLE PID (ID INTEGER NOT NULL, DT TIMESTAMP NOT NULL DEFAULT CURRENCY_TIMESTAMP);"
cur.execute(sql)
con.commit()
except sqlite3.OperationalError as e:
s = e.args[0].lower()
if s.find('table pid already exists')>-1:
pass
elif s.find('database is locked')>-1:
isLocked = True
else:
print(e.__class__, e, flush=True)
except Exception as e:
print(e.__class__, e, flush=True)
finally:
if isLocked:
if con:
con.close()
con=cur=None
else:
cur.execute('INSERT INTO PID(ID)VALUES(?)', (pid,))
sys.PID = [pid, pid_path, con, cur]
return isLocked
def pid_close():
pid, pid_path, con, cur = sys.PID
sys.PID = []
if cur:
cur.close()
if con:
con.close()
try: os.remove(pid_path)
except: pass
if __name__ == '__main__':
sys.stdout = open(sys.stdout.fileno(), mode='w', encoding='UTF-8', buffering=1)
sys.stderr = open(sys.stderr.fileno(), mode='w', encoding='UTF-8', buffering=1)
_pid_path = '/tmp/' + os.path.splitext(os.path.basename(sys.argv[0]))[0] + '.lock'
if pid_alive(pid_path=_pid_path):
print('%s is worked' % 'puls price converter', flush=True)
sys.exit(0)
import atexit
atexit.register(pid_close)
try:
main()
except Exception as Err:
print(traceback.format_exc(), flush=True)
finally:
sys.exit(0) | 0.18866 | 0.062303 |
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from typing import List, Optional, Union, TYPE_CHECKING
import numpy as np
from art.estimators.classification.classifier import ClassifierGradients
if TYPE_CHECKING:
from GPy.models import GPClassification
from art.config import CLIP_VALUES_TYPE, PREPROCESSING_TYPE
from art.defences.preprocessor import Preprocessor
from art.defences.postprocessor import Postprocessor
logger = logging.getLogger(__name__)
# pylint: disable=C0103
class GPyGaussianProcessClassifier(ClassifierGradients):
"""
Wrapper class for GPy Gaussian Process classification models.
"""
def __init__(
self,
model: Optional["GPClassification"] = None,
clip_values: Optional["CLIP_VALUES_TYPE"] = None,
preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None,
postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None,
preprocessing: "PREPROCESSING_TYPE" = (0, 1),
) -> None:
"""
Create a `Classifier` instance GPY Gaussian Process classification models.
:param model: GPY Gaussian Process Classification model.
:param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed
for features.
:param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier.
:param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier.
:param preprocessing: Tuple of the form `(subtractor, divider)` of floats or `np.ndarray` of values to be
used for data preprocessing. The first value will be subtracted from the input. The input will then
be divided by the second one.
"""
from GPy.models import GPClassification
if not isinstance(model, GPClassification):
raise TypeError("Model must be of type GPy.models.GPClassification")
super(GPyGaussianProcessClassifier, self).__init__(
clip_values=clip_values,
preprocessing_defences=preprocessing_defences,
postprocessing_defences=postprocessing_defences,
preprocessing=preprocessing,
)
self._nb_classes = 2 # always binary
self._model = model
# pylint: disable=W0221
def class_gradient( # type: ignore
self, x: np.ndarray, label: Union[int, List[int], None] = None, eps: float = 0.0001,
) -> np.ndarray:
"""
Compute per-class derivatives w.r.t. `x`.
:param x: Sample input with shape as expected by the model.
:param label: Index of a specific per-class derivative. If an integer is provided, the gradient of that class
output is computed for all samples. If multiple values as provided, the first dimension should
match the batch size of `x`, and each value will be used as target for its corresponding sample in
`x`. If `None`, then gradients for all classes will be computed for each sample.
:param eps: Fraction added to the diagonal elements of the input `x`.
:return: Array of gradients of input features w.r.t. each class in the form
`(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes
`(batch_size, 1, input_shape)` when `label` parameter is specified.
"""
# Apply preprocessing
x_preprocessed, _ = self._apply_preprocessing(x, y=None, fit=False)
grads = np.zeros((np.shape(x_preprocessed)[0], 2, np.shape(x)[1]))
for i in range(np.shape(x_preprocessed)[0]):
# Get gradient for the two classes GPC can maximally have
for i_c in range(2):
ind = self.predict(x[i].reshape(1, -1))[0, i_c]
sur = self.predict(
np.repeat(x_preprocessed[i].reshape(1, -1), np.shape(x_preprocessed)[1], 0)
+ eps * np.eye(np.shape(x_preprocessed)[1])
)[:, i_c]
grads[i, i_c] = ((sur - ind) * eps).reshape(1, -1)
grads = self._apply_preprocessing_gradient(x, grads)
if label is not None:
return grads[:, label, :].reshape(np.shape(x_preprocessed)[0], 1, np.shape(x_preprocessed)[1])
return grads
def loss_gradient(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray:
"""
Compute the gradient of the loss function w.r.t. `x`.
:param x: Sample input with shape as expected by the model.
:param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices of shape
`(nb_samples,)`.
:return: Array of gradients of the same shape as `x`.
"""
# Apply preprocessing
x_preprocessed, _ = self._apply_preprocessing(x, y, fit=False)
eps = 0.00001
grads = np.zeros(np.shape(x))
for i in range(np.shape(x)[0]):
# 1.0 - to mimic loss, [0,np.argmax] to get right class
ind = 1.0 - self.predict(x_preprocessed[i].reshape(1, -1))[0, np.argmax(y[i])]
sur = (
1.0
- self.predict(
np.repeat(x_preprocessed[i].reshape(1, -1), np.shape(x_preprocessed)[1], 0)
+ eps * np.eye(np.shape(x_preprocessed)[1])
)[:, np.argmax(y[i])]
)
grads[i] = ((sur - ind) * eps).reshape(1, -1)
grads = self._apply_preprocessing_gradient(x, grads)
return grads
# pylint: disable=W0221
def predict(self, x: np.ndarray, logits: bool = False, **kwargs) -> np.ndarray:
    """
    Perform prediction for a batch of inputs.

    :param x: Test set.
    :param logits: `True` if the prediction should be done without squashing function.
    :return: Array of predictions of shape `(nb_inputs, nb_classes)`.
    """
    # Run preprocessing defences / scaling first.
    x_preprocessed, _ = self._apply_preprocessing(x, y=None, fit=False)

    # The underlying GP is binary; expand its single output into two columns.
    n_samples = np.shape(x_preprocessed)[0]
    out = np.zeros((n_samples, 2))
    if logits:
        # Non-squashed model output; second class is simply its negation.
        out[:, 0] = self.model.predict_noiseless(x_preprocessed)[0].reshape(-1)
        out[:, 1] = -1.0 * out[:, 0]
    else:
        # Squashed prediction; second class is the complement.
        out[:, 0] = self.model.predict(x_preprocessed)[0].reshape(-1)
        out[:, 1] = 1.0 - out[:, 0]

    # Run postprocessing defences last and return the result directly.
    return self._apply_postprocessing(preds=out, fit=False)
def predict_uncertainty(self, x: np.ndarray) -> np.ndarray:
    """
    Perform uncertainty prediction for a batch of inputs.

    :param x: Test set.
    :return: Array of uncertainty predictions of shape `(nb_inputs)`.
    """
    # Preprocessing defences / scaling.
    x_preprocessed, _ = self._apply_preprocessing(x, y=None, fit=False)
    # Second element returned by GPy's predict_noiseless holds the
    # predictive variance (per the GPy API).
    uncertainty = self.model.predict_noiseless(x_preprocessed)[1]
    # Postprocessing defences.
    return self._apply_postprocessing(preds=uncertainty, fit=False)
def fit(self, x: np.ndarray, y: np.ndarray, **kwargs) -> None:
    """
    Fit the classifier on the training set `(x, y)`.

    :param x: Training data. Not used, as given to model in initialized earlier.
    :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes).
    :raises NotImplementedError: always -- refitting the wrapped GPy model is not supported.
    """
    raise NotImplementedError
def save(self, filename: str, path: Optional[str] = None) -> None:
    """
    Save the wrapped GPy model to ``filename`` (training data is excluded).

    :param filename: Name of the file where the model is saved.
    :param path: Unused; kept for interface compatibility with other classifiers.
    """
    # FIX: removed dataset-extraction residue that trailed the original line
    # and broke the file's syntax.
    self.model.save_model(filename, save_data=False)
import logging
from typing import List, Optional, Union, TYPE_CHECKING
import numpy as np
from art.estimators.classification.classifier import ClassifierGradients
if TYPE_CHECKING:
from GPy.models import GPClassification
from art.config import CLIP_VALUES_TYPE, PREPROCESSING_TYPE
from art.defences.preprocessor import Preprocessor
from art.defences.postprocessor import Postprocessor
logger = logging.getLogger(__name__)
# pylint: disable=C0103
class GPyGaussianProcessClassifier(ClassifierGradients):
    """
    Wrapper class for GPy Gaussian Process classification models.

    The wrapped ``GPy.models.GPClassification`` is inherently binary, so this
    classifier always reports two classes.  Class and loss gradients are
    approximated by finite differences because GPy exposes no analytic input
    gradients.
    """

    def __init__(
        self,
        model: Optional["GPClassification"] = None,
        clip_values: Optional["CLIP_VALUES_TYPE"] = None,
        preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None,
        postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None,
        preprocessing: "PREPROCESSING_TYPE" = (0, 1),
    ) -> None:
        """
        Create a `Classifier` instance for GPy Gaussian Process classification models.

        :param model: GPy Gaussian Process Classification model.
        :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed
               for features.
        :param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier.
        :param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier.
        :param preprocessing: Tuple of the form `(subtractor, divider)` of floats or `np.ndarray` of values to be
               used for data preprocessing. The first value will be subtracted from the input. The input will then
               be divided by the second one.
        :raises TypeError: If `model` is not a `GPy.models.GPClassification` instance.
        """
        # Local import keeps GPy an optional dependency.
        from GPy.models import GPClassification

        if not isinstance(model, GPClassification):
            raise TypeError("Model must be of type GPy.models.GPClassification")
        super(GPyGaussianProcessClassifier, self).__init__(
            clip_values=clip_values,
            preprocessing_defences=preprocessing_defences,
            postprocessing_defences=postprocessing_defences,
            preprocessing=preprocessing,
        )
        self._nb_classes = 2  # GP classification is always binary
        self._model = model

    # pylint: disable=W0221
    def class_gradient(  # type: ignore
        self, x: np.ndarray, label: Union[int, List[int], None] = None, eps: float = 0.0001,
    ) -> np.ndarray:
        """
        Compute per-class derivatives w.r.t. `x` via finite differences.

        :param x: Sample input with shape as expected by the model.
        :param label: Index of a specific per-class derivative. If an integer is provided, the gradient of that class
                      output is computed for all samples. If multiple values as provided, the first dimension should
                      match the batch size of `x`, and each value will be used as target for its corresponding sample in
                      `x`. If `None`, then gradients for all classes will be computed for each sample.
        :param eps: Fraction added to the diagonal elements of the input `x`.
        :return: Array of gradients of input features w.r.t. each class in the form
                 `(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes
                 `(batch_size, 1, input_shape)` when `label` parameter is specified.
        """
        # Apply preprocessing
        x_preprocessed, _ = self._apply_preprocessing(x, y=None, fit=False)
        grads = np.zeros((np.shape(x_preprocessed)[0], 2, np.shape(x)[1]))
        for i in range(np.shape(x_preprocessed)[0]):
            # Get gradient for the two classes GPC can maximally have.
            for i_c in range(2):
                ind = self.predict(x[i].reshape(1, -1))[0, i_c]
                # One perturbed copy of sample i per feature (x_i + eps * e_j),
                # all evaluated with a single predict call.
                sur = self.predict(
                    np.repeat(x_preprocessed[i].reshape(1, -1), np.shape(x_preprocessed)[1], 0)
                    + eps * np.eye(np.shape(x_preprocessed)[1])
                )[:, i_c]
                # NOTE(review): difference is scaled by eps rather than divided -- kept as upstream.
                grads[i, i_c] = ((sur - ind) * eps).reshape(1, -1)
        grads = self._apply_preprocessing_gradient(x, grads)
        if label is not None:
            return grads[:, label, :].reshape(np.shape(x_preprocessed)[0], 1, np.shape(x_preprocessed)[1])
        return grads

    def loss_gradient(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray:
        """
        Compute the gradient of the loss function w.r.t. `x`.

        :param x: Sample input with shape as expected by the model.
        :param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices of shape
                  `(nb_samples,)`.
        :return: Array of gradients of the same shape as `x`.
        """
        # Apply preprocessing
        x_preprocessed, _ = self._apply_preprocessing(x, y, fit=False)
        eps = 0.00001  # finite-difference step size
        grads = np.zeros(np.shape(x))
        for i in range(np.shape(x)[0]):
            # 1.0 - to mimic loss, [0,np.argmax] to get right class
            ind = 1.0 - self.predict(x_preprocessed[i].reshape(1, -1))[0, np.argmax(y[i])]
            sur = (
                1.0
                - self.predict(
                    np.repeat(x_preprocessed[i].reshape(1, -1), np.shape(x_preprocessed)[1], 0)
                    + eps * np.eye(np.shape(x_preprocessed)[1])
                )[:, np.argmax(y[i])]
            )
            # NOTE(review): difference is scaled by eps rather than divided -- kept as upstream.
            grads[i] = ((sur - ind) * eps).reshape(1, -1)
        grads = self._apply_preprocessing_gradient(x, grads)
        return grads

    # pylint: disable=W0221
    def predict(self, x: np.ndarray, logits: bool = False, **kwargs) -> np.ndarray:
        """
        Perform prediction for a batch of inputs.

        :param x: Test set.
        :param logits: `True` if the prediction should be done without squashing function.
        :return: Array of predictions of shape `(nb_inputs, nb_classes)`.
        """
        # Apply preprocessing
        x_preprocessed, _ = self._apply_preprocessing(x, y=None, fit=False)
        # The underlying GP is binary: expand its single output into two columns.
        out = np.zeros((np.shape(x_preprocessed)[0], 2))
        if logits:
            # Output the non-squashed version; second class is its negation.
            out[:, 0] = self.model.predict_noiseless(x_preprocessed)[0].reshape(-1)
            out[:, 1] = -1.0 * out[:, 0]
        else:
            # Output normal prediction, scale up to two values.
            out[:, 0] = self.model.predict(x_preprocessed)[0].reshape(-1)
            out[:, 1] = 1.0 - out[:, 0]
        # Apply postprocessing
        return self._apply_postprocessing(preds=out, fit=False)

    def predict_uncertainty(self, x: np.ndarray) -> np.ndarray:
        """
        Perform uncertainty prediction for a batch of inputs.

        :param x: Test set.
        :return: Array of uncertainty predictions of shape `(nb_inputs)`.
        """
        # Apply preprocessing
        x_preprocessed, _ = self._apply_preprocessing(x, y=None, fit=False)
        # Second output of GPy's predict_noiseless is the predictive variance.
        out = self.model.predict_noiseless(x_preprocessed)[1]
        # Apply postprocessing
        return self._apply_postprocessing(preds=out, fit=False)

    def fit(self, x: np.ndarray, y: np.ndarray, **kwargs) -> None:
        """
        Fit the classifier on the training set `(x, y)`.

        :param x: Training data. Not used, as given to model in initialized earlier.
        :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes).
        :raises NotImplementedError: always -- refitting the wrapped GPy model is not supported.
        """
        raise NotImplementedError

    def save(self, filename: str, path: Optional[str] = None) -> None:
        """
        Save the wrapped GPy model to ``filename`` (training data is excluded).

        :param filename: Name of the file where the model is saved.
        :param path: Unused; kept for interface compatibility with other classifiers.
        """
        # FIX: removed dataset-extraction residue that trailed the original final line.
        self.model.save_model(filename, save_data=False)
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
# Normalise CIFAR-10 images: ToTensor maps to [0, 1]; (x - 0.5) / 0.5 maps to [-1, 1].
transform = transforms.Compose(
    [
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]
)
# Download (if needed) and wrap the CIFAR-10 train/test splits in DataLoaders.
trainset = torchvision.datasets.CIFAR10(root="./data", train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root="./data", train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=2)
# Human-readable names for the 10 CIFAR-10 class indices.
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
class NetDemo(nn.Module):
    """LeNet-style CNN mapping (N, 3, 32, 32) images to (N, 10) class logits."""

    def __init__(self):
        # BUG FIX: original called ``supper(...)`` (typo, NameError at runtime);
        # nn.Module must be initialised before submodules are assigned.
        super(NetDemo, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=5, stride=1, padding=0, bias=True)
        self.maxpool = nn.MaxPool2d(kernel_size=2)
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1, padding=0, bias=True)
        self.fc1 = nn.Linear(in_features=16*5*5, out_features=120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Forward pass; returns raw logits (CrossEntropyLoss expects logits)."""
        x = self.maxpool(self.relu(self.conv1(x)))  # -> (N, 6, 14, 14)
        x = self.maxpool(self.relu(self.conv2(x)))  # -> (N, 16, 5, 5)
        x = x.view(-1, 16*5*5)                      # flatten
        x = self.relu(self.fc1(x))
        x = self.relu(self.fc2(x))
        return self.fc3(x)
# training
net = NetDemo()
# BUG FIX: original line was a syntax error -- torch.device("cuda:0", if ...).
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
net.to(device)

# IMPROVED: loss and optimizer are created ONCE, outside the loops.  The
# original rebuilt the SGD optimizer every mini-batch, which discarded the
# momentum buffers and made ``momentum=0.9`` a no-op.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

for epoch in range(2):
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)
        # reset gradient to zero
        optimizer.zero_grad()
        # forward
        outputs = net(inputs)
        # calculate loss
        loss = criterion(outputs, labels)
        # backward
        loss.backward()
        # update weight
        optimizer.step()
        running_loss += loss.item()
        if i % 2000 == 1999:  # report every 2000 mini-batches
            print("[%d, %5d] loss: %.3f" % (epoch+1, i+1, running_loss / 2000))
            running_loss = 0.0
print("training finished")
# testing: overall accuracy on the 10000 test images
correct = 0
total = 0
with torch.no_grad():
    for data in testloader:
        images, labels = data
        # BUG FIX: original moved ``inputs`` (a stale variable left over from
        # the training loop) to the device instead of ``images``.
        images, labels = images.to(device), labels.to(device)
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))

# per-class accuracy
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
    for data in testloader:
        images, labels = data
        # BUG FIX: same stale-variable bug as above (inputs -> images).
        images, labels = images.to(device), labels.to(device)
        outputs = net(images)
        _, predicted = torch.max(outputs, 1)
        c = (predicted == labels).squeeze()
        for i in range(4):  # test batch_size == 4
            label = labels[i]
            class_correct[label] += c[i].item()
            class_total[label] += 1
for i in range(10):
    print('Accuracy of %5s : %2d %%' % (
        classes[i], 100 * class_correct[i] / class_total[i]))
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
# Normalise CIFAR-10 images: ToTensor maps to [0, 1]; (x - 0.5) / 0.5 maps to [-1, 1].
transform = transforms.Compose(
    [
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]
)
# Download (if needed) and wrap the CIFAR-10 train/test splits in DataLoaders.
trainset = torchvision.datasets.CIFAR10(root="./data", train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root="./data", train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=2)
# Human-readable names for the 10 CIFAR-10 class indices.
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
class NetDemo(nn.Module):
    """LeNet-style CNN mapping (N, 3, 32, 32) images to (N, 10) class logits."""

    def __init__(self):
        # BUG FIX: original called ``supper(...)`` (typo, NameError at runtime);
        # nn.Module must be initialised before submodules are assigned.
        super(NetDemo, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=5, stride=1, padding=0, bias=True)
        self.maxpool = nn.MaxPool2d(kernel_size=2)
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1, padding=0, bias=True)
        self.fc1 = nn.Linear(in_features=16*5*5, out_features=120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Forward pass; returns raw logits (CrossEntropyLoss expects logits)."""
        x = self.maxpool(self.relu(self.conv1(x)))  # -> (N, 6, 14, 14)
        x = self.maxpool(self.relu(self.conv2(x)))  # -> (N, 16, 5, 5)
        x = x.view(-1, 16*5*5)                      # flatten
        x = self.relu(self.fc1(x))
        x = self.relu(self.fc2(x))
        return self.fc3(x)
# training
net = NetDemo()
# BUG FIX: original line was a syntax error -- torch.device("cuda:0", if ...).
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
net.to(device)

# IMPROVED: loss and optimizer are created ONCE, outside the loops.  The
# original rebuilt the SGD optimizer every mini-batch, which discarded the
# momentum buffers and made ``momentum=0.9`` a no-op.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

for epoch in range(2):
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)
        # reset gradient to zero
        optimizer.zero_grad()
        # forward
        outputs = net(inputs)
        # calculate loss
        loss = criterion(outputs, labels)
        # backward
        loss.backward()
        # update weight
        optimizer.step()
        running_loss += loss.item()
        if i % 2000 == 1999:  # report every 2000 mini-batches
            print("[%d, %5d] loss: %.3f" % (epoch+1, i+1, running_loss / 2000))
            running_loss = 0.0
print("training finished")
# testing: overall accuracy on the 10000 test images
correct = 0
total = 0
with torch.no_grad():
    for data in testloader:
        images, labels = data
        # BUG FIX: original moved ``inputs`` (a stale variable left over from
        # the training loop) to the device instead of ``images``.
        images, labels = images.to(device), labels.to(device)
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))

# per-class accuracy
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
    for data in testloader:
        images, labels = data
        # BUG FIX: same stale-variable bug as above (inputs -> images).
        images, labels = images.to(device), labels.to(device)
        outputs = net(images)
        _, predicted = torch.max(outputs, 1)
        c = (predicted == labels).squeeze()
        for i in range(4):  # test batch_size == 4
            label = labels[i]
            class_correct[label] += c[i].item()
            class_total[label] += 1
# FIX: removed dataset-extraction residue that trailed the original final line.
for i in range(10):
    print('Accuracy of %5s : %2d %%' % (
        classes[i], 100 * class_correct[i] / class_total[i]))
import datetime
import matplotlib.pyplot as plt
from decimal import Decimal
def gps_report(gpsdata, goodsats, inps):
    """Write a fixed-width text report of GPS SV positions, velocities and clock biases.

    For every epoch in ``gpsdata`` one '*'-prefixed timestamp record is written,
    followed by one line per usable satellite.

    :param gpsdata: {epoch datetime: {PRN: {'px','py','pz','vx','vy','vz','clkb'}}}.
    :param goodsats: iterable of usable PRN numbers.
    :param inps: inputs dict; only 'cwd' is read (Windows-style output path).
    :return: None.
    """
    cwd = inps['cwd'] # Get current main working directory
    # IMPROVED: context manager guarantees the file is closed even if a
    # formatting error is raised part-way through the report.
    with open(cwd+'\\output\\gps_report\\GPS_Report.txt', 'w') as file_path:
        # Header row (column widths match the padded fields below).
        line = 'G          '
        line += '    Pos_X (km)    '
        line += '    Pos_Y (km)    '
        line += '    Pos_Z (km)    '
        line += '   Vel_X (km/s)   '
        line += '   Vel_Y (km/s)   '
        line += '   Vel_Z (km/s)   '
        line += '     Clk_Bias     \n'
        file_path.write(line)
        # It's all string formatting from here... nothing scientific.
        for t in gpsdata:
            # Epoch record: '* YYYY MM DD hh mm ss'.
            line = '\n'
            line += '*  '
            line += str(t.year) + ' '
            line += str(t.month) + ' '
            line += str(t.day) + ' '
            line += str(t.hour) + ' '
            line += str(t.minute) + ' '
            line += str(t.second) + '\n'
            file_path.write(line)
            for p in goodsats:
                # Two-digit PRN.
                if p < 10:
                    pstr = '0' + str(p)
                else:
                    pstr = str(p)
                # Write in position information: pad to 3 decimals and a
                # 9-character integer part.
                line = 'G' + pstr + ' '
                for coord in ['x','y','z']:
                    pos = str(gpsdata[t][p]['p' + coord])
                    dot = pos.index('.')
                    if len(pos[dot:]) > 4:
                        pos = pos[:dot+4]
                    while len(pos[dot:]) < 4:
                        pos = pos + '0'
                    while len(pos[:dot]) < 9:
                        pos = ' ' + pos
                        dot = pos.index('.')
                    pos = pos + ' '
                    line += pos
                # Velocity fields, same padding scheme.
                for coord in ['x','y','z']:
                    vel = str(gpsdata[t][p]['v' + coord])
                    dot = vel.index('.')
                    if len(vel[dot:]) > 4:
                        vel = vel[:dot+4]
                    while len(vel[dot:]) < 4:
                        vel = vel + '0'
                    while len(vel[:dot]) < 9:
                        vel = ' ' + vel
                        dot = vel.index('.')
                    vel = vel + ' '
                    line += vel
                # Clock bias in scientific notation, left-padded to 2 chars
                # before the decimal point.
                b = '%.9E' % Decimal(str(gpsdata[t][p]['clkb']))
                dot = b.index('.')
                while len(b[:dot]) < 2:
                    b = ' ' + b
                    dot = b.index('.')
                line += str(b)
                line += ' \n'
                file_path.write(line)
    return None
def gps_graphs(SV, t_usr_dt, t_usr_ss, gpsdata, inps):
    """Plot position / velocity / clock-bias time series for one GPS SV to a PNG.

    :param SV: PRN number of the satellite to plot.
    :param t_usr_dt: epochs as datetime objects (keys into gpsdata).
    :param t_usr_ss: the same epochs in seconds, used as the x-axis.
    :param gpsdata: {epoch: {PRN: {'px','py','pz','vx','vy','vz','clkb'}}}.
    :param inps: inputs dict; only 'cwd' is read (Windows-style output path).
    :return: None; figure is saved under <cwd>\\output\\gps_plots\\.
    """
    cwd = inps['cwd'] # Get current main working directory
    # Turn off interactive plotting
    plt.ioff()
    # Initialise the 1x3 subplot for PVT data.
    fig, (ax1, ax2, ax3) = plt.subplots(3,1,figsize=(12,8))
    # Get the positions, velocities, and clock biases.
    px = [gpsdata[t][SV]['px'] for t in t_usr_dt]
    py = [gpsdata[t][SV]['py'] for t in t_usr_dt]
    pz = [gpsdata[t][SV]['pz'] for t in t_usr_dt]
    vx = [gpsdata[t][SV]['vx'] for t in t_usr_dt]
    vy = [gpsdata[t][SV]['vy'] for t in t_usr_dt]
    vz = [gpsdata[t][SV]['vz'] for t in t_usr_dt]
    clkb = [gpsdata[t][SV]['clkb'] for t in t_usr_dt]
    # Position plots
    ax1.set_title('SV ' + str(SV) + ' Position (km)')
    ax1.plot(t_usr_ss, px, c = 'r', label='X')
    ax1.plot(t_usr_ss, py, c = 'g', label='Y')
    ax1.plot(t_usr_ss, pz, c = 'b', label='Z')
    ax1.legend(loc='lower right')
    # Velocity plots
    ax2.set_title('SV ' + str(SV) + ' Velocity (km/s)')
    ax2.plot(t_usr_ss, vx, c = 'r', label='X')
    ax2.plot(t_usr_ss, vy, c = 'g', label='Y')
    ax2.plot(t_usr_ss, vz, c = 'b', label='Z')
    ax2.legend(loc='lower right')
    # Clock bias plots
    ax3.set_title('SV ' + str(SV) + ' Clock bias (s)')
    ax3.plot(t_usr_ss, clkb, c = 'k', label='Bias')
    ax3.legend(loc="right")
    # Tight-spaced plot
    plt.tight_layout()
    plt.savefig(cwd + '\\output\\gps_plots\\GPS_SV' + str(SV) + '_PVT.png')
    # Close this figure
    plt.close(fig)
    return None
def leo_results(results, inps):
    """Write the final LEOGPS results report (both LEOs plus their baseline).

    :param results: {epoch: iterable of vectors; the first 3 components of each
                     vector are formatted into fixed-width columns}.
    :param inps: inputs dict; reads 'cwd', 'name1' and 'name2'.
    :return: None; output is written to <cwd>\\output\\LEOGPS_Results.txt.
    """
    print('Saving final report on both LEOs and their baselines \n')
    cwd = inps['cwd'] # Get current main working directory
    # IMPROVED: context manager guarantees the results file is closed even if
    # a formatting error is raised part-way through.
    with open(cwd+'\\output\\LEOGPS_Results.txt', 'w') as file_path:
        line = 'Date        '
        line += ' Time      '
        # Headers for LEO 1
        line += inps['name1'] + '_PosX  '
        line += inps['name1'] + '_PosY  '
        line += inps['name1'] + '_PosZ  '
        line += inps['name1'] + '_VelX  '
        line += inps['name1'] + '_VelY  '
        line += inps['name1'] + '_VelZ  '
        line += inps['name1'] + '_GDOP  '
        line += inps['name1'] + '_PDOP  '
        line += inps['name1'] + '_TDOP  '
        # Headers for LEO 2
        line += inps['name2'] + '_PosX  '
        line += inps['name2'] + '_PosY  '
        line += inps['name2'] + '_PosZ  '
        line += inps['name2'] + '_VelX  '
        line += inps['name2'] + '_VelY  '
        line += inps['name2'] + '_VelZ  '
        line += inps['name2'] + '_GDOP  '
        line += inps['name2'] + '_PDOP  '
        line += inps['name2'] + '_TDOP  '
        # Headers for baseline information
        line += 'RelativeX  '
        line += 'RelativeY  '
        line += 'RelativeZ  '
        line += '\n'
        file_path.write(line)
        # It's all string formatting from here... nothing scientific.
        for t in results:
            line = str(t)
            for vector in results[t]:
                # Pad the first three components to 3 decimals and a
                # 10-character integer part.
                for value in vector[:3]:
                    svalue = str(value)
                    dot = svalue.index('.')
                    if len(svalue[dot:]) > 4:
                        svalue = svalue[:dot+4]
                    while len(svalue[dot:]) < 4:
                        svalue = svalue + '0'
                    while len(svalue[:dot]) < 10:
                        svalue = ' ' + svalue
                        dot = svalue.index('.')
                    line += svalue
            line += ' \n'
            file_path.write(line)
    print('Completed processing in LEOGPS! Output file stored:')
    print(cwd+'\\output\\LEOGPS_Results.txt \n')
    # FIX: removed dataset-extraction residue that trailed the original final line.
    return None
import matplotlib.pyplot as plt
from decimal import Decimal
def gps_report(gpsdata, goodsats, inps):
    """Write a fixed-width text report of GPS SV positions, velocities and clock biases.

    For every epoch in ``gpsdata`` one '*'-prefixed timestamp record is written,
    followed by one line per usable satellite.

    :param gpsdata: {epoch datetime: {PRN: {'px','py','pz','vx','vy','vz','clkb'}}}.
    :param goodsats: iterable of usable PRN numbers.
    :param inps: inputs dict; only 'cwd' is read (Windows-style output path).
    :return: None.
    """
    cwd = inps['cwd'] # Get current main working directory
    # IMPROVED: context manager guarantees the file is closed even if a
    # formatting error is raised part-way through the report.
    with open(cwd+'\\output\\gps_report\\GPS_Report.txt', 'w') as file_path:
        # Header row (column widths match the padded fields below).
        line = 'G          '
        line += '    Pos_X (km)    '
        line += '    Pos_Y (km)    '
        line += '    Pos_Z (km)    '
        line += '   Vel_X (km/s)   '
        line += '   Vel_Y (km/s)   '
        line += '   Vel_Z (km/s)   '
        line += '     Clk_Bias     \n'
        file_path.write(line)
        # It's all string formatting from here... nothing scientific.
        for t in gpsdata:
            # Epoch record: '* YYYY MM DD hh mm ss'.
            line = '\n'
            line += '*  '
            line += str(t.year) + ' '
            line += str(t.month) + ' '
            line += str(t.day) + ' '
            line += str(t.hour) + ' '
            line += str(t.minute) + ' '
            line += str(t.second) + '\n'
            file_path.write(line)
            for p in goodsats:
                # Two-digit PRN.
                if p < 10:
                    pstr = '0' + str(p)
                else:
                    pstr = str(p)
                # Write in position information: pad to 3 decimals and a
                # 9-character integer part.
                line = 'G' + pstr + ' '
                for coord in ['x','y','z']:
                    pos = str(gpsdata[t][p]['p' + coord])
                    dot = pos.index('.')
                    if len(pos[dot:]) > 4:
                        pos = pos[:dot+4]
                    while len(pos[dot:]) < 4:
                        pos = pos + '0'
                    while len(pos[:dot]) < 9:
                        pos = ' ' + pos
                        dot = pos.index('.')
                    pos = pos + ' '
                    line += pos
                # Velocity fields, same padding scheme.
                for coord in ['x','y','z']:
                    vel = str(gpsdata[t][p]['v' + coord])
                    dot = vel.index('.')
                    if len(vel[dot:]) > 4:
                        vel = vel[:dot+4]
                    while len(vel[dot:]) < 4:
                        vel = vel + '0'
                    while len(vel[:dot]) < 9:
                        vel = ' ' + vel
                        dot = vel.index('.')
                    vel = vel + ' '
                    line += vel
                # Clock bias in scientific notation, left-padded to 2 chars
                # before the decimal point.
                b = '%.9E' % Decimal(str(gpsdata[t][p]['clkb']))
                dot = b.index('.')
                while len(b[:dot]) < 2:
                    b = ' ' + b
                    dot = b.index('.')
                line += str(b)
                line += ' \n'
                file_path.write(line)
    return None
def gps_graphs(SV, t_usr_dt, t_usr_ss, gpsdata, inps):
    """Plot position / velocity / clock-bias time series for one GPS SV to a PNG.

    :param SV: PRN number of the satellite to plot.
    :param t_usr_dt: epochs as datetime objects (keys into gpsdata).
    :param t_usr_ss: the same epochs in seconds, used as the x-axis.
    :param gpsdata: {epoch: {PRN: {'px','py','pz','vx','vy','vz','clkb'}}}.
    :param inps: inputs dict; only 'cwd' is read (Windows-style output path).
    :return: None; figure is saved under <cwd>\\output\\gps_plots\\.
    """
    cwd = inps['cwd'] # Get current main working directory
    # Turn off interactive plotting
    plt.ioff()
    # Initialise the 1x3 subplot for PVT data.
    fig, (ax1, ax2, ax3) = plt.subplots(3,1,figsize=(12,8))
    # Get the positions, velocities, and clock biases.
    px = [gpsdata[t][SV]['px'] for t in t_usr_dt]
    py = [gpsdata[t][SV]['py'] for t in t_usr_dt]
    pz = [gpsdata[t][SV]['pz'] for t in t_usr_dt]
    vx = [gpsdata[t][SV]['vx'] for t in t_usr_dt]
    vy = [gpsdata[t][SV]['vy'] for t in t_usr_dt]
    vz = [gpsdata[t][SV]['vz'] for t in t_usr_dt]
    clkb = [gpsdata[t][SV]['clkb'] for t in t_usr_dt]
    # Position plots
    ax1.set_title('SV ' + str(SV) + ' Position (km)')
    ax1.plot(t_usr_ss, px, c = 'r', label='X')
    ax1.plot(t_usr_ss, py, c = 'g', label='Y')
    ax1.plot(t_usr_ss, pz, c = 'b', label='Z')
    ax1.legend(loc='lower right')
    # Velocity plots
    ax2.set_title('SV ' + str(SV) + ' Velocity (km/s)')
    ax2.plot(t_usr_ss, vx, c = 'r', label='X')
    ax2.plot(t_usr_ss, vy, c = 'g', label='Y')
    ax2.plot(t_usr_ss, vz, c = 'b', label='Z')
    ax2.legend(loc='lower right')
    # Clock bias plots
    ax3.set_title('SV ' + str(SV) + ' Clock bias (s)')
    ax3.plot(t_usr_ss, clkb, c = 'k', label='Bias')
    ax3.legend(loc="right")
    # Tight-spaced plot
    plt.tight_layout()
    plt.savefig(cwd + '\\output\\gps_plots\\GPS_SV' + str(SV) + '_PVT.png')
    # Close this figure
    plt.close(fig)
    return None
def leo_results(results, inps):
    """Write the final LEOGPS results report (both LEOs plus their baseline).

    :param results: {epoch: iterable of vectors; the first 3 components of each
                     vector are formatted into fixed-width columns}.
    :param inps: inputs dict; reads 'cwd', 'name1' and 'name2'.
    :return: None; output is written to <cwd>\\output\\LEOGPS_Results.txt.
    """
    print('Saving final report on both LEOs and their baselines \n')
    cwd = inps['cwd'] # Get current main working directory
    # IMPROVED: context manager guarantees the results file is closed even if
    # a formatting error is raised part-way through.
    with open(cwd+'\\output\\LEOGPS_Results.txt', 'w') as file_path:
        line = 'Date        '
        line += ' Time      '
        # Headers for LEO 1
        line += inps['name1'] + '_PosX  '
        line += inps['name1'] + '_PosY  '
        line += inps['name1'] + '_PosZ  '
        line += inps['name1'] + '_VelX  '
        line += inps['name1'] + '_VelY  '
        line += inps['name1'] + '_VelZ  '
        line += inps['name1'] + '_GDOP  '
        line += inps['name1'] + '_PDOP  '
        line += inps['name1'] + '_TDOP  '
        # Headers for LEO 2
        line += inps['name2'] + '_PosX  '
        line += inps['name2'] + '_PosY  '
        line += inps['name2'] + '_PosZ  '
        line += inps['name2'] + '_VelX  '
        line += inps['name2'] + '_VelY  '
        line += inps['name2'] + '_VelZ  '
        line += inps['name2'] + '_GDOP  '
        line += inps['name2'] + '_PDOP  '
        line += inps['name2'] + '_TDOP  '
        # Headers for baseline information
        line += 'RelativeX  '
        line += 'RelativeY  '
        line += 'RelativeZ  '
        line += '\n'
        file_path.write(line)
        # It's all string formatting from here... nothing scientific.
        for t in results:
            line = str(t)
            for vector in results[t]:
                # Pad the first three components to 3 decimals and a
                # 10-character integer part.
                for value in vector[:3]:
                    svalue = str(value)
                    dot = svalue.index('.')
                    if len(svalue[dot:]) > 4:
                        svalue = svalue[:dot+4]
                    while len(svalue[dot:]) < 4:
                        svalue = svalue + '0'
                    while len(svalue[:dot]) < 10:
                        svalue = ' ' + svalue
                        dot = svalue.index('.')
                    line += svalue
            line += ' \n'
            file_path.write(line)
    print('Completed processing in LEOGPS! Output file stored:')
    print(cwd+'\\output\\LEOGPS_Results.txt \n')
    # FIX: removed dataset-extraction residue that trailed the original final line.
    return None
from __future__ import unicode_literals
# Normalize python2 and python3 vacaboulary
# http://www.rfk.id.au/blog/entry/preparing-pyenchant-for-python-3/
try:
is_python2 = str != unicode
except NameError:
# 'unicode' is undefined, must be Python 3
is_python2 = False
unicode = str
basestring = (str, bytes)
else:
# 'unicode' exists, must be Python 2
bytes = str
def validate(template, unvalidated, quiet=False, **kwargs):
    """Recursively check ``unvalidated`` against ``template``.

    Template semantics:
      * tuple  -- alternatives: any one member may match;
      * dict   -- every template key must validate against the candidate's value
                  (extra candidate keys are allowed);
      * list   -- template[0] describes every element of the candidate list;
      * type   -- candidate must be an instance of it;
      * None   -- wildcard, matches anything;
      * other  -- candidate must compare equal.

    :param template: structure describing the expected shape/values.
    :param unvalidated: candidate object to validate.
    :param quiet: if True, return False on mismatch instead of raising.
        Note: ``quiet`` applies only at this level; recursive calls raise and
        the exception is converted here.
    :param kwargs: flags, e.g. ``fuzzy_string_typing`` lets str templates
        accept any ``basestring`` (bytes or str) for Python 2 compatibility.
    :return: True when valid; False only when invalid and ``quiet`` is set.
    :raises FailedValidationError: on mismatch when ``quiet`` is false.
    """
    try:
        if isinstance(template, tuple):
            # We have multiple options on the template level.
            valid = False
            for template_option in template:
                try:
                    valid = validate(template_option, unvalidated, **kwargs)
                    if valid:
                        break
                except FailedValidationError:
                    pass
            if valid:
                return True
            else:
                raise FailedValidationError("None of {0} in template match topmost level of {1}".format(template, unvalidated))
        elif isinstance(template, dict) and isinstance(unvalidated, dict):
            # Two dictionaries. Compare key-by-key!
            # IDIOM: generator instead of a throwaway list inside all().
            if all(validate(template[key], unvalidated.get(key), **kwargs) for key in template):
                return True
            else:
                raise FailedValidationError("{0} in template did not match topmost level of {1}".format(template, unvalidated))
        elif isinstance(template, list) and isinstance(unvalidated, list):
            # Two lists. The template list should have one element to demonstrate its members'
            # structure. This can be a tuple.
            if all(validate(template[0], item, **kwargs) for item in unvalidated):
                return True
            else:
                raise FailedValidationError("Not all list items in {0} matched template {1}".format(unvalidated, template))
        elif isinstance(template, type):
            # Template declared a type. Time to compare values.
            if template in (str, unicode) and kwargs.get('fuzzy_string_typing'):
                template = basestring
            if isinstance(unvalidated, template):
                return True
            else:
                raise FailedValidationError("{0} is not of type {1}".format(unvalidated, template))
        else:
            # Literal comparison; a None template is a wildcard.
            if template == unvalidated or template is None:
                return True
            else:
                raise FailedValidationError("{0} is not equal to {1}".format(unvalidated, template))
    except FailedValidationError as e:
        if quiet:
            return False
        else:
            raise e
class FailedValidationError(Exception):
    """Raised by :func:`validate` when the candidate does not match the template."""
    pass
def deep_merge(base, incoming):
    """Recursively merge ``incoming`` into ``base`` (mutating it) and return the result.

    Wherever both sides hold a dict the merge recurses; for any other pair the
    value from ``incoming`` wins outright.
    """
    # Non-dict on either side: incoming simply replaces base.
    if not (isinstance(base, dict) and isinstance(incoming, dict)):
        return incoming
    for key, value in incoming.items():
        base[key] = deep_merge(base[key], value) if key in base else value
    return base
# Normalize python2 and python3 vacaboulary
# http://www.rfk.id.au/blog/entry/preparing-pyenchant-for-python-3/
try:
is_python2 = str != unicode
except NameError:
# 'unicode' is undefined, must be Python 3
is_python2 = False
unicode = str
basestring = (str, bytes)
else:
# 'unicode' exists, must be Python 2
bytes = str
def validate(template, unvalidated, quiet=False, **kwargs):
    """Recursively check ``unvalidated`` against ``template``.

    Template semantics:
      * tuple  -- alternatives: any one member may match;
      * dict   -- every template key must validate against the candidate's value
                  (extra candidate keys are allowed);
      * list   -- template[0] describes every element of the candidate list;
      * type   -- candidate must be an instance of it;
      * None   -- wildcard, matches anything;
      * other  -- candidate must compare equal.

    :param template: structure describing the expected shape/values.
    :param unvalidated: candidate object to validate.
    :param quiet: if True, return False on mismatch instead of raising.
        Note: ``quiet`` applies only at this level; recursive calls raise and
        the exception is converted here.
    :param kwargs: flags, e.g. ``fuzzy_string_typing`` lets str templates
        accept any ``basestring`` (bytes or str) for Python 2 compatibility.
    :return: True when valid; False only when invalid and ``quiet`` is set.
    :raises FailedValidationError: on mismatch when ``quiet`` is false.
    """
    try:
        if isinstance(template, tuple):
            # We have multiple options on the template level.
            valid = False
            for template_option in template:
                try:
                    valid = validate(template_option, unvalidated, **kwargs)
                    if valid:
                        break
                except FailedValidationError:
                    pass
            if valid:
                return True
            else:
                raise FailedValidationError("None of {0} in template match topmost level of {1}".format(template, unvalidated))
        elif isinstance(template, dict) and isinstance(unvalidated, dict):
            # Two dictionaries. Compare key-by-key!
            # IDIOM: generator instead of a throwaway list inside all().
            if all(validate(template[key], unvalidated.get(key), **kwargs) for key in template):
                return True
            else:
                raise FailedValidationError("{0} in template did not match topmost level of {1}".format(template, unvalidated))
        elif isinstance(template, list) and isinstance(unvalidated, list):
            # Two lists. The template list should have one element to demonstrate its members'
            # structure. This can be a tuple.
            if all(validate(template[0], item, **kwargs) for item in unvalidated):
                return True
            else:
                raise FailedValidationError("Not all list items in {0} matched template {1}".format(unvalidated, template))
        elif isinstance(template, type):
            # Template declared a type. Time to compare values.
            if template in (str, unicode) and kwargs.get('fuzzy_string_typing'):
                template = basestring
            if isinstance(unvalidated, template):
                return True
            else:
                raise FailedValidationError("{0} is not of type {1}".format(unvalidated, template))
        else:
            # Literal comparison; a None template is a wildcard.
            if template == unvalidated or template is None:
                return True
            else:
                raise FailedValidationError("{0} is not equal to {1}".format(unvalidated, template))
    except FailedValidationError as e:
        if quiet:
            return False
        else:
            raise e
class FailedValidationError(Exception):
    """Raised by :func:`validate` when the candidate does not match the template."""
    pass
def deep_merge(base, incoming):
    """Recursively merge ``incoming`` into ``base`` (mutating it) and return the result.

    Wherever both sides hold a dict the merge recurses; for any other pair the
    value from ``incoming`` wins outright.
    """
    if not isinstance(base, dict) or not isinstance(incoming, dict):
        return incoming
    for key in incoming:
        if key in base:
            base[key] = deep_merge(base[key], incoming[key])
        else:
            base[key] = incoming[key]
    # FIX: original final line carried dataset-extraction residue after
    # ``return base`` (it evaluated undefined names and raised NameError).
    return base
# pylint: disable=no-self-argument
import re
from typing import Dict, List, Optional
from pydantic import BaseModel, validator
from app.core.security.password import validate_password
from app.helpers.expressions import VALID_EMAIL
class GroupBase(BaseModel):
""" Base Schema for Groups """
name: str
description: Optional[str] = None
class GroupBaseDB(GroupBase):
""" Base Schema for DB """
id: int
class Group(GroupBaseDB):
""" Final Schema for API """
class UserBasic(BaseModel):
""" Basic user info - combine with id to give clients linking and list abilities """
first_name: Optional[str] = None
last_name: Optional[str] = None
class UserBase(UserBasic):
    """ Base Schema for User with optional data to be collected"""
    username: Optional[str] = None          # alphanumeric login name
    email: Optional[str] = None             # must match VALID_EMAIL when provided
    is_active: Optional[bool] = True        # disabled accounts set this False
    is_superuser: Optional[bool] = False    # grants elevated privileges

    @validator('email')
    def validate_email(cls, value):
        """ validates the email provided is valid form """
        # None is allowed (field is optional); only non-None values are checked.
        if value is not None and not re.search(VALID_EMAIL, value):
            raise ValueError('email address is not valid')
        return value

    @validator('username')
    def validate_username(cls, value):
        """ validates the username is alphanumeric """
        # None is allowed (field is optional); only non-None values are checked.
        if value is not None and not value.isalnum():
            raise ValueError('username must be alphanumeric')
        return value
class UserBaseDB(UserBase):
""" Base Schema for User after DB save to return most non sensitive data """
id: int = None
class UserList(UserBasic):
""" Add ID into UserBasic so we can provide a list for linking and name building """
id: int
def password(value: str, values: Dict[str, str]) -> str:
    """ make sure the password supplied meets our criteria

    Used as a pydantic validator body: ``value`` is the confirmation field and
    ``values`` holds the previously-validated fields (including 'password').

    :raises ValueError: if the confirmation differs or the password is rejected.
    """
    # We will assume all attempts will fail so start with least intense first
    if 'password' not in values or value != values['password']:
        raise ValueError('passwords do not match')
    # Validate returns True if valid, or raises Value error if not
    validate_password(value)
    return value
class UserCreate(UserBase):
""" Add required fields required to create a user """
password: str
password_validate: str
_validate_password = validator('password_validate', allow_reuse=True, always=True)(password)
class UserUpdate(UserBaseDB):
""" Schema to allow user to update password """
password: Optional[str] = None
class User(UserBaseDB):
""" Does not include hashed password, could include other extra's """
groups: Optional[List] = None
class UserDB(UserBaseDB):
""" Final DB Object """
hashed_password: str
class UserDBCreate(UserBase):
""" Object to save in the database / does not include key """
hashed_password: Optional[str] = None | src/app/schema/auth.py | # pylint: disable=no-self-argument
import re
from typing import Dict, List, Optional
from pydantic import BaseModel, validator
from app.core.security.password import validate_password
from app.helpers.expressions import VALID_EMAIL
class GroupBase(BaseModel):
""" Base Schema for Groups """
name: str
description: Optional[str] = None
class GroupBaseDB(GroupBase):
""" Base Schema for DB """
id: int
class Group(GroupBaseDB):
""" Final Schema for API """
class UserBasic(BaseModel):
""" Basic user info - combine with id to give clients linking and list abilities """
first_name: Optional[str] = None
last_name: Optional[str] = None
class UserBase(UserBasic):
    """ Base Schema for User with optional data to be collected"""
    username: Optional[str] = None          # alphanumeric login name
    email: Optional[str] = None             # must match VALID_EMAIL when provided
    is_active: Optional[bool] = True        # disabled accounts set this False
    is_superuser: Optional[bool] = False    # grants elevated privileges

    @validator('email')
    def validate_email(cls, value):
        """ validates the email provided is valid form """
        # None is allowed (field is optional); only non-None values are checked.
        if value is not None and not re.search(VALID_EMAIL, value):
            raise ValueError('email address is not valid')
        return value

    @validator('username')
    def validate_username(cls, value):
        """ validates the username is alphanumeric """
        # None is allowed (field is optional); only non-None values are checked.
        if value is not None and not value.isalnum():
            raise ValueError('username must be alphanumeric')
        return value
class UserBaseDB(UserBase):
""" Base Schema for User after DB save to return most non sensitive data """
id: int = None
class UserList(UserBasic):
""" Add ID into UserBasic so we can provide a list for linking and name building """
id: int
def password(value: str, values: Dict[str, str]) -> str:
""" make sure the password supplied meets our criteria """
# We will assume all attempts will fail so start with least intense first
if 'password' not in values or value != values['password']:
raise ValueError('passwords do not match')
# Validate returns True if valid, or raises Value error if not
validate_password(value)
return value
class UserCreate(UserBase):
""" Add required fields required to create a user """
password: str
password_validate: str
_validate_password = validator('password_validate', allow_reuse=True, always=True)(password)
class UserUpdate(UserBaseDB):
""" Schema to allow user to update password """
password: Optional[str] = None
class User(UserBaseDB):
""" Does not include hashed password, could include other extra's """
groups: Optional[List] = None
class UserDB(UserBaseDB):
""" Final DB Object """
hashed_password: str
class UserDBCreate(UserBase):
""" Object to save in the database / does not include key """
hashed_password: Optional[str] = None | 0.803868 | 0.242172 |
import numpy as np
from ..core import Array
def _read_bound_key(infofile, ncpu):
with open(infofile) as f:
content = f.readlines()
starting_line = None
for num, line in enumerate(content):
if len(set(["DOMAIN", "ind_min", "ind_max"]) - set(line.split())) == 0:
starting_line = num + 1
break
bound_key = []
if starting_line is not None:
for n in range(ncpu):
line = content[starting_line + n].split()
bound_key.append(int(float(line[1])))
bound_key.append(int(float(content[starting_line + ncpu - 1].split()[2])))
return bound_key
def _btest(i, pos):
return bool(i & (1 << pos))
def _hilbert3d(x, y, z, bit_length):
i_bit_mask = np.zeros(3 * bit_length, dtype=bool)
x_bit_mask = np.zeros(bit_length, dtype=bool)
y_bit_mask = np.zeros_like(x_bit_mask)
z_bit_mask = np.zeros_like(x_bit_mask)
state_diagram = np.array([
1, 2, 3, 2, 4, 5, 3, 5, 0, 1, 3, 2, 7, 6, 4, 5, 2, 6, 0, 7, 8, 8, 0, 7, 0, 7, 1,
6, 3, 4, 2, 5, 0, 9, 10, 9, 1, 1, 11, 11, 0, 3, 7, 4, 1, 2, 6, 5, 6, 0, 6, 11,
9, 0, 9, 8, 2, 3, 1, 0, 5, 4, 6, 7, 11, 11, 0, 7, 5, 9, 0, 7, 4, 3, 5, 2, 7, 0,
6, 1, 4, 4, 8, 8, 0, 6, 10, 6, 6, 5, 1, 2, 7, 4, 0, 3, 5, 7, 5, 3, 1, 1, 11, 11,
4, 7, 3, 0, 5, 6, 2, 1, 6, 1, 6, 10, 9, 4, 9, 10, 6, 7, 5, 4, 1, 0, 2, 3, 10, 3,
1, 1, 10, 3, 5, 9, 2, 5, 3, 4, 1, 6, 0, 7, 4, 4, 8, 8, 2, 7, 2, 3, 2, 1, 5, 6,
3, 0, 4, 7, 7, 2, 11, 2, 7, 5, 8, 5, 4, 5, 7, 6, 3, 2, 0, 1, 10, 3, 2, 6, 10, 3,
4, 4, 6, 1, 7, 0, 5, 2, 4, 3
]).reshape((8, 2, 12), order='F')
# Convert to binary
for i in range(bit_length):
x_bit_mask[i] = _btest(x, i)
y_bit_mask[i] = _btest(y, i)
z_bit_mask[i] = _btest(z, i)
# Interleave bits
for i in range(bit_length):
i_bit_mask[3 * i + 2] = x_bit_mask[i]
i_bit_mask[3 * i + 1] = y_bit_mask[i]
i_bit_mask[3 * i] = z_bit_mask[i]
# Build Hilbert ordering using state diagram
cstate = 0
for i in range(bit_length - 1, -1, -1):
b2 = 0
if i_bit_mask[3 * i + 2]:
b2 = 1
b1 = 0
if i_bit_mask[3 * i + 1]:
b1 = 1
b0 = 0
if i_bit_mask[3 * i]:
b0 = 1
sdigit = b2 * 4 + b1 * 2 + b0
nstate = state_diagram[sdigit, 0, cstate]
hdigit = state_diagram[sdigit, 1, cstate]
i_bit_mask[3 * i + 2] = _btest(hdigit, 2)
i_bit_mask[3 * i + 1] = _btest(hdigit, 1)
i_bit_mask[3 * i] = _btest(hdigit, 0)
cstate = nstate
order = 0
for i in range(3 * bit_length):
b0 = 0
if i_bit_mask[i]:
b0 = 1
order = order + b0 * (2**i)
return order
def _get_cpu_list(bounding_box, lmax, levelmax, infofile, ncpu, ndim):
bound_key = _read_bound_key(infofile=infofile, ncpu=ncpu)
xmin = bounding_box["xmin"]
xmax = bounding_box["xmax"]
ymin = bounding_box["ymin"]
ymax = bounding_box["ymax"]
zmin = bounding_box["zmin"]
zmax = bounding_box["zmax"]
dmax = max(xmax - xmin, ymax - ymin, zmax - zmin)
for ilevel in range(1, lmax + 1):
dx = 0.5**ilevel
if dx < dmax:
break
lmin = ilevel
bit_length = lmin - 1
maxdom = 2**bit_length
imin = 0
imax = 0
jmin = 0
jmax = 0
kmin = 0
kmax = 0
if bit_length > 0:
imin = int(xmin * maxdom)
imax = imin + 1
jmin = int(ymin * maxdom)
jmax = jmin + 1
kmin = int(zmin * maxdom)
kmax = kmin + 1
dkey = (2**(levelmax + 1) // maxdom)**ndim
ndom = 1
if bit_length > 0:
ndom = 8
idom = [imin, imax] * 4
jdom = [jmin, jmin, jmax, jmax] * 2
kdom = [kmin] * 4 + [kmax] * 4
bounding_min = [0, 0, 0, 0, 0, 0, 0, 0]
bounding_max = [0, 0, 0, 0, 0, 0, 0, 0]
bounding = None
for i in range(ndom):
if bit_length > 0:
bounding = _hilbert3d(idom[i], jdom[i], kdom[i], bit_length)
order_min = bounding
else:
order_min = 0
bounding_min[i] = order_min * dkey
bounding_max[i] = (order_min + 1) * dkey
cpu_min = [0, 0, 0, 0, 0, 0, 0, 0]
cpu_max = [0, 0, 0, 0, 0, 0, 0, 0]
for impi in range(ncpu):
for i in range(ndom):
if ((bound_key[impi] <= bounding_min[i])
and (bound_key[impi + 1] > bounding_min[i])):
cpu_min[i] = impi
if ((bound_key[impi] < bounding_max[i])
and (bound_key[impi + 1] >= bounding_max[i])):
cpu_max[i] = impi
cpu_list = []
for i in range(ndom):
for j in range(cpu_min[i], cpu_max[i] + 1):
if j + 1 not in cpu_list:
cpu_list.append(j + 1)
return cpu_list
def hilbert_cpu_list(meta, scaling, select, infofile):
if meta["ordering type"] != "hilbert":
return
if isinstance(select, bool):
return
bounding_box = {"xmin": 0, "xmax": 1, "ymin": 0, "ymax": 1, "zmin": 0, "zmax": 1}
# Make an array of cell centers according to lmax
box_size = (meta["boxlen"] * scaling).magnitude
ncells = 2**min(meta["levelmax"], 18) # limit to 262000 cells
half_dxmin = 0.5 * box_size / ncells
xyz_centers = Array(values=np.linspace(half_dxmin, box_size - half_dxmin, ncells),
unit=scaling.units)
new_bbox = False
for c in "xyz":
key = f"position_{c}"
if key in select:
new_bbox = True
func_test = select[key](xyz_centers)
inds = np.argwhere(func_test.values).ravel()
start = xyz_centers[inds.min()] - (half_dxmin * scaling.units)
end = xyz_centers[inds.max()] + (half_dxmin * scaling.units)
bounding_box["{}min".format(c)] = start._array / box_size
bounding_box["{}max".format(c)] = end._array / box_size
if new_bbox:
return _get_cpu_list(bounding_box=bounding_box,
lmax=meta["lmax"],
levelmax=meta["levelmax"],
infofile=infofile,
ncpu=meta["ncpu"],
ndim=meta["ndim"]) | src/osyris/io/hilbert.py |
import numpy as np
from ..core import Array
def _read_bound_key(infofile, ncpu):
with open(infofile) as f:
content = f.readlines()
starting_line = None
for num, line in enumerate(content):
if len(set(["DOMAIN", "ind_min", "ind_max"]) - set(line.split())) == 0:
starting_line = num + 1
break
bound_key = []
if starting_line is not None:
for n in range(ncpu):
line = content[starting_line + n].split()
bound_key.append(int(float(line[1])))
bound_key.append(int(float(content[starting_line + ncpu - 1].split()[2])))
return bound_key
def _btest(i, pos):
return bool(i & (1 << pos))
def _hilbert3d(x, y, z, bit_length):
i_bit_mask = np.zeros(3 * bit_length, dtype=bool)
x_bit_mask = np.zeros(bit_length, dtype=bool)
y_bit_mask = np.zeros_like(x_bit_mask)
z_bit_mask = np.zeros_like(x_bit_mask)
state_diagram = np.array([
1, 2, 3, 2, 4, 5, 3, 5, 0, 1, 3, 2, 7, 6, 4, 5, 2, 6, 0, 7, 8, 8, 0, 7, 0, 7, 1,
6, 3, 4, 2, 5, 0, 9, 10, 9, 1, 1, 11, 11, 0, 3, 7, 4, 1, 2, 6, 5, 6, 0, 6, 11,
9, 0, 9, 8, 2, 3, 1, 0, 5, 4, 6, 7, 11, 11, 0, 7, 5, 9, 0, 7, 4, 3, 5, 2, 7, 0,
6, 1, 4, 4, 8, 8, 0, 6, 10, 6, 6, 5, 1, 2, 7, 4, 0, 3, 5, 7, 5, 3, 1, 1, 11, 11,
4, 7, 3, 0, 5, 6, 2, 1, 6, 1, 6, 10, 9, 4, 9, 10, 6, 7, 5, 4, 1, 0, 2, 3, 10, 3,
1, 1, 10, 3, 5, 9, 2, 5, 3, 4, 1, 6, 0, 7, 4, 4, 8, 8, 2, 7, 2, 3, 2, 1, 5, 6,
3, 0, 4, 7, 7, 2, 11, 2, 7, 5, 8, 5, 4, 5, 7, 6, 3, 2, 0, 1, 10, 3, 2, 6, 10, 3,
4, 4, 6, 1, 7, 0, 5, 2, 4, 3
]).reshape((8, 2, 12), order='F')
# Convert to binary
for i in range(bit_length):
x_bit_mask[i] = _btest(x, i)
y_bit_mask[i] = _btest(y, i)
z_bit_mask[i] = _btest(z, i)
# Interleave bits
for i in range(bit_length):
i_bit_mask[3 * i + 2] = x_bit_mask[i]
i_bit_mask[3 * i + 1] = y_bit_mask[i]
i_bit_mask[3 * i] = z_bit_mask[i]
# Build Hilbert ordering using state diagram
cstate = 0
for i in range(bit_length - 1, -1, -1):
b2 = 0
if i_bit_mask[3 * i + 2]:
b2 = 1
b1 = 0
if i_bit_mask[3 * i + 1]:
b1 = 1
b0 = 0
if i_bit_mask[3 * i]:
b0 = 1
sdigit = b2 * 4 + b1 * 2 + b0
nstate = state_diagram[sdigit, 0, cstate]
hdigit = state_diagram[sdigit, 1, cstate]
i_bit_mask[3 * i + 2] = _btest(hdigit, 2)
i_bit_mask[3 * i + 1] = _btest(hdigit, 1)
i_bit_mask[3 * i] = _btest(hdigit, 0)
cstate = nstate
order = 0
for i in range(3 * bit_length):
b0 = 0
if i_bit_mask[i]:
b0 = 1
order = order + b0 * (2**i)
return order
def _get_cpu_list(bounding_box, lmax, levelmax, infofile, ncpu, ndim):
bound_key = _read_bound_key(infofile=infofile, ncpu=ncpu)
xmin = bounding_box["xmin"]
xmax = bounding_box["xmax"]
ymin = bounding_box["ymin"]
ymax = bounding_box["ymax"]
zmin = bounding_box["zmin"]
zmax = bounding_box["zmax"]
dmax = max(xmax - xmin, ymax - ymin, zmax - zmin)
for ilevel in range(1, lmax + 1):
dx = 0.5**ilevel
if dx < dmax:
break
lmin = ilevel
bit_length = lmin - 1
maxdom = 2**bit_length
imin = 0
imax = 0
jmin = 0
jmax = 0
kmin = 0
kmax = 0
if bit_length > 0:
imin = int(xmin * maxdom)
imax = imin + 1
jmin = int(ymin * maxdom)
jmax = jmin + 1
kmin = int(zmin * maxdom)
kmax = kmin + 1
dkey = (2**(levelmax + 1) // maxdom)**ndim
ndom = 1
if bit_length > 0:
ndom = 8
idom = [imin, imax] * 4
jdom = [jmin, jmin, jmax, jmax] * 2
kdom = [kmin] * 4 + [kmax] * 4
bounding_min = [0, 0, 0, 0, 0, 0, 0, 0]
bounding_max = [0, 0, 0, 0, 0, 0, 0, 0]
bounding = None
for i in range(ndom):
if bit_length > 0:
bounding = _hilbert3d(idom[i], jdom[i], kdom[i], bit_length)
order_min = bounding
else:
order_min = 0
bounding_min[i] = order_min * dkey
bounding_max[i] = (order_min + 1) * dkey
cpu_min = [0, 0, 0, 0, 0, 0, 0, 0]
cpu_max = [0, 0, 0, 0, 0, 0, 0, 0]
for impi in range(ncpu):
for i in range(ndom):
if ((bound_key[impi] <= bounding_min[i])
and (bound_key[impi + 1] > bounding_min[i])):
cpu_min[i] = impi
if ((bound_key[impi] < bounding_max[i])
and (bound_key[impi + 1] >= bounding_max[i])):
cpu_max[i] = impi
cpu_list = []
for i in range(ndom):
for j in range(cpu_min[i], cpu_max[i] + 1):
if j + 1 not in cpu_list:
cpu_list.append(j + 1)
return cpu_list
def hilbert_cpu_list(meta, scaling, select, infofile):
if meta["ordering type"] != "hilbert":
return
if isinstance(select, bool):
return
bounding_box = {"xmin": 0, "xmax": 1, "ymin": 0, "ymax": 1, "zmin": 0, "zmax": 1}
# Make an array of cell centers according to lmax
box_size = (meta["boxlen"] * scaling).magnitude
ncells = 2**min(meta["levelmax"], 18) # limit to 262000 cells
half_dxmin = 0.5 * box_size / ncells
xyz_centers = Array(values=np.linspace(half_dxmin, box_size - half_dxmin, ncells),
unit=scaling.units)
new_bbox = False
for c in "xyz":
key = f"position_{c}"
if key in select:
new_bbox = True
func_test = select[key](xyz_centers)
inds = np.argwhere(func_test.values).ravel()
start = xyz_centers[inds.min()] - (half_dxmin * scaling.units)
end = xyz_centers[inds.max()] + (half_dxmin * scaling.units)
bounding_box["{}min".format(c)] = start._array / box_size
bounding_box["{}max".format(c)] = end._array / box_size
if new_bbox:
return _get_cpu_list(bounding_box=bounding_box,
lmax=meta["lmax"],
levelmax=meta["levelmax"],
infofile=infofile,
ncpu=meta["ncpu"],
ndim=meta["ndim"]) | 0.433742 | 0.38549 |
import logging
import time
import argparse
import requests
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def post(emailAddress, password):
session = requests.session()
data = {'email_address': emailAddress,
'password': password,
'persistent': 'True'
} # login data
headers = {
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36',
'referer': 'https://inkread.com/feeds/explore/?bundles=zh'
}
session.headers = headers
session.post('https://inkread.com/login/', data=data)
logger.info(str(dict(session.cookies)))
time.sleep(5)
r = session.post('https://inkread.com/send_now/')
with open('1.html', 'w')as f:
f.write(r.text)
return r.status_code
def delay(settingTime):
logger.info('Waiting for the first time.')
while settingTime != time.strftime("%H:%M", time.localtime(time.time())):
time.sleep(55)
def loginTest(emailAddress, password):
session = requests.session()
data = {'email_address': emailAddress,
'password': password,
'persistent': 'True'
} # login data
headers = {
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36',
'referer': 'https://inkread.com/feeds/explore/?bundles=zh'
}
session.post('https://inkread.com/login/', data=data, headers=headers)
return len(dict(session.cookies)) == 2
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Automatic Script for Kindle4rss")
parser.add_argument('-t', '--time', type=str, default='3:14', help='Set the Time to Send File, Default 3:14')
parser.add_argument('-d', '--day', type=int, default=1, help='Days Between Sending Files, Default 1 Day')
parser.add_argument('-u', '--user', type=str, required=True, help='User Name, Mostly E-mail Address')
parser.add_argument('-p', '--password', type=str, required=True, help='Login Password')
args = parser.parse_args()
if '@' not in args.user:
logger.error('Check your login email address please')
exit(-1)
if not loginTest('<EMAIL>', '(imp@h01)'):
logger.error('Login Failure. Check your user name or password.')
if len(args.time) == 4: # 头部加0
args.time = '0' + args.time
logger.info(
'User {} Password {} Sending file at {} every {} day.'.format(args.user, args.password, args.time, args.day))
while True:
delay(args.time)
logger.info('Start')
post(args.user, args.password)
logger.info('Finished, waiting for the next time')
time.sleep(60 * 60 * 24 * args.day - 20) | 2.py |
import logging
import time
import argparse
import requests
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def post(emailAddress, password):
session = requests.session()
data = {'email_address': emailAddress,
'password': password,
'persistent': 'True'
} # login data
headers = {
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36',
'referer': 'https://inkread.com/feeds/explore/?bundles=zh'
}
session.headers = headers
session.post('https://inkread.com/login/', data=data)
logger.info(str(dict(session.cookies)))
time.sleep(5)
r = session.post('https://inkread.com/send_now/')
with open('1.html', 'w')as f:
f.write(r.text)
return r.status_code
def delay(settingTime):
logger.info('Waiting for the first time.')
while settingTime != time.strftime("%H:%M", time.localtime(time.time())):
time.sleep(55)
def loginTest(emailAddress, password):
session = requests.session()
data = {'email_address': emailAddress,
'password': password,
'persistent': 'True'
} # login data
headers = {
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36',
'referer': 'https://inkread.com/feeds/explore/?bundles=zh'
}
session.post('https://inkread.com/login/', data=data, headers=headers)
return len(dict(session.cookies)) == 2
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Automatic Script for Kindle4rss")
parser.add_argument('-t', '--time', type=str, default='3:14', help='Set the Time to Send File, Default 3:14')
parser.add_argument('-d', '--day', type=int, default=1, help='Days Between Sending Files, Default 1 Day')
parser.add_argument('-u', '--user', type=str, required=True, help='User Name, Mostly E-mail Address')
parser.add_argument('-p', '--password', type=str, required=True, help='Login Password')
args = parser.parse_args()
if '@' not in args.user:
logger.error('Check your login email address please')
exit(-1)
if not loginTest('<EMAIL>', '(imp@h01)'):
logger.error('Login Failure. Check your user name or password.')
if len(args.time) == 4: # 头部加0
args.time = '0' + args.time
logger.info(
'User {} Password {} Sending file at {} every {} day.'.format(args.user, args.password, args.time, args.day))
while True:
delay(args.time)
logger.info('Start')
post(args.user, args.password)
logger.info('Finished, waiting for the next time')
time.sleep(60 * 60 * 24 * args.day - 20) | 0.31384 | 0.086825 |
__all__ = ['Input']
import re
from tebless.devs import Widget, echo
from tebless.utils.keyboard import KEY_BACKSPACE, KEY_DELETE
class Input(Widget):
"""Input widget with label.
:param text: placeholder text
:param label: Desc of input
:param align: center, ljust, rjust text
:param fill_c: blank space
:param cursor: pointer
:param left_l: left terminator
:param right_l: right terminator
:param max_len: max string length
:param validation: a regex string to validate input
:param text_style: apply to text
:type text: str
:type label: str
:type align: str
:type fill_c: str
:type cursor: str
:type left_l: str
:type right_l: str
:type max_len: int
:type validation: regex
:type text_style: func
:Example:
>>> from tebless.widgets import Input, Window
>>> @Window.decorator(main=True)
... def view(window):
... window += Input(label="Insert text", cordx=2,
... cordy=2, width=10, align='center')
"""
def __init__(self,
text='',
label='',
align='left',
max_len=6,
*args, **kwargs):
params = dict(text=text, label=label, max_len=round(max_len))
super().__init__(on_key=self._on_key, *args, **params, **kwargs)
self._text = text
self._label = label
self._max_len = round(max_len)
self._fill_c = kwargs.get('fill_c', '_')
self._cursor = kwargs.get('cursor', '_')
self._left_l = kwargs.get('left_l', ' [ ')
self._right_l = kwargs.get('right_l', ' ]')
self._validation = kwargs.get('validation', r'.')
self._text_style = kwargs.get('text_style', lambda x: x)
if align == 'left':
self._align = self.term.ljust
elif align == 'center':
self._align = self.term.center
elif align == 'right':
self._align = self.term.rjust
else:
raise ValueError('Only valids aligns: left, right, center')
if self.term.length(self._text) > self._max_len:
raise ValueError('text is too long')
elif self.term.length(self._fill_c) > 1:
raise ValueError('fill_c need a char')
elif self.term.length(self._cursor) > 1:
raise ValueError('cursor need a char')
def _on_key(self, key):
correct_len = self.term.length(self.value) < self._max_len
validations = re.match(self._validation, key) and key.isprintable()
# TODO: Add event on fail validation
if correct_len and validations:
self.value += key
elif key.code in (KEY_BACKSPACE, KEY_DELETE) and self.value:
self.value = self.value[:-1]
def paint(self):
text = self._text_style(self.value)
if self.term.length(self.value) < self._max_len:
text = text + self._cursor
text = self._align(text, fillchar=self._fill_c, width=self._max_len)
input_field = self._left_l + text + self._right_l # [_______]
echo(self.term.move(self.y, self.x) +
self._label + input_field) # label
@property
def width(self):
len_widget = self.term.length(
self._label) + self.term.length(self._right_l)
len_widget += self.term.length(self._left_l) + self._max_len
return len_widget
@property
def height(self):
return 1
@property
def value(self):
return self._text
@property
def label(self):
return self._label
@label.setter
def label(self, value):
if not isinstance(value, str):
raise TypeError('Only supported string')
self._label = value
self.on_change()
@value.setter
def value(self, value):
if not (isinstance(value, str) or isinstance(value, int)):
raise TypeError('Only supported string or int')
self._text = str(value)
self.on_change() | tebless/widgets/input.py | __all__ = ['Input']
import re
from tebless.devs import Widget, echo
from tebless.utils.keyboard import KEY_BACKSPACE, KEY_DELETE
class Input(Widget):
"""Input widget with label.
:param text: placeholder text
:param label: Desc of input
:param align: center, ljust, rjust text
:param fill_c: blank space
:param cursor: pointer
:param left_l: left terminator
:param right_l: right terminator
:param max_len: max string length
:param validation: a regex string to validate input
:param text_style: apply to text
:type text: str
:type label: str
:type align: str
:type fill_c: str
:type cursor: str
:type left_l: str
:type right_l: str
:type max_len: int
:type validation: regex
:type text_style: func
:Example:
>>> from tebless.widgets import Input, Window
>>> @Window.decorator(main=True)
... def view(window):
... window += Input(label="Insert text", cordx=2,
... cordy=2, width=10, align='center')
"""
def __init__(self,
text='',
label='',
align='left',
max_len=6,
*args, **kwargs):
params = dict(text=text, label=label, max_len=round(max_len))
super().__init__(on_key=self._on_key, *args, **params, **kwargs)
self._text = text
self._label = label
self._max_len = round(max_len)
self._fill_c = kwargs.get('fill_c', '_')
self._cursor = kwargs.get('cursor', '_')
self._left_l = kwargs.get('left_l', ' [ ')
self._right_l = kwargs.get('right_l', ' ]')
self._validation = kwargs.get('validation', r'.')
self._text_style = kwargs.get('text_style', lambda x: x)
if align == 'left':
self._align = self.term.ljust
elif align == 'center':
self._align = self.term.center
elif align == 'right':
self._align = self.term.rjust
else:
raise ValueError('Only valids aligns: left, right, center')
if self.term.length(self._text) > self._max_len:
raise ValueError('text is too long')
elif self.term.length(self._fill_c) > 1:
raise ValueError('fill_c need a char')
elif self.term.length(self._cursor) > 1:
raise ValueError('cursor need a char')
def _on_key(self, key):
correct_len = self.term.length(self.value) < self._max_len
validations = re.match(self._validation, key) and key.isprintable()
# TODO: Add event on fail validation
if correct_len and validations:
self.value += key
elif key.code in (KEY_BACKSPACE, KEY_DELETE) and self.value:
self.value = self.value[:-1]
def paint(self):
text = self._text_style(self.value)
if self.term.length(self.value) < self._max_len:
text = text + self._cursor
text = self._align(text, fillchar=self._fill_c, width=self._max_len)
input_field = self._left_l + text + self._right_l # [_______]
echo(self.term.move(self.y, self.x) +
self._label + input_field) # label
@property
def width(self):
len_widget = self.term.length(
self._label) + self.term.length(self._right_l)
len_widget += self.term.length(self._left_l) + self._max_len
return len_widget
@property
def height(self):
return 1
@property
def value(self):
return self._text
@property
def label(self):
return self._label
@label.setter
def label(self, value):
if not isinstance(value, str):
raise TypeError('Only supported string')
self._label = value
self.on_change()
@value.setter
def value(self, value):
if not (isinstance(value, str) or isinstance(value, int)):
raise TypeError('Only supported string or int')
self._text = str(value)
self.on_change() | 0.591723 | 0.316119 |
from direct.interval.IntervalGlobal import *
from direct.gui.OnscreenText import OnscreenText
from direct.actor import Actor
from pandac.PandaModules import *
from otp.otpbase import OTPGlobals
from otp.avatar import Avatar
from pirates.piratesbase import PiratesGlobals
from pirates.movement.AnimationMixer import AnimationMixer
from pirates.movement.UsesAnimationMixer import UsesAnimationMixer
from pirates.effects.UsesEffectNode import UsesEffectNode
from pirates.pirate import AvatarTypes
from pirates.pirate.AvatarType import AvatarType
from pirates.audio import SoundGlobals
from pirates.audio.SoundGlobals import loadSfx
from otp.otpbase import OTPRender
from pirates.effects.JRDeathEffect import JRDeathEffect
from pirates.effects.ShockwaveRing import ShockwaveRing
from pirates.effects.JRSpiritEffect import JRSpiritEffect
from pirates.battle import EnemyGlobals
import random
WALK_CUTOFF = 0.5
ADVANCE_CUTOFF = 0.5
RUN_CUTOFF = PiratesGlobals.ToonForwardSpeed
class Creature(UsesAnimationMixer, Avatar.Avatar, UsesEffectNode):
FailsafeAnims = (('idle', 1.0), ('idle', 1.0), ('idle', 1.0), ('idle', 1.0), ('idle', 1.0), ('idle', 1.0), ('idle', 1.0), ('idle', 1.0), ('idle', 1.0), ('idle', 1.0), ('idle', 1.0), ('idle', 1.0), ('idle', 1.0), ('idle', 1.0))
SfxNames = {
'death': SoundGlobals.SFX_MONSTER_DEATH }
sfx = { }
actor = None
animInfo = { }
class AnimationMixer(AnimationMixer):
LOOP = AnimationMixer.LOOP
ACTION = dict(AnimationMixer.ACTION)
ACTION['MOVIE'] = AnimationMixer.ACTION_INDEX + 1
def __init__(self, animationMixer = None):
Avatar.Avatar.__init__(self)
UsesEffectNode.__init__(self)
self.setPickable(0)
self.shadowFileName = 'models/misc/drop_shadow'
self.dimensions = VBase3(0.0, 0.0, 0.0)
self.nameText = None
self.avatarType = None
self.level = None
self.nametagOffset = 2.0
self.headNode = self.find('**/def_head')
if not Creature.sfx:
for name in Creature.SfxNames:
Creature.sfx[name] = loadSfx(Creature.SfxNames[name])
self.setupReflection()
if not animationMixer:
pass
animationMixer = self.AnimationMixer
UsesAnimationMixer.__init__(self, animationMixer)
self.deathEffect = None
self.shockwaveRingIval = None
self.shockwaveRingEffect = None
self.spiritIval = None
self.spiritEffect = None
def delete(self):
if self.deathEffect:
self.deathEffect.stop()
self.deathEffect = None
if self.shockwaveRingIval:
self.shockwaveRingIval.pause()
self.shockwaveRingIval = None
if self.shockwaveRingEffect:
self.shockwaveRingEffect.stop()
self.shockwaveRingEffect = None
if self.spiritIval:
self.spiritIval.pause()
self.spiritIval = None
if self.spiritEffect:
self.spiritEffect.stop()
self.spiritEffect = None
Avatar.Avatar.delete(self)
UsesAnimationMixer.delete(self)
UsesEffectNode.delete(self)
def setupReflection(self):
OTPRender.renderReflection(False, self, 'p_creature', None)
def forceLoadAnimDict(self):
for anim in self.animDict:
self.getAnimControls(anim)
def generateCreature(self):
if self.actor:
self.copyActor(self.actor)
self.headNode = self.find('**/def_head')
if base.options.character_detail_level == PiratesGlobals.CD_LOW:
self.setLODAnimation(100, 5, 0.10000000000000001)
self.enableMixing()
def setAvatarType(self, avatarType):
self.avatarType = avatarType
self.height = EnemyGlobals.getHeight(avatarType)
self.initializeDropShadow()
if base.options.terrain_detail_level == PiratesGlobals.CD_LOW:
self.shadowPlacer.off()
self.initializeNametag3d()
setAvatarType = report(types = [
'module',
'args'], dConfigParam = 'nametag')(setAvatarType)
def setLevel(self, level):
self.level = level
def getLevel(self):
return self.level
def initializeNametag3d(self):
Avatar.Avatar.initializeNametag3d(self)
self.nametag3d.setFogOff()
self.nametag3d.setLightOff()
self.nametag3d.setColorScaleOff(100)
self.nametag3d.setH(self.getGeomNode().getH())
self.nametag.setFont(PiratesGlobals.getPirateBoldOutlineFont())
self.iconNodePath = self.nametag.getNameIcon()
if self.iconNodePath.isEmpty():
self.notify.warning('empty iconNodePath in initializeNametag3d')
return 0
if not self.nameText:
self.nameText = OnscreenText(fg = Vec4(1, 1, 1, 1), bg = Vec4(0, 0, 0, 0), scale = 1.1000000000000001, align = TextNode.ACenter, mayChange = 1, font = PiratesGlobals.getPirateBoldOutlineFont())
self.nameText.reparentTo(self.iconNodePath)
self.nameText.setTransparency(TransparencyAttrib.MDual, 2)
self.nameText.setColorScaleOff(100)
self.nameText.setLightOff()
self.nameText.setFogOff()
initializeNametag3d = report(types = [
'module',
'args'], dConfigParam = 'nametag')(initializeNametag3d)
def initializeNametag3dPet(self):
pass
def getNameText(self):
return self.nameText
def scaleAnimRate(self, forwardSpeed):
rate = 1.0
myMaxSpeed = self.getMaxSpeed()
if myMaxSpeed > 0 and forwardSpeed > 0:
currTime = globalClockDelta.globalClock.getFrameTime()
maxSpeed = myMaxSpeed * (currTime - self.prevSpeedClock)
prevTime = self.prevSpeedClock
self.prevSpeedClock = currTime
rate = min(1.25, forwardSpeed / maxSpeed)
return rate
def getNametagJoints(self):
joints = []
for lodName in self.getLODNames():
bundle = self.getPartBundle('modelRoot', lodName)
joint = bundle.findChild('name_tag')
if joint:
joints.append(joint)
continue
return joints
getNametagJoints = report(types = [
'module',
'args'], dConfigParam = 'nametag')(getNametagJoints)
def adjustNametag3d(self, parentScale = 1.0):
self.nametag3d.setZ(self.scale * parentScale * self.nametagOffset - self.nametagOffset)
def getAirborneHeight(self):
return 0.0
def getMaxSpeed(self):
return 0
def getRadius(self):
return self.battleTubeRadius
def play(self, *args, **kwArgs):
UsesAnimationMixer.play(self, *args, **args)
def loop(self, *args, **kwArgs):
UsesAnimationMixer.loop(self, *args, **args)
def pingpong(self, *args, **kwArgs):
UsesAnimationMixer.pingpong(self, *args, **args)
def pose(self, *args, **kwArgs):
UsesAnimationMixer.pose(self, *args, **args)
def stop(self, *args, **kwArgs):
UsesAnimationMixer.stop(self, *args, **args)
def getDeathAnimName(self, animNum = None):
animStrings = [
'death']
if animNum not in range(len(animStrings)):
animNum = random.choice([
0])
return animStrings[animNum]
def getAnimInfo(self, state):
return self.animInfo.get(state, self.FailsafeAnims)
def setupAnimInfoState(cls, state, info):
if len(info) < len(cls.FailsafeAnims):
info += cls.FailsafeAnims[len(info) - len(cls.FailsafeAnims):]
cls.animInfo[state] = info
setupAnimInfoState = classmethod(setupAnimInfoState)
def setupAnimInfo(cls):
cls.setupAnimInfoState('LandRoam', cls.FailsafeAnims)
cls.setupAnimInfoState('WaterRoam', cls.FailsafeAnims)
setupAnimInfo = classmethod(setupAnimInfo)
def setLODs(self):
avatarDetail = base.config.GetString('avatar-detail', 'high')
if avatarDetail == 'high':
dist = [
0,
20,
80,
280]
elif avatarDetail == 'med':
dist = [
0,
10,
40,
280]
elif avatarDetail == 'low':
dist = [
0,
5,
20,
280]
else:
raise StandardError, 'Invalid avatar-detail: %s' % avatarDetail
self.addLOD('hi', dist[1], dist[0])
self.addLOD('med', dist[2], dist[1])
self.addLOD('low', dist[3], dist[2])
def setupAssets(cls):
cls.animInfo = Creature.animInfo.copy()
cls.setupAnimInfo()
filePrefix = cls.ModelInfo[1]
animList = cls.AnimList
animDict = { }
for anim in animList:
animDict[anim[0]] = filePrefix + anim[1]
cls.animDict = animDict
filePrefix = cls.ModelInfo[1]
for name in cls.SfxNames:
cls.sfx[name] = loadSfx(cls.SfxNames[name])
cls.actor = Actor.Actor()
if loader.loadModel(filePrefix + 'med') != None:
avatarDetail = base.config.GetString('avatar-detail', 'high')
if avatarDetail == 'high':
dist = [
0,
20,
80,
280]
elif avatarDetail == 'med':
dist = [
0,
10,
40,
280]
elif avatarDetail == 'low':
dist = [
0,
6,
20,
280]
else:
raise StandardError, 'Invalid avatar-detail: %s' % avatarDetail
cls.actor.setLODNode()
cls.actor.addLOD('hi', dist[1], dist[0])
cls.actor.addLOD('med', dist[2], dist[1])
cls.actor.addLOD('low', dist[3], dist[2])
creatureDetail = base.config.GetBool('want-high-creature-detail', 0)
if creatureDetail:
cls.actor.loadModel(filePrefix + 'hi', 'modelRoot', 'hi')
cls.actor.loadModel(filePrefix + 'med', 'modelRoot', 'med')
cls.actor.loadModel(filePrefix + 'low', 'modelRoot', 'low')
else:
cls.actor.loadModel(filePrefix + 'med', 'modelRoot', 'hi')
cls.actor.loadModel(filePrefix + 'low', 'modelRoot', 'med')
cls.actor.loadModel(filePrefix + 'super', 'modelRoot', 'low')
cls.actor.loadAnims(cls.animDict, 'modelRoot', 'all')
else:
cls.actor.loadModel(cls.ModelInfo[0])
cls.actor.loadAnims(cls.animDict)
cls.actor.getGeomNode().setH(180)
setupAssets = classmethod(setupAssets)
def getSfx(self, name):
return self.sfx.get(name)
def shouldNotice(self):
return 1
    def endShuffle(self):
        """Blend back into the standing idle loop of the LandRoam state."""
        idleAnimInfo = self.animInfo['LandRoam'][PiratesGlobals.STAND_INDEX]
        try:
            self.loop(idleAnimInfo[0], blendDelay = 0.29999999999999999, rate = idleAnimInfo[1])
        except TypeError:
            # NOTE(review): 'e = None' looks like a decompiler artifact of
            # 'except TypeError, e:'; the value is never used.
            e = None
            self.notify.error('Invalid animation %s for %s' % (idleAnimInfo, self))
    def getSplashOverride(self):
        # Implicitly returns None: no creature-specific splash override.
        pass
Creature.setupAnimInfo() | pirates/creature/Creature.py |
from direct.interval.IntervalGlobal import *
from direct.gui.OnscreenText import OnscreenText
from direct.actor import Actor
from pandac.PandaModules import *
from otp.otpbase import OTPGlobals
from otp.avatar import Avatar
from pirates.piratesbase import PiratesGlobals
from pirates.movement.AnimationMixer import AnimationMixer
from pirates.movement.UsesAnimationMixer import UsesAnimationMixer
from pirates.effects.UsesEffectNode import UsesEffectNode
from pirates.pirate import AvatarTypes
from pirates.pirate.AvatarType import AvatarType
from pirates.audio import SoundGlobals
from pirates.audio.SoundGlobals import loadSfx
from otp.otpbase import OTPRender
from pirates.effects.JRDeathEffect import JRDeathEffect
from pirates.effects.ShockwaveRing import ShockwaveRing
from pirates.effects.JRSpiritEffect import JRSpiritEffect
from pirates.battle import EnemyGlobals
import random
# Forward-speed cutoff constants.  Presumably consumed by movement code
# outside this file's visible scope -- TODO confirm; they are not
# referenced within this module's visible code.
WALK_CUTOFF = 0.5
ADVANCE_CUTOFF = 0.5
RUN_CUTOFF = PiratesGlobals.ToonForwardSpeed
class Creature(UsesAnimationMixer, Avatar.Avatar, UsesEffectNode):
FailsafeAnims = (('idle', 1.0), ('idle', 1.0), ('idle', 1.0), ('idle', 1.0), ('idle', 1.0), ('idle', 1.0), ('idle', 1.0), ('idle', 1.0), ('idle', 1.0), ('idle', 1.0), ('idle', 1.0), ('idle', 1.0), ('idle', 1.0), ('idle', 1.0))
SfxNames = {
'death': SoundGlobals.SFX_MONSTER_DEATH }
sfx = { }
actor = None
animInfo = { }
class AnimationMixer(AnimationMixer):
LOOP = AnimationMixer.LOOP
ACTION = dict(AnimationMixer.ACTION)
ACTION['MOVIE'] = AnimationMixer.ACTION_INDEX + 1
def __init__(self, animationMixer = None):
Avatar.Avatar.__init__(self)
UsesEffectNode.__init__(self)
self.setPickable(0)
self.shadowFileName = 'models/misc/drop_shadow'
self.dimensions = VBase3(0.0, 0.0, 0.0)
self.nameText = None
self.avatarType = None
self.level = None
self.nametagOffset = 2.0
self.headNode = self.find('**/def_head')
if not Creature.sfx:
for name in Creature.SfxNames:
Creature.sfx[name] = loadSfx(Creature.SfxNames[name])
self.setupReflection()
if not animationMixer:
pass
animationMixer = self.AnimationMixer
UsesAnimationMixer.__init__(self, animationMixer)
self.deathEffect = None
self.shockwaveRingIval = None
self.shockwaveRingEffect = None
self.spiritIval = None
self.spiritEffect = None
def delete(self):
if self.deathEffect:
self.deathEffect.stop()
self.deathEffect = None
if self.shockwaveRingIval:
self.shockwaveRingIval.pause()
self.shockwaveRingIval = None
if self.shockwaveRingEffect:
self.shockwaveRingEffect.stop()
self.shockwaveRingEffect = None
if self.spiritIval:
self.spiritIval.pause()
self.spiritIval = None
if self.spiritEffect:
self.spiritEffect.stop()
self.spiritEffect = None
Avatar.Avatar.delete(self)
UsesAnimationMixer.delete(self)
UsesEffectNode.delete(self)
def setupReflection(self):
OTPRender.renderReflection(False, self, 'p_creature', None)
def forceLoadAnimDict(self):
for anim in self.animDict:
self.getAnimControls(anim)
def generateCreature(self):
if self.actor:
self.copyActor(self.actor)
self.headNode = self.find('**/def_head')
if base.options.character_detail_level == PiratesGlobals.CD_LOW:
self.setLODAnimation(100, 5, 0.10000000000000001)
self.enableMixing()
def setAvatarType(self, avatarType):
self.avatarType = avatarType
self.height = EnemyGlobals.getHeight(avatarType)
self.initializeDropShadow()
if base.options.terrain_detail_level == PiratesGlobals.CD_LOW:
self.shadowPlacer.off()
self.initializeNametag3d()
setAvatarType = report(types = [
'module',
'args'], dConfigParam = 'nametag')(setAvatarType)
def setLevel(self, level):
self.level = level
def getLevel(self):
return self.level
def initializeNametag3d(self):
Avatar.Avatar.initializeNametag3d(self)
self.nametag3d.setFogOff()
self.nametag3d.setLightOff()
self.nametag3d.setColorScaleOff(100)
self.nametag3d.setH(self.getGeomNode().getH())
self.nametag.setFont(PiratesGlobals.getPirateBoldOutlineFont())
self.iconNodePath = self.nametag.getNameIcon()
if self.iconNodePath.isEmpty():
self.notify.warning('empty iconNodePath in initializeNametag3d')
return 0
if not self.nameText:
self.nameText = OnscreenText(fg = Vec4(1, 1, 1, 1), bg = Vec4(0, 0, 0, 0), scale = 1.1000000000000001, align = TextNode.ACenter, mayChange = 1, font = PiratesGlobals.getPirateBoldOutlineFont())
self.nameText.reparentTo(self.iconNodePath)
self.nameText.setTransparency(TransparencyAttrib.MDual, 2)
self.nameText.setColorScaleOff(100)
self.nameText.setLightOff()
self.nameText.setFogOff()
initializeNametag3d = report(types = [
'module',
'args'], dConfigParam = 'nametag')(initializeNametag3d)
def initializeNametag3dPet(self):
pass
def getNameText(self):
return self.nameText
def scaleAnimRate(self, forwardSpeed):
rate = 1.0
myMaxSpeed = self.getMaxSpeed()
if myMaxSpeed > 0 and forwardSpeed > 0:
currTime = globalClockDelta.globalClock.getFrameTime()
maxSpeed = myMaxSpeed * (currTime - self.prevSpeedClock)
prevTime = self.prevSpeedClock
self.prevSpeedClock = currTime
rate = min(1.25, forwardSpeed / maxSpeed)
return rate
def getNametagJoints(self):
joints = []
for lodName in self.getLODNames():
bundle = self.getPartBundle('modelRoot', lodName)
joint = bundle.findChild('name_tag')
if joint:
joints.append(joint)
continue
return joints
getNametagJoints = report(types = [
'module',
'args'], dConfigParam = 'nametag')(getNametagJoints)
def adjustNametag3d(self, parentScale = 1.0):
self.nametag3d.setZ(self.scale * parentScale * self.nametagOffset - self.nametagOffset)
def getAirborneHeight(self):
return 0.0
def getMaxSpeed(self):
return 0
def getRadius(self):
return self.battleTubeRadius
def play(self, *args, **kwArgs):
UsesAnimationMixer.play(self, *args, **args)
def loop(self, *args, **kwArgs):
UsesAnimationMixer.loop(self, *args, **args)
def pingpong(self, *args, **kwArgs):
UsesAnimationMixer.pingpong(self, *args, **args)
def pose(self, *args, **kwArgs):
UsesAnimationMixer.pose(self, *args, **args)
def stop(self, *args, **kwArgs):
UsesAnimationMixer.stop(self, *args, **args)
def getDeathAnimName(self, animNum = None):
animStrings = [
'death']
if animNum not in range(len(animStrings)):
animNum = random.choice([
0])
return animStrings[animNum]
def getAnimInfo(self, state):
return self.animInfo.get(state, self.FailsafeAnims)
def setupAnimInfoState(cls, state, info):
if len(info) < len(cls.FailsafeAnims):
info += cls.FailsafeAnims[len(info) - len(cls.FailsafeAnims):]
cls.animInfo[state] = info
setupAnimInfoState = classmethod(setupAnimInfoState)
def setupAnimInfo(cls):
cls.setupAnimInfoState('LandRoam', cls.FailsafeAnims)
cls.setupAnimInfoState('WaterRoam', cls.FailsafeAnims)
setupAnimInfo = classmethod(setupAnimInfo)
def setLODs(self):
avatarDetail = base.config.GetString('avatar-detail', 'high')
if avatarDetail == 'high':
dist = [
0,
20,
80,
280]
elif avatarDetail == 'med':
dist = [
0,
10,
40,
280]
elif avatarDetail == 'low':
dist = [
0,
5,
20,
280]
else:
raise StandardError, 'Invalid avatar-detail: %s' % avatarDetail
self.addLOD('hi', dist[1], dist[0])
self.addLOD('med', dist[2], dist[1])
self.addLOD('low', dist[3], dist[2])
def setupAssets(cls):
cls.animInfo = Creature.animInfo.copy()
cls.setupAnimInfo()
filePrefix = cls.ModelInfo[1]
animList = cls.AnimList
animDict = { }
for anim in animList:
animDict[anim[0]] = filePrefix + anim[1]
cls.animDict = animDict
filePrefix = cls.ModelInfo[1]
for name in cls.SfxNames:
cls.sfx[name] = loadSfx(cls.SfxNames[name])
cls.actor = Actor.Actor()
if loader.loadModel(filePrefix + 'med') != None:
avatarDetail = base.config.GetString('avatar-detail', 'high')
if avatarDetail == 'high':
dist = [
0,
20,
80,
280]
elif avatarDetail == 'med':
dist = [
0,
10,
40,
280]
elif avatarDetail == 'low':
dist = [
0,
6,
20,
280]
else:
raise StandardError, 'Invalid avatar-detail: %s' % avatarDetail
cls.actor.setLODNode()
cls.actor.addLOD('hi', dist[1], dist[0])
cls.actor.addLOD('med', dist[2], dist[1])
cls.actor.addLOD('low', dist[3], dist[2])
creatureDetail = base.config.GetBool('want-high-creature-detail', 0)
if creatureDetail:
cls.actor.loadModel(filePrefix + 'hi', 'modelRoot', 'hi')
cls.actor.loadModel(filePrefix + 'med', 'modelRoot', 'med')
cls.actor.loadModel(filePrefix + 'low', 'modelRoot', 'low')
else:
cls.actor.loadModel(filePrefix + 'med', 'modelRoot', 'hi')
cls.actor.loadModel(filePrefix + 'low', 'modelRoot', 'med')
cls.actor.loadModel(filePrefix + 'super', 'modelRoot', 'low')
cls.actor.loadAnims(cls.animDict, 'modelRoot', 'all')
else:
cls.actor.loadModel(cls.ModelInfo[0])
cls.actor.loadAnims(cls.animDict)
cls.actor.getGeomNode().setH(180)
setupAssets = classmethod(setupAssets)
def getSfx(self, name):
return self.sfx.get(name)
def shouldNotice(self):
return 1
def endShuffle(self):
idleAnimInfo = self.animInfo['LandRoam'][PiratesGlobals.STAND_INDEX]
try:
self.loop(idleAnimInfo[0], blendDelay = 0.29999999999999999, rate = idleAnimInfo[1])
except TypeError:
e = None
self.notify.error('Invalid animation %s for %s' % (idleAnimInfo, self))
def getSplashOverride(self):
pass
Creature.setupAnimInfo() | 0.566019 | 0.08438 |
import numpy as np
import numpy
from scipy.stats import multivariate_normal
class MultiGaussianDistribution(object):
def __init__(self, dimensions):
self.dimensions = dimensions
self.mean = np.zeros([dimensions])
self.cov = np.ones([dimensions, dimensions])
#self.cov = np.dot(self.cov, self.cov.T)
self.distri = multivariate_normal(mean=self.mean, cov=self.cov)
def prob(self, observation):
return self.distri.pdf(observation)
def update_mean(self, mean):
self.mean = mean
self.distri.mean = mean
def update_cov(self, cov):
self.cov = cov
self.distri.cov = cov
def update_distri(self):
self.distri = multivariate_normal(mean=self.mean, cov=self.cov)
class GaussianMixture(object):
    """Per-state Gaussian mixture emission model: `states` rows of
    `component_no` MultiGaussianDistribution instances, plus a matching
    (states x component_no) weight matrix initialised uniform."""
    def __init__(self, component_no, states, vector_len):
        self.state = range(states)
        self.component_no = component_no
        self.component = range(component_no)
        self.distributions = [[MultiGaussianDistribution(vector_len) for i in range(component_no)] for j in range(states)]
        self.weights = np.array([[1.0/component_no for i in range(component_no)] for i in range(states)])
    def save_model(self):
        # Despite the name, nothing is written to disk: the parameters of
        # every (state, component) pair are dumped to stdout.
        for component in self.component:
            for state in self.state:
                print 'component: %s, state: %s'%(component, state)
                print self.distributions[state][component].mean
                print self.distributions[state][component].cov
def init_a(state_no):
    """Build a uniform row-stochastic transition matrix (rows sum to 1)."""
    ones = np.ones([state_no, state_no])
    return ones / ones.sum(axis=1, keepdims=True)
def get_key(prefix, *args, **kwargs):
    """Build a memo key 'prefix:a-b-c' from the positional args.

    Keyword arguments are accepted but intentionally not part of the key.
    """
    parts = [str(arg) for arg in args]
    return '%s:%s' % (prefix, '-'.join(parts))
class Hmm(object):
    """Forward-backward / Baum-Welch statistics for a single observation
    sequence under a shared Gaussian-mixture emission model.

    Notation follows the standard HMM formulation: alpha/beta are the
    forward and backward variables, gamma/xi the state and transition
    posteriors, and b(state, ob) the emission probability.
    """
    def __init__(self, state_no, gm, sequence, pi, a, component_no):
        self.state_no = state_no
        self.a = a
        self.pi = pi
        self.gm = gm
        self.components = component_no
        self.observations = sequence
        self.T = len(sequence)
        # Memo table for the recursive alpha/beta/gamma/xi computations;
        # cleared externally (see Trainner.update) when parameters change.
        self.cache={}
    def do_cache(fn):
        # Decorator: memoise a method's result in inst.cache, keyed on the
        # method name and its positional arguments (get_key ignores kwargs).
        def _(inst, *args, **kwargs):
            key = get_key(fn.func_name, *args, **kwargs)
            if key not in inst.cache:
                res = fn(inst, *args, **kwargs)
                inst.cache[key] = res
            return inst.cache[key]
        return _
    @do_cache
    def alpha(self, t, state):
        # Forward variable: joint probability of o_0..o_t and q_t = state.
        if t == 0:
            return self.pi[state]*self.b(state, self.observations[0])
        else:
            total = 0.0
            for _state in xrange(self.state_no):
                total += self.alpha(t-1, _state)*self.a[_state][state]
            return total*self.b(state, self.observations[t])
    @do_cache
    def beta(self, t, state):
        # Backward variable: probability of o_{t+1}..o_{T-1} given q_t = state.
        if t == self.T - 1:
            return 1
        else:
            total = 0.0
            for _state in xrange(self.state_no):
                total += self.a[state][_state]*self.b(_state, self.observations[t+1])*self.beta(t+1, _state)
            return total
    @do_cache
    def b(self, state, ob):
        # Mixture emission probability: sum of weighted component densities.
        return sum([self.b_component(state, com, ob) for com in xrange(self.components)])
    @do_cache
    def b_component(self, state, component, ob):
        # Weighted density of a single mixture component.
        return self.gm.weights[state][component]*self.gm.distributions[state][component].prob(ob)
    @do_cache
    def gamma_component(self, t, state, component):
        # Posterior of being in (state, component) at time t: the state
        # posterior times this component's share of the mixture density.
        ob = self.observations[t]
        nor = self.gm.weights[state][component]*self.gm.distributions[state][component].prob(ob)
        denor = 0.0
        for i in xrange(self.components):
            denor += self.gm.weights[state][i]*self.gm.distributions[state][i].prob(ob)
        return self.gamma(t, state)*nor/denor
    @do_cache
    def gamma(self, t, state):
        # State posterior at time t: alpha*beta normalised over all states.
        nor, denor = 0.0, 0.0
        nor = self.alpha(t, state)*self.beta(t, state)
        for _state in xrange(self.state_no):
            denor +=self.alpha(t, _state)*self.beta(t, _state)
        return nor/denor
    @do_cache
    def xi(self, t, state_one, state_two):
        # Transition posterior: q_t = state_one and q_{t+1} = state_two.
        nor, denor = 0, 0
        ob = self.observations[t+1]
        nor = self.gamma(t, state_one)*self.a[state_one][state_two]*self.b(state_two, ob)*self.beta(t+1, state_two)
        denor = self.beta(t, state_one)
        return nor/denor
    def gamma_sum(self, state):
        # Expected total occupancy of `state` over the sequence.
        return sum([self.gamma(t, state) for t in xrange(self.T)])
    def gamma_component_sum(self, state, component):
        # Expected occupancy of one mixture component.
        return sum([self.gamma_component(t, state, component) for t in xrange(self.T)])
    def gamma_component_observation_sum(self, state, component):
        # Occupancy-weighted sum of observations (mean-update numerator).
        return sum([numpy.multiply(self.observations[t], self.gamma_component(t, state, component)) for t in xrange(self.T)])
    def gamma_component_cov_sum(self, state, component):
        # Occupancy-weighted outer-product sum (covariance-update numerator).
        cov = lambda t:numpy.outer(self.observations[t]-self.gm.distributions[state][component].mean, self.observations[t]-self.gm.distributions[state][component].mean)
        return sum([numpy.multiply(cov(t), self.gamma_component(t, state, component)) for t in xrange(self.T)])
    def xi_sum(self, state_one, state_two):
        # Expected number of state_one -> state_two transitions.
        return sum([self.xi(t, state_one, state_two) for t in xrange(self.T-1)])
    def predict(self):
        # Sequence log-likelihood: log of the summed final forward variables.
        res = 0.0
        for _state in xrange(self.state_no):
            res += self.alpha(self.T-1, _state)
        return numpy.log(res)
class Trainner(object):
    """Baum-Welch (EM) trainer for a Gaussian-mixture HMM over several
    observation sequences sharing one emission model.

    NOTE(review): the spelling 'Trainner' is part of the public interface
    and is kept as-is.
    """
    def __init__(self, state_no, component_no, sequences):
        self.a = init_a(state_no)
        self.state_no = state_no
        # Uniform initial-state distribution.
        self.pi = np.array([1.0/state_no for i in range(state_no)])
        self.component_no = component_no
        self.sequences = sequences
        # Number of training sequences.
        self.E = len(sequences)
        self.gm = GaussianMixture(component_no=component_no, states=state_no, vector_len=len(self.sequences[0][0]))
        self.hmms = []
        for sequence in sequences:
            self.hmms.append(Hmm(state_no, self.gm, sequence, self.pi, self.a, component_no))
    def update(self):
        # Rebuild the shared emission model from the re-estimated
        # weights/means/covariances and push the new parameters into every
        # per-sequence Hmm, clearing their memo caches.
        self.gm = GaussianMixture(component_no=self.component_no, states=self.state_no, vector_len=len(self.sequences[0][0]))
        self.gm.weights = self.weight
        for i in xrange(self.state_no):
            for j in xrange(self.component_no):
                self.gm.distributions[i][j].update_mean(self.mean[i][j])
                self.gm.distributions[i][j].update_cov(self.cov[i][j])
                self.gm.distributions[i][j].update_distri()
        for n, sequence in enumerate(self.sequences):
            #self.hmms[n] = Hmm(self.state_no, self.gm, sequence, self.new_pi, self.new_a, self.component_no)
            self.hmms[n].pi = self.pi
            self.hmms[n].a = self.a
            self.hmms[n].cache = {}
            self.hmms[n].gm = self.gm
    def fit(self, num=100, threashold=0.0001):
        # Run up to `num` EM iterations; stop early once the summed change
        # in log-likelihood drops below `threashold` (sic).
        for step in xrange(num):
            new_pi = np.zeros_like(self.pi)
            new_weight = []
            new_cov = []
            new_mean = []
            new_a = np.zeros_like(self.a)
            _hmm = self.hmms[0]
            old = []
            error = 0
            # Log-likelihood of every sequence before this iteration.
            for hmm in self.hmms:
                old.append(hmm.predict())
            for state in xrange(self.state_no):
                _weights = []
                _mean = []
                _cov = []
                # pi update: average time-0 state posterior over sequences.
                for hmm in self.hmms:
                    new_pi[state] += hmm.gamma(0, state)
                new_pi[state] /= self.E
                # Shared denominator: total expected occupancy of `state`.
                gamma_denor = np.zeros_like(_hmm.gamma_sum(state))
                for hmm in self.hmms:
                    gamma_denor += hmm.gamma_sum(state)
                for component in xrange(self.component_no):
                    # Accumulate component occupancy and the weighted
                    # observation / outer-product sums over all sequences.
                    gamma_component_nor = np.zeros_like(_hmm.gamma_component_sum(state, component))
                    gamma_component_observation_sum = np.zeros_like(_hmm.gamma_component_observation_sum(state, component))
                    gamma_component_cov_sum = np.zeros_like(_hmm.gamma_component_cov_sum(state, component))
                    for hmm in self.hmms:
                        gamma_component_nor += hmm.gamma_component_sum(state, component)
                        gamma_component_observation_sum += hmm.gamma_component_observation_sum(state, component)
                        gamma_component_cov_sum += hmm.gamma_component_cov_sum(state, component)
                    _weights.append(gamma_component_nor/gamma_denor)
                    _mean.append(numpy.multiply(gamma_component_observation_sum, 1/gamma_component_nor))
                    _cov.append(numpy.multiply(gamma_component_cov_sum, 1/gamma_component_nor))
                new_mean.append(_mean)
                new_weight.append(_weights)
                new_cov.append(_cov)
                # Transition-matrix row update for `state`.
                for _state in xrange(self.state_no):
                    # NOTE(review): this zeros_like value is never used.
                    xi_sum = np.zeros_like(_hmm.xi_sum(state, state))
                    for hmm in self.hmms:
                        new_a[state][_state] += hmm.xi_sum(state, _state)
                    new_a[state][_state] /= gamma_denor
            print '--------------'
            # Convergence test: total drop in log-likelihood this iteration.
            for n, hmm in enumerate(self.hmms):
                error += old[n]-hmm.predict()
            if error <= threashold and step > 1:
                print error
                return
            else:
                # Commit the re-estimated parameters and refresh the models.
                print error
                self.a = new_a
                self.pi = new_pi
                self.mean = new_mean
                self.cov = new_cov
                self.weight = new_weight
                self.update()
    def predict(self, ob):
        # Score a new observation sequence: build a fresh emission model
        # from the trained parameters and return the log-likelihood.
        gm = GaussianMixture(component_no=self.component_no, states=self.state_no, vector_len=len(ob[0]))
        gm.weights = self.weight
        for i in xrange(self.state_no):
            for j in xrange(self.component_no):
                gm.distributions[i][j].update_mean(self.mean[i][j])
                gm.distributions[i][j].update_cov(self.cov[i][j])
                gm.distributions[i][j].update_distri()
        hmm = Hmm(self.state_no, gm, ob, self.pi, self.a, self.component_no)
        return hmm.predict()
if __name__ == "__main__":
    # Smoke test: train a 2-state, 2-component model on two toy sequences
    # and print the log-likelihood of each training sequence.
    observation_one = [[1, 2, 3], [3, 2, 4], [1, 3, 4], [2, 4, 5], [3, 2, 5]]
    observation_two = [[2, 2, 3], [3, 2, 4], [1, 3, 4], [2, 4, 5], [3, 2, 5]]
    observation_three = [[3, 2, 3], [3, 2, 4], [1, 3, 4], [2, 4, 5], [3, 2, 5]]
    a = []
    a.extend(observation_one)
    a.extend(observation_two)
    # NOTE(review): mean/cov are computed here but never used afterwards.
    mean = numpy.mean(a)
    cov = numpy.cov(a)
    obs = [observation_one, observation_two]
    train = Trainner(2, 2, obs)
    train.fit(20)
    print train.predict(obs[0])
    print train.predict(obs[1])
print train.predict(observation_three) | stammer/hmmtrain/__init__.py | import numpy as np
import numpy
from scipy.stats import multivariate_normal
class MultiGaussianDistribution(object):
def __init__(self, dimensions):
self.dimensions = dimensions
self.mean = np.zeros([dimensions])
self.cov = np.ones([dimensions, dimensions])
#self.cov = np.dot(self.cov, self.cov.T)
self.distri = multivariate_normal(mean=self.mean, cov=self.cov)
def prob(self, observation):
return self.distri.pdf(observation)
def update_mean(self, mean):
self.mean = mean
self.distri.mean = mean
def update_cov(self, cov):
self.cov = cov
self.distri.cov = cov
def update_distri(self):
self.distri = multivariate_normal(mean=self.mean, cov=self.cov)
class GaussianMixture(object):
    """Per-state Gaussian mixture emission model: `states` rows of
    `component_no` MultiGaussianDistribution instances, plus a matching
    (states x component_no) weight matrix initialised uniform."""
    def __init__(self, component_no, states, vector_len):
        self.state = range(states)
        self.component_no = component_no
        self.component = range(component_no)
        self.distributions = [[MultiGaussianDistribution(vector_len) for i in range(component_no)] for j in range(states)]
        self.weights = np.array([[1.0/component_no for i in range(component_no)] for i in range(states)])
    def save_model(self):
        # Despite the name, nothing is written to disk: the parameters of
        # every (state, component) pair are dumped to stdout.
        for component in self.component:
            for state in self.state:
                print 'component: %s, state: %s'%(component, state)
                print self.distributions[state][component].mean
                print self.distributions[state][component].cov
def init_a(state_no):
    """Build a uniform row-stochastic transition matrix (rows sum to 1)."""
    ones = np.ones([state_no, state_no])
    return ones / ones.sum(axis=1, keepdims=True)
def get_key(prefix, *args, **kwargs):
    """Build a memo key 'prefix:a-b-c' from the positional args.

    Keyword arguments are accepted but intentionally not part of the key.
    """
    parts = [str(arg) for arg in args]
    return '%s:%s' % (prefix, '-'.join(parts))
class Hmm(object):
def __init__(self, state_no, gm, sequence, pi, a, component_no):
self.state_no = state_no
self.a = a
self.pi = pi
self.gm = gm
self.components = component_no
self.observations = sequence
self.T = len(sequence)
self.cache={}
def do_cache(fn):
def _(inst, *args, **kwargs):
key = get_key(fn.func_name, *args, **kwargs)
if key not in inst.cache:
res = fn(inst, *args, **kwargs)
inst.cache[key] = res
return inst.cache[key]
return _
@do_cache
def alpha(self, t, state):
if t == 0:
return self.pi[state]*self.b(state, self.observations[0])
else:
total = 0.0
for _state in xrange(self.state_no):
total += self.alpha(t-1, _state)*self.a[_state][state]
return total*self.b(state, self.observations[t])
@do_cache
def beta(self, t, state):
if t == self.T - 1:
return 1
else:
total = 0.0
for _state in xrange(self.state_no):
total += self.a[state][_state]*self.b(_state, self.observations[t+1])*self.beta(t+1, _state)
return total
@do_cache
def b(self, state, ob):
return sum([self.b_component(state, com, ob) for com in xrange(self.components)])
@do_cache
def b_component(self, state, component, ob):
return self.gm.weights[state][component]*self.gm.distributions[state][component].prob(ob)
@do_cache
def gamma_component(self, t, state, component):
ob = self.observations[t]
nor = self.gm.weights[state][component]*self.gm.distributions[state][component].prob(ob)
denor = 0.0
for i in xrange(self.components):
denor += self.gm.weights[state][i]*self.gm.distributions[state][i].prob(ob)
return self.gamma(t, state)*nor/denor
@do_cache
def gamma(self, t, state):
nor, denor = 0.0, 0.0
nor = self.alpha(t, state)*self.beta(t, state)
for _state in xrange(self.state_no):
denor +=self.alpha(t, _state)*self.beta(t, _state)
return nor/denor
@do_cache
def xi(self, t, state_one, state_two):
nor, denor = 0, 0
ob = self.observations[t+1]
nor = self.gamma(t, state_one)*self.a[state_one][state_two]*self.b(state_two, ob)*self.beta(t+1, state_two)
denor = self.beta(t, state_one)
return nor/denor
def gamma_sum(self, state):
return sum([self.gamma(t, state) for t in xrange(self.T)])
def gamma_component_sum(self, state, component):
return sum([self.gamma_component(t, state, component) for t in xrange(self.T)])
def gamma_component_observation_sum(self, state, component):
return sum([numpy.multiply(self.observations[t], self.gamma_component(t, state, component)) for t in xrange(self.T)])
def gamma_component_cov_sum(self, state, component):
cov = lambda t:numpy.outer(self.observations[t]-self.gm.distributions[state][component].mean, self.observations[t]-self.gm.distributions[state][component].mean)
return sum([numpy.multiply(cov(t), self.gamma_component(t, state, component)) for t in xrange(self.T)])
def xi_sum(self, state_one, state_two):
return sum([self.xi(t, state_one, state_two) for t in xrange(self.T-1)])
def predict(self):
res = 0.0
for _state in xrange(self.state_no):
res += self.alpha(self.T-1, _state)
return numpy.log(res)
class Trainner(object):
def __init__(self, state_no, component_no, sequences):
self.a = init_a(state_no)
self.state_no = state_no
self.pi = np.array([1.0/state_no for i in range(state_no)])
self.component_no = component_no
self.sequences = sequences
self.E = len(sequences)
self.gm = GaussianMixture(component_no=component_no, states=state_no, vector_len=len(self.sequences[0][0]))
self.hmms = []
for sequence in sequences:
self.hmms.append(Hmm(state_no, self.gm, sequence, self.pi, self.a, component_no))
def update(self):
self.gm = GaussianMixture(component_no=self.component_no, states=self.state_no, vector_len=len(self.sequences[0][0]))
self.gm.weights = self.weight
for i in xrange(self.state_no):
for j in xrange(self.component_no):
self.gm.distributions[i][j].update_mean(self.mean[i][j])
self.gm.distributions[i][j].update_cov(self.cov[i][j])
self.gm.distributions[i][j].update_distri()
for n, sequence in enumerate(self.sequences):
#self.hmms[n] = Hmm(self.state_no, self.gm, sequence, self.new_pi, self.new_a, self.component_no)
self.hmms[n].pi = self.pi
self.hmms[n].a = self.a
self.hmms[n].cache = {}
self.hmms[n].gm = self.gm
def fit(self, num=100, threashold=0.0001):
for step in xrange(num):
new_pi = np.zeros_like(self.pi)
new_weight = []
new_cov = []
new_mean = []
new_a = np.zeros_like(self.a)
_hmm = self.hmms[0]
old = []
error = 0
for hmm in self.hmms:
old.append(hmm.predict())
for state in xrange(self.state_no):
_weights = []
_mean = []
_cov = []
for hmm in self.hmms:
new_pi[state] += hmm.gamma(0, state)
new_pi[state] /= self.E
gamma_denor = np.zeros_like(_hmm.gamma_sum(state))
for hmm in self.hmms:
gamma_denor += hmm.gamma_sum(state)
for component in xrange(self.component_no):
gamma_component_nor = np.zeros_like(_hmm.gamma_component_sum(state, component))
gamma_component_observation_sum = np.zeros_like(_hmm.gamma_component_observation_sum(state, component))
gamma_component_cov_sum = np.zeros_like(_hmm.gamma_component_cov_sum(state, component))
for hmm in self.hmms:
gamma_component_nor += hmm.gamma_component_sum(state, component)
gamma_component_observation_sum += hmm.gamma_component_observation_sum(state, component)
gamma_component_cov_sum += hmm.gamma_component_cov_sum(state, component)
_weights.append(gamma_component_nor/gamma_denor)
_mean.append(numpy.multiply(gamma_component_observation_sum, 1/gamma_component_nor))
_cov.append(numpy.multiply(gamma_component_cov_sum, 1/gamma_component_nor))
new_mean.append(_mean)
new_weight.append(_weights)
new_cov.append(_cov)
for _state in xrange(self.state_no):
xi_sum = np.zeros_like(_hmm.xi_sum(state, state))
for hmm in self.hmms:
new_a[state][_state] += hmm.xi_sum(state, _state)
new_a[state][_state] /= gamma_denor
print '--------------'
for n, hmm in enumerate(self.hmms):
error += old[n]-hmm.predict()
if error <= threashold and step > 1:
print error
return
else:
print error
self.a = new_a
self.pi = new_pi
self.mean = new_mean
self.cov = new_cov
self.weight = new_weight
self.update()
def predict(self, ob):
gm = GaussianMixture(component_no=self.component_no, states=self.state_no, vector_len=len(ob[0]))
gm.weights = self.weight
for i in xrange(self.state_no):
for j in xrange(self.component_no):
gm.distributions[i][j].update_mean(self.mean[i][j])
gm.distributions[i][j].update_cov(self.cov[i][j])
gm.distributions[i][j].update_distri()
hmm = Hmm(self.state_no, gm, ob, self.pi, self.a, self.component_no)
return hmm.predict()
if __name__ == "__main__":
    # Smoke test: train a 2-state, 2-component model on two toy sequences,
    # then score all three (the third is held out of training).
    observation_one = [[1, 2, 3], [3, 2, 4], [1, 3, 4], [2, 4, 5], [3, 2, 5]]
    observation_two = [[2, 2, 3], [3, 2, 4], [1, 3, 4], [2, 4, 5], [3, 2, 5]]
    observation_three = [[3, 2, 3], [3, 2, 4], [1, 3, 4], [2, 4, 5], [3, 2, 5]]
    a = []
    a.extend(observation_one)
    a.extend(observation_two)
    # NOTE(review): mean/cov are computed here but never used afterwards.
    mean = numpy.mean(a)
    cov = numpy.cov(a)
    obs = [observation_one, observation_two]
    train = Trainner(2, 2, obs)
    train.fit(20)
    print train.predict(obs[0])
    print train.predict(obs[1])
    print train.predict(observation_three)
import os
import wget
import tarfile
import logging
import shutil
import pytorch_lightning as pl
from typing import Tuple, Optional
from omegaconf import DictConfig
from openspeech.data.dataset import SpeechToTextDataset
from torch.utils.data import DataLoader
from openspeech.datasets import register_data_module
from openspeech.vocabs import VOCAB_REGISTRY
from openspeech.vocabs.vocab import Vocabulary
from openspeech.data.data_loader import BucketingSampler, AudioDataLoader
@register_data_module('librispeech')
class LightningLibriSpeechDataModule(pl.LightningDataModule):
    """
    PyTorch Lightning Data Module for the LibriSpeech Dataset.

    Handles (optional) download/extraction of the corpus, manifest
    generation, the train/valid/test split, and data-loader construction.

    Args:
        configs (DictConfig): configuraion set
    """
    LIBRISPEECH_TRAIN_NUM = 281241
    LIBRISPEECH_VALID_NUM = 5567
    LIBRISPEECH_TEST_NUM = 5559
    LIBRISPEECH_PARTS = [
        'dev-clean',
        'test-clean',
        'dev-other',
        'test-other',
        'train-clean-100',
        'train-clean-360',
        'train-other-500',
    ]

    def __init__(self, configs: DictConfig) -> None:
        super(LightningLibriSpeechDataModule, self).__init__()
        self.configs = configs
        self.dataset = dict()
        self.logger = logging.getLogger(__name__)

    def _parse_manifest_file(self, manifest_file_path: str) -> Tuple[list, list]:
        """Parse the tab-separated manifest into (audio_paths, transcripts)."""
        audio_paths = list()
        transcripts = list()
        with open(manifest_file_path, encoding='utf-8') as f:
            for line in f:
                # maxsplit=2 keeps transcripts containing literal tabs intact
                # (a plain split would make the 3-way unpack raise).
                audio_path, _, transcript = line.split('\t', 2)
                audio_paths.append(audio_path)
                transcripts.append(transcript.rstrip('\n'))
        return audio_paths, transcripts

    def _download_dataset(self) -> None:
        """
        Download and extract the LibriSpeech dataset, then merge the three
        train packs into a single `train-960` directory.

        - train-960 (train-clean-100, train-clean-360, train-other-500)
        - dev-clean
        - dev-other
        - test-clean
        - test-other
        """
        base_url = "http://www.openslr.org/resources/12"
        train_dir = "train-960"
        if not os.path.exists(self.configs.dataset.dataset_path):
            os.mkdir(self.configs.dataset.dataset_path)
        for part in self.LIBRISPEECH_PARTS:
            self.logger.info(f"Librispeech-{part} download..")
            wget.download(f"{base_url}/{part}.tar.gz")
            archive_path = os.path.join(self.configs.dataset.dataset_path, f"{part}.tar.gz")
            shutil.move(f"{part}.tar.gz", archive_path)
            self.logger.info(f"Un-tarring archive {self.configs.dataset.dataset_path}/{part}.tar.gz")
            # NOTE(review): extractall() on an archive fetched over plain
            # HTTP is exposed to tar path-traversal for a malicious archive;
            # consider tarfile's extraction filters (Python 3.12+).
            # `with` guarantees the archive is closed even if extraction fails.
            with tarfile.open(archive_path, mode="r:gz") as tar:
                tar.extractall(self.configs.dataset.dataset_path)
            os.remove(archive_path)
        self.logger.info("Merge all train packs into one")
        # dataset_path itself was ensured above; only the merged train
        # directory may still be missing here.
        if not os.path.exists(os.path.join(self.configs.dataset.dataset_path, train_dir)):
            os.mkdir(os.path.join(self.configs.dataset.dataset_path, train_dir))
        for part in self.LIBRISPEECH_PARTS[-3:]:  # the three train packs
            path = os.path.join(self.configs.dataset.dataset_path, part)
            for subfolder in os.listdir(path):
                shutil.move(
                    os.path.join(path, subfolder),
                    os.path.join(self.configs.dataset.dataset_path, train_dir, subfolder),
                )

    def prepare_data(self) -> Vocabulary:
        """
        Prepare LibriSpeech data: optionally download the corpus, generate
        manifest files if absent, and build the configured vocabulary.

        Returns:
            vocab (Vocabulary): vocab for the configured unit.
        """
        # NOTE(review): this existence check precedes the optional download,
        # so dataset_download still requires a pre-created dataset_path --
        # confirm whether that ordering is intentional.
        if not os.path.exists(self.configs.dataset.dataset_path):
            raise ValueError("Dataset path is not valid.")
        if self.configs.vocab.unit == 'libri_subword':
            from openspeech.datasets.librispeech.preprocess.subword import generate_manifest_files
        elif self.configs.vocab.unit == 'libri_character':
            from openspeech.datasets.librispeech.preprocess.character import generate_manifest_files
        else:
            raise ValueError(f"Unsupported vocabulary unit: {self.configs.vocab.unit}")
        if self.configs.dataset.dataset_download:
            self._download_dataset()
        if not os.path.exists(self.configs.dataset.manifest_file_path):
            self.logger.info("Manifest file is not exists !!\n"
                             "Generate manifest files..")
            kwargs = dict(
                dataset_path=self.configs.dataset.dataset_path,
                manifest_file_path=self.configs.dataset.manifest_file_path,
                vocab_path=self.configs.vocab.vocab_path,
            )
            # Subword vocabularies additionally take a target vocab size.
            if hasattr(self.configs.vocab, "vocab_size"):
                kwargs['vocab_size'] = self.configs.vocab.vocab_size
            generate_manifest_files(**kwargs)
        return VOCAB_REGISTRY[self.configs.vocab.unit](self.configs)

    def setup(self, stage: Optional[str] = None, vocab: Vocabulary = None) -> None:
        r""" Split dataset into train, valid, and test. """
        valid_end_idx = self.LIBRISPEECH_TRAIN_NUM + self.LIBRISPEECH_VALID_NUM
        audio_paths, transcripts = self._parse_manifest_file(self.configs.dataset.manifest_file_path)
        audio_paths = {
            "train": audio_paths[:self.LIBRISPEECH_TRAIN_NUM],
            "valid": audio_paths[self.LIBRISPEECH_TRAIN_NUM:valid_end_idx],
            "test": audio_paths[valid_end_idx:],
        }
        transcripts = {
            "train": transcripts[:self.LIBRISPEECH_TRAIN_NUM],
            "valid": transcripts[self.LIBRISPEECH_TRAIN_NUM:valid_end_idx],
            "test": transcripts[valid_end_idx:],
        }
        # Loop variable renamed: the original shadowed the `stage`
        # parameter supplied by Lightning.
        for split in audio_paths.keys():
            self.dataset[split] = SpeechToTextDataset(
                configs=self.configs,
                dataset_path=self.configs.dataset.dataset_path,
                audio_paths=audio_paths[split],
                transcripts=transcripts[split],
                sos_id=vocab.sos_id,
                eos_id=vocab.eos_id,
                apply_spec_augment=self.configs.audio.apply_spec_augment if split == 'train' else False,
                del_silence=self.configs.audio.del_silence if split == 'train' else False,
            )

    def _build_dataloader(self, split: str) -> DataLoader:
        # Shared sampler/loader wiring for all three splits.
        sampler = BucketingSampler(self.dataset[split], batch_size=self.configs.trainer.batch_size)
        return AudioDataLoader(
            dataset=self.dataset[split],
            num_workers=self.configs.trainer.num_workers,
            batch_sampler=sampler,
        )

    def train_dataloader(self) -> DataLoader:
        """Return the bucketing data loader for the train split."""
        return self._build_dataloader('train')

    def val_dataloader(self) -> DataLoader:
        """Return the bucketing data loader for the valid split."""
        return self._build_dataloader('valid')

    def test_dataloader(self) -> DataLoader:
        """Return the bucketing data loader for the test split."""
        return self._build_dataloader('test')
import os
import wget
import tarfile
import logging
import shutil
import pytorch_lightning as pl
from typing import Tuple, Optional
from omegaconf import DictConfig
from openspeech.data.dataset import SpeechToTextDataset
from torch.utils.data import DataLoader
from openspeech.datasets import register_data_module
from openspeech.vocabs import VOCAB_REGISTRY
from openspeech.vocabs.vocab import Vocabulary
from openspeech.data.data_loader import BucketingSampler, AudioDataLoader
@register_data_module('librispeech')
class LightningLibriSpeechDataModule(pl.LightningDataModule):
"""
PyTorch Lightning Data Module for LibriSpeech Dataset.
Args:
configs (DictConfig): configuraion set
"""
LIBRISPEECH_TRAIN_NUM = 281241
LIBRISPEECH_VALID_NUM = 5567
LIBRISPEECH_TEST_NUM = 5559
LIBRISPEECH_PARTS = [
'dev-clean',
'test-clean',
'dev-other',
'test-other',
'train-clean-100',
'train-clean-360',
'train-other-500',
]
    def __init__(self, configs: DictConfig) -> None:
        """Store the configuration and prepare empty dataset bookkeeping."""
        super(LightningLibriSpeechDataModule, self).__init__()
        self.configs = configs
        # Filled by `setup()` with 'train' / 'valid' / 'test' SpeechToTextDatasets.
        self.dataset = dict()
        self.logger = logging.getLogger(__name__)
def _parse_manifest_file(self, manifest_file_path: str) -> Tuple[list, list]:
""" Parsing manifest file """
audio_paths = list()
transcripts = list()
with open(manifest_file_path) as f:
for idx, line in enumerate(f.readlines()):
audio_path, _, transcript = line.split('\t')
transcript = transcript.replace('\n', '')
audio_paths.append(audio_path)
transcripts.append(transcript)
return audio_paths, transcripts
    def _download_dataset(self) -> None:
        """
        Download and unpack the LibriSpeech corpus into the configured dataset path.

        Parts downloaded from OpenSLR resource 12:
         - train-960 (merged from train-clean-100, train-clean-360, train-other-500)
         - dev-clean
         - dev-other
         - test-clean
         - test-other
        """
        base_url = "http://www.openslr.org/resources/12"
        train_dir = "train-960"
        if not os.path.exists(self.configs.dataset.dataset_path):
            os.mkdir(self.configs.dataset.dataset_path)
        for part in self.LIBRISPEECH_PARTS:
            self.logger.info(f"Librispeech-{part} download..")
            url = f"{base_url}/{part}.tar.gz"
            # wget.download saves into the current working directory; the
            # archive is then moved next to the dataset before extraction.
            wget.download(url)
            shutil.move(f"{part}.tar.gz", os.path.join(self.configs.dataset.dataset_path, f"{part}.tar.gz"))
            self.logger.info(f"Un-tarring archive {self.configs.dataset.dataset_path}/{part}.tar.gz")
            tar = tarfile.open(f"{self.configs.dataset.dataset_path}/{part}.tar.gz", mode="r:gz")
            tar.extractall(self.configs.dataset.dataset_path)
            tar.close()
            # Remove the archive once extracted to free disk space.
            os.remove(f"{self.configs.dataset.dataset_path}/{part}.tar.gz")
        self.logger.info("Merge all train packs into one")
        # NOTE(review): this existence check is redundant — dataset_path was
        # already created above; confirm before removing.
        if not os.path.exists(self.configs.dataset.dataset_path):
            os.mkdir(self.configs.dataset.dataset_path)
        if not os.path.exists(os.path.join(self.configs.dataset.dataset_path, train_dir)):
            os.mkdir(os.path.join(self.configs.dataset.dataset_path, train_dir))
        # The last three LIBRISPEECH_PARTS entries are the training packs;
        # their speaker sub-folders are merged into a single train-960 tree.
        for part in self.LIBRISPEECH_PARTS[-3:]:  # train
            path = os.path.join(self.configs.dataset.dataset_path, part)
            subfolders = os.listdir(path)
            for subfolder in subfolders:
                shutil.move(
                    os.path.join(path, subfolder),
                    os.path.join(self.configs.dataset.dataset_path, train_dir, subfolder),
                )
def prepare_data(self) -> Vocabulary:
"""
Prepare librispeech data
Returns:
vocab (Vocabulary): vocab class of KsponSpeech.
"""
if not os.path.exists(self.configs.dataset.dataset_path):
raise ValueError("Dataset path is not valid.")
if self.configs.vocab.unit == 'libri_subword':
from openspeech.datasets.librispeech.preprocess.subword import generate_manifest_files
elif self.configs.vocab.unit == 'libri_character':
from openspeech.datasets.librispeech.preprocess.character import generate_manifest_files
else:
raise ValueError(f"Unsupported vocabulary unit: {self.configs.vocab.unit}")
if self.configs.dataset.dataset_download:
self._download_dataset()
if not os.path.exists(self.configs.dataset.manifest_file_path):
self.logger.info("Manifest file is not exists !!\n"
"Generate manifest files..")
if hasattr(self.configs.vocab, "vocab_size"):
generate_manifest_files(
dataset_path=self.configs.dataset.dataset_path,
manifest_file_path=self.configs.dataset.manifest_file_path,
vocab_path=self.configs.vocab.vocab_path,
vocab_size=self.configs.vocab.vocab_size,
)
else:
generate_manifest_files(
dataset_path=self.configs.dataset.dataset_path,
manifest_file_path=self.configs.dataset.manifest_file_path,
vocab_path=self.configs.vocab.vocab_path,
)
return VOCAB_REGISTRY[self.configs.vocab.unit](self.configs)
    def setup(self, stage: Optional[str] = None, vocab: Vocabulary = None) -> None:
        r"""
        Split the parsed manifest into train / valid / test and build a
        ``SpeechToTextDataset`` per split (stored in ``self.dataset``).

        Args:
            stage: Lightning stage hint; unused — all splits are always built.
            vocab (Vocabulary): vocabulary providing ``sos_id``/``eos_id``;
                must not be None despite the default.
        """
        valid_end_idx = self.LIBRISPEECH_TRAIN_NUM + self.LIBRISPEECH_VALID_NUM
        audio_paths, transcripts = self._parse_manifest_file(self.configs.dataset.manifest_file_path)
        # Manifest rows are ordered train -> valid -> test, so plain slicing
        # by the class-level sample counts yields the three splits.
        audio_paths = {
            "train": audio_paths[:self.LIBRISPEECH_TRAIN_NUM],
            "valid": audio_paths[self.LIBRISPEECH_TRAIN_NUM:valid_end_idx],
            "test": audio_paths[valid_end_idx:],
        }
        transcripts = {
            "train": transcripts[:self.LIBRISPEECH_TRAIN_NUM],
            "valid": transcripts[self.LIBRISPEECH_TRAIN_NUM:valid_end_idx],
            "test": transcripts[valid_end_idx:],
        }
        # NOTE(review): this loop variable shadows the `stage` parameter.
        for stage in audio_paths.keys():
            self.dataset[stage] = SpeechToTextDataset(
                configs=self.configs,
                dataset_path=self.configs.dataset.dataset_path,
                audio_paths=audio_paths[stage],
                transcripts=transcripts[stage],
                sos_id=vocab.sos_id,
                eos_id=vocab.eos_id,
                # SpecAugment / silence removal apply to the training split only.
                apply_spec_augment=self.configs.audio.apply_spec_augment if stage == 'train' else False,
                del_silence=self.configs.audio.del_silence if stage == 'train' else False,
            )
    def train_dataloader(self) -> DataLoader:
        """Return the training DataLoader; batches are formed by a length-bucketing sampler."""
        train_sampler = BucketingSampler(self.dataset['train'], batch_size=self.configs.trainer.batch_size)
        return AudioDataLoader(
            dataset=self.dataset['train'],
            num_workers=self.configs.trainer.num_workers,
            batch_sampler=train_sampler,
        )
    def val_dataloader(self) -> DataLoader:
        """Return the validation DataLoader; batches are formed by a length-bucketing sampler."""
        valid_sampler = BucketingSampler(self.dataset['valid'], batch_size=self.configs.trainer.batch_size)
        return AudioDataLoader(
            dataset=self.dataset['valid'],
            num_workers=self.configs.trainer.num_workers,
            batch_sampler=valid_sampler,
        )
def test_dataloader(self) -> DataLoader:
test_sampler = BucketingSampler(self.dataset['test'], batch_size=self.configs.trainer.batch_size)
return AudioDataLoader(
dataset=self.dataset['test'],
num_workers=self.configs.trainer.num_workers,
batch_sampler=test_sampler,
) | 0.698124 | 0.156395 |
def fix_multi_T1w_source_name(in_files):
    """
    Make up a generic source name when there are multiple T1s

    >>> fix_multi_T1w_source_name([
    ...     '/path/to/sub-045_ses-test_T1w.nii.gz',
    ...     '/path/to/sub-045_ses-retest_T1w.nii.gz'])
    '/path/to/sub-045_T1w.nii.gz'
    """
    import os
    from nipype.utils.filemanip import filename_to_list
    dirname, basename = os.path.split(filename_to_list(in_files)[0])
    # The first underscore-delimited entity is 'sub-<label>'; keep the label.
    subject = basename.split("_", 1)[0].split("-")[1]
    return os.path.join(dirname, "sub-%s_T1w.nii.gz" % subject)
def meepi_optimal_comb_source_name(in_files):
    """
    Create a new source name when optimally
    combining multiple multi-echo EPIs

    >>> meepi_optimal_comb_source_name([
    ...     'sub-01_run-01_echo-1_bold.nii.gz',
    ...     'sub-01_run-01_echo-2_bold.nii.gz',
    ...     'sub-01_run-01_echo-3_bold.nii.gz',])
    'sub-01_run-01_bold.nii.gz'
    """
    import os
    from nipype.utils.filemanip import filename_to_list
    dirname, fname = os.path.split(filename_to_list(in_files)[0])
    # Drop the BIDS 'echo-*' entity; all other entities keep their order.
    kept = [entity for entity in fname.split('_') if not entity.startswith('echo-')]
    return os.path.join(dirname, '_'.join(kept))
def add_suffix(in_files, suffix):
    """
    Wrap nipype's fname_presuffix to conveniently just add a suffix

    >>> add_suffix([
    ...     '/path/to/sub-045_ses-test_T1w.nii.gz',
    ...     '/path/to/sub-045_ses-retest_T1w.nii.gz'], '_test')
    'sub-045_ses-test_T1w_test.nii.gz'
    """
    import os.path as op
    from nipype.utils.filemanip import fname_presuffix, filename_to_list
    first = filename_to_list(in_files)[0]
    return op.basename(fname_presuffix(first, suffix=suffix))
def read_crashfile(path):
    """
    Load a nipype crash file into a dict.

    Both on-disk formats nipype produces are supported: pickled ``.pklz``
    files and plain-text ``.txt`` files.

    Raises:
        ValueError: for any other extension — the original silently returned
            ``None``, hiding unsupported inputs from the caller.
    """
    if path.endswith('.pklz'):
        return _read_pkl(path)
    if path.endswith('.txt'):
        return _read_txt(path)
    raise ValueError("Unknown crash file format: %s" % path)
def _read_pkl(path):
    """Parse a pickled (.pklz) nipype crash file into a report dict."""
    from nipype.utils.filemanip import loadcrash
    crash_data = loadcrash(path)
    data = {'file': path,
            'traceback': ''.join(crash_data['traceback'])}
    # Node information is only present when the crash happened inside a node.
    if 'node' in crash_data:
        data['node'] = crash_data['node']
        if data['node'].base_dir:
            data['node_dir'] = data['node'].output_dir()
        else:
            # No base_dir means the node never reached execution.
            data['node_dir'] = "Node crashed before execution"
        data['inputs'] = sorted(data['node'].inputs.trait_get().items())
    return data
def _read_txt(path):
    """
    Parse a plain-text nipype crash file into a report dict.

    Expected layout: a ``Node: <name>`` line, a node-directory line, an
    input listing starting on line 5, then a standard Python traceback.
    """
    with open(path, 'r') as fp:
        lines = fp.readlines()
    data = {'file': path}
    traceback_start = 0
    if lines[0].startswith('Node'):
        data['node'] = lines[0].split(': ', 1)[1].strip()
        data['node_dir'] = lines[1].split(': ', 1)[1].strip()
        inputs = []
        cur_key = ''
        cur_val = ''
        # Values may continue across indented lines; accumulate until the
        # next non-indented line.
        for i, line in enumerate(lines[5:]):
            if line[0].isspace():
                cur_val += line
                continue
            if cur_val:
                inputs.append((cur_key, cur_val.strip()))
            if line.startswith("Traceback ("):
                # +5 because enumeration started at lines[5:].
                traceback_start = i + 5
                break
            cur_key, cur_val = tuple(line.split(' = ', 1))
        data['inputs'] = sorted(inputs)
    else:
        data['node_dir'] = "Node crashed before execution"
    # NOTE(review): if no "Traceback (" line is found, traceback_start stays
    # 0 and the whole file becomes the traceback — confirm this is intended.
    data['traceback'] = ''.join(lines[traceback_start:]).strip()
    return data
if __name__ == '__main__':
pass | fmriprep/utils/misc.py | def fix_multi_T1w_source_name(in_files):
"""
Make up a generic source name when there are multiple T1s
>>> fix_multi_T1w_source_name([
... '/path/to/sub-045_ses-test_T1w.nii.gz',
... '/path/to/sub-045_ses-retest_T1w.nii.gz'])
'/path/to/sub-045_T1w.nii.gz'
"""
import os
from nipype.utils.filemanip import filename_to_list
base, in_file = os.path.split(filename_to_list(in_files)[0])
subject_label = in_file.split("_", 1)[0].split("-")[1]
return os.path.join(base, "sub-%s_T1w.nii.gz" % subject_label)
def meepi_optimal_comb_source_name(in_files):
    """
    Create a new source name when optimally
    combining multiple multi-echo EPIs

    >>> meepi_optimal_comb_source_name([
    ...     'sub-01_run-01_echo-1_bold.nii.gz',
    ...     'sub-01_run-01_echo-2_bold.nii.gz',
    ...     'sub-01_run-01_echo-3_bold.nii.gz',])
    'sub-01_run-01_bold.nii.gz'
    """
    import os
    from nipype.utils.filemanip import filename_to_list
    base, in_file = os.path.split(filename_to_list(in_files)[0])
    # Drop the BIDS 'echo-*' entity; all other entities keep their order.
    entities = [ent for ent in in_file.split('_') if not ent.startswith('echo-')]
    basename = '_'.join(entities)
    return os.path.join(base, basename)
def add_suffix(in_files, suffix):
    """
    Wrap nipype's fname_presuffix to conveniently just add a suffix

    >>> add_suffix([
    ...     '/path/to/sub-045_ses-test_T1w.nii.gz',
    ...     '/path/to/sub-045_ses-retest_T1w.nii.gz'], '_test')
    'sub-045_ses-test_T1w_test.nii.gz'
    """
    import os.path as op
    from nipype.utils.filemanip import fname_presuffix, filename_to_list
    # Only the first input file is used; the directory part is stripped.
    return op.basename(fname_presuffix(filename_to_list(in_files)[0],
                                       suffix=suffix))
def read_crashfile(path):
    """Dispatch to the crash-file parser matching *path*'s extension (.pklz or .txt)."""
    if path.endswith('.txt'):
        return _read_txt(path)
    if path.endswith('.pklz'):
        return _read_pkl(path)
def _read_pkl(path):
    """Parse a pickled (.pklz) nipype crash file into a report dict."""
    from nipype.utils.filemanip import loadcrash
    crash_data = loadcrash(path)
    data = {'file': path,
            'traceback': ''.join(crash_data['traceback'])}
    # Node information is only present when the crash happened inside a node.
    if 'node' in crash_data:
        data['node'] = crash_data['node']
        if data['node'].base_dir:
            data['node_dir'] = data['node'].output_dir()
        else:
            # No base_dir means the node never reached execution.
            data['node_dir'] = "Node crashed before execution"
        data['inputs'] = sorted(data['node'].inputs.trait_get().items())
    return data
def _read_txt(path):
    """
    Parse a plain-text nipype crash file into a report dict.

    Expected layout: a ``Node: <name>`` line, a node-directory line, an
    input listing starting on line 5, then a standard Python traceback.
    """
    with open(path, 'r') as fp:
        lines = fp.readlines()
    data = {'file': path}
    traceback_start = 0
    if lines[0].startswith('Node'):
        data['node'] = lines[0].split(': ', 1)[1].strip()
        data['node_dir'] = lines[1].split(': ', 1)[1].strip()
        inputs = []
        cur_key = ''
        cur_val = ''
        # Values may continue across indented lines; accumulate until the
        # next non-indented line.
        for i, line in enumerate(lines[5:]):
            if line[0].isspace():
                cur_val += line
                continue
            if cur_val:
                inputs.append((cur_key, cur_val.strip()))
            if line.startswith("Traceback ("):
                # +5 because enumeration started at lines[5:].
                traceback_start = i + 5
                break
            cur_key, cur_val = tuple(line.split(' = ', 1))
        data['inputs'] = sorted(inputs)
    else:
        data['node_dir'] = "Node crashed before execution"
    # NOTE(review): if no "Traceback (" line is found, traceback_start stays
    # 0 and the whole file becomes the traceback — confirm this is intended.
    data['traceback'] = ''.join(lines[traceback_start:]).strip()
    return data
if __name__ == '__main__':
pass | 0.478041 | 0.243502 |
import os
# Repository layout: this script sits next to the GameAnalyticsSDK sources.
ROBLOX_DIR = os.path.abspath(os.path.join(__file__, '..'))
ROBLOX_GA_DIR = os.path.join(ROBLOX_DIR, "GameAnalyticsSDK")
# Template consumed by main() and the release artifact it produces.
RBXMX_TMP_FILE = os.path.join(ROBLOX_DIR, "GameAnalyticsSDK.rbxmx.tmp")
RBXMX_RELEASE_FILE = os.path.join(ROBLOX_DIR, "release", "GameAnalyticsSDK.rbxmx")
# Each *_BODY constant is a placeholder string inside the template; the
# matching *_FILE constant is the source file whose contents replace it.
GAMEANALYTICSSERVER_BODY = 'GameAnalyticsServer_BODY'
GAMEANALYTICSSERVER_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalyticsServer.server.lua")
GAMEANALYTICSSERVERINITUSINGSETTINGS_BODY = 'GameAnalyticsServerInitUsingSettings_BODY'
GAMEANALYTICSSERVERINITUSINGSETTINGS_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalyticsServerInitUsingSettings.server.lua")
INSTALL_BODY = 'INSTALL_BODY'
INSTALL_FILE = os.path.join(ROBLOX_GA_DIR, "INSTALL.txt")
GAMEANALYTICSCLIENT_BODY = 'GameAnalyticsClient_BODY'
GAMEANALYTICSCLIENT_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalyticsClient.client.lua")
GAMEANALYTICS_BODY = 'GameAnalytics_BODY'
GAMEANALYTICS_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "init.lua")
SETTINGS_BODY = 'Settings_BODY'
SETTINGS_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "Settings.lua")
HTTPAPI_BODY = 'HttpApi_BODY'
HTTPAPI_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "HttpApi", "init.lua")
# lockbox crypto library and its helpers (used by the HTTP API layer).
LOCKBOX_BODY = 'lockbox_BODY'
LOCKBOX_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "HttpApi", "Encoding", "lockbox", "init.lua")
STREAM_BODY = 'stream_BODY'
STREAM_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "HttpApi", "Encoding", "lockbox", "util", "stream.lua")
QUEUE_BODY = 'queue_BODY'
QUEUE_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "HttpApi", "Encoding", "lockbox", "util", "queue.lua")
ARRAY_BODY = 'array_BODY'
ARRAY_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "HttpApi", "Encoding", "lockbox", "util", "array.lua")
BASE64_BODY = 'base64_BODY'
BASE64_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "HttpApi", "Encoding", "lockbox", "util", "base64.lua")
UTIL_BIT_BODY = 'util_bit_BODY'
UTIL_BIT_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "HttpApi", "Encoding", "lockbox", "util", "bit.lua")
HMAC_BODY = 'hmac_BODY'
HMAC_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "HttpApi", "Encoding", "lockbox", "mac", "hmac.lua")
SHA2_256_BODY = 'sha2_256_BODY'
SHA2_256_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "HttpApi", "Encoding", "lockbox", "digest", "sha2_256.lua")
BIT_BODY = 'bit_BODY'
BIT_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "HttpApi", "Encoding", "bit.lua")
# Core SDK modules.
LOGGER_BODY = 'Logger_BODY'
LOGGER_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "Logger.lua")
STORE_BODY = 'Store_BODY'
STORE_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "Store.lua")
EVENTS_BODY = 'Events_BODY'
EVENTS_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "Events.lua")
UTILITIES_BODY = 'Utilities_BODY'
UTILITIES_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "Utilities.lua")
VERSION_BODY = 'Version_BODY'
VERSION_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "Version.lua")
STATE_BODY = 'State_BODY'
STATE_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "State.lua")
VALIDATION_BODY = 'Validation_BODY'
VALIDATION_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "Validation.lua")
THREADING_BODY = 'Threading_BODY'
THREADING_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "Threading.lua")
GAERRORSEVERITY_BODY = 'GAErrorSeverity_BODY'
GAERRORSEVERITY_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "GAErrorSeverity.lua")
GAPROGRESSIONSTATUS_BODY = 'GAProgressionStatus_BODY'
GAPROGRESSIONSTATUS_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "GAProgressionStatus.lua")
GARESOURCEFLOWTYPE_BODY = 'GAResourceFlowType_BODY'
GARESOURCEFLOWTYPE_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "GAResourceFlowType.lua")
def main():
    """
    Generate ``release/GameAnalyticsSDK.rbxmx`` from the template file.

    The template (``GameAnalyticsSDK.rbxmx.tmp``) contains one ``*_BODY``
    placeholder per SDK source file; each placeholder is replaced with the
    contents of the corresponding source file.
    """
    print('--- generating rbxmx file ----')
    # (placeholder, source file) pairs — replaces the 27 copy-pasted
    # read-and-replace stanzas of the previous implementation.
    replacements = (
        (GAMEANALYTICSSERVER_BODY, GAMEANALYTICSSERVER_FILE),
        (GAMEANALYTICSSERVERINITUSINGSETTINGS_BODY, GAMEANALYTICSSERVERINITUSINGSETTINGS_FILE),
        (INSTALL_BODY, INSTALL_FILE),
        (GAMEANALYTICSCLIENT_BODY, GAMEANALYTICSCLIENT_FILE),
        (GAMEANALYTICS_BODY, GAMEANALYTICS_FILE),
        (SETTINGS_BODY, SETTINGS_FILE),
        (HTTPAPI_BODY, HTTPAPI_FILE),
        (LOCKBOX_BODY, LOCKBOX_FILE),
        (STREAM_BODY, STREAM_FILE),
        (QUEUE_BODY, QUEUE_FILE),
        (ARRAY_BODY, ARRAY_FILE),
        (BASE64_BODY, BASE64_FILE),
        (UTIL_BIT_BODY, UTIL_BIT_FILE),
        (HMAC_BODY, HMAC_FILE),
        (SHA2_256_BODY, SHA2_256_FILE),
        (BIT_BODY, BIT_FILE),
        (LOGGER_BODY, LOGGER_FILE),
        (STORE_BODY, STORE_FILE),
        (EVENTS_BODY, EVENTS_FILE),
        (UTILITIES_BODY, UTILITIES_FILE),
        (VERSION_BODY, VERSION_FILE),
        (STATE_BODY, STATE_FILE),
        (VALIDATION_BODY, VALIDATION_FILE),
        (THREADING_BODY, THREADING_FILE),
        (GAERRORSEVERITY_BODY, GAERRORSEVERITY_FILE),
        (GAPROGRESSIONSTATUS_BODY, GAPROGRESSIONSTATUS_FILE),
        (GARESOURCEFLOWTYPE_BODY, GARESOURCEFLOWTYPE_FILE),
    )
    with open(RBXMX_TMP_FILE, 'r') as rbxmx_tmp_file:
        rbxmx_contents = rbxmx_tmp_file.read()
    for placeholder, source_path in replacements:
        with open(source_path, 'r') as source_file:
            rbxmx_contents = rbxmx_contents.replace(placeholder, source_file.read())
    # `with` guarantees the handle is closed; the old truncate() call was a
    # no-op after a fresh 'w'-mode write.
    with open(RBXMX_RELEASE_FILE, "w") as release_file:
        release_file.write(rbxmx_contents)
    print('--- done generating rbxmx file ----')
if __name__ == '__main__':
main() | generate_rbxmx_file.py | import os
# Repository layout: this script sits next to the GameAnalyticsSDK sources.
ROBLOX_DIR = os.path.abspath(os.path.join(__file__, '..'))
ROBLOX_GA_DIR = os.path.join(ROBLOX_DIR, "GameAnalyticsSDK")
# Template consumed by main() and the release artifact it produces.
RBXMX_TMP_FILE = os.path.join(ROBLOX_DIR, "GameAnalyticsSDK.rbxmx.tmp")
RBXMX_RELEASE_FILE = os.path.join(ROBLOX_DIR, "release", "GameAnalyticsSDK.rbxmx")
# Each *_BODY constant is a placeholder string inside the template; the
# matching *_FILE constant is the source file whose contents replace it.
GAMEANALYTICSSERVER_BODY = 'GameAnalyticsServer_BODY'
GAMEANALYTICSSERVER_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalyticsServer.server.lua")
GAMEANALYTICSSERVERINITUSINGSETTINGS_BODY = 'GameAnalyticsServerInitUsingSettings_BODY'
GAMEANALYTICSSERVERINITUSINGSETTINGS_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalyticsServerInitUsingSettings.server.lua")
INSTALL_BODY = 'INSTALL_BODY'
INSTALL_FILE = os.path.join(ROBLOX_GA_DIR, "INSTALL.txt")
GAMEANALYTICSCLIENT_BODY = 'GameAnalyticsClient_BODY'
GAMEANALYTICSCLIENT_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalyticsClient.client.lua")
GAMEANALYTICS_BODY = 'GameAnalytics_BODY'
GAMEANALYTICS_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "init.lua")
SETTINGS_BODY = 'Settings_BODY'
SETTINGS_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "Settings.lua")
HTTPAPI_BODY = 'HttpApi_BODY'
HTTPAPI_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "HttpApi", "init.lua")
# lockbox crypto library and its helpers (used by the HTTP API layer).
LOCKBOX_BODY = 'lockbox_BODY'
LOCKBOX_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "HttpApi", "Encoding", "lockbox", "init.lua")
STREAM_BODY = 'stream_BODY'
STREAM_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "HttpApi", "Encoding", "lockbox", "util", "stream.lua")
QUEUE_BODY = 'queue_BODY'
QUEUE_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "HttpApi", "Encoding", "lockbox", "util", "queue.lua")
ARRAY_BODY = 'array_BODY'
ARRAY_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "HttpApi", "Encoding", "lockbox", "util", "array.lua")
BASE64_BODY = 'base64_BODY'
BASE64_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "HttpApi", "Encoding", "lockbox", "util", "base64.lua")
UTIL_BIT_BODY = 'util_bit_BODY'
UTIL_BIT_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "HttpApi", "Encoding", "lockbox", "util", "bit.lua")
HMAC_BODY = 'hmac_BODY'
HMAC_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "HttpApi", "Encoding", "lockbox", "mac", "hmac.lua")
SHA2_256_BODY = 'sha2_256_BODY'
SHA2_256_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "HttpApi", "Encoding", "lockbox", "digest", "sha2_256.lua")
BIT_BODY = 'bit_BODY'
BIT_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "HttpApi", "Encoding", "bit.lua")
# Core SDK modules.
LOGGER_BODY = 'Logger_BODY'
LOGGER_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "Logger.lua")
STORE_BODY = 'Store_BODY'
STORE_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "Store.lua")
EVENTS_BODY = 'Events_BODY'
EVENTS_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "Events.lua")
UTILITIES_BODY = 'Utilities_BODY'
UTILITIES_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "Utilities.lua")
VERSION_BODY = 'Version_BODY'
VERSION_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "Version.lua")
STATE_BODY = 'State_BODY'
STATE_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "State.lua")
VALIDATION_BODY = 'Validation_BODY'
VALIDATION_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "Validation.lua")
THREADING_BODY = 'Threading_BODY'
THREADING_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "Threading.lua")
GAERRORSEVERITY_BODY = 'GAErrorSeverity_BODY'
GAERRORSEVERITY_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "GAErrorSeverity.lua")
GAPROGRESSIONSTATUS_BODY = 'GAProgressionStatus_BODY'
GAPROGRESSIONSTATUS_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "GAProgressionStatus.lua")
GARESOURCEFLOWTYPE_BODY = 'GAResourceFlowType_BODY'
GARESOURCEFLOWTYPE_FILE = os.path.join(ROBLOX_GA_DIR, "GameAnalytics", "GAResourceFlowType.lua")
def main():
    """
    Generate ``release/GameAnalyticsSDK.rbxmx`` from the template file.

    The template (``GameAnalyticsSDK.rbxmx.tmp``) contains one ``*_BODY``
    placeholder per SDK source file; each placeholder is replaced with the
    contents of the corresponding source file.
    """
    print('--- generating rbxmx file ----')
    # (placeholder, source file) pairs — replaces the 27 copy-pasted
    # read-and-replace stanzas of the previous implementation.
    replacements = (
        (GAMEANALYTICSSERVER_BODY, GAMEANALYTICSSERVER_FILE),
        (GAMEANALYTICSSERVERINITUSINGSETTINGS_BODY, GAMEANALYTICSSERVERINITUSINGSETTINGS_FILE),
        (INSTALL_BODY, INSTALL_FILE),
        (GAMEANALYTICSCLIENT_BODY, GAMEANALYTICSCLIENT_FILE),
        (GAMEANALYTICS_BODY, GAMEANALYTICS_FILE),
        (SETTINGS_BODY, SETTINGS_FILE),
        (HTTPAPI_BODY, HTTPAPI_FILE),
        (LOCKBOX_BODY, LOCKBOX_FILE),
        (STREAM_BODY, STREAM_FILE),
        (QUEUE_BODY, QUEUE_FILE),
        (ARRAY_BODY, ARRAY_FILE),
        (BASE64_BODY, BASE64_FILE),
        (UTIL_BIT_BODY, UTIL_BIT_FILE),
        (HMAC_BODY, HMAC_FILE),
        (SHA2_256_BODY, SHA2_256_FILE),
        (BIT_BODY, BIT_FILE),
        (LOGGER_BODY, LOGGER_FILE),
        (STORE_BODY, STORE_FILE),
        (EVENTS_BODY, EVENTS_FILE),
        (UTILITIES_BODY, UTILITIES_FILE),
        (VERSION_BODY, VERSION_FILE),
        (STATE_BODY, STATE_FILE),
        (VALIDATION_BODY, VALIDATION_FILE),
        (THREADING_BODY, THREADING_FILE),
        (GAERRORSEVERITY_BODY, GAERRORSEVERITY_FILE),
        (GAPROGRESSIONSTATUS_BODY, GAPROGRESSIONSTATUS_FILE),
        (GARESOURCEFLOWTYPE_BODY, GARESOURCEFLOWTYPE_FILE),
    )
    with open(RBXMX_TMP_FILE, 'r') as rbxmx_tmp_file:
        rbxmx_contents = rbxmx_tmp_file.read()
    for placeholder, source_path in replacements:
        with open(source_path, 'r') as source_file:
            rbxmx_contents = rbxmx_contents.replace(placeholder, source_file.read())
    # `with` guarantees the handle is closed; the old truncate() call was a
    # no-op after a fresh 'w'-mode write.
    with open(RBXMX_RELEASE_FILE, "w") as release_file:
        release_file.write(rbxmx_contents)
    print('--- done generating rbxmx file ----')
if __name__ == '__main__':
main() | 0.1291 | 0.065575 |
from unittest.mock import MagicMock
import numpy as np
# ----- Test Example #1 ------
# ex1: The distribution of samples of a test example.
# 15 samples in a 2-D feature space.
X_dsel_ex1 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
                       [1, 1], [0.75, 0.5], [1.5, 1.5],
                       [1, -1], [-0.5, 0.5], [0.5, 0.5],
                       [0, -1], [0.75, -0.5], [0.0, 0.0],
                       [-1, -1], [0, -0.5], [1, -1]])
# Labels associated with the samples. This information is used by techniques based on a posteriori information (LCA and
# a posteriori in the calculation of the competence level).
y_dsel_ex1 = np.array([0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0])
# Pre-processed results on DSEL. This information is used by the majority of DS techniques to estimate the competence.
# Shape (15, 3): one hit/miss flag per sample per base classifier.
dsel_processed_ex1 = np.array([[1, 1, 1], [0, 0, 1], [0, 0, 1], [1, 1, 0], [1, 1, 1], [0, 0, 1], [0, 0, 0], [1, 1, 1],
                               [1, 1, 0], [0, 0, 1], [0, 0, 1], [0, 1, 0], [0, 1, 0], [1, 1, 1], [1, 1, 1]])
# pre-calculated indices of 7 Nearest neighbors used to estimate the competence level
neighbors_ex1 = np.array([[8, 11, 4, 7, 13, 10, 1],
                          [7, 1, 11, 13, 0, 8, 4],
                          [5, 3, 4, 8, 10, 11, 7]])
# Scores obtained for the two classes. This information is used by the techniques based on posterior probabilities.
# Shape (15, 3, 2): 15 samples, 3 classifiers, 2 classes.
dsel_scores_ex1 = np.tile(np.array([[1.0, 0.0], [0.5, 0.5], [0.8, 0.2]]), (15, 1, 1))
# Distance information is used by the probabilistic techniques (des.probabilistic) as well as the MLA, A Priori and
# A Posteriori methods. Three values are considered: all zeros, all ones and the real distances calculated on the toy
# example.
distances_ex1 = np.array([[0.35355339, 0.35355339, 0.55901699, 0.79056942, 0.79056942, 0.90138782, 1.03077641],
                          [0.3, 0.39051248, 0.53851648, 0.86023253, 0.94339811, 1.04403065, 1.28549601],
                          [0.70710678, 1.41421356, 1.95256242, 2.12132034, 2.79508497, 2.82842712, 2.91547595]])
# ----- Test Example all ones ------
dsel_processed_all_ones = np.ones((15, 3))
dsel_scores_all_ones = np.ones((15, 3, 2))
distances_all_ones = np.ones((3, 7))
# ----- Test Example all zeros ------
dsel_processed_all_zeros = np.zeros((15, 3))
dsel_scores_all_zeros = np.zeros((15, 3, 2))
distances_all_zeros = np.zeros((3, 7))
# ----- Test Example from Combining pattern classifiers ------
# This example is used to test the results of the A priori, A posteriori and MLA techniques
distances_ex_kuncheva = np.linspace(1, 15, num=15)
# 15 neighbors used in the example (indices 0..14)
neighbors_ex_kuncheva = np.linspace(0, 14, num=15, dtype=int)
# target class of each example. independent means that it should be used by the a priori technique
# (labels shifted from the book's 1-based numbering to 0-based).
y_dsel_ex_kuncheva_independent = np.array([2, 1, 2, 2, 3, 1, 2, 1, 3, 3, 2, 1, 2, 2, 1]) - 1
# dependent means that it should be used by the a posteriori technique
y_dsel_ex_kuncheva_dependent = np.array([1, 0, 1, 1, 2, 0, 1, 0, 0, 2, 1, 2, 1, 1, 0])
# Predictions of the base classifier ci used to estimate its competence level
classifier_pred_ex_kuncheva = np.array([2, 3, 2, 2, 1, 1, 2, 2, 3, 3, 1, 2, 2, 2, 1]) - 1
# whether or not the base classifier made the correct prediction for each sample in dsel
dsel_processed_kuncheva = np.transpose(np.array([[1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1]]))
# scores obtained by each class by the base classifier ci. In this example we consider that the posterior is always 1
# for the predicted class
dsel_scores_ex_kuncheva = np.array([[[0.0, 1.0, 0.0],
                                     [0.0, 0.0, 1.0],
                                     [0.0, 1.0, 0.0],
                                     [0.0, 1.0, 0.0],
                                     [1.0, 0.0, 0.0],
                                     [1.0, 0.0, 0.0],
                                     [0.0, 1.0, 0.0],
                                     [0.0, 1.0, 0.0],
                                     [0.0, 0.0, 1.0],
                                     [0.0, 0.0, 1.0],
                                     [1.0, 0.0, 0.0],
                                     [0.0, 1.0, 0.0],
                                     [0.0, 1.0, 0.0],
                                     [0.0, 1.0, 0.0],
                                     [1.0, 0.0, 0.0]]]).reshape(15, 1, 3)  # 15 samples, 1 classifier, 3 classes
k_ex_kuncheva = 15
n_classes_ex_kuncheva = 3
# ----- Routines to generate a pool of classifiers using MagicMock ------
def create_base_classifier(return_value, return_prob=None):
classifier = MagicMock()
classifier.predict.return_value = [return_value]
classifier.predict_proba.return_value = return_prob
return classifier
def create_pool_classifiers():
clf_0 = create_base_classifier(return_value=0, return_prob=np.atleast_2d([0.5, 0.5]))
clf_1 = create_base_classifier(return_value=1, return_prob=np.atleast_2d([1.0, 0.0]))
clf_2 = create_base_classifier(return_value=0, return_prob=np.atleast_2d([0.33, 0.67]))
pool_classifiers = [clf_0, clf_1, clf_2]
return pool_classifiers
def create_pool_all_agree(return_value, size):
return [create_base_classifier(return_value=return_value)] * size | deslib/tests/examples_test.py | from unittest.mock import MagicMock
import numpy as np
# ----- Test Example #1 ------
# ex1: The distribution of samples of a test example.
X_dsel_ex1 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[1, -1], [-0.5, 0.5], [0.5, 0.5],
[0, -1], [0.75, -0.5], [0.0, 0.0],
[-1, -1], [0, -0.5], [1, -1]])
# Labels associated with the samples. This information is used by techniques based on a posteriori information (LCA and
# a posteriori in the calculation of the competence level).
y_dsel_ex1 = np.array([0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0])
# Pre-processed results on DSEL. This information is used by the majority of DS techniques to estimate the competence.
dsel_processed_ex1 = np.array([[1, 1, 1], [0, 0, 1], [0, 0, 1], [1, 1, 0], [1, 1, 1], [0, 0, 1], [0, 0, 0], [1, 1, 1],
[1, 1, 0], [0, 0, 1], [0, 0, 1], [0, 1, 0], [0, 1, 0], [1, 1, 1], [1, 1, 1]])
# pre-calculated indices of 7 Nearest neighbors used to estimate the competence level
neighbors_ex1 = np.array([[8, 11, 4, 7, 13, 10, 1],
[7, 1, 11, 13, 0, 8, 4],
[5, 3, 4, 8, 10, 11, 7]])
# Scores obtained for the two classes. This information is used by the techniques based on posterior probabilities
dsel_scores_ex1 = np.tile(np.array([[1.0, 0.0], [0.5, 0.5], [0.8, 0.2]]), (15, 1, 1))
# Distance information is used by the probabilistic techniques (des.probabilistic) as well as the MLA, A Priori and
# A Posteriori methods. Three values are considered: all zeros, all ones and the real distances calculated on the toy
# example.
distances_ex1 = np.array([[0.35355339, 0.35355339, 0.55901699, 0.79056942, 0.79056942, 0.90138782, 1.03077641],
[0.3, 0.39051248, 0.53851648, 0.86023253, 0.94339811, 1.04403065, 1.28549601],
[0.70710678, 1.41421356, 1.95256242, 2.12132034, 2.79508497, 2.82842712, 2.91547595]])
# ----- Test Example all ones ------
dsel_processed_all_ones = np.ones((15, 3))
dsel_scores_all_ones = np.ones((15, 3, 2))
distances_all_ones = np.ones((3, 7))
# ----- Test Example all zeros ------
dsel_processed_all_zeros = np.zeros((15, 3))
dsel_scores_all_zeros = np.zeros((15, 3, 2))
distances_all_zeros = np.zeros((3, 7))
# ----- Test Example from Combining pattern classifiers ------
# This example is used to test the results of the A priori, A posteriori and MLA techniques
distances_ex_kuncheva = np.linspace(1, 15, num=15)
# 10 neighbors used in the example
neighbors_ex_kuncheva = np.linspace(0, 14, num=15, dtype=int)
# target class of each example. independent means that it should be used by the a priori technique
y_dsel_ex_kuncheva_independent = np.array([2, 1, 2, 2, 3, 1, 2, 1, 3, 3, 2, 1, 2, 2, 1]) - 1
# dependent means that it should be used by the a posteriori technique
y_dsel_ex_kuncheva_dependent = np.array([1, 0, 1, 1, 2, 0, 1, 0, 0, 2, 1, 2, 1, 1, 0])
# Predictions of the base classifier ci used to estimate its competence level
classifier_pred_ex_kuncheva = np.array([2, 3, 2, 2, 1, 1, 2, 2, 3, 3, 1, 2, 2, 2, 1]) - 1
# whether or not the base classifier made the correct prediction for each sample in dsel
dsel_processed_kuncheva = np.transpose(np.array([[1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1]]))
# scores obtained by each class by the base classifier ci. In this example we consider that the posteriori is always 1
# fo the predicted class
dsel_scores_ex_kuncheva = np.array([[[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0]]]).reshape(15, 1, 3) # 15 samples, 1 classifier, 3 classes
k_ex_kuncheva = 15
n_classes_ex_kuncheva = 3
# ----- Routines to generate a pool of classifiers using MagicMock ------
def create_base_classifier(return_value, return_prob=None):
classifier = MagicMock()
classifier.predict.return_value = [return_value]
classifier.predict_proba.return_value = return_prob
return classifier
def create_pool_classifiers():
clf_0 = create_base_classifier(return_value=0, return_prob=np.atleast_2d([0.5, 0.5]))
clf_1 = create_base_classifier(return_value=1, return_prob=np.atleast_2d([1.0, 0.0]))
clf_2 = create_base_classifier(return_value=0, return_prob=np.atleast_2d([0.33, 0.67]))
pool_classifiers = [clf_0, clf_1, clf_2]
return pool_classifiers
def create_pool_all_agree(return_value, size):
return [create_base_classifier(return_value=return_value)] * size | 0.847432 | 0.832271 |
from typing import List, Tuple, Union, Deque, Iterable, Dict
from collections import deque
from tqdm import tqdm # type: ignore
def get_data(fn: str) -> Deque[int]:
with open(fn) as f:
data = f.read()
return deque([int(el) for el in data])
class Node:
def __init__(self, value: int):
self.value = value
self.next: Union[None, Node] = None
def __repr__(self) -> str:
return str(self.value)
class CircularLinkedList:
def __init__(self, data: Deque[int]) -> None:
self.nodes: Dict[int, Node] = {}
node = Node(data.popleft())
self.head = node
for value in data:
node.next = Node(value)
node = node.next
self.nodes[value] = node
node.next = self.head
self.nodes[self.head.value] = self.head
self.maxval = max(data)
def traverse(self, start: Union[int, None] = None) -> Iterable[Node]:
if start is not None:
start_node = self.nodes[start]
else:
start_node = self.head
node = start_node
while node.next != start_node and node.next is not None:
yield node
node = node.next
yield node
def print_list(self, start: int = None):
nodes = []
for node in self.traverse(start):
nodes.append(str(node))
print(" -> ".join(nodes))
def get_solution_str(self) -> str:
nodes = []
iter_node = self.traverse(1)
_ = next(iter_node)
for node in iter_node:
nodes.append(str(node))
return "".join(nodes)
def pick_up(self):
iter_node = self.traverse(self.head.value)
_ = next(iter_node)
nodes = []
for _ in range(3):
n = next(iter_node)
nodes.append(n.value)
# cut out the 3 nodes after head
self.head.next = self.nodes[nodes[2]].next
# find destination
dest = self.head.value - 1 if self.head.value != 1 else self.maxval
while dest in nodes:
if dest == 1:
dest = self.maxval
else:
dest -= 1
# implement after destination node
dest_next = self.nodes[dest].next
self.nodes[dest].next = self.nodes[nodes[0]]
self.nodes[nodes[2]].next = dest_next
# set new head
self.head = self.head.next
def solve_part1(fn: str) -> int:
input = get_data(fn)
cll = CircularLinkedList(data=input)
for _ in range(100):
cll.pick_up()
return int(cll.get_solution_str())
def solve_part2(fn: str) -> int:
input = get_data(fn)
input = input + deque(range(max(input) + 1, 1_000_001))
cll = CircularLinkedList(data=input)
for _ in tqdm(range(10_000_000)):
cll.pick_up()
n1 = cll.nodes[1].next
n2 = cll.nodes[n1.value].next
return n1.value * n2.value
if __name__ == "__main__":
fn = "./data/test_data23.txt"
ans = solve_part1(fn)
print("solution of part 1:", ans)
ans2 = solve_part2(fn)
print("solution of part 2:", ans2) | puzzles/puzzle23.py | from typing import List, Tuple, Union, Deque, Iterable, Dict
from collections import deque
from tqdm import tqdm # type: ignore
def get_data(fn: str) -> Deque[int]:
with open(fn) as f:
data = f.read()
return deque([int(el) for el in data])
class Node:
def __init__(self, value: int):
self.value = value
self.next: Union[None, Node] = None
def __repr__(self) -> str:
return str(self.value)
class CircularLinkedList:
def __init__(self, data: Deque[int]) -> None:
self.nodes: Dict[int, Node] = {}
node = Node(data.popleft())
self.head = node
for value in data:
node.next = Node(value)
node = node.next
self.nodes[value] = node
node.next = self.head
self.nodes[self.head.value] = self.head
self.maxval = max(data)
def traverse(self, start: Union[int, None] = None) -> Iterable[Node]:
if start is not None:
start_node = self.nodes[start]
else:
start_node = self.head
node = start_node
while node.next != start_node and node.next is not None:
yield node
node = node.next
yield node
def print_list(self, start: int = None):
nodes = []
for node in self.traverse(start):
nodes.append(str(node))
print(" -> ".join(nodes))
def get_solution_str(self) -> str:
nodes = []
iter_node = self.traverse(1)
_ = next(iter_node)
for node in iter_node:
nodes.append(str(node))
return "".join(nodes)
def pick_up(self):
iter_node = self.traverse(self.head.value)
_ = next(iter_node)
nodes = []
for _ in range(3):
n = next(iter_node)
nodes.append(n.value)
# cut out the 3 nodes after head
self.head.next = self.nodes[nodes[2]].next
# find destination
dest = self.head.value - 1 if self.head.value != 1 else self.maxval
while dest in nodes:
if dest == 1:
dest = self.maxval
else:
dest -= 1
# implement after destination node
dest_next = self.nodes[dest].next
self.nodes[dest].next = self.nodes[nodes[0]]
self.nodes[nodes[2]].next = dest_next
# set new head
self.head = self.head.next
def solve_part1(fn: str) -> int:
input = get_data(fn)
cll = CircularLinkedList(data=input)
for _ in range(100):
cll.pick_up()
return int(cll.get_solution_str())
def solve_part2(fn: str) -> int:
input = get_data(fn)
input = input + deque(range(max(input) + 1, 1_000_001))
cll = CircularLinkedList(data=input)
for _ in tqdm(range(10_000_000)):
cll.pick_up()
n1 = cll.nodes[1].next
n2 = cll.nodes[n1.value].next
return n1.value * n2.value
if __name__ == "__main__":
fn = "./data/test_data23.txt"
ans = solve_part1(fn)
print("solution of part 1:", ans)
ans2 = solve_part2(fn)
print("solution of part 2:", ans2) | 0.806586 | 0.334481 |
from allensdk.api.queries.cell_types_api import CellTypesApi
import os
import sys
ct = CellTypesApi()
from data_helper import CURRENT_DATASETS, DATASET_TARGET_SWEEPS
test = '-test' in sys.argv
dataset_ids = CURRENT_DATASETS
if test:
dataset_ids = [479704527]
sweep_numbers_for_data = DATASET_TARGET_SWEEPS
for dataset_id in dataset_ids:
raw_ephys_file_name = '%d_raw_data.nwb' % dataset_id
if not os.path.isfile(raw_ephys_file_name):
print('Downloading data: %s'%raw_ephys_file_name)
ct.save_ephys_data(dataset_id, raw_ephys_file_name)
print('Saved: %s'%raw_ephys_file_name)
else:
print('File: %s already present...'%raw_ephys_file_name)
print('Loading data from: %s'%raw_ephys_file_name)
from allensdk.core.nwb_data_set import NwbDataSet
data_set = NwbDataSet(raw_ephys_file_name)
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure()
sweep_numbers = sweep_numbers_for_data[dataset_id]
subset = {}
for sweep_number in sweep_numbers:
sweep_data = data_set.get_sweep(sweep_number)
# start/stop indices that exclude the experimental test pulse (if applicable)
index_range = sweep_data['index_range']
# stimulus is a numpy array in amps
stimulus = sweep_data['stimulus'][index_range[0]:index_range[-1]]
# response is a numpy array in volts
response = sweep_data['response'][index_range[0]:index_range[-1]]*1000
subset[sweep_number] = response
# sampling rate is in Hz
sampling_rate = sweep_data['sampling_rate']
# define some time points in seconds (i.e., convert to absolute time)
time_pts = np.arange(0,len(stimulus)/sampling_rate,1./sampling_rate)
subset['t'] = time_pts
metadata = data_set.get_sweep_metadata(sweep_number)
ampl = round(metadata['aibs_stimulus_amplitude_pa'],4)
# plot the stimulus and the voltage response for the random trial
plt.subplot(2,1,1)
plt.plot(time_pts,stimulus)
plt.ylabel('Stimulus (A)')
plt.subplot(2,1,2)
plt.plot(time_pts,response, label = 'S %s, %s pA'%(sweep_number, ampl))
volts_file = open('%s.dat'%dataset_id, 'w')
max = 1.5 # s
for i in range(len(subset['t'])):
t = subset['t'][i]
if t <= max:
line = '%s '%t
for s in sweep_numbers:
line += '%s '% (float(subset[s][i])/1000)
volts_file.write(line+'\n')
volts_file.close()
plt.ylabel('Membrane voltage (mV)')
plt.xlabel('Time (s)')
fig.canvas.set_window_title("Dataset: %s"%dataset_id)
plt.legend()
plt.show() | CellTypesDatabase/data/download.py | from allensdk.api.queries.cell_types_api import CellTypesApi
import os
import sys
ct = CellTypesApi()
from data_helper import CURRENT_DATASETS, DATASET_TARGET_SWEEPS
test = '-test' in sys.argv
dataset_ids = CURRENT_DATASETS
if test:
dataset_ids = [479704527]
sweep_numbers_for_data = DATASET_TARGET_SWEEPS
for dataset_id in dataset_ids:
raw_ephys_file_name = '%d_raw_data.nwb' % dataset_id
if not os.path.isfile(raw_ephys_file_name):
print('Downloading data: %s'%raw_ephys_file_name)
ct.save_ephys_data(dataset_id, raw_ephys_file_name)
print('Saved: %s'%raw_ephys_file_name)
else:
print('File: %s already present...'%raw_ephys_file_name)
print('Loading data from: %s'%raw_ephys_file_name)
from allensdk.core.nwb_data_set import NwbDataSet
data_set = NwbDataSet(raw_ephys_file_name)
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure()
sweep_numbers = sweep_numbers_for_data[dataset_id]
subset = {}
for sweep_number in sweep_numbers:
sweep_data = data_set.get_sweep(sweep_number)
# start/stop indices that exclude the experimental test pulse (if applicable)
index_range = sweep_data['index_range']
# stimulus is a numpy array in amps
stimulus = sweep_data['stimulus'][index_range[0]:index_range[-1]]
# response is a numpy array in volts
response = sweep_data['response'][index_range[0]:index_range[-1]]*1000
subset[sweep_number] = response
# sampling rate is in Hz
sampling_rate = sweep_data['sampling_rate']
# define some time points in seconds (i.e., convert to absolute time)
time_pts = np.arange(0,len(stimulus)/sampling_rate,1./sampling_rate)
subset['t'] = time_pts
metadata = data_set.get_sweep_metadata(sweep_number)
ampl = round(metadata['aibs_stimulus_amplitude_pa'],4)
# plot the stimulus and the voltage response for the random trial
plt.subplot(2,1,1)
plt.plot(time_pts,stimulus)
plt.ylabel('Stimulus (A)')
plt.subplot(2,1,2)
plt.plot(time_pts,response, label = 'S %s, %s pA'%(sweep_number, ampl))
volts_file = open('%s.dat'%dataset_id, 'w')
max = 1.5 # s
for i in range(len(subset['t'])):
t = subset['t'][i]
if t <= max:
line = '%s '%t
for s in sweep_numbers:
line += '%s '% (float(subset[s][i])/1000)
volts_file.write(line+'\n')
volts_file.close()
plt.ylabel('Membrane voltage (mV)')
plt.xlabel('Time (s)')
fig.canvas.set_window_title("Dataset: %s"%dataset_id)
plt.legend()
plt.show() | 0.324663 | 0.303796 |
from .models import Cadres, Teacher, Curriculum
from django.contrib import admin
@admin.register(Cadres)
class CadresAdmin(admin.ModelAdmin):
def save_model(self, request, obj, form, change):
if change:
user = request.user.username
name = self.model.objects.get(pk=obj.pk).name
# person = form.cleaned_data['person'].name
f = open('e://jiaowuxitong.txt', 'a')
# f.write(person+'职位:'+job+',被'+user+'修改'+'\r\n')
f.write('学生干部,干部:'+name+'被'+user+'修改'+'\r\n')
f.close()
else:
pass
super().save_model(request, obj, form, change)
fieldsets = (
('个人信息', {
'fields': ('c_id', 'name', 'sex', 'position', 'phone', 'QQ')
}),)
# 只读字段
readonly_fields = ['c_id', ]
# 默认排序字段
ordering = ['c_id']
# 可选排序字段
sortable_by = ['c_id', 'sex']
# 列表页展示字段
list_display = ['c_id', 'name', 'sex', 'position', 'phone', 'QQ']
# 设置路由地址
list_display_links = ['c_id', 'name']
# 设置过滤器
list_filter = ['sex']
# 设置每页展示数据量
list_per_page = 10
# 设置可搜索字段
search_fields = ['name', 'position']
admin.site.site_title = '教务系统(极简)'
admin.site.site_header = '18级网络工程2班'
@admin.register(Curriculum)
class CurriculumAdmin(admin.ModelAdmin):
# 修改页展示字段
fieldsets = (
('课程信息', {
'fields': ("name", "teacher", "all_time", "theoretical_time",
"practice_time", "score", "category", "method",
"assessment", "week_time", "time", "place"),
}),
)
# 列表页可排序字段
# sortable_by = ['score', 'category', 'assessment', 'all_time', 'score', 'category', 'name']
# 列表页展示字段
list_display = ['name', 'teacher', 'all_time', 'theoretical_time', 'practice_time',
'score', 'category', 'method', 'assessment', 'week_time', 'time', 'place']
# 设置过滤字段
list_filter = ['category', 'assessment', 'method']
# 设置每页显示数据量
list_per_page = 10
# 设置搜索字段
search_fields = ['name', 'teacher', 'place']
@admin.register(Teacher)
class TeacherAdmin(admin.ModelAdmin):
fieldsets = (
('个人信息', {
'fields': ('id', 'name', 'sex', 'phone', 'subject')
})
,
)
# 只读字段
readonly_fields = ['id',]
# 默认排序字段
ordering = ['id']
# 列表页展示字段
list_display = ['id', 'name', 'subject', 'phone']
# 设置路由地址
list_display_links = ['id', 'name']
# 设置每页展示数据量
list_per_page = 10
# 设置可搜索字段
search_fields = ['name', 'subject', 'phone'] | function/admin.py | from .models import Cadres, Teacher, Curriculum
from django.contrib import admin
@admin.register(Cadres)
class CadresAdmin(admin.ModelAdmin):
def save_model(self, request, obj, form, change):
if change:
user = request.user.username
name = self.model.objects.get(pk=obj.pk).name
# person = form.cleaned_data['person'].name
f = open('e://jiaowuxitong.txt', 'a')
# f.write(person+'职位:'+job+',被'+user+'修改'+'\r\n')
f.write('学生干部,干部:'+name+'被'+user+'修改'+'\r\n')
f.close()
else:
pass
super().save_model(request, obj, form, change)
fieldsets = (
('个人信息', {
'fields': ('c_id', 'name', 'sex', 'position', 'phone', 'QQ')
}),)
# 只读字段
readonly_fields = ['c_id', ]
# 默认排序字段
ordering = ['c_id']
# 可选排序字段
sortable_by = ['c_id', 'sex']
# 列表页展示字段
list_display = ['c_id', 'name', 'sex', 'position', 'phone', 'QQ']
# 设置路由地址
list_display_links = ['c_id', 'name']
# 设置过滤器
list_filter = ['sex']
# 设置每页展示数据量
list_per_page = 10
# 设置可搜索字段
search_fields = ['name', 'position']
admin.site.site_title = '教务系统(极简)'
admin.site.site_header = '18级网络工程2班'
@admin.register(Curriculum)
class CurriculumAdmin(admin.ModelAdmin):
# 修改页展示字段
fieldsets = (
('课程信息', {
'fields': ("name", "teacher", "all_time", "theoretical_time",
"practice_time", "score", "category", "method",
"assessment", "week_time", "time", "place"),
}),
)
# 列表页可排序字段
# sortable_by = ['score', 'category', 'assessment', 'all_time', 'score', 'category', 'name']
# 列表页展示字段
list_display = ['name', 'teacher', 'all_time', 'theoretical_time', 'practice_time',
'score', 'category', 'method', 'assessment', 'week_time', 'time', 'place']
# 设置过滤字段
list_filter = ['category', 'assessment', 'method']
# 设置每页显示数据量
list_per_page = 10
# 设置搜索字段
search_fields = ['name', 'teacher', 'place']
@admin.register(Teacher)
class TeacherAdmin(admin.ModelAdmin):
fieldsets = (
('个人信息', {
'fields': ('id', 'name', 'sex', 'phone', 'subject')
})
,
)
# 只读字段
readonly_fields = ['id',]
# 默认排序字段
ordering = ['id']
# 列表页展示字段
list_display = ['id', 'name', 'subject', 'phone']
# 设置路由地址
list_display_links = ['id', 'name']
# 设置每页展示数据量
list_per_page = 10
# 设置可搜索字段
search_fields = ['name', 'subject', 'phone'] | 0.165425 | 0.109325 |
statementArr = [
'startSwitch("variableName")',
'endSwitch()',
'''getComment("This is a comment")''',
"puts('Something to print')",
"getClassBeginning('sampleClass')",
"getClassEnding()",
'setVar(valueToGet="1", valueToChange="x")',
'startCase("x")',
'endCase()',
'startDefault()',
'endDefault()',
"equals(thing1 = 'x', thing2 = 'y', theType='int')",
'''Or([compare(thing1 = "1", thing2 = "1", theType = "Int"), compare(thing1 = '"lolz"', thing2 = "lolz", theType = "String")])''',
'''And(["x", "y"])''',
'''Not("x")''',
'greaterThan("thing1", "thing2")',
'lessThan("thing1", "thing2")',
'Eval('"Evaluate this string."')',
'concatenateStrings(['"String 1"', '"String 2"'])',
"endConditionalBlock()",
"startConditionalBlock()",
'startIf(condition="x==2")',
"endIf()",
'initializeVar(initialValue="1", variableName = "x", variableType = "String", arrayDimensions=None)',
'getFileExtension()',
'startForEach(variableName="varInArr", array="theArr", typeInArray="String")',
'startElse()',
'startWhile("x==1")',
'getReturnStatement("toReturn")',
'compare(theType="String", thing1="str1", thing2="str2")'
"endElse()",
"endWhile()",
"endForEach()",
"endMethod(methodName='methodName')",
'endElseIf()',
'arrayContains(valueToCheck="1", array="arrayOfIntegers")',
'this("variableName")',
'''function(functionName="startForEach", returnType="String", isStatic=True, parameterNames=["lang", "array", "variableName", "typeInArray"], parameterTypes = ["String", "String", "String", "String"], body=[])''',
'startMain()',
'endMain()',
'subString(start="1", end="5", theString="strName")',
'charAt(theString="stringName", index="1")',
'index(indexList = ["1"], theType="String[]", theObject="ArrayName")',
'stringLength("stringName")',
'split(string = "string,test,1", separator = ",")',
'add(["1","2","3"])',
'concatenateArrays(["array1", "array2"])',
'endConstructor()',
'divide(20, 5)',
'toString(objectToConvert=1, convertFrom="Int")',
'startMethod(name="exampleMethod", returnType="int", parameterNames=["param1", "param2"], parameterTypes=["int", "int"], isStatic=True, requiresTheFunctions=False, isDefined=False)',
'typeOf(1)',
'callFunctionWithNamedArgs()',
'getVariableName(True)',
'getVariableName(False)',
"lessThanOrEqual('a', 'b')",
"greaterThanOrEqual('a', 'b')",
"getArrayInitializer([[1,2], [3,4]])",
"concatenateStrings(['a', 'b'])",
'def callFunction("functionName", "fromClass", ["parameter1", "parameter2"]):',
'semicolon("a statement")',
'getCorrespondingTypeWithoutBrackets("int")',
'getCorrespondingTypeWithoutBrackets("string")',
'getCorrespondingTypeWithoutBrackets("boolean")',
'startElseIf("condition")',
'include("fileToInclude")',
'Error("There is an error!")',
'args()',
'seriesOfStatements[1,2,3,4]',
'compare('"String1"', '"String2"', "String")'
] | libraries/statementArr.py | statementArr = [
'startSwitch("variableName")',
'endSwitch()',
'''getComment("This is a comment")''',
"puts('Something to print')",
"getClassBeginning('sampleClass')",
"getClassEnding()",
'setVar(valueToGet="1", valueToChange="x")',
'startCase("x")',
'endCase()',
'startDefault()',
'endDefault()',
"equals(thing1 = 'x', thing2 = 'y', theType='int')",
'''Or([compare(thing1 = "1", thing2 = "1", theType = "Int"), compare(thing1 = '"lolz"', thing2 = "lolz", theType = "String")])''',
'''And(["x", "y"])''',
'''Not("x")''',
'greaterThan("thing1", "thing2")',
'lessThan("thing1", "thing2")',
'Eval('"Evaluate this string."')',
'concatenateStrings(['"String 1"', '"String 2"'])',
"endConditionalBlock()",
"startConditionalBlock()",
'startIf(condition="x==2")',
"endIf()",
'initializeVar(initialValue="1", variableName = "x", variableType = "String", arrayDimensions=None)',
'getFileExtension()',
'startForEach(variableName="varInArr", array="theArr", typeInArray="String")',
'startElse()',
'startWhile("x==1")',
'getReturnStatement("toReturn")',
'compare(theType="String", thing1="str1", thing2="str2")'
"endElse()",
"endWhile()",
"endForEach()",
"endMethod(methodName='methodName')",
'endElseIf()',
'arrayContains(valueToCheck="1", array="arrayOfIntegers")',
'this("variableName")',
'''function(functionName="startForEach", returnType="String", isStatic=True, parameterNames=["lang", "array", "variableName", "typeInArray"], parameterTypes = ["String", "String", "String", "String"], body=[])''',
'startMain()',
'endMain()',
'subString(start="1", end="5", theString="strName")',
'charAt(theString="stringName", index="1")',
'index(indexList = ["1"], theType="String[]", theObject="ArrayName")',
'stringLength("stringName")',
'split(string = "string,test,1", separator = ",")',
'add(["1","2","3"])',
'concatenateArrays(["array1", "array2"])',
'endConstructor()',
'divide(20, 5)',
'toString(objectToConvert=1, convertFrom="Int")',
'startMethod(name="exampleMethod", returnType="int", parameterNames=["param1", "param2"], parameterTypes=["int", "int"], isStatic=True, requiresTheFunctions=False, isDefined=False)',
'typeOf(1)',
'callFunctionWithNamedArgs()',
'getVariableName(True)',
'getVariableName(False)',
"lessThanOrEqual('a', 'b')",
"greaterThanOrEqual('a', 'b')",
"getArrayInitializer([[1,2], [3,4]])",
"concatenateStrings(['a', 'b'])",
'def callFunction("functionName", "fromClass", ["parameter1", "parameter2"]):',
'semicolon("a statement")',
'getCorrespondingTypeWithoutBrackets("int")',
'getCorrespondingTypeWithoutBrackets("string")',
'getCorrespondingTypeWithoutBrackets("boolean")',
'startElseIf("condition")',
'include("fileToInclude")',
'Error("There is an error!")',
'args()',
'seriesOfStatements[1,2,3,4]',
'compare('"String1"', '"String2"', "String")'
] | 0.273186 | 0.32888 |
from ..conf_tests import app, cli
from flask_batteries.commands import generate, destroy, new
import os
import traceback
from flask_batteries.config import TAB
from flask_batteries.helpers import verify_file
from flask_batteries.installers import FlaskSQLAlchemyInstaller
import subprocess
def test_model_generator(app, cli):
result = cli.invoke(generate, ["model", "user"])
assert result.exit_code == 0, traceback.print_exception(*result.exc_info)
result = cli.invoke(generate, ["model", "user_posts"])
assert result.exit_code == 0, traceback.print_exception(*result.exc_info)
assert os.path.exists(os.path.join("src", "models", "user.py"))
assert os.path.exists(os.path.join("test", "models", "test_user.py"))
assert os.path.exists(os.path.join("src", "models", "user_posts.py"))
assert os.path.exists(os.path.join("test", "models", "test_user_posts.py"))
# Verify models/__init__.py
lines_to_verify = [
"from .user import User",
"from .user_posts import UserPosts",
]
assert verify_file(os.path.join("src", "models", "__init__.py"), lines_to_verify)
# Verify src/__init__.py
lines_to_verify = [
"# Import models",
"from src.models import User, UserPosts",
]
assert verify_file(os.path.join("src", "__init__.py"), lines_to_verify)
# Destroy "user_posts" model
result = cli.invoke(destroy, ["model", "user_posts"])
assert result.exit_code == 0, traceback.print_exception(*result.exc_info)
# Verify src/__init__.py
lines_to_verify = [
"# Import models",
"from src.models import User",
]
assert verify_file(os.path.join("src", "__init__.py"), lines_to_verify)
# Destroy "user" model
result = cli.invoke(destroy, ["model", "user"])
assert result.exit_code == 0, traceback.print_exception(*result.exc_info)
# Verify src/__init__.py
lines_to_verify = [
"# Import models",
"from src.models import User",
]
assert not verify_file(os.path.join("src", "__init__.py"), lines_to_verify)
assert not os.path.exists(os.path.join("src", "models", "user.py"))
assert not os.path.exists(os.path.join("test", "models", "test_user.py"))
assert not os.path.exists(os.path.join("src", "models", "user_posts.py"))
assert not os.path.exists(os.path.join("test", "models", "test_user_posts.py"))
def test_fails_when_sqlalchemy_not_installed(cli):
result = cli.invoke(new, ["app", "--skip-db"])
assert result.exit_code == 0, traceback.print_exception(*result.exc_info)
result = cli.invoke(generate, ["model", "user"])
assert result.exit_code != 0
result = cli.invoke(destroy, ["model", "user"])
assert result.exit_code != 0
def test_generated_test_passes(cli, app):
result = cli.invoke(generate, ["model", "user"])
assert result.exit_code == 0, traceback.print_exception(*result.exc_info)
# Run the generated app's test suite and verify exit code is 0
if os.name != "nt":
run_tests = subprocess.run(
"source venv/bin/activate && pytest -k test_user", shell=True
)
else:
run_tests = subprocess.run(
"venv\\Scripts\\activate && pytest -k test_user", shell=True
)
assert run_tests.returncode == 0, run_tests.stdout | tests/generators/test_model_generator.py | from ..conf_tests import app, cli
from flask_batteries.commands import generate, destroy, new
import os
import traceback
from flask_batteries.config import TAB
from flask_batteries.helpers import verify_file
from flask_batteries.installers import FlaskSQLAlchemyInstaller
import subprocess
def test_model_generator(app, cli):
result = cli.invoke(generate, ["model", "user"])
assert result.exit_code == 0, traceback.print_exception(*result.exc_info)
result = cli.invoke(generate, ["model", "user_posts"])
assert result.exit_code == 0, traceback.print_exception(*result.exc_info)
assert os.path.exists(os.path.join("src", "models", "user.py"))
assert os.path.exists(os.path.join("test", "models", "test_user.py"))
assert os.path.exists(os.path.join("src", "models", "user_posts.py"))
assert os.path.exists(os.path.join("test", "models", "test_user_posts.py"))
# Verify models/__init__.py
lines_to_verify = [
"from .user import User",
"from .user_posts import UserPosts",
]
assert verify_file(os.path.join("src", "models", "__init__.py"), lines_to_verify)
# Verify src/__init__.py
lines_to_verify = [
"# Import models",
"from src.models import User, UserPosts",
]
assert verify_file(os.path.join("src", "__init__.py"), lines_to_verify)
# Destroy "user_posts" model
result = cli.invoke(destroy, ["model", "user_posts"])
assert result.exit_code == 0, traceback.print_exception(*result.exc_info)
# Verify src/__init__.py
lines_to_verify = [
"# Import models",
"from src.models import User",
]
assert verify_file(os.path.join("src", "__init__.py"), lines_to_verify)
# Destroy "user" model
result = cli.invoke(destroy, ["model", "user"])
assert result.exit_code == 0, traceback.print_exception(*result.exc_info)
# Verify src/__init__.py
lines_to_verify = [
"# Import models",
"from src.models import User",
]
assert not verify_file(os.path.join("src", "__init__.py"), lines_to_verify)
assert not os.path.exists(os.path.join("src", "models", "user.py"))
assert not os.path.exists(os.path.join("test", "models", "test_user.py"))
assert not os.path.exists(os.path.join("src", "models", "user_posts.py"))
assert not os.path.exists(os.path.join("test", "models", "test_user_posts.py"))
def test_fails_when_sqlalchemy_not_installed(cli):
result = cli.invoke(new, ["app", "--skip-db"])
assert result.exit_code == 0, traceback.print_exception(*result.exc_info)
result = cli.invoke(generate, ["model", "user"])
assert result.exit_code != 0
result = cli.invoke(destroy, ["model", "user"])
assert result.exit_code != 0
def test_generated_test_passes(cli, app):
result = cli.invoke(generate, ["model", "user"])
assert result.exit_code == 0, traceback.print_exception(*result.exc_info)
# Run the generated app's test suite and verify exit code is 0
if os.name != "nt":
run_tests = subprocess.run(
"source venv/bin/activate && pytest -k test_user", shell=True
)
else:
run_tests = subprocess.run(
"venv\\Scripts\\activate && pytest -k test_user", shell=True
)
assert run_tests.returncode == 0, run_tests.stdout | 0.321993 | 0.331471 |
import json
import logging
import inspect
import io
import os
my_path = os.path.dirname(__file__)
def check_hashseed(desired_seed = 0):
if os.environ.get('PYTHONHASHSEED') != desired_seed:
info(f"Ideally set PYTHONHASHSEED={desired_seed} for perfect reproducibility")
return False
return True
def inv_dict(d):
ans = {}
for k, v in d.items():
if v not in ans:
ans[v] = []
ans[v].append(k)
return ans
def remove_docstring(f):
"""Remove docstring"""
assert '\n """' in f, f"No triple quote docstring (after four spaces) in: \n{f}"
i = f.index('\n """')
j = f.index('"""', i + 8)
return f[:i + 1] + f[j + 4:]
def get_docstring(f):
assert '\n """' in f, f"No triple quote docstring (after four spaces) in: \n{f}"
i = f.index('\n """')
j = f.index('"""', i + 8)
docstring = f[i + 1:j + 3]
if not docstring.strip(' "'):
warn(f"Empty docstring in:\n{f}")
return docstring
def flatten(it):
return (e for a in it for e in (flatten(a) if isinstance(a, (tuple, list)) else (a,)))
def save_json(obj, filename, make_dirs_if_necessary=False, indent=2, **kwargs):
"""Saves compressed file if filename ends with '.gz'"""
import json
if make_dirs_if_necessary:
os.makedirs(os.path.dirname(filename), exist_ok=True)
if filename.endswith(".gz"):
import gzip
with gzip.open(filename, "wt") as f:
return json.dump(obj, f, indent=indent, **kwargs)
with open(filename, "w", encoding="utf8") as f:
return json.dump(obj, f, indent=indent, **kwargs)
def load_json(filename):
    """Load JSON from *filename*; gzip-decompress when it ends with '.gz'."""
    import json
    if filename.endswith(".gz"):
        import gzip
        fh = gzip.open(filename, "rt")
    else:
        fh = open(filename, "r", encoding="utf8")
    with fh:
        return json.load(fh)
def stringify(const):
    """JSON-quote exact-str values; plain str() for everything else."""
    # NOTE: deliberately `type(...) is str`, not isinstance -- str subclasses
    # fall through to str() exactly like the original.
    return json.dumps(const) if type(const) is str else str(const)
def dedup(stuff):
    """Return *stuff* as a list with duplicates removed, first-seen order kept."""
    # dict preserves insertion order; items must be hashable (as before).
    return list(dict.fromkeys(stuff))
def color_str(obj, code="\033[0;36m"):
    """Wrap str(obj) in the given ANSI color escape, resetting afterwards."""
    return f"{code}{obj}\033[0m"
# Module-level guard so logging is configured at most once (see below).
_configured = False
def configure_logging(stdio_level=logging.INFO,
                      file_level=logging.DEBUG,
                      filename=os.path.join(my_path, ".problems.log")):
    """Configure root logging with a console handler and a file handler.

    :param stdio_level: level for the console (stream) handler.
    :param file_level: level for the file handler.
    :param filename: log file path; defaults to .problems.log next to this module.
    """
    global _configured
    if _configured:
        warning("Re-configuring logging")
    stdio_handler = logging.StreamHandler()
    stdio_handler.setLevel(stdio_level)
    # NOTE: "hanlder" is a typo, but it is only a local name.
    file_hanlder = logging.FileHandler(filename)
    file_hanlder.setLevel(file_level)
    # Root level is the more verbose of the two so each handler can filter.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message).200s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=min(stdio_level, file_level),
        handlers=[stdio_handler, file_hanlder]
    )
    _configured = True
    _get_or_create_logger().debug("Configured logging")
# Cache of loggers keyed by the calling module's name.
_loggers = {}
def _get_or_create_logger():
    """Return a cached logger named after the first caller outside this module.

    Configures logging on first use, then walks the call stack for the first
    frame whose module differs from this one. Frames for which
    ``inspect.getmodule`` returns None (e.g. code run via exec/eval) are now
    skipped -- the original crashed there with AttributeError on
    ``None.__name__``. Falls back to the name "_" when nothing is found.
    """
    global _configured, _loggers
    if not _configured:
        configure_logging()
    name = "_"
    for frame in inspect.stack():
        module = inspect.getmodule(frame[0])
        if module is None:
            continue
        name = module.__name__
        if name != __name__:
            break
    if name not in _loggers:
        _loggers[name] = logging.getLogger(name)
    return _loggers[name]
def print_to_string(*args, end="", **kwargs):
    """Return what print(*args, **kwargs) would write, with no trailing newline by default."""
    buf = io.StringIO()
    print(*args, file=buf, end=end, **kwargs)
    return buf.getvalue()
def debug(*args, **kwargs):
    """Log *args* at DEBUG level via the caller's module logger."""
    _get_or_create_logger().debug(print_to_string(*args, **kwargs))
def info(*args, **kwargs):
    """Log *args* at INFO level via the caller's module logger."""
    _get_or_create_logger().info(print_to_string(*args, **kwargs))
# Convenience alias: log() == info()
log = info
def warning(*args, **kwargs):
    """Log *args* at WARNING level via the caller's module logger."""
    _get_or_create_logger().warning(print_to_string(*args, **kwargs))
# Convenience alias: warn() == warning()
warn = warning
def error(*args, **kwargs):
    """Log *args* at ERROR level via the caller's module logger."""
    _get_or_create_logger().error(print_to_string(*args, **kwargs))
import logging
import inspect
import io
import os
my_path = os.path.dirname(__file__)
def check_hashseed(desired_seed = 0):
if os.environ.get('PYTHONHASHSEED') != desired_seed:
info(f"Ideally set PYTHONHASHSEED={desired_seed} for perfect reproducibility")
return False
return True
def inv_dict(d):
ans = {}
for k, v in d.items():
if v not in ans:
ans[v] = []
ans[v].append(k)
return ans
def remove_docstring(f):
"""Remove docstring"""
assert '\n """' in f, f"No triple quote docstring (after four spaces) in: \n{f}"
i = f.index('\n """')
j = f.index('"""', i + 8)
return f[:i + 1] + f[j + 4:]
def get_docstring(f):
assert '\n """' in f, f"No triple quote docstring (after four spaces) in: \n{f}"
i = f.index('\n """')
j = f.index('"""', i + 8)
docstring = f[i + 1:j + 3]
if not docstring.strip(' "'):
warn(f"Empty docstring in:\n{f}")
return docstring
def flatten(it):
return (e for a in it for e in (flatten(a) if isinstance(a, (tuple, list)) else (a,)))
def save_json(obj, filename, make_dirs_if_necessary=False, indent=2, **kwargs):
"""Saves compressed file if filename ends with '.gz'"""
import json
if make_dirs_if_necessary:
os.makedirs(os.path.dirname(filename), exist_ok=True)
if filename.endswith(".gz"):
import gzip
with gzip.open(filename, "wt") as f:
return json.dump(obj, f, indent=indent, **kwargs)
with open(filename, "w", encoding="utf8") as f:
return json.dump(obj, f, indent=indent, **kwargs)
def load_json(filename):
"""Loads compressed file if filename ends with '.gz'"""
import json
if filename.endswith(".gz"):
import gzip
with gzip.open(filename, "rt") as f:
return json.load(f)
with open(filename, "r", encoding="utf8") as f:
return json.load(f)
def stringify(const):
if type(const) is str:
return json.dumps(const)
return str(const)
def dedup(stuff):
seen = set()
return [a for a in stuff if a not in seen and not seen.add(a)]
def color_str(obj, code="\033[0;36m"):
return code + str(obj) + '\033[0m'
_configured = False
def configure_logging(stdio_level=logging.INFO,
file_level=logging.DEBUG,
filename=os.path.join(my_path, ".problems.log")):
global _configured
if _configured:
warning("Re-configuring logging")
stdio_handler = logging.StreamHandler()
stdio_handler.setLevel(stdio_level)
file_hanlder = logging.FileHandler(filename)
file_hanlder.setLevel(file_level)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message).200s",
datefmt="%m/%d/%Y %H:%M:%S",
level=min(stdio_level, file_level),
handlers=[stdio_handler, file_hanlder]
)
_configured = True
_get_or_create_logger().debug("Configured logging")
_loggers = {}
def _get_or_create_logger():
global _configured, _loggers
if not _configured:
configure_logging()
name = "_"
for frame in inspect.stack():
name = inspect.getmodule(frame[0]).__name__
if name != __name__:
break
if name not in _loggers:
_loggers[name] = logging.getLogger(name)
return _loggers[name]
def print_to_string(*args, end="", **kwargs):
with io.StringIO() as buf:
print(*args, file=buf, end=end, **kwargs)
return buf.getvalue()
def debug(*args, **kwargs):
_get_or_create_logger().debug(print_to_string(*args, **kwargs))
def info(*args, **kwargs):
_get_or_create_logger().info(print_to_string(*args, **kwargs))
log = info
def warning(*args, **kwargs):
_get_or_create_logger().warning(print_to_string(*args, **kwargs))
warn = warning
def error(*args, **kwargs):
_get_or_create_logger().error(print_to_string(*args, **kwargs)) | 0.350644 | 0.141756 |
# It's based on oslo.i18n usage in OpenStack Keystone project and
# recommendations from
# https://docs.openstack.org/oslo.i18n/latest/user/usage.html
"""Utilities and helper functions."""
import eventlet
import functools
import inspect
import json
import mimetypes
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_context import context as common_context
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import strutils
import pecan
import six
from zun.api import utils as api_utils
from zun.common import clients
from zun.common import consts
from zun.common import exception
from zun.common.i18n import _
from zun.common import privileged
import zun.conf
from zun.network import neutron
from zun import objects
CONF = zun.conf.CONF
LOG = logging.getLogger(__name__)
NETWORK_ATTACH_EXTERNAL = 'network:attach_external_network'
synchronized = lockutils.synchronized_with_prefix(consts.NAME_PREFIX)
VALID_STATES = {
'commit': [consts.RUNNING, consts.STOPPED, consts.PAUSED],
'delete': [consts.CREATED, consts.ERROR, consts.STOPPED, consts.DELETED,
consts.DEAD],
'delete_force': [consts.CREATED, consts.CREATING, consts.ERROR,
consts.RUNNING, consts.STOPPED, consts.UNKNOWN,
consts.DELETED, consts.DEAD, consts.RESTARTING,
consts.REBUILDING],
'delete_after_stop': [consts.RUNNING, consts.CREATED, consts.ERROR,
consts.STOPPED, consts.DELETED, consts.DEAD],
'start': [consts.CREATED, consts.STOPPED, consts.ERROR],
'stop': [consts.RUNNING],
'reboot': [consts.CREATED, consts.RUNNING, consts.STOPPED, consts.ERROR],
'rebuild': [consts.CREATED, consts.RUNNING, consts.STOPPED, consts.ERROR],
'pause': [consts.RUNNING],
'unpause': [consts.PAUSED],
'kill': [consts.RUNNING],
'execute': [consts.RUNNING],
'execute_resize': [consts.RUNNING],
'update': [consts.CREATED, consts.RUNNING, consts.STOPPED, consts.PAUSED],
'attach': [consts.RUNNING],
'resize': [consts.RUNNING],
'top': [consts.RUNNING],
'get_archive': [consts.CREATED, consts.PAUSED, consts.RUNNING,
consts.STOPPED],
'put_archive': [consts.CREATED, consts.PAUSED, consts.RUNNING,
consts.STOPPED],
'logs': [consts.CREATED, consts.ERROR, consts.PAUSED, consts.RUNNING,
consts.STOPPED, consts.UNKNOWN],
'stats': [consts.RUNNING],
'add_security_group': [consts.CREATED, consts.RUNNING, consts.STOPPED,
consts.PAUSED],
'remove_security_group': [consts.CREATED, consts.RUNNING, consts.STOPPED,
consts.PAUSED],
'resize_container': [consts.CREATED, consts.RUNNING, consts.STOPPED,
consts.PAUSED]
}
VALID_CONTAINER_FILED = {
'image': 'image',
'command': 'command',
'args': 'args',
'resources': 'resources',
'ports': 'ports',
'volumeMounts': 'volumeMounts',
'env': 'environment',
'workDir': 'workdir',
'imagePullPolicy': 'image_pull_policy',
}
VALID_CAPSULE_FIELD = {
'restartPolicy': 'restart_policy',
}
VALID_CAPSULE_RESTART_POLICY = {
'Never': 'no',
'Always': 'always',
'OnFailure': 'on-failure',
}
def validate_container_state(container, action):
    """Raise InvalidStateException unless *container* may undergo *action*."""
    if container.status in VALID_STATES[action]:
        return
    raise exception.InvalidStateException(
        id=container.uuid,
        action=action,
        actual_state=container.status)
def validate_image_driver(image_driver):
    """Raise ValidationError if *image_driver* is not a configured driver."""
    if image_driver in CONF.image_driver_list:
        return
    raise exception.ValidationError(
        detail=_("Invalid input for image_driver, "
                 "it should be within the image drivers list"))
def safe_rstrip(value, chars=None):
    """Removes trailing characters from a string if that does not make it empty
    :param value: A string value that will be stripped.
    :param chars: Characters to remove.
    :return: Stripped value.
    """
    if isinstance(value, six.string_types):
        # "or value": never return the empty string for an all-chars input.
        return value.rstrip(chars) or value
    LOG.warning(
        "Failed to remove trailing character. Returning original object. "
        "Supplied object is not a string: %s.", value)
    return value
def _do_allow_certain_content_types(func, content_types_list):
    # Allows you to bypass pecan's content-type restrictions
    cfg = pecan.util._cfg(func)
    content_types = cfg.setdefault('content_types', {})
    content_types.update({value: '' for value in content_types_list})
    return func
def allow_certain_content_types(*content_types_list):
    """Decorator factory whitelisting extra content types on a controller."""
    def _wrapper(func):
        # Delegate to the shared helper that patches pecan's config.
        return _do_allow_certain_content_types(func, content_types_list)
    return _wrapper
def allow_all_content_types(f):
    # Accept every MIME type known to the mimetypes registry.
    return _do_allow_certain_content_types(f, mimetypes.types_map.values())
def parse_image_name(image, driver=None):
    """Split an image reference into (repo, tag).

    An explicit ':tag' suffix always wins; otherwise glance images default
    to an empty tag and all other drivers default to 'latest'.
    """
    repo, sep, tag = image.partition(':')
    if driver is None:
        driver = CONF.default_image_driver
    if not sep:
        tag = '' if driver == 'glance' else 'latest'
    return repo, tag
def spawn_n(func, *args, **kwargs):
    """Passthrough method for eventlet.spawn_n.
    This utility exists so that it can be stubbed for testing without
    interfering with the service spawns.
    It will also grab the context from the threadlocal store and add it to
    the store on the new thread. This allows for continuity in logging the
    context when using this method to spawn a new thread.
    """
    # Capture the request context on the *current* thread, before spawning.
    _context = common_context.get_current()
    @functools.wraps(func)
    def context_wrapper(*args, **kwargs):
        # NOTE: If update_store is not called after spawn_n it won't be
        # available for the logger to pull from threadlocal storage.
        if _context is not None:
            _context.update_store()
        func(*args, **kwargs)
    eventlet.spawn_n(context_wrapper, *args, **kwargs)
def translate_exception(function):
    """Wraps a method to catch exceptions.
    If the exception is not an instance of ZunException,
    translate it into one.

    The original implementation had an unreachable bare ``raise`` after
    ``raise e``; this version re-raises ZunExceptions directly (preserving
    the original traceback) and wraps everything else.
    """
    @functools.wraps(function)
    def decorated_function(self, context, *args, **kwargs):
        try:
            return function(self, context, *args, **kwargs)
        except exception.ZunException:
            # Already a ZunException: propagate untouched.
            raise
        except Exception as e:
            LOG.exception("Unexpected error: %s", six.text_type(e))
            raise exception.ZunException("Unexpected error: %s"
                                         % six.text_type(e))
    return decorated_function
def check_container_id(function):
    """Check container_id property of given container instance."""
    @functools.wraps(function)
    def decorated_function(*args, **kwargs):
        # By convention the container is the third positional argument
        # (self, context, container, ...).
        container = args[2]
        if getattr(container, 'container_id', None) is not None:
            return function(*args, **kwargs)
        raise exception.Invalid(
            message=_("Cannot operate an uncreated container."))
    return decorated_function
def get_image_pull_policy(image_pull_policy, image_tag):
    """Return the pull policy, deriving a default from the tag when unset."""
    if image_pull_policy:
        return image_pull_policy
    # 'latest' (or a missing/empty tag) always pulls; pinned tags only
    # pull when the image is absent.
    return 'always' if (image_tag == 'latest' or not image_tag) else 'ifnotpresent'
def should_pull_image(image_pull_policy, present):
    """Decide whether an image pull is required under *image_pull_policy*."""
    if image_pull_policy == 'never':
        return False
    if image_pull_policy == 'always':
        return True
    return image_pull_policy == 'ifnotpresent' and not present
def get_floating_cpu_set():
    """Parse floating_cpu_set config.
    :returns: a set of pcpu ids can be used by containers, or None when the
        option is unset.
    :raises: exception.Invalid when the option parses to an empty set.
    """
    if not CONF.floating_cpu_set:
        return None
    cpuset_ids = parse_floating_cpu(CONF.floating_cpu_set)
    if not cpuset_ids:
        raise exception.Invalid(_("No CPUs available after parsing %r") %
                                CONF.floating_cpu_set)
    return cpuset_ids
def parse_floating_cpu(spec):
    """Parse a CPU set specification.
    Each element in the list is either a single CPU number, a range of
    CPU numbers.
    :param spec: cpu set string eg "1-4,6"
    :returns: a set of CPU indexes
    :raises: exception.Invalid on malformed numbers or non-ascending ranges
    """
    cpuset_ids = set()
    for rule in spec.split(','):
        bounds = rule.strip().split("-", 1)
        if len(bounds) == 1:
            # Single CPU id.
            try:
                cpuset_ids.add(int(bounds[0]))
            except ValueError:
                raise exception.Invalid()
            continue
        # Inclusive range "start-end"; start must be strictly below end.
        try:
            start, end = (int(bound.strip()) for bound in bounds)
        except ValueError:
            raise exception.Invalid()
        if start >= end:
            raise exception.Invalid()
        cpuset_ids.update(range(start, end + 1))
    return cpuset_ids
def get_security_group_ids(context, security_groups, **kwargs):
    """Resolve security group names or ids to Neutron security group ids.

    :param context: request context (Neutron client and tenant scoping).
    :param security_groups: iterable of group names and/or ids.
    :returns: list of matching ids, or None when *security_groups* is empty.
    :raises: exception.ZunException when any entry cannot be resolved.
    """
    if not security_groups:
        return None
    else:
        neutron = clients.OpenStackClients(context).neutron()
        search_opts = {'tenant_id': context.project_id}
        security_groups_list = neutron.list_security_groups(
            **search_opts).get('security_groups', [])
        # Match by either name or id.
        security_group_ids = [item['id'] for item in security_groups_list
                              if item['name'] in security_groups
                              or item['id'] in security_groups]
        # NOTE(review): ">=" assumes no over-matching; a group name that
        # collides with another group's id could inflate the count -- confirm.
        if len(security_group_ids) >= len(security_groups):
            return security_group_ids
        else:
            raise exception.ZunException(_(
                "Any of the security group in %s is not found ") %
                security_groups)
def custom_execute(*cmd, **kwargs):
    """Run *cmd* via processutils.execute, translating failures to CommandError."""
    try:
        return processutils.execute(*cmd, **kwargs)
    except processutils.ProcessExecutionError as e:
        # Never leak credentials that may appear on the command line.
        sanitized_cmd = strutils.mask_password(' '.join(cmd))
        raise exception.CommandError(cmd=sanitized_cmd,
                                     error=six.text_type(e))
def get_root_helper():
    """Return the command prefix used to run privileged ops via rootwrap."""
    return 'sudo zun-rootwrap {}'.format(CONF.rootwrap_config)
@privileged.default.entrypoint
def execute_root(*cmd, **kwargs):
    """Run *cmd* inside the privsep daemon (already privileged)."""
    # NOTE(kiennt): Set run_as_root=False because if it is set to True, the
    #               command is prefixed by the command specified in the
    #               root_helper kwargs [1]. But we use oslo.privsep instead
    #               of rootwrap so set run_as_root=False.
    # [1] https://github.com/openstack/oslo.concurrency/blob/master/oslo_concurrency/processutils.py#L218 # noqa
    return custom_execute(*cmd, shell=False, run_as_root=False, **kwargs)
def execute(*cmd, **kwargs):
    """Run *cmd*, dispatching to the privsep entrypoint when run_as_root is set."""
    run_as_root = kwargs.pop('run_as_root', False)
    # root_helper is unnecessary with privsep, so always drop it.
    kwargs.pop('root_helper', None)
    runner = execute_root if run_as_root else custom_execute
    return runner(*cmd, **kwargs)
def check_capsule_template(tpl):
    """Validate a capsule template and normalize it to Zun field names.

    :param tpl: template as a dict or a JSON string.
    :returns: (spec_field, tpl_json) tuple.
    :raises: FailedParseStringToJson on unparsable JSON,
             InvalidCapsuleTemplate on any structural problem (including an
             unknown restartPolicy, which previously escaped as a KeyError).
    """
    # TODO(kevinz): add volume spec check
    tpl_json = tpl
    if isinstance(tpl, six.string_types):
        try:
            tpl_json = json.loads(tpl)
        except Exception as e:
            raise exception.FailedParseStringToJson(e)
    kind_field = tpl_json.get('kind')
    if kind_field not in ['capsule', 'Capsule']:
        raise exception.InvalidCapsuleTemplate("kind fields need to be "
                                               "set as capsule or Capsule")
    spec_field = tpl_json.get('spec')
    if spec_field is None:
        raise exception.InvalidCapsuleTemplate("No Spec found")
    # Align the Capsule restartPolicy with container restart_policy and
    # rename the field from the Kubernetes name to the OpenStack name.
    if 'restartPolicy' in spec_field:
        policy = spec_field.pop('restartPolicy')
        if policy not in VALID_CAPSULE_RESTART_POLICY:
            raise exception.InvalidCapsuleTemplate(
                "Invalid restartPolicy: %s" % policy)
        spec_field[VALID_CAPSULE_FIELD['restartPolicy']] = \
            VALID_CAPSULE_RESTART_POLICY[policy]
    if spec_field.get('containers') is None:
        raise exception.InvalidCapsuleTemplate("No valid containers field")
    return spec_field, tpl_json
def capsule_get_container_spec(spec_field):
    """Return the containers section of a capsule spec, remapped to Zun names.

    :param spec_field: the (already validated) capsule spec dict.
    :returns: the containers list, fields renamed in place.
    :raises: InvalidCapsuleTemplate when no container is present, a container
             lacks an image, or a container uses a field Zun does not
             support (previously an opaque KeyError, e.g. for the common
             Kubernetes 'name' field).
    """
    containers_spec = spec_field.get('containers')
    if not containers_spec:
        raise exception.InvalidCapsuleTemplate("Capsule need to have one "
                                               "container at least")
    for container_spec in containers_spec:
        if 'image' not in container_spec:
            raise exception.InvalidCapsuleTemplate("Container "
                                                   "image is needed")
        # Remap the Capsule's container fields to native Zun container fields.
        for key in list(container_spec.keys()):
            if key not in VALID_CONTAINER_FILED:
                raise exception.InvalidCapsuleTemplate(
                    "Unsupported container field: %s" % key)
            container_spec[VALID_CONTAINER_FILED[key]] = \
                container_spec.pop(key)
    return containers_spec
def capsule_get_volume_spec(spec_field):
    """Validate the volumes section of a capsule spec and return it."""
    volumes_spec = spec_field.get('volumes')
    if not volumes_spec:
        return []
    for volume in volumes_spec:
        if volume.get('name') is None:
            raise exception.InvalidCapsuleTemplate("Volume name "
                                                   "is needed")
        cinder_spec = volume.get('cinder')
        if not cinder_spec:
            raise exception.InvalidCapsuleTemplate("Zun now Only support "
                                                   "Cinder volume driver")
        volume_uuid = cinder_spec.get('volumeID')
        volume_size = cinder_spec.get('size')
        # Either an existing volume (uuid) or a new one (size), not both.
        if not volume_uuid and volume_size is None:
            raise exception.InvalidCapsuleTemplate("Volume size "
                                                   "is needed")
        if volume_uuid and volume_size:
            raise exception.InvalidCapsuleTemplate("Volume size and uuid "
                                                   "could not be set at "
                                                   "the same time")
    return volumes_spec
def is_all_projects(search_opts):
    """Interpret the 'all_projects' search option as a strict boolean."""
    all_projects = search_opts.get('all_projects')
    if not all_projects:
        return False
    try:
        return strutils.bool_from_string(all_projects, True)
    except ValueError:
        bools = ', '.join(strutils.TRUE_STRINGS + strutils.FALSE_STRINGS)
        raise exception.InvalidValue(_('Valid all_projects values are: %s')
                                     % bools)
def get_container(container_ident):
    """Look up a container by ident, aborting the request with 404 if absent."""
    container = api_utils.get_resource('Container', container_ident)
    if container:
        return container
    # pecan.abort raises, ending the request here.
    pecan.abort(404, ('Not found; the container you requested '
                      'does not exist.'))
def get_image(image_id):
    """Look up an image by id, aborting the request with 404 if absent."""
    image = api_utils.get_resource('Image', image_id)
    if image:
        return image
    # pecan.abort raises, ending the request here.
    pecan.abort(404, ('Not found; the image you requested '
                      'does not exist.'))
def check_for_restart_policy(container_dict):
    """Check for restart policy input
    :param container_dict: a container within the request body.
    """
    restart_policy = container_dict.get('restart_policy')
    if not restart_policy:
        return
    name = restart_policy.get('Name')
    # Default the retry count to '0' (stored as a string) when absent.
    count = int(restart_policy.setdefault('MaximumRetryCount', '0'))
    if name in ['unless-stopped', 'always']:
        if count != 0:
            raise exception.InvalidValue(
                _("maximum retry count not valid with restart "
                  "policy of %s") % name)
    elif name in ['no']:
        # A retry count is meaningless for 'no'; force it to '0'.
        restart_policy['MaximumRetryCount'] = '0'
def build_requested_networks(context, nets):
    """Build requested networks by calling neutron client
    :param nets: The special network uuid when create container
    if none, will call neutron to create new network.
    :returns: available network and ports
    """
    neutron_api = neutron.NeutronAPI(context)
    requested_networks = []
    for net in nets:
        if net.get('port'):
            # An explicit port was supplied: validate it and keep it on
            # container delete (preserve_on_delete=True).
            port = neutron_api.get_neutron_port(net['port'])
            neutron_api.ensure_neutron_port_usable(port)
            network = neutron_api.get_neutron_network(port['network_id'])
            requested_networks.append({'network': port['network_id'],
                                       'port': port['id'],
                                       'router:external':
                                           network.get('router:external'),
                                       'shared': network.get('shared'),
                                       'fixed_ip': '',
                                       'preserve_on_delete': True})
        elif net.get('network'):
            # Only a network was supplied; Zun creates (and later deletes)
            # the port itself.
            network = neutron_api.get_neutron_network(net['network'])
            requested_networks.append({'network': network['id'],
                                       'port': '',
                                       'router:external':
                                           network.get('router:external'),
                                       'shared': network.get('shared'),
                                       'fixed_ip':
                                           net.get('fixed_ip') or
                                           net.get('v4-fixed-ip', '') or
                                           net.get('v6-fixed-ip', ''),
                                       'preserve_on_delete': False})
    if not requested_networks:
        # Find an available neutron net and create docker network by
        # wrapping the neutron net.
        network = neutron_api.get_available_network()
        requested_networks.append({'network': network['id'],
                                   'port': '',
                                   'router:external':
                                       network.get('router:external'),
                                   'shared': network.get('shared'),
                                   'fixed_ip': '',
                                   'preserve_on_delete': False})
    check_external_network_attach(context, requested_networks)
    return requested_networks
def check_external_network_attach(context, nets):
    """Check if attaching to external network is permitted."""
    if context.can(NETWORK_ATTACH_EXTERNAL, fatal=False):
        return
    for net in nets:
        # Shared external networks are always allowed.
        if net.get('router:external') and not net.get('shared'):
            raise exception.ExternalNetworkAttachForbidden(
                network_uuid=net['network'])
class EventReporter(object):
    """Context manager to report container action events.

    Records one start/finish ContainerActionEvent pair per container uuid
    around the wrapped block.
    """
    def __init__(self, context, event_name, *container_uuids):
        self.context = context
        self.event_name = event_name
        self.container_uuids = container_uuids
    def __enter__(self):
        for uuid in self.container_uuids:
            objects.ContainerActionEvent.event_start(
                self.context, uuid, self.event_name, want_result=False)
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Record the finish (with any exception details) for every uuid.
        for uuid in self.container_uuids:
            objects.ContainerActionEvent.event_finish(
                self.context, uuid, self.event_name, exc_val=exc_val,
                exc_tb=exc_tb, want_result=False)
        # Returning False re-raises any exception from the with-block.
        return False
def get_wrapped_function(function):
    """Get the method at the bottom of a stack of decorators."""
    if not hasattr(function, '__closure__') or not function.__closure__:
        return function
    def _get_wrapped_function(function):
        # Recursively search the closure cells for the innermost callable.
        if not hasattr(function, '__closure__') or not function.__closure__:
            return None
        for closure in function.__closure__:
            func = closure.cell_contents
            deeper_func = _get_wrapped_function(func)
            if deeper_func:
                return deeper_func
            elif hasattr(closure.cell_contents, '__call__'):
                # A callable without its own closure: this is the bottom.
                return closure.cell_contents
        return function
    return _get_wrapped_function(function)
def wrap_container_event(prefix):
    """Wraps a method to log the event taken on the container, and result.
    This decorator wraps a method to log the start and result of an event, as
    part of an action taken on a container.
    """
    def helper(function):
        @functools.wraps(function)
        def decorated_function(self, context, *args, **kwargs):
            # Resolve the innermost function so getcallargs sees the real
            # signature even under stacked decorators.
            wrapped_func = get_wrapped_function(function)
            keyed_args = inspect.getcallargs(wrapped_func, self, context,
                                             *args, **kwargs)
            # The wrapped method is expected to take a 'container' argument.
            container_uuid = keyed_args['container'].uuid
            event_name = '{0}_{1}'.format(prefix, function.__name__)
            with EventReporter(context, event_name, container_uuid):
                return function(self, context, *args, **kwargs)
        return decorated_function
    return helper
def wrap_exception():
    """Decorator factory that logs Docker errors and unexpected exceptions."""
    def helper(function):
        @functools.wraps(function)
        def decorated_function(self, context, container, *args, **kwargs):
            try:
                return function(self, context, container, *args, **kwargs)
            except exception.DockerError as e:
                # NOTE(review): reraise=False means the exception is logged
                # and then swallowed (decorated call returns None) -- confirm
                # this is intentional for callers.
                with excutils.save_and_reraise_exception(reraise=False):
                    LOG.error("Error occurred while calling Docker API: %s",
                              six.text_type(e))
            except Exception as e:
                with excutils.save_and_reraise_exception(reraise=False):
                    LOG.exception("Unexpected exception: %s", six.text_type(e))
        return decorated_function
    return helper
# It's based on oslo.i18n usage in OpenStack Keystone project and
# recommendations from
# https://docs.openstack.org/oslo.i18n/latest/user/usage.html
"""Utilities and helper functions."""
import eventlet
import functools
import inspect
import json
import mimetypes
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_context import context as common_context
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import strutils
import pecan
import six
from zun.api import utils as api_utils
from zun.common import clients
from zun.common import consts
from zun.common import exception
from zun.common.i18n import _
from zun.common import privileged
import zun.conf
from zun.network import neutron
from zun import objects
CONF = zun.conf.CONF
LOG = logging.getLogger(__name__)
NETWORK_ATTACH_EXTERNAL = 'network:attach_external_network'
synchronized = lockutils.synchronized_with_prefix(consts.NAME_PREFIX)
VALID_STATES = {
'commit': [consts.RUNNING, consts.STOPPED, consts.PAUSED],
'delete': [consts.CREATED, consts.ERROR, consts.STOPPED, consts.DELETED,
consts.DEAD],
'delete_force': [consts.CREATED, consts.CREATING, consts.ERROR,
consts.RUNNING, consts.STOPPED, consts.UNKNOWN,
consts.DELETED, consts.DEAD, consts.RESTARTING,
consts.REBUILDING],
'delete_after_stop': [consts.RUNNING, consts.CREATED, consts.ERROR,
consts.STOPPED, consts.DELETED, consts.DEAD],
'start': [consts.CREATED, consts.STOPPED, consts.ERROR],
'stop': [consts.RUNNING],
'reboot': [consts.CREATED, consts.RUNNING, consts.STOPPED, consts.ERROR],
'rebuild': [consts.CREATED, consts.RUNNING, consts.STOPPED, consts.ERROR],
'pause': [consts.RUNNING],
'unpause': [consts.PAUSED],
'kill': [consts.RUNNING],
'execute': [consts.RUNNING],
'execute_resize': [consts.RUNNING],
'update': [consts.CREATED, consts.RUNNING, consts.STOPPED, consts.PAUSED],
'attach': [consts.RUNNING],
'resize': [consts.RUNNING],
'top': [consts.RUNNING],
'get_archive': [consts.CREATED, consts.PAUSED, consts.RUNNING,
consts.STOPPED],
'put_archive': [consts.CREATED, consts.PAUSED, consts.RUNNING,
consts.STOPPED],
'logs': [consts.CREATED, consts.ERROR, consts.PAUSED, consts.RUNNING,
consts.STOPPED, consts.UNKNOWN],
'stats': [consts.RUNNING],
'add_security_group': [consts.CREATED, consts.RUNNING, consts.STOPPED,
consts.PAUSED],
'remove_security_group': [consts.CREATED, consts.RUNNING, consts.STOPPED,
consts.PAUSED],
'resize_container': [consts.CREATED, consts.RUNNING, consts.STOPPED,
consts.PAUSED]
}
VALID_CONTAINER_FILED = {
'image': 'image',
'command': 'command',
'args': 'args',
'resources': 'resources',
'ports': 'ports',
'volumeMounts': 'volumeMounts',
'env': 'environment',
'workDir': 'workdir',
'imagePullPolicy': 'image_pull_policy',
}
VALID_CAPSULE_FIELD = {
'restartPolicy': 'restart_policy',
}
VALID_CAPSULE_RESTART_POLICY = {
'Never': 'no',
'Always': 'always',
'OnFailure': 'on-failure',
}
def validate_container_state(container, action):
if container.status not in VALID_STATES[action]:
raise exception.InvalidStateException(
id=container.uuid,
action=action,
actual_state=container.status)
def validate_image_driver(image_driver):
if image_driver not in CONF.image_driver_list:
detail = _("Invalid input for image_driver, "
"it should be within the image drivers list")
raise exception.ValidationError(detail=detail)
def safe_rstrip(value, chars=None):
"""Removes trailing characters from a string if that does not make it empty
:param value: A string value that will be stripped.
:param chars: Characters to remove.
:return: Stripped value.
"""
if not isinstance(value, six.string_types):
LOG.warning(
"Failed to remove trailing character. Returning original object. "
"Supplied object is not a string: %s.", value)
return value
return value.rstrip(chars) or value
def _do_allow_certain_content_types(func, content_types_list):
# Allows you to bypass pecan's content-type restrictions
cfg = pecan.util._cfg(func)
cfg.setdefault('content_types', {})
cfg['content_types'].update((value, '')
for value in content_types_list)
return func
def allow_certain_content_types(*content_types_list):
def _wrapper(func):
return _do_allow_certain_content_types(func, content_types_list)
return _wrapper
def allow_all_content_types(f):
return _do_allow_certain_content_types(f, mimetypes.types_map.values())
def parse_image_name(image, driver=None):
image_parts = image.split(':', 1)
image_repo = image_parts[0]
if driver is None:
driver = CONF.default_image_driver
if driver == 'glance':
image_tag = ''
else:
image_tag = 'latest'
if len(image_parts) > 1:
image_tag = image_parts[1]
return image_repo, image_tag
def spawn_n(func, *args, **kwargs):
"""Passthrough method for eventlet.spawn_n.
This utility exists so that it can be stubbed for testing without
interfering with the service spawns.
It will also grab the context from the threadlocal store and add it to
the store on the new thread. This allows for continuity in logging the
context when using this method to spawn a new thread.
"""
_context = common_context.get_current()
@functools.wraps(func)
def context_wrapper(*args, **kwargs):
# NOTE: If update_store is not called after spawn_n it won't be
# available for the logger to pull from threadlocal storage.
if _context is not None:
_context.update_store()
func(*args, **kwargs)
eventlet.spawn_n(context_wrapper, *args, **kwargs)
def translate_exception(function):
"""Wraps a method to catch exceptions.
If the exception is not an instance of ZunException,
translate it into one.
"""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except Exception as e:
if not isinstance(e, exception.ZunException):
LOG.exception("Unexpected error: %s", six.text_type(e))
e = exception.ZunException("Unexpected error: %s"
% six.text_type(e))
raise e
raise
return decorated_function
def check_container_id(function):
"""Check container_id property of given container instance."""
@functools.wraps(function)
def decorated_function(*args, **kwargs):
container = args[2]
if getattr(container, 'container_id', None) is None:
msg = _("Cannot operate an uncreated container.")
raise exception.Invalid(message=msg)
return function(*args, **kwargs)
return decorated_function
def get_image_pull_policy(image_pull_policy, image_tag):
if not image_pull_policy:
if image_tag == 'latest' or not image_tag:
image_pull_policy = 'always'
else:
image_pull_policy = 'ifnotpresent'
return image_pull_policy
def should_pull_image(image_pull_policy, present):
if image_pull_policy == 'never':
return False
if (image_pull_policy == 'always' or
(image_pull_policy == 'ifnotpresent' and not present)):
return True
return False
def get_floating_cpu_set():
"""Parse floating_cpu_set config.
:returns: a set of pcpu ids can be used by containers
"""
if not CONF.floating_cpu_set:
return None
cpuset_ids = parse_floating_cpu(CONF.floating_cpu_set)
if not cpuset_ids:
raise exception.Invalid(_("No CPUs available after parsing %r") %
CONF.floating_cpu_set)
return cpuset_ids
def parse_floating_cpu(spec):
"""Parse a CPU set specification.
Each element in the list is either a single CPU number, a range of
CPU numbers.
:param spec: cpu set string eg "1-4,6"
:returns: a set of CPU indexes
"""
cpuset_ids = set()
for rule in spec.split(','):
range_part = rule.strip().split("-", 1)
if len(range_part) > 1:
try:
start, end = [int(p.strip()) for p in range_part]
except ValueError:
raise exception.Invalid()
if start < end:
cpuset_ids |= set(range(start, end + 1))
else:
raise exception.Invalid()
else:
try:
cpuset_ids.add(int(rule))
except ValueError:
raise exception.Invalid()
return cpuset_ids
def get_security_group_ids(context, security_groups, **kwargs):
if not security_groups:
return None
else:
neutron = clients.OpenStackClients(context).neutron()
search_opts = {'tenant_id': context.project_id}
security_groups_list = neutron.list_security_groups(
**search_opts).get('security_groups', [])
security_group_ids = [item['id'] for item in security_groups_list
if item['name'] in security_groups
or item['id'] in security_groups]
if len(security_group_ids) >= len(security_groups):
return security_group_ids
else:
raise exception.ZunException(_(
"Any of the security group in %s is not found ") %
security_groups)
def custom_execute(*cmd, **kwargs):
try:
return processutils.execute(*cmd, **kwargs)
except processutils.ProcessExecutionError as e:
sanitized_cmd = strutils.mask_password(' '.join(cmd))
raise exception.CommandError(cmd=sanitized_cmd,
error=six.text_type(e))
def get_root_helper():
return 'sudo zun-rootwrap %s' % CONF.rootwrap_config
@privileged.default.entrypoint
def execute_root(*cmd, **kwargs):
# NOTE(kiennt): Set run_as_root=False because if it is set to True, the
# command is prefixed by the command specified in the
# root_helper kwargs [1]. But we use oslo.privsep instead
# of rootwrap so set run_as_root=False.
# [1] https://github.com/openstack/oslo.concurrency/blob/master/oslo_concurrency/processutils.py#L218 # noqa
return custom_execute(*cmd, shell=False, run_as_root=False, **kwargs)
def execute(*cmd, **kwargs):
    """Execute *cmd*, escalating through privsep when run_as_root is set."""
    run_as_root = kwargs.pop('run_as_root', False)
    # NOTE(kiennt): root_helper is unnecessary when using privsep,
    #               therefore drop it.
    kwargs.pop('root_helper', None)
    if run_as_root:
        return execute_root(*cmd, **kwargs)
    return custom_execute(*cmd, **kwargs)
def check_capsule_template(tpl):
    """Validate a capsule template and return its spec section.

    :param tpl: the capsule template, either a JSON string or a dict.
    :returns: tuple of (spec dict, full template dict).
    :raises: exception.FailedParseStringToJson when a string template is
             not valid JSON.
    :raises: exception.InvalidCapsuleTemplate when required fields are
             missing or carry unsupported values.
    """
    # TODO(kevinz): add volume spec check
    tpl_json = tpl
    if isinstance(tpl, six.string_types):
        try:
            tpl_json = json.loads(tpl)
        except Exception as e:
            raise exception.FailedParseStringToJson(e)
    kind_field = tpl_json.get('kind')
    if kind_field not in ['capsule', 'Capsule']:
        raise exception.InvalidCapsuleTemplate("kind fields need to be "
                                               "set as capsule or Capsule")
    spec_field = tpl_json.get('spec')
    if spec_field is None:
        raise exception.InvalidCapsuleTemplate("No Spec found")
    # Align the Capsule restartPolicy with container restart_policy.
    # Also change the template field name from Kubernetes type to
    # OpenStack type.
    if 'restartPolicy' in spec_field.keys():
        restart_policy = spec_field['restartPolicy']
        # Guard the mapping lookup: an unknown policy value previously
        # escaped as a bare KeyError (HTTP 500) instead of a 400-style
        # template validation error.
        if restart_policy not in VALID_CAPSULE_RESTART_POLICY:
            raise exception.InvalidCapsuleTemplate(
                "restartPolicy %s is not supported" % restart_policy)
        spec_field['restartPolicy'] = \
            VALID_CAPSULE_RESTART_POLICY[restart_policy]
        spec_field[VALID_CAPSULE_FIELD['restartPolicy']] = \
            spec_field.pop('restartPolicy')
    if spec_field.get('containers') is None:
        raise exception.InvalidCapsuleTemplate("No valid containers field")
    return spec_field, tpl_json
def capsule_get_container_spec(spec_field):
    """Extract and normalize the containers section of a capsule spec.

    :returns: the (mutated in place) list of container specs.
    :raises: exception.InvalidCapsuleTemplate when the list is empty or
             a container has no image.
    """
    containers_spec = spec_field.get('containers')
    if len(containers_spec) == 0:
        raise exception.InvalidCapsuleTemplate("Capsule need to have one "
                                               "container at least")
    for container_spec in containers_spec:
        if 'image' not in container_spec:
            raise exception.InvalidCapsuleTemplate("Container "
                                                   "image is needed")
        # Rename every Kubernetes-style key to its native Zun container
        # field name; list() snapshots the keys before mutation.
        for key in list(container_spec.keys()):
            container_spec[VALID_CONTAINER_FILED[key]] = \
                container_spec.pop(key)
    return containers_spec
def capsule_get_volume_spec(spec_field):
    """Validate and return the volumes section of a capsule spec.

    :returns: the list of volume specs ([] when no volumes are given).
    :raises: exception.InvalidCapsuleTemplate on a missing name, a
             non-Cinder driver, or an inconsistent uuid/size pair.
    """
    volumes_spec = spec_field.get('volumes')
    if not volumes_spec:
        return []

    for volume in volumes_spec:
        if volume.get('name') is None:
            raise exception.InvalidCapsuleTemplate("Volume name "
                                                   "is needed")
        cinder_spec = volume.get('cinder')
        if not cinder_spec:
            raise exception.InvalidCapsuleTemplate("Zun now Only support "
                                                   "Cinder volume driver")
        volume_uuid = cinder_spec.get('volumeID')
        volume_size = cinder_spec.get('size')
        # Either an existing volume uuid OR a size for a new volume.
        if not volume_uuid and volume_size is None:
            raise exception.InvalidCapsuleTemplate("Volume size "
                                                   "is needed")
        if volume_uuid and volume_size:
            raise exception.InvalidCapsuleTemplate("Volume size and uuid "
                                                   "could not be set at "
                                                   "the same time")
    return volumes_spec
def is_all_projects(search_opts):
    """Interpret the 'all_projects' search option as a boolean.

    :raises: exception.InvalidValue when the value is not a recognised
             boolean string.
    """
    all_projects = search_opts.get('all_projects')
    if not all_projects:
        return False
    try:
        return strutils.bool_from_string(all_projects, True)
    except ValueError:
        bools = ', '.join(strutils.TRUE_STRINGS + strutils.FALSE_STRINGS)
        raise exception.InvalidValue(_('Valid all_projects values are: %s')
                                     % bools)
def get_container(container_ident):
    """Look up a container by ident or abort the request with HTTP 404."""
    container = api_utils.get_resource('Container', container_ident)
    if container:
        return container
    # pecan.abort raises, so execution never continues past this point.
    pecan.abort(404, ('Not found; the container you requested '
                      'does not exist.'))
def get_image(image_id):
    """Look up an image by id or abort the request with HTTP 404."""
    image = api_utils.get_resource('Image', image_id)
    if image:
        return image
    # pecan.abort raises, so execution never continues past this point.
    pecan.abort(404, ('Not found; the image you requested '
                      'does not exist.'))
def check_for_restart_policy(container_dict):
    """Check for restart policy input.

    Normalizes MaximumRetryCount (default '0') and validates it against
    the chosen policy name.

    :param container_dict: a container within the request body.
    :raises: exception.InvalidValue when the retry count is not an
             integer or conflicts with the policy.
    """
    restart_policy = container_dict.get('restart_policy')
    if not restart_policy:
        return

    name = restart_policy.get('Name')
    num = restart_policy.setdefault('MaximumRetryCount', '0')
    try:
        count = int(num)
    except (TypeError, ValueError):
        # A non-numeric count previously escaped as a bare ValueError
        # (HTTP 500); surface it as a client-side validation error.
        raise exception.InvalidValue(_(
            "maximum retry count must be an integer, got %s") % num)
    if name in ['unless-stopped', 'always']:
        if count != 0:
            msg = _("maximum retry count not valid with restart "
                    "policy of %s") % name
            raise exception.InvalidValue(msg)
    elif name in ['no']:
        # 'no' never retries; pin the count (restart_policy already
        # references container_dict['restart_policy']).
        restart_policy['MaximumRetryCount'] = '0'
def build_requested_networks(context, nets):
    """Build requested networks by calling neutron client

    :param nets: The special network uuid when create container
                 if none, will call neutron to create new network.
    :returns: available network and ports
    """
    neutron_api = neutron.NeutronAPI(context)

    def _entry(network_id, port_id, fixed_ip, preserve, network):
        # One requested-network record in the shape the compute layer
        # expects.
        return {'network': network_id,
                'port': port_id,
                'router:external': network.get('router:external'),
                'shared': network.get('shared'),
                'fixed_ip': fixed_ip,
                'preserve_on_delete': preserve}

    requested_networks = []
    for net in nets:
        if net.get('port'):
            # Pre-created port: make sure it is free before claiming it.
            port = neutron_api.get_neutron_port(net['port'])
            neutron_api.ensure_neutron_port_usable(port)
            network = neutron_api.get_neutron_network(port['network_id'])
            requested_networks.append(
                _entry(port['network_id'], port['id'], '', True, network))
        elif net.get('network'):
            network = neutron_api.get_neutron_network(net['network'])
            fixed_ip = (net.get('fixed_ip') or
                        net.get('v4-fixed-ip', '') or
                        net.get('v6-fixed-ip', ''))
            requested_networks.append(
                _entry(network['id'], '', fixed_ip, False, network))

    if not requested_networks:
        # Find an available neutron net and create docker network by
        # wrapping the neutron net.
        network = neutron_api.get_available_network()
        requested_networks.append(
            _entry(network['id'], '', '', False, network))

    check_external_network_attach(context, requested_networks)
    return requested_networks
def check_external_network_attach(context, nets):
    """Check if attaching to external network is permitted."""
    if context.can(NETWORK_ATTACH_EXTERNAL, fatal=False):
        return
    for net in nets:
        # Shared external networks are allowed even without the policy.
        if net.get('router:external') and not net.get('shared'):
            raise exception.ExternalNetworkAttachForbidden(
                network_uuid=net['network'])
class EventReporter(object):
    """Context manager to report container action events.

    On entry an event-start record is written for every container uuid;
    on exit an event-finish record (carrying any exception raised in the
    managed block) is written for each of them.
    """

    def __init__(self, context, event_name, *container_uuids):
        self.context = context
        self.event_name = event_name
        self.container_uuids = container_uuids

    def __enter__(self):
        for container_uuid in self.container_uuids:
            objects.ContainerActionEvent.event_start(
                self.context, container_uuid, self.event_name,
                want_result=False)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        for container_uuid in self.container_uuids:
            objects.ContainerActionEvent.event_finish(
                self.context, container_uuid, self.event_name,
                exc_val=exc_val, exc_tb=exc_tb, want_result=False)
        # Never swallow an exception raised inside the managed block.
        return False
def get_wrapped_function(function):
    """Get the method at the bottom of a stack of decorators."""
    if not getattr(function, '__closure__', None):
        return function

    def _unwrap(func):
        # Walk the closure cells depth-first looking for the innermost
        # callable; return None for plain (closure-free) functions.
        if not getattr(func, '__closure__', None):
            return None
        for cell in func.__closure__:
            candidate = cell.cell_contents
            deeper = _unwrap(candidate)
            if deeper:
                return deeper
            elif hasattr(candidate, '__call__'):
                return candidate
        return func

    return _unwrap(function)
def wrap_container_event(prefix):
    """Wraps a method to log the event taken on the container, and result.

    This decorator wraps a method to log the start and result of an
    event, as part of an action taken on a container.
    """
    def helper(function):

        @functools.wraps(function)
        def decorated_function(self, context, *args, **kwargs):
            # Unwrap the decorator stack so getcallargs inspects the
            # real signature and can find the 'container' argument.
            wrapped_func = get_wrapped_function(function)
            keyed_args = inspect.getcallargs(wrapped_func, self, context,
                                             *args, **kwargs)
            container_uuid = keyed_args['container'].uuid
            event_name = '{0}_{1}'.format(prefix, function.__name__)
            with EventReporter(context, event_name, container_uuid):
                return function(self, context, *args, **kwargs)

        return decorated_function
    return helper
def wrap_exception():
    """Decorator factory that logs and suppresses handler exceptions.

    DockerError is logged as an error without a traceback; any other
    exception is logged with one.  reraise=False means the failure is
    not propagated, so the decorated method returns None on error.
    """
    def helper(function):

        @functools.wraps(function)
        def decorated_function(self, context, container, *args, **kwargs):
            try:
                return function(self, context, container, *args, **kwargs)
            except exception.DockerError as e:
                # Expected Docker API failure path.
                with excutils.save_and_reraise_exception(reraise=False):
                    LOG.error("Error occurred while calling Docker API: %s",
                              six.text_type(e))
            except Exception as e:
                with excutils.save_and_reraise_exception(reraise=False):
                    LOG.exception("Unexpected exception: %s", six.text_type(e))
        return decorated_function
    return helper | 0.515376 | 0.209935
from re import S
import re
import time
from requests.sessions import merge_setting
import telepot
from telepot import Bot
from telepot.loop import MessageLoop
from telepot.namedtuple import InlineKeyboardMarkup, InlineKeyboardButton, ReplyKeyboardMarkup
from common.MyMQTT import MyMQTT
from common.RegManager import RegManager
from common.Mapper import Mapper
import datetime
import json
import requests
import time
import io
import base64
import pickle
import cv2
import os
class Telegrambot():
    """Telegram front-end for the museum IoT platform.

    Bridges the MQTT bus (over-temperature alerts in, switch commands
    out) with a Telegram chat offering a public client role and a
    password-protected administrator role.
    """

    def __init__(self, confAddr):
        # Load the JSON configuration; abort the process when missing.
        try:
            self.conf = json.load(open(confAddr))
        except:
            print("Configuration file not found")
            exit()
        self.token = self.conf["token"]
        self.bot = telepot.Bot(self.token)
        self.serviceId = self.conf["serviceId"]
        self.client = MyMQTT(
            self.serviceId, self.conf["broker"], int(self.conf["port"]), self)
        self.homeCatAddr = self.conf["homeCatAddress"]
        self.webServerAddr = self.conf["webServerAddress"]
        self.overtempTopic = self.conf["overTempTopic"]
        self.switchTopic = self.conf["switchTopic"]
        #self.__message = {"start": "", "info": ""}
        # Register this service in the home catalog.
        regMsg = {"registerType": "service",
                  "id": self.serviceId,
                  "type": "telegram",
                  "attribute": {"topic1": self.overtempTopic,
                                "topic2": self.switchTopic,
                                }}
        self.Reg = RegManager(self.homeCatAddr)
        self.museumSetting = self.Reg.register(regMsg)
        # check the register is correct or not
        if self.museumSetting == "":
            exit()
        # Keyboard options: one row per indoor zone plus group switches.
        self.possibleSwitch =[]
        zones = set(self.museumSetting["zones"].keys())-{"outdoor"}
        for zone in zones:
            temp = [zone]
            self.possibleSwitch.append(temp)
        self.possibleSwitch.append(["All light"])
        self.possibleSwitch.append(["All laser"])
        self.possibleSwitch.append(["All motor"])
        self.possibleSwitch.append(["All camera"])
        self.possibleSwitch.append(["ALL"])
        # Device lookup tables derived from the catalog.
        self.mapper =Mapper()
        self.lights = self.Reg.getData("devices", "light", None)["data"]
        self.zone2light = self.mapper.getMap_zone2Light(self.lights,self.museumSetting["zones"])
        self.cameras = self.Reg.getData("devices", "camera", None)["data"]
        self.cam2entrance = self.mapper.getMap_camera2entrance(self.cameras)
        self.cam2rest = self.mapper.getMap_camera2REST(self.cameras)
        # chat_id (str) -> True once the admin password was accepted.
        self.chat_auth = {}
        self.switchMode = "None"

    def start(self):
        """Start the MQTT client and the Telegram message loop thread."""
        self.client.start()
        # subscribe to topic according to available device
        self.client.mySubscribe(self.overtempTopic)
        MessageLoop(self.bot,self.msg_handler).run_as_thread()

    def stop(self):
        """Stop the MQTT client and unregister from the catalog."""
        self.workingStatus = False
        self.client.stop()
        # unregister device
        self.Reg.delete("service", self.serviceId)

    def msg_handler(self,msg):
        """Dispatch one incoming Telegram message to the right handler."""
        content_type, chat_type, chat_id = telepot.glance(msg)
        message = msg['text']
        if message == "/start":
            self.initial_message(chat_id)
        elif message == "/client":
            self.user_message(chat_id)
        elif message == "/administrator":
            # Mark the chat as pending password entry.
            self.chat_auth[str(chat_id)]=False
            self.bot.sendMessage(chat_id, 'Please enter the password')
        elif message == "/see data":
            if self.check_auth(chat_id)==True:
                self.admin_see_data(chat_id)
            else:
                self.bot.sendMessage(chat_id, "Please send '/administrator' to login first!")
        elif message == "/operate":
            if self.check_auth(chat_id)==True:
                self.admin_operate(chat_id)
            else:
                self.bot.sendMessage(chat_id, "Please send '/administrator' to login first!")
        elif message == "/switchon":
            if self.check_auth(chat_id)==True:
                self.switchMode ="on"
                self.admin_switch_zone(chat_id)
            else:
                self.bot.sendMessage(chat_id, "Please send '/administrator' to login first!")
        elif message == "/switchoff":
            if self.check_auth(chat_id)==True:
                self.switchMode="off"
                self.admin_switch_zone(chat_id)
            else:
                self.bot.sendMessage(chat_id, "Please send '/administrator' to login first!")
        elif message == "/Logout":
            if self.check_auth(chat_id)==True:
                del self.chat_auth[str(chat_id)]
            else:
                self.bot.sendMessage(chat_id, "You haven't logged in!")
        else:
            # Free-form text: either a pending switch target selection
            # or a password attempt.
            if self.switchMode == "on" or self.switchMode=="off":
                if self.check_auth(chat_id) ==True:
                    for switch in self.possibleSwitch:
                        if switch[0] == message:
                            self.admin_switch(chat_id,message,self.switchMode)
                            self.switchMode ="None"
                            self.bot.sendMessage(chat_id, "Command sent successfully!")
                            mark_up = ReplyKeyboardMarkup(keyboard=[['/operate'], ['/see data'],['/Logout']],one_time_keyboard=True)
                            self.bot.sendMessage(
                                chat_id, text='What would you like to do next?', reply_markup=mark_up)
                            return
                    self.bot.sendMessage(chat_id, 'Please enter the correct command!')
                else:
                    self.bot.sendMessage(chat_id, "Please send '/administrator' to login first!")
            else:
                try:
                    if self.chat_auth[str(chat_id)]==False:
                        if message == "admin":
                            self.chat_auth[str(chat_id)]=True
                            self.admin_message(chat_id)
                        else:
                            self.bot.sendMessage(chat_id, 'Password error! Please re-type password!')
                    else:
                        self.bot.sendMessage(chat_id, 'Please enter the correct command!')
                except:
                    self.bot.sendMessage(chat_id, "Please send '/administrator' to login !")

    def check_auth(self,chat_id):
        """Return True when the chat completed admin authentication."""
        try:
            if self.chat_auth[str(chat_id)]==True:
                return True
            else:
                return False
        except:
            # Unknown chat_id: never authenticated.
            return False

    def initial_message(self,chat_id):
        """Send the role-selection keyboard."""
        # design as reply keyboard
        mark_up = ReplyKeyboardMarkup(keyboard=[['/client'], ['/administrator']],one_time_keyboard=True)
        self.bot.sendMessage(
            chat_id, text='Welcome to the museum!', reply_markup=mark_up)

    def user_message(self, chat_id):
        """Send a visitor the link to the public data dashboard."""
        self.bot.sendMessage(chat_id,
                             parse_mode='Markdown',
                             text='*What do you want to know about the Museum?*'
                             )
        self.bot.sendMessage(chat_id,
                             parse_mode='Markdown',
                             text="[See data]("+self.webServerAddr+")"
                             )

    def admin_message(self,chat_id):
        """Confirm the admin login and show the admin action keyboard."""
        mark_up = ReplyKeyboardMarkup(keyboard=[['/operate'], ['/see data']],one_time_keyboard=True)
        self.bot.sendMessage(
            chat_id, text='Login success! What would you like to do?', reply_markup=mark_up)

    def admin_see_data(self,chat_id):
        """Send the admin an inline link to the data dashboard."""
        self.bot.keyboardRow = 2
        keyboard = InlineKeyboardMarkup(inline_keyboard=[
            [InlineKeyboardButton(
                text='See data', url=self.webServerAddr)]
        ])
        self.bot.sendMessage(
            chat_id, 'What would you like to see?', reply_markup=keyboard)
        mark_up = ReplyKeyboardMarkup(keyboard=[['/operate'], ['/see data']],one_time_keyboard=True)
        self.bot.sendMessage(
            chat_id, text='What else would you like to do?', reply_markup=mark_up)

    def admin_operate(self, chat_id):
        """Show the on/off operation keyboard."""
        mark_up = ReplyKeyboardMarkup(keyboard=[['/switchon'], ['/switchoff']],one_time_keyboard=True)
        self.bot.sendMessage(
            chat_id, text='Please select your operation...', reply_markup=mark_up)

    def admin_switch_zone(self,chat_id):
        """Show the keyboard of switchable zones and device groups."""
        mark_up = ReplyKeyboardMarkup(keyboard=self.possibleSwitch,
                                      one_time_keyboard=True)
        self.bot.sendMessage(
            chat_id, text='Please select the light in each zone or other devices you want to switch...', reply_markup=mark_up)

    def publish(self,target, switchTo):
        """Publish one switch command on the MQTT switch topic."""
        msg = {"target":target,"switchTo":switchTo,"timestamp":str(datetime.datetime.now())}
        self.client.myPublish(self.switchTopic, msg)
        print("Published: " + json.dumps(msg))

    def admin_switch(self,chat_id,message,switchMode):
        """Translate a keyboard selection into switch command(s)."""
        if message == "ALL":
            self.publish(target="ALL",switchTo=switchMode)
        elif message == "All light":
            self.publish(target="light",switchTo=switchMode)
        elif message == "All laser":
            self.publish(target="laser",switchTo=switchMode)
        elif message == "All motor":
            self.publish(target="motor",switchTo=switchMode)
        elif message == "All camera":
            self.publish(target="camera",switchTo=switchMode)
        else:
            # A zone name: switch every light registered in that zone.
            lights = self.zone2light[message]
            for light in lights:
                self.publish(target=light,switchTo=switchMode)

    def getImage(self, uri, seq):
        """Fetch the camera frame with the given sequence number."""
        uri = uri+"/"+str(seq)
        response = requests.get(uri,None)
        img =self.exactImageFromResponse(response)
        return img

    def exactImageFromResponse(self,response):
        """Decode the image payload from a camera REST response."""
        data = response.text
        imgs = json.loads(data)["img"]
        img = self.json2im(imgs)
        return img

    def json2im(self,jstr):
        """Convert a JSON string back to a Numpy array"""
        load = json.loads(jstr)
        imdata = base64.b64decode(load['image'])
        # NOTE(review): pickle.loads on data received over the network
        # is unsafe with untrusted peers — confirm the camera service is
        # trusted.
        im = pickle.loads(imdata)
        return im

    def notify(self, topic, msg):
        """MQTT callback: alert logged-in admins on over-temperature.

        Sends the alert text plus the matching camera snapshot to every
        authenticated admin chat, then removes the temporary image.
        """
        msg = json.loads(msg)
        info = "ALERT!!Find someone with too high body temperature!!!"
        print(msg)
        info2 = "In "+self.cam2entrance[msg["id"]]+" ,with temperature: "+str(msg["temperature"])
        imMagAddr = self.cam2rest[msg["id"]]
        photo = self.getImage(imMagAddr,msg["sequenceNum"])
        cv2.imwrite("./temp/"+str(msg["sequenceNum"])+".jpg",photo)
        if self.chat_auth!=[]:
            for chat_id in set(self.chat_auth.keys()):
                if self.chat_auth[chat_id]==True:
                    self.bot.sendMessage(chat_id, info)
                    self.bot.sendMessage(chat_id,info2)
                    self.bot.sendPhoto(chat_id,photo=open("./temp/"+str(msg["sequenceNum"])+".jpg","rb"))
            # NOTE(review): indentation inferred — the temp file is
            # deleted once, after all admins received the photo.
            os.remove("./temp/"+str(msg["sequenceNum"])+".jpg")
if __name__ == "__main__":
    configFile = input("Enter the location of configuration file: ")
    if len(configFile) == 0:
        # Fall back to the default configuration next to the script.
        configFile = "./configuration.json"
    telegrambot = Telegrambot(configFile)
    telegrambot.start()
    print('waiting ...')
    # Keep the program running.
    while (True):
        if input() == 'q':
            break
    telegrambot.stop() | teleBot/Telegram.py | from re import S
import re
import time
from requests.sessions import merge_setting
import telepot
from telepot import Bot
from telepot.loop import MessageLoop
from telepot.namedtuple import InlineKeyboardMarkup, InlineKeyboardButton, ReplyKeyboardMarkup
from common.MyMQTT import MyMQTT
from common.RegManager import RegManager
from common.Mapper import Mapper
import datetime
import json
import requests
import time
import io
import base64
import pickle
import cv2
import os
class Telegrambot():
def __init__(self, confAddr):
try:
self.conf = json.load(open(confAddr))
except:
print("Configuration file not found")
exit()
self.token = self.conf["token"]
self.bot = telepot.Bot(self.token)
self.serviceId = self.conf["serviceId"]
self.client = MyMQTT(
self.serviceId, self.conf["broker"], int(self.conf["port"]), self)
self.homeCatAddr = self.conf["homeCatAddress"]
self.webServerAddr = self.conf["webServerAddress"]
self.overtempTopic = self.conf["overTempTopic"]
self.switchTopic = self.conf["switchTopic"]
#self.__message = {"start": "", "info": ""}
regMsg = {"registerType": "service",
"id": self.serviceId,
"type": "telegram",
"attribute": {"topic1": self.overtempTopic,
"topic2": self.switchTopic,
}}
self.Reg = RegManager(self.homeCatAddr)
self.museumSetting = self.Reg.register(regMsg)
# check the register is correct or not
if self.museumSetting == "":
exit()
self.possibleSwitch =[]
zones = set(self.museumSetting["zones"].keys())-{"outdoor"}
for zone in zones:
temp = [zone]
self.possibleSwitch.append(temp)
self.possibleSwitch.append(["All light"])
self.possibleSwitch.append(["All laser"])
self.possibleSwitch.append(["All motor"])
self.possibleSwitch.append(["All camera"])
self.possibleSwitch.append(["ALL"])
self.mapper =Mapper()
self.lights = self.Reg.getData("devices", "light", None)["data"]
self.zone2light = self.mapper.getMap_zone2Light(self.lights,self.museumSetting["zones"])
self.cameras = self.Reg.getData("devices", "camera", None)["data"]
self.cam2entrance = self.mapper.getMap_camera2entrance(self.cameras)
self.cam2rest = self.mapper.getMap_camera2REST(self.cameras)
self.chat_auth = {}
self.switchMode = "None"
def start(self):
self.client.start()
# subscribe to topic according to available device
self.client.mySubscribe(self.overtempTopic)
MessageLoop(self.bot,self.msg_handler).run_as_thread()
def stop(self):
self.workingStatus = False
self.client.stop()
# unregister device
self.Reg.delete("service", self.serviceId)
def msg_handler(self,msg):
content_type, chat_type, chat_id = telepot.glance(msg)
message = msg['text']
if message == "/start":
self.initial_message(chat_id)
elif message == "/client":
self.user_message(chat_id)
elif message == "/administrator":
self.chat_auth[str(chat_id)]=False
self.bot.sendMessage(chat_id, 'Please enter the password')
elif message == "/see data":
if self.check_auth(chat_id)==True:
self.admin_see_data(chat_id)
else:
self.bot.sendMessage(chat_id, "Please send '/administrator' to login first!")
elif message == "/operate":
if self.check_auth(chat_id)==True:
self.admin_operate(chat_id)
else:
self.bot.sendMessage(chat_id, "Please send '/administrator' to login first!")
elif message == "/switchon":
if self.check_auth(chat_id)==True:
self.switchMode ="on"
self.admin_switch_zone(chat_id)
else:
self.bot.sendMessage(chat_id, "Please send '/administrator' to login first!")
elif message == "/switchoff":
if self.check_auth(chat_id)==True:
self.switchMode="off"
self.admin_switch_zone(chat_id)
else:
self.bot.sendMessage(chat_id, "Please send '/administrator' to login first!")
elif message == "/Logout":
if self.check_auth(chat_id)==True:
del self.chat_auth[str(chat_id)]
else:
self.bot.sendMessage(chat_id, "You haven't logged in!")
else:
if self.switchMode == "on" or self.switchMode=="off":
if self.check_auth(chat_id) ==True:
for switch in self.possibleSwitch:
if switch[0] == message:
self.admin_switch(chat_id,message,self.switchMode)
self.switchMode ="None"
self.bot.sendMessage(chat_id, "Command sent successfully!")
mark_up = ReplyKeyboardMarkup(keyboard=[['/operate'], ['/see data'],['/Logout']],one_time_keyboard=True)
self.bot.sendMessage(
chat_id, text='What would you like to do next?', reply_markup=mark_up)
return
self.bot.sendMessage(chat_id, 'Please enter the correct command!')
else:
self.bot.sendMessage(chat_id, "Please send '/administrator' to login first!")
else:
try:
if self.chat_auth[str(chat_id)]==False:
if message == "admin":
self.chat_auth[str(chat_id)]=True
self.admin_message(chat_id)
else:
self.bot.sendMessage(chat_id, 'Password error! Please re-type password!')
else:
self.bot.sendMessage(chat_id, 'Please enter the correct command!')
except:
self.bot.sendMessage(chat_id, "Please send '/administrator' to login !")
def check_auth(self,chat_id):
try:
if self.chat_auth[str(chat_id)]==True:
return True
else:
return False
except:
return False
def initial_message(self,chat_id):
# design as reply keyboard
mark_up = ReplyKeyboardMarkup(keyboard=[['/client'], ['/administrator']],one_time_keyboard=True)
self.bot.sendMessage(
chat_id, text='Welcome to the museum!', reply_markup=mark_up)
def user_message(self, chat_id):
self.bot.sendMessage(chat_id,
parse_mode='Markdown',
text='*What do you want to know about the Museum?*'
)
self.bot.sendMessage(chat_id,
parse_mode='Markdown',
text="[See data]("+self.webServerAddr+")"
)
def admin_message(self,chat_id):
mark_up = ReplyKeyboardMarkup(keyboard=[['/operate'], ['/see data']],one_time_keyboard=True)
self.bot.sendMessage(
chat_id, text='Login success! What would you like to do?', reply_markup=mark_up)
def admin_see_data(self,chat_id):
self.bot.keyboardRow = 2
keyboard = InlineKeyboardMarkup(inline_keyboard=[
[InlineKeyboardButton(
text='See data', url=self.webServerAddr)]
])
self.bot.sendMessage(
chat_id, 'What would you like to see?', reply_markup=keyboard)
mark_up = ReplyKeyboardMarkup(keyboard=[['/operate'], ['/see data']],one_time_keyboard=True)
self.bot.sendMessage(
chat_id, text='What else would you like to do?', reply_markup=mark_up)
def admin_operate(self, chat_id):
mark_up = ReplyKeyboardMarkup(keyboard=[['/switchon'], ['/switchoff']],one_time_keyboard=True)
self.bot.sendMessage(
chat_id, text='Please select your operation...', reply_markup=mark_up)
def admin_switch_zone(self,chat_id):
mark_up = ReplyKeyboardMarkup(keyboard=self.possibleSwitch,
one_time_keyboard=True)
self.bot.sendMessage(
chat_id, text='Please select the light in each zone or other devices you want to switch...', reply_markup=mark_up)
def publish(self,target, switchTo):
msg = {"target":target,"switchTo":switchTo,"timestamp":str(datetime.datetime.now())}
self.client.myPublish(self.switchTopic, msg)
print("Published: " + json.dumps(msg))
def admin_switch(self,chat_id,message,switchMode):
if message == "ALL":
self.publish(target="ALL",switchTo=switchMode)
elif message == "All light":
self.publish(target="light",switchTo=switchMode)
elif message == "All laser":
self.publish(target="laser",switchTo=switchMode)
elif message == "All motor":
self.publish(target="motor",switchTo=switchMode)
elif message == "All camera":
self.publish(target="camera",switchTo=switchMode)
else:
lights = self.zone2light[message]
for light in lights:
self.publish(target=light,switchTo=switchMode)
def getImage(self, uri, seq):
uri = uri+"/"+str(seq)
response = requests.get(uri,None)
img =self.exactImageFromResponse(response)
return img
def exactImageFromResponse(self,response):
data = response.text
imgs = json.loads(data)["img"]
img = self.json2im(imgs)
return img
def json2im(self,jstr):
"""Convert a JSON string back to a Numpy array"""
load = json.loads(jstr)
imdata = base64.b64decode(load['image'])
im = pickle.loads(imdata)
return im
def notify(self, topic, msg):
msg = json.loads(msg)
info = "ALERT!!Find someone with too high body temperature!!!"
print(msg)
info2 = "In "+self.cam2entrance[msg["id"]]+" ,with temperature: "+str(msg["temperature"])
imMagAddr = self.cam2rest[msg["id"]]
photo = self.getImage(imMagAddr,msg["sequenceNum"])
cv2.imwrite("./temp/"+str(msg["sequenceNum"])+".jpg",photo)
if self.chat_auth!=[]:
for chat_id in set(self.chat_auth.keys()):
if self.chat_auth[chat_id]==True:
self.bot.sendMessage(chat_id, info)
self.bot.sendMessage(chat_id,info2)
self.bot.sendPhoto(chat_id,photo=open("./temp/"+str(msg["sequenceNum"])+".jpg","rb"))
os.remove("./temp/"+str(msg["sequenceNum"])+".jpg")
if __name__ == "__main__":
configFile = input("Enter the location of configuration file: ")
if len(configFile) == 0:
configFile = "./configuration.json"
telegrambot = Telegrambot(configFile)
telegrambot.start()
print('waiting ...')
# Keep the program running.
while (True):
if input() == 'q':
break
telegrambot.stop() | 0.116487 | 0.062991 |
from src.FileActionHelper import FileActionHelper
from src.Constants import Constants
from src.StringActionHelper import StringActionHelper
from src.FileUpdater import FileUpdater
from src.ChangelogTableHelper import ChangelogTableHelper
from bs4 import element
import html2markdown
def get_new_table_row(cell_list, new_cell_index, new_cells):
    """
    Return a table row string where the cells from ``new_cell_index``
    onwards are replaced by ``new_cells``, keeping the original final
    cell.

    :return: string
    """
    row_cells = cell_list[:new_cell_index] + new_cells + [cell_list[-1]]
    return Constants.TABLE_COLUMN_SPLITTER_IN_CHANGELOG.join(row_cells)
class ChangelogFileUpdater(FileUpdater):
    """Writes a new release section into an extension's CHANGELOG file.

    Clones the table of the last released version, rewrites its version
    cells/tick marks for the release candidate, and saves the result as
    markdown.
    """

    def __init__(self,
                 extension,
                 release_candidate_version,
                 last_released_version,
                 php_compatibility_versions,
                 php_tested_versions,
                 shopsystem_compatibility_versions,
                 shopsystem_tested_versions,
                 platform_compatibility_versions=None,
                 platform_tested_versions=None):
        super().__init__(extension,
                         release_candidate_version,
                         last_released_version,
                         php_compatibility_versions,
                         php_tested_versions,
                         shopsystem_compatibility_versions,
                         shopsystem_tested_versions,
                         platform_compatibility_versions,
                         platform_tested_versions)

    def add_new_release_entry_to_changelog(self):
        """
        Adds new entry to changelog file
        """
        soup, changelog_data = FileActionHelper.get_changelog_markdown_entries(self.extension)
        # Only insert when the newest entry is not already the RC.
        # NOTE(review): indentation inferred — the whole insertion is
        # treated as conditional on this check.
        if soup.h2.string != self.release_candidate_version:
            header_entry = soup.new_tag(Constants.RELEASE_HEADER_TAG_IN_CHANGELOG)
            header_entry.string = self.release_candidate_version
            soup.h2.insert_before(header_entry)
            soup.h2.insert_after(element.NavigableString(Constants.NEW_LINE))
            # Reuse the previous release's table as the template.
            table_entry = soup.new_tag(Constants.TABLE_TAG_IN_CHANGELOG)
            table_entry_contents = \
                FileActionHelper.get_changelog_markdown_entry_part(self.extension,
                                                                   self.last_released_version, 'table').contents
            table_entry.contents = table_entry_contents
            self.update_table_rows(table_entry_contents)
            soup.h2.insert_after(table_entry)
            soup.p.insert_before(element.NavigableString(Constants.NEW_LINE))
            print("{} Updating CHANGELOG file {}".format(Constants.PRETTY_LOG_ADDITION, Constants.PRETTY_LOG_ADDITION))
            with open(FileActionHelper.get_file_path_by_config_key(self.extension, Constants.CHANGELOG_FILE), 'w') as f:
                f.write(html2markdown.convert(str(soup)))

    def update_table_rows(self, table_entry_contents):
        """
        Updates changelog table
        """
        for entry in table_entry_contents:
            if isinstance(entry, element.NavigableString):
                # Plain strings hold the header row and the separator row.
                if Constants.OVERVIEW_IN_CHANGELOG in entry.string:
                    new_entry_text = self.get_compatible_php_versions_table_header_string(entry.string)
                    table_entry_contents[table_entry_contents.index(entry)].string.replace_with(new_entry_text)
                if Constants.TABLE_ROW_COLUMN_SPLITTER_IN_CHANGELOG in entry.string:
                    new_entry_text = self.get_row_separator_table_row(entry.string)
                    table_entry_contents[table_entry_contents.index(entry)].string.replace_with(new_entry_text)
            if isinstance(entry, element.Tag):
                # Tags label the "Tested"/"Compatibility" rows; the row
                # body lives in the tag's next sibling.
                if Constants.TESTED_IN_CHANGELOG in entry.text:
                    new_entry_text = self.get_tested_shopsystem_and_platform_versions_table_string(
                        entry.nextSibling.string)
                    new_entry_text = self.get_tested_php_versions_table_string(new_entry_text)
                    table_entry_contents[table_entry_contents.index(entry)].nextSibling.string.replace_with(
                        new_entry_text)
                if Constants.COMPATIBILITY_IN_CHANGELOG in entry.text:
                    new_entry_text = self.get_compatibility_shopsystem_and_platform_versions_table_string(
                        entry.nextSibling.string)
                    new_entry_text = self.get_compatible_php_versions_table_string(new_entry_text)
                    table_entry_contents[table_entry_contents.index(entry)].nextSibling.string.replace_with(
                        new_entry_text)

    def get_compatible_php_versions_table_header_string(self, table_row) -> str:
        """
        Returns header table entry row with php versions
        :return: string
        """
        table_cells = table_row.split(Constants.TABLE_COLUMN_SPLITTER_IN_CHANGELOG)
        first_php_index = FileUpdater.get_index_of_first_list_entry_containing_text(table_cells,
                                                                                    Constants.PHP_IN_CHANGELOG)
        new_php_cells = []
        for version in self.php_compatibility_versions:
            new_php_cells.append(" {} {} ".format(Constants.PHP_IN_CHANGELOG, version))
        return get_new_table_row(table_cells, first_php_index, new_php_cells)

    def get_tested_php_versions_table_string(self, table_row) -> str:
        """
        Returns table entry row with tested php versions
        :return: string
        """
        table_cells = table_row.split(Constants.TABLE_COLUMN_SPLITTER_IN_CHANGELOG)
        # Tick only the compatible versions that were actually tested.
        intersection_list = list(set(self.php_compatibility_versions).intersection(self.php_tested_versions))
        tick_positions = []
        for intersection_version in intersection_list:
            tick_positions.append(self.php_compatibility_versions.index(intersection_version))
        new_sign_cells = self.get_new_sign_cells(tick_positions)
        return get_new_table_row(table_cells,
                                 ChangelogTableHelper.get_first_sign_in_table_row_index(table_cells),
                                 new_sign_cells)

    def get_compatible_php_versions_table_string(self, table_row) -> str:
        """
        Returns table entry row with compatible php versions
        :return: string
        """
        table_cells = table_row.split(Constants.TABLE_COLUMN_SPLITTER_IN_CHANGELOG)
        # Every compatible version gets a tick.
        tick_positions = [self.php_compatibility_versions.index(a) for a in self.php_compatibility_versions]
        new_sign_cells = self.get_new_sign_cells(tick_positions)
        return get_new_table_row(table_cells,
                                 ChangelogTableHelper.get_first_sign_in_table_row_index(table_cells),
                                 new_sign_cells)

    def get_new_sign_cells(self, tick_positions) -> list:
        """
        Returns string with table entry row filled with crosses or ticks
        :return: string
        """
        new_sign_cells = []
        for version in self.php_compatibility_versions:
            sign_to_put = " {} ".format(Constants.CROSS_SIGN_IN_CHANGELOG)
            if self.php_compatibility_versions.index(version) in tick_positions:
                sign_to_put = " {} ".format(Constants.TICK_SIGN_IN_CHANGELOG_UNICODE)
            new_sign_cells.append("{}".format(sign_to_put))
        return new_sign_cells

    def get_tested_shopsystem_and_platform_versions_table_string(self, table_row):
        """
        Returns table entry row with tested shop system and platform versions
        :return: string
        """
        return self.get_shopsystem_and_platform_versions_table_string(table_row, self.shopsystem_tested_versions,
                                                                      self.platform_tested_versions)

    def get_compatibility_shopsystem_and_platform_versions_table_string(self, table_row):
        """
        Returns table entry row with compatible shop system and platform versions
        :return: string
        """
        return self.get_shopsystem_and_platform_versions_table_string(table_row, self.shopsystem_compatibility_versions,
                                                                      self.platform_compatibility_versions)

    def get_shopsystem_and_platform_versions_table_string(self, table_row, shopsystem_version_range,
                                                          platform_version_range) -> str:
        """
        Returns table entry row with updated shop system and platform versions
        :return: string
        """
        table_cells = table_row.split(Constants.TABLE_COLUMN_SPLITTER_IN_CHANGELOG)
        for cell in table_cells:
            if self.extension in cell.lower():
                index = table_cells.index(cell)
                # Cell format: "<shop versions>[, <platform versions>]".
                versions = cell.split(',')
                versions[0] = versions[0].replace(StringActionHelper.find_part_to_replace(versions[0]),
                                                  ' - '.join(shopsystem_version_range))
                table_cells[index] = versions[0]
                if len(versions) > 1 and platform_version_range is not None:
                    versions[1] = versions[1].replace(StringActionHelper.find_part_to_replace(versions[1]),
                                                      ' - '.join(platform_version_range))
                    table_cells[index] = ",".join(versions)
        return Constants.TABLE_COLUMN_SPLITTER_IN_CHANGELOG.join(table_cells)

    def get_row_separator_table_row(self, table_row):
        """
        Returns the markdown separator row resized to the current number
        of php version columns
        :return: string
        """
        table_cells = table_row.split(Constants.TABLE_COLUMN_SPLITTER_IN_CHANGELOG)
        first_special_cell_index = FileUpdater.get_index_of_first_list_entry_containing_text(table_cells, ":")
        new_special_cells = [Constants.ROW_SEPARATOR_IN_CHANGELOG for i in self.php_compatibility_versions]
        return get_new_table_row(table_cells, first_special_cell_index, new_special_cells) | src/ChangelogUpdater.py | from src.FileActionHelper import FileActionHelper
from src.Constants import Constants
from src.StringActionHelper import StringActionHelper
from src.FileUpdater import FileUpdater
from src.ChangelogTableHelper import ChangelogTableHelper
from bs4 import element
import html2markdown
def get_new_table_row(cell_list, new_cell_index, new_cells):
    """
    Returns a table row rebuilt with new cells.

    Keeps the cells before new_cell_index, appends new_cells, and re-attaches
    the original trailing cell so the row keeps its closing column splitter.
    :param cell_list: cells of the original row (already split on the splitter)
    :param new_cell_index: index at which the replacement cells start
    :param new_cells: replacement cells
    :return: string
    """
    new_table_row = cell_list[0:new_cell_index] + new_cells
    new_table_row.append(cell_list[-1])  # idiomatic form of cell_list[len(cell_list) - 1]
    return Constants.TABLE_COLUMN_SPLITTER_IN_CHANGELOG.join(new_table_row)
class ChangelogFileUpdater(FileUpdater):
    """
    FileUpdater specialization that maintains an extension's CHANGELOG file.

    It inserts a new release section for the release candidate on top of the
    changelog and rewrites the PHP / shop-system / platform version columns
    of the cloned compatibility table.
    """

    def __init__(self,
                 extension,
                 release_candidate_version,
                 last_released_version,
                 php_compatibility_versions,
                 php_tested_versions,
                 shopsystem_compatibility_versions,
                 shopsystem_tested_versions,
                 platform_compatibility_versions=None,
                 platform_tested_versions=None):
        # Pure pass-through: all release metadata is stored by the base class.
        super().__init__(extension,
                         release_candidate_version,
                         last_released_version,
                         php_compatibility_versions,
                         php_tested_versions,
                         shopsystem_compatibility_versions,
                         shopsystem_tested_versions,
                         platform_compatibility_versions,
                         platform_tested_versions)
def add_new_release_entry_to_changelog(self):
    """
    Adds a new release entry (header plus compatibility table) to the changelog file.

    The header/table pair is only inserted when the newest changelog entry
    does not already carry the release candidate version; the table is cloned
    from the previous release's entry and its version cells are rewritten.
    """
    # NOTE(review): `changelog_data` is unpacked but never used here —
    # presumably kept only for the tuple shape of the helper; confirm.
    soup, changelog_data = FileActionHelper.get_changelog_markdown_entries(self.extension)
    if soup.h2.string != self.release_candidate_version:
        # Insert the new release header above the current first <h2>.
        header_entry = soup.new_tag(Constants.RELEASE_HEADER_TAG_IN_CHANGELOG)
        header_entry.string = self.release_candidate_version
        soup.h2.insert_before(header_entry)
        soup.h2.insert_after(element.NavigableString(Constants.NEW_LINE))
        # Clone the compatibility table of the last released version and
        # rewrite its version columns in place.
        table_entry = soup.new_tag(Constants.TABLE_TAG_IN_CHANGELOG)
        table_entry_contents = \
            FileActionHelper.get_changelog_markdown_entry_part(self.extension,
                                                               self.last_released_version, 'table').contents
        table_entry.contents = table_entry_contents
        self.update_table_rows(table_entry_contents)
        soup.h2.insert_after(table_entry)
        soup.p.insert_before(element.NavigableString(Constants.NEW_LINE))
    # The soup is converted back to markdown and written out.
    # NOTE(review): the source formatting was flattened in this dump; the
    # print/write may originally sit inside the version guard — confirm
    # against the upstream repository before relying on this placement.
    print("{} Updating CHANGELOG file {}".format(Constants.PRETTY_LOG_ADDITION, Constants.PRETTY_LOG_ADDITION))
    with open(FileActionHelper.get_file_path_by_config_key(self.extension, Constants.CHANGELOG_FILE), 'w') as f:
        f.write(html2markdown.convert(str(soup)))
def update_table_rows(self, table_entry_contents):
    """
    Rewrites the version information of the cloned changelog table in place.

    Plain-text rows (NavigableString) carry the PHP header row and the
    markdown separator row; Tag rows mark the "Tested"/"Compatibility"
    labels whose *next sibling* holds the actual version cells.
    """
    for entry in table_entry_contents:
        if isinstance(entry, element.NavigableString):
            if Constants.OVERVIEW_IN_CHANGELOG in entry.string:
                # Header row: one "PHP x.y" cell per compatible version.
                new_entry_text = self.get_compatible_php_versions_table_header_string(entry.string)
                table_entry_contents[table_entry_contents.index(entry)].string.replace_with(new_entry_text)
            if Constants.TABLE_ROW_COLUMN_SPLITTER_IN_CHANGELOG in entry.string:
                # Markdown separator row resized to the new column count.
                new_entry_text = self.get_row_separator_table_row(entry.string)
                table_entry_contents[table_entry_contents.index(entry)].string.replace_with(new_entry_text)
        if isinstance(entry, element.Tag):
            if Constants.TESTED_IN_CHANGELOG in entry.text:
                # Sibling after the "Tested" label holds the version cells;
                # rewrite shop/platform ranges first, then the PHP ticks.
                new_entry_text = self.get_tested_shopsystem_and_platform_versions_table_string(
                    entry.nextSibling.string)
                new_entry_text = self.get_tested_php_versions_table_string(new_entry_text)
                table_entry_contents[table_entry_contents.index(entry)].nextSibling.string.replace_with(
                    new_entry_text)
            if Constants.COMPATIBILITY_IN_CHANGELOG in entry.text:
                new_entry_text = self.get_compatibility_shopsystem_and_platform_versions_table_string(
                    entry.nextSibling.string)
                new_entry_text = self.get_compatible_php_versions_table_string(new_entry_text)
                table_entry_contents[table_entry_contents.index(entry)].nextSibling.string.replace_with(
                    new_entry_text)
def get_compatible_php_versions_table_header_string(self, table_row) -> str:
    """
    Returns the header table row with one "PHP <version>" cell per
    compatible PHP version.
    :return: string
    """
    table_cells = table_row.split(Constants.TABLE_COLUMN_SPLITTER_IN_CHANGELOG)
    first_php_index = FileUpdater.get_index_of_first_list_entry_containing_text(
        table_cells, Constants.PHP_IN_CHANGELOG)
    php_header_cells = [" {} {} ".format(Constants.PHP_IN_CHANGELOG, version)
                        for version in self.php_compatibility_versions]
    return get_new_table_row(table_cells, first_php_index, php_header_cells)
def get_tested_php_versions_table_string(self, table_row) -> str:
    """
    Returns the table row with ticks only in the PHP columns whose versions
    were both declared compatible and actually tested.
    :return: string
    """
    table_cells = table_row.split(Constants.TABLE_COLUMN_SPLITTER_IN_CHANGELOG)
    tested_versions = set(self.php_compatibility_versions).intersection(self.php_tested_versions)
    tick_positions = [self.php_compatibility_versions.index(version)
                      for version in tested_versions]
    sign_cells = self.get_new_sign_cells(tick_positions)
    first_sign_index = ChangelogTableHelper.get_first_sign_in_table_row_index(table_cells)
    return get_new_table_row(table_cells, first_sign_index, sign_cells)
def get_compatible_php_versions_table_string(self, table_row) -> str:
    """
    Returns the table row with every PHP compatibility column ticked.
    :param table_row: original row of the compatibility table
    :return: string
    """
    table_cells = table_row.split(Constants.TABLE_COLUMN_SPLITTER_IN_CHANGELOG)
    # Every compatible version gets a tick.  The original built this with a
    # quadratic list.index() scan that also misbehaves on duplicate entries;
    # the positions are simply 0..n-1.
    tick_positions = list(range(len(self.php_compatibility_versions)))
    new_sign_cells = self.get_new_sign_cells(tick_positions)
    return get_new_table_row(table_cells,
                             ChangelogTableHelper.get_first_sign_in_table_row_index(table_cells),
                             new_sign_cells)
def get_new_sign_cells(self, tick_positions) -> list:
    """
    Builds one table cell per PHP compatibility version: a tick for every
    position listed in tick_positions, a cross for the rest.

    (The original docstring claimed a string return; this returns a list.)
    :param tick_positions: indexes into self.php_compatibility_versions to tick
    :return: list of cell strings
    """
    # set() gives O(1) membership tests; enumerate() replaces list.index(),
    # which is O(n) per lookup and wrong when a version appears twice.
    ticked = set(tick_positions)
    new_sign_cells = []
    for position, _version in enumerate(self.php_compatibility_versions):
        sign = Constants.TICK_SIGN_IN_CHANGELOG_UNICODE if position in ticked \
            else Constants.CROSS_SIGN_IN_CHANGELOG
        new_sign_cells.append(" {} ".format(sign))
    return new_sign_cells
def get_tested_shopsystem_and_platform_versions_table_string(self, table_row):
    """
    Returns the table row updated with the *tested* shop system and
    platform version ranges.
    :return: string
    """
    return self.get_shopsystem_and_platform_versions_table_string(
        table_row,
        self.shopsystem_tested_versions,
        self.platform_tested_versions)
def get_compatibility_shopsystem_and_platform_versions_table_string(self, table_row):
    """
    Returns the table row updated with the *compatible* shop system and
    platform version ranges.
    :return: string
    """
    return self.get_shopsystem_and_platform_versions_table_string(
        table_row,
        self.shopsystem_compatibility_versions,
        self.platform_compatibility_versions)
def get_shopsystem_and_platform_versions_table_string(self, table_row, shopsystem_version_range,
                                                      platform_version_range) -> str:
    """
    Returns the table row with updated shop system and platform version ranges.

    The cell naming the extension carries "<shop versions>[,<platform versions>]";
    each part has its old version span replaced by "' - '.join(range)".
    :param table_row: original table row
    :param shopsystem_version_range: shop system versions to join into a range
    :param platform_version_range: platform versions to join, or None to skip
    :return: string
    """
    table_cells = table_row.split(Constants.TABLE_COLUMN_SPLITTER_IN_CHANGELOG)
    # enumerate() replaces table_cells.index(cell), which re-scans the list on
    # every hit and picks the wrong slot when two cells have identical content.
    for index, cell in enumerate(table_cells):
        if self.extension not in cell.lower():
            continue
        versions = cell.split(',')
        versions[0] = versions[0].replace(StringActionHelper.find_part_to_replace(versions[0]),
                                          ' - '.join(shopsystem_version_range))
        table_cells[index] = versions[0]
        if len(versions) > 1 and platform_version_range is not None:
            versions[1] = versions[1].replace(StringActionHelper.find_part_to_replace(versions[1]),
                                              ' - '.join(platform_version_range))
            table_cells[index] = ",".join(versions)
    return Constants.TABLE_COLUMN_SPLITTER_IN_CHANGELOG.join(table_cells)
def get_row_separator_table_row(self, table_row):
    """
    Returns the markdown separator row resized to the PHP version columns.

    (The original docstring was copy-pasted from the shop system method and
    wrongly described this as updating versions; it only rebuilds the
    separator cells.)
    :param table_row: original separator row of the changelog table
    :return: string
    """
    table_cells = table_row.split(Constants.TABLE_COLUMN_SPLITTER_IN_CHANGELOG)
    first_special_cell_index = FileUpdater.get_index_of_first_list_entry_containing_text(table_cells, ":")
    # One separator cell per PHP version column; `_` marks the unused loop var.
    new_special_cells = [Constants.ROW_SEPARATOR_IN_CHANGELOG for _ in self.php_compatibility_versions]
    return get_new_table_row(table_cells, first_special_cell_index, new_special_cells)
import requests
import pafy
from hurry.filesize import size
from bs4 import BeautifulSoup
def trade_spider():
    """Program entry point: prompt for the content type, then hand off to download_content()."""
    print('Do you want to download a video or audio? (a/v)')
    download_content()
def download_content():
    """
    Read the content-type choice ('v' = video, 'a' = audio) from stdin, run a
    search via check_by_url_and_return_video(), and download the result(s).

    Any invalid choice or empty search result restarts the prompt through
    trade_spider().
    """
    type_of_file = input()
    if type_of_file not in ('v', 'a'):
        print('Invalid search. Please try again.')
        trade_spider()
        return
    type_string = 'video' if type_of_file == 'v' else 'audio'
    # BUG FIX: check_by_url_and_return_video() compares its argument against
    # 'v', so it must receive the raw one-letter choice.  The original passed
    # the expanded word ('video'/'audio'), which made the helper always label
    # playlist output as "audio".
    result = check_by_url_and_return_video(type_of_file)
    payload, result_kind = result
    if not payload:
        print('Invalid search. Please try again.')
        trade_spider()
        return
    if result_kind == 'video':
        video = payload
        print('The ' + type_string + ' you are going to download is:\n' + video.title)
        print('\nDetails:\n\n' + 'Rating: ' + "%.1f" % video.rating + '\nView Count: '
              + str(video.viewcount) + '\nAuthor: ' + video.author + '\nLength: '
              + str(video.length) + ' seconds\nDuration: ' + str(video.duration)
              + '\nLikes: ' + str(video.likes) + '\nDislikes: ' + str(video.dislikes))
        print('\nDescription: ' + video.description)
        targets = [video]
    else:
        targets = payload
    for video in targets:
        if type_of_file == 'v':
            download_video(video)
        else:
            download_audio(video)
def _find_first_search_result(query):
    """Scrape the YouTube results page for *query* and wrap the first hit in a pafy object."""
    # '+'.join() builds the query string in one pass instead of the original
    # append-then-slice loop that chopped off a trailing '+'.
    url = 'https://www.youtube.com/results?search_query=' + '+'.join(query.split())
    soup = BeautifulSoup(requests.get(url).text, "lxml")
    try:
        link = soup.find('a', {'class': 'yt-uix-sessionlink yt-uix-tile-link yt-ui-ellipsis yt-ui-ellipsis-2 spf-link '})
        try:
            return [pafy.new('https://www.youtube.com' + link['href']), 'video']
        except TypeError:  # soup.find() returned None: no search hit
            print('No results found.')
            return [0, 'video']
    except ValueError:
        return [0, 'video']


def _collect_playlist_videos(query, type_string):
    """Print the playlist summary for the URL *query* and resolve every entry to a pafy object."""
    playlist_soup = BeautifulSoup(requests.get(query).text, "lxml")
    playlist = pafy.get_playlist(query)
    print('\nYou are going to download ' + type_string + 's of the playlist:\n' + playlist['title'])
    print('\nAuthor: ' + playlist['author'])
    print('\nList of ' + type_string + 's:')
    for item in playlist['items']:
        print(item['pafy'].title)
    videos = []
    try:
        for link in playlist_soup.findAll('a', {'class': 'pl-video-title-link yt-uix-tile-link yt-uix-sessionlink spf-link '}):
            try:
                videos.append(pafy.new('https://www.youtube.com' + link['href']))
            except TypeError:
                print('No results found.')
                return [0, 'playlist']
        return [videos, 'playlist']
    except ValueError:
        return [0, 'playlist']


def check_by_url_and_return_video(type):
    """
    Prompt for a search query (or playlist URL) and resolve it to pafy objects.

    :param type: one-letter choice: 'v' for video, anything else for audio.
                 (Name kept for caller compatibility although it shadows the
                 builtin ``type``.)
    :return: [pafy_video, 'video'] or [list_of_videos, 'playlist'] on success,
             [0, 'video'|'playlist'] when nothing could be resolved
    """
    type_string = 'video' if type == 'v' else 'audio'
    print('Search:')
    query = input()
    if "playlist" in query:
        return _collect_playlist_videos(query, type_string)
    return _find_first_search_result(query)
def download_video(video):
    """Fetch the best available video stream (preferring mp4) and download it with a progress callback."""
    stream = video.getbest(preftype="mp4", ftypestrict=False)
    print('\nResolution of best video available: ' + stream.resolution)
    print('\nFile size of best video available: ' + size(stream.get_filesize()))
    stream.download(quiet=False, callback=show_progress)
    print('Video downloaded successfully.')
def download_audio(video):
    """Fetch the best available audio stream (preferring m4a) and download it with a progress callback."""
    stream = video.getbestaudio(preftype="m4a", ftypestrict=False)
    print('\nBitrate of best audio available: ' + stream.bitrate)
    print('\nFile size of best audio available: ' + size(stream.get_filesize()))
    stream.download(quiet=False, callback=show_progress)
    print('Audio downloaded successfully.')
def show_progress(total, received, ratio, rate, eta):
    """pafy download callback: render a single carriage-return progress line (total/received unused)."""
    seconds_remaining = int(eta)
    unit = " second" if seconds_remaining == 1 else " seconds"
    line = (f"Progress: {ratio * 100:.2f}%"
            f" Download Speed: {int(rate)} kbps"
            f" ETA: {seconds_remaining}{unit}")
    print(line, end="\r")
trade_spider() | youtube/youtube_videos_7.py | import requests
import pafy
from hurry.filesize import size
from bs4 import BeautifulSoup
def trade_spider():
print('Do you want to download a video or audio? (a/v)')
download_content()
def download_content():
type_of_file = input()
if type_of_file == 'v' or type_of_file == 'a':
type_string = 'video' if type_of_file == 'v' else 'audio'
result = check_by_url_and_return_video(type_string)
if result[1] == 'video':
if result[0]:
video = result[0]
print('The ' + type_string + ' you are going to download is:\n' + video.title)
print('\nDetails:\n\n' + 'Rating: ' + "%.1f" % video.rating + '\nView Count: ' + str(video.viewcount) + '\nAuthor: ' + video.author + '\nLength: ' + str(video.length) + ' seconds\nDuration: ' + str(video.duration) + '\nLikes: ' + str(video.likes) + '\nDislikes: ' + str(video.dislikes))
print('\nDescription: ' + video.description)
download_video(video) if type_of_file == 'v' else download_audio(video)
else:
print('Invalid search. Please try again.')
trade_spider()
else:
if result[0]:
videos = result[0]
for video in videos:
download_video(video) if type_of_file == 'v' else download_audio(video)
else:
print('Invalid search. Please try again.')
trade_spider()
else:
print('Invalid search. Please try again.')
trade_spider()
def check_by_url_and_return_video(type):
type_string = 'video' if type == 'v' else 'audio'
print('Search:')
query = input()
if "playlist" not in query:
url = 'https://www.youtube.com/results?search_query='
for word in query.split():
url += word + '+'
source_code = requests.get(url[:len(url)-1])
plain_text = source_code.text
soup = BeautifulSoup(plain_text, "lxml")
try:
link = soup.find('a', {'class': 'yt-uix-sessionlink yt-uix-tile-link yt-ui-ellipsis yt-ui-ellipsis-2 spf-link '})
try:
video = pafy.new('https://www.youtube.com' + link['href'])
return [video, 'video']
except TypeError:
print('No results found.')
return [0, 'video']
except ValueError:
return [0, 'video']
else:
playlist_source_code = requests.get(query)
playlist_plain_text = playlist_source_code.text
playlist_soup = BeautifulSoup(playlist_plain_text, "lxml")
playlist = pafy.get_playlist(query)
print('\nYou are going to download ' + type_string + 's of the playlist:\n' + playlist['title'])
print('\nAuthor: ' + playlist['author'])
print('\nList of ' + type_string + 's:')
for item in playlist['items']:
print(item['pafy'].title)
videos = []
try:
for link in playlist_soup.findAll('a', {'class': 'pl-video-title-link yt-uix-tile-link yt-uix-sessionlink spf-link '}):
try:
href = link['href']
videos.append(pafy.new('https://www.youtube.com' + href))
except TypeError:
print('No results found.')
return [0, 'playlist']
return [videos, 'playlist']
except ValueError:
return [0, 'playlist']
def download_video(video):
best_video = video.getbest(preftype="mp4", ftypestrict=False)
print('\nResolution of best video available: ' + best_video.resolution)
print('\nFile size of best video available: ' + size(best_video.get_filesize()))
best_video.download(quiet=False, callback=show_progress)
print('Video downloaded successfully.')
def download_audio(video):
best_audio = video.getbestaudio(preftype="m4a", ftypestrict=False)
print('\nBitrate of best audio available: ' + best_audio.bitrate)
print('\nFile size of best audio available: ' + size(best_audio.get_filesize()))
best_audio.download(quiet=False, callback=show_progress)
print('Audio downloaded successfully.')
def show_progress(total, received, ratio, rate, eta):
eta_string = " second" if int(eta) == 1 else " seconds"
print("Progress: %.2f" % (ratio * 100) + "%" + " Download Speed: " + str(int(rate)) + " kbps ETA: " + str(int(eta)) + eta_string, end="\r")
trade_spider() | 0.19163 | 0.156105 |
from src.extract_old_site.modules import feature_descriptions as desc
import pytest
from unittest import mock
import pathlib
import os
# Test Data
# Sidebar
index0_html_str = """
<html><body>
<b>Descriptions</b><p>
<a target="body" href="../split/report53.html">Burial 1 Description</a><br>
<a target="body" href="../split/report62.html">Feature 7 (Burial 9) Description</a><br>
<a target="body" href="../split/report63.html">Feature 8 Description</a><br>
</body></html>
"""
# Topbar
tabs0_html_str = """
<html><body><b>Descriptions | <a target="_parent" href="../index.html">Home</a></b></body></html>
"""
# Burial 1
tab0_html_str = """
<html><head><title>Excavating Occaneechi Town - [Descriptions]</title></head>
<frameset rows="28,*">
<frame name="tabs" scrolling="no" src="tabs0.html" marginwidth=1 marginheight=1>
<frame name="main" src="body0.html" marginwidth=1 marginheight=1>
</frameset><noframes>you need frames</noframes></html>
"""
body0_html_str = """
<html><frameset cols="240,*" border=1>
<frame name="choice" src="index0.html" marginwidth=1 marginheight=1>
<frame name="body" src="../split/report53.html" marginwidth=1 marginheight=1>
</frameset></html>
"""
report53_html_str = """
<html><frameset rows="28,*,28" border=1>
<frame scrolling="no" src="report53a.html" marginwidth=1 marginheight=1>
<frame src="report53b.html" marginwidth=1 marginheight=1>
<frame scrolling="no" src="report53c.html" marginwidth=1 marginheight=1>
</frameset><noframes>you need frames</noframes></html>
"""
report53a_html_str = """
<html><body><center><b>Burial 1 Description</b>
</center></body></html>
"""
report53b_html_str = """
<html><body bgcolor=white>
by <NAME><p>
<p>
<i>Grave Goods</i><p>
<p>
Over the sternum were a <a href="../excavations/slid_aho.html" target="body"><u>large shell
gorget</u></a> and a
<a href="../excavations/slid_ahp.html" target="body"><u>small shell gorget</u></a> with punctated designs.<p>
</body></html>
"""
report53c_html_str = "<html><body><center>Page 21</center></body></html>"
# Feature 7 (Burial 9)
tab0_10_html_str = """
<html><head><title>Excavating Occaneechi Town - [Descriptions]</title></head>
<frameset rows="28,*">
<frame name="tabs" scrolling="no" src="tabs0.html" marginwidth=1 marginheight=1>
<frame name="main" src="body0_10.html" marginwidth=1 marginheight=1>
</frameset><noframes>you need frames</noframes></html>
"""
body0_10_html_str = """
<html><frameset cols="240,*" border=1>
<frame name="choice" src="index0.html" marginwidth=1 marginheight=1>
<frame name="body" src="../split/report62.html" marginwidth=1 marginheight=1>
</frameset></html>
"""
report62_html_str = """
<html><frameset rows="28,*,28" border=1>
<frame scrolling="no" src="report62a.html" marginwidth=1 marginheight=1>
<frame src="report62b.html" marginwidth=1 marginheight=1>
<frame scrolling="no" src="report62c.html" marginwidth=1 marginheight=1>
</frameset><noframes>you need frames</noframes></html>
"""
report62a_html_str = """
<html><body><center><b>Feature 7 (Burial 9) Description</b>
</center></body></html>
"""
report62b_html_str = """
<html><body bgcolor=white>
by <NAME><p>
<p>
<i>Grave Goods</i><p>
<p>
Associated artifacts consisted of an <a href="../excavations/slid_akr.html" target="body"><u>iron hoe</u></a> placed adjacent to and southwest of the skull
(the blade end lay under the shoulder and occipital region of the skull) and a
bone-handled <a href="../excavations/slid_aks.html" target="body"><u>iron knife</u></a> placed under the right forearm.<p>
</body></html>
"""
report62c_html_str = "<html><body><center>Page 30</center></body></html>"
# Feature 8
tab0_11_html_str = """
<html><head><title>Excavating Occaneechi Town - [Descriptions]</title></head>
<frameset rows="28,*">
<frame name="tabs" scrolling="no" src="tabs0.html" marginwidth=1 marginheight=1>
<frame name="main" src="body0_11.html" marginwidth=1 marginheight=1>
</frameset><noframes>you need frames</noframes></html>
"""
body0_11_html_str = """
<html><frameset cols="240,*" border=1>
<frame name="choice" src="index0.html" marginwidth=1 marginheight=1>
<frame name="body" src="../split/report63.html" marginwidth=1 marginheight=1>
</frameset></html>
"""
report63_html_str = """
<html><frameset rows="28,*,28" border=1>
<frame scrolling="no" src="report63a.html" marginwidth=1 marginheight=1>
<frame src="report63b.html" marginwidth=1 marginheight=1>
<frame scrolling="no" src="report63c.html" marginwidth=1 marginheight=1>
</frameset><noframes>you need frames</noframes></html>
"""
report63a_html_str = """
<html><body><center><b>Feature 8 Description</b>
</center></body></html>
"""
report63b_html_str = """
<html><body bgcolor=white>
by <NAME>, Jr.<p>
<p>
Feature 8 was located just east of Structure 7 at 290.0R58.0. At the top of
subsoil, this feature appeared as an irregular patch of brown loam, about 2.0
ft in diameter, that contained bits of charcoal, animal bone, fired clay, and a
large net-impressed rimsherd.<p>
</body></html>
"""
report63c_html_str = "<html><body><center>Page 31</center></body></html>"
# Extracted data
report53a_extracted = "Burial 1 Description"
report62a_extracted = "Feature 7 (Burial 9) Description"
report63a_extracted = "Feature 8 Description"
# Also removes all the double spaces in the paragraphs
reports53_62_63_fully_extracted = {
"module": {
"author": None,
"shortTitle": "Feature Descriptions",
"fullTitle": "Feature Descriptions",
"path": "/dig/html/descriptions/tab0.html",
"sections": [{
"name": "Burial 1 Description",
"path": "/dig/html/split/report53.html",
"pageNum": "21",
"subsections": []
}, {
"name": "Feature 7 (Burial 9) Description",
"path": "/dig/html/split/report62.html",
"pageNum": "30",
"subsections": []
}, {
"name": "Feature 8 Description",
"path": "/dig/html/split/report63.html",
"pageNum": "31",
"subsections": []
}]
},
"pages": {
"21": {
"pageTitle": "Burial 1 Description",
"parentModuleShortTitle": "Feature Descriptions",
"content": [{
"type": "paragraph",
"content": "by <NAME>"
}, {
"type": "italic-title",
"content": "Grave Goods"
}, {
"type": "paragraph",
"content": ('Over the sternum were a <a href="/dig/html/excavations/slid_aho.html">'
'<u>large shell gorget</u></a> and a '
'<a href="/dig/html/excavations/slid_ahp.html"><u>'
'small shell gorget</u></a> with punctated designs.')
}]
},
"30": {
"pageTitle": "Feature 7 (Burial 9) Description",
"parentModuleShortTitle": "Feature Descriptions",
"content": [{
"type": "paragraph",
"content": "by <NAME>"
}, {
"type": "italic-title",
"content": "Grave Goods"
}, {
"type": "paragraph",
"content": ('Associated artifacts consisted of an <a href="/dig/html/excavations/slid_akr.html">'
'<u>iron hoe</u></a> placed adjacent to and southwest of the skull '
'(the blade end lay under the shoulder and occipital region of the skull) and a '
'bone-handled <a href="/dig/html/excavations/slid_aks.html">'
'<u>iron knife</u></a> placed under the right forearm.')
}]
},
"31": {
"pageTitle": "Feature 8 Description",
"parentModuleShortTitle": "Feature Descriptions",
"content": [{
"type": "paragraph",
"content": "by <NAME>, Jr."
}, {
"type": "paragraph",
"content": ('Feature 8 was located just east of Structure 7 at 290.0R58.0. At the top of '
'subsoil, this feature appeared as an irregular patch of brown loam, about 2.0 '
'ft in diameter, that contained bits of charcoal, animal bone, fired clay, and a '
'large net-impressed rimsherd.')
}]
}
}
}
report53c_extracted = "21"
report62c_extracted = "30"
report63c_extracted = "31"
def mock_readfile(filename, parent_dir_path_obj):
    """
    Test double for the site reader: serve the in-memory HTML fixtures keyed
    by (directory, file name), resolving relative paths the same way the real
    reader does.
    """
    resolved = pathlib.Path(os.path.normpath(parent_dir_path_obj / filename))
    fixtures_by_dir = {
        "C:/dig/html/descriptions": {
            "index0.html": index0_html_str,
            "tabs0.html": tabs0_html_str,
            "tab0.html": tab0_html_str,
            "tab0_10.html": tab0_10_html_str,
            "tab0_11.html": tab0_11_html_str,
            "body0.html": body0_html_str,
            "body0_10.html": body0_10_html_str,
            "body0_11.html": body0_11_html_str,
        },
        "C:/dig/html/split": {
            "report53.html": report53_html_str,
            "report53a.html": report53a_html_str,
            "report53b.html": report53b_html_str,
            "report53c.html": report53c_html_str,
            "report62.html": report62_html_str,
            "report62a.html": report62a_html_str,
            "report62b.html": report62b_html_str,
            "report62c.html": report62c_html_str,
            "report63.html": report63_html_str,
            "report63a.html": report63a_html_str,
            "report63b.html": report63b_html_str,
            "report63c.html": report63c_html_str,
        },
    }
    directory_fixtures = fixtures_by_dir.get(resolved.parent.as_posix(), {})
    if resolved.name in directory_fixtures:
        return directory_fixtures[resolved.name]
    raise Exception("did not find file in mock_readfile")
def test_extract_sidebar_sections():
    """Each sidebar <a> yields a section dict with a site-absolute path and empty subsections."""
    assert desc.extract_sidebar_sections(index0_html_str) == [{
        "name": "Burial 1 Description",
        "path": "/dig/html/split/report53.html",
        "subsections": []
    }, {
        "name": "Feature 7 (Burial 9) Description",
        "path": "/dig/html/split/report62.html",
        "subsections": []
    }, {
        "name": "Feature 8 Description",
        "path": "/dig/html/split/report63.html",
        "subsections": []
    }]
@pytest.mark.parametrize("report_a_html_str,expected_result", [
    (report53a_html_str, report53a_extracted),
    (report62a_html_str, report62a_extracted),
    (report63a_html_str, report63a_extracted)
])
def test_extract_page_title(report_a_html_str, expected_result):
    """The page title is the bold, centered text of each report's header ('a') frame."""
    assert desc.extract_page_title(report_a_html_str) == expected_result
def test_extract_descriptions():
    """Full-module extraction over the fixture site, with directory listing mocked out."""
    with mock.patch.object(pathlib.Path, "iterdir") as mock_iterdir:
        # extract_descriptions discovers the tab pages by listing the
        # descriptions directory, so fake iterdir() to return the fixture
        # file names; file contents are then served by mock_readfile.
        filenames_list = [
            "body0.html", "body0_10.html", "body0_11.html", "index0.html",
            "tab0.html", "tab0_10.html", "tab0_11.html", "tabs0.html"
        ]
        iterdir_path_objs = [(pathlib.Path("C:/dig/html/descriptions") / filename)
                             for filename in filenames_list]
        mock_iterdir.return_value = iterdir_path_objs
        assert desc.extract_descriptions("C:/", mock_readfile) == reports53_62_63_fully_extracted
import pytest
from unittest import mock
import pathlib
import os
# Test Data
# Sidebar
index0_html_str = """
<html><body>
<b>Descriptions</b><p>
<a target="body" href="../split/report53.html">Burial 1 Description</a><br>
<a target="body" href="../split/report62.html">Feature 7 (Burial 9) Description</a><br>
<a target="body" href="../split/report63.html">Feature 8 Description</a><br>
</body></html>
"""
# Topbar
tabs0_html_str = """
<html><body><b>Descriptions | <a target="_parent" href="../index.html">Home</a></b></body></html>
"""
# Burial 1
tab0_html_str = """
<html><head><title>Excavating Occaneechi Town - [Descriptions]</title></head>
<frameset rows="28,*">
<frame name="tabs" scrolling="no" src="tabs0.html" marginwidth=1 marginheight=1>
<frame name="main" src="body0.html" marginwidth=1 marginheight=1>
</frameset><noframes>you need frames</noframes></html>
"""
body0_html_str = """
<html><frameset cols="240,*" border=1>
<frame name="choice" src="index0.html" marginwidth=1 marginheight=1>
<frame name="body" src="../split/report53.html" marginwidth=1 marginheight=1>
</frameset></html>
"""
report53_html_str = """
<html><frameset rows="28,*,28" border=1>
<frame scrolling="no" src="report53a.html" marginwidth=1 marginheight=1>
<frame src="report53b.html" marginwidth=1 marginheight=1>
<frame scrolling="no" src="report53c.html" marginwidth=1 marginheight=1>
</frameset><noframes>you need frames</noframes></html>
"""
report53a_html_str = """
<html><body><center><b>Burial 1 Description</b>
</center></body></html>
"""
report53b_html_str = """
<html><body bgcolor=white>
by <NAME><p>
<p>
<i>Grave Goods</i><p>
<p>
Over the sternum were a <a href="../excavations/slid_aho.html" target="body"><u>large shell
gorget</u></a> and a
<a href="../excavations/slid_ahp.html" target="body"><u>small shell gorget</u></a> with punctated designs.<p>
</body></html>
"""
report53c_html_str = "<html><body><center>Page 21</center></body></html>"
# Feature 7 (Burial 9)
tab0_10_html_str = """
<html><head><title>Excavating Occaneechi Town - [Descriptions]</title></head>
<frameset rows="28,*">
<frame name="tabs" scrolling="no" src="tabs0.html" marginwidth=1 marginheight=1>
<frame name="main" src="body0_10.html" marginwidth=1 marginheight=1>
</frameset><noframes>you need frames</noframes></html>
"""
body0_10_html_str = """
<html><frameset cols="240,*" border=1>
<frame name="choice" src="index0.html" marginwidth=1 marginheight=1>
<frame name="body" src="../split/report62.html" marginwidth=1 marginheight=1>
</frameset></html>
"""
report62_html_str = """
<html><frameset rows="28,*,28" border=1>
<frame scrolling="no" src="report62a.html" marginwidth=1 marginheight=1>
<frame src="report62b.html" marginwidth=1 marginheight=1>
<frame scrolling="no" src="report62c.html" marginwidth=1 marginheight=1>
</frameset><noframes>you need frames</noframes></html>
"""
report62a_html_str = """
<html><body><center><b>Feature 7 (Burial 9) Description</b>
</center></body></html>
"""
report62b_html_str = """
<html><body bgcolor=white>
by <NAME><p>
<p>
<i>Grave Goods</i><p>
<p>
Associated artifacts consisted of an <a href="../excavations/slid_akr.html" target="body"><u>iron hoe</u></a> placed adjacent to and southwest of the skull
(the blade end lay under the shoulder and occipital region of the skull) and a
bone-handled <a href="../excavations/slid_aks.html" target="body"><u>iron knife</u></a> placed under the right forearm.<p>
</body></html>
"""
report62c_html_str = "<html><body><center>Page 30</center></body></html>"
# Feature 8
tab0_11_html_str = """
<html><head><title>Excavating Occaneechi Town - [Descriptions]</title></head>
<frameset rows="28,*">
<frame name="tabs" scrolling="no" src="tabs0.html" marginwidth=1 marginheight=1>
<frame name="main" src="body0_11.html" marginwidth=1 marginheight=1>
</frameset><noframes>you need frames</noframes></html>
"""
body0_11_html_str = """
<html><frameset cols="240,*" border=1>
<frame name="choice" src="index0.html" marginwidth=1 marginheight=1>
<frame name="body" src="../split/report63.html" marginwidth=1 marginheight=1>
</frameset></html>
"""
report63_html_str = """
<html><frameset rows="28,*,28" border=1>
<frame scrolling="no" src="report63a.html" marginwidth=1 marginheight=1>
<frame src="report63b.html" marginwidth=1 marginheight=1>
<frame scrolling="no" src="report63c.html" marginwidth=1 marginheight=1>
</frameset><noframes>you need frames</noframes></html>
"""
report63a_html_str = """
<html><body><center><b>Feature 8 Description</b>
</center></body></html>
"""
report63b_html_str = """
<html><body bgcolor=white>
by <NAME>, Jr.<p>
<p>
Feature 8 was located just east of Structure 7 at 290.0R58.0. At the top of
subsoil, this feature appeared as an irregular patch of brown loam, about 2.0
ft in diameter, that contained bits of charcoal, animal bone, fired clay, and a
large net-impressed rimsherd.<p>
</body></html>
"""
report63c_html_str = "<html><body><center>Page 31</center></body></html>"
# Extracted data
report53a_extracted = "Burial 1 Description"
report62a_extracted = "Feature 7 (Burial 9) Description"
report63a_extracted = "Feature 8 Description"
# Also removes all the double spaces in the paragraphs
reports53_62_63_fully_extracted = {
"module": {
"author": None,
"shortTitle": "Feature Descriptions",
"fullTitle": "Feature Descriptions",
"path": "/dig/html/descriptions/tab0.html",
"sections": [{
"name": "Burial 1 Description",
"path": "/dig/html/split/report53.html",
"pageNum": "21",
"subsections": []
}, {
"name": "Feature 7 (Burial 9) Description",
"path": "/dig/html/split/report62.html",
"pageNum": "30",
"subsections": []
}, {
"name": "Feature 8 Description",
"path": "/dig/html/split/report63.html",
"pageNum": "31",
"subsections": []
}]
},
"pages": {
"21": {
"pageTitle": "Burial 1 Description",
"parentModuleShortTitle": "Feature Descriptions",
"content": [{
"type": "paragraph",
"content": "by <NAME>"
}, {
"type": "italic-title",
"content": "Grave Goods"
}, {
"type": "paragraph",
"content": ('Over the sternum were a <a href="/dig/html/excavations/slid_aho.html">'
'<u>large shell gorget</u></a> and a '
'<a href="/dig/html/excavations/slid_ahp.html"><u>'
'small shell gorget</u></a> with punctated designs.')
}]
},
"30": {
"pageTitle": "Feature 7 (Burial 9) Description",
"parentModuleShortTitle": "Feature Descriptions",
"content": [{
"type": "paragraph",
"content": "by <NAME>"
}, {
"type": "italic-title",
"content": "Grave Goods"
}, {
"type": "paragraph",
"content": ('Associated artifacts consisted of an <a href="/dig/html/excavations/slid_akr.html">'
'<u>iron hoe</u></a> placed adjacent to and southwest of the skull '
'(the blade end lay under the shoulder and occipital region of the skull) and a '
'bone-handled <a href="/dig/html/excavations/slid_aks.html">'
'<u>iron knife</u></a> placed under the right forearm.')
}]
},
"31": {
"pageTitle": "Feature 8 Description",
"parentModuleShortTitle": "Feature Descriptions",
"content": [{
"type": "paragraph",
"content": "by <NAME>, Jr."
}, {
"type": "paragraph",
"content": ('Feature 8 was located just east of Structure 7 at 290.0R58.0. At the top of '
'subsoil, this feature appeared as an irregular patch of brown loam, about 2.0 '
'ft in diameter, that contained bits of charcoal, animal bone, fired clay, and a '
'large net-impressed rimsherd.')
}]
}
}
}
report53c_extracted = "21"
report62c_extracted = "30"
report63c_extracted = "31"
def mock_readfile(filename, parent_dir_path_obj):
    """Stand-in for the real readfile helper used by the extractor under test.

    Resolves *filename* against *parent_dir_path_obj* (normalizing any
    relative components) and returns the matching in-memory HTML fixture
    string.

    :param filename: file name (or relative path) requested by the extractor.
    :param parent_dir_path_obj: pathlib.Path of the requesting directory.
    :raises FileNotFoundError: if the resolved path is not a known fixture,
        so a bad lookup fails loudly with the offending path in the message.
    """
    resolved_path_obj = pathlib.Path(os.path.normpath(parent_dir_path_obj / filename))
    filename = resolved_path_obj.name
    parent_dir_str = resolved_path_obj.parent.as_posix()
    if parent_dir_str == "C:/dig/html/descriptions":
        if filename == "index0.html":
            return index0_html_str
        elif filename == "tabs0.html":
            return tabs0_html_str
        elif filename == "tab0.html":
            return tab0_html_str
        elif filename == "tab0_10.html":
            return tab0_10_html_str
        elif filename == "tab0_11.html":
            return tab0_11_html_str
        elif filename == "body0.html":
            return body0_html_str
        elif filename == "body0_10.html":
            return body0_10_html_str
        elif filename == "body0_11.html":
            return body0_11_html_str
    elif parent_dir_str == "C:/dig/html/split":
        # Chain normalized to elif throughout (the report53* branch
        # previously mixed bare ifs in with the elifs).
        if filename == "report53.html":
            return report53_html_str
        elif filename == "report53a.html":
            return report53a_html_str
        elif filename == "report53b.html":
            return report53b_html_str
        elif filename == "report53c.html":
            return report53c_html_str
        elif filename == "report62.html":
            return report62_html_str
        elif filename == "report62a.html":
            return report62a_html_str
        elif filename == "report62b.html":
            return report62b_html_str
        elif filename == "report62c.html":
            return report62c_html_str
        elif filename == "report63.html":
            return report63_html_str
        elif filename == "report63a.html":
            return report63a_html_str
        elif filename == "report63b.html":
            return report63b_html_str
        elif filename == "report63c.html":
            return report63c_html_str
    # FileNotFoundError is still an Exception, so existing catch-alls hold,
    # but the message now names the path that missed the fixture table.
    raise FileNotFoundError(
        "did not find file in mock_readfile: " + resolved_path_obj.as_posix())
def test_extract_sidebar_sections():
    """extract_sidebar_sections yields one flat entry per report link."""
    expected = [
        {"name": name, "path": path, "subsections": []}
        for name, path in (
            ("Burial 1 Description", "/dig/html/split/report53.html"),
            ("Feature 7 (Burial 9) Description", "/dig/html/split/report62.html"),
            ("Feature 8 Description", "/dig/html/split/report63.html"),
        )
    ]
    assert desc.extract_sidebar_sections(index0_html_str) == expected
# Each report*a.html fixture should yield its corresponding extracted title.
@pytest.mark.parametrize("report_a_html_str,expected_result", [
    (report53a_html_str, report53a_extracted),
    (report62a_html_str, report62a_extracted),
    (report63a_html_str, report63a_extracted)
])
def test_extract_page_title(report_a_html_str, expected_result):
    """extract_page_title pulls the title string out of a report fragment."""
    assert desc.extract_page_title(report_a_html_str) == expected_result
def test_extract_descriptions():
    """End-to-end: extract_descriptions walks the mocked directory listing."""
    fixture_names = [
        "body0.html", "body0_10.html", "body0_11.html", "index0.html",
        "tab0.html", "tab0_10.html", "tab0_11.html", "tabs0.html",
    ]
    base_dir = pathlib.Path("C:/dig/html/descriptions")
    with mock.patch.object(pathlib.Path, "iterdir") as mock_iterdir:
        # The extractor discovers its inputs via Path.iterdir; feed it the
        # fixture listing instead of touching the real filesystem.
        mock_iterdir.return_value = [base_dir / name for name in fixture_names]
        assert desc.extract_descriptions("C:/", mock_readfile) == reports53_62_63_fully_extracted
import json
from hubcommander.bot_components.decorators import hubcommander_command, format_help_text, auth
from hubcommander.bot_components.slack_comm import WORKING_COLOR
from hubcommander.bot_components.parse_functions import ParseException
def test_hubcommander_command_required(user_data, slack_client):
    """Required args are parsed into parameters; missing ones post help text."""
    # Kept as a dict so the identical definition can be replayed through
    # format_help_text when building the expected Slack attachment below.
    fail_command_kwargs = dict(
        name="!FailCommand",
        usage="!FailCommand <arg1>",
        description="This is a test command that will fail due to lack of required args",
        required=[
            dict(name="arg1", properties=dict(type=str, help="This is argument 1")),
        ],
        optional=[]
    )

    class TestCommands:
        def __init__(self):
            pass

        @hubcommander_command(
            name="!TestCommand",
            usage="!TestCommand <arg1> <arg2> <arg3>",
            description="This is a test command to make sure that things are working properly.",
            required=[
                dict(name="arg1", properties=dict(type=str, help="This is argument 1")),
                dict(name="arg2", properties=dict(type=str, help="This is argument 2")),
                dict(name="arg3", properties=dict(type=str, help="This is argument 3"))
            ],
            optional=[]
        )
        def pass_command(self, data, user_data, arg1, arg2, arg3):
            assert self
            assert data
            assert user_data
            assert arg1 == "arg1"
            assert arg2 == "arg2"
            assert arg3 == "arg3"

        @hubcommander_command(**fail_command_kwargs)
        def fail_command(self, data, user_data, arg1):
            assert False  # Can't Touch This...

    tc = TestCommands()

    # Happy path: all three required args supplied and parsed through.
    data = dict(text="!TestCommand arg1 arg2 arg3")
    tc.pass_command(data, user_data)

    # Missing required arg: the wrapped body never runs; the decorator posts
    # the command's help text to the channel as an ephemeral message instead.
    data = dict(text="!FailCommand", channel="12345")
    tc.fail_command(data, user_data)
    help_text = format_help_text(data, user_data, **fail_command_kwargs)
    attachment = {
        "text": help_text,
        "color": WORKING_COLOR,
        "mrkdwn_in": ["text"]
    }
    slack_client.api_call.assert_called_with("chat.postEphemeral", channel="12345", as_user=True,
                                             attachments=json.dumps([attachment]), text=" ", user=user_data["id"])
def test_hubcommander_command_optional(user_data, slack_client):
    """Optional --flags land in **optionals; unsupplied ones arrive falsy."""
    class TestCommands:
        def __init__(self):
            pass

        @hubcommander_command(
            name="!OptionalArgs",
            usage="!OptionalArgs <arg1> <optional arg here> <another optional argument here>",
            description="This is a test command with an optional argument",
            required=[
                dict(name="arg1", properties=dict(type=str, help="This is argument 1")),
            ],
            optional=[
                dict(name="--optional", properties=dict(type=str, help="This is argument 2")),
                dict(name="--optional2", properties=dict(type=str, help="This is argument 3")),
                dict(name="--optional3", properties=dict(type=str, help="This is argument 4"))
            ]
        )
        def optional_arg_command(self, data, user_data, arg1, **optionals):
            assert arg1 == "required"
            # All three declared optionals are always present as keys; the one
            # not passed on the command line ("--optional3") is falsy.
            assert len(optionals) == 3
            assert optionals["optional"] == "some_optional"
            assert optionals["optional2"] == "some_optional2"
            assert not optionals["optional3"]

    tc = TestCommands()
    data = dict(text="!OptionalArgs required --optional some_optional --optional2 some_optional2")
    tc.optional_arg_command(data, user_data)
def test_hubcommander_command_with_custom_validation(user_data, slack_client):
    """A validation_func can coerce arg values; bad input posts usage text."""
    from hubcommander.bot_components.parse_functions import parse_toggles
    verify_command_kwargs = dict(
        name="!VerifyToggle",
        usage="!VerifyToggle <testToggle>",
        description="This is a test command to verify proper toggles",
        required=[
            dict(name="test_toggle", properties=dict(type=str, help="This is argument 1"),
                 validation_func=parse_toggles,
                 validation_func_kwargs=dict(
                     toggle_type="<enablement flag>"
                 ))
        ]
    )

    class TestCommands:
        def __init__(self):
            pass

        @hubcommander_command(**verify_command_kwargs)
        def verify_toggle(self, data, user_data, test_toggle):
            # parse_toggles converts "on"/"false"/etc. into a real bool.
            assert type(test_toggle) is bool

    tc = TestCommands()
    data = dict(text="!VerifyToggle on")
    tc.verify_toggle(data, user_data)
    data = dict(text="!VerifyToggle false")
    tc.verify_toggle(data, user_data)

    # An unparsable toggle never reaches the body; the decorator posts the
    # ParseException's "proper usage" text as an ephemeral message instead.
    data = dict(text="!VerifyToggle WillFail", channel="12345")
    tc.verify_toggle(data, user_data)
    proper_usage_text = ""
    try:
        parse_toggles(tc, "WillFail", toggle_type="<enablement flag>")
    except ParseException as pe:
        proper_usage_text = pe.format_proper_usage(user_data["name"])
    attachment = {
        "text": proper_usage_text,
        "color": "danger",
        "mrkdwn_in": ["text"]
    }
    slack_client.api_call.assert_called_with("chat.postEphemeral", channel="12345", as_user=True,
                                             attachments=json.dumps([attachment]), text=" ", user=user_data["id"])
def test_auth_decorator(user_data, slack_client, auth_plugin):
    """@auth() consults the per-command auth plugin before running the body."""
    class TestCommands:
        def __init__(self):
            # The auth decorator looks up its plugin and kwargs from this
            # per-command table on the instance; should_auth drives the
            # fixture plugin's allow/deny decision.
            self.commands = {
                "!TestCommand": {
                    "auth": {
                        "plugin": auth_plugin,
                        "kwargs": {
                            "should_auth": True
                        }
                    }
                },
                "!TestCommand2": {
                    "auth": {
                        "plugin": auth_plugin,
                        "kwargs": {
                            "should_auth": False
                        }
                    }
                },
                "!OptionalArgs": {
                    "auth": {
                        "plugin": auth_plugin,
                        "kwargs": {
                            "should_auth": True
                        }
                    }
                },
                "!OptionalArgs2": {
                    "auth": {
                        "plugin": auth_plugin,
                        "kwargs": {
                            "should_auth": False
                        }
                    }
                }
            }

        @hubcommander_command(
            name="!TestCommand",
            usage="!TestCommand <arg1> <arg2> <arg3>",
            description="This is a test command to make sure that things are working properly.",
            required=[
                dict(name="arg1", properties=dict(type=str, help="This is argument 1")),
                dict(name="arg2", properties=dict(type=str, help="This is argument 2")),
                dict(name="arg3", properties=dict(type=str, help="This is argument 3"))
            ],
            optional=[]
        )
        @auth()
        def pass_command(self, data, user_data, arg1, arg2, arg3):
            assert arg1 == "arg1"
            assert arg2 == "arg2"
            assert arg3 == "arg3"
            return True

        @hubcommander_command(
            name="!TestCommand2",
            usage="!TestCommand2 <arg1> <arg2> <arg3>",
            description="This is a test command that will fail to authenticate.",
            required=[
                dict(name="arg1", properties=dict(type=str, help="This is argument 1")),
                dict(name="arg2", properties=dict(type=str, help="This is argument 2")),
                dict(name="arg3", properties=dict(type=str, help="This is argument 3"))
            ],
            optional=[]
        )
        @auth()
        def fail_command(self, data, user_data, arg1, arg2, arg3):
            # Will never get here...
            assert False

        @hubcommander_command(
            name="!OptionalArgs",
            usage="!OptionalArgs <arg1> <optional arg here> <another optional argument here>",
            description="This is a test command with an optional argument",
            required=[
                dict(name="arg1", properties=dict(type=str, help="This is argument 1")),
            ],
            optional=[
                dict(name="--optional", properties=dict(type=str, help="This is argument 2")),
                dict(name="--optional2", properties=dict(type=str, help="This is argument 3")),
                dict(name="--optional3", properties=dict(type=str, help="This is argument 4"))
            ]
        )
        @auth()
        def optional_arg_command(self, data, user_data, arg1, **optionals):
            assert arg1 == "required"
            assert len(optionals) == 3
            assert optionals["optional"] == "some_optional"
            assert optionals["optional2"] == "some_optional2"
            assert not optionals["optional3"]
            return True

        @hubcommander_command(
            name="!OptionalArgs2",
            usage="!OptionalArgs2 <arg1> <optional arg here> <another optional argument here>",
            description="This is a test command with an optional argument that will fail to auth",
            required=[
                dict(name="arg1", properties=dict(type=str, help="This is argument 1")),
            ],
            optional=[
                dict(name="--optional", properties=dict(type=str, help="This is argument 2")),
                dict(name="--optional2", properties=dict(type=str, help="This is argument 3")),
                dict(name="--optional3", properties=dict(type=str, help="This is argument 4"))
            ]
        )
        @auth()
        def optional_fail_arg_command(self, data, user_data, arg1, **optionals):
            # Will never reach this:
            assert False

    tc = TestCommands()
    # should_auth=True commands run their bodies and return True; denied ones
    # return a falsy value without ever entering the body.
    data = dict(text="!TestCommand arg1 arg2 arg3")
    assert tc.pass_command(data, user_data)
    assert not tc.fail_command(data, user_data)
    # Test that commands with optional arguments work properly:
    data = dict(text="!OptionalArgs required --optional some_optional --optional2 some_optional2")
    assert tc.optional_arg_command(data, user_data)
    assert not tc.optional_fail_arg_command(data, user_data)
def test_help_command_with_list(user_data, slack_client):
    """`{values}` in help text is filled from the per-command choices list."""
    valid_values = ["one", "two", "three"]
    verify_command_kwargs = dict(
        name="!TestCommand",
        usage="!TestCommand <testThing>",
        description="This is a test command to test help text for things in lists",
        required=[
            dict(name="test_thing", properties=dict(type=str.lower, help="Must be one of: `{values}`"),
                 choices="valid_values")
        ]
    )

    class TestCommands:
        def __init__(self):
            # choices="valid_values" above names this key in the command's
            # entry; the decorator resolves it at call time.
            self.commands = {
                "!TestCommand": {
                    "valid_values": valid_values
                }
            }

        @hubcommander_command(**verify_command_kwargs)
        def the_command(self, data, user_data, test_thing):
            assert True

    tc = TestCommands()
    # Will assert True
    data = dict(text="!TestCommand one")
    tc.the_command(data, user_data)
    # Will ALSO assert True... we are making sure to lowercase the choices with str.lower as the type:
    data = dict(text="!TestCommand ThReE")
    tc.the_command(data, user_data)
    # Will NOT assert true -- this will output help text:
    data = dict(text="!TestCommand", channel="12345")
    tc.the_command(data, user_data)
    help_text = format_help_text(data, user_data, **verify_command_kwargs)
    attachment = {
        "text": help_text,
        "color": WORKING_COLOR,
        "mrkdwn_in": ["text"]
    }
    slack_client.api_call.assert_called_with("chat.postEphemeral", channel="12345", as_user=True,
                                             attachments=json.dumps([attachment]), text=" ", user=user_data["id"])
    # Will NOT assert true
    data = dict(text="!TestCommand alskjfasdlkf", channel="12345")
    tc.the_command(data, user_data)
    # An out-of-choices value posts the same help attachment as a missing one.
    attachment = {
        "text": help_text,
        "color": WORKING_COLOR,
        "mrkdwn_in": ["text"]
    }
    slack_client.api_call.assert_called_with("chat.postEphemeral", channel="12345", as_user=True,
                                             attachments=json.dumps([attachment]), text=" ", user=user_data["id"])
def test_uppercase_and_lowercasing(user_data, slack_client):
    """Per-argument lowercase/uppercase flags control how parsed values are cased."""
    class TestCommands:
        def __init__(self):
            pass

        @hubcommander_command(
            name="!TestCommand",
            usage="!TestCommand <arg1> <arg2> <arg3>",
            description="This is a test command to make sure that casing is correct.",
            required=[
                dict(name="arg1", properties=dict(type=str, help="NoT AlL LoWeRCaSE"),
                     lowercase=False),
                dict(name="arg2", properties=dict(type=str, help="all lowercase")),
                dict(name="arg3", properties=dict(type=str, help="ALL UPPERCASE"),
                     uppercase=True)
            ],
        )
        def pass_command(self, data, user_data, arg1, arg2, arg3):
            assert self
            assert data
            assert user_data
            # lowercase=False preserves input casing; the default lowercases;
            # uppercase=True upper-cases the parsed value.
            assert arg1 == "NoT AlL LoWeRCaSE"
            assert arg2 == "all lowercase"
            assert arg3 == "ALL UPPERCASE"

    tc = TestCommands()
    data = dict(text="!TestCommand \"NoT AlL LoWeRCaSE\" \"ALL lOWERcASE\" \"all Uppercase\"")
    tc.pass_command(data, user_data)
def test_cleanup(user_data, slack_client):
    """Bracket noise (<>[]{}) is stripped from args unless cleanup=False."""
    class TestCommands:
        def __init__(self):
            pass

        @hubcommander_command(
            name="!TestCommand",
            usage="!TestCommand <arg1> <arg2>",
            description="This is a test command to make sure that undesirable characters are cleaned up.",
            required=[
                dict(name="arg1", properties=dict(type=str, help="This will clean things up")),
                dict(name="arg2", properties=dict(type=str, help="This will not clean things up."),
                     cleanup=False),
            ],
        )
        def pass_command(self, data, user_data, arg1, arg2):
            assert self
            assert data
            assert user_data
            # arg1 is scrubbed of the surrounding <>][ characters; arg2 keeps
            # them verbatim because cleanup=False.
            assert arg1 == "all cleaned up!"
            assert arg2 == "<not all[} cleaned up}"

    tc = TestCommands()
    data = dict(text="!TestCommand \"<all cleaned up!>>][\" \"<not all[} cleaned up}\"")
tc.pass_command(data, user_data) | tests/test_decorators.py | import json
from hubcommander.bot_components.decorators import hubcommander_command, format_help_text, auth
from hubcommander.bot_components.slack_comm import WORKING_COLOR
from hubcommander.bot_components.parse_functions import ParseException
def test_hubcommander_command_required(user_data, slack_client):
    """Required args are parsed into parameters; missing ones post help text."""
    # Kept as a dict so the identical definition can be replayed through
    # format_help_text when building the expected Slack attachment below.
    fail_command_kwargs = dict(
        name="!FailCommand",
        usage="!FailCommand <arg1>",
        description="This is a test command that will fail due to lack of required args",
        required=[
            dict(name="arg1", properties=dict(type=str, help="This is argument 1")),
        ],
        optional=[]
    )

    class TestCommands:
        def __init__(self):
            pass

        @hubcommander_command(
            name="!TestCommand",
            usage="!TestCommand <arg1> <arg2> <arg3>",
            description="This is a test command to make sure that things are working properly.",
            required=[
                dict(name="arg1", properties=dict(type=str, help="This is argument 1")),
                dict(name="arg2", properties=dict(type=str, help="This is argument 2")),
                dict(name="arg3", properties=dict(type=str, help="This is argument 3"))
            ],
            optional=[]
        )
        def pass_command(self, data, user_data, arg1, arg2, arg3):
            assert self
            assert data
            assert user_data
            assert arg1 == "arg1"
            assert arg2 == "arg2"
            assert arg3 == "arg3"

        @hubcommander_command(**fail_command_kwargs)
        def fail_command(self, data, user_data, arg1):
            assert False  # Can't Touch This...

    tc = TestCommands()

    # Happy path: all three required args supplied and parsed through.
    data = dict(text="!TestCommand arg1 arg2 arg3")
    tc.pass_command(data, user_data)

    # Missing required arg: the wrapped body never runs; the decorator posts
    # the command's help text to the channel as an ephemeral message instead.
    data = dict(text="!FailCommand", channel="12345")
    tc.fail_command(data, user_data)
    help_text = format_help_text(data, user_data, **fail_command_kwargs)
    attachment = {
        "text": help_text,
        "color": WORKING_COLOR,
        "mrkdwn_in": ["text"]
    }
    slack_client.api_call.assert_called_with("chat.postEphemeral", channel="12345", as_user=True,
                                             attachments=json.dumps([attachment]), text=" ", user=user_data["id"])
def test_hubcommander_command_optional(user_data, slack_client):
    """Optional --flags land in **optionals; unsupplied ones arrive falsy."""
    class TestCommands:
        def __init__(self):
            pass

        @hubcommander_command(
            name="!OptionalArgs",
            usage="!OptionalArgs <arg1> <optional arg here> <another optional argument here>",
            description="This is a test command with an optional argument",
            required=[
                dict(name="arg1", properties=dict(type=str, help="This is argument 1")),
            ],
            optional=[
                dict(name="--optional", properties=dict(type=str, help="This is argument 2")),
                dict(name="--optional2", properties=dict(type=str, help="This is argument 3")),
                dict(name="--optional3", properties=dict(type=str, help="This is argument 4"))
            ]
        )
        def optional_arg_command(self, data, user_data, arg1, **optionals):
            assert arg1 == "required"
            # All three declared optionals are always present as keys; the one
            # not passed on the command line ("--optional3") is falsy.
            assert len(optionals) == 3
            assert optionals["optional"] == "some_optional"
            assert optionals["optional2"] == "some_optional2"
            assert not optionals["optional3"]

    tc = TestCommands()
    data = dict(text="!OptionalArgs required --optional some_optional --optional2 some_optional2")
    tc.optional_arg_command(data, user_data)
def test_hubcommander_command_with_custom_validation(user_data, slack_client):
    """A validation_func can coerce arg values; bad input posts usage text."""
    from hubcommander.bot_components.parse_functions import parse_toggles
    verify_command_kwargs = dict(
        name="!VerifyToggle",
        usage="!VerifyToggle <testToggle>",
        description="This is a test command to verify proper toggles",
        required=[
            dict(name="test_toggle", properties=dict(type=str, help="This is argument 1"),
                 validation_func=parse_toggles,
                 validation_func_kwargs=dict(
                     toggle_type="<enablement flag>"
                 ))
        ]
    )

    class TestCommands:
        def __init__(self):
            pass

        @hubcommander_command(**verify_command_kwargs)
        def verify_toggle(self, data, user_data, test_toggle):
            # parse_toggles converts "on"/"false"/etc. into a real bool.
            assert type(test_toggle) is bool

    tc = TestCommands()
    data = dict(text="!VerifyToggle on")
    tc.verify_toggle(data, user_data)
    data = dict(text="!VerifyToggle false")
    tc.verify_toggle(data, user_data)

    # An unparsable toggle never reaches the body; the decorator posts the
    # ParseException's "proper usage" text as an ephemeral message instead.
    data = dict(text="!VerifyToggle WillFail", channel="12345")
    tc.verify_toggle(data, user_data)
    proper_usage_text = ""
    try:
        parse_toggles(tc, "WillFail", toggle_type="<enablement flag>")
    except ParseException as pe:
        proper_usage_text = pe.format_proper_usage(user_data["name"])
    attachment = {
        "text": proper_usage_text,
        "color": "danger",
        "mrkdwn_in": ["text"]
    }
    slack_client.api_call.assert_called_with("chat.postEphemeral", channel="12345", as_user=True,
                                             attachments=json.dumps([attachment]), text=" ", user=user_data["id"])
def test_auth_decorator(user_data, slack_client, auth_plugin):
    """@auth() consults the per-command auth plugin before running the body."""
    class TestCommands:
        def __init__(self):
            # The auth decorator looks up its plugin and kwargs from this
            # per-command table on the instance; should_auth drives the
            # fixture plugin's allow/deny decision.
            self.commands = {
                "!TestCommand": {
                    "auth": {
                        "plugin": auth_plugin,
                        "kwargs": {
                            "should_auth": True
                        }
                    }
                },
                "!TestCommand2": {
                    "auth": {
                        "plugin": auth_plugin,
                        "kwargs": {
                            "should_auth": False
                        }
                    }
                },
                "!OptionalArgs": {
                    "auth": {
                        "plugin": auth_plugin,
                        "kwargs": {
                            "should_auth": True
                        }
                    }
                },
                "!OptionalArgs2": {
                    "auth": {
                        "plugin": auth_plugin,
                        "kwargs": {
                            "should_auth": False
                        }
                    }
                }
            }

        @hubcommander_command(
            name="!TestCommand",
            usage="!TestCommand <arg1> <arg2> <arg3>",
            description="This is a test command to make sure that things are working properly.",
            required=[
                dict(name="arg1", properties=dict(type=str, help="This is argument 1")),
                dict(name="arg2", properties=dict(type=str, help="This is argument 2")),
                dict(name="arg3", properties=dict(type=str, help="This is argument 3"))
            ],
            optional=[]
        )
        @auth()
        def pass_command(self, data, user_data, arg1, arg2, arg3):
            assert arg1 == "arg1"
            assert arg2 == "arg2"
            assert arg3 == "arg3"
            return True

        @hubcommander_command(
            name="!TestCommand2",
            usage="!TestCommand2 <arg1> <arg2> <arg3>",
            description="This is a test command that will fail to authenticate.",
            required=[
                dict(name="arg1", properties=dict(type=str, help="This is argument 1")),
                dict(name="arg2", properties=dict(type=str, help="This is argument 2")),
                dict(name="arg3", properties=dict(type=str, help="This is argument 3"))
            ],
            optional=[]
        )
        @auth()
        def fail_command(self, data, user_data, arg1, arg2, arg3):
            # Will never get here...
            assert False

        @hubcommander_command(
            name="!OptionalArgs",
            usage="!OptionalArgs <arg1> <optional arg here> <another optional argument here>",
            description="This is a test command with an optional argument",
            required=[
                dict(name="arg1", properties=dict(type=str, help="This is argument 1")),
            ],
            optional=[
                dict(name="--optional", properties=dict(type=str, help="This is argument 2")),
                dict(name="--optional2", properties=dict(type=str, help="This is argument 3")),
                dict(name="--optional3", properties=dict(type=str, help="This is argument 4"))
            ]
        )
        @auth()
        def optional_arg_command(self, data, user_data, arg1, **optionals):
            assert arg1 == "required"
            assert len(optionals) == 3
            assert optionals["optional"] == "some_optional"
            assert optionals["optional2"] == "some_optional2"
            assert not optionals["optional3"]
            return True

        @hubcommander_command(
            name="!OptionalArgs2",
            usage="!OptionalArgs2 <arg1> <optional arg here> <another optional argument here>",
            description="This is a test command with an optional argument that will fail to auth",
            required=[
                dict(name="arg1", properties=dict(type=str, help="This is argument 1")),
            ],
            optional=[
                dict(name="--optional", properties=dict(type=str, help="This is argument 2")),
                dict(name="--optional2", properties=dict(type=str, help="This is argument 3")),
                dict(name="--optional3", properties=dict(type=str, help="This is argument 4"))
            ]
        )
        @auth()
        def optional_fail_arg_command(self, data, user_data, arg1, **optionals):
            # Will never reach this:
            assert False

    tc = TestCommands()
    # should_auth=True commands run their bodies and return True; denied ones
    # return a falsy value without ever entering the body.
    data = dict(text="!TestCommand arg1 arg2 arg3")
    assert tc.pass_command(data, user_data)
    assert not tc.fail_command(data, user_data)
    # Test that commands with optional arguments work properly:
    data = dict(text="!OptionalArgs required --optional some_optional --optional2 some_optional2")
    assert tc.optional_arg_command(data, user_data)
    assert not tc.optional_fail_arg_command(data, user_data)
def test_help_command_with_list(user_data, slack_client):
    """`{values}` in help text is filled from the per-command choices list."""
    valid_values = ["one", "two", "three"]
    verify_command_kwargs = dict(
        name="!TestCommand",
        usage="!TestCommand <testThing>",
        description="This is a test command to test help text for things in lists",
        required=[
            dict(name="test_thing", properties=dict(type=str.lower, help="Must be one of: `{values}`"),
                 choices="valid_values")
        ]
    )

    class TestCommands:
        def __init__(self):
            # choices="valid_values" above names this key in the command's
            # entry; the decorator resolves it at call time.
            self.commands = {
                "!TestCommand": {
                    "valid_values": valid_values
                }
            }

        @hubcommander_command(**verify_command_kwargs)
        def the_command(self, data, user_data, test_thing):
            assert True

    tc = TestCommands()
    # Will assert True
    data = dict(text="!TestCommand one")
    tc.the_command(data, user_data)
    # Will ALSO assert True... we are making sure to lowercase the choices with str.lower as the type:
    data = dict(text="!TestCommand ThReE")
    tc.the_command(data, user_data)
    # Will NOT assert true -- this will output help text:
    data = dict(text="!TestCommand", channel="12345")
    tc.the_command(data, user_data)
    help_text = format_help_text(data, user_data, **verify_command_kwargs)
    attachment = {
        "text": help_text,
        "color": WORKING_COLOR,
        "mrkdwn_in": ["text"]
    }
    slack_client.api_call.assert_called_with("chat.postEphemeral", channel="12345", as_user=True,
                                             attachments=json.dumps([attachment]), text=" ", user=user_data["id"])
    # Will NOT assert true
    data = dict(text="!TestCommand alskjfasdlkf", channel="12345")
    tc.the_command(data, user_data)
    # An out-of-choices value posts the same help attachment as a missing one.
    attachment = {
        "text": help_text,
        "color": WORKING_COLOR,
        "mrkdwn_in": ["text"]
    }
    slack_client.api_call.assert_called_with("chat.postEphemeral", channel="12345", as_user=True,
                                             attachments=json.dumps([attachment]), text=" ", user=user_data["id"])
def test_uppercase_and_lowercasing(user_data, slack_client):
    """Per-argument lowercase/uppercase flags control how parsed values are cased."""
    class TestCommands:
        def __init__(self):
            pass

        @hubcommander_command(
            name="!TestCommand",
            usage="!TestCommand <arg1> <arg2> <arg3>",
            description="This is a test command to make sure that casing is correct.",
            required=[
                dict(name="arg1", properties=dict(type=str, help="NoT AlL LoWeRCaSE"),
                     lowercase=False),
                dict(name="arg2", properties=dict(type=str, help="all lowercase")),
                dict(name="arg3", properties=dict(type=str, help="ALL UPPERCASE"),
                     uppercase=True)
            ],
        )
        def pass_command(self, data, user_data, arg1, arg2, arg3):
            assert self
            assert data
            assert user_data
            # lowercase=False preserves input casing; the default lowercases;
            # uppercase=True upper-cases the parsed value.
            assert arg1 == "NoT AlL LoWeRCaSE"
            assert arg2 == "all lowercase"
            assert arg3 == "ALL UPPERCASE"

    tc = TestCommands()
    data = dict(text="!TestCommand \"NoT AlL LoWeRCaSE\" \"ALL lOWERcASE\" \"all Uppercase\"")
    tc.pass_command(data, user_data)
def test_cleanup(user_data, slack_client):
    """Bracket noise (<>[]{}) is stripped from args unless cleanup=False."""
    class TestCommands:
        def __init__(self):
            pass

        @hubcommander_command(
            name="!TestCommand",
            usage="!TestCommand <arg1> <arg2>",
            description="This is a test command to make sure that undesirable characters are cleaned up.",
            required=[
                dict(name="arg1", properties=dict(type=str, help="This will clean things up")),
                dict(name="arg2", properties=dict(type=str, help="This will not clean things up."),
                     cleanup=False),
            ],
        )
        def pass_command(self, data, user_data, arg1, arg2):
            assert self
            assert data
            assert user_data
            # arg1 is scrubbed of the surrounding <>][ characters; arg2 keeps
            # them verbatim because cleanup=False.
            assert arg1 == "all cleaned up!"
            assert arg2 == "<not all[} cleaned up}"

    tc = TestCommands()
    data = dict(text="!TestCommand \"<all cleaned up!>>][\" \"<not all[} cleaned up}\"")
    tc.pass_command(data, user_data)
# pylint: disable=line-too-long
from azure.cli.core.application import APPLICATION
from azure.cli.core.commands import cli_command
from azure.cli.core.commands.arm import cli_generic_update_command
from azure.cli.core.util import empty_on_404
from azure.cli.core.profiles import supported_api_version, PROFILE_TYPE
from ._client_factory import cf_web_client, cf_plans, cf_webapps
def deprecate(argv):
    """Reject the retired 'appservice web' command prefix with a rename hint."""
    if list(argv[:2]) == ['appservice', 'web']:
        from azure.cli.core.util import CLIError
        raise CLIError("All 'appservice web' commands have been renamed to 'webapp'")
# Hook the deprecation check into the CLI's argument-parsing event.
APPLICATION.register(APPLICATION.COMMAND_PARSER_PARSING, deprecate)
def output_slots_in_table(slots):
    """Reduce slot dicts to the name/status/plan columns shown in table output."""
    rows = []
    for slot in slots:
        rows.append({
            'name': slot['name'],
            'status': slot['state'],
            'plan': slot['appServicePlan'],
        })
    return rows
def transform_list_location_output(result):
    """Flatten location objects to plain {'name': ...} dicts for display."""
    names = []
    for location in result:
        names.append({'name': location.name})
    return names
def transform_web_output(web):
    """Trim a web-app resource dict down to the columns shown in table output.

    :param web: dict-shaped web app resource (only whitelisted keys are kept).
    :return: filtered dict with ``appServicePlanId`` collapsed to a bare
        ``appServicePlan`` name.
    """
    props = ['name', 'state', 'location', 'resourceGroup', 'defaultHostName',
             'appServicePlanId', 'ftpPublishingUrl']
    result = {k: web[k] for k in web if k in props}
    # To get width under control: the plan usually lives in the same resource
    # group, so only its trailing name segment is informative. A defaulted pop
    # tolerates payloads that omit the field instead of raising KeyError.
    plan_id = result.pop('appServicePlanId', None)
    if plan_id is not None:
        result['appServicePlan'] = plan_id.split('/')[-1]
    return result
def transform_web_list_output(webs):
    """Apply the single-site table transform to every element of *webs*."""
    return list(map(transform_web_output, webs))
def ex_handler_factory(creating_plan=False):
    """Build an exception handler that rewords opaque App Service errors.

    The service reports some failures as raw JSON whose 'Message' field is
    unhelpful on its own; the returned handler turns that into a CLIError,
    substituting friendlier text for known Linux-plan cases, and re-raises
    anything it cannot parse unchanged.

    :param creating_plan: apply the plan-creation-specific rewordings.
    """
    def _polish_bad_errors(ex):
        import json
        from azure.cli.core.util import CLIError
        try:
            detail = json.loads(ex.response.text)['Message']
            if creating_plan:
                if 'Requested features are not supported in region' in detail:
                    detail = ("Plan with linux worker is not supported in current region. For " +
                              "supported regions, please refer to https://docs.microsoft.com/en-us/"
                              "azure/app-service-web/app-service-linux-intro")
                elif 'Not enough available reserved instance servers to satisfy' in detail:
                    detail = ("Plan with Linux worker can only be created in a group " +
                              "which has never contained a Windows worker, and vice versa. " +
                              "Please use a new resource group. Original error:" + detail)
                ex = CLIError(detail)
        except Exception:  # pylint: disable=broad-except
            # Response wasn't the expected JSON shape -- fall through and
            # re-raise the original exception unchanged.
            pass
        raise ex
    return _polish_bad_errors
# Dotted-path prefix for all custom command implementations in this module.
custom_path = 'azure.cli.command_modules.appservice.custom#'
# Core webapp lifecycle commands (create/list/show/delete/start/stop/restart).
cli_command(__name__, 'webapp create', custom_path + 'create_webapp', exception_handler=ex_handler_factory())
cli_command(__name__, 'webapp list', custom_path + 'list_webapp', table_transformer=transform_web_list_output)
cli_command(__name__, 'webapp show', custom_path + 'show_webapp', exception_handler=empty_on_404, table_transformer=transform_web_output)
cli_command(__name__, 'webapp delete', custom_path + 'delete_webapp')
cli_command(__name__, 'webapp stop', custom_path + 'stop_webapp')
cli_command(__name__, 'webapp start', custom_path + 'start_webapp')
cli_command(__name__, 'webapp restart', custom_path + 'restart_webapp')
# PATCH-style update: GET the site, apply changes via update_webapp, then PUT
# the modified envelope back through create_or_update.
cli_generic_update_command(__name__, 'webapp update', 'azure.mgmt.web.operations.web_apps_operations#WebAppsOperations.get',
                           'azure.mgmt.web.operations.web_apps_operations#WebAppsOperations.create_or_update',
                           custom_function_op=custom_path + 'update_webapp',
                           setter_arg_name='site_envelope', factory=cf_webapps)
# Traffic routing and site configuration: general settings, app settings,
# connection strings, hostnames, container settings, SSL certs, backups.
cli_command(__name__, 'webapp traffic-routing set', custom_path + 'set_traffic_routing')
cli_command(__name__, 'webapp traffic-routing show', custom_path + 'show_traffic_routing')
cli_command(__name__, 'webapp traffic-routing clear', custom_path + 'clear_traffic_routing')
cli_command(__name__, 'webapp config set', custom_path + 'update_site_configs')
cli_command(__name__, 'webapp config show', custom_path + 'get_site_configs', exception_handler=empty_on_404)
cli_command(__name__, 'webapp config appsettings list', custom_path + 'get_app_settings', exception_handler=empty_on_404)
cli_command(__name__, 'webapp config appsettings set', custom_path + 'update_app_settings')
cli_command(__name__, 'webapp config appsettings delete', custom_path + 'delete_app_settings')
cli_command(__name__, 'webapp config connection-string list', custom_path + 'get_connection_strings', exception_handler=empty_on_404)
cli_command(__name__, 'webapp config connection-string set', custom_path + 'update_connection_strings')
cli_command(__name__, 'webapp config connection-string delete', custom_path + 'delete_connection_strings')
cli_command(__name__, 'webapp config hostname add', custom_path + 'add_hostname', exception_handler=ex_handler_factory())
cli_command(__name__, 'webapp config hostname list', custom_path + 'list_hostnames')
cli_command(__name__, 'webapp config hostname delete', custom_path + 'delete_hostname')
cli_command(__name__, 'webapp config hostname get-external-ip', custom_path + 'get_external_ip')
cli_command(__name__, 'webapp config container set', custom_path + 'update_container_settings')
cli_command(__name__, 'webapp config container delete', custom_path + 'delete_container_settings')
cli_command(__name__, 'webapp config container show', custom_path + 'show_container_settings', exception_handler=empty_on_404)
cli_command(__name__, 'webapp config ssl upload', custom_path + 'upload_ssl_cert', exception_handler=ex_handler_factory())
cli_command(__name__, 'webapp config ssl list', custom_path + 'list_ssl_certs')
cli_command(__name__, 'webapp config ssl bind', custom_path + 'bind_ssl_cert', exception_handler=ex_handler_factory())
cli_command(__name__, 'webapp config ssl unbind', custom_path + 'unbind_ssl_cert')
cli_command(__name__, 'webapp config ssl delete', custom_path + 'delete_ssl_cert')
cli_command(__name__, 'webapp config backup list', custom_path + 'list_backups')
cli_command(__name__, 'webapp config backup show', custom_path + 'show_backup_configuration', exception_handler=empty_on_404)
cli_command(__name__, 'webapp config backup create', custom_path + 'create_backup', exception_handler=ex_handler_factory())
cli_command(__name__, 'webapp config backup update', custom_path + 'update_backup_schedule', exception_handler=ex_handler_factory())
cli_command(__name__, 'webapp config backup restore', custom_path + 'restore_backup', exception_handler=ex_handler_factory())
# Deployment sources, diagnostics/logs, browsing, slots, publishing
# users/profiles, container CD, runtimes and auth settings.
cli_command(__name__, 'webapp deployment source config-local-git', custom_path + 'enable_local_git')
cli_command(__name__, 'webapp deployment source config-zip', custom_path + 'enable_zip_deploy')
cli_command(__name__, 'webapp deployment source config', custom_path + 'config_source_control', exception_handler=ex_handler_factory())
cli_command(__name__, 'webapp deployment source sync', custom_path + 'sync_site_repo', exception_handler=ex_handler_factory())
cli_command(__name__, 'webapp deployment source show', custom_path + 'show_source_control', exception_handler=empty_on_404)
cli_command(__name__, 'webapp deployment source delete', custom_path + 'delete_source_control')
cli_command(__name__, 'webapp deployment source update-token', custom_path + 'update_git_token', exception_handler=ex_handler_factory())
cli_command(__name__, 'webapp log tail', custom_path + 'get_streaming_log')
cli_command(__name__, 'webapp log download', custom_path + 'download_historical_logs')
cli_command(__name__, 'webapp log config', custom_path + 'config_diagnostics')
cli_command(__name__, 'webapp log show', custom_path + 'show_diagnostic_settings')
cli_command(__name__, 'webapp browse', custom_path + 'view_in_browser')
cli_command(__name__, 'webapp deployment slot list', custom_path + 'list_slots', table_transformer=output_slots_in_table)
cli_command(__name__, 'webapp deployment slot delete', custom_path + 'delete_slot')
cli_command(__name__, 'webapp deployment slot auto-swap', custom_path + 'config_slot_auto_swap')
cli_command(__name__, 'webapp deployment slot swap', custom_path + 'swap_slot', exception_handler=ex_handler_factory())
cli_command(__name__, 'webapp deployment slot create', custom_path + 'create_webapp_slot', exception_handler=ex_handler_factory())
cli_command(__name__, 'webapp deployment user set', custom_path + 'set_deployment_user', exception_handler=ex_handler_factory())
cli_command(__name__, 'webapp deployment list-publishing-profiles',
            custom_path + 'list_publish_profiles')
cli_command(__name__, 'webapp deployment container config',
            custom_path + 'enable_cd')
cli_command(__name__, 'webapp deployment container show-cd-url',
            custom_path + 'show_container_cd_url')
cli_command(__name__, 'webapp deployment user show', 'azure.mgmt.web.web_site_management_client#WebSiteManagementClient.get_publishing_user', cf_web_client, exception_handler=empty_on_404)
cli_command(__name__, 'webapp list-runtimes', custom_path + 'list_runtimes')
cli_command(__name__, 'webapp auth show', custom_path + 'get_auth_settings')
cli_command(__name__, 'webapp auth update', custom_path + 'update_auth_settings')
if not supported_api_version(PROFILE_TYPE, max_api='2017-03-09-profile'):
cli_command(__name__, 'appservice plan create', custom_path + 'create_app_service_plan', exception_handler=ex_handler_factory(creating_plan=True))
cli_command(__name__, 'appservice plan delete', 'azure.mgmt.web.operations.app_service_plans_operations#AppServicePlansOperations.delete', cf_plans, confirmation=True)
cli_command(__name__, 'appservice plan list', custom_path + 'list_app_service_plans')
cli_command(__name__, 'appservice plan show', 'azure.mgmt.web.operations.app_service_plans_operations#AppServicePlansOperations.get', cf_plans, exception_handler=empty_on_404)
cli_generic_update_command(__name__, 'appservice plan update', 'azure.mgmt.web.operations.app_service_plans_operations#AppServicePlansOperations.get',
'azure.mgmt.web.operations.app_service_plans_operations#AppServicePlansOperations.create_or_update',
custom_function_op=custom_path + 'update_app_service_plan',
setter_arg_name='app_service_plan', factory=cf_plans)
cli_command(__name__, 'appservice list-locations', 'azure.mgmt.web.web_site_management_client#WebSiteManagementClient.list_geo_regions', cf_web_client, transform=transform_list_location_output)
cli_command(__name__, 'functionapp create', custom_path + 'create_function')
cli_command(__name__, 'functionapp list', custom_path + 'list_function_app', table_transformer=transform_web_list_output)
cli_command(__name__, 'functionapp show', custom_path + 'show_webapp', exception_handler=empty_on_404, table_transformer=transform_web_output)
cli_command(__name__, 'functionapp delete', custom_path + 'delete_function_app')
cli_command(__name__, 'functionapp stop', custom_path + 'stop_webapp')
cli_command(__name__, 'functionapp start', custom_path + 'start_webapp')
cli_command(__name__, 'functionapp restart', custom_path + 'restart_webapp')
cli_command(__name__, 'functionapp list-consumption-locations', custom_path + 'list_consumption_locations')
cli_command(__name__, 'functionapp config appsettings list', custom_path + 'get_app_settings', exception_handler=empty_on_404)
cli_command(__name__, 'functionapp config appsettings set', custom_path + 'update_app_settings')
cli_command(__name__, 'functionapp config appsettings delete', custom_path + 'delete_app_settings')
cli_command(__name__, 'functionapp config hostname add', custom_path + 'add_hostname', exception_handler=ex_handler_factory())
cli_command(__name__, 'functionapp config hostname list', custom_path + 'list_hostnames')
cli_command(__name__, 'functionapp config hostname delete', custom_path + 'delete_hostname')
cli_command(__name__, 'functionapp config hostname get-external-ip', custom_path + 'get_external_ip')
cli_command(__name__, 'functionapp config ssl upload', custom_path + 'upload_ssl_cert', exception_handler=ex_handler_factory())
cli_command(__name__, 'functionapp config ssl list', custom_path + 'list_ssl_certs')
cli_command(__name__, 'functionapp config ssl bind', custom_path + 'bind_ssl_cert', exception_handler=ex_handler_factory())
cli_command(__name__, 'functionapp config ssl unbind', custom_path + 'unbind_ssl_cert')
cli_command(__name__, 'functionapp config ssl delete', custom_path + 'delete_ssl_cert')
cli_command(__name__, 'functionapp deployment source config-local-git', custom_path + 'enable_local_git')
cli_command(__name__, 'functionapp deployment source config-zip', custom_path + 'enable_zip_deploy')
cli_command(__name__, 'functionapp deployment source config', custom_path + 'config_source_control', exception_handler=ex_handler_factory())
cli_command(__name__, 'functionapp deployment source sync', custom_path + 'sync_site_repo')
cli_command(__name__, 'functionapp deployment source show', custom_path + 'show_source_control', exception_handler=empty_on_404)
cli_command(__name__, 'functionapp deployment source delete', custom_path + 'delete_source_control')
cli_command(__name__, 'functionapp deployment source update-token', custom_path + 'update_git_token', exception_handler=ex_handler_factory())
cli_command(__name__, 'functionapp deployment user set', custom_path + 'set_deployment_user', exception_handler=ex_handler_factory())
cli_command(__name__, 'functionapp deployment list-publishing-profiles',
custom_path + 'list_publish_profiles')
cli_command(__name__, 'functionapp deployment user show', 'azure.mgmt.web.web_site_management_client#WebSiteManagementClient.get_publishing_user', cf_web_client, exception_handler=empty_on_404) | src/command_modules/azure-cli-appservice/azure/cli/command_modules/appservice/commands.py |
# pylint: disable=line-too-long
from azure.cli.core.application import APPLICATION
from azure.cli.core.commands import cli_command
from azure.cli.core.commands.arm import cli_generic_update_command
from azure.cli.core.util import empty_on_404
from azure.cli.core.profiles import supported_api_version, PROFILE_TYPE
from ._client_factory import cf_web_client, cf_plans, cf_webapps
def deprecate(argv):
    """Reject the retired 'appservice web' command prefix with a rename hint.

    Registered as a parser hook; raises CLIError for 'appservice web ...'
    invocations and is a no-op for every other argument vector.
    """
    uses_old_prefix = len(argv) > 1 and argv[0] == 'appservice' and argv[1] == 'web'
    if not uses_old_prefix:
        return
    from azure.cli.core.util import CLIError
    raise CLIError("All 'appservice web' commands have been renamed to 'webapp'")
APPLICATION.register(APPLICATION.COMMAND_PARSER_PARSING, deprecate)
def output_slots_in_table(slots):
    """Project deployment-slot dicts onto the columns shown in table output."""
    rows = []
    for slot in slots:
        rows.append({
            'name': slot['name'],
            'status': slot['state'],
            'plan': slot['appServicePlan'],
        })
    return rows
def transform_list_location_output(result):
    """Reduce location objects to name-only dicts for compact display."""
    names = []
    for location in result:
        names.append({'name': location.name})
    return names
def transform_web_output(web):
    """Trim a web-app dict down to the summary fields used in table output."""
    keep = ('name', 'state', 'location', 'resourceGroup', 'defaultHostName',
            'appServicePlanId', 'ftpPublishingUrl')
    result = {key: value for key, value in web.items() if key in keep}
    # Keep the table narrow: show just the plan name instead of the full ARM
    # id (the plan usually lives in the same resource group anyway).
    result['appServicePlan'] = result.pop('appServicePlanId').split('/')[-1]
    return result
def transform_web_list_output(webs):
    """Apply the single-app table transform to every web app in the list."""
    return list(map(transform_web_output, webs))
def ex_handler_factory(creating_plan=False):
    """Build an exception handler that rewrites raw service faults as CLIError.

    The handler extracts the 'Message' field from the JSON error body. When
    *creating_plan* is true, two well-known Linux-plan failures are replaced
    with actionable guidance. Anything unparsable is re-raised unchanged.
    """
    def _handler(ex):
        import json
        from azure.cli.core.util import CLIError
        try:
            detail = json.loads(ex.response.text)['Message']
            region_unsupported = 'Requested features are not supported in region' in detail
            mixed_workers = 'Not enough available reserved instance servers to satisfy' in detail
            if creating_plan and region_unsupported:
                detail = ("Plan with linux worker is not supported in current region. For "
                          "supported regions, please refer to https://docs.microsoft.com/en-us/"
                          "azure/app-service-web/app-service-linux-intro")
            elif creating_plan and mixed_workers:
                detail = ("Plan with Linux worker can only be created in a group "
                          "which has never contained a Windows worker, and vice versa. "
                          "Please use a new resource group. Original error:" + detail)
            ex = CLIError(detail)
        except Exception:  # pylint: disable=broad-except
            # Body was not the expected JSON shape -- keep the original error.
            pass
        raise ex
    return _handler
# Dotted-path prefix for the custom command implementations in this package.
custom_path = 'azure.cli.command_modules.appservice.custom#'

# --- webapp: lifecycle ---
cli_command(__name__, 'webapp create', custom_path + 'create_webapp', exception_handler=ex_handler_factory())
cli_command(__name__, 'webapp list', custom_path + 'list_webapp', table_transformer=transform_web_list_output)
cli_command(__name__, 'webapp show', custom_path + 'show_webapp', exception_handler=empty_on_404, table_transformer=transform_web_output)
cli_command(__name__, 'webapp delete', custom_path + 'delete_webapp')
cli_command(__name__, 'webapp stop', custom_path + 'stop_webapp')
cli_command(__name__, 'webapp start', custom_path + 'start_webapp')
cli_command(__name__, 'webapp restart', custom_path + 'restart_webapp')
# Generic update: GET the site envelope, apply changes, PUT it back.
cli_generic_update_command(__name__, 'webapp update', 'azure.mgmt.web.operations.web_apps_operations#WebAppsOperations.get',
                           'azure.mgmt.web.operations.web_apps_operations#WebAppsOperations.create_or_update',
                           custom_function_op=custom_path + 'update_webapp',
                           setter_arg_name='site_envelope', factory=cf_webapps)

# --- webapp: traffic routing between slots ---
cli_command(__name__, 'webapp traffic-routing set', custom_path + 'set_traffic_routing')
cli_command(__name__, 'webapp traffic-routing show', custom_path + 'show_traffic_routing')
cli_command(__name__, 'webapp traffic-routing clear', custom_path + 'clear_traffic_routing')

# --- webapp: site configuration, app settings, connection strings ---
cli_command(__name__, 'webapp config set', custom_path + 'update_site_configs')
cli_command(__name__, 'webapp config show', custom_path + 'get_site_configs', exception_handler=empty_on_404)
cli_command(__name__, 'webapp config appsettings list', custom_path + 'get_app_settings', exception_handler=empty_on_404)
cli_command(__name__, 'webapp config appsettings set', custom_path + 'update_app_settings')
cli_command(__name__, 'webapp config appsettings delete', custom_path + 'delete_app_settings')
cli_command(__name__, 'webapp config connection-string list', custom_path + 'get_connection_strings', exception_handler=empty_on_404)
cli_command(__name__, 'webapp config connection-string set', custom_path + 'update_connection_strings')
cli_command(__name__, 'webapp config connection-string delete', custom_path + 'delete_connection_strings')

# --- webapp: custom hostnames ---
cli_command(__name__, 'webapp config hostname add', custom_path + 'add_hostname', exception_handler=ex_handler_factory())
cli_command(__name__, 'webapp config hostname list', custom_path + 'list_hostnames')
cli_command(__name__, 'webapp config hostname delete', custom_path + 'delete_hostname')
cli_command(__name__, 'webapp config hostname get-external-ip', custom_path + 'get_external_ip')

# --- webapp: container settings ---
cli_command(__name__, 'webapp config container set', custom_path + 'update_container_settings')
cli_command(__name__, 'webapp config container delete', custom_path + 'delete_container_settings')
cli_command(__name__, 'webapp config container show', custom_path + 'show_container_settings', exception_handler=empty_on_404)

# --- webapp: SSL certificates ---
cli_command(__name__, 'webapp config ssl upload', custom_path + 'upload_ssl_cert', exception_handler=ex_handler_factory())
cli_command(__name__, 'webapp config ssl list', custom_path + 'list_ssl_certs')
cli_command(__name__, 'webapp config ssl bind', custom_path + 'bind_ssl_cert', exception_handler=ex_handler_factory())
cli_command(__name__, 'webapp config ssl unbind', custom_path + 'unbind_ssl_cert')
cli_command(__name__, 'webapp config ssl delete', custom_path + 'delete_ssl_cert')

# --- webapp: backup / restore ---
cli_command(__name__, 'webapp config backup list', custom_path + 'list_backups')
cli_command(__name__, 'webapp config backup show', custom_path + 'show_backup_configuration', exception_handler=empty_on_404)
cli_command(__name__, 'webapp config backup create', custom_path + 'create_backup', exception_handler=ex_handler_factory())
cli_command(__name__, 'webapp config backup update', custom_path + 'update_backup_schedule', exception_handler=ex_handler_factory())
cli_command(__name__, 'webapp config backup restore', custom_path + 'restore_backup', exception_handler=ex_handler_factory())

# --- webapp: deployment sources ---
cli_command(__name__, 'webapp deployment source config-local-git', custom_path + 'enable_local_git')
cli_command(__name__, 'webapp deployment source config-zip', custom_path + 'enable_zip_deploy')
cli_command(__name__, 'webapp deployment source config', custom_path + 'config_source_control', exception_handler=ex_handler_factory())
cli_command(__name__, 'webapp deployment source sync', custom_path + 'sync_site_repo', exception_handler=ex_handler_factory())
cli_command(__name__, 'webapp deployment source show', custom_path + 'show_source_control', exception_handler=empty_on_404)
cli_command(__name__, 'webapp deployment source delete', custom_path + 'delete_source_control')
cli_command(__name__, 'webapp deployment source update-token', custom_path + 'update_git_token', exception_handler=ex_handler_factory())

# --- webapp: diagnostics logs ---
cli_command(__name__, 'webapp log tail', custom_path + 'get_streaming_log')
cli_command(__name__, 'webapp log download', custom_path + 'download_historical_logs')
cli_command(__name__, 'webapp log config', custom_path + 'config_diagnostics')
cli_command(__name__, 'webapp log show', custom_path + 'show_diagnostic_settings')

cli_command(__name__, 'webapp browse', custom_path + 'view_in_browser')

# --- webapp: deployment slots ---
cli_command(__name__, 'webapp deployment slot list', custom_path + 'list_slots', table_transformer=output_slots_in_table)
cli_command(__name__, 'webapp deployment slot delete', custom_path + 'delete_slot')
cli_command(__name__, 'webapp deployment slot auto-swap', custom_path + 'config_slot_auto_swap')
cli_command(__name__, 'webapp deployment slot swap', custom_path + 'swap_slot', exception_handler=ex_handler_factory())
cli_command(__name__, 'webapp deployment slot create', custom_path + 'create_webapp_slot', exception_handler=ex_handler_factory())

# --- webapp: deployment credentials, publishing profiles, container CD ---
cli_command(__name__, 'webapp deployment user set', custom_path + 'set_deployment_user', exception_handler=ex_handler_factory())
cli_command(__name__, 'webapp deployment list-publishing-profiles',
            custom_path + 'list_publish_profiles')
cli_command(__name__, 'webapp deployment container config',
            custom_path + 'enable_cd')
cli_command(__name__, 'webapp deployment container show-cd-url',
            custom_path + 'show_container_cd_url')
# 'user show' goes straight to the SDK client rather than a custom wrapper.
cli_command(__name__, 'webapp deployment user show', 'azure.mgmt.web.web_site_management_client#WebSiteManagementClient.get_publishing_user', cf_web_client, exception_handler=empty_on_404)

cli_command(__name__, 'webapp list-runtimes', custom_path + 'list_runtimes')

# --- webapp: authentication settings ---
cli_command(__name__, 'webapp auth show', custom_path + 'get_auth_settings')
cli_command(__name__, 'webapp auth update', custom_path + 'update_auth_settings')
if not supported_api_version(PROFILE_TYPE, max_api='2017-03-09-profile'):
cli_command(__name__, 'appservice plan create', custom_path + 'create_app_service_plan', exception_handler=ex_handler_factory(creating_plan=True))
cli_command(__name__, 'appservice plan delete', 'azure.mgmt.web.operations.app_service_plans_operations#AppServicePlansOperations.delete', cf_plans, confirmation=True)
cli_command(__name__, 'appservice plan list', custom_path + 'list_app_service_plans')
cli_command(__name__, 'appservice plan show', 'azure.mgmt.web.operations.app_service_plans_operations#AppServicePlansOperations.get', cf_plans, exception_handler=empty_on_404)
cli_generic_update_command(__name__, 'appservice plan update', 'azure.mgmt.web.operations.app_service_plans_operations#AppServicePlansOperations.get',
'azure.mgmt.web.operations.app_service_plans_operations#AppServicePlansOperations.create_or_update',
custom_function_op=custom_path + 'update_app_service_plan',
setter_arg_name='app_service_plan', factory=cf_plans)
cli_command(__name__, 'appservice list-locations', 'azure.mgmt.web.web_site_management_client#WebSiteManagementClient.list_geo_regions', cf_web_client, transform=transform_list_location_output)
# --- functionapp: lifecycle (several reuse the webapp implementations) ---
cli_command(__name__, 'functionapp create', custom_path + 'create_function')
cli_command(__name__, 'functionapp list', custom_path + 'list_function_app', table_transformer=transform_web_list_output)
cli_command(__name__, 'functionapp show', custom_path + 'show_webapp', exception_handler=empty_on_404, table_transformer=transform_web_output)
cli_command(__name__, 'functionapp delete', custom_path + 'delete_function_app')
cli_command(__name__, 'functionapp stop', custom_path + 'stop_webapp')
cli_command(__name__, 'functionapp start', custom_path + 'start_webapp')
cli_command(__name__, 'functionapp restart', custom_path + 'restart_webapp')
cli_command(__name__, 'functionapp list-consumption-locations', custom_path + 'list_consumption_locations')

# --- functionapp: app settings ---
cli_command(__name__, 'functionapp config appsettings list', custom_path + 'get_app_settings', exception_handler=empty_on_404)
cli_command(__name__, 'functionapp config appsettings set', custom_path + 'update_app_settings')
cli_command(__name__, 'functionapp config appsettings delete', custom_path + 'delete_app_settings')

# --- functionapp: custom hostnames ---
cli_command(__name__, 'functionapp config hostname add', custom_path + 'add_hostname', exception_handler=ex_handler_factory())
cli_command(__name__, 'functionapp config hostname list', custom_path + 'list_hostnames')
cli_command(__name__, 'functionapp config hostname delete', custom_path + 'delete_hostname')
cli_command(__name__, 'functionapp config hostname get-external-ip', custom_path + 'get_external_ip')

# --- functionapp: SSL certificates ---
cli_command(__name__, 'functionapp config ssl upload', custom_path + 'upload_ssl_cert', exception_handler=ex_handler_factory())
cli_command(__name__, 'functionapp config ssl list', custom_path + 'list_ssl_certs')
cli_command(__name__, 'functionapp config ssl bind', custom_path + 'bind_ssl_cert', exception_handler=ex_handler_factory())
cli_command(__name__, 'functionapp config ssl unbind', custom_path + 'unbind_ssl_cert')
cli_command(__name__, 'functionapp config ssl delete', custom_path + 'delete_ssl_cert')

# --- functionapp: deployment sources and credentials ---
cli_command(__name__, 'functionapp deployment source config-local-git', custom_path + 'enable_local_git')
cli_command(__name__, 'functionapp deployment source config-zip', custom_path + 'enable_zip_deploy')
cli_command(__name__, 'functionapp deployment source config', custom_path + 'config_source_control', exception_handler=ex_handler_factory())
cli_command(__name__, 'functionapp deployment source sync', custom_path + 'sync_site_repo')
cli_command(__name__, 'functionapp deployment source show', custom_path + 'show_source_control', exception_handler=empty_on_404)
cli_command(__name__, 'functionapp deployment source delete', custom_path + 'delete_source_control')
cli_command(__name__, 'functionapp deployment source update-token', custom_path + 'update_git_token', exception_handler=ex_handler_factory())
cli_command(__name__, 'functionapp deployment user set', custom_path + 'set_deployment_user', exception_handler=ex_handler_factory())
cli_command(__name__, 'functionapp deployment list-publishing-profiles',
            custom_path + 'list_publish_profiles')
cli_command(__name__, 'functionapp deployment user show', 'azure.mgmt.web.web_site_management_client#WebSiteManagementClient.get_publishing_user', cf_web_client, exception_handler=empty_on_404) | 0.47926 | 0.049543 |
from core.GUI import *
from engine.AttackTarget import AttackTarget
from engine.SetFollow import SetFollow
# Module-level state shared by the GUI callbacks defined in ShowMap.__init__.
EnabledAutoAttack = False   # True while the auto-attack polling loops are scheduled
TargetNumber = 0            # current target index, updated by AttackTarget each scan
# Choices offered in the priority drop-down.
priority = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# Monster names selectable in the target drop-down.
monsters = [
    "Rat",
    "CaveRat",
    "Orc",
    "OrcWarrior",
    "OrcSpearman",
    "Cyclops",
    "Rotworm",
    "AnyCorym",
    "CorymCharlatan",
    "CorymSkirmisher",
    "CorymVanguard",
    "Stonerefiner"
]
# Default selection for the monster drop-down.
monster = 'Rat'
class ShowMap:
    """Tk window for the Auto Attack module.

    Builds the configuration GUI and hosts the closures that drive the
    attack/follow polling loops via ``root.after``.

    Note: ``tk``, ``pyautogui`` and ``combine_funcs`` are not imported here;
    presumably they arrive via ``from core.GUI import *`` -- verify.
    """

    def __init__(self, root, SQMs, BattlePosition):
        # Build the module window using the shared GUI helper.
        self.ShowMap = GUI('AutoAttack', 'Module: Auto Attack')
        self.ShowMap.DefaultWindow('DefaultWindow')

        def SetAutoAttack():
            """Toggle auto-attack and (re)start the polling loops."""
            global EnabledAutoAttack
            if not EnabledAutoAttack:
                EnabledAutoAttack = True
                ButtonEnabled.configure(text='AutoAttack: ON')
                # NOTE(review): ScanAutoAttack()/ScanFollowMode() are *called*
                # here, so combine_funcs receives their return values (None),
                # not the functions. The loops still start as a side effect,
                # but the combine_funcs wrapper is a no-op -- confirm intent.
                combine_funcs(ScanAutoAttack(), ScanFollowMode())
            else:
                EnabledAutoAttack = False
                ButtonEnabled.configure(text='AutoAttack: OFF')

        def ScanAutoAttack():
            """Attack the selected monster, then reschedule itself (300 ms)."""
            if EnabledAutoAttack:
                global TargetNumber
                monster = monster2.get()
                TargetNumber = AttackTarget(monster, BattlePosition, SQMs, TargetNumber)
            if EnabledAutoAttack:
                root.after(300, ScanAutoAttack)

        def ScanFollowMode():
            """Click the follow position if one is set, then reschedule (3 s)."""
            if EnabledAutoAttack:
                follow_x_pos, follow_y_pos = SetFollow()
                # (0, 0) is treated as "no follow target".
                if follow_x_pos != 0 and follow_y_pos != 0:
                    # Save and restore the cursor so the user's mouse is not
                    # left wherever the synthetic click happened.
                    past_mouse_position = pyautogui.position()
                    pyautogui.leftClick(follow_x_pos, follow_y_pos)
                    pyautogui.moveTo(past_mouse_position)
            if EnabledAutoAttack:
                root.after(3000, ScanFollowMode)

        # Tk variables backing the widgets created below.
        CheckPrint = tk.BooleanVar()
        LowMana = tk.BooleanVar()
        AttackOne = tk.BooleanVar()
        monster2 = tk.StringVar()
        monster2.set(monster)
        PriorityOne = tk.IntVar()
        PriorityOne.set(1)

        self.ShowMap.addButton('Ok', self.ShowMap.destroyWindow, [84, 29, 130, 504], [127, 17, 8], [123, 13, 5])
        # Label the toggle button to match the current module-level state.
        global EnabledAutoAttack
        if not EnabledAutoAttack:
            ButtonEnabled = self.ShowMap.addButton('AutoAttack: OFF', SetAutoAttack, [328, 29, 12, 469],
                                                   [127, 17, 8], [123, 13, 5])
        else:
            ButtonEnabled = self.ShowMap.addButton('AutoAttack: ON', SetAutoAttack, [328, 29, 12, 469],
                                                   [127, 17, 8], [123, 13, 5])
        ButtonPrint = self.ShowMap.addCheck(CheckPrint, [10, 408], [120, 98, 51], 0, "Print on Tibia's screen")
        ButtonLowMana = self.ShowMap.addCheck(LowMana, [10, 440], [120, 98, 51], 0, "Low Mana Warnings")
        CheckAttackOne = self.ShowMap.addCheck(AttackOne, [32, 74], [130, 16, 6], 1, 'Monster One')
        OptionMonstersOne = self.ShowMap.addOption(monster2, monsters, [155, 70])
        PriorityMonstersOne = self.ShowMap.addOption(PriorityOne, priority, [240, 70])
self.ShowMap.loop() | modules/ShowMap.py | from core.GUI import *
from engine.AttackTarget import AttackTarget
from engine.SetFollow import SetFollow
EnabledAutoAttack = False
TargetNumber = 0
priority = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
monsters = [
"Rat",
"CaveRat",
"Orc",
"OrcWarrior",
"OrcSpearman",
"Cyclops",
"Rotworm",
"AnyCorym",
"CorymCharlatan",
"CorymSkirmisher",
"CorymVanguard",
"Stonerefiner"
]
monster = 'Rat'
class ShowMap:
def __init__(self, root, SQMs, BattlePosition):
self.ShowMap = GUI('AutoAttack', 'Module: Auto Attack')
self.ShowMap.DefaultWindow('DefaultWindow')
def SetAutoAttack():
global EnabledAutoAttack
if not EnabledAutoAttack:
EnabledAutoAttack = True
ButtonEnabled.configure(text='AutoAttack: ON')
combine_funcs(ScanAutoAttack(), ScanFollowMode())
else:
EnabledAutoAttack = False
ButtonEnabled.configure(text='AutoAttack: OFF')
def ScanAutoAttack():
if EnabledAutoAttack:
global TargetNumber
monster = monster2.get()
TargetNumber = AttackTarget(monster, BattlePosition, SQMs, TargetNumber)
if EnabledAutoAttack:
root.after(300, ScanAutoAttack)
def ScanFollowMode():
if EnabledAutoAttack:
follow_x_pos, follow_y_pos = SetFollow()
if follow_x_pos != 0 and follow_y_pos != 0:
past_mouse_position = pyautogui.position()
pyautogui.leftClick(follow_x_pos, follow_y_pos)
pyautogui.moveTo(past_mouse_position)
if EnabledAutoAttack:
root.after(3000, ScanFollowMode)
CheckPrint = tk.BooleanVar()
LowMana = tk.BooleanVar()
AttackOne = tk.BooleanVar()
monster2 = tk.StringVar()
monster2.set(monster)
PriorityOne = tk.IntVar()
PriorityOne.set(1)
self.ShowMap.addButton('Ok', self.ShowMap.destroyWindow, [84, 29, 130, 504], [127, 17, 8], [123, 13, 5])
global EnabledAutoAttack
if not EnabledAutoAttack:
ButtonEnabled = self.ShowMap.addButton('AutoAttack: OFF', SetAutoAttack, [328, 29, 12, 469],
[127, 17, 8], [123, 13, 5])
else:
ButtonEnabled = self.ShowMap.addButton('AutoAttack: ON', SetAutoAttack, [328, 29, 12, 469],
[127, 17, 8], [123, 13, 5])
ButtonPrint = self.ShowMap.addCheck(CheckPrint, [10, 408], [120, 98, 51], 0, "Print on Tibia's screen")
ButtonLowMana = self.ShowMap.addCheck(LowMana, [10, 440], [120, 98, 51], 0, "Low Mana Warnings")
CheckAttackOne = self.ShowMap.addCheck(AttackOne, [32, 74], [130, 16, 6], 1, 'Monster One')
OptionMonstersOne = self.ShowMap.addOption(monster2, monsters, [155, 70])
PriorityMonstersOne = self.ShowMap.addOption(PriorityOne, priority, [240, 70])
self.ShowMap.loop() | 0.404037 | 0.195019 |
from copy import deepcopy
from flask_restful import Resource, request
import jsonschema
import caps
import common
from rest.rest_exceptions import BadRequest, InternalError
from rest.rest_auth import auth
from config import ConfigStore
class CapsMba(Resource):
    """
    Handles /caps/mba requests
    """

    @staticmethod
    @auth.login_required
    def get():
        """
        Handles GET /caps/mba request.

        Returns:
            response, status code
        """
        # 'mba_enabled' is reported as the complement of MBA BW mode: this
        # endpoint exposes the two as a single on/off pair.
        return {
            'mba_enabled': not caps.mba_bw_enabled(),
            'mba_bw_enabled': caps.mba_bw_enabled()
        }, 200
class CapsMbaCtrl(Resource):
    """
    Handles /caps/mba_ctrl HTTP requests
    """

    @staticmethod
    @auth.login_required
    def get():
        """
        Handles HTTP GET /caps/mba_ctrl request.
        Reports MBA CTRL hardware support and current enabled state.

        Returns:
            response, status code
        """
        return {
            'supported': caps.mba_bw_supported(),
            'enabled': caps.mba_bw_enabled()
        }, 200

    @staticmethod
    @auth.login_required
    def put():
        """
        Handles PUT /caps/mba_ctrl request.
        Validates the payload, then persists the requested MBA CTRL state.
        Raises BadRequest on schema violations.

        Returns:
            response, status code
        """
        json_data = request.get_json()

        # Reject anything that does not match the request schema.
        try:
            schema, resolver = ConfigStore.load_json_schema('modify_mba_ctrl.json')
            jsonschema.validate(json_data, schema, resolver=resolver)
        except jsonschema.ValidationError as error:
            raise BadRequest("Request validation failed - %s" % (str(error)))

        if not caps.mba_bw_supported():
            return {'message': "MBA CTRL not supported!"}, 409

        # Changing MBA CTRL is only allowed on a pool-free configuration.
        if common.CONFIG_STORE.is_any_pool_defined():
            return {'message': "Please remove all Pools first!"}, 409

        new_config = deepcopy(common.CONFIG_STORE.get_config())
        CapsMbaCtrl.set_mba_ctrl_enabled(new_config, json_data['enabled'])
        common.CONFIG_STORE.set_config(new_config)

        return {'message': "MBA CTRL status changed."}, 200

    @staticmethod
    def set_mba_ctrl_enabled(data, enabled):
        """Record the desired MBA CTRL state in the config dict (in place)."""
        data.setdefault('mba_ctrl', {})['enabled'] = enabled
class CapsRdtIface(Resource):
    """
    Handles /caps/rdt_iface HTTP requests
    """

    @staticmethod
    @auth.login_required
    def get():
        """
        Handles HTTP /caps/rdt_iface request.
        Retrieve RDT current and supported interface types

        Returns:
            response, status code
        """
        res = {
            'interface': common.PQOS_API.current_iface(),
            'interface_supported': common.PQOS_API.supported_iface()
        }
        return res, 200

    @staticmethod
    @auth.login_required
    def put():
        """
        Handles PUT /caps/rdt_iface request.
        Switches the RDT interface after validating the request.
        Raises BadRequest, InternalError

        Returns:
            response, status code
        """
        json_data = request.get_json()

        # validate request against the JSON schema
        try:
            schema, resolver = ConfigStore.load_json_schema('modify_rdt_iface.json')
            jsonschema.validate(json_data, schema, resolver=resolver)
        except jsonschema.ValidationError as error:
            raise BadRequest("Request validation failed - %s" % (str(error)))

        # Only interfaces reported by the PQOS library may be selected.
        if not json_data['interface'] in common.PQOS_API.supported_iface():
            raise BadRequest("RDT interface '%s' not supported!" % (json_data['interface']))

        # Interface switches are only allowed on a pool-free configuration.
        if common.CONFIG_STORE.is_any_pool_defined():
            return {'message': "Please remove all Pools first!"}, 409

        data = deepcopy(common.CONFIG_STORE.get_config())
        if 'rdt_iface' not in data:
            data['rdt_iface'] = {}
        data['rdt_iface']['interface'] = json_data['interface']
        # MBA CTRL is explicitly disabled whenever the interface changes.
        CapsMbaCtrl.set_mba_ctrl_enabled(data, False)
        common.CONFIG_STORE.set_config(data)

        res = {'message': "RDT Interface modified"}
return res, 200 | appqos/rest/rest_rdt.py | from copy import deepcopy
from flask_restful import Resource, request
import jsonschema
import caps
import common
from rest.rest_exceptions import BadRequest, InternalError
from rest.rest_auth import auth
from config import ConfigStore
class CapsMba(Resource):
"""
Handles /caps/mba requests
"""
@staticmethod
@auth.login_required
def get():
"""
Handles GET /caps/mba request.
Returns:
response, status code
"""
res = {
'mba_enabled': not caps.mba_bw_enabled(),
'mba_bw_enabled': caps.mba_bw_enabled()
}
return res, 200
class CapsMbaCtrl(Resource):
"""
Handles /caps/mba_ctrl HTTP requests
"""
@staticmethod
@auth.login_required
def get():
"""
Handles HTTP /caps/mba_ctrl request.
Retrieve MBA CTRL capability and current state details
Returns:
response, status code
"""
res = {
'supported': caps.mba_bw_supported(),
'enabled': caps.mba_bw_enabled()
}
return res, 200
@staticmethod
@auth.login_required
def put():
"""
Handles PUT /caps/mba_ctrl request.
Raises BadRequest, InternalError
Returns:
response, status code
"""
json_data = request.get_json()
# validate request
try:
schema, resolver = ConfigStore.load_json_schema('modify_mba_ctrl.json')
jsonschema.validate(json_data, schema, resolver=resolver)
except jsonschema.ValidationError as error:
raise BadRequest("Request validation failed - %s" % (str(error)))
if not caps.mba_bw_supported():
return {'message': "MBA CTRL not supported!"}, 409
if common.CONFIG_STORE.is_any_pool_defined():
return {'message': "Please remove all Pools first!"}, 409
data = deepcopy(common.CONFIG_STORE.get_config())
CapsMbaCtrl.set_mba_ctrl_enabled(data, json_data['enabled'])
common.CONFIG_STORE.set_config(data)
return {'message': "MBA CTRL status changed."}, 200
@staticmethod
def set_mba_ctrl_enabled(data, enabled):
if 'mba_ctrl' not in data:
data['mba_ctrl'] = {}
data['mba_ctrl']['enabled'] = enabled
class CapsRdtIface(Resource):
"""
Handles /caps/rdt_iface HTTP requests
"""
@staticmethod
@auth.login_required
def get():
"""
Handles HTTP /caps/rdt_iface request.
Retrieve RDT current and supported interface types
Returns:
response, status code
"""
res = {
'interface': common.PQOS_API.current_iface(),
'interface_supported': common.PQOS_API.supported_iface()
}
return res, 200
@staticmethod
@auth.login_required
def put():
"""
Handles PUT /caps/rdt_iface request.
Raises BadRequest, InternalError
Returns:
response, status code
"""
json_data = request.get_json()
# validate request
try:
schema, resolver = ConfigStore.load_json_schema('modify_rdt_iface.json')
jsonschema.validate(json_data, schema, resolver=resolver)
except jsonschema.ValidationError as error:
raise BadRequest("Request validation failed - %s" % (str(error)))
if not json_data['interface'] in common.PQOS_API.supported_iface():
raise BadRequest("RDT interface '%s' not supported!" % (json_data['interface']))
if common.CONFIG_STORE.is_any_pool_defined():
return {'message': "Please remove all Pools first!"}, 409
data = deepcopy(common.CONFIG_STORE.get_config())
if 'rdt_iface' not in data:
data['rdt_iface'] = {}
data['rdt_iface']['interface'] = json_data['interface']
CapsMbaCtrl.set_mba_ctrl_enabled(data, False)
common.CONFIG_STORE.set_config(data)
res = {'message': "RDT Interface modified"}
return res, 200 | 0.439747 | 0.060919 |
# # Create figures for talk
#
# Generate figures for talk using stored variables from simulation experiments
# In[ ]:
# Reload imported modules automatically so edits to helper code take effect
# without restarting the kernel (this script was exported from a notebook).
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')

import os
import sys
import glob
import pickle
import pandas as pd
import numpy as np

from plotnine import (ggplot,
                      labs,
                      geom_line,
                      geom_point,
                      geom_errorbar,
                      aes,
                      ggsave,
                      theme_bw,
                      theme,
                      scale_color_manual,
                      element_blank,
                      element_text,
                      element_rect,
                      element_line)

# Make the repository root importable so `functions.utils` resolves.
sys.path.append("../../")
from functions import utils

import warnings
warnings.filterwarnings(action='ignore')

# Fix the RNG so regenerated figures are reproducible.
from numpy.random import seed
randomState = 123
seed(randomState)
# In[ ]:
# Read in config variables
config_file = os.path.abspath(os.path.join(os.getcwd(),"../../configs", "config_Pa_experiment.tsv"))
params = utils.read_config(config_file)
# In[ ]:
# Load parameters
local_dir = params["local_dir"]
dataset_name = params["dataset_name"]
analysis_name = params["analysis_name"]
correction_method = params["correction_method"]
# In[2]:
# File directories
base_dir = os.path.abspath(
os.path.join(
os.getcwd(), "../.."))
similarity_uncorrected_file = os.path.join(
base_dir,
"results",
"saved_variables",
dataset_name +"_experiment_lvl_sim_similarity_uncorrected_"+correction_method+".pickle")
ci_uncorrected_file = os.path.join(
base_dir,
"results",
"saved_variables",
dataset_name +"_experiment_lvl_sim_ci_uncorrected_"+correction_method+".pickle")
similarity_corrected_file = os.path.join(
base_dir,
"results",
"saved_variables",
dataset_name +"_experiment_lvl_sim_similarity_corrected_"+correction_method+".pickle")
ci_corrected_file = os.path.join(
base_dir,
"results",
"saved_variables",
dataset_name +"_experiment_lvl_sim_ci_corrected_"+correction_method+".pickle")
permuted_score_file = os.path.join(
base_dir,
"results",
"saved_variables",
dataset_name +"_experiment_lvl_sim_permuted.npy")
# In[3]:
# Output files
svcca_uncorrected_file = os.path.join(
local_dir,
"output",
"talk figures",
dataset_name +"_experiment_lvl_sim_svcca_uncorrected_"+correction_method+".png")
svcca_uncorrected_blk_file = os.path.join(
local_dir,
"output",
"talk figures",
dataset_name +"_experiment_lvl_sim_svcca_uncorrected_"+correction_method+"_blk.png")
svcca_overlay_file = os.path.join(
local_dir,
"output",
"talk figures",
dataset_name +"_experiment_lvl_sim_svcca_overlay_"+correction_method+".png")
svcca_overlay_blk_file = os.path.join(
local_dir,
"output",
"talk figures",
dataset_name +"_experiment_lvl_sim_svcca_overlay_"+correction_method+"_blk.png")
svcca_overlay_long_file = os.path.join(
local_dir,
"output",
"talk figures",
dataset_name +"_experiment_lvl_sim_svcca_overlay_"+correction_method+"_long.png")
svcca_overlay_long_blk_file = os.path.join(
local_dir,
"output",
"talk figures",
dataset_name +"_experiment_lvl_sim_svcca_overlay_"+correction_method+"_long_blk.png")
# In[4]:
# Load pickled files
uncorrected_svcca = pd.read_pickle(similarity_uncorrected_file)
err_uncorrected_svcca = pd.read_pickle(ci_uncorrected_file)
corrected_svcca = pd.read_pickle(similarity_corrected_file)
err_corrected_svcca = pd.read_pickle(ci_corrected_file)
permuted_score = np.load(permuted_score_file)
# In[5]:
# Concatenate error bars
uncorrected_svcca_err = pd.concat([uncorrected_svcca, err_uncorrected_svcca], axis=1)
corrected_svcca_err = pd.concat([corrected_svcca, err_corrected_svcca], axis=1)
# In[6]:
# Add group label
uncorrected_svcca_err['Group'] = 'uncorrected'
corrected_svcca_err['Group'] = 'corrected'
# In[7]:
# Concatenate dataframes
all_svcca = pd.concat([uncorrected_svcca_err, corrected_svcca_err])
all_svcca
# In[8]:
# Plot - uncorrected only
lst_num_experiments = list(all_svcca.index[0:int(len(all_svcca.index)/2)])
threshold = pd.DataFrame(
pd.np.tile(
permuted_score,
(len(lst_num_experiments), 1)),
index=lst_num_experiments,
columns=['score'])
g = ggplot(all_svcca[all_svcca['Group'] == 'uncorrected']) + geom_line(all_svcca[all_svcca['Group'] == 'uncorrected'],
aes(x=lst_num_experiments, y='score', color='Group'),
size=1.5) \
+ geom_point(aes(x=lst_num_experiments, y='score'),
color ='darkgrey',
size=0.5) \
+ geom_errorbar(all_svcca[all_svcca['Group'] == 'uncorrected'],
aes(x=lst_num_experiments, ymin='ymin', ymax='ymax'),
color='darkgrey') \
+ geom_line(threshold,
aes(x=lst_num_experiments, y='score'),
linetype='dashed',
size=1,
color="darkgrey",
show_legend=False) \
+ labs(x = "Number of Partitions",
y = "Similarity score (SVCCA)",
title = "Similarity across varying numbers of partitions") \
+ theme(plot_title=element_text(weight='bold'),
plot_background=element_rect(fill="white"),
panel_background=element_rect(fill="white"),
panel_grid_major_x=element_line(color="lightgrey"),
panel_grid_major_y=element_line(color="lightgrey"),
axis_line=element_line(color="grey"),
legend_key=element_rect(fill='white', colour='white')
) \
+ scale_color_manual(['#b3e5fc']) \
print(g)
ggsave(plot=g, filename=svcca_uncorrected_file, dpi=300)
# In[9]:
# Plot - black
lst_num_experiments = list(all_svcca.index[0:int(len(all_svcca.index)/2)])
threshold = pd.DataFrame(
pd.np.tile(
permuted_score,
(len(lst_num_experiments), 1)),
index=lst_num_experiments,
columns=['score'])
g = ggplot(all_svcca[all_svcca['Group'] == 'uncorrected']) + geom_line(all_svcca[all_svcca['Group'] == 'uncorrected'],
aes(x=lst_num_experiments, y='score', color='Group'),
size=1.5) \
+ geom_point(aes(x=lst_num_experiments, y='score'),
color ='darkgrey',
size=0.5) \
+ geom_errorbar(all_svcca[all_svcca['Group'] == 'uncorrected'],
aes(x=lst_num_experiments, ymin='ymin', ymax='ymax'),
color='darkgrey') \
+ geom_line(threshold,
aes(x=lst_num_experiments, y='score'),
linetype='dashed',
size=1,
color="darkgrey",
show_legend=False) \
+ labs(x = "Number of Partitions",
y = "Similarity score (SVCCA)",
title = "Similarity across varying numbers of partitions") \
+ theme(plot_background=element_rect(fill='black'),
legend_title_align = "center",
legend_background=element_rect(fill='black', colour='black'),
legend_key=element_rect(fill='black', colour='black'),
legend_title=element_text(colour="white"),
legend_text=element_text(colour="white"),
plot_title=element_text(weight='bold', colour="white"),
panel_background=element_rect(fill="black"),
axis_line=element_line(color="white"),
axis_text=element_text(color="white"),
axis_title=element_text(color="white"),
panel_grid_major_x=element_line(color="lightgrey"),
panel_grid_major_y=element_line(color="lightgrey"),
strip_text=element_text(colour="white"),
strip_background=element_blank()
) \
+ scale_color_manual(['#b3e5fc']) \
print(g)
ggsave(plot=g, filename=svcca_uncorrected_blk_file, dpi=300)
# In[10]:
# Plot
lst_num_experiments = list(all_svcca.index)
threshold = pd.DataFrame(
pd.np.tile(
permuted_score,
(len(lst_num_experiments), 1)),
index=lst_num_experiments,
columns=['score'])
g = ggplot(all_svcca) + geom_line(all_svcca,
aes(x=lst_num_experiments, y='score', color='Group'),
size=1.5) \
+ geom_point(aes(x=lst_num_experiments, y='score'),
color ='darkgrey',
size=0.5) \
+ geom_errorbar(all_svcca,
aes(x=lst_num_experiments, ymin='ymin', ymax='ymax'),
color='darkgrey') \
+ geom_line(threshold,
aes(x=lst_num_experiments, y='score'),
linetype='dashed',
size=1,
color="darkgrey",
show_legend=False) \
+ labs(x = "Number of Partitions",
y = "Similarity score (SVCCA)",
title = "Similarity across varying numbers of partitions") \
+ theme(plot_title=element_text(weight='bold'),
plot_background=element_rect(fill="white"),
panel_background=element_rect(fill="white"),
panel_grid_major_x=element_line(color="lightgrey"),
panel_grid_major_y=element_line(color="lightgrey"),
axis_line=element_line(color="grey"),
legend_key=element_rect(fill='white', colour='white')
) \
+ scale_color_manual(['#1976d2', '#b3e5fc']) \
print(g)
ggsave(plot=g, filename=svcca_overlay_file, dpi=300)
# In[11]:
# Plot - black
lst_num_experiments = list(all_svcca.index)
threshold = pd.DataFrame(
pd.np.tile(
permuted_score,
(len(lst_num_experiments), 1)),
index=lst_num_experiments,
columns=['score'])
g = ggplot(all_svcca) + geom_line(all_svcca,
aes(x=lst_num_experiments, y='score', color='Group'),
size=1.5) \
+ geom_point(aes(x=lst_num_experiments, y='score'),
color ='darkgrey',
size=0.5) \
+ geom_errorbar(all_svcca,
aes(x=lst_num_experiments, ymin='ymin', ymax='ymax'),
color='darkgrey') \
+ geom_line(threshold,
aes(x=lst_num_experiments, y='score'),
linetype='dashed',
size=1,
color="lightgrey",
show_legend=False) \
+ labs(x = "Number of Partitions",
y = "Similarity score (SVCCA)",
title = "Similarity across varying numbers of partitions") \
+ theme(plot_background=element_rect(fill='black'),
legend_title_align = "center",
legend_background=element_rect(fill='black', colour='black'),
legend_key=element_rect(fill='black', colour='black'),
legend_title=element_text(colour="white"),
legend_text=element_text(colour="white"),
plot_title=element_text(weight='bold', colour="white"),
panel_background=element_rect(fill="black"),
axis_line=element_line(color="white"),
axis_text=element_text(color="white"),
axis_title=element_text(color="white"),
panel_grid_major_x=element_line(color="lightgrey"),
panel_grid_major_y=element_line(color="lightgrey"),
strip_text=element_text(colour="white"),
strip_background=element_blank()
) \
+ scale_color_manual(['#1976d2', '#b3e5fc']) \
print(g)
ggsave(plot=g, filename=svcca_overlay_blk_file, dpi=300)
# In[12]:
# Plot - elongated
lst_num_experiments = list(all_svcca.index)
threshold = pd.DataFrame(
pd.np.tile(
permuted_score,
(len(lst_num_experiments), 1)),
index=lst_num_experiments,
columns=['score'])
g = ggplot(all_svcca) + geom_line(all_svcca,
aes(x=lst_num_experiments, y='score', color='Group'),
size=1) \
+ geom_point(aes(x=lst_num_experiments, y='score'),
color ='darkgrey',
size=0.5) \
+ geom_errorbar(all_svcca,
aes(x=lst_num_experiments, ymin='ymin', ymax='ymax'),
color='darkgrey') \
+ geom_line(threshold,
aes(x=lst_num_experiments, y='score'),
linetype='dashed',
size=1,
color="darkgrey",
show_legend=False) \
+ labs(x = "Sources of technical variation",
y = "Similarity score (SVCCA)",
title = "Similarity across varying numbers of partitions") \
+ theme(plot_title=element_text(weight='bold'),
plot_background=element_rect(fill="white"),
panel_background=element_rect(fill="white"),
panel_grid_major_x=element_line(color="lightgrey"),
panel_grid_major_y=element_line(color="lightgrey"),
axis_line=element_line(color="grey"),
legend_key=element_rect(fill='white', colour='white'),
aspect_ratio=0.4
) \
+ scale_color_manual(['#1976d2', '#b3e5fc']) \
print(g)
ggsave(plot=g, filename=svcca_overlay_long_file, dpi=300)
# In[13]:
# Plot - black
lst_num_experiments = list(all_svcca.index)
threshold = pd.DataFrame(
pd.np.tile(
permuted_score,
(len(lst_num_experiments), 1)),
index=lst_num_experiments,
columns=['score'])
g = ggplot(all_svcca) + geom_line(all_svcca,
aes(x=lst_num_experiments, y='score', color='Group'),
size=1) \
+ geom_point(aes(x=lst_num_experiments, y='score'),
color ='darkgrey',
size=0.5) \
+ geom_errorbar(all_svcca,
aes(x=lst_num_experiments, ymin='ymin', ymax='ymax'),
color='darkgrey') \
+ geom_line(threshold,
aes(x=lst_num_experiments, y='score'),
linetype='dashed',
size=1,
color="lightgrey",
show_legend=False) \
+ labs(x = "Sources of technical variation",
y = "Similarity score (SVCCA)",
title = "Similarity across varying numbers of partitions") \
+ theme(plot_background=element_rect(fill='black'),
legend_title_align = "center",
legend_background=element_rect(fill='black', colour='black'),
legend_key=element_rect(fill='black', colour='black'),
legend_title=element_text(colour="white"),
legend_text=element_text(colour="white"),
plot_title=element_text(weight='bold', colour="white"),
panel_background=element_rect(fill="black"),
axis_line=element_line(color="white"),
axis_text=element_text(color="white"),
axis_title=element_text(color="white"),
panel_grid_major_x=element_line(color="lightgrey"),
panel_grid_major_y=element_line(color="lightgrey"),
strip_text=element_text(colour="white"),
aspect_ratio=0.4,
strip_background=element_blank()
) \
+ scale_color_manual(['#1976d2', '#b3e5fc']) \
print(g)
ggsave(plot=g, filename=svcca_overlay_long_blk_file, dpi=300) | archive/scripts/Pa_experiment_lvl_sim/nbconverted/4_create_figs_talk.py |
# # Create figures for talk
#
# Generate figures for talk using stored variables from simulation experiments
# In[ ]:
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')

import os
import sys
import glob
import pickle
import pandas as pd
import numpy as np
from plotnine import (ggplot,
                      labs,
                      geom_line,
                      geom_point,
                      geom_errorbar,
                      aes,
                      ggsave,
                      theme_bw,
                      theme,
                      scale_color_manual,
                      element_blank,
                      element_text,
                      element_rect,
                      element_line)

sys.path.append("../../")
from functions import utils

import warnings
warnings.filterwarnings(action='ignore')

from numpy.random import seed
randomState = 123
seed(randomState)

# Read in config variables
config_file = os.path.abspath(os.path.join(os.getcwd(), "../../configs", "config_Pa_experiment.tsv"))
params = utils.read_config(config_file)

# Load parameters
local_dir = params["local_dir"]
dataset_name = params["dataset_name"]
analysis_name = params["analysis_name"]
correction_method = params["correction_method"]

# File directories
base_dir = os.path.abspath(
    os.path.join(
        os.getcwd(), "../.."))


def _saved_variable_file(suffix):
    """Path of a pickled simulation result under results/saved_variables."""
    return os.path.join(
        base_dir,
        "results",
        "saved_variables",
        dataset_name + "_experiment_lvl_sim_" + suffix)


def _figure_file(suffix):
    """Output path of a talk figure under <local_dir>/output/talk figures."""
    return os.path.join(
        local_dir,
        "output",
        "talk figures",
        dataset_name + "_experiment_lvl_sim_" + suffix)


# Input files
similarity_uncorrected_file = _saved_variable_file("similarity_uncorrected_" + correction_method + ".pickle")
ci_uncorrected_file = _saved_variable_file("ci_uncorrected_" + correction_method + ".pickle")
similarity_corrected_file = _saved_variable_file("similarity_corrected_" + correction_method + ".pickle")
ci_corrected_file = _saved_variable_file("ci_corrected_" + correction_method + ".pickle")
permuted_score_file = _saved_variable_file("permuted.npy")

# Output files
svcca_uncorrected_file = _figure_file("svcca_uncorrected_" + correction_method + ".png")
svcca_uncorrected_blk_file = _figure_file("svcca_uncorrected_" + correction_method + "_blk.png")
svcca_overlay_file = _figure_file("svcca_overlay_" + correction_method + ".png")
svcca_overlay_blk_file = _figure_file("svcca_overlay_" + correction_method + "_blk.png")
svcca_overlay_long_file = _figure_file("svcca_overlay_" + correction_method + "_long.png")
svcca_overlay_long_blk_file = _figure_file("svcca_overlay_" + correction_method + "_long_blk.png")

# Load pickled files
uncorrected_svcca = pd.read_pickle(similarity_uncorrected_file)
err_uncorrected_svcca = pd.read_pickle(ci_uncorrected_file)
corrected_svcca = pd.read_pickle(similarity_corrected_file)
err_corrected_svcca = pd.read_pickle(ci_corrected_file)
permuted_score = np.load(permuted_score_file)

# Attach the confidence-interval columns (ymin/ymax) to the scores and
# label the two groups before stacking them into one frame.
uncorrected_svcca_err = pd.concat([uncorrected_svcca, err_uncorrected_svcca], axis=1)
corrected_svcca_err = pd.concat([corrected_svcca, err_corrected_svcca], axis=1)
uncorrected_svcca_err['Group'] = 'uncorrected'
corrected_svcca_err['Group'] = 'corrected'
all_svcca = pd.concat([uncorrected_svcca_err, corrected_svcca_err])


def _make_threshold(index):
    """Tile the permuted (null) score across every x position for the dashed baseline."""
    # NOTE: the original used the `pd.np` alias, which was removed in pandas 1.0;
    # use numpy directly.
    return pd.DataFrame(
        np.tile(permuted_score, (len(index), 1)),
        index=index,
        columns=['score'])


def _theme_kwargs(dark, aspect_ratio):
    """Return theme() settings for a light (default) or dark figure background."""
    if dark:
        kwargs = dict(
            plot_background=element_rect(fill='black'),
            legend_title_align="center",
            legend_background=element_rect(fill='black', colour='black'),
            legend_key=element_rect(fill='black', colour='black'),
            legend_title=element_text(colour="white"),
            legend_text=element_text(colour="white"),
            plot_title=element_text(weight='bold', colour="white"),
            panel_background=element_rect(fill="black"),
            axis_line=element_line(color="white"),
            axis_text=element_text(color="white"),
            axis_title=element_text(color="white"),
            panel_grid_major_x=element_line(color="lightgrey"),
            panel_grid_major_y=element_line(color="lightgrey"),
            strip_text=element_text(colour="white"),
            strip_background=element_blank())
    else:
        kwargs = dict(
            plot_title=element_text(weight='bold'),
            plot_background=element_rect(fill="white"),
            panel_background=element_rect(fill="white"),
            panel_grid_major_x=element_line(color="lightgrey"),
            panel_grid_major_y=element_line(color="lightgrey"),
            axis_line=element_line(color="grey"),
            legend_key=element_rect(fill='white', colour='white'))
    if aspect_ratio is not None:
        kwargs['aspect_ratio'] = aspect_ratio
    return kwargs


def _plot_svcca(data, x_values, x_label, palette, out_file,
                line_size=1.5, dark=False, threshold_color="darkgrey",
                aspect_ratio=None):
    """
    Draw one SVCCA similarity figure and save it to *out_file*.

    data            frame with 'score', 'ymin', 'ymax' and 'Group' columns
    x_values        list of x positions (one per row of *data* per group)
    x_label         x axis label
    palette         list of line colors, one per group plotted
    out_file        destination PNG path
    line_size       width of the score lines
    dark            use the black (slide) background theme
    threshold_color color of the dashed permuted-score baseline
    aspect_ratio    optional panel aspect ratio (elongated variants)
    """
    baseline = _make_threshold(x_values)
    g = (ggplot(data)
         + geom_line(data,
                     aes(x=x_values, y='score', color='Group'),
                     size=line_size)
         + geom_point(aes(x=x_values, y='score'),
                      color='darkgrey',
                      size=0.5)
         + geom_errorbar(data,
                         aes(x=x_values, ymin='ymin', ymax='ymax'),
                         color='darkgrey')
         + geom_line(baseline,
                     aes(x=x_values, y='score'),
                     linetype='dashed',
                     size=1,
                     color=threshold_color,
                     show_legend=False)
         + labs(x=x_label,
                y="Similarity score (SVCCA)",
                title="Similarity across varying numbers of partitions")
         + theme(**_theme_kwargs(dark, aspect_ratio))
         + scale_color_manual(palette))
    print(g)
    ggsave(plot=g, filename=out_file, dpi=300)


# Subsets and x positions shared by the figures below.  The uncorrected-only
# plots use the first half of the stacked index (one group's worth of rows).
uncorrected_only = all_svcca[all_svcca['Group'] == 'uncorrected']
half_index = list(all_svcca.index[0:int(len(all_svcca.index) / 2)])
full_index = list(all_svcca.index)

# Uncorrected data only - white and black backgrounds
_plot_svcca(uncorrected_only, half_index, "Number of Partitions",
            ['#b3e5fc'], svcca_uncorrected_file)
_plot_svcca(uncorrected_only, half_index, "Number of Partitions",
            ['#b3e5fc'], svcca_uncorrected_blk_file, dark=True)

# Uncorrected vs corrected overlay - white and black backgrounds
_plot_svcca(all_svcca, full_index, "Number of Partitions",
            ['#1976d2', '#b3e5fc'], svcca_overlay_file)
_plot_svcca(all_svcca, full_index, "Number of Partitions",
            ['#1976d2', '#b3e5fc'], svcca_overlay_blk_file,
            dark=True, threshold_color="lightgrey")

# Elongated overlay for slides - white and black backgrounds
_plot_svcca(all_svcca, full_index, "Sources of technical variation",
            ['#1976d2', '#b3e5fc'], svcca_overlay_long_file,
            line_size=1, aspect_ratio=0.4)
_plot_svcca(all_svcca, full_index, "Sources of technical variation",
            ['#1976d2', '#b3e5fc'], svcca_overlay_long_blk_file,
            line_size=1, dark=True, threshold_color="lightgrey",
            aspect_ratio=0.4)
from operator import attrgetter
import random
from bs4 import BeautifulSoup
from django.utils.translation import ugettext as _
from natsort import natsorted
from reportlab.lib import colors
from reportlab.lib.units import cm
from reportlab.platypus import PageBreak, Paragraph, Spacer, Table, TableStyle
from openslides.config.api import config
from openslides.participant.models import Group, User
from openslides.utils.pdf import stylesheet
from .models import Category, Motion
# Needed to count the delegates
# TODO: find another way to do this.
def motions_to_pdf(pdf):
    """
    Create a PDF with all motions.

    Appends a cover page, then one page-broken section per motion,
    naturally sorted by motion identifier.
    """
    sorted_motions = natsorted(Motion.objects.all(), key=attrgetter('identifier'))
    all_motion_cover(pdf, sorted_motions)
    for single_motion in sorted_motions:
        pdf.append(PageBreak())
        motion_to_pdf(pdf, single_motion)
def motion_to_pdf(pdf, motion):
    """
    Create a PDF for one motion.

    Appends a heading, a two-column metadata table (submitters, optional
    signature field, supporters, state, version and vote results) and the
    motion title, text and reason to the ``pdf`` element list.
    Returns the ``pdf`` list.
    """
    identifier = ""
    if motion.identifier:
        # Leading space so the heading reads "Motion <id>: <title>".
        identifier = " %s" % motion.identifier
    pdf.append(Paragraph(_("Motion") + "%s: %s" % (identifier, motion.title), stylesheet['Heading1']))
    # Rows of the metadata table; each row is [label cell, value cell].
    motion_data = []
    # submitter
    cell1a = []
    cell1a.append(Spacer(0, 0.2 * cm))
    cell1a.append(Paragraph("<font name='Ubuntu-Bold'>%s:</font>" % _("Submitter"),
                            stylesheet['Heading4']))
    cell1b = []
    cell1b.append(Spacer(0, 0.2 * cm))
    for submitter in motion.submitter.all():
        cell1b.append(Paragraph(unicode(submitter), stylesheet['Normal']))
    motion_data.append([cell1a, cell1b])

    # TODO: choose this in workflow
    # Signature line is only printed while submitters may still edit.
    if motion.state.allow_submitter_edit:
        # Cell for the signature
        cell2a = []
        cell2b = []
        cell2a.append(Paragraph("<font name='Ubuntu-Bold'>%s:</font>" %
                                _("Signature"), stylesheet['Heading4']))
        cell2b.append(Paragraph(42 * "_", stylesheet['Signaturefield']))
        cell2b.append(Spacer(0, 0.1 * cm))
        cell2b.append(Spacer(0, 0.2 * cm))
        motion_data.append([cell2a, cell2b])

    # supporters (only when the system requires a minimum supporter count)
    if config['motion_min_supporters']:
        cell3a = []
        cell3b = []
        cell3a.append(Paragraph("<font name='Ubuntu-Bold'>%s:</font><seqreset id='counter'>"
                                % _("Supporters"), stylesheet['Heading4']))
        supporters = motion.supporter.all()
        for supporter in supporters:
            cell3b.append(Paragraph("<seq id='counter'/>.&nbsp; %s" % unicode(supporter),
                                    stylesheet['Normal']))
        cell3b.append(Spacer(0, 0.2 * cm))
        motion_data.append([cell3a, cell3b])

    # Motion state
    cell4a = []
    cell4b = []
    cell4a.append(Paragraph("<font name='Ubuntu-Bold'>%s:</font>" % _("State"),
                            stylesheet['Heading4']))
    cell4b.append(Paragraph(_(motion.state.name), stylesheet['Normal']))
    motion_data.append([cell4a, cell4b])

    # Version number (only shown when more than one version exists)
    if motion.versions.count() > 1:
        version = motion.get_active_version()
        cell5a = []
        cell5b = []
        cell5a.append(Paragraph("<font name='Ubuntu-Bold'>%s:</font>" % _("Version"),
                                stylesheet['Heading4']))
        cell5b.append(Paragraph("%s" % version.version_number, stylesheet['Normal']))
        motion_data.append([cell5a, cell5b])

    # voting results; polls without any votes are skipped
    polls = []
    for poll in motion.polls.all():
        if not poll.has_votes():
            continue
        polls.append(poll)

    if polls:
        cell6a = []
        cell6b = []
        cell6a.append(Paragraph("<font name='Ubuntu-Bold'>%s:</font>" %
                                _("Vote results"), stylesheet['Heading4']))
        ballotcounter = 0
        for poll in polls:
            ballotcounter += 1
            option = poll.get_options()[0]
            yes, no, abstain = (option['Yes'], option['No'], option['Abstain'])
            valid, invalid, votescast = ('', '', '')

            # Only print totals that were actually recorded for this poll.
            if poll.votesvalid is not None:
                valid = "<br/>%s: %s" % (_("Valid votes"), poll.print_votesvalid())
            if poll.votesinvalid is not None:
                invalid = "<br/>%s: %s" % (_("Invalid votes"), poll.print_votesinvalid())
            if poll.votescast is not None:
                votescast = "<br/>%s: %s" % (_("Votes cast"), poll.print_votescast())

            # Number the ballots only when there is more than one poll.
            if len(polls) > 1:
                cell6b.append(Paragraph("%s. %s" % (ballotcounter, _("Vote")),
                                        stylesheet['Bold']))
            cell6b.append(Paragraph(
                "%s: %s <br/> %s: %s <br/> %s: %s <br/> %s %s %s" %
                (_("Yes"), yes, _("No"), no, _("Abstention"), abstain, valid, invalid, votescast),
                stylesheet['Normal']))
            cell6b.append(Spacer(0, 0.2 * cm))
        motion_data.append([cell6a, cell6b])

    # Creating Table
    table = Table(motion_data)
    # Fixed column widths: 4.5 cm labels, 11 cm values.
    table._argW[0] = 4.5 * cm
    table._argW[1] = 11 * cm
    table.setStyle(TableStyle([('BOX', (0, 0), (-1, -1), 1, colors.black),
                               ('VALIGN', (0, 0), (-1, -1), 'TOP')]))
    pdf.append(table)
    pdf.append(Spacer(0, 1 * cm))

    # motion title
    pdf.append(Paragraph(motion.title, stylesheet['Heading3']))

    # motion text (HTML converted to reportlab flowables)
    convert_html_to_reportlab(pdf, motion.text)
    pdf.append(Spacer(0, 1 * cm))

    # motion reason
    if motion.reason:
        pdf.append(Paragraph(_("Reason") + ":", stylesheet['Heading3']))
        convert_html_to_reportlab(pdf, motion.reason)
    return pdf
def convert_html_to_reportlab(pdf, text):
    """
    Convert the HTML string ``text`` into reportlab paragraphs and
    append them to the ``pdf`` element list.

    List markup (ul/ol/li) is rewritten into reportlab's <para>/<bullet>
    tags, unsupported tags are unwrapped, and each remaining top-level
    paragraph is appended with a style matching its tag. When
    'motion_pdf_paragraph_numbering' is enabled, normal and <pre>
    paragraphs get a running paragraph number.
    """
    # parsing and replacing not supported html tags for reportlab...
    soup = BeautifulSoup(text)

    # read all list elements...
    for element in soup.find_all('li'):
        # ... and replace ul list elements with <para><bullet>&bull;</bullet>...<para>
        if element.parent.name == "ul":
            # nested lists: hoist the inner items before their parent item
            if element.ul or element.ol:
                for i in element.find_all('li'):
                    element.insert_before(i)
                element.clear()
            else:
                element.name = "para"
                bullet_tag = soup.new_tag("bullet")
                bullet_tag.string = u"•"
                element.insert(0, bullet_tag)
        # ... and replace ol list elements with <para><bullet><seq id="%id"></seq>.</bullet>...</para>
        if element.parent.name == "ol":
            # set list id if element is the first of numbered list; siblings
            # reuse the same id so reportlab's <seq> counter continues.
            # (renamed from `id` to avoid shadowing the builtin)
            if not element.find_previous_sibling():
                seq_id = random.randrange(0, 101)
            # nested lists: hoist the inner items before their parent item
            if element.ul or element.ol:
                for i in element.find_all('li'):
                    element.insert_before(i)
                element.clear()
            else:
                element.name = "para"
                element.insert(0, soup.new_tag("bullet"))
                element.bullet.insert(0, soup.new_tag("seq"))
                element.bullet.seq['id'] = seq_id
                element.bullet.insert(1, ".")

    # remove tags which are not supported by reportlab (replace tags with their children tags)
    for tag in soup.find_all('ul'):
        tag.unwrap()
    for tag in soup.find_all('ol'):
        tag.unwrap()
    for tag in soup.find_all('li'):
        tag.unwrap()
    for tag in soup.find_all('span'):
        tag.unwrap()

    # print paragraphs with numbers
    text = soup.body.contents
    paragraph_number = 1
    for paragraph in text:
        paragraph = unicode(paragraph)
        # ignore empty paragraphs (created by newlines/tabs of ckeditor)
        if paragraph in ('\n', '\n\n', '\n\t'):
            continue
        if "<pre>" in paragraph:
            # preserve line breaks and spacing inside preformatted blocks
            txt = paragraph.replace('\n', '<br/>').replace(' ', '&nbsp;')
            if config["motion_pdf_paragraph_numbering"]:
                pdf.append(Paragraph(txt, stylesheet['InnerMonotypeParagraph'], str(paragraph_number)))
                paragraph_number += 1
            else:
                pdf.append(Paragraph(txt, stylesheet['InnerMonotypeParagraph']))
        elif "<para>" in paragraph:
            pdf.append(Paragraph(paragraph, stylesheet['InnerListParagraph']))
        elif "<seqreset" in paragraph:
            # counter reset markers are handled by reportlab, not printed
            pass
        elif "<h1>" in paragraph:
            pdf.append(Paragraph(paragraph, stylesheet['InnerH1Paragraph']))
        elif "<h2>" in paragraph:
            pdf.append(Paragraph(paragraph, stylesheet['InnerH2Paragraph']))
        elif "<h3>" in paragraph:
            pdf.append(Paragraph(paragraph, stylesheet['InnerH3Paragraph']))
        else:
            if config["motion_pdf_paragraph_numbering"]:
                pdf.append(Paragraph(paragraph, stylesheet['InnerParagraph'], str(paragraph_number)))
                paragraph_number += 1
            else:
                pdf.append(Paragraph(paragraph, stylesheet['InnerParagraph']))
def all_motion_cover(pdf, motions):
    """
    Create a coverpage for all motions.

    Prints the configured title and preamble, a category overview (with a
    page break after it, when categories exist) and the list of motions.
    """
    pdf.append(Paragraph(config["motion_pdf_title"], stylesheet['Heading1']))

    preamble = config["motion_pdf_preamble"]
    if preamble:
        pdf.append(Paragraph("%s" % preamble.replace('\r\n', '<br/>'), stylesheet['Paragraph']))
    pdf.append(Spacer(0, 0.75 * cm))

    # list of categories
    category_list = list(Category.objects.all())
    if category_list:
        pdf.append(Paragraph(_("Categories"), stylesheet['Heading2']))
        for category in category_list:
            pdf.append(Paragraph("%s %s" % (category.prefix, category.name), stylesheet['Paragraph']))
        pdf.append(PageBreak())

    # list of motions
    if not motions:
        pdf.append(Paragraph(_("No motions available."), stylesheet['Heading3']))
    else:
        for motion in motions:
            prefix = "%s " % motion.identifier if motion.identifier else ""
            pdf.append(Paragraph("%s %s" % (prefix, motion.title), stylesheet['Heading3']))
def motion_poll_to_pdf(pdf, poll):
    """
    Append printable ballot papers for ``poll`` to the ``pdf`` story list.

    Builds one ballot cell (motion number, title, poll number and the
    Yes/No/Abstention options) and lays it out two-per-row in a table.
    The number of copies comes from the "ballot papers" config values.

    NOTE: this module targets Python 2 (``unicode``/``xrange``); the
    ``number / 2`` below relies on integer division.
    """
    circle = "*"  # = Unicode Character 'HEAVY LARGE CIRCLE' (U+2B55)
    cell = []
    cell.append(Spacer(0, 0.8 * cm))
    cell.append(Paragraph(_("Motion No. %s") % poll.motion.identifier, stylesheet['Ballot_title']))
    cell.append(Paragraph(poll.motion.title, stylesheet['Ballot_subtitle']))
    cell.append(Paragraph(_("%d. Vote") % poll.poll_number, stylesheet['Ballot_description']))
    cell.append(Spacer(0, 0.5 * cm))
    cell.append(Paragraph("<font name='circlefont' size='15'>%s</font> <font name='Ubuntu'>%s</font>"
                          % (circle, unicode(_("Yes"))), stylesheet['Ballot_option']))
    cell.append(Paragraph("<font name='circlefont' size='15'>%s</font> <font name='Ubuntu'>%s</font>"
                          % (circle, unicode(_("No"))), stylesheet['Ballot_option']))
    cell.append(Paragraph("<font name='circlefont' size='15'>%s</font> <font name='Ubuntu'>%s</font>"
                          % (circle, unicode(_("Abstention"))), stylesheet['Ballot_option']))
    data = []
    # get ballot papers config values
    ballot_papers_selection = config["motion_pdf_ballot_papers_selection"]
    ballot_papers_number = config["motion_pdf_ballot_papers_number"]
    # set number of ballot papers
    if ballot_papers_selection == "NUMBER_OF_DELEGATES":
        # TODO: get this number from persons
        # NOTE(review): group pk 3 is assumed to be the delegates group -- confirm.
        try:
            if Group.objects.get(pk=3):
                number = User.objects.filter(groups__pk=3).count()
        except Group.DoesNotExist:
            number = 0
    elif ballot_papers_selection == "NUMBER_OF_ALL_PARTICIPANTS":
        # TODO: get the number from the persons
        number = int(User.objects.count())
    else:  # ballot_papers_selection == "CUSTOM_NUMBER"
        number = int(ballot_papers_number)
    number = max(1, number)
    # print ballot papers
    if number > 0:
        # TODO: try [cell, cell] * (number / 2)
        for user in xrange(number / 2):
            data.append([cell, cell])
        rest = number % 2
        if rest:
            # Odd total: last row has one ballot next to an empty cell.
            data.append([cell, ''])
        t = Table(data, 10.5 * cm, 7.42 * cm)
        t.setStyle(TableStyle(
            [('GRID', (0, 0), (-1, -1), 0.25, colors.grey),
             ('VALIGN', (0, 0), (-1, -1), 'TOP')]))
pdf.append(t) | openslides/motion/pdf.py |
from operator import attrgetter
import random
from bs4 import BeautifulSoup
from django.utils.translation import ugettext as _
from natsort import natsorted
from reportlab.lib import colors
from reportlab.lib.units import cm
from reportlab.platypus import PageBreak, Paragraph, Spacer, Table, TableStyle
from openslides.config.api import config
from openslides.participant.models import Group, User
from openslides.utils.pdf import stylesheet
from .models import Category, Motion
# Needed to count the delegates
# TODO: find another way to do this.
def motions_to_pdf(pdf):
    """Append the cover page and every motion, naturally sorted by identifier, to *pdf*."""
    sorted_motions = natsorted(Motion.objects.all(), key=attrgetter('identifier'))
    all_motion_cover(pdf, sorted_motions)
    for single_motion in sorted_motions:
        pdf.append(PageBreak())
        motion_to_pdf(pdf, single_motion)
def motion_to_pdf(pdf, motion):
    """
    Create a PDF for one motion.

    Appends a heading, a metadata table (submitters, optional signature
    field, supporters, state, version number and vote results), the motion
    text and the optional reason to the ``pdf`` story list.  Returns the
    (mutated) ``pdf`` list.

    NOTE: this module targets Python 2 (uses the ``unicode`` builtin).
    """
    identifier = ""
    if motion.identifier:
        identifier = " %s" % motion.identifier
    pdf.append(Paragraph(_("Motion") + "%s: %s" % (identifier, motion.title), stylesheet['Heading1']))
    motion_data = []
    # submitter
    cell1a = []
    cell1a.append(Spacer(0, 0.2 * cm))
    cell1a.append(Paragraph("<font name='Ubuntu-Bold'>%s:</font>" % _("Submitter"),
                            stylesheet['Heading4']))
    cell1b = []
    cell1b.append(Spacer(0, 0.2 * cm))
    for submitter in motion.submitter.all():
        cell1b.append(Paragraph(unicode(submitter), stylesheet['Normal']))
    motion_data.append([cell1a, cell1b])
    # TODO: choose this in workflow
    if motion.state.allow_submitter_edit:
        # Cell for the signature
        cell2a = []
        cell2b = []
        cell2a.append(Paragraph("<font name='Ubuntu-Bold'>%s:</font>" %
                                _("Signature"), stylesheet['Heading4']))
        cell2b.append(Paragraph(42 * "_", stylesheet['Signaturefield']))
        cell2b.append(Spacer(0, 0.1 * cm))
        cell2b.append(Spacer(0, 0.2 * cm))
        motion_data.append([cell2a, cell2b])
    # supporters
    if config['motion_min_supporters']:
        cell3a = []
        cell3b = []
        # <seqreset> restarts the <seq id='counter'> numbering used below.
        cell3a.append(Paragraph("<font name='Ubuntu-Bold'>%s:</font><seqreset id='counter'>"
                                % _("Supporters"), stylesheet['Heading4']))
        supporters = motion.supporter.all()
        for supporter in supporters:
            cell3b.append(Paragraph("<seq id='counter'/>. %s" % unicode(supporter),
                                    stylesheet['Normal']))
        cell3b.append(Spacer(0, 0.2 * cm))
        motion_data.append([cell3a, cell3b])
    # Motion state
    cell4a = []
    cell4b = []
    cell4a.append(Paragraph("<font name='Ubuntu-Bold'>%s:</font>" % _("State"),
                            stylesheet['Heading4']))
    cell4b.append(Paragraph(_(motion.state.name), stylesheet['Normal']))
    motion_data.append([cell4a, cell4b])
    # Version number (only shown when more than one version exists)
    if motion.versions.count() > 1:
        version = motion.get_active_version()
        cell5a = []
        cell5b = []
        cell5a.append(Paragraph("<font name='Ubuntu-Bold'>%s:</font>" % _("Version"),
                                stylesheet['Heading4']))
        cell5b.append(Paragraph("%s" % version.version_number, stylesheet['Normal']))
        motion_data.append([cell5a, cell5b])
    # voting results: only polls that actually have votes are listed
    polls = []
    for poll in motion.polls.all():
        if not poll.has_votes():
            continue
        polls.append(poll)
    if polls:
        cell6a = []
        cell6b = []
        cell6a.append(Paragraph("<font name='Ubuntu-Bold'>%s:</font>" %
                                _("Vote results"), stylesheet['Heading4']))
        ballotcounter = 0
        for poll in polls:
            ballotcounter += 1
            option = poll.get_options()[0]
            yes, no, abstain = (option['Yes'], option['No'], option['Abstain'])
            valid, invalid, votescast = ('', '', '')
            if poll.votesvalid is not None:
                valid = "<br/>%s: %s" % (_("Valid votes"), poll.print_votesvalid())
            if poll.votesinvalid is not None:
                invalid = "<br/>%s: %s" % (_("Invalid votes"), poll.print_votesinvalid())
            if poll.votescast is not None:
                votescast = "<br/>%s: %s" % (_("Votes cast"), poll.print_votescast())
            # Number the ballots only when more than one poll is shown.
            if len(polls) > 1:
                cell6b.append(Paragraph("%s. %s" % (ballotcounter, _("Vote")),
                                        stylesheet['Bold']))
            cell6b.append(Paragraph(
                "%s: %s <br/> %s: %s <br/> %s: %s <br/> %s %s %s" %
                (_("Yes"), yes, _("No"), no, _("Abstention"), abstain, valid, invalid, votescast),
                stylesheet['Normal']))
            cell6b.append(Spacer(0, 0.2 * cm))
        motion_data.append([cell6a, cell6b])
    # Creating Table
    table = Table(motion_data)
    # _argW is ReportLab's internal column-width list; fixed label/value widths.
    table._argW[0] = 4.5 * cm
    table._argW[1] = 11 * cm
    table.setStyle(TableStyle([('BOX', (0, 0), (-1, -1), 1, colors.black),
                               ('VALIGN', (0, 0), (-1, -1), 'TOP')]))
    pdf.append(table)
    pdf.append(Spacer(0, 1 * cm))
    # motion title
    pdf.append(Paragraph(motion.title, stylesheet['Heading3']))
    # motion text
    convert_html_to_reportlab(pdf, motion.text)
    pdf.append(Spacer(0, 1 * cm))
    # motion reason
    if motion.reason:
        pdf.append(Paragraph(_("Reason") + ":", stylesheet['Heading3']))
        convert_html_to_reportlab(pdf, motion.reason)
    return pdf
def convert_html_to_reportlab(pdf, text):
    """
    Translate a CKEditor HTML fragment into ReportLab paragraphs.

    Rewrites <ul>/<ol>/<li> markup into ReportLab's <para>/<bullet>/<seq>
    intraparagraph markup, strips unsupported tags, then appends one
    Paragraph per top-level HTML chunk to ``pdf``.  Plain paragraphs may be
    numbered depending on the ``motion_pdf_paragraph_numbering`` config.
    """
    # parsing and replacing not supported html tags for reportlab...
    # NOTE(review): no explicit parser is passed to BeautifulSoup, so the
    # result depends on the installed parser -- confirm against upstream.
    soup = BeautifulSoup(text)
    # read all list elements...
    for element in soup.find_all('li'):
        # ... and replace ul list elements with <para><bullet>•</bullet>...<para>
        if element.parent.name == "ul":
            # nested lists: hoist the inner <li> items before this element
            if element.ul or element.ol:
                for i in element.find_all('li'):
                    element.insert_before(i)
                element.clear()
            else:
                element.name = "para"
                bullet_tag = soup.new_tag("bullet")
                bullet_tag.string = u"•"
                element.insert(0, bullet_tag)
        # ... and replace ol list elements with <para><bullet><seq id="%id"></seq>.</bullet>...</para>
        if element.parent.name == "ol":
            # set list id if element is the first of numbered list
            # NOTE(review): ``id`` shadows the builtin, and the random id
            # (0..100) can collide between two lists in one document.
            if not element.find_previous_sibling():
                id = random.randrange(0, 101)
            # nested lists
            if element.ul or element.ol:
                for i in element.find_all('li'):
                    element.insert_before(i)
                element.clear()
            else:
                element.name = "para"
                element.insert(0, soup.new_tag("bullet"))
                element.bullet.insert(0, soup.new_tag("seq"))
                element.bullet.seq['id'] = id
                element.bullet.insert(1, ".")
    # remove tags which are not supported by reportlab (replace tags with their children tags)
    for tag in soup.find_all('ul'):
        tag.unwrap()
    for tag in soup.find_all('ol'):
        tag.unwrap()
    for tag in soup.find_all('li'):
        tag.unwrap()
    for tag in soup.find_all('span'):
        tag.unwrap()
    # print paragraphs with numbers
    text = soup.body.contents
    paragraph_number = 1
    for paragraph in text:
        paragraph = unicode(paragraph)  # Python 2 builtin
        # ignore empty paragraphs (created by newlines/tabs of ckeditor)
        if paragraph == '\n' or paragraph == '\n\n' or paragraph == '\n\t':
            continue
        if "<pre>" in paragraph:
            # NOTE(review): replacing ' ' with ' ' is a no-op -- this looks
            # like a '&nbsp;' replacement lost in transcription; confirm.
            txt = paragraph.replace('\n', '<br/>').replace(' ', ' ')
            if config["motion_pdf_paragraph_numbering"]:
                pdf.append(Paragraph(txt, stylesheet['InnerMonotypeParagraph'], str(paragraph_number)))
                paragraph_number += 1
            else:
                pdf.append(Paragraph(txt, stylesheet['InnerMonotypeParagraph']))
        elif "<para>" in paragraph:
            pdf.append(Paragraph(paragraph, stylesheet['InnerListParagraph']))
        elif "<seqreset" in paragraph:
            # counter resets produce no visible output
            pass
        elif "<h1>" in paragraph:
            pdf.append(Paragraph(paragraph, stylesheet['InnerH1Paragraph']))
        elif "<h2>" in paragraph:
            pdf.append(Paragraph(paragraph, stylesheet['InnerH2Paragraph']))
        elif "<h3>" in paragraph:
            pdf.append(Paragraph(paragraph, stylesheet['InnerH3Paragraph']))
        else:
            if config["motion_pdf_paragraph_numbering"]:
                pdf.append(Paragraph(paragraph, stylesheet['InnerParagraph'], str(paragraph_number)))
                paragraph_number += 1
            else:
                pdf.append(Paragraph(paragraph, stylesheet['InnerParagraph']))
def all_motion_cover(pdf, motions):
    """Append the motions cover page (title, preamble, categories, motion list) to *pdf*."""
    pdf.append(Paragraph(config["motion_pdf_title"], stylesheet['Heading1']))
    preamble = config["motion_pdf_preamble"]
    if preamble:
        pdf.append(Paragraph("%s" % preamble.replace('\r\n', '<br/>'), stylesheet['Paragraph']))
    pdf.append(Spacer(0, 0.75 * cm))
    # Category listing; the heading is emitted once, before the first entry.
    has_categories = False
    for index, category in enumerate(Category.objects.all()):
        has_categories = True
        if index == 0:
            pdf.append(Paragraph(_("Categories"), stylesheet['Heading2']))
        pdf.append(Paragraph("%s %s" % (category.prefix, category.name), stylesheet['Paragraph']))
    if has_categories:
        pdf.append(PageBreak())
    # Motion listing, or a placeholder when there is nothing to list.
    if not motions:
        pdf.append(Paragraph(_("No motions available."), stylesheet['Heading3']))
        return
    for motion in motions:
        prefix = "%s " % motion.identifier if motion.identifier else ""
        pdf.append(Paragraph("%s %s" % (prefix, motion.title), stylesheet['Heading3']))
def motion_poll_to_pdf(pdf, poll):
    """
    Append printable ballot papers for ``poll`` to the ``pdf`` story list.

    Builds one ballot cell (motion number, title, poll number and the
    Yes/No/Abstention options) and lays it out two-per-row in a table.
    The number of copies comes from the "ballot papers" config values.

    NOTE: this module targets Python 2 (``unicode``/``xrange``); the
    ``number / 2`` below relies on integer division.
    """
    circle = "*"  # = Unicode Character 'HEAVY LARGE CIRCLE' (U+2B55)
    cell = []
    cell.append(Spacer(0, 0.8 * cm))
    cell.append(Paragraph(_("Motion No. %s") % poll.motion.identifier, stylesheet['Ballot_title']))
    cell.append(Paragraph(poll.motion.title, stylesheet['Ballot_subtitle']))
    cell.append(Paragraph(_("%d. Vote") % poll.poll_number, stylesheet['Ballot_description']))
    cell.append(Spacer(0, 0.5 * cm))
    cell.append(Paragraph("<font name='circlefont' size='15'>%s</font> <font name='Ubuntu'>%s</font>"
                          % (circle, unicode(_("Yes"))), stylesheet['Ballot_option']))
    cell.append(Paragraph("<font name='circlefont' size='15'>%s</font> <font name='Ubuntu'>%s</font>"
                          % (circle, unicode(_("No"))), stylesheet['Ballot_option']))
    cell.append(Paragraph("<font name='circlefont' size='15'>%s</font> <font name='Ubuntu'>%s</font>"
                          % (circle, unicode(_("Abstention"))), stylesheet['Ballot_option']))
    data = []
    # get ballot papers config values
    ballot_papers_selection = config["motion_pdf_ballot_papers_selection"]
    ballot_papers_number = config["motion_pdf_ballot_papers_number"]
    # set number of ballot papers
    if ballot_papers_selection == "NUMBER_OF_DELEGATES":
        # TODO: get this number from persons
        # NOTE(review): group pk 3 is assumed to be the delegates group -- confirm.
        try:
            if Group.objects.get(pk=3):
                number = User.objects.filter(groups__pk=3).count()
        except Group.DoesNotExist:
            number = 0
    elif ballot_papers_selection == "NUMBER_OF_ALL_PARTICIPANTS":
        # TODO: get the number from the persons
        number = int(User.objects.count())
    else:  # ballot_papers_selection == "CUSTOM_NUMBER"
        number = int(ballot_papers_number)
    number = max(1, number)
    # print ballot papers
    if number > 0:
        # TODO: try [cell, cell] * (number / 2)
        for user in xrange(number / 2):
            data.append([cell, cell])
        rest = number % 2
        if rest:
            # Odd total: last row has one ballot next to an empty cell.
            data.append([cell, ''])
        t = Table(data, 10.5 * cm, 7.42 * cm)
        t.setStyle(TableStyle(
            [('GRID', (0, 0), (-1, -1), 0.25, colors.grey),
             ('VALIGN', (0, 0), (-1, -1), 'TOP')]))
pdf.append(t) | 0.208421 | 0.154695 |
from __future__ import print_function
import io
import logging
import logging.handlers
import sys
import threading
import time
try:
import argparse
except ImportError:
sys.stderr.write("""
ntploggps: can't find the Python argparse module
If your Python version is < 2.7, then manual installation is needed:
# pip install argparse
""")
sys.exit(1)
try:
import gps
except ImportError as e:
sys.stderr.write("ntploggps: can't find Python GPSD library.\n")
sys.stderr.write("%s\n" % e)
sys.exit(1)
class logfile_header_class(logging.handlers.TimedRotatingFileHandler):
    """Timed rotating file handler that re-writes the column header on rotation."""

    def doRollover(self):
        """Perform the normal rollover, then write the header into the fresh file."""
        # 'str is bytes' holds only on Python 2, where the zero-argument
        # super() form is unavailable; spell the base call out there.
        if str is bytes:
            super(logfile_header_class, self).doRollover()
        else:
            super().doRollover()
        self.stream.write('# Time Device TDOP nSat\n')
def logging_setup():
    "Create logging object"
    # NOTE: reads the module-global ``args`` parsed at import time.
    logFormat = logging.Formatter('%(message)s')
    # Create logger for gpsd
    Logger = logging.getLogger()
    Logger.setLevel(logging.INFO)
    # Create file handler
    if args.logfile:
        # log to logfile, rotated daily at UTC midnight
        file = logfile_header_class(
            args.logfile[0],
            utc=True,
            when='midnight',
            interval=1)
    else:
        # log to stdout
        file = logging.StreamHandler(sys.stdout)
    file.setLevel(logging.INFO)
    # Create the formatter and add it to the handler
    file.setFormatter(logFormat)
    # Add the handler to the logger
    Logger.addHandler(file)
    return Logger
# Command-line interface; runs at import time.
parser = argparse.ArgumentParser(description="gpsd log file generator",
                                 epilog="""
See the manual page for details.
""")
parser.add_argument('-l', '--logfile',
                    dest='logfile',
                    help="append log data to LOGFILE instead of stdout",
                    nargs=1)
parser.add_argument('-o', '--once',
                    action="store_true",
                    dest='once',
                    help="log one line, then exit")
parser.add_argument('-w', '--wait',
                    default=[5],
                    dest='wait',
                    help="wait WAIT seconds after each log line, default 5",
                    nargs=1,
                    type=int)
parser.add_argument('-v', '--verbose',
                    action="store_true",
                    dest='verbose',
                    help="be verbose")
parser.add_argument('-V', '--version',
                    action="version",
                    # @NTPSEC_VERSION_EXTENDED@ is presumably substituted by
                    # the build system -- confirm against the waf config.
                    version="ntploggps ntpsec-@NTPSEC_VERSION_EXTENDED@")
args = parser.parse_args()
if args.verbose:
    print("ntploggps: arguments:")
    print(args)
if args.logfile:
    # log to logfile
    # NOTE(review): ``out`` appears unused afterwards -- logging_setup()
    # opens the log file itself; verify whether this open is still needed.
    try:
        out = open(args.logfile[0], mode='a')
    except io.UnsupportedOperation as e:
        sys.stderr.write("ntploggps: can't open logfile %s\n" % args.logfile)
        sys.stderr.write("%s\n" % e)
        sys.exit(1)
    if args.verbose:
        print("ntploggps: opened log file %s" % args.logfile[0])
else:
    # log to stdout
    out = sys.stdout
class GpsPoller(threading.Thread):
    """Background thread that polls gpsd and caches device, TDOP and satellite count.

    The attributes ``device``, ``tdop`` and ``satellites_used`` stay None
    until the first matching report arrives.
    """

    running = False               # True when thread is running. Quit when set False

    def __init__(self):
        threading.Thread.__init__(self)
        self.device = None            # device path from the last TPV report
        self.satellites_used = None   # used-satellite count from the last SKY report
        self.tdop = None              # time dilution of precision from the last SKY report
        # start the streaming of gps data
        try:
            self.gpsd = gps.gps(mode=gps.WATCH_ENABLE)
        except BaseException as e:
            sys.stderr.write("ntploggps: Can't connect to gpsd, %s\n"
                             " Is gpsd running?\n" % e)
            sys.exit(1)
        self.running = True

    def run(self):
        """Consume gpsd reports until ``running`` is cleared or the socket dies."""
        # BUG FIX: this loop previously tested the module-global ``gpsp``
        # instead of ``self``, so any instance not bound to that exact
        # global name would never see its own stop flag.
        while self.running:
            if self.gpsd.read() == -1:
                self.running = False
                break
            if hasattr(self.gpsd, "data"):
                if self.gpsd.data.get("class") == "SKY":
                    self.satellites_used = 0
                    self.tdop = self.gpsd.data.get("tdop", 0)
                    for sat in self.gpsd.data.get("satellites", []):
                        if sat["used"]:
                            self.satellites_used += 1
                elif self.gpsd.data.get("class") == "TPV":
                    self.device = self.gpsd.data.get("device")

    @property
    def time(self):
        """Return the gpsd time fix (int/float seconds, or None for non-finite)."""
        t = self.gpsd.fix.time
        if isinstance(t, int):
            return t
        if isinstance(t, float):
            if not gps.isfinite(t):
                return None
            return t
        # Anything else is an ISO8601 string; convert to seconds.
        return gps.isotime(t)
if __name__ == '__main__':
    # this is the main thread: start the poller, then periodically emit
    # one "time device tdop nsat" line until interrupted.
    if args.verbose:
        print("ntploggps: creating poll thread")
    gpsp = GpsPoller()  # create the thread
    try:
        # Create the logger instance
        Logger = logging_setup()
        # Create data layout
        Logger.info("# Time Device TDOP nSat")
        gpsp.start()  # start it up
        last_time = 0
        while gpsp.running:
            # It may take a second or two to get good data
            try:
                current_time = gpsp.time
                device = gpsp.device
                tdop = gpsp.tdop
                satellites_used = gpsp.satellites_used
                # Log only when every field has arrived at least once.
                if current_time is not None and \
                   device is not None and \
                   satellites_used is not None and \
                   tdop is not None:
                    # Skip duplicate timestamps.
                    if last_time != current_time:
                        s = '%i %s %f %d' % (current_time, device, tdop,
                                             satellites_used)
                        Logger.info(s)
                        last_time = current_time
                    if args.once:
                        # just once
                        break
            except AttributeError as e:
                # gpsd session object may briefly lack fields; transient.
                print('parse error\n')
            # wait a bit before next log
            time.sleep(args.wait[0])
    except (KeyboardInterrupt, SystemExit):  # when you press ctrl+c
        args.once = True  # stop the retry loop
        if args.verbose:
            print("\nKilling Thread...")
        else:
            # print a blank line to make bash happy
            print("")
    except Exception as e:  # any error, signal
        print(e)
    # tell the thread to die
    gpsp.running = False
    # wait for the thread to finish what it's doing
    gpsp.join()
if args.verbose:
print("ntploggps: Done -- Exiting.") | ntpclients/ntploggps.py | from __future__ import print_function
import io
import logging
import logging.handlers
import sys
import threading
import time
try:
import argparse
except ImportError:
sys.stderr.write("""
ntploggps: can't find the Python argparse module
If your Python version is < 2.7, then manual installation is needed:
# pip install argparse
""")
sys.exit(1)
try:
import gps
except ImportError as e:
sys.stderr.write("ntploggps: can't find Python GPSD library.\n")
sys.stderr.write("%s\n" % e)
sys.exit(1)
class logfile_header_class(logging.handlers.TimedRotatingFileHandler):
    'A class to modify the file logging handler.'
    def doRollover(self):
        'function to add header to new file on rotaion.'
        # 'str is bytes' holds only on Python 2, where the zero-argument
        # super() form is unavailable.
        if str is bytes:
            super(logfile_header_class, self).doRollover()
        else:
            super().doRollover()
        # Re-emit the column header at the top of the newly opened file.
        self.stream.write('# Time Device TDOP nSat\n')
def logging_setup():
    "Create logging object"
    # NOTE: reads the module-global ``args`` parsed at import time.
    logFormat = logging.Formatter('%(message)s')
    # Create logger for gpsd
    Logger = logging.getLogger()
    Logger.setLevel(logging.INFO)
    # Create file handler
    if args.logfile:
        # log to logfile, rotated daily at UTC midnight
        file = logfile_header_class(
            args.logfile[0],
            utc=True,
            when='midnight',
            interval=1)
    else:
        # log to stdout
        file = logging.StreamHandler(sys.stdout)
    file.setLevel(logging.INFO)
    # Create the formatter and add it to the handler
    file.setFormatter(logFormat)
    # Add the handler to the logger
    Logger.addHandler(file)
    return Logger
# Command-line interface; runs at import time.
parser = argparse.ArgumentParser(description="gpsd log file generator",
                                 epilog="""
See the manual page for details.
""")
parser.add_argument('-l', '--logfile',
                    dest='logfile',
                    help="append log data to LOGFILE instead of stdout",
                    nargs=1)
parser.add_argument('-o', '--once',
                    action="store_true",
                    dest='once',
                    help="log one line, then exit")
parser.add_argument('-w', '--wait',
                    default=[5],
                    dest='wait',
                    help="wait WAIT seconds after each log line, default 5",
                    nargs=1,
                    type=int)
parser.add_argument('-v', '--verbose',
                    action="store_true",
                    dest='verbose',
                    help="be verbose")
parser.add_argument('-V', '--version',
                    action="version",
                    # @NTPSEC_VERSION_EXTENDED@ is presumably substituted by
                    # the build system -- confirm against the waf config.
                    version="ntploggps ntpsec-@NTPSEC_VERSION_EXTENDED@")
args = parser.parse_args()
if args.verbose:
    print("ntploggps: arguments:")
    print(args)
if args.logfile:
    # log to logfile
    # NOTE(review): ``out`` appears unused afterwards -- logging_setup()
    # opens the log file itself; verify whether this open is still needed.
    try:
        out = open(args.logfile[0], mode='a')
    except io.UnsupportedOperation as e:
        sys.stderr.write("ntploggps: can't open logfile %s\n" % args.logfile)
        sys.stderr.write("%s\n" % e)
        sys.exit(1)
    if args.verbose:
        print("ntploggps: opened log file %s" % args.logfile[0])
else:
    # log to stdout
    out = sys.stdout
class GpsPoller(threading.Thread):
    """Background thread that polls gpsd and caches device, TDOP and satellite count.

    The attributes ``device``, ``tdop`` and ``satellites_used`` stay None
    until the first matching report arrives.
    """

    running = False               # True when thread is running. Quit when set False

    def __init__(self):
        threading.Thread.__init__(self)
        self.device = None            # device path from the last TPV report
        self.satellites_used = None   # used-satellite count from the last SKY report
        self.tdop = None              # time dilution of precision from the last SKY report
        # start the streaming of gps data
        try:
            self.gpsd = gps.gps(mode=gps.WATCH_ENABLE)
        except BaseException as e:
            sys.stderr.write("ntploggps: Can't connect to gpsd, %s\n"
                             " Is gpsd running?\n" % e)
            sys.exit(1)
        self.running = True

    def run(self):
        """Consume gpsd reports until ``running`` is cleared or the socket dies."""
        # BUG FIX: this loop previously tested the module-global ``gpsp``
        # instead of ``self``, so any instance not bound to that exact
        # global name would never see its own stop flag.
        while self.running:
            if self.gpsd.read() == -1:
                self.running = False
                break
            if hasattr(self.gpsd, "data"):
                if self.gpsd.data.get("class") == "SKY":
                    self.satellites_used = 0
                    self.tdop = self.gpsd.data.get("tdop", 0)
                    for sat in self.gpsd.data.get("satellites", []):
                        if sat["used"]:
                            self.satellites_used += 1
                elif self.gpsd.data.get("class") == "TPV":
                    self.device = self.gpsd.data.get("device")

    @property
    def time(self):
        """Return the gpsd time fix (int/float seconds, or None for non-finite)."""
        t = self.gpsd.fix.time
        if isinstance(t, int):
            return t
        if isinstance(t, float):
            if not gps.isfinite(t):
                return None
            return t
        # Anything else is an ISO8601 string; convert to seconds.
        return gps.isotime(t)
if __name__ == '__main__':
    # this is the main thread: start the poller, then periodically emit
    # one "time device tdop nsat" line until interrupted.
    if args.verbose:
        print("ntploggps: creating poll thread")
    gpsp = GpsPoller()  # create the thread
    try:
        # Create the logger instance
        Logger = logging_setup()
        # Create data layout
        Logger.info("# Time Device TDOP nSat")
        gpsp.start()  # start it up
        last_time = 0
        while gpsp.running:
            # It may take a second or two to get good data
            try:
                current_time = gpsp.time
                device = gpsp.device
                tdop = gpsp.tdop
                satellites_used = gpsp.satellites_used
                # Log only when every field has arrived at least once.
                if current_time is not None and \
                   device is not None and \
                   satellites_used is not None and \
                   tdop is not None:
                    # Skip duplicate timestamps.
                    if last_time != current_time:
                        s = '%i %s %f %d' % (current_time, device, tdop,
                                             satellites_used)
                        Logger.info(s)
                        last_time = current_time
                    if args.once:
                        # just once
                        break
            except AttributeError as e:
                # gpsd session object may briefly lack fields; transient.
                print('parse error\n')
            # wait a bit before next log
            time.sleep(args.wait[0])
    except (KeyboardInterrupt, SystemExit):  # when you press ctrl+c
        args.once = True  # stop the retry loop
        if args.verbose:
            print("\nKilling Thread...")
        else:
            # print a blank line to make bash happy
            print("")
    except Exception as e:  # any error, signal
        print(e)
    # tell the thread to die
    gpsp.running = False
    # wait for the thread to finish what it's doing
    gpsp.join()
if args.verbose:
print("ntploggps: Done -- Exiting.") | 0.304765 | 0.081009 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy as sp
import pandas as pd
from numpy.linalg import svd
class CorrespondenceAnalysis(object):
    """Correspondence analysis (CA).

    Methods:
        fit: Fit correspondence analysis.
        get_coordinates: Get symmetric or asymmetric map coordinates.
        score_inertia: Get score inertia.

    ### Usage

    ```python
    corranal = CorrespondenceAnalysis(aggregate_cnt)
    corranal.fit()
    coord_df = corranal.get_coordinates()
    inertia_prop = corranal.score_inertia()
    ```
    """
def __init__(self, df):
"""Create a new Correspondence Analysis.
Args:
df: Pandas DataFrame, with row and column names.
Raises:
TypeError: Input data is not a pandas DataFrame
ValueError: Input data contains missing values.
TypeError: Input data contains data types other than numeric.
"""
if isinstance(df, pd.DataFrame) is not True:
raise TypeError('Input data is not a Pandas DataFrame.')
self._rows = np.array(df.index)
self._cols = np.array(df.columns)
self._np_data = np.array(df.values)
if np.isnan(self._np_data).any():
raise ValueError('Input data contains missing values.')
if np.issubdtype(self._np_data.dtype, np.number) is not True:
raise TypeError('Input data contains data types other than numeric.')
def fit(self):
"""Compute Correspondence Analysis.
Fit method is to
- perform generalized singular value decomposition (SVD) for
correspondence matrix and
- compute principal and standard coordinates for rows and columns.
Returns:
self: Object.
"""
p_corrmat = self._np_data / self._np_data.sum()
r_profile = p_corrmat.sum(axis=1).reshape(p_corrmat.shape[0], 1)
c_profile = p_corrmat.sum(axis=0).reshape(p_corrmat.shape[1], 1)
Dr_invsqrt = np.diag(np.power(r_profile, -1/2).T[0])
Dc_invsqrt = np.diag(np.power(c_profile, -1/2).T[0])
ker_mat = np.subtract(p_corrmat, np.dot(r_profile, c_profile.T))
left_mat = Dr_invsqrt
right_mat = Dc_invsqrt
weighted_lse = left_mat.dot(ker_mat).dot(right_mat)
U, sv, Vt = svd(weighted_lse, full_matrices=False)
self._Dr_invsqrt = Dr_invsqrt
self._Dc_invsqrt = Dc_invsqrt
self._U = U
self._V = Vt.T
self._SV = np.diag(sv)
self._inertia = np.power(sv, 2)
# Pricipal coordinates for rows and columns.
self._F = self._Dr_invsqrt.dot(self._U).dot(self._SV)
self._G = self._Dc_invsqrt.dot(self._V).dot(self._SV)
# Standard coordinates for rows and columns.
self._Phi = self._Dr_invsqrt.dot(self._U)
self._Gam = self._Dc_invsqrt.dot(self._V)
return self
def _coordinates_df(self, array_x1, array_x2):
"""Create pandas DataFrame with coordinates in rows and columns.
Args:
array_x1: numpy array, coordinates in rows.
array_x2: numpy array, coordinates in columns.
Returns:
coord_df: A Pandas DataFrame with columns
{'x_1',..., 'x_K', 'point', 'coord'}:
- x_k, k=1,...,K: K-dimensional coordinates.
- point: row and column points for labeling.
- coord: {'row', 'col'}, indicates row point or column point.
"""
row_df = pd.DataFrame(
array_x1,
columns=['x' + str(i) for i in (np.arange(array_x1.shape[1]) + 1)])
row_df['point'] = self._rows
row_df['coord'] = 'row'
col_df = pd.DataFrame(
array_x2,
columns=['x' + str(i) for i in (np.arange(array_x2.shape[1]) + 1)])
col_df['point'] = self._cols
col_df['coord'] = 'col'
coord_df = pd.concat([row_df, col_df], ignore_index=True)
return coord_df
def get_coordinates(self, option='symmetric'):
"""Take coordinates in rows and columns for symmetric or assymetric map.
For symmetric vs. asymmetric map:
- For symmetric map, we can catch row-to-row and column-to-column
association (maybe not row-to-column association);
- For asymmetric map, we can further catch row-to-column association.
Args:
option: string, in one of the following three:
'symmetric': symmetric map with
- rows and columns in principal coordinates.
'rowprincipal': asymmetric map with
- rows in principal coordinates and
- columns in standard coordinates.
'colprincipal': asymmetric map with
- rows in standard coordinates and
- columns in principal coordinates.
Returns:
Pandas DataFrame, contains coordinates, row and column points.
Raises:
ValueError: Option only includes {"symmetric", "rowprincipal", "colprincipal"}.
"""
if option == 'symmetric':
# Symmetric map
return self._coordinates_df(self._F, self._G)
elif option == 'rowprincipal':
# Row principal asymmetric map
return self._coordinates_df(self._F, self._Gam)
elif option == 'colprincipal':
# Column principal asymmetric map
return self._coordinates_df(self._Phi, self._G)
else:
raise ValueError(
'Option only includes {"symmetric", "rowprincipal", "colprincipal"}.')
    def score_inertia(self):
        """Score inertia.

        Returns:
            A NumPy array, contains proportions of total inertia explained
            in coordinate dimensions.
        """
        inertia = self._inertia
        # Cumulative share of total inertia per coordinate dimension.
        inertia_prop = (inertia / inertia.sum()).cumsum()
return inertia_prop | correspondence_analysis.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy as sp
import pandas as pd
from numpy.linalg import svd
class CorrespondenceAnalysis(object):
    """Correspondence analysis (CA).

    Methods:
        fit: Fit correspondence analysis.
        get_coordinates: Get symmetric or asymmetric map coordinates.
        score_inertia: Get score inertia.

    ### Usage

    ```python
    corranal = CorrespondenceAnalysis(aggregate_cnt)
    corranal.fit()
    coord_df = corranal.get_coordinates()
    inertia_prop = corranal.score_inertia()
    ```
    """
def __init__(self, df):
"""Create a new Correspondence Analysis.
Args:
df: Pandas DataFrame, with row and column names.
Raises:
TypeError: Input data is not a pandas DataFrame
ValueError: Input data contains missing values.
TypeError: Input data contains data types other than numeric.
"""
if isinstance(df, pd.DataFrame) is not True:
raise TypeError('Input data is not a Pandas DataFrame.')
self._rows = np.array(df.index)
self._cols = np.array(df.columns)
self._np_data = np.array(df.values)
if np.isnan(self._np_data).any():
raise ValueError('Input data contains missing values.')
if np.issubdtype(self._np_data.dtype, np.number) is not True:
raise TypeError('Input data contains data types other than numeric.')
    def fit(self):
        """Compute Correspondence Analysis.

        Fit method is to
        - perform generalized singular value decomposition (SVD) for
          correspondence matrix and
        - compute principal and standard coordinates for rows and columns.

        Returns:
            self: Object.
        """
        p_corrmat = self._np_data / self._np_data.sum()
        r_profile = p_corrmat.sum(axis=1).reshape(p_corrmat.shape[0], 1)
        c_profile = p_corrmat.sum(axis=0).reshape(p_corrmat.shape[1], 1)
        # D_r^{-1/2} and D_c^{-1/2}: inverse square roots of the masses.
        Dr_invsqrt = np.diag(np.power(r_profile, -1/2).T[0])
        Dc_invsqrt = np.diag(np.power(c_profile, -1/2).T[0])
        # Residuals of the independence model: P - r c^T.
        ker_mat = np.subtract(p_corrmat, np.dot(r_profile, c_profile.T))
        left_mat = Dr_invsqrt
        right_mat = Dc_invsqrt
        weighted_lse = left_mat.dot(ker_mat).dot(right_mat)
        U, sv, Vt = svd(weighted_lse, full_matrices=False)
        self._Dr_invsqrt = Dr_invsqrt
        self._Dc_invsqrt = Dc_invsqrt
        self._U = U
        self._V = Vt.T
        self._SV = np.diag(sv)
        # Principal inertias are the squared singular values.
        self._inertia = np.power(sv, 2)
        # Principal coordinates for rows and columns.
        self._F = self._Dr_invsqrt.dot(self._U).dot(self._SV)
        self._G = self._Dc_invsqrt.dot(self._V).dot(self._SV)
        # Standard coordinates for rows and columns.
        self._Phi = self._Dr_invsqrt.dot(self._U)
        self._Gam = self._Dc_invsqrt.dot(self._V)
        return self
    def _coordinates_df(self, array_x1, array_x2):
        """Create pandas DataFrame with coordinates in rows and columns.

        Args:
            array_x1: numpy array, coordinates in rows.
            array_x2: numpy array, coordinates in columns.

        Returns:
            coord_df: A Pandas DataFrame with columns
                {'x_1',..., 'x_K', 'point', 'coord'}:
                - x_k, k=1,...,K: K-dimensional coordinates.
                - point: row and column points for labeling.
                - coord: {'row', 'col'}, indicates row point or column point.
        """
        # Columns are named x1..xK after the coordinate dimensions.
        row_df = pd.DataFrame(
            array_x1,
            columns=['x' + str(i) for i in (np.arange(array_x1.shape[1]) + 1)])
        row_df['point'] = self._rows
        row_df['coord'] = 'row'
        col_df = pd.DataFrame(
            array_x2,
            columns=['x' + str(i) for i in (np.arange(array_x2.shape[1]) + 1)])
        col_df['point'] = self._cols
        col_df['coord'] = 'col'
        # Rows first, then columns, with a fresh RangeIndex.
        coord_df = pd.concat([row_df, col_df], ignore_index=True)
        return coord_df
    def get_coordinates(self, option='symmetric'):
        """Take coordinates in rows and columns for symmetric or assymetric map.

        For symmetric vs. asymmetric map:
        - For symmetric map, we can catch row-to-row and column-to-column
          association (maybe not row-to-column association);
        - For asymmetric map, we can further catch row-to-column association.

        Args:
            option: string, in one of the following three:
                'symmetric': symmetric map with
                    - rows and columns in principal coordinates.
                'rowprincipal': asymmetric map with
                    - rows in principal coordinates and
                    - columns in standard coordinates.
                'colprincipal': asymmetric map with
                    - rows in standard coordinates and
                    - columns in principal coordinates.

        Returns:
            Pandas DataFrame, contains coordinates, row and column points.

        Raises:
            ValueError: Option only includes {"symmetric", "rowprincipal", "colprincipal"}.
        """
        # Requires fit() to have populated _F/_G/_Phi/_Gam.
        if option == 'symmetric':
            # Symmetric map
            return self._coordinates_df(self._F, self._G)
        elif option == 'rowprincipal':
            # Row principal asymmetric map
            return self._coordinates_df(self._F, self._Gam)
        elif option == 'colprincipal':
            # Column principal asymmetric map
            return self._coordinates_df(self._Phi, self._G)
        else:
            raise ValueError(
                'Option only includes {"symmetric", "rowprincipal", "colprincipal"}.')
    def score_inertia(self):
        """Score inertia.

        Returns:
            A NumPy array, contains proportions of total inertia explained
            in coordinate dimensions.
        """
        inertia = self._inertia
        # Cumulative share of total inertia per coordinate dimension.
        inertia_prop = (inertia / inertia.sum()).cumsum()
return inertia_prop | 0.911036 | 0.715262 |
from typing import TypeVar
from pickle import HIGHEST_PROTOCOL
from io import BytesIO, SEEK_SET
from adam.curriculum.curriculum_utils import phase1_instances, PHASE1_CHOOSER_FACTORY
from adam.language.language_utils import phase1_language_generator
from adam.ontology.phase1_ontology import GAILA_PHASE_1_ONTOLOGY
from adam.pickle import AdamPickler, AdamUnpickler
from adam.learner import LearningExample
from adam.learner.integrated_learner import IntegratedTemplateLearner
from adam.learner.language_mode import LanguageMode
from adam.learner.object_recognizer import SHARED_WORLD_ITEMS
from adam.perception import GROUND_PERCEPTION, LEARNER_PERCEPTION
from adam.situation.templates.phase1_templates import sampled
from tests.learner import LANGUAGE_MODE_TO_TEMPLATE_LEARNER_OBJECT_RECOGNIZER
from tests.learner.subset_verb_learner_test import drink_test_template
T = TypeVar("T") # pylint:disable=invalid-name
def _pickle_and_unpickle_object(input_object: T) -> T:
    """Round-trip *input_object* through the ADAM pickling machinery."""
    buffer = BytesIO()
    AdamPickler(file=buffer, protocol=HIGHEST_PROTOCOL).dump(input_object)
    # Rewind so the unpickler reads from the start of what was just written.
    buffer.seek(0, SEEK_SET)
    return AdamUnpickler(file=buffer).load()
def test_pickle_preserves_shared_world_item_identity():
    """Every shared world item must unpickle to the very same object."""
    for shared_item in SHARED_WORLD_ITEMS:
        assert _pickle_and_unpickle_object(shared_item) is shared_item
def test_pickle_preserves_ground_perception_identity():
    """GROUND_PERCEPTION must survive a pickle round-trip as the same object."""
    assert _pickle_and_unpickle_object(GROUND_PERCEPTION) is GROUND_PERCEPTION
def test_pickle_preserves_learner_perception_identity():
    """LEARNER_PERCEPTION must survive a pickle round-trip as the same object."""
    assert _pickle_and_unpickle_object(LEARNER_PERCEPTION) is LEARNER_PERCEPTION
def test_object_recognition_with_drink_perception():
    """
    Regression test to confirm we can perform object recognition on a pickled and unpickled "drink"
    perception. If we do this using the normal pickling interface we get an error. This test checks
    that we don't run into such an error when we instead pickle and unpickle the perception using
    the AdamPickler and AdamUnpickler.

    See https://github.com/isi-vista/adam/issues/958.
    """
    language_mode = LanguageMode.ENGLISH
    template = drink_test_template()
    # Build a tiny "drink" curriculum (3 sampled situations) to feed the learner.
    curriculum = phase1_instances(
        "train",
        sampled(
            template,
            max_to_sample=3,
            ontology=GAILA_PHASE_1_ONTOLOGY,
            chooser=PHASE1_CHOOSER_FACTORY(),
            block_multiple_of_the_same_type=True,
        ),
        language_generator=phase1_language_generator(language_mode),
    )
    object_recognizer = LANGUAGE_MODE_TO_TEMPLATE_LEARNER_OBJECT_RECOGNIZER[language_mode]
    learner = IntegratedTemplateLearner(object_learner=object_recognizer)
    for (_, linguistic_description, perceptual_representation) in curriculum.instances():
        # Round-trip every perception; observing the result must not raise.
        new_perceptual_representation = _pickle_and_unpickle_object(
            perceptual_representation
        )
        learner.observe(
            LearningExample(new_perceptual_representation, linguistic_description)
        )
from pickle import HIGHEST_PROTOCOL
from io import BytesIO, SEEK_SET
from adam.curriculum.curriculum_utils import phase1_instances, PHASE1_CHOOSER_FACTORY
from adam.language.language_utils import phase1_language_generator
from adam.ontology.phase1_ontology import GAILA_PHASE_1_ONTOLOGY
from adam.pickle import AdamPickler, AdamUnpickler
from adam.learner import LearningExample
from adam.learner.integrated_learner import IntegratedTemplateLearner
from adam.learner.language_mode import LanguageMode
from adam.learner.object_recognizer import SHARED_WORLD_ITEMS
from adam.perception import GROUND_PERCEPTION, LEARNER_PERCEPTION
from adam.situation.templates.phase1_templates import sampled
from tests.learner import LANGUAGE_MODE_TO_TEMPLATE_LEARNER_OBJECT_RECOGNIZER
from tests.learner.subset_verb_learner_test import drink_test_template
T = TypeVar("T") # pylint:disable=invalid-name
def _pickle_and_unpickle_object(input_object: T) -> T:
stream = BytesIO()
pickler = AdamPickler(file=stream, protocol=HIGHEST_PROTOCOL)
pickler.dump(input_object)
stream.seek(0, SEEK_SET)
unpickler = AdamUnpickler(file=stream)
return unpickler.load()
def test_pickle_preserves_shared_world_item_identity():
for item in SHARED_WORLD_ITEMS:
new_item = _pickle_and_unpickle_object(item)
assert new_item is item
def test_pickle_preserves_ground_perception_identity():
new_ground_perception = _pickle_and_unpickle_object(GROUND_PERCEPTION)
assert new_ground_perception is GROUND_PERCEPTION
def test_pickle_preserves_learner_perception_identity():
new_ground_perception = _pickle_and_unpickle_object(LEARNER_PERCEPTION)
assert new_ground_perception is LEARNER_PERCEPTION
def test_object_recognition_with_drink_perception():
"""
Regression test to confirm we can perform object recognition on a pickled and unpickled "drink"
perception. If we do this using the normal pickling interface we get an error. This test checks
that we don't run into such an error when we instead pickle and unpickle the perception using
the AdamPickler and AdamUnpickler.
See https://github.com/isi-vista/adam/issues/958.
"""
language_mode = LanguageMode.ENGLISH
template = drink_test_template()
curriculum = phase1_instances(
"train",
sampled(
template,
max_to_sample=3,
ontology=GAILA_PHASE_1_ONTOLOGY,
chooser=PHASE1_CHOOSER_FACTORY(),
block_multiple_of_the_same_type=True,
),
language_generator=phase1_language_generator(language_mode),
)
object_recognizer = LANGUAGE_MODE_TO_TEMPLATE_LEARNER_OBJECT_RECOGNIZER[language_mode]
learner = IntegratedTemplateLearner(object_learner=object_recognizer)
for (_, linguistic_description, perceptual_representation) in curriculum.instances():
new_perceptual_representation = _pickle_and_unpickle_object(
perceptual_representation
)
learner.observe(
LearningExample(new_perceptual_representation, linguistic_description)
) | 0.792424 | 0.183758 |
from flask import request
from sqlalchemy.exc import IntegrityError
from redash import models
from redash.handlers.base import (BaseResource,
get_object_or_404, paginate)
from redash.permissions import require_access, view_only
class QueryFavoriteResource(BaseResource):
    """Add/remove the current user's favorite flag on a query."""

    def post(self, query_id):
        """Mark the given query as a favorite of the current user."""
        query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
        require_access(query, self.current_user, view_only)
        fav = models.Favorite(org_id=self.current_org.id, object=query, user=self.current_user)
        models.db.session.add(fav)
        try:
            models.db.session.commit()
        except IntegrityError as e:
            # A duplicate favorite violates the unique constraint; treat it as a no-op.
            if 'unique_favorite' in str(e):
                models.db.session.rollback()
            else:
                raise e
        self.record_event({
            'action': 'favorite',
            'object_id': query.id,
            'object_type': 'query'
        })

    def delete(self, query_id):
        """Remove the current user's favorite flag from the given query."""
        query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
        require_access(query, self.current_user, view_only)
        models.Favorite.query.filter(
            models.Favorite.object_id == query_id,
            models.Favorite.object_type == 'Query',
            models.Favorite.user == self.current_user,
        ).delete()
        models.db.session.commit()
        # Bug fix: this event was previously recorded as 'favorite'; it is an
        # unfavorite action (consistent with DashboardFavoriteResource.delete).
        self.record_event({
            'action': 'unfavorite',
            'object_id': query.id,
            'object_type': 'query'
        })
class DashboardFavoriteResource(BaseResource):
    """Add/remove the current user's favorite flag on a dashboard (looked up by slug)."""

    def post(self, object_id):
        """Mark the dashboard identified by slug *object_id* as a favorite."""
        dashboard = get_object_or_404(models.Dashboard.get_by_slug_and_org, object_id, self.current_org)
        # NOTE(review): unlike QueryFavoriteResource, no require_access() check is
        # performed here -- confirm whether dashboard favorites should be gated too.
        fav = models.Favorite(org_id=self.current_org.id, object=dashboard, user=self.current_user)
        models.db.session.add(fav)
        try:
            models.db.session.commit()
        except IntegrityError as e:
            # A duplicate favorite violates the unique constraint; treat it as a no-op.
            if 'unique_favorite' in str(e):
                models.db.session.rollback()
            else:
                raise e
        self.record_event({
            'action': 'favorite',
            'object_id': dashboard.id,
            'object_type': 'dashboard'
        })

    def delete(self, object_id):
        """Remove the current user's favorite flag from the dashboard."""
        dashboard = get_object_or_404(models.Dashboard.get_by_slug_and_org, object_id, self.current_org)
        models.Favorite.query.filter(models.Favorite.object == dashboard, models.Favorite.user == self.current_user).delete()
        models.db.session.commit()
        self.record_event({
            'action': 'unfavorite',
            'object_id': dashboard.id,
            'object_type': 'dashboard'
        })
from sqlalchemy.exc import IntegrityError
from redash import models
from redash.handlers.base import (BaseResource,
get_object_or_404, paginate)
from redash.permissions import require_access, view_only
class QueryFavoriteResource(BaseResource):
def post(self, query_id):
query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
require_access(query, self.current_user, view_only)
fav = models.Favorite(org_id=self.current_org.id, object=query, user=self.current_user)
models.db.session.add(fav)
try:
models.db.session.commit()
except IntegrityError as e:
if 'unique_favorite' in str(e):
models.db.session.rollback()
else:
raise e
self.record_event({
'action': 'favorite',
'object_id': query.id,
'object_type': 'query'
})
def delete(self, query_id):
query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
require_access(query, self.current_user, view_only)
models.Favorite.query.filter(
models.Favorite.object_id == query_id,
models.Favorite.object_type == 'Query',
models.Favorite.user == self.current_user,
).delete()
models.db.session.commit()
self.record_event({
'action': 'favorite',
'object_id': query.id,
'object_type': 'query'
})
class DashboardFavoriteResource(BaseResource):
def post(self, object_id):
dashboard = get_object_or_404(models.Dashboard.get_by_slug_and_org, object_id, self.current_org)
fav = models.Favorite(org_id=self.current_org.id, object=dashboard, user=self.current_user)
models.db.session.add(fav)
try:
models.db.session.commit()
except IntegrityError as e:
if 'unique_favorite' in str(e):
models.db.session.rollback()
else:
raise e
self.record_event({
'action': 'favorite',
'object_id': dashboard.id,
'object_type': 'dashboard'
})
def delete(self, object_id):
dashboard = get_object_or_404(models.Dashboard.get_by_slug_and_org, object_id, self.current_org)
models.Favorite.query.filter(models.Favorite.object == dashboard, models.Favorite.user == self.current_user).delete()
models.db.session.commit()
self.record_event({
'action': 'unfavorite',
'object_id': dashboard.id,
'object_type': 'dashboard'
}) | 0.413004 | 0.071461 |
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import http.server
import http.client
import json
import socketserver
class OpenFDAClient():
    """Thin HTTPS client for the OpenFDA drug-event endpoint."""

    OPENFDA_API_URL = "api.fda.gov"
    OPENFDA_API_EVENT = "/drug/event.json"

    def _request_events(self, query_string):
        """GET the event endpoint with *query_string* and return the parsed JSON dict.

        Shared by all public methods; the request/parse code was previously
        triplicated.
        """
        conn = http.client.HTTPSConnection(self.OPENFDA_API_URL)
        conn.request("GET", self.OPENFDA_API_EVENT + query_string)
        r1 = conn.getresponse()
        print(r1.status, r1.reason)
        data = r1.read().decode('utf8')
        return json.loads(data)

    def get_all_drugs(self, drug):
        """Return up to 10 events whose medicinal product matches *drug*."""
        return self._request_events('?search=patient.drug.medicinalproduct:' + drug + '&limit=10')

    def get_medicinalproduct(self, company):
        """Return up to 10 events filed under company number *company*."""
        return self._request_events('?search=companynumb:' + company + '&limit=10')

    def get_event(self, limit):
        """Return the first *limit* events (*limit* is a string from the URL)."""
        return self._request_events('?limit=' + limit)
class OpenFDAHTML():
    """Renders the HTML pages served by the OpenFDA app."""

    def get_main_page(self):
        """Return the landing page containing the query forms."""
        html = '''
        <html>
        <head>
        <title>OpenFDA app</title>
        </head>
        <body>
        <h1>OpenFDA CloolApp</h1>
        <form method='get' action='listDrugs'>
        limit: <input type='text' name='limit'></input>
        <input type='submit' value='Drug list: Send to OpenFDA'></input>
        </form>
        <form method='get' action='searchDrug'>
        drug: <input type='text' name='drug'></input>
        <input type='submit' value='Drug Search: Send to OpenFDA'></input>
        </form>
        <form method='get' action='listCompanies'>
        limit: <input type='text' name='limit'></input>
        <input type='submit' value='List of Company numbers: Send to OpenFDA'></input>
        </form>
        <form method='get' action='searchCompany'>
        company: <input type='text' name='company'></input>
        <input type='submit' value='Company Search: Send to OpenFDA'></input>
        </form>
        <form method='get' action='listGender'>
        limit: <input type='text' name='limit'></input>
        <input type='submit' value='listGender'></input>
        </form>
        </body>
        </html>
        '''
        return html

    def drug_page(self, medicamentos):
        """Return an ordered-list page with one <li> per entry of *medicamentos*."""
        # join is linear-time; the previous += loop rebuilt the string each pass.
        s = "".join("<li>" + drug + "</li>" for drug in medicamentos)
        html = '''
        <html>
        <head></head>
        <body>
        <ol>
        %s
        </ol>
        </body>
        </html>''' % (s)
        return html

    def not_found(self):
        """Return the 404 error page."""
        # NOTE: the "FIle" typo is kept to preserve the emitted HTML byte-for-byte.
        html = '''
        <html>
        <head>Error 404
        </head>
        <body>
        <h1>Error 404 FIle not found</h1>
        </body>
        </html>'''
        return html
class OpenFDAParser():
    """Extracts fields of interest from an OpenFDA events response dict."""

    def get_drug(self, events):
        """Return the first medicinal product of every event in the response."""
        return [event['patient']['drug'][0]['medicinalproduct']
                for event in events['results']]

    def get_com_num(self, events):
        """Return the company number of every event in the response."""
        return [event['companynumb'] for event in events['results']]

    def get_gender(self, events):
        """Return the patient sex code of every event in the response."""
        return [event['patient']['patientsex'] for event in events['results']]
class testHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
    """HTTP handler wiring the OpenFDA client, parser and HTML renderer together."""

    def do_GET(self):
        """Serve the main page or one of the list/search endpoints.

        Query values are taken naively as everything after the first '=' in
        the path; unknown paths get a 404 page.
        """
        client = OpenFDAClient()
        HTML = OpenFDAHTML()
        parser = OpenFDAParser()
        if self.path == '/':
            html = HTML.get_main_page()
            self.send_response(200)
        elif 'listDrugs' in self.path:
            limit = self.path.split('=')[1]
            events = client.get_event(limit)
            medicamentos = parser.get_drug(events)
            html = HTML.drug_page(medicamentos)
            self.send_response(200)
        elif 'searchDrug' in self.path:
            drug = self.path.split('=')[1]
            events = client.get_all_drugs(drug)
            # Lists the company numbers of the events matching the drug.
            com_num = parser.get_com_num(events)
            html = HTML.drug_page(com_num)
            self.send_response(200)
        elif 'listCompanies' in self.path:
            limit = self.path.split('=')[1]
            events = client.get_event(limit)
            com_num = parser.get_com_num(events)
            html = HTML.drug_page(com_num)
            self.send_response(200)
        elif 'searchCompany' in self.path:
            com_num = self.path.split('=')[1]
            events = client.get_medicinalproduct(com_num)
            medicinalproduct = parser.get_drug(events)
            html = HTML.drug_page(medicinalproduct)
            self.send_response(200)
        elif 'listGender' in self.path:
            limit = self.path.split('=')[1]
            events = client.get_event(limit)
            gender = parser.get_gender(events)
            html = HTML.drug_page(gender)
            self.send_response(200)
        else:
            html = HTML.not_found()
            self.send_response(404)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        self.wfile.write(bytes(html, 'utf8'))
        return
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import http.server
import http.client
import json
import socketserver
class OpenFDAClient():
OPENFDA_API_URL = "api.fda.gov"
OPENFDA_API_EVENT = "/drug/event.json"
def get_all_drugs(self,drug):
conn = http.client.HTTPSConnection(self.OPENFDA_API_URL)
conn.request("GET", self.OPENFDA_API_EVENT + '?search=patient.drug.medicinalproduct:'+drug+'&limit=10')
r1 = conn.getresponse()
print(r1.status, r1.reason)
data1 = r1.read()
data = data1.decode('utf8')
events = json.loads(data)
return events
def get_medicinalproduct(self,company):
conn = http.client.HTTPSConnection(self.OPENFDA_API_URL)
conn.request("GET", self.OPENFDA_API_EVENT + '?search=companynumb:'+company+'&limit=10')
r1 = conn.getresponse()
print(r1.status, r1.reason)
data1 = r1.read()
data = data1.decode('utf8')
events = json.loads(data)
return events
def get_event(self, limit):
conn = http.client.HTTPSConnection(self.OPENFDA_API_URL)
conn.request("GET", self.OPENFDA_API_EVENT + '?limit='+limit)
r1 = conn.getresponse()
print(r1.status, r1.reason)
data1 = r1.read()
data = data1.decode('utf8')
events = json.loads(data)
return events
class OpenFDAHTML():
def get_main_page(self):
html = '''
<html>
<head>
<title>OpenFDA app</title>
</head>
<body>
<h1>OpenFDA CloolApp</h1>
<form method='get' action='listDrugs'>
limit: <input type='text' name='limit'></input>
<input type='submit' value='Drug list: Send to OpenFDA'></input>
</form>
<form method='get' action='searchDrug'>
drug: <input type='text' name='drug'></input>
<input type='submit' value='Drug Search: Send to OpenFDA'></input>
</form>
<form method='get' action='listCompanies'>
limit: <input type='text' name='limit'></input>
<input type='submit' value='List of Company numbers: Send to OpenFDA'></input>
</form>
<form method='get' action='searchCompany'>
company: <input type='text' name='company'></input>
<input type='submit' value='Company Search: Send to OpenFDA'></input>
</form>
<form method='get' action='listGender'>
limit: <input type='text' name='limit'></input>
<input type='submit' value='listGender'></input>
</form>
</body>
</html>
'''
return html
def drug_page(self,medicamentos):
s=''
for drug in medicamentos:
s += "<li>"+drug+"</li>"
html='''
<html>
<head></head>
<body>
<ol>
%s
</ol>
</body>
</html>''' %(s)
return html
def not_found(self):
html='''
<html>
<head>Error 404
</head>
<body>
<h1>Error 404 FIle not found</h1>
</body>
</html>'''
return html
class OpenFDAParser():
def get_drug(self, events):
medicamentos=[]
for event in events['results']:
medicamentos+=[event['patient']['drug'][0]['medicinalproduct']]
return medicamentos
def get_com_num(self, events):
com_num=[]
for event in events['results']:
com_num+=[event['companynumb']]
return com_num
def get_gender(self,events):
gender=[]
for event in events['results']:
gender+=[event['patient']['patientsex']]
return gender
class testHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
client = OpenFDAClient()
HTML = OpenFDAHTML()
parser = OpenFDAParser()
if self.path == '/' :
html = HTML.get_main_page()
self.send_response(200)
elif 'listDrugs' in self.path:
limit=self.path.split('=')[1]
events = client.get_event(limit)
medicamentos = parser.get_drug(events)
html = HTML.drug_page(medicamentos)
self.send_response(200)
elif 'searchDrug' in self.path:
drug=self.path.split('=')[1]
events = client.get_all_drugs(drug)
com_num=parser.get_com_num(events)
html=HTML.drug_page(com_num)
self.send_response(200)
elif 'listCompanies' in self.path:
limit=self.path.split('=')[1]
events = client.get_event(limit)
com_num = parser.get_com_num(events)
html = HTML.drug_page(com_num)
self.send_response(200)
elif 'searchCompany' in self.path:
com_num=self.path.split('=')[1]
events = client.get_medicinalproduct(com_num)
medicinalproduct= parser.get_drug(events)
html = HTML.drug_page(medicinalproduct)
self.send_response(200)
elif 'listGender' in self.path:
limit=self.path.split('=')[1]
events=client.get_event(limit)
gender=parser.get_gender(events)
html = HTML.drug_page(gender)
self.send_response(200)
else:
html=HTML.not_found()
self.send_response(404)
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write(bytes(html,'utf8'))
return | 0.375363 | 0.093927 |
from aiida.orm.data.int import Int
from aiida.orm.data.list import List
from aiida.orm.data.str import Str
from aiida.orm.calculation.inline import make_inline
from aiida.work import submit
from aiida.work.persistence import ObjectLoader
from aiida.work.workfunctions import workfunction
from aiida.work.workchain import WorkChain, ToContext, append_
class NestedWorkChain(WorkChain):
    """
    Nested workchain which creates a workflow where the nesting level is equal to its input.
    """
    @classmethod
    def define(cls, spec):
        # Declare one Int input, a two-step outline and one required Int output.
        super(NestedWorkChain, cls).define(spec)
        spec.input('inp', valid_type=Int)
        spec.outline(
            cls.do_submit,
            cls.finalize
        )
        spec.output('output', valid_type=Int, required=True)
    def do_submit(self):
        # Recurse: submit a child workchain with a decremented input until
        # the input reaches zero; the child is appended to ctx.workchain.
        if self.should_submit():
            self.report('Submitting nested workchain.')
            return ToContext(
                workchain=append_(self.submit(
                    NestedWorkChain,
                    inp=self.inputs.inp - 1
                ))
            )
    def should_submit(self):
        # Keep nesting while the input is still positive.
        return int(self.inputs.inp) > 0
    def finalize(self):
        # Propagate the nesting depth upwards: each level adds one to its
        # child's output; the bottom level emits zero.
        if self.should_submit():
            self.report('Getting sub-workchain output.')
            sub_workchain = self.ctx.workchain[0]
            self.out('output', sub_workchain.out.output + 1)
        else:
            self.report('Bottom-level workchain reached.')
            self.out('output', Int(0))
class SerializeWorkChain(WorkChain):
    """Workchain whose 'test' input is serialized to a Str node via the ObjectLoader."""
    @classmethod
    def define(cls, spec):
        super(SerializeWorkChain, cls).define(spec)
        spec.input(
            'test',
            valid_type=Str,
            # Store the fully qualified identifier of the passed object as a Str.
            serializer=lambda x: Str(ObjectLoader().identify_object(x))
        )
        spec.outline(cls.echo)
    def echo(self):
        # Emit the (serialized) input unchanged.
        self.out('output', self.inputs.test)
class NestedInputNamespace(WorkChain):
    """Workchain exposing a nested input namespace 'foo.bar.baz' and echoing it."""
    @classmethod
    def define(cls, spec):
        super(NestedInputNamespace, cls).define(spec)
        spec.input('foo.bar.baz', valid_type=Int)
        spec.output('output', valid_type=Int)
        spec.outline(cls.do_echo)
    def do_echo(self):
        # Echo the nested input back out unchanged.
        self.out('output', self.inputs.foo.bar.baz)
class ListEcho(WorkChain):
    """Workchain that echoes its List input to its output."""
    @classmethod
    def define(cls, spec):
        super(ListEcho, cls).define(spec)
        spec.input('list', valid_type=List)
        spec.output('output', valid_type=List)
        spec.outline(cls.do_echo)
    def do_echo(self):
        # Echo the input list back out unchanged.
        self.out('output', self.inputs.list)
class DynamicNonDbInput(WorkChain):
    """Checks that a dynamic namespace passes a plain (non-DB) python list through."""
    @classmethod
    def define(cls, spec):
        super(DynamicNonDbInput, cls).define(spec)
        spec.input_namespace('namespace', dynamic=True)
        spec.output('output', valid_type=List)
        spec.outline(cls.do_test)
    def do_test(self):
        input_list = self.inputs.namespace.input
        # The input must arrive as a builtin list, not as an AiiDA List node.
        assert isinstance(input_list, list)
        assert not isinstance(input_list, List)
        self.out('output', List(list=list(input_list)))
class DynamicDbInput(WorkChain):
    """Checks that a dynamic namespace passes an AiiDA Int node through."""
    @classmethod
    def define(cls, spec):
        super(DynamicDbInput, cls).define(spec)
        spec.input_namespace('namespace', dynamic=True)
        spec.output('output', valid_type=Int)
        spec.outline(cls.do_test)
    def do_test(self):
        input_value = self.inputs.namespace.input
        # The input must arrive as an AiiDA Int node.
        assert isinstance(input_value, Int)
        self.out('output', input_value)
class DynamicMixedInput(WorkChain):
    """Checks a dynamic namespace mixing a DB Int node with a plain python int."""
    @classmethod
    def define(cls, spec):
        super(DynamicMixedInput, cls).define(spec)
        spec.input_namespace('namespace', dynamic=True)
        spec.output('output', valid_type=Int)
        spec.outline(cls.do_test)
    def do_test(self):
        input_non_db = self.inputs.namespace.inputs['input_non_db']
        input_db = self.inputs.namespace.inputs['input_db']
        # One input must stay a builtin int, the other an AiiDA Int node.
        assert isinstance(input_non_db, int)
        assert not isinstance(input_non_db, Int)
        assert isinstance(input_db, Int)
        self.out('output', input_db + input_non_db)
class InlineCalcRunnerWorkChain(WorkChain):
    """
    WorkChain which calls an InlineCalculation in its step.
    """
    @classmethod
    def define(cls, spec):
        super(InlineCalcRunnerWorkChain, cls).define(spec)
        spec.input('input', valid_type=Str)
        spec.output('output', valid_type=Str)
        spec.outline(cls.do_run)
    def do_run(self):
        # echo_inline returns an indexable pair; element [1] holds the outputs
        # dict (presumably make_inline's (calc, outputs) convention -- confirm).
        self.out('output', echo_inline(input_string=self.inputs.input)[1]['output'])
class WorkFunctionRunnerWorkChain(WorkChain):
    """
    WorkChain which calls a workfunction in its step
    """
    @classmethod
    def define(cls, spec):
        super(WorkFunctionRunnerWorkChain, cls).define(spec)
        spec.input('input', valid_type=Str)
        spec.output('output', valid_type=Str)
        spec.outline(cls.do_run)
    def do_run(self):
        # Delegate to the echo workfunction and emit its result.
        self.out('output', echo(self.inputs.input))
@workfunction
def echo(value):
    """Workfunction that returns its single input unchanged."""
    return value
@make_inline
def echo_inline(input_string):
    """Inline calculation that returns its input under the 'output' key."""
    return {'output': input_string}
from aiida.orm.data.list import List
from aiida.orm.data.str import Str
from aiida.orm.calculation.inline import make_inline
from aiida.work import submit
from aiida.work.persistence import ObjectLoader
from aiida.work.workfunctions import workfunction
from aiida.work.workchain import WorkChain, ToContext, append_
class NestedWorkChain(WorkChain):
"""
Nested workchain which creates a workflow where the nesting level is equal to its input.
"""
@classmethod
def define(cls, spec):
super(NestedWorkChain, cls).define(spec)
spec.input('inp', valid_type=Int)
spec.outline(
cls.do_submit,
cls.finalize
)
spec.output('output', valid_type=Int, required=True)
def do_submit(self):
if self.should_submit():
self.report('Submitting nested workchain.')
return ToContext(
workchain=append_(self.submit(
NestedWorkChain,
inp=self.inputs.inp - 1
))
)
def should_submit(self):
return int(self.inputs.inp) > 0
def finalize(self):
if self.should_submit():
self.report('Getting sub-workchain output.')
sub_workchain = self.ctx.workchain[0]
self.out('output', sub_workchain.out.output + 1)
else:
self.report('Bottom-level workchain reached.')
self.out('output', Int(0))
class SerializeWorkChain(WorkChain):
@classmethod
def define(cls, spec):
super(SerializeWorkChain, cls).define(spec)
spec.input(
'test',
valid_type=Str,
serializer=lambda x: Str(ObjectLoader().identify_object(x))
)
spec.outline(cls.echo)
def echo(self):
self.out('output', self.inputs.test)
class NestedInputNamespace(WorkChain):
@classmethod
def define(cls, spec):
super(NestedInputNamespace, cls).define(spec)
spec.input('foo.bar.baz', valid_type=Int)
spec.output('output', valid_type=Int)
spec.outline(cls.do_echo)
def do_echo(self):
self.out('output', self.inputs.foo.bar.baz)
class ListEcho(WorkChain):
@classmethod
def define(cls, spec):
super(ListEcho, cls).define(spec)
spec.input('list', valid_type=List)
spec.output('output', valid_type=List)
spec.outline(cls.do_echo)
def do_echo(self):
self.out('output', self.inputs.list)
class DynamicNonDbInput(WorkChain):
@classmethod
def define(cls, spec):
super(DynamicNonDbInput, cls).define(spec)
spec.input_namespace('namespace', dynamic=True)
spec.output('output', valid_type=List)
spec.outline(cls.do_test)
def do_test(self):
input_list = self.inputs.namespace.input
assert isinstance(input_list, list)
assert not isinstance(input_list, List)
self.out('output', List(list=list(input_list)))
class DynamicDbInput(WorkChain):
@classmethod
def define(cls, spec):
super(DynamicDbInput, cls).define(spec)
spec.input_namespace('namespace', dynamic=True)
spec.output('output', valid_type=Int)
spec.outline(cls.do_test)
def do_test(self):
input_value = self.inputs.namespace.input
assert isinstance(input_value, Int)
self.out('output', input_value)
class DynamicMixedInput(WorkChain):
@classmethod
def define(cls, spec):
super(DynamicMixedInput, cls).define(spec)
spec.input_namespace('namespace', dynamic=True)
spec.output('output', valid_type=Int)
spec.outline(cls.do_test)
def do_test(self):
input_non_db = self.inputs.namespace.inputs['input_non_db']
input_db = self.inputs.namespace.inputs['input_db']
assert isinstance(input_non_db, int)
assert not isinstance(input_non_db, Int)
assert isinstance(input_db, Int)
self.out('output', input_db + input_non_db)
class InlineCalcRunnerWorkChain(WorkChain):
"""
WorkChain which calls an InlineCalculation in its step.
"""
@classmethod
def define(cls, spec):
super(InlineCalcRunnerWorkChain, cls).define(spec)
spec.input('input', valid_type=Str)
spec.output('output', valid_type=Str)
spec.outline(cls.do_run)
def do_run(self):
self.out('output', echo_inline(input_string=self.inputs.input)[1]['output'])
class WorkFunctionRunnerWorkChain(WorkChain):
"""
WorkChain which calls a workfunction in its step
"""
@classmethod
def define(cls, spec):
super(WorkFunctionRunnerWorkChain, cls).define(spec)
spec.input('input', valid_type=Str)
spec.output('output', valid_type=Str)
spec.outline(cls.do_run)
def do_run(self):
self.out('output', echo(self.inputs.input))
@workfunction
def echo(value):
return value
@make_inline
def echo_inline(input_string):
return {'output': input_string} | 0.697403 | 0.286576 |
from logging import getLogger
from ClusterShell.NodeSet import NodeSet
def _stat_metric_names(basename):
    """Expand a telemetry stats metric into its full set of metric names.

    Every engine I/O stats gauge is exported as the base metric name plus a
    max/mean/min/stddev variant, so generate the five names instead of
    hand-listing them (identical values, typo-proof).

    Args:
        basename (str): base telemetry metric name

    Returns:
        list: the base metric name followed by its four statistic variants
    """
    return [basename] + [
        "{}_{}".format(basename, stat) for stat in ("max", "mean", "min", "stddev")]


class TelemetryUtils():
    # pylint: disable=too-many-nested-blocks
    """Defines a object used to verify telemetry information."""

    # Per-pool container operation metrics
    ENGINE_CONTAINER_METRICS = [
        "engine_pool_container_handles",
        "engine_pool_ops_cont_close",
        "engine_pool_ops_cont_destroy",
        "engine_pool_ops_cont_open"]

    # Per-pool operation/transfer metrics
    ENGINE_POOL_METRICS = [
        "engine_pool_entries_dtx_batched_degree",
        "engine_pool_entries_dtx_batched_total",
        "engine_pool_ops_akey_enum",
        "engine_pool_ops_akey_punch",
        "engine_pool_ops_compound",
        "engine_pool_ops_dkey_enum",
        "engine_pool_ops_dkey_punch",
        "engine_pool_ops_dtx_abort",
        "engine_pool_ops_dtx_check",
        "engine_pool_ops_dtx_commit",
        "engine_pool_ops_dtx_refresh",
        "engine_pool_ops_ec_agg",
        "engine_pool_ops_ec_rep",
        "engine_pool_ops_fetch",
        "engine_pool_ops_key_query",
        "engine_pool_ops_migrate",
        "engine_pool_ops_obj_enum",
        "engine_pool_ops_obj_punch",
        "engine_pool_ops_obj_sync",
        "engine_pool_ops_recx_enum",
        "engine_pool_ops_tgt_akey_punch",
        "engine_pool_ops_tgt_dkey_punch",
        "engine_pool_ops_tgt_punch",
        "engine_pool_ops_tgt_update",
        "engine_pool_ops_update",
        "engine_pool_pool_handles",
        "engine_pool_resent",
        "engine_pool_restarted",
        "engine_pool_started_at",
        "engine_pool_xferred_fetch",
        "engine_pool_xferred_update"]

    # Engine lifecycle/event metrics
    ENGINE_EVENT_METRICS = [
        "engine_events_dead_ranks",
        "engine_events_last_event_ts",
        "engine_servicing_at",
        "engine_started_at"]

    # Each engine I/O stats metric is exported as <name> plus the
    # <name>_max/_mean/_min/_stddev variants - see _stat_metric_names().
    ENGINE_IO_DTX_COMMITTABLE_METRICS = _stat_metric_names(
        "engine_io_dtx_committable")
    ENGINE_IO_DTX_COMMITTED_METRICS = _stat_metric_names(
        "engine_io_dtx_committed")
    ENGINE_IO_LATENCY_FETCH_METRICS = _stat_metric_names(
        "engine_io_latency_fetch")
    ENGINE_IO_LATENCY_UPDATE_METRICS = _stat_metric_names(
        "engine_io_latency_update")
    ENGINE_IO_OPS_AKEY_ENUM_METRICS = _stat_metric_names(
        "engine_io_ops_akey_enum_active")
    ENGINE_IO_OPS_AKEY_ENUM_LATENCY_METRICS = _stat_metric_names(
        "engine_io_ops_akey_enum_latency")
    ENGINE_IO_OPS_AKEY_PUNCH_ACTIVE_METRICS = _stat_metric_names(
        "engine_io_ops_akey_punch_active")
    ENGINE_IO_OPS_AKEY_PUNCH_LATENCY_METRICS = _stat_metric_names(
        "engine_io_ops_akey_punch_latency")
    ENGINE_IO_OPS_COMPOUND_ACTIVE_METRICS = _stat_metric_names(
        "engine_io_ops_compound_active")
    ENGINE_IO_OPS_COMPOUND_LATENCY_METRICS = _stat_metric_names(
        "engine_io_ops_compound_latency")
    ENGINE_IO_OPS_DKEY_ENUM_ACTIVE_METRICS = _stat_metric_names(
        "engine_io_ops_dkey_enum_active")
    ENGINE_IO_OPS_DKEY_ENUM_LATENCY_METRICS = _stat_metric_names(
        "engine_io_ops_dkey_enum_latency")
    ENGINE_IO_OPS_DKEY_PUNCH_ACTIVE_METRICS = _stat_metric_names(
        "engine_io_ops_dkey_punch_active")
    ENGINE_IO_OPS_DKEY_PUNCH_LATENCY_METRICS = _stat_metric_names(
        "engine_io_ops_dkey_punch_latency")
    ENGINE_IO_OPS_EC_AGG_ACTIVE_METRICS = _stat_metric_names(
        "engine_io_ops_ec_agg_active")
    ENGINE_IO_OPS_EC_AGG_LATENCY_METRICS = _stat_metric_names(
        "engine_io_ops_ec_agg_latency")
    ENGINE_IO_OPS_EC_REP_ACTIVE_METRICS = _stat_metric_names(
        "engine_io_ops_ec_rep_active")
    ENGINE_IO_OPS_EC_REP_LATENCY_METRICS = _stat_metric_names(
        "engine_io_ops_ec_rep_latency")
    ENGINE_IO_OPS_FETCH_ACTIVE_METRICS = _stat_metric_names(
        "engine_io_ops_fetch_active")
    ENGINE_IO_OPS_KEY_QUERY_ACTIVE_METRICS = _stat_metric_names(
        "engine_io_ops_key_query_active")
    ENGINE_IO_OPS_KEY_QUERY_LATENCY_METRICS = _stat_metric_names(
        "engine_io_ops_key_query_latency")
    ENGINE_IO_OPS_MIGRATE_ACTIVE_METRICS = _stat_metric_names(
        "engine_io_ops_migrate_active")
    ENGINE_IO_OPS_MIGRATE_LATENCY_METRICS = _stat_metric_names(
        "engine_io_ops_migrate_latency")
    ENGINE_IO_OPS_OBJ_ENUM_ACTIVE_METRICS = _stat_metric_names(
        "engine_io_ops_obj_enum_active")
    ENGINE_IO_OPS_OBJ_ENUM_LATENCY_METRICS = _stat_metric_names(
        "engine_io_ops_obj_enum_latency")
    ENGINE_IO_OPS_OBJ_PUNCH_ACTIVE_METRICS = _stat_metric_names(
        "engine_io_ops_obj_punch_active")
    ENGINE_IO_OPS_OBJ_PUNCH_LATENCY_METRICS = _stat_metric_names(
        "engine_io_ops_obj_punch_latency")
    ENGINE_IO_OPS_OBJ_SYNC_ACTIVE_METRICS = _stat_metric_names(
        "engine_io_ops_obj_sync_active")
    ENGINE_IO_OPS_OBJ_SYNC_LATENCY_METRICS = _stat_metric_names(
        "engine_io_ops_obj_sync_latency")
    ENGINE_IO_OPS_RECX_ENUM_ACTIVE_METRICS = _stat_metric_names(
        "engine_io_ops_recx_enum_active")
    ENGINE_IO_OPS_RECX_ENUM_LATENCY_METRICS = _stat_metric_names(
        "engine_io_ops_recx_enum_latency")
    ENGINE_IO_OPS_TGT_AKEY_PUNCH_ACTIVE_METRICS = _stat_metric_names(
        "engine_io_ops_tgt_akey_punch_active")
    ENGINE_IO_OPS_TGT_AKEY_PUNCH_LATENCY_METRICS = _stat_metric_names(
        "engine_io_ops_tgt_akey_punch_latency")
    ENGINE_IO_OPS_TGT_DKEY_PUNCH_ACTIVE_METRICS = _stat_metric_names(
        "engine_io_ops_tgt_dkey_punch_active")
    ENGINE_IO_OPS_TGT_DKEY_PUNCH_LATENCY_METRICS = _stat_metric_names(
        "engine_io_ops_tgt_dkey_punch_latency")
    ENGINE_IO_OPS_TGT_PUNCH_ACTIVE_METRICS = _stat_metric_names(
        "engine_io_ops_tgt_punch_active")
    ENGINE_IO_OPS_TGT_PUNCH_LATENCY_METRICS = _stat_metric_names(
        "engine_io_ops_tgt_punch_latency")
    ENGINE_IO_OPS_TGT_UPDATE_ACTIVE_METRICS = _stat_metric_names(
        "engine_io_ops_tgt_update_active")
    ENGINE_IO_OPS_UPDATE_ACTIVE_METRICS = _stat_metric_names(
        "engine_io_ops_update_active")

    # Aggregate of all the per-I/O stats metrics above, in definition order
    ENGINE_IO_METRICS = (
        ENGINE_IO_DTX_COMMITTABLE_METRICS
        + ENGINE_IO_DTX_COMMITTED_METRICS
        + ENGINE_IO_LATENCY_FETCH_METRICS
        + ENGINE_IO_LATENCY_UPDATE_METRICS
        + ENGINE_IO_OPS_AKEY_ENUM_METRICS
        + ENGINE_IO_OPS_AKEY_ENUM_LATENCY_METRICS
        + ENGINE_IO_OPS_AKEY_PUNCH_ACTIVE_METRICS
        + ENGINE_IO_OPS_AKEY_PUNCH_LATENCY_METRICS
        + ENGINE_IO_OPS_COMPOUND_ACTIVE_METRICS
        + ENGINE_IO_OPS_COMPOUND_LATENCY_METRICS
        + ENGINE_IO_OPS_DKEY_ENUM_ACTIVE_METRICS
        + ENGINE_IO_OPS_DKEY_ENUM_LATENCY_METRICS
        + ENGINE_IO_OPS_DKEY_PUNCH_ACTIVE_METRICS
        + ENGINE_IO_OPS_DKEY_PUNCH_LATENCY_METRICS
        + ENGINE_IO_OPS_EC_AGG_ACTIVE_METRICS
        + ENGINE_IO_OPS_EC_AGG_LATENCY_METRICS
        + ENGINE_IO_OPS_EC_REP_ACTIVE_METRICS
        + ENGINE_IO_OPS_EC_REP_LATENCY_METRICS
        + ENGINE_IO_OPS_FETCH_ACTIVE_METRICS
        + ENGINE_IO_OPS_KEY_QUERY_ACTIVE_METRICS
        + ENGINE_IO_OPS_KEY_QUERY_LATENCY_METRICS
        + ENGINE_IO_OPS_MIGRATE_ACTIVE_METRICS
        + ENGINE_IO_OPS_MIGRATE_LATENCY_METRICS
        + ENGINE_IO_OPS_OBJ_ENUM_ACTIVE_METRICS
        + ENGINE_IO_OPS_OBJ_ENUM_LATENCY_METRICS
        + ENGINE_IO_OPS_OBJ_PUNCH_ACTIVE_METRICS
        + ENGINE_IO_OPS_OBJ_PUNCH_LATENCY_METRICS
        + ENGINE_IO_OPS_OBJ_SYNC_ACTIVE_METRICS
        + ENGINE_IO_OPS_OBJ_SYNC_LATENCY_METRICS
        + ENGINE_IO_OPS_RECX_ENUM_ACTIVE_METRICS
        + ENGINE_IO_OPS_RECX_ENUM_LATENCY_METRICS
        + ENGINE_IO_OPS_TGT_AKEY_PUNCH_ACTIVE_METRICS
        + ENGINE_IO_OPS_TGT_AKEY_PUNCH_LATENCY_METRICS
        + ENGINE_IO_OPS_TGT_DKEY_PUNCH_ACTIVE_METRICS
        + ENGINE_IO_OPS_TGT_DKEY_PUNCH_LATENCY_METRICS
        + ENGINE_IO_OPS_TGT_PUNCH_ACTIVE_METRICS
        + ENGINE_IO_OPS_TGT_PUNCH_LATENCY_METRICS
        + ENGINE_IO_OPS_TGT_UPDATE_ACTIVE_METRICS
        + ENGINE_IO_OPS_UPDATE_ACTIVE_METRICS)

    # Network fabric metrics
    ENGINE_NET_METRICS = [
        "engine_net_ofi_sockets_failed_addr",
        "engine_net_ofi_sockets_req_timeout",
        "engine_net_ofi_sockets_uri_lookup_timeout",
        "engine_net_uri_lookup_other",
        "engine_net_uri_lookup_self"]

    ENGINE_RANK_METRICS = [
        "engine_rank"]

    # Go runtime metrics exported by the control plane
    GO_METRICS = [
        "go_gc_duration_seconds",
        "go_goroutines",
        "go_info",
        "go_memstats_alloc_bytes",
        "go_memstats_alloc_bytes_total",
        "go_memstats_buck_hash_sys_bytes",
        "go_memstats_frees_total",
        "go_memstats_gc_cpu_fraction",
        "go_memstats_gc_sys_bytes",
        "go_memstats_heap_alloc_bytes",
        "go_memstats_heap_idle_bytes",
        "go_memstats_heap_inuse_bytes",
        "go_memstats_heap_objects",
        "go_memstats_heap_released_bytes",
        "go_memstats_heap_sys_bytes",
        "go_memstats_last_gc_time_seconds",
        "go_memstats_lookups_total",
        "go_memstats_mallocs_total",
        "go_memstats_mcache_inuse_bytes",
        "go_memstats_mcache_sys_bytes",
        "go_memstats_mspan_inuse_bytes",
        "go_memstats_mspan_sys_bytes",
        "go_memstats_next_gc_bytes",
        "go_memstats_other_sys_bytes",
        "go_memstats_stack_inuse_bytes",
        "go_memstats_stack_sys_bytes",
        "go_memstats_sys_bytes",
        "go_threads"]

    # Server process metrics
    PROCESS_METRICS = [
        "process_cpu_seconds_total",
        "process_max_fds",
        "process_open_fds",
        "process_resident_memory_bytes",
        "process_start_time_seconds",
        "process_virtual_memory_bytes",
        "process_virtual_memory_max_bytes"]

    # Per-device NVMe metrics; '<id>' is replaced with the device transport
    # address (':' and '.' converted to '_') when generating expected names.
    ENGINE_NVME_METRICS = [
        "engine_nvme_<id>_commands_checksum_mismatch",
        "engine_nvme_<id>_commands_ctrl_busy_time",
        "engine_nvme_<id>_commands_data_units_read",
        "engine_nvme_<id>_commands_data_units_written",
        "engine_nvme_<id>_commands_host_read_cmds",
        "engine_nvme_<id>_commands_host_write_cmds",
        "engine_nvme_<id>_commands_media_errs",
        "engine_nvme_<id>_commands_read_errs",
        "engine_nvme_<id>_commands_unmap_errs",
        "engine_nvme_<id>_commands_write_errs",
        "engine_nvme_<id>_power_cycles",
        "engine_nvme_<id>_power_on_hours",
        "engine_nvme_<id>_read_only_warn",
        "engine_nvme_<id>_reliability_avail_spare",
        "engine_nvme_<id>_reliability_avail_spare_threshold",
        "engine_nvme_<id>_reliability_avail_spare_warn",
        "engine_nvme_<id>_reliability_percentage_used",
        "engine_nvme_<id>_reliability_reliability_warn",
        "engine_nvme_<id>_temp_crit_time",
        "engine_nvme_<id>_temp_current",
        "engine_nvme_<id>_temp_warn",
        "engine_nvme_<id>_temp_warn_time",
        "engine_nvme_<id>_unsafe_shutdowns",
        "engine_nvme_<id>_volatile_mem_warn",
        "engine_nvme_<id>_vendor_program_fail_cnt_norm",
        "engine_nvme_<id>_vendor_program_fail_cnt_raw",
        "engine_nvme_<id>_vendor_erase_fail_cnt_norm",
        "engine_nvme_<id>_vendor_erase_fail_cnt_raw",
        "engine_nvme_<id>_vendor_wear_leveling_cnt_norm",
        "engine_nvme_<id>_vendor_wear_leveling_cnt_min",
        "engine_nvme_<id>_vendor_wear_leveling_cnt_max",
        "engine_nvme_<id>_vendor_wear_leveling_cnt_avg",
        "engine_nvme_<id>_vendor_endtoend_err_cnt_raw",
        "engine_nvme_<id>_vendor_crc_err_cnt_raw",
        "engine_nvme_<id>_vendor_media_wear_raw",
        "engine_nvme_<id>_vendor_host_reads_raw",
        "engine_nvme_<id>_vendor_crc_workload_timer_raw",
        "engine_nvme_<id>_vendor_thermal_throttle_status_raw",
        "engine_nvme_<id>_vendor_thermal_throttle_event_cnt",
        "engine_nvme_<id>_vendor_retry_buffer_overflow_cnt",
        "engine_nvme_<id>_vendor_pll_lock_loss_cnt",
        "engine_nvme_<id>_vendor_nand_bytes_written",
        "engine_nvme_<id>_vendor_host_bytes_written"]
def __init__(self, dmg, servers):
    """Create a TelemetryUtils object.

    Args:
        dmg (DmgCommand): the DmgCommand object configured to communicate
            with the servers
        servers (list): a list of server host names
    """
    # The dmg command is used for all telemetry list/query requests
    self.dmg = dmg
    self.hosts = NodeSet.fromlist(servers)
    self.log = getLogger(__name__)
def get_all_server_metrics_names(self, server, with_pools=False):
"""Get all the telemetry metrics names for this server.
Args:
server (DaosServerCommand): the server from which to determine what
metrics will be available
Returns:
list: all of the telemetry metrics names for this server
"""
all_metrics_names = list(self.ENGINE_EVENT_METRICS)
all_metrics_names.extend(self.ENGINE_IO_METRICS)
all_metrics_names.extend(self.ENGINE_NET_METRICS)
all_metrics_names.extend(self.ENGINE_RANK_METRICS)
all_metrics_names.extend(self.GO_METRICS)
all_metrics_names.extend(self.PROCESS_METRICS)
if with_pools:
all_metrics_names.extend(self.ENGINE_POOL_METRICS)
all_metrics_names.extend(self.ENGINE_CONTAINER_METRICS)
# Add NVMe metrics for any NVMe devices configured for this server
for nvme_list in server.manager.job.get_engine_values("bdev_list"):
for nvme in nvme_list if nvme_list is not None else []:
# Replace the '<id>' placeholder with the actual NVMe ID
nvme_id = nvme.replace(":", "_").replace(".", "_")
nvme_metrics = [
name.replace("<id>", nvme_id)
for name in self.ENGINE_NVME_METRICS]
all_metrics_names.extend(nvme_metrics)
return all_metrics_names
def list_metrics(self):
"""List the available metrics for each host.
Returns:
dict: a dictionary of host keys linked to a list of metric names
"""
info = {}
self.log.info("Listing telemetry metrics from %s", self.hosts)
for host in self.hosts:
data = self.dmg.telemetry_metrics_list(host=host)
info[host] = []
if "response" in data:
if "available_metric_sets" in data["response"]:
for entry in data["response"]["available_metric_sets"]:
if "name" in entry:
info[host].append(entry["name"])
return info
def get_metrics(self, name):
"""Obtain the specified metric information for each host.
Args:
name (str): Comma-separated list of metric names to query.
Returns:
dict: a dictionary of host keys linked to metric data for each
metric name specified
"""
info = {}
self.log.info("Querying telemetry metric %s from %s", name, self.hosts)
for host in self.hosts:
data = self.dmg.telemetry_metrics_query(host=host, metrics=name)
info[host] = {}
if "response" in data:
if "metric_sets" in data["response"]:
for entry in data["response"]["metric_sets"]:
info[host][entry["name"]] = {
"description": entry["description"],
"metrics": entry["metrics"]
}
return info
def get_container_metrics(self):
"""Get the container telemetry metrics.
Returns:
dict: dictionary of dictionaries of container metric names and
values per server host key
"""
data = {}
info = self.get_metrics(",".join(self.ENGINE_CONTAINER_METRICS))
self.log.info("Container Telemetry Information")
for host in info:
data[host] = {name: 0 for name in self.ENGINE_CONTAINER_METRICS}
for name in self.ENGINE_CONTAINER_METRICS:
if name in info[host]:
for metric in info[host][name]["metrics"]:
self.log.info(
" %s (%s): %s (%s)",
info[host][name]["description"], name,
metric["value"], host)
data[host][name] = metric["value"]
return data
def get_pool_metrics(self, specific_metrics=None):
"""Get the pool telemetry metrics.
Args:
specific_metrics(list): list of specific pool metrics
Returns:
dict: dictionary of dictionaries of pool metric names and
values per server host key
"""
data = {}
if specific_metrics is None:
specific_metrics = self.ENGINE_POOL_METRICS
info = self.get_metrics(",".join(specific_metrics))
self.log.info("Pool Telemetry Information")
for name in specific_metrics:
for index, host in enumerate(info):
if name in info[host]:
if index == 0:
self.log.info(
" %s (%s):",
name, info[host][name]["description"])
self.log.info(
" %-12s %-4s %-6s %s",
"Host", "Rank", "Target", "Value")
if name not in data:
data[name] = {}
if host not in data[name]:
data[name][host] = {}
for metric in info[host][name]["metrics"]:
if "labels" in metric:
if ("rank" in metric["labels"]
and "target" in metric["labels"]):
rank = metric["labels"]["rank"]
target = metric["labels"]["target"]
if rank not in data[name][host]:
data[name][host][rank] = {}
if target not in data[name][host][rank]:
data[name][host][rank][target] = {}
data[name][host][rank][target] = \
metric["value"]
self.log.info(
" %-12s %-4s %-6s %s",
host, rank, target, metric["value"])
return data
def get_io_metrics(self, test_metrics=None):
"""Get the io telemetry metrics.
Args:
test_metrics (str list, optional): Comma-separated list of metric
names to query. By default, test_metrics is entire
ENGINE_IO_METRICS.
Returns:
dict: dictionary of dictionaries of container metric names and
values per server host key
"""
data = {}
if test_metrics is None:
test_metrics = self.ENGINE_IO_METRICS
info = self.get_metrics(",".join(test_metrics))
self.log.info("Telemetry Information")
for name in test_metrics:
for index, host in enumerate(info):
if name in info[host]:
if index == 0:
self.log.info(
" %s (%s):",
name, info[host][name]["description"])
self.log.info(
" %-12s %-4s %-6s %-6s %s",
"Host", "Rank", "Target", "Size", "Value")
if name not in data:
data[name] = {}
if host not in data[name]:
data[name][host] = {}
for metric in info[host][name]["metrics"]:
if "labels" in metric:
if ("rank" in metric["labels"]
and "target" in metric["labels"]
and "size" in metric["labels"]):
rank = metric["labels"]["rank"]
target = metric["labels"]["target"]
size = metric["labels"]["size"]
if rank not in data[name][host]:
data[name][host][rank] = {}
if target not in data[name][host][rank]:
data[name][host][rank][target] = {}
data[name][host][rank][target][size] = \
metric["value"]
self.log.info(
" %-12s %-4s %-6s %-6s %s",
host, rank, target, size, metric["value"])
elif ("rank" in metric["labels"]
and "target" in metric["labels"]):
rank = metric["labels"]["rank"]
target = metric["labels"]["target"]
if rank not in data[name][host]:
data[name][host][rank] = {}
if target not in data[name][host][rank]:
data[name][host][rank][target] = {}
data[name][host][rank][target]["-"] = \
metric["value"]
self.log.info(
" %-12s %-4s %-6s %-6s %s",
host, rank, target, "-", metric["value"])
return data
def check_container_metrics(self, open_count=None, active_count=None,
close_count=None, destroy_count=None):
"""Verify the container telemetry metrics.
Args:
open_count (dict, optional): Number of times cont_open has been
called per host key. Defaults to None.
active_count (dict, optional): Number of open container handles per
host key. Defaults to None.
close_count (dict, optional): Number of times cont_close has been
called per host key. Defaults to None.
destroy_count (dict, optional): Number of times cont_destroy has
been called per host key. Defaults to None.
Returns:
list: list of errors detected
"""
errors = []
expected = {
"engine_pool_ops_cont_open": open_count,
"engine_pool_container_handles": active_count,
"engine_pool_ops_cont_close": close_count,
"engine_pool_ops_cont_destroy": destroy_count,
}
data = self.get_container_metrics()
for host in data:
for name in expected:
if name in data[host]:
if (expected[name] is not None
and host in expected[name]
and expected[name][host] != data[host][name]):
errors.append(
"{} mismatch on {}: expected={}; actual={}".format(
name, host, expected[name][host],
data[host][name]))
else:
errors.append("No {} data for {}".format(name, host))
return errors | src/tests/ftest/util/telemetry_utils.py | from logging import getLogger
from ClusterShell.NodeSet import NodeSet
class TelemetryUtils():
# pylint: disable=too-many-nested-blocks
"""Defines a object used to verify telemetry information."""
ENGINE_CONTAINER_METRICS = [
"engine_pool_container_handles",
"engine_pool_ops_cont_close",
"engine_pool_ops_cont_destroy",
"engine_pool_ops_cont_open"]
ENGINE_POOL_METRICS = [
"engine_pool_entries_dtx_batched_degree",
"engine_pool_entries_dtx_batched_total",
"engine_pool_ops_akey_enum",
"engine_pool_ops_akey_punch",
"engine_pool_ops_compound",
"engine_pool_ops_dkey_enum",
"engine_pool_ops_dkey_punch",
"engine_pool_ops_dtx_abort",
"engine_pool_ops_dtx_check",
"engine_pool_ops_dtx_commit",
"engine_pool_ops_dtx_refresh",
"engine_pool_ops_ec_agg",
"engine_pool_ops_ec_rep",
"engine_pool_ops_fetch",
"engine_pool_ops_key_query",
"engine_pool_ops_migrate",
"engine_pool_ops_obj_enum",
"engine_pool_ops_obj_punch",
"engine_pool_ops_obj_sync",
"engine_pool_ops_recx_enum",
"engine_pool_ops_tgt_akey_punch",
"engine_pool_ops_tgt_dkey_punch",
"engine_pool_ops_tgt_punch",
"engine_pool_ops_tgt_update",
"engine_pool_ops_update",
"engine_pool_pool_handles",
"engine_pool_resent",
"engine_pool_restarted",
"engine_pool_started_at",
"engine_pool_xferred_fetch",
"engine_pool_xferred_update"]
ENGINE_EVENT_METRICS = [
"engine_events_dead_ranks",
"engine_events_last_event_ts",
"engine_servicing_at",
"engine_started_at"]
ENGINE_IO_DTX_COMMITTABLE_METRICS = [
"engine_io_dtx_committable",
"engine_io_dtx_committable_max",
"engine_io_dtx_committable_mean",
"engine_io_dtx_committable_min",
"engine_io_dtx_committable_stddev"]
ENGINE_IO_DTX_COMMITTED_METRICS = [
"engine_io_dtx_committed",
"engine_io_dtx_committed_max",
"engine_io_dtx_committed_mean",
"engine_io_dtx_committed_min",
"engine_io_dtx_committed_stddev"]
ENGINE_IO_LATENCY_FETCH_METRICS = [
"engine_io_latency_fetch",
"engine_io_latency_fetch_max",
"engine_io_latency_fetch_mean",
"engine_io_latency_fetch_min",
"engine_io_latency_fetch_stddev"]
ENGINE_IO_LATENCY_UPDATE_METRICS = [
"engine_io_latency_update",
"engine_io_latency_update_max",
"engine_io_latency_update_mean",
"engine_io_latency_update_min",
"engine_io_latency_update_stddev"]
ENGINE_IO_OPS_AKEY_ENUM_METRICS = [
"engine_io_ops_akey_enum_active",
"engine_io_ops_akey_enum_active_max",
"engine_io_ops_akey_enum_active_mean",
"engine_io_ops_akey_enum_active_min",
"engine_io_ops_akey_enum_active_stddev"]
ENGINE_IO_OPS_AKEY_ENUM_LATENCY_METRICS = [
"engine_io_ops_akey_enum_latency",
"engine_io_ops_akey_enum_latency_max",
"engine_io_ops_akey_enum_latency_mean",
"engine_io_ops_akey_enum_latency_min",
"engine_io_ops_akey_enum_latency_stddev"]
ENGINE_IO_OPS_AKEY_PUNCH_ACTIVE_METRICS = [
"engine_io_ops_akey_punch_active",
"engine_io_ops_akey_punch_active_max",
"engine_io_ops_akey_punch_active_mean",
"engine_io_ops_akey_punch_active_min",
"engine_io_ops_akey_punch_active_stddev"]
ENGINE_IO_OPS_AKEY_PUNCH_LATENCY_METRICS = [
"engine_io_ops_akey_punch_latency",
"engine_io_ops_akey_punch_latency_max",
"engine_io_ops_akey_punch_latency_mean",
"engine_io_ops_akey_punch_latency_min",
"engine_io_ops_akey_punch_latency_stddev"]
ENGINE_IO_OPS_COMPOUND_ACTIVE_METRICS = [
"engine_io_ops_compound_active",
"engine_io_ops_compound_active_max",
"engine_io_ops_compound_active_mean",
"engine_io_ops_compound_active_min",
"engine_io_ops_compound_active_stddev"]
ENGINE_IO_OPS_COMPOUND_LATENCY_METRICS = [
"engine_io_ops_compound_latency",
"engine_io_ops_compound_latency_max",
"engine_io_ops_compound_latency_mean",
"engine_io_ops_compound_latency_min",
"engine_io_ops_compound_latency_stddev"]
ENGINE_IO_OPS_DKEY_ENUM_ACTIVE_METRICS = [
"engine_io_ops_dkey_enum_active",
"engine_io_ops_dkey_enum_active_max",
"engine_io_ops_dkey_enum_active_mean",
"engine_io_ops_dkey_enum_active_min",
"engine_io_ops_dkey_enum_active_stddev"]
ENGINE_IO_OPS_DKEY_ENUM_LATENCY_METRICS = [
"engine_io_ops_dkey_enum_latency",
"engine_io_ops_dkey_enum_latency_max",
"engine_io_ops_dkey_enum_latency_mean",
"engine_io_ops_dkey_enum_latency_min",
"engine_io_ops_dkey_enum_latency_stddev"]
ENGINE_IO_OPS_DKEY_PUNCH_ACTIVE_METRICS = [
"engine_io_ops_dkey_punch_active",
"engine_io_ops_dkey_punch_active_max",
"engine_io_ops_dkey_punch_active_mean",
"engine_io_ops_dkey_punch_active_min",
"engine_io_ops_dkey_punch_active_stddev"]
ENGINE_IO_OPS_DKEY_PUNCH_LATENCY_METRICS = [
"engine_io_ops_dkey_punch_latency",
"engine_io_ops_dkey_punch_latency_max",
"engine_io_ops_dkey_punch_latency_mean",
"engine_io_ops_dkey_punch_latency_min",
"engine_io_ops_dkey_punch_latency_stddev"]
ENGINE_IO_OPS_EC_AGG_ACTIVE_METRICS = [
"engine_io_ops_ec_agg_active",
"engine_io_ops_ec_agg_active_max",
"engine_io_ops_ec_agg_active_mean",
"engine_io_ops_ec_agg_active_min",
"engine_io_ops_ec_agg_active_stddev"]
ENGINE_IO_OPS_EC_AGG_LATENCY_METRICS = [
"engine_io_ops_ec_agg_latency",
"engine_io_ops_ec_agg_latency_max",
"engine_io_ops_ec_agg_latency_mean",
"engine_io_ops_ec_agg_latency_min",
"engine_io_ops_ec_agg_latency_stddev"]
ENGINE_IO_OPS_EC_REP_ACTIVE_METRICS = [
"engine_io_ops_ec_rep_active",
"engine_io_ops_ec_rep_active_max",
"engine_io_ops_ec_rep_active_mean",
"engine_io_ops_ec_rep_active_min",
"engine_io_ops_ec_rep_active_stddev"]
ENGINE_IO_OPS_EC_REP_LATENCY_METRICS = [
"engine_io_ops_ec_rep_latency",
"engine_io_ops_ec_rep_latency_max",
"engine_io_ops_ec_rep_latency_mean",
"engine_io_ops_ec_rep_latency_min",
"engine_io_ops_ec_rep_latency_stddev"]
ENGINE_IO_OPS_FETCH_ACTIVE_METRICS = [
"engine_io_ops_fetch_active",
"engine_io_ops_fetch_active_max",
"engine_io_ops_fetch_active_mean",
"engine_io_ops_fetch_active_min",
"engine_io_ops_fetch_active_stddev"]
ENGINE_IO_OPS_KEY_QUERY_ACTIVE_METRICS = [
"engine_io_ops_key_query_active",
"engine_io_ops_key_query_active_max",
"engine_io_ops_key_query_active_mean",
"engine_io_ops_key_query_active_min",
"engine_io_ops_key_query_active_stddev"]
ENGINE_IO_OPS_KEY_QUERY_LATENCY_METRICS = [
"engine_io_ops_key_query_latency",
"engine_io_ops_key_query_latency_max",
"engine_io_ops_key_query_latency_mean",
"engine_io_ops_key_query_latency_min",
"engine_io_ops_key_query_latency_stddev"]
ENGINE_IO_OPS_MIGRATE_ACTIVE_METRICS = [
"engine_io_ops_migrate_active",
"engine_io_ops_migrate_active_max",
"engine_io_ops_migrate_active_mean",
"engine_io_ops_migrate_active_min",
"engine_io_ops_migrate_active_stddev"]
ENGINE_IO_OPS_MIGRATE_LATENCY_METRICS = [
"engine_io_ops_migrate_latency",
"engine_io_ops_migrate_latency_max",
"engine_io_ops_migrate_latency_mean",
"engine_io_ops_migrate_latency_min",
"engine_io_ops_migrate_latency_stddev"]
ENGINE_IO_OPS_OBJ_ENUM_ACTIVE_METRICS = [
"engine_io_ops_obj_enum_active",
"engine_io_ops_obj_enum_active_max",
"engine_io_ops_obj_enum_active_mean",
"engine_io_ops_obj_enum_active_min",
"engine_io_ops_obj_enum_active_stddev"]
ENGINE_IO_OPS_OBJ_ENUM_LATENCY_METRICS = [
"engine_io_ops_obj_enum_latency",
"engine_io_ops_obj_enum_latency_max",
"engine_io_ops_obj_enum_latency_mean",
"engine_io_ops_obj_enum_latency_min",
"engine_io_ops_obj_enum_latency_stddev"]
ENGINE_IO_OPS_OBJ_PUNCH_ACTIVE_METRICS = [
"engine_io_ops_obj_punch_active",
"engine_io_ops_obj_punch_active_max",
"engine_io_ops_obj_punch_active_mean",
"engine_io_ops_obj_punch_active_min",
"engine_io_ops_obj_punch_active_stddev"]
ENGINE_IO_OPS_OBJ_PUNCH_LATENCY_METRICS = [
"engine_io_ops_obj_punch_latency",
"engine_io_ops_obj_punch_latency_max",
"engine_io_ops_obj_punch_latency_mean",
"engine_io_ops_obj_punch_latency_min",
"engine_io_ops_obj_punch_latency_stddev"]
# Suffixes emitted for each telemetry stats gauge: the raw value plus its
# aggregate variants.  Shared by the metric name lists generated below so a
# new suffix only has to be added in one place.
_STAT_SUFFIXES = ("", "_max", "_mean", "_min", "_stddev")
# Engine I/O operation metric name groups (raw + max/mean/min/stddev each).
ENGINE_IO_OPS_OBJ_SYNC_ACTIVE_METRICS = [
    "engine_io_ops_obj_sync_active" + suffix for suffix in _STAT_SUFFIXES]
ENGINE_IO_OPS_OBJ_SYNC_LATENCY_METRICS = [
    "engine_io_ops_obj_sync_latency" + suffix for suffix in _STAT_SUFFIXES]
ENGINE_IO_OPS_RECX_ENUM_ACTIVE_METRICS = [
    "engine_io_ops_recx_enum_active" + suffix for suffix in _STAT_SUFFIXES]
ENGINE_IO_OPS_RECX_ENUM_LATENCY_METRICS = [
    "engine_io_ops_recx_enum_latency" + suffix for suffix in _STAT_SUFFIXES]
ENGINE_IO_OPS_TGT_AKEY_PUNCH_ACTIVE_METRICS = [
    "engine_io_ops_tgt_akey_punch_active" + suffix
    for suffix in _STAT_SUFFIXES]
ENGINE_IO_OPS_TGT_AKEY_PUNCH_LATENCY_METRICS = [
    "engine_io_ops_tgt_akey_punch_latency" + suffix
    for suffix in _STAT_SUFFIXES]
ENGINE_IO_OPS_TGT_DKEY_PUNCH_ACTIVE_METRICS = [
    "engine_io_ops_tgt_dkey_punch_active" + suffix
    for suffix in _STAT_SUFFIXES]
ENGINE_IO_OPS_TGT_DKEY_PUNCH_LATENCY_METRICS = [
    "engine_io_ops_tgt_dkey_punch_latency" + suffix
    for suffix in _STAT_SUFFIXES]
ENGINE_IO_OPS_TGT_PUNCH_ACTIVE_METRICS = [
    "engine_io_ops_tgt_punch_active" + suffix for suffix in _STAT_SUFFIXES]
ENGINE_IO_OPS_TGT_PUNCH_LATENCY_METRICS = [
    "engine_io_ops_tgt_punch_latency" + suffix for suffix in _STAT_SUFFIXES]
ENGINE_IO_OPS_TGT_UPDATE_ACTIVE_METRICS = [
    "engine_io_ops_tgt_update_active" + suffix for suffix in _STAT_SUFFIXES]
ENGINE_IO_OPS_UPDATE_ACTIVE_METRICS = [
    "engine_io_ops_update_active" + suffix for suffix in _STAT_SUFFIXES]
# Full set of engine I/O telemetry metric names: the concatenation, in a
# fixed order, of every ENGINE_IO_* metric name group defined above.
ENGINE_IO_METRICS = (
    ENGINE_IO_DTX_COMMITTABLE_METRICS
    + ENGINE_IO_DTX_COMMITTED_METRICS
    + ENGINE_IO_LATENCY_FETCH_METRICS
    + ENGINE_IO_LATENCY_UPDATE_METRICS
    + ENGINE_IO_OPS_AKEY_ENUM_METRICS
    + ENGINE_IO_OPS_AKEY_ENUM_LATENCY_METRICS
    + ENGINE_IO_OPS_AKEY_PUNCH_ACTIVE_METRICS
    + ENGINE_IO_OPS_AKEY_PUNCH_LATENCY_METRICS
    + ENGINE_IO_OPS_COMPOUND_ACTIVE_METRICS
    + ENGINE_IO_OPS_COMPOUND_LATENCY_METRICS
    + ENGINE_IO_OPS_DKEY_ENUM_ACTIVE_METRICS
    + ENGINE_IO_OPS_DKEY_ENUM_LATENCY_METRICS
    + ENGINE_IO_OPS_DKEY_PUNCH_ACTIVE_METRICS
    + ENGINE_IO_OPS_DKEY_PUNCH_LATENCY_METRICS
    + ENGINE_IO_OPS_EC_AGG_ACTIVE_METRICS
    + ENGINE_IO_OPS_EC_AGG_LATENCY_METRICS
    + ENGINE_IO_OPS_EC_REP_ACTIVE_METRICS
    + ENGINE_IO_OPS_EC_REP_LATENCY_METRICS
    + ENGINE_IO_OPS_FETCH_ACTIVE_METRICS
    + ENGINE_IO_OPS_KEY_QUERY_ACTIVE_METRICS
    + ENGINE_IO_OPS_KEY_QUERY_LATENCY_METRICS
    + ENGINE_IO_OPS_MIGRATE_ACTIVE_METRICS
    + ENGINE_IO_OPS_MIGRATE_LATENCY_METRICS
    + ENGINE_IO_OPS_OBJ_ENUM_ACTIVE_METRICS
    + ENGINE_IO_OPS_OBJ_ENUM_LATENCY_METRICS
    + ENGINE_IO_OPS_OBJ_PUNCH_ACTIVE_METRICS
    + ENGINE_IO_OPS_OBJ_PUNCH_LATENCY_METRICS
    + ENGINE_IO_OPS_OBJ_SYNC_ACTIVE_METRICS
    + ENGINE_IO_OPS_OBJ_SYNC_LATENCY_METRICS
    + ENGINE_IO_OPS_RECX_ENUM_ACTIVE_METRICS
    + ENGINE_IO_OPS_RECX_ENUM_LATENCY_METRICS
    + ENGINE_IO_OPS_TGT_AKEY_PUNCH_ACTIVE_METRICS
    + ENGINE_IO_OPS_TGT_AKEY_PUNCH_LATENCY_METRICS
    + ENGINE_IO_OPS_TGT_DKEY_PUNCH_ACTIVE_METRICS
    + ENGINE_IO_OPS_TGT_DKEY_PUNCH_LATENCY_METRICS
    + ENGINE_IO_OPS_TGT_PUNCH_ACTIVE_METRICS
    + ENGINE_IO_OPS_TGT_PUNCH_LATENCY_METRICS
    + ENGINE_IO_OPS_TGT_UPDATE_ACTIVE_METRICS
    + ENGINE_IO_OPS_UPDATE_ACTIVE_METRICS)
# Engine network telemetry metric names (ofi+sockets provider failure and
# URI lookup counters).
ENGINE_NET_METRICS = [
    "engine_net_ofi_sockets_failed_addr",
    "engine_net_ofi_sockets_req_timeout",
    "engine_net_ofi_sockets_uri_lookup_timeout",
    "engine_net_uri_lookup_other",
    "engine_net_uri_lookup_self"]
# Engine rank telemetry metric name.
ENGINE_RANK_METRICS = [
    "engine_rank"]
# Go runtime telemetry metric names (GC, goroutine, and memstats counters
# exported by the Go-based control plane).
GO_METRICS = [
    "go_gc_duration_seconds",
    "go_goroutines",
    "go_info",
    "go_memstats_alloc_bytes",
    "go_memstats_alloc_bytes_total",
    "go_memstats_buck_hash_sys_bytes",
    "go_memstats_frees_total",
    "go_memstats_gc_cpu_fraction",
    "go_memstats_gc_sys_bytes",
    "go_memstats_heap_alloc_bytes",
    "go_memstats_heap_idle_bytes",
    "go_memstats_heap_inuse_bytes",
    "go_memstats_heap_objects",
    "go_memstats_heap_released_bytes",
    "go_memstats_heap_sys_bytes",
    "go_memstats_last_gc_time_seconds",
    "go_memstats_lookups_total",
    "go_memstats_mallocs_total",
    "go_memstats_mcache_inuse_bytes",
    "go_memstats_mcache_sys_bytes",
    "go_memstats_mspan_inuse_bytes",
    "go_memstats_mspan_sys_bytes",
    "go_memstats_next_gc_bytes",
    "go_memstats_other_sys_bytes",
    "go_memstats_stack_inuse_bytes",
    "go_memstats_stack_sys_bytes",
    "go_memstats_sys_bytes",
    "go_threads"]
# Process-level telemetry metric names (CPU, file descriptors, memory).
PROCESS_METRICS = [
    "process_cpu_seconds_total",
    "process_max_fds",
    "process_open_fds",
    "process_resident_memory_bytes",
    "process_start_time_seconds",
    "process_virtual_memory_bytes",
    "process_virtual_memory_max_bytes"]
# NVMe device telemetry metric name templates.  The '<id>' placeholder is
# replaced with a per-device identifier derived from the PCI address (see
# get_all_server_metrics_names) before the names are queried.
ENGINE_NVME_METRICS = [
    "engine_nvme_<id>_commands_checksum_mismatch",
    "engine_nvme_<id>_commands_ctrl_busy_time",
    "engine_nvme_<id>_commands_data_units_read",
    "engine_nvme_<id>_commands_data_units_written",
    "engine_nvme_<id>_commands_host_read_cmds",
    "engine_nvme_<id>_commands_host_write_cmds",
    "engine_nvme_<id>_commands_media_errs",
    "engine_nvme_<id>_commands_read_errs",
    "engine_nvme_<id>_commands_unmap_errs",
    "engine_nvme_<id>_commands_write_errs",
    "engine_nvme_<id>_power_cycles",
    "engine_nvme_<id>_power_on_hours",
    "engine_nvme_<id>_read_only_warn",
    "engine_nvme_<id>_reliability_avail_spare",
    "engine_nvme_<id>_reliability_avail_spare_threshold",
    "engine_nvme_<id>_reliability_avail_spare_warn",
    "engine_nvme_<id>_reliability_percentage_used",
    "engine_nvme_<id>_reliability_reliability_warn",
    "engine_nvme_<id>_temp_crit_time",
    "engine_nvme_<id>_temp_current",
    "engine_nvme_<id>_temp_warn",
    "engine_nvme_<id>_temp_warn_time",
    "engine_nvme_<id>_unsafe_shutdowns",
    "engine_nvme_<id>_volatile_mem_warn",
    # Vendor-specific SMART attributes
    "engine_nvme_<id>_vendor_program_fail_cnt_norm",
    "engine_nvme_<id>_vendor_program_fail_cnt_raw",
    "engine_nvme_<id>_vendor_erase_fail_cnt_norm",
    "engine_nvme_<id>_vendor_erase_fail_cnt_raw",
    "engine_nvme_<id>_vendor_wear_leveling_cnt_norm",
    "engine_nvme_<id>_vendor_wear_leveling_cnt_min",
    "engine_nvme_<id>_vendor_wear_leveling_cnt_max",
    "engine_nvme_<id>_vendor_wear_leveling_cnt_avg",
    "engine_nvme_<id>_vendor_endtoend_err_cnt_raw",
    "engine_nvme_<id>_vendor_crc_err_cnt_raw",
    "engine_nvme_<id>_vendor_media_wear_raw",
    "engine_nvme_<id>_vendor_host_reads_raw",
    "engine_nvme_<id>_vendor_crc_workload_timer_raw",
    "engine_nvme_<id>_vendor_thermal_throttle_status_raw",
    "engine_nvme_<id>_vendor_thermal_throttle_event_cnt",
    "engine_nvme_<id>_vendor_retry_buffer_overflow_cnt",
    "engine_nvme_<id>_vendor_pll_lock_loss_cnt",
    "engine_nvme_<id>_vendor_nand_bytes_written",
    "engine_nvme_<id>_vendor_host_bytes_written"]
def __init__(self, dmg, servers):
    """Create a TelemetryUtils object.

    Args:
        dmg (DmgCommand): the DmgCommand object configured to communicate
            with the servers
        servers (list): a list of server host names
    """
    # Logger for reporting telemetry query activity and results
    self.log = getLogger(__name__)
    # dmg command interface used to issue the telemetry list/query requests
    self.dmg = dmg
    # Hosts whose telemetry will be queried, as a NodeSet
    self.hosts = NodeSet.fromlist(servers)
def get_all_server_metrics_names(self, server, with_pools=False):
    """Get all the telemetry metrics names for this server.

    Args:
        server (DaosServerCommand): the server from which to determine what
            metrics will be available
        with_pools (bool, optional): whether to also include the pool and
            container metric names. Defaults to False.

    Returns:
        list: all of the telemetry metrics names for this server
    """
    all_metrics_names = list(self.ENGINE_EVENT_METRICS)
    all_metrics_names.extend(self.ENGINE_IO_METRICS)
    all_metrics_names.extend(self.ENGINE_NET_METRICS)
    all_metrics_names.extend(self.ENGINE_RANK_METRICS)
    all_metrics_names.extend(self.GO_METRICS)
    all_metrics_names.extend(self.PROCESS_METRICS)
    if with_pools:
        all_metrics_names.extend(self.ENGINE_POOL_METRICS)
        all_metrics_names.extend(self.ENGINE_CONTAINER_METRICS)

    # Add NVMe metrics for any NVMe devices configured for this server
    for nvme_list in server.manager.job.get_engine_values("bdev_list"):
        # 'nvme_list or []' treats a None entry the same as an empty list
        for nvme in nvme_list or []:
            # Replace the '<id>' placeholder with a metric-name-safe form
            # of the device address (':' and '.' replaced with '_')
            nvme_id = nvme.replace(":", "_").replace(".", "_")
            all_metrics_names.extend(
                name.replace("<id>", nvme_id)
                for name in self.ENGINE_NVME_METRICS)

    return all_metrics_names
def list_metrics(self):
    """List the available metrics for each host.

    Returns:
        dict: a dictionary of host keys linked to a list of metric names
    """
    self.log.info("Listing telemetry metrics from %s", self.hosts)
    info = {}
    for host in self.hosts:
        data = self.dmg.telemetry_metrics_list(host=host)
        names = []
        if "response" in data and "available_metric_sets" in data["response"]:
            # Collect the name of each reported metric set that has one
            names = [
                entry["name"]
                for entry in data["response"]["available_metric_sets"]
                if "name" in entry]
        info[host] = names
    return info
def get_metrics(self, name):
    """Obtain the specified metric information for each host.

    Args:
        name (str): Comma-separated list of metric names to query.

    Returns:
        dict: a dictionary of host keys linked to metric data for each
            metric name specified
    """
    self.log.info("Querying telemetry metric %s from %s", name, self.hosts)
    info = {}
    for host in self.hosts:
        data = self.dmg.telemetry_metrics_query(host=host, metrics=name)
        host_info = {}
        if "response" in data and "metric_sets" in data["response"]:
            # Index each reported metric set by its name
            host_info = {
                entry["name"]: {
                    "description": entry["description"],
                    "metrics": entry["metrics"],
                }
                for entry in data["response"]["metric_sets"]}
        info[host] = host_info
    return info
def get_container_metrics(self):
    """Get the container telemetry metrics.

    Returns:
        dict: dictionary of dictionaries of container metric names and
            values per server host key
    """
    data = {}
    info = self.get_metrics(",".join(self.ENGINE_CONTAINER_METRICS))
    self.log.info("Container Telemetry Information")
    for host in info:
        # Default every container metric to 0 so hosts that did not report
        # a metric still have an entry for it
        data[host] = {name: 0 for name in self.ENGINE_CONTAINER_METRICS}
        for name in self.ENGINE_CONTAINER_METRICS:
            if name in info[host]:
                for metric in info[host][name]["metrics"]:
                    self.log.info(
                        "  %s (%s): %s (%s)",
                        info[host][name]["description"], name,
                        metric["value"], host)
                    # NOTE(review): the last metric's value wins here --
                    # appears to assume one metric entry per name; confirm
                    data[host][name] = metric["value"]
    return data
def get_pool_metrics(self, specific_metrics=None):
    """Get the pool telemetry metrics.

    Args:
        specific_metrics (list, optional): list of specific pool metrics.
            Defaults to None, in which case all ENGINE_POOL_METRICS are
            queried.

    Returns:
        dict: dictionary of dictionaries of pool metric names and
            values per server host key
    """
    data = {}
    if specific_metrics is None:
        specific_metrics = self.ENGINE_POOL_METRICS
    info = self.get_metrics(",".join(specific_metrics))
    self.log.info("Pool Telemetry Information")
    for name in specific_metrics:
        for index, host in enumerate(info):
            if name in info[host]:
                # Only log the per-metric header once (for the first host)
                if index == 0:
                    self.log.info(
                        "  %s (%s):",
                        name, info[host][name]["description"])
                    self.log.info(
                        "    %-12s %-4s %-6s %s",
                        "Host", "Rank", "Target", "Value")
                if name not in data:
                    data[name] = {}
                if host not in data[name]:
                    data[name][host] = {}
                # Record each metric value keyed by its rank/target labels
                for metric in info[host][name]["metrics"]:
                    if "labels" in metric:
                        if ("rank" in metric["labels"]
                                and "target" in metric["labels"]):
                            rank = metric["labels"]["rank"]
                            target = metric["labels"]["target"]
                            if rank not in data[name][host]:
                                data[name][host][rank] = {}
                            # NOTE(review): this '{}' placeholder is
                            # immediately overwritten by the value below
                            if target not in data[name][host][rank]:
                                data[name][host][rank][target] = {}
                            data[name][host][rank][target] = \
                                metric["value"]
                            self.log.info(
                                "    %-12s %-4s %-6s %s",
                                host, rank, target, metric["value"])
    return data
def get_io_metrics(self, test_metrics=None):
    """Get the io telemetry metrics.

    Args:
        test_metrics (str list, optional): Comma-separated list of metric
            names to query. By default, test_metrics is entire
            ENGINE_IO_METRICS.

    Returns:
        dict: dictionary of dictionaries of container metric names and
            values per server host key
    """
    data = {}
    if test_metrics is None:
        test_metrics = self.ENGINE_IO_METRICS
    info = self.get_metrics(",".join(test_metrics))
    self.log.info("Telemetry Information")
    for name in test_metrics:
        for index, host in enumerate(info):
            if name in info[host]:
                # Only log the per-metric header once (for the first host)
                if index == 0:
                    self.log.info(
                        "  %s (%s):",
                        name, info[host][name]["description"])
                    self.log.info(
                        "    %-12s %-4s %-6s %-6s %s",
                        "Host", "Rank", "Target", "Size", "Value")
                if name not in data:
                    data[name] = {}
                if host not in data[name]:
                    data[name][host] = {}
                for metric in info[host][name]["metrics"]:
                    if "labels" in metric:
                        # Metrics with a 'size' label are stored per size;
                        # rank/target-only metrics are stored under '-'
                        if ("rank" in metric["labels"]
                                and "target" in metric["labels"]
                                and "size" in metric["labels"]):
                            rank = metric["labels"]["rank"]
                            target = metric["labels"]["target"]
                            size = metric["labels"]["size"]
                            if rank not in data[name][host]:
                                data[name][host][rank] = {}
                            if target not in data[name][host][rank]:
                                data[name][host][rank][target] = {}
                            data[name][host][rank][target][size] = \
                                metric["value"]
                            self.log.info(
                                "    %-12s %-4s %-6s %-6s %s",
                                host, rank, target, size, metric["value"])
                        elif ("rank" in metric["labels"]
                                and "target" in metric["labels"]):
                            rank = metric["labels"]["rank"]
                            target = metric["labels"]["target"]
                            if rank not in data[name][host]:
                                data[name][host][rank] = {}
                            if target not in data[name][host][rank]:
                                data[name][host][rank][target] = {}
                            data[name][host][rank][target]["-"] = \
                                metric["value"]
                            self.log.info(
                                "    %-12s %-4s %-6s %-6s %s",
                                host, rank, target, "-", metric["value"])
    return data
def check_container_metrics(self, open_count=None, active_count=None,
                            close_count=None, destroy_count=None):
    """Verify the container telemetry metrics.

    Args:
        open_count (dict, optional): Number of times cont_open has been
            called per host key. Defaults to None.
        active_count (dict, optional): Number of open container handles per
            host key. Defaults to None.
        close_count (dict, optional): Number of times cont_close has been
            called per host key. Defaults to None.
        destroy_count (dict, optional): Number of times cont_destroy has
            been called per host key. Defaults to None.

    Returns:
        list: list of errors detected
    """
    errors = []
    # Map each container metric name to its (optional) expected counts
    expected = {
        "engine_pool_ops_cont_open": open_count,
        "engine_pool_container_handles": active_count,
        "engine_pool_ops_cont_close": close_count,
        "engine_pool_ops_cont_destroy": destroy_count,
    }
    data = self.get_container_metrics()
    for host in data:
        for name in expected:
            if name in data[host]:
                # Only compare when an expected count was supplied for
                # this metric and this host
                if (expected[name] is not None
                        and host in expected[name]
                        and expected[name][host] != data[host][name]):
                    errors.append(
                        "{} mismatch on {}: expected={}; actual={}".format(
                            name, host, expected[name][host],
                            data[host][name]))
            else:
                errors.append("No {} data for {}".format(name, host))
    return errors
import random
import grafanalib.core as G
import pytest
def dummy_grid_pos() -> G.GridPos:
    """Return a fixed GridPos fixture for panel placement tests."""
    return G.GridPos(h=1, w=2, x=3, y=4)


def dummy_data_link() -> G.DataLink:
    """Return a fixed DataLink fixture."""
    return G.DataLink(
        title='dummy title',
        linkUrl='https://www.dummy-link-url.com',
        isNewTab=True
    )


def dummy_evaluator() -> G.Evaluator:
    """Return a fixed greater-than Evaluator fixture."""
    return G.Evaluator(
        type=G.EVAL_GT,
        params=42
    )
def dummy_alert_condition() -> G.AlertCondition:
    """Return a fixed AlertCondition fixture.

    Reuses dummy_evaluator() instead of duplicating its construction inline
    so the two fixtures cannot drift apart.
    """
    return G.AlertCondition(
        target=G.Target(),
        evaluator=dummy_evaluator(),
        timeRange=G.TimeRange(
            from_time='5m',
            to_time='now'
        ),
        operator=G.OP_AND,
        reducerType=G.RTYPE_AVG,
    )
def test_template_defaults():
    """An interval template's default becomes the current text and value."""
    t = G.Template(
        name='test',
        query='1m,5m,10m,30m,1h,3h,12h,1d',
        type='interval',
        default='1m',
    )
    # Serialize once rather than once per assertion
    current = t.to_json_data()['current']
    assert current['text'] == '1m'
    assert current['value'] == '1m'
def test_custom_template_ok():
    """A custom template builds one option per query item and selects the default."""
    t = G.Template(
        name='test',
        query='1,2,3',
        default='1',
        type='custom',
    )
    # Serialize once rather than once per assertion
    json_data = t.to_json_data()
    assert len(json_data['options']) == 3
    assert json_data['current']['text'] == '1'
    assert json_data['current']['value'] == '1'
def test_custom_template_dont_override_options():
    """Explicitly-provided options are kept rather than rebuilt from the query."""
    t = G.Template(
        name='test',
        query='1,2,3',
        default='1',
        options=[
            {
                "value": '1',
                "selected": True,
                "text": 'some text 1',
            },
            {
                "value": '2',
                "selected": False,
                "text": 'some text 2',
            },
            {
                "value": '3',
                "selected": False,
                "text": 'some text 3',
            },
        ],
        type='custom',
    )
    # Serialize once rather than once per assertion
    json_data = t.to_json_data()
    assert len(json_data['options']) == 3
    assert json_data['current']['text'] == 'some text 1'
    assert json_data['current']['value'] == '1'
def test_table():
    """Table panels serialize their transformations in order."""
    t = G.Table(
        dataSource='some data source',
        targets=[
            G.Target(expr='some expr'),
        ],
        title='table title',
        transformations=[
            {
                "id": "seriesToRows",
                "options": {}
            },
            {
                "id": "organize",
                "options": {
                    "excludeByName": {
                        "Time": True
                    },
                    "indexByName": {},
                    "renameByName": {
                        "Value": "Dummy"
                    }
                }
            }
        ]
    )
    # Serialize once rather than once per assertion
    transformations = t.to_json_data()['transformations']
    assert len(transformations) == 2
    assert transformations[0]["id"] == "seriesToRows"
def test_stat_no_repeat():
    """A Stat panel without a repeat serializes null repeat settings."""
    t = G.Stat(
        title='dummy',
        dataSource='data source',
        targets=[
            G.Target(expr='some expr')
        ]
    )
    # Serialize once rather than once per assertion
    json_data = t.to_json_data()
    assert json_data['repeat'] is None
    assert json_data['repeatDirection'] is None
    assert json_data['maxPerRow'] is None
def test_DiscreteColorMappingItem_exception_checks():
    """Non-string text or color arguments raise TypeError."""
    with pytest.raises(TypeError):
        G.DiscreteColorMappingItem(123)

    with pytest.raises(TypeError):
        G.DiscreteColorMappingItem("foo", color=123)


def test_DiscreteColorMappingItem():
    """Text and color serialize; color defaults to GREY1."""
    t = G.DiscreteColorMappingItem('foo')
    json_data = t.to_json_data()
    assert json_data['text'] == 'foo'
    assert json_data['color'] == G.GREY1

    t = G.DiscreteColorMappingItem('foo', color='bar')
    json_data = t.to_json_data()
    assert json_data['text'] == 'foo'
    assert json_data['color'] == 'bar'


def test_Discrete_exceptions():
    """Invalid Discrete panel arguments raise ValueError/TypeError."""
    with pytest.raises(ValueError):
        G.Discrete(legendSortBy='foo')

    with pytest.raises(TypeError):
        G.Discrete(rangeMaps=[123, 456])

    with pytest.raises(TypeError):
        G.Discrete(valueMaps=['foo', 'bar'])

    with pytest.raises(TypeError):
        G.Discrete(lineColor=123)

    with pytest.raises(TypeError):
        G.Discrete(highlightOnMouseover=123)
def test_Discrete():
    """Discrete panel settings and defaults all appear in the JSON output."""
    colorMap = [
        G.DiscreteColorMappingItem('bar', color='baz'),
        G.DiscreteColorMappingItem('foz', color='faz')
    ]
    t = G.Discrete(
        title='foo',
        colorMaps=colorMap,
        lineColor='#aabbcc',
        metricNameColor=G.RGBA(1, 2, 3, .5),
        decimals=123,
        highlightOnMouseover=False,
        showDistinctCount=True,
        showLegendCounts=False,
    )
    json_data = t.to_json_data()
    assert json_data['colorMaps'] == colorMap
    assert json_data['title'] == 'foo'
    assert json_data['type'] == G.DISCRETE_TYPE
    assert json_data['rangeMaps'] == []
    assert json_data['valueMaps'] == []
    assert json_data['backgroundColor'] == G.RGBA(128, 128, 128, 0.1)
    assert json_data['lineColor'] == '#aabbcc'
    assert json_data['metricNameColor'] == G.RGBA(1, 2, 3, .5)
    assert json_data['timeTextColor'] == "#d8d9da"
    assert json_data['valueTextColor'] == "#000000"
    assert json_data['decimals'] == 123
    assert json_data['legendPercentDecimals'] == 0
    assert json_data['rowHeight'] == 50
    assert json_data['textSize'] == 24
    assert json_data['textSizeTime'] == 12
    assert json_data['highlightOnMouseover'] is False
    assert json_data['showLegend'] is True
    assert json_data['showLegendPercent'] is True
    assert json_data['showLegendNames'] is True
    assert json_data['showLegendValues'] is True
    assert json_data['showTimeAxis'] is True
    assert json_data['use12HourClock'] is False
    assert json_data['writeMetricNames'] is False
    assert json_data['writeLastValue'] is True
    assert json_data['writeAllValues'] is False
    assert json_data['showDistinctCount'] is True
    assert json_data['showLegendCounts'] is False
    assert json_data['showLegendTime'] is None
    assert json_data['showTransitionCount'] is None


def test_StatValueMappings_exception_checks():
    """Non-StatValueMappingItem entries raise TypeError."""
    with pytest.raises(TypeError):
        G.StatValueMappings(
            G.StatValueMappingItem('foo', '0', 'dark-red'),
            "not of type StatValueMappingItem",
        )


def test_StatValueMappings():
    """Value mappings serialize into per-value text/color options."""
    t = G.StatValueMappings(
        G.StatValueMappingItem('foo', '0', 'dark-red'),  # Value must be a string
        G.StatValueMappingItem('bar', '1', 'purple'),
    )
    json_data = t.to_json_data()
    assert json_data['type'] == 'value'
    assert json_data['options']['0']['text'] == 'foo'
    assert json_data['options']['0']['color'] == 'dark-red'
    assert json_data['options']['1']['text'] == 'bar'
    assert json_data['options']['1']['color'] == 'purple'


def test_StatRangeMappings():
    """Range mappings serialize their from/to bounds and result."""
    t = G.StatRangeMappings(
        'dummy_text',
        startValue=10,
        endValue=20,
        color='dark-red'
    )
    json_data = t.to_json_data()
    assert json_data['type'] == 'range'
    assert json_data['options']['from'] == 10
    assert json_data['options']['to'] == 20
    assert json_data['options']['result']['text'] == 'dummy_text'
    assert json_data['options']['result']['color'] == 'dark-red'


def test_StatMapping():
    """Legacy StatMapping serializes text plus from/to values."""
    t = G.StatMapping(
        'dummy_text',
        startValue='foo',
        endValue='bar',
    )
    json_data = t.to_json_data()
    assert json_data['text'] == 'dummy_text'
    assert json_data['from'] == 'foo'
    assert json_data['to'] == 'bar'
def test_stat_with_repeat():
    """Repeat settings propagate into the serialized Stat panel."""
    t = G.Stat(
        title='dummy',
        dataSource='data source',
        targets=[
            G.Target(expr='some expr')
        ],
        repeat=G.Repeat(
            variable="repetitionVariable",
            direction='h',
            maxPerRow=10
        )
    )
    # Serialize once rather than once per assertion
    json_data = t.to_json_data()
    assert json_data['repeat'] == 'repetitionVariable'
    assert json_data['repeatDirection'] == 'h'
    assert json_data['maxPerRow'] == 10
def test_single_stat():
    """SingleStat panels serialize their datasource, targets and title."""
    data_source = 'dummy data source'
    targets = ['dummy_prom_query']
    title = 'dummy title'
    single_stat = G.SingleStat(data_source, targets, title)
    data = single_stat.to_json_data()
    assert data['targets'] == targets
    assert data['datasource'] == data_source
    assert data['title'] == title


def test_dashboard_list():
    """DashboardList panels have sensible serialized defaults."""
    title = 'dummy title'
    dashboard_list = G.DashboardList(title=title)
    data = dashboard_list.to_json_data()
    assert data['targets'] == []
    assert data['datasource'] is None
    assert data['title'] == title
    assert data['starred'] is True


def test_logs_panel():
    """Logs panels serialize their inputs and default display options."""
    data_source = 'dummy data source'
    targets = ['dummy_prom_query']
    title = 'dummy title'
    logs = G.Logs(data_source, targets, title)
    data = logs.to_json_data()
    assert data['targets'] == targets
    assert data['datasource'] == data_source
    assert data['title'] == title
    assert data['options']['showLabels'] is False
    assert data['options']['showCommonLabels'] is False
    assert data['options']['showTime'] is False
    assert data['options']['wrapLogMessage'] is False
    assert data['options']['sortOrder'] == 'Descending'
    assert data['options']['dedupStrategy'] == 'none'
    assert data['options']['enableLogDetails'] is False
    assert data['options']['prettifyLogMessage'] is False


def test_notification():
    """Notifications serialize their channel uid."""
    uid = 'notification_channel'
    notification = G.Notification(uid)
    data = notification.to_json_data()
    assert data['uid'] == uid
def test_graph_panel():
    """Graph panels serialize their inputs and omit 'alert' by default."""
    data_source = 'dummy data source'
    targets = ['dummy_prom_query']
    title = 'dummy title'
    graph = G.Graph(data_source, targets, title)
    data = graph.to_json_data()
    assert data['targets'] == targets
    assert data['datasource'] == data_source
    assert data['title'] == title
    assert 'alert' not in data


def test_panel_extra_json():
    """extraJson entries are deep-merged into the serialized panel."""
    data_source = 'dummy data source'
    targets = ['dummy_prom_query']
    title = 'dummy title'
    extraJson = {
        'fillGradient': 6,
        'yaxis': {'align': True},
        'legend': {'avg': True},
    }
    graph = G.Graph(data_source, targets, title, extraJson=extraJson)
    data = graph.to_json_data()
    assert data['targets'] == targets
    assert data['datasource'] == data_source
    assert data['title'] == title
    assert 'alert' not in data
    assert data['fillGradient'] == 6
    assert data['yaxis']['align'] is True
    # Nested non-dict object should also be deep-updated
    assert data['legend']['max'] is False
    assert data['legend']['avg'] is True
def test_graph_panel_threshold():
    """Graph thresholds are serialized when no alert is attached."""
    data_source = 'dummy data source'
    targets = ['dummy_prom_query']
    title = 'dummy title'
    thresholds = [
        G.GraphThreshold(20.0),
        G.GraphThreshold(40.2, colorMode="ok")
    ]
    graph = G.Graph(data_source, targets, title, thresholds=thresholds)
    data = graph.to_json_data()
    assert data['targets'] == targets
    assert data['datasource'] == data_source
    assert data['title'] == title
    assert 'alert' not in data
    assert data['thresholds'] == thresholds


def test_graph_panel_alert():
    """Attaching an alert suppresses the serialized panel thresholds."""
    data_source = 'dummy data source'
    targets = ['dummy_prom_query']
    title = 'dummy title'
    alert = [
        G.AlertCondition(G.Target(), G.Evaluator('a', 'b'), G.TimeRange('5', '6'), 'd', 'e')
    ]
    thresholds = [
        G.GraphThreshold(20.0),
        G.GraphThreshold(40.2, colorMode="ok")
    ]
    graph = G.Graph(data_source, targets, title, thresholds=thresholds, alert=alert)
    data = graph.to_json_data()
    assert data['targets'] == targets
    assert data['datasource'] == data_source
    assert data['title'] == title
    assert data['alert'] == alert
    # Thresholds are dropped in favour of the alert
    assert data['thresholds'] == []


def test_graph_threshold():
    """A plain GraphThreshold serializes defaults without custom colors."""
    value = 20.0
    colorMode = "ok"
    threshold = G.GraphThreshold(value, colorMode=colorMode)
    data = threshold.to_json_data()
    assert data['value'] == value
    assert data['colorMode'] == colorMode
    assert data['fill'] is True
    assert data['line'] is True
    assert data['op'] == G.EVAL_GT
    assert 'fillColor' not in data
    assert 'lineColor' not in data


def test_graph_threshold_custom():
    """In 'custom' color mode, fill/line colors appear in the output."""
    value = 20.0
    colorMode = "custom"
    color = G.GREEN
    threshold = G.GraphThreshold(value, colorMode=colorMode, fillColor=color)
    data = threshold.to_json_data()
    assert data['value'] == value
    assert data['colorMode'] == colorMode
    assert data['fill'] is True
    assert data['line'] is True
    assert data['op'] == G.EVAL_GT
    assert data['fillColor'] == color
    assert data['lineColor'] == G.RED
def test_alert_list():
    """An AlertList built with every option serializes without error."""
    alert_list = G.AlertList(
        dashboardTags=['dummy tag'],
        description='dummy description',
        gridPos=dummy_grid_pos(),
        id=random.randint(1, 10),
        links=[dummy_data_link(), dummy_data_link()],
        nameFilter='dummy name filter',
        stateFilter=[G.ALERTLIST_STATE_ALERTING, G.ALERTLIST_STATE_OK],
        title='dummy title'
    )
    alert_list.to_json_data()


def test_alert():
    """An Alert with conditions and rule tags serializes without error."""
    alert = G.Alert(
        name='dummy name',
        message='dummy message',
        alertConditions=dummy_alert_condition(),
        alertRuleTags=dict(alert_rul_dummy_key='alert rul dummy value')
    )
    alert.to_json_data()
def test_worldmap():
    """Worldmap panels serialize their inputs and circleMaxSize."""
    data_source = 'dummy data source'
    targets = ['dummy_prom_query']
    title = 'dummy title'
    worldmap = G.Worldmap(data_source, targets, title, circleMaxSize=11)
    data = worldmap.to_json_data()
    assert data['targets'] == targets
    assert data['datasource'] == data_source
    assert data['title'] == title
    assert data['circleMaxSize'] == 11


def test_stateTimeline():
    """StateTimeline panels serialize their inputs and rowHeight option."""
    data_source = 'dummy data source'
    targets = ['dummy_prom_query']
    title = 'dummy title'
    stateTimeline = G.StateTimeline(data_source, targets, title, rowHeight=0.7)
    data = stateTimeline.to_json_data()
    assert data['targets'] == targets
    assert data['datasource'] == data_source
    assert data['title'] == title
    assert data['options']['rowHeight'] == 0.7


def test_timeseries():
    """TimeSeries panels serialize with empty field overrides by default."""
    data_source = 'dummy data source'
    targets = ['dummy_prom_query']
    title = 'dummy title'
    timeseries = G.TimeSeries(data_source, targets, title)
    data = timeseries.to_json_data()
    assert data['targets'] == targets
    assert data['datasource'] == data_source
    assert data['title'] == title
    assert data['fieldConfig']['overrides'] == []


def test_timeseries_with_overrides():
    """Explicit field overrides pass through into the serialized panel."""
    data_source = 'dummy data source'
    targets = ['dummy_prom_query']
    title = 'dummy title'
    overrides = [
        {
            "matcher": {
                "id": "byName",
                "options": "min"
            },
            "properties": [
                {
                    "id": "custom.fillBelowTo",
                    "value": "min"
                },
                {
                    "id": "custom.lineWidth",
                    "value": 0
                }
            ]
        }
    ]
    timeseries = G.TimeSeries(
        dataSource=data_source,
        targets=targets,
        title=title,
        overrides=overrides,
    )
    data = timeseries.to_json_data()
    assert data['targets'] == targets
    assert data['datasource'] == data_source
    assert data['title'] == title
    assert data['fieldConfig']['overrides'] == overrides


def test_news():
    """News panels serialize their feed URL and title."""
    title = 'dummy title'
    feedUrl = "www.example.com"
    news = G.News(title=title, feedUrl=feedUrl)
    data = news.to_json_data()
    assert data['options']['feedUrl'] == feedUrl
    assert data['title'] == title


def test_pieChartv2():
    """PieChartv2 panels serialize their datasource, targets and title."""
    data_source = 'dummy data source'
    targets = ['dummy_prom_query']
    title = 'dummy title'
    pie = G.PieChartv2(data_source, targets, title)
    data = pie.to_json_data()
    assert data['targets'] == targets
    assert data['datasource'] == data_source
    assert data['title'] == title
import random
import grafanalib.core as G
import pytest
def dummy_grid_pos() -> G.GridPos:
return G.GridPos(h=1, w=2, x=3, y=4)
def dummy_data_link() -> G.DataLink:
return G.DataLink(
title='dummy title',
linkUrl='https://www.dummy-link-url.com',
isNewTab=True
)
def dummy_evaluator() -> G.Evaluator:
return G.Evaluator(
type=G.EVAL_GT,
params=42
)
def dummy_alert_condition() -> G.AlertCondition:
return G.AlertCondition(
target=G.Target(),
evaluator=G.Evaluator(
type=G.EVAL_GT,
params=42),
timeRange=G.TimeRange(
from_time='5m',
to_time='now'
),
operator=G.OP_AND,
reducerType=G.RTYPE_AVG,
)
def test_template_defaults():
t = G.Template(
name='test',
query='1m,5m,10m,30m,1h,3h,12h,1d',
type='interval',
default='1m',
)
assert t.to_json_data()['current']['text'] == '1m'
assert t.to_json_data()['current']['value'] == '1m'
def test_custom_template_ok():
t = G.Template(
name='test',
query='1,2,3',
default='1',
type='custom',
)
assert len(t.to_json_data()['options']) == 3
assert t.to_json_data()['current']['text'] == '1'
assert t.to_json_data()['current']['value'] == '1'
def test_custom_template_dont_override_options():
t = G.Template(
name='test',
query='1,2,3',
default='1',
options=[
{
"value": '1',
"selected": True,
"text": 'some text 1',
},
{
"value": '2',
"selected": False,
"text": 'some text 2',
},
{
"value": '3',
"selected": False,
"text": 'some text 3',
},
],
type='custom',
)
assert len(t.to_json_data()['options']) == 3
assert t.to_json_data()['current']['text'] == 'some text 1'
assert t.to_json_data()['current']['value'] == '1'
def test_table():
t = G.Table(
dataSource='some data source',
targets=[
G.Target(expr='some expr'),
],
title='table title',
transformations=[
{
"id": "seriesToRows",
"options": {}
},
{
"id": "organize",
"options": {
"excludeByName": {
"Time": True
},
"indexByName": {},
"renameByName": {
"Value": "Dummy"
}
}
}
]
)
assert len(t.to_json_data()['transformations']) == 2
assert t.to_json_data()['transformations'][0]["id"] == "seriesToRows"
def test_stat_no_repeat():
t = G.Stat(
title='dummy',
dataSource='data source',
targets=[
G.Target(expr='some expr')
]
)
assert t.to_json_data()['repeat'] is None
assert t.to_json_data()['repeatDirection'] is None
assert t.to_json_data()['maxPerRow'] is None
def test_DiscreteColorMappingItem_exception_checks():
with pytest.raises(TypeError):
G.DiscreteColorMappingItem(123)
with pytest.raises(TypeError):
G.DiscreteColorMappingItem("foo", color=123)
def test_DiscreteColorMappingItem():
t = G.DiscreteColorMappingItem('foo')
json_data = t.to_json_data()
assert json_data['text'] == 'foo'
assert json_data['color'] == G.GREY1
t = G.DiscreteColorMappingItem('foo', color='bar')
json_data = t.to_json_data()
assert json_data['text'] == 'foo'
assert json_data['color'] == 'bar'
def test_Discrete_exceptions():
with pytest.raises(ValueError):
G.Discrete(legendSortBy='foo')
with pytest.raises(TypeError):
G.Discrete(rangeMaps=[123, 456])
with pytest.raises(TypeError):
G.Discrete(valueMaps=['foo', 'bar'])
with pytest.raises(TypeError):
G.Discrete(lineColor=123)
with pytest.raises(TypeError):
G.Discrete(highlightOnMouseover=123)
def test_Discrete():
colorMap = [
G.DiscreteColorMappingItem('bar', color='baz'),
G.DiscreteColorMappingItem('foz', color='faz')
]
t = G.Discrete(
title='foo',
colorMaps=colorMap,
lineColor='#aabbcc',
metricNameColor=G.RGBA(1, 2, 3, .5),
decimals=123,
highlightOnMouseover=False,
showDistinctCount=True,
showLegendCounts=False,
)
json_data = t.to_json_data()
assert json_data['colorMaps'] == colorMap
assert json_data['title'] == 'foo'
assert json_data['type'] == G.DISCRETE_TYPE
assert json_data['rangeMaps'] == []
assert json_data['valueMaps'] == []
assert json_data['backgroundColor'] == G.RGBA(128, 128, 128, 0.1)
assert json_data['lineColor'] == '#aabbcc'
assert json_data['metricNameColor'] == G.RGBA(1, 2, 3, .5)
assert json_data['timeTextColor'] == "#d8d9da"
assert json_data['valueTextColor'] == "#000000"
assert json_data['decimals'] == 123
assert json_data['legendPercentDecimals'] == 0
assert json_data['rowHeight'] == 50
assert json_data['textSize'] == 24
assert json_data['textSizeTime'] == 12
assert json_data['highlightOnMouseover'] is False
assert json_data['showLegend'] is True
assert json_data['showLegendPercent'] is True
assert json_data['showLegendNames'] is True
assert json_data['showLegendValues'] is True
assert json_data['showTimeAxis'] is True
assert json_data['use12HourClock'] is False
assert json_data['writeMetricNames'] is False
assert json_data['writeLastValue'] is True
assert json_data['writeAllValues'] is False
assert json_data['showDistinctCount'] is True
assert json_data['showLegendCounts'] is False
assert json_data['showLegendTime'] is None
assert json_data['showTransitionCount'] is None
def test_StatValueMappings_exception_checks():
    """StatValueMappings rejects members that are not StatValueMappingItem."""
    with pytest.raises(TypeError):
        G.StatValueMappings(
            G.StatValueMappingItem('foo', '0', 'dark-red'),
            "not of type StatValueMappingItem",
        )


def test_StatValueMappings():
    """StatValueMappings serializes items under options keyed by their value."""
    t = G.StatValueMappings(
        G.StatValueMappingItem('foo', '0', 'dark-red'),  # Value must be a string
        G.StatValueMappingItem('bar', '1', 'purple'),
    )

    json_data = t.to_json_data()
    assert json_data['type'] == 'value'
    assert json_data['options']['0']['text'] == 'foo'
    assert json_data['options']['0']['color'] == 'dark-red'
    assert json_data['options']['1']['text'] == 'bar'
    assert json_data['options']['1']['color'] == 'purple'


def test_StatRangeMappings():
    """StatRangeMappings serializes a from/to range with a result text/color."""
    t = G.StatRangeMappings(
        'dummy_text',
        startValue=10,
        endValue=20,
        color='dark-red'
    )

    json_data = t.to_json_data()
    assert json_data['type'] == 'range'
    assert json_data['options']['from'] == 10
    assert json_data['options']['to'] == 20
    assert json_data['options']['result']['text'] == 'dummy_text'
    assert json_data['options']['result']['color'] == 'dark-red'


def test_StatMapping():
    """StatMapping serializes text plus raw from/to endpoints."""
    t = G.StatMapping(
        'dummy_text',
        startValue='foo',
        endValue='bar',
    )

    json_data = t.to_json_data()
    assert json_data['text'] == 'dummy_text'
    assert json_data['from'] == 'foo'
    assert json_data['to'] == 'bar'


def test_stat_with_repeat():
    """Stat flattens a Repeat object into repeat/repeatDirection/maxPerRow."""
    t = G.Stat(
        title='dummy',
        dataSource='data source',
        targets=[
            G.Target(expr='some expr')
        ],
        repeat=G.Repeat(
            variable="repetitionVariable",
            direction='h',
            maxPerRow=10
        )
    )

    assert t.to_json_data()['repeat'] == 'repetitionVariable'
    assert t.to_json_data()['repeatDirection'] == 'h'
    assert t.to_json_data()['maxPerRow'] == 10


def test_single_stat():
    """SingleStat keeps targets, datasource and title unchanged."""
    data_source = 'dummy data source'
    targets = ['dummy_prom_query']
    title = 'dummy title'
    single_stat = G.SingleStat(data_source, targets, title)
    data = single_stat.to_json_data()
    assert data['targets'] == targets
    assert data['datasource'] == data_source
    assert data['title'] == title


def test_dashboard_list():
    """DashboardList defaults: no targets/datasource, starred enabled."""
    title = 'dummy title'
    dashboard_list = G.DashboardList(title=title)
    data = dashboard_list.to_json_data()
    assert data['targets'] == []
    assert data['datasource'] is None
    assert data['title'] == title
    assert data['starred'] is True


def test_logs_panel():
    """Logs panel serializes its display options with documented defaults."""
    data_source = 'dummy data source'
    targets = ['dummy_prom_query']
    title = 'dummy title'
    logs = G.Logs(data_source, targets, title)
    data = logs.to_json_data()
    assert data['targets'] == targets
    assert data['datasource'] == data_source
    assert data['title'] == title
    assert data['options']['showLabels'] is False
    assert data['options']['showCommonLabels'] is False
    assert data['options']['showTime'] is False
    assert data['options']['wrapLogMessage'] is False
    assert data['options']['sortOrder'] == 'Descending'
    assert data['options']['dedupStrategy'] == 'none'
    assert data['options']['enableLogDetails'] is False
    assert data['options']['prettifyLogMessage'] is False


def test_notification():
    """Notification serializes its uid."""
    uid = 'notification_channel'
    notification = G.Notification(uid)
    data = notification.to_json_data()
    assert data['uid'] == uid


def test_graph_panel():
    """Graph panel omits the 'alert' key when no alert is configured."""
    data_source = 'dummy data source'
    targets = ['dummy_prom_query']
    title = 'dummy title'
    graph = G.Graph(data_source, targets, title)
    data = graph.to_json_data()
    assert data['targets'] == targets
    assert data['datasource'] == data_source
    assert data['title'] == title
    assert 'alert' not in data


def test_panel_extra_json():
    """extraJson is deep-merged into the serialized panel."""
    data_source = 'dummy data source'
    targets = ['dummy_prom_query']
    title = 'dummy title'
    extraJson = {
        'fillGradient': 6,
        'yaxis': {'align': True},
        'legend': {'avg': True},
    }
    graph = G.Graph(data_source, targets, title, extraJson=extraJson)
    data = graph.to_json_data()
    assert data['targets'] == targets
    assert data['datasource'] == data_source
    assert data['title'] == title
    assert 'alert' not in data
    assert data['fillGradient'] == 6
    assert data['yaxis']['align'] is True
    # Nested non-dict object should also be deep-updated
    assert data['legend']['max'] is False
    assert data['legend']['avg'] is True


def test_graph_panel_threshold():
    """Graph panel keeps its thresholds when no alert is set."""
    data_source = 'dummy data source'
    targets = ['dummy_prom_query']
    title = 'dummy title'
    thresholds = [
        G.GraphThreshold(20.0),
        G.GraphThreshold(40.2, colorMode="ok")
    ]
    graph = G.Graph(data_source, targets, title, thresholds=thresholds)
    data = graph.to_json_data()
    assert data['targets'] == targets
    assert data['datasource'] == data_source
    assert data['title'] == title
    assert 'alert' not in data
    assert data['thresholds'] == thresholds


def test_graph_panel_alert():
    """Thresholds are dropped from the panel when an alert is configured."""
    data_source = 'dummy data source'
    targets = ['dummy_prom_query']
    title = 'dummy title'
    alert = [
        G.AlertCondition(G.Target(), G.Evaluator('a', 'b'), G.TimeRange('5', '6'), 'd', 'e')
    ]
    thresholds = [
        G.GraphThreshold(20.0),
        G.GraphThreshold(40.2, colorMode="ok")
    ]
    graph = G.Graph(data_source, targets, title, thresholds=thresholds, alert=alert)
    data = graph.to_json_data()
    assert data['targets'] == targets
    assert data['datasource'] == data_source
    assert data['title'] == title
    assert data['alert'] == alert
    assert data['thresholds'] == []


def test_graph_threshold():
    """GraphThreshold defaults: fill/line on, EVAL_GT op, no custom colors."""
    value = 20.0
    colorMode = "ok"
    threshold = G.GraphThreshold(value, colorMode=colorMode)
    data = threshold.to_json_data()

    assert data['value'] == value
    assert data['colorMode'] == colorMode
    assert data['fill'] is True
    assert data['line'] is True
    assert data['op'] == G.EVAL_GT
    assert 'fillColor' not in data
    assert 'lineColor' not in data


def test_graph_threshold_custom():
    """colorMode='custom' emits fillColor plus a default lineColor."""
    value = 20.0
    colorMode = "custom"
    color = G.GREEN
    threshold = G.GraphThreshold(value, colorMode=colorMode, fillColor=color)
    data = threshold.to_json_data()

    assert data['value'] == value
    assert data['colorMode'] == colorMode
    assert data['fill'] is True
    assert data['line'] is True
    assert data['op'] == G.EVAL_GT
    assert data['fillColor'] == color
    assert data['lineColor'] == G.RED


def test_alert_list():
    """AlertList with a full set of arguments serializes without raising."""
    alert_list = G.AlertList(
        dashboardTags=['dummy tag'],
        description='dummy description',
        gridPos=dummy_grid_pos(),
        id=random.randint(1, 10),
        links=[dummy_data_link(), dummy_data_link()],
        nameFilter='dummy name filter',
        stateFilter=[G.ALERTLIST_STATE_ALERTING, G.ALERTLIST_STATE_OK],
        title='dummy title'
    )
    alert_list.to_json_data()


def test_alert():
    """Alert with conditions and rule tags serializes without raising."""
    alert = G.Alert(
        name='dummy name',
        message='dummy message',
        alertConditions=dummy_alert_condition(),
        alertRuleTags=dict(alert_rul_dummy_key='alert rul dummy value')
    )
    alert.to_json_data()


def test_worldmap():
    """Worldmap passes circleMaxSize through to the JSON output."""
    data_source = 'dummy data source'
    targets = ['dummy_prom_query']
    title = 'dummy title'
    worldmap = G.Worldmap(data_source, targets, title, circleMaxSize=11)
    data = worldmap.to_json_data()
    assert data['targets'] == targets
    assert data['datasource'] == data_source
    assert data['title'] == title
    assert data['circleMaxSize'] == 11


def test_stateTimeline():
    """StateTimeline stores rowHeight under options."""
    data_source = 'dummy data source'
    targets = ['dummy_prom_query']
    title = 'dummy title'
    stateTimeline = G.StateTimeline(data_source, targets, title, rowHeight=0.7)
    data = stateTimeline.to_json_data()
    assert data['targets'] == targets
    assert data['datasource'] == data_source
    assert data['title'] == title
    assert data['options']['rowHeight'] == 0.7


def test_timeseries():
    """TimeSeries has empty field overrides by default."""
    data_source = 'dummy data source'
    targets = ['dummy_prom_query']
    title = 'dummy title'
    timeseries = G.TimeSeries(data_source, targets, title)
    data = timeseries.to_json_data()
    assert data['targets'] == targets
    assert data['datasource'] == data_source
    assert data['title'] == title
    assert data['fieldConfig']['overrides'] == []


def test_timeseries_with_overrides():
    """TimeSeries passes fieldConfig overrides through unchanged."""
    data_source = 'dummy data source'
    targets = ['dummy_prom_query']
    title = 'dummy title'
    overrides = [
        {
            "matcher": {
                "id": "byName",
                "options": "min"
            },
            "properties": [
                {
                    "id": "custom.fillBelowTo",
                    "value": "min"
                },
                {
                    "id": "custom.lineWidth",
                    "value": 0
                }
            ]
        }
    ]
    timeseries = G.TimeSeries(
        dataSource=data_source,
        targets=targets,
        title=title,
        overrides=overrides,
    )
    data = timeseries.to_json_data()
    assert data['targets'] == targets
    assert data['datasource'] == data_source
    assert data['title'] == title
    assert data['fieldConfig']['overrides'] == overrides


def test_news():
    """News panel stores feedUrl under options."""
    title = 'dummy title'
    feedUrl = "www.example.com"
    news = G.News(title=title, feedUrl=feedUrl)
    data = news.to_json_data()
    assert data['options']['feedUrl'] == feedUrl
    assert data['title'] == title


def test_pieChartv2():
    """PieChartv2 keeps targets, datasource and title unchanged."""
    data_source = 'dummy data source'
    targets = ['dummy_prom_query']
    title = 'dummy title'
    pie = G.PieChartv2(data_source, targets, title)
    data = pie.to_json_data()
    assert data['targets'] == targets
    assert data['datasource'] == data_source
    assert data['title'] == title
import pathlib
from subprocess import call, run
from os import path
from json import dumps
import base64
from requests import get, post
import noma.config as cfg
def check_wallet():
    """Create a wallet if lnd is initialized and none exists yet.

    Prints guidance when the wallet already exists or the lnd directory
    is missing.

    :return str: Status
    """
    if not cfg.LND_PATH.exists():
        print("❌ Error: lnd directory does not exist!")
        return
    if cfg.WALLET_PATH.exists():
        print("❌ Error: LND not initialized")
        print("Wallet already exists!")
        print("Please backup and move: " + str(cfg.WALLET_PATH))
        print("and then restart lnd")
        return
    create_wallet()
def encodemacaroons(macaroonfile=cfg.MACAROON_PATH, tlsfile=cfg.TLS_CERT_PATH):
    """base64url encode macaroon and TLS certificate

    Produces the two components of an lndconnect URI.

    :param macaroonfile: path to the macaroon file (binary)
    :param tlsfile: path to the PEM-encoded TLS certificate
    :return: dict with "status"; on success also "certificate" and
        "macaroon" as bytes
    """
    if path.exists(str(macaroonfile)) and path.exists(str(tlsfile)):
        with open(path.expanduser(macaroonfile), "rb") as f:
            macaroon_bytes = f.read()
        with open(path.expanduser(tlsfile), "rb") as f:
            tls_bytes = f.read()
        macaroonencoded = base64.urlsafe_b64encode(macaroon_bytes)
        # The PEM body is already base64; convert it to the base64url
        # alphabet by hand: strip header/footer/newlines, swap +/ for -_,
        # and drop '=' padding.
        tlsdecoded = tls_bytes.decode("utf-8")
        tlstrim = (
            tlsdecoded.replace("\n", "")
            .replace("-----BEGIN CERTIFICATE-----", "")
            .replace("-----END CERTIFICATE-----", "")
            .replace("+", "-")
            .replace("/", "_")
            .replace("=", "")
        )
        tlsencoded = tlstrim.encode("utf-8")
        return {
            "status": "OK",
            "certificate": tlsencoded,
            "macaroon": macaroonencoded,
        }
    else:
        return {"status": "File Not Found"}
def connectstring(
    hostname=cfg.URL_GRPC,
    macaroonfile=cfg.MACAROON_PATH,
    tlsfile=cfg.TLS_CERT_PATH,
):
    """Show lndconnect string for remote wallets such as Zap"""
    result = encodemacaroons(macaroonfile, tlsfile)
    if result["status"] != "OK":
        print(result["status"])
        return
    macaroon_string = str(result["macaroon"], "utf-8")
    cert_string = str(result["certificate"], "utf-8")
    # assemble the lndconnect URI in one shot
    print(f"lndconnect://{hostname}?cert={cert_string}&macaroon={macaroon_string}")
def autounlock():
    """Auto-unlock lnd using password.txt, tls.cert

    POSTs the base64-encoded wallet password to lnd's unlockwallet REST
    endpoint. Connection errors (lnd not running) and empty responses
    (wallet already unlocked) are deliberately silenced.
    """
    # with-block guarantees the password file handle is closed
    # (the original leaked it via open(...).read())
    with open(cfg.PASSWORD_FILE_PATH, "r") as password_file:
        password_str = password_file.read().rstrip()
    password_bytes = str(password_str).encode("utf-8")
    data = {"wallet_password": base64.b64encode(password_bytes).decode()}
    try:
        response = post(
            cfg.URL_UNLOCKWALLET, verify=cfg.TLS_CERT_PATH, data=dumps(data)
        )
    except Exception:
        # Silence connection errors when lnd is not running
        pass
    else:
        try:
            print(response.json())
        except Exception:
            # JSON will fail to decode when unlocked already since response is
            # empty
            pass
def get_kv(key, section="", config_path=""):
    """
    Parse key-value config files and print out values

    :param key: left part of key value pair
    :param config_path: path to config file
    :param section: [section] of the kv pair
    :return: value of key
    """
    from configparser import ConfigParser

    # fall back to the lnd defaults when caller leaves these blank
    config_path = config_path or cfg.LND_CONF
    section = section or "Application Options"

    config = ConfigParser(strict=False)
    with open(config_path) as config_file:
        config.read_file(config_file)
    return config.get(section, key)
def set_kv(key, value, section="", config_path=""):
    """
    Parse key-value config files and write them out with a key-value change

    Note: comments are lost!

    :param key: left part of key value pair
    :param value: right part of key value pair
    :param section: optional name of section to set in
    :param config_path: path to file
    :return:
    """
    from configparser import ConfigParser

    if not section:
        section = "Application Options"
    if not config_path:
        config_path = cfg.LND_CONF

    parser = ConfigParser(strict=False)
    with open(config_path) as lines:
        parser.read_file(lines)
    parser.set(section, key, value)
    # the with-block closes the file on exit; the original additionally
    # called file.close() inside the context manager, which was redundant
    with open(config_path, "w") as file:
        parser.write(file, space_around_delimiters=False)
def setup_tor(version=""):
    """Add tor hidden service to lnd

    Reads the onion hostname generated by tor for the lnd hidden service
    and writes it into lnd.conf as the ``externalip`` value.

    :param version: tor hidden-service directory suffix (defaults to "v3")
    """
    if not version:
        version = "v3"
    hostname_path = "/var/lib/tor/lnd-{}/hostname".format(version)
    try:
        print("Adding externalip directive to lnd for tor")
        with open(hostname_path, "r") as hostname:
            # NOTE(review): hostname.read() keeps the trailing newline —
            # confirm lnd tolerates it in the externalip value
            set_kv("externalip", hostname.read(), "Application Options")
    except Exception as error:
        # best-effort: report (file missing, tor not set up) and continue
        print(error.__class__.__name__, ":", error)
def set_bitcoind(password, user="", lnd_config=""):
    """Add bitcoind rpc username and password to lnd"""
    # apply defaults for blank arguments
    user = user or "lncm"
    lnd_config = lnd_config or cfg.LND_CONF
    if pathlib.Path(lnd_config).is_file():
        set_kv("bitcoind.rpcuser", user, "Bitcoind", lnd_config)
        set_kv("bitcoind.rpcpass", password, "Bitcoind", lnd_config)
def autoconnect(list_path=""):
    """Auto-connect to a list of nodes in lnd/autoconnect.txt

    Each line of the file is a node address and is passed to
    ``lncli connect`` inside the running lnd docker container.

    :param list_path: optional path to an alternative address list file
    """
    print("Connecting to:")
    if not list_path:
        list_path = pathlib.Path(cfg.LND_PATH / "autoconnect.txt")
    with open(list_path) as address_list:
        for address in address_list:
            print(address.strip())
            # blocking docker/lncli call per address; connection failures
            # are reported by lncli itself
            call(
                [
                    "docker",
                    "exec",
                    cfg.LND_MODE + "_lnd_1",
                    "lncli",
                    "connect",
                    address.strip(),
                ]
            )
def check():
    """Check lnd filesystem structure"""
    lnd_dir_ok = cfg.LND_PATH.is_dir()
    conf_ok = cfg.LND_CONF.is_file()
    print("✅ lnd directory exists" if lnd_dir_ok else "❌ lnd directory missing")
    print("✅ lnd.conf exists" if conf_ok else "❌ lnd.conf missing")
    return lnd_dir_ok and conf_ok
def backup():
    """Export and backup latest channel.db from lnd via ssh

    Copies CHANNEL_BACKUP to SSH_TARGET with scp; exits with status 1
    when the backup file is missing.

    :return: scp's return code
    """
    # secure remote backups via scp
    if not cfg.CHANNEL_BACKUP.is_file():
        print("Error: channel.backup not found")
        raise SystemExit(1)  # was `return exit(1)`: exit() raises, so the return never ran
    # scp options:
    # -B for non-interactive batch mode
    # -p to preserve modification & access time, modes
    #
    # BUG FIX: the original passed "-i {path}" / "-P {port}" as single argv
    # tokens, so scp received the value with a leading space embedded in the
    # option argument; pass option and value as separate tokens instead.
    complete = run([
        "scp",
        "-B",
        "-i", str(cfg.SSH_IDENTITY),
        "-p",
        "-P", str(cfg.SSH_PORT),
        str(cfg.CHANNEL_BACKUP),
        str(cfg.SSH_TARGET),
    ])
    return complete.returncode
def savepeers():
    """Save list of peers to file on disk for reconnecting

    Placeholder: currently only prints a notice.
    """
    # TODO: export list of peers to text file on disk
    print("Not implemented yet")
def randompass(string_length=10):
    """Generate a random alphabetic password.

    Uses the ``secrets`` module (CSPRNG) instead of ``random``, which is
    not suitable for security-sensitive values such as wallet passwords.

    :param string_length: number of characters to generate
    :return: random string of ASCII letters
    """
    from secrets import choice
    from string import ascii_letters

    return "".join(choice(ascii_letters) for _ in range(string_length))
def _write_password(password_str):
    """Write a generated password to PASSWORD_FILE_PATH.

    NOTE(review): the original docstring said the temporary
    TEMP_PASSWORD_FILE_PATH is used when SAVE_PASSWORD_CONTROL_FILE is
    absent, but BOTH branches wrote to PASSWORD_FILE_PATH; the duplicated
    branch is collapsed here without changing behavior. Confirm whether
    the temp path was actually intended for the no-control-file case.
    """
    with open(cfg.PASSWORD_FILE_PATH, "w") as password_file:
        password_file.write(password_str)
def _wallet_password():
    """Either load the wallet password from PASSWORD_FILE_PATH, or generate a new
    password, save it to file, and in either case return the password"""
    if not path.exists(cfg.PASSWORD_FILE_PATH):
        # password file doesn't exist yet: generate and persist one
        password_str = randompass(string_length=15)
        _write_password(password_str)
    else:
        # with-block closes the handle (the original leaked it via
        # open(...).read())
        with open(cfg.PASSWORD_FILE_PATH, "r") as password_file:
            password_str = password_file.read().rstrip()
    return password_str
def _generate_and_save_seed():
    """Generate a wallet seed via lnd's genseed endpoint, save it to
    SEED_FILENAME (one word per line), and return it.

    :return: list of mnemonic words, or None when the request failed
    """
    mnemonic = None
    return_data = get(cfg.URL_GENSEED, verify=cfg.TLS_CERT_PATH)
    if return_data.status_code == 200:
        json_seed_creation = return_data.json()
        mnemonic = json_seed_creation["cipher_seed_mnemonic"]
        # with-block ensures the seed file is flushed and closed
        with open(cfg.SEED_FILENAME, "w") as seed_file:
            for word in mnemonic:
                seed_file.write(word + "\n")
    # mnemonic stays None when the seed could not be created; the caller
    # (_wallet_data) handles that case
    return mnemonic
def _load_seed():
    """Load the wallet seed from SEED_FILENAME and return it as a word list."""
    # with-block closes the file (the original never closed it)
    with open(cfg.SEED_FILENAME, "r") as seed_file:
        # keep replace("\n", "") rather than strip() so other whitespace
        # in a word is preserved exactly as before
        return [word.replace("\n", "") for word in seed_file.readlines()]
def _wallet_data(password_str):
    """Build and return the wallet `data` dict with the mnemonic and wallet
    password"""
    # obtain the mnemonic: from disk if present, otherwise ask lnd for one
    if path.exists(cfg.SEED_FILENAME):
        mnemonic = _load_seed()
    else:
        mnemonic = _generate_and_save_seed()

    if not mnemonic:
        # seed generation failed; caller reports the error
        return {}

    encoded_password = base64.b64encode(str(password_str).encode("utf-8")).decode()
    return {
        "cipher_seed_mnemonic": mnemonic,
        "wallet_password": encoded_password,
    }
def create_wallet():
    """
    1. Check if there's already a wallet. If there is, then exit.
    2. Check for password.txt
    3. If doesn't exist then check for whether we should save the password
       (SAVE_PASSWORD_CONTROL_FILE exists) or not
    4. If password.txt exists import password in.
    5. If password.txt doesn't exist and we don't save the password, create a
       password and save it in temporary path as defined in PASSWORD_FILE_PATH
    6. Now start the wallet creation. Look for a seed defined in SEED_FILENAME,
       if not existing then generate a wallet based on the seed by LND.
    """
    # load-or-generate the wallet password
    password_str = _wallet_password()
    # Step 1 get seed from web or file
    data = _wallet_data(password_str)
    # Step 2: Create wallet
    if data:
        # Data is defined so proceed: POST to lnd's initwallet endpoint
        return_data = post(
            cfg.URL_INITWALLET, verify=cfg.TLS_CERT_PATH, data=dumps(data)
        )
        if return_data.status_code == 200:
            print("✅ Create wallet is successful")
        else:
            print("❌ Create wallet is not successful")
    else:
        print("❌ Error: cannot proceed, wallet data is not defined")
if __name__ == "__main__":
print("This file is not meant to be run directly") | noma/lnd.py | import pathlib
from subprocess import call, run
from os import path
from json import dumps
import base64
from requests import get, post
import noma.config as cfg
def check_wallet():
"""
This will either import an existing seed (or our own generated one),
or use LND to create one.
It will also create a password either randomly or use an existing password
provided)
:return str: Status
"""
if cfg.LND_PATH.exists():
if not cfg.WALLET_PATH.exists():
create_wallet()
else:
print("❌ Error: LND not initialized")
print("Wallet already exists!")
print("Please backup and move: " + str(cfg.WALLET_PATH))
print("and then restart lnd")
else:
print("❌ Error: lnd directory does not exist!")
def encodemacaroons(macaroonfile=cfg.MACAROON_PATH, tlsfile=cfg.TLS_CERT_PATH):
"""base64url encode macaroon and TLS certificate"""
if path.exists(str(macaroonfile)) and path.exists(str(tlsfile)):
with open(path.expanduser(macaroonfile), "rb") as f:
macaroon_bytes = f.read()
with open(path.expanduser(tlsfile), "rb") as f:
tls_bytes = f.read()
macaroonencoded = base64.urlsafe_b64encode(macaroon_bytes)
tlsdecoded = tls_bytes.decode("utf-8")
tlstrim = (
tlsdecoded.replace("\n", "")
.replace("-----BEGIN CERTIFICATE-----", "")
.replace("-----END CERTIFICATE-----", "")
.replace("+", "-")
.replace("/", "_")
.replace("=", "")
)
tlsencoded = tlstrim.encode("utf-8")
return {
"status": "OK",
"certificate": tlsencoded,
"macaroon": macaroonencoded,
}
else:
return {"status": "File Not Found"}
def connectstring(
hostname=cfg.URL_GRPC,
macaroonfile=cfg.MACAROON_PATH,
tlsfile=cfg.TLS_CERT_PATH,
):
"""Show lndconnect string for remote wallets such as Zap"""
result = encodemacaroons(macaroonfile, tlsfile)
if result["status"] == "OK":
macaroon_string = str(result["macaroon"], "utf-8")
cert_string = str(result["certificate"], "utf-8")
print(
"lndconnect://"
+ hostname
+ "?cert="
+ cert_string
+ "&macaroon="
+ macaroon_string
)
else:
print(result["status"])
def autounlock():
"""Auto-unlock lnd using password.txt, tls.cert"""
password_str = open(cfg.PASSWORD_FILE_PATH, "r").read().rstrip()
password_bytes = str(password_str).encode("utf-8")
data = {"wallet_password": base64.b64encode(password_bytes).decode()}
try:
response = post(
cfg.URL_UNLOCKWALLET, verify=cfg.TLS_CERT_PATH, data=dumps(data)
)
except Exception:
# Silence connection errors when lnd is not running
pass
else:
try:
print(response.json())
except Exception:
# JSON will fail to decode when unlocked already since response is
# empty
pass
def get_kv(key, section="", config_path=""):
"""
Parse key-value config files and print out values
:param key: left part of key value pair
:param config_path: path to config file
:param section: [section] of the kv pair
:return: value of key
"""
from configparser import ConfigParser
if not config_path:
config_path = cfg.LND_CONF
if not section:
section = "Application Options"
parser = ConfigParser(strict=False)
with open(config_path) as lines:
parser.read_file(lines)
return parser.get(section, key)
def set_kv(key, value, section="", config_path=""):
"""
Parse key-value config files and write them out with a key-value change
Note: comments are lost!
:param key: left part of key value pair
:param value: right part of key value pair
:param section: optional name of section to set in
:param config_path: path to file
:return:
"""
from configparser import ConfigParser
if not section:
section = "Application Options"
if not config_path:
config_path = cfg.LND_CONF
parser = ConfigParser(strict=False)
with open(config_path) as lines:
parser.read_file(lines)
parser.set(section, key, value)
with open(config_path, "w") as file:
parser.write(file, space_around_delimiters=False)
file.close()
def setup_tor(version=""):
"""Add tor hidden service to lnd"""
if not version:
version = "v3"
hostname_path = "/var/lib/tor/lnd-{}/hostname".format(version)
try:
print("Adding externalip directive to lnd for tor")
with open(hostname_path, "r") as hostname:
set_kv("externalip", hostname.read(), "Application Options")
except Exception as error:
print(error.__class__.__name__, ":", error)
def set_bitcoind(password, user="", lnd_config=""):
"""Add bitcoind rpc username and password to lnd"""
if not user:
user = "lncm"
if not lnd_config:
lnd_config = cfg.LND_CONF
if pathlib.Path(lnd_config).is_file():
set_kv("bitcoind.rpcuser", user, "Bitcoind", lnd_config)
set_kv("bitcoind.rpcpass", password, "Bitcoind", lnd_config)
def autoconnect(list_path=""):
"""Auto-connect to a list of nodes in lnd/autoconnect.txt"""
print("Connecting to:")
if not list_path:
list_path = pathlib.Path(cfg.LND_PATH / "autoconnect.txt")
with open(list_path) as address_list:
for address in address_list:
print(address.strip())
call(
[
"docker",
"exec",
cfg.LND_MODE + "_lnd_1",
"lncli",
"connect",
address.strip(),
]
)
def check():
"""Check lnd filesystem structure"""
if cfg.LND_PATH.is_dir():
print("✅ lnd directory exists")
else:
print("❌ lnd directory missing")
if cfg.LND_CONF.is_file():
print("✅ lnd.conf exists")
else:
print("❌ lnd.conf missing")
if cfg.LND_PATH.is_dir() and cfg.LND_CONF.is_file():
return True
return False
def backup():
    """Export and backup latest channel.db from lnd via ssh

    Copies CHANNEL_BACKUP to SSH_TARGET with scp; exits with status 1
    when the backup file is missing.

    :return: scp's return code
    """
    # secure remote backups via scp
    if not cfg.CHANNEL_BACKUP.is_file():
        print("Error: channel.backup not found")
        raise SystemExit(1)  # was `return exit(1)`: exit() raises, so the return never ran
    # scp options:
    # -B for non-interactive batch mode
    # -p to preserve modification & access time, modes
    #
    # BUG FIX: the original passed "-i {path}" / "-P {port}" as single argv
    # tokens, so scp received the value with a leading space embedded in the
    # option argument; pass option and value as separate tokens instead.
    complete = run([
        "scp",
        "-B",
        "-i", str(cfg.SSH_IDENTITY),
        "-p",
        "-P", str(cfg.SSH_PORT),
        str(cfg.CHANNEL_BACKUP),
        str(cfg.SSH_TARGET),
    ])
    return complete.returncode
def savepeers():
"""Save list of peers to file on disk for reconnecting"""
# TODO: export list of peers to text file on disk
print("Not implemented yet")
def randompass(string_length=10):
    """Generate a random alphabetic password.

    Uses the ``secrets`` module (CSPRNG) instead of ``random``, which is
    not suitable for security-sensitive values such as wallet passwords.

    :param string_length: number of characters to generate
    :return: random string of ASCII letters
    """
    from secrets import choice
    from string import ascii_letters

    return "".join(choice(ascii_letters) for _ in range(string_length))
def _write_password(password_str):
    """Write a generated password to PASSWORD_FILE_PATH.

    NOTE(review): the original docstring said the temporary
    TEMP_PASSWORD_FILE_PATH is used when SAVE_PASSWORD_CONTROL_FILE is
    absent, but BOTH branches wrote to PASSWORD_FILE_PATH; the duplicated
    branch is collapsed here without changing behavior. Confirm whether
    the temp path was actually intended for the no-control-file case.
    """
    with open(cfg.PASSWORD_FILE_PATH, "w") as password_file:
        password_file.write(password_str)
def _wallet_password():
"""Either load the wallet password from PASSWORD_FILE_PATH, or generate a new
password, save it to file, and in either case return the password"""
# Check if there is an existing file, if not generate a random password
if not path.exists(cfg.PASSWORD_FILE_PATH):
# password file doesnt exist
password_str = randompass(string_length=15)
_write_password(password_str)
else:
# Get password from file if password file already exists
password_str = open(cfg.PASSWORD_FILE_PATH, "r").read().rstrip()
return password_str
def _generate_and_save_seed():
"""Generate a wallet seed, save it to SEED_FILENAME, and return it"""
mnemonic = None
return_data = get(cfg.URL_GENSEED, verify=cfg.TLS_CERT_PATH)
if return_data.status_code == 200:
json_seed_creation = return_data.json()
mnemonic = json_seed_creation["cipher_seed_mnemonic"]
seed_file = open(cfg.SEED_FILENAME, "w")
for word in mnemonic:
seed_file.write(word + "\n")
seed_file.close()
# Data doesnt get set if cant create the seed but that is fine, handle
# it later
return mnemonic
def _load_seed():
    """Load the wallet seed from SEED_FILENAME and return it as a word list."""
    # with-block closes the file (the original never closed it)
    with open(cfg.SEED_FILENAME, "r") as seed_file:
        # keep replace("\n", "") rather than strip() so other whitespace
        # in a word is preserved exactly as before
        return [word.replace("\n", "") for word in seed_file.readlines()]
def _wallet_data(password_str):
"""Build and return the wallet `data` dict with the mnemonic and wallet
password"""
# Convert password to byte encoded
password_bytes = str(password_str).encode("utf-8")
# Send request to generate seed if seed file doesnt exist
if not path.exists(cfg.SEED_FILENAME):
mnemonic = _generate_and_save_seed()
else:
mnemonic = _load_seed()
if mnemonic:
# Generate init wallet file from what was posted
return {
"cipher_seed_mnemonic": mnemonic,
"wallet_password": base64.b64encode(password_bytes).decode(),
}
return {}
def create_wallet():
"""
1. Check if there's already a wallet. If there is, then exit.
2. Check for password.txt
3. If doesn't exist then check for whether we should save the password
(SAVE_PASSWORD_CONTROL_FILE exists) or not
4. If password.txt exists import password in.
5. If password.txt doesn't exist and we don't save the password, create a
password and save it in temporary path as defined in PASSWORD_FILE_PATH
6. Now start the wallet creation. Look for a seed defined in SEED_FILENAME,
if not existing then generate a wallet based on the seed by LND.
"""
password_str = _wallet_password()
# Step 1 get seed from web or file
data = _wallet_data(password_str)
# Step 2: Create wallet
if data:
# Data is defined so proceed
return_data = post(
cfg.URL_INITWALLET, verify=cfg.TLS_CERT_PATH, data=dumps(data)
)
if return_data.status_code == 200:
print("✅ Create wallet is successful")
else:
print("❌ Create wallet is not successful")
else:
print("❌ Error: cannot proceed, wallet data is not defined")
if __name__ == "__main__":
print("This file is not meant to be run directly") | 0.348202 | 0.10393 |
import numpy as np
from . import het_compiled
from ...utilities.interpolate import interpolate_coord_robust, interpolate_coord
from ...utilities.multidim import batch_multiply_ith_dimension, multiply_ith_dimension
from typing import Optional, Sequence, Any, List, Tuple, Union
import copy
class LawOfMotion:
    """Abstract class representing a matrix that operates on state space.

    Rather than giant Ns*Ns matrix (even if sparse), some other representation
    almost always desirable; such representations are subclasses of this."""

    def __matmul__(self, X):
        # apply this operator to array X; implemented by subclasses
        pass

    @property
    def T(self):
        # return the transposed (adjoint) operator; implemented by subclasses
        pass
def lottery_1d(a, a_grid, monotonic=False):
    """Build a PolicyLottery1D from policy values `a` on grid `a_grid`.

    `monotonic=True` selects the interpolation routine that assumes the
    policy is monotonic; otherwise the robust routine is used.
    """
    interp = interpolate_coord if monotonic else interpolate_coord_robust
    return PolicyLottery1D(*interp(a_grid, a), a_grid)
class PolicyLottery1D(LawOfMotion):
    """Law of motion induced by a 1D policy, stored as lottery indices `i`
    and weights `pi` on `grid` (presumably weight pi on gridpoint i and
    1-pi on i+1, as produced by interpolate_coord — confirm against the
    interpolation helpers)."""
    # TODO: always operates on final dimension, make more general!

    def __init__(self, i, pi, grid, forward=True):
        # flatten non-policy dimensions into one because that's what methods accept
        self.i = i.reshape((-1,) + grid.shape)
        self.flatshape = self.i.shape
        self.pi = pi.reshape(self.flatshape)

        # but store original shape so we can convert all outputs to it
        self.shape = i.shape
        self.grid = grid

        # also store shape of the endogenous grid itself
        self.endog_shape = self.shape[-1:]

        # forward=True applies the lottery forward; False applies its
        # transpose (the expectation kernel)
        self.forward = forward

    @property
    def T(self):
        # transpose shares all lottery data; only the direction flag flips
        newself = copy.copy(self)
        newself.forward = not self.forward
        return newself

    def __matmul__(self, X):
        # X is flattened to (batch, grid) for the compiled kernels and the
        # result restored to the original shape
        if self.forward:
            return het_compiled.forward_policy_1d(X.reshape(self.flatshape), self.i, self.pi).reshape(self.shape)
        else:
            return het_compiled.expectation_policy_1d(X.reshape(self.flatshape), self.i, self.pi).reshape(self.shape)
class ShockedPolicyLottery1D(PolicyLottery1D):
    """PolicyLottery1D variant whose forward pass applies the shock
    (derivative) kernel; the transposed direction is not implemented."""

    def __matmul__(self, X):
        if self.forward:
            return het_compiled.forward_policy_shock_1d(X.reshape(self.flatshape), self.i, self.pi).reshape(self.shape)
        else:
            raise NotImplementedError
def lottery_2d(a, b, a_grid, b_grid, monotonic=False):
    """Build a PolicyLottery2D for the pair of policies (a, b) on their grids.

    `monotonic=True` selects the interpolation routine that assumes both
    policies are monotonic; otherwise the robust routine is used.
    """
    if not monotonic:
        return PolicyLottery2D(*interpolate_coord_robust(a_grid, a),
                               *interpolate_coord_robust(b_grid, b), a_grid, b_grid)
    else:
        # was a redundant second `if monotonic:` which obscured that the two
        # branches are exhaustive (an implicit None return looked possible);
        # right now we have no monotonic 2D examples, so this shouldn't be called
        return PolicyLottery2D(*interpolate_coord(a_grid, a),
                               *interpolate_coord(b_grid, b), a_grid, b_grid)
class PolicyLottery2D(LawOfMotion):
    """Law of motion induced by a pair of policies on a 2D endogenous grid,
    stored as lottery indices/weights (i1, pi1) on grid1 and (i2, pi2) on
    grid2. Operates on the final two dimensions of its input."""

    def __init__(self, i1, pi1, i2, pi2, grid1, grid2, forward=True):
        # flatten non-policy dimensions into one because that's what methods accept
        self.i1 = i1.reshape((-1,) + grid1.shape + grid2.shape)
        self.flatshape = self.i1.shape

        self.i2 = i2.reshape(self.flatshape)
        self.pi1 = pi1.reshape(self.flatshape)
        self.pi2 = pi2.reshape(self.flatshape)

        # but store original shape so we can convert all outputs to it
        self.shape = i1.shape
        self.grid1 = grid1
        self.grid2 = grid2

        # also store shape of the endogenous grid itself
        self.endog_shape = self.shape[-2:]

        # forward=True pushes distributions forward; False applies the
        # transposed (expectation) kernel
        self.forward = forward

    @property
    def T(self):
        # transpose shares all lottery data; only the direction flag flips
        newself = copy.copy(self)
        newself.forward = not self.forward
        return newself

    def __matmul__(self, X):
        if self.forward:
            return het_compiled.forward_policy_2d(X.reshape(self.flatshape), self.i1, self.i2,
                                                  self.pi1, self.pi2).reshape(self.shape)
        else:
            return het_compiled.expectation_policy_2d(X.reshape(self.flatshape), self.i1, self.i2,
                                                      self.pi1, self.pi2).reshape(self.shape)
class ShockedPolicyLottery2D(PolicyLottery2D):
    """PolicyLottery2D variant whose forward pass applies the shock
    (derivative) kernel; the transposed direction is not implemented."""

    def __matmul__(self, X):
        if self.forward:
            # BUG FIX: the original referenced self.i / self.pi, which do not
            # exist on PolicyLottery2D (it stores i1/i2/pi1/pi2), so this path
            # always raised AttributeError. Pass all four lottery arrays,
            # mirroring PolicyLottery2D.__matmul__.
            # NOTE(review): assumes het_compiled.forward_policy_shock_2d takes
            # (X, i1, i2, pi1, pi2) like forward_policy_2d — confirm signature.
            return het_compiled.forward_policy_shock_2d(
                X.reshape(self.flatshape), self.i1, self.i2, self.pi1, self.pi2
            ).reshape(self.shape)
        else:
            raise NotImplementedError
class Markov(LawOfMotion):
    """Exogenous law of motion: transition matrix Pi applied along
    dimension i of the input array."""

    def __init__(self, Pi, i):
        self.Pi = Pi
        self.i = i

    @property
    def T(self):
        transposed = copy.copy(self)
        Pi_T = self.Pi.T
        if isinstance(Pi_T, np.ndarray):
            # optimizing: copy so the transposed matrix is laid out
            # contiguously in memory
            Pi_T = Pi_T.copy()
        transposed.Pi = Pi_T
        return transposed

    def __matmul__(self, X):
        return multiply_ith_dimension(self.Pi, self.i, X)
class DiscreteChoice(LawOfMotion):
    """Law of motion for a discrete choice taken with probabilities P."""

    def __init__(self, P, i):
        self.P = P  # choice prob P(d|...s_i...), 0 for unavailable choices
        self.i = i  # dimension of state space that will be updated

        # cache "transposed" version of this, since we'll always need both!
        self.forward = True
        # swap the leading choice axis (0) with state axis i (offset by 1
        # for P's choice dimension); copy for memory contiguity
        self.P_T = P.swapaxes(0, 1+self.i).copy()

    @property
    def T(self):
        # transpose shares data with self; only the direction flag flips
        newself = copy.copy(self)
        newself.forward = not self.forward
        return newself

    def __matmul__(self, X):
        # forward uses P; backward uses the cached axis-swapped P_T
        if self.forward:
            return batch_multiply_ith_dimension(self.P, self.i, X)
        else:
            return batch_multiply_ith_dimension(self.P_T, self.i, X)
from . import het_compiled
from ...utilities.interpolate import interpolate_coord_robust, interpolate_coord
from ...utilities.multidim import batch_multiply_ith_dimension, multiply_ith_dimension
from typing import Optional, Sequence, Any, List, Tuple, Union
import copy
class LawOfMotion:
    """Abstract representation of a linear operator on the state space.

    A literal Ns-by-Ns matrix (even a sparse one) is almost never the right
    representation; concrete subclasses store something more compact and
    implement application (``@``) and transposition (``.T``) directly.
    """

    def __matmul__(self, X):
        # Subclasses implement the action of the operator on X.
        pass

    @property
    def T(self):
        # Subclasses return the transposed operator.
        pass
def lottery_1d(a, a_grid, monotonic=False):
    """Build a 1D policy lottery for policy ``a`` on ``a_grid``.

    ``monotonic=True`` selects the faster interpolation routine, which is
    only valid when the policy is monotonic in the grid.
    """
    interp = interpolate_coord if monotonic else interpolate_coord_robust
    return PolicyLottery1D(*interp(a_grid, a), a_grid)
class PolicyLottery1D(LawOfMotion):
    """Lottery representation of a 1D policy.

    TODO: always operates on final dimension, make more general!
    """

    def __init__(self, i, pi, grid, forward=True):
        # Remember the caller-facing shape so outputs can be restored to it.
        self.shape = i.shape
        # Flatten all non-policy dimensions into one, which is the layout
        # the compiled kernels expect.
        self.i = i.reshape((-1,) + grid.shape)
        self.flatshape = self.i.shape
        self.pi = pi.reshape(self.flatshape)
        self.grid = grid
        # Shape of the endogenous grid itself.
        self.endog_shape = self.shape[-1:]
        self.forward = forward

    @property
    def T(self):
        """Same operator with its direction of application flipped."""
        flipped = copy.copy(self)
        flipped.forward = not self.forward
        return flipped

    def __matmul__(self, X):
        Xflat = X.reshape(self.flatshape)
        if self.forward:
            out = het_compiled.forward_policy_1d(Xflat, self.i, self.pi)
        else:
            out = het_compiled.expectation_policy_1d(Xflat, self.i, self.pi)
        return out.reshape(self.shape)
class ShockedPolicyLottery1D(PolicyLottery1D):
    """1D policy lottery for shocked (perturbed) policies; forward only."""

    def __matmul__(self, X):
        if not self.forward:
            # Backward (expectation) direction is not supported.
            raise NotImplementedError
        out = het_compiled.forward_policy_shock_1d(X.reshape(self.flatshape), self.i, self.pi)
        return out.reshape(self.shape)
def lottery_2d(a, b, a_grid, b_grid, monotonic=False):
    """Build a 2D policy lottery for policies ``a`` and ``b`` on their grids."""
    if monotonic:
        # right now we have no monotonic 2D examples, so this shouldn't be called
        return PolicyLottery2D(*interpolate_coord(a_grid, a),
                               *interpolate_coord(b_grid, b), a_grid, b_grid)
    return PolicyLottery2D(*interpolate_coord_robust(a_grid, a),
                           *interpolate_coord_robust(b_grid, b), a_grid, b_grid)
class PolicyLottery2D(LawOfMotion):
    """Lottery representation of a 2D policy (acts on the final two dimensions)."""

    def __init__(self, i1, pi1, i2, pi2, grid1, grid2, forward=True):
        # Remember the caller-facing shape so outputs can be restored to it.
        self.shape = i1.shape
        # Flatten all non-policy dimensions into one, which is the layout
        # the compiled kernels expect.
        self.i1 = i1.reshape((-1,) + grid1.shape + grid2.shape)
        self.flatshape = self.i1.shape
        self.i2 = i2.reshape(self.flatshape)
        self.pi1 = pi1.reshape(self.flatshape)
        self.pi2 = pi2.reshape(self.flatshape)
        self.grid1 = grid1
        self.grid2 = grid2
        # Shape of the endogenous grid itself.
        self.endog_shape = self.shape[-2:]
        self.forward = forward

    @property
    def T(self):
        """Same operator with its direction of application flipped."""
        flipped = copy.copy(self)
        flipped.forward = not self.forward
        return flipped

    def __matmul__(self, X):
        Xflat = X.reshape(self.flatshape)
        kernel = (het_compiled.forward_policy_2d if self.forward
                  else het_compiled.expectation_policy_2d)
        return kernel(Xflat, self.i1, self.i2, self.pi1, self.pi2).reshape(self.shape)
class ShockedPolicyLottery2D(PolicyLottery2D):
    """Variant of PolicyLottery2D for shocked (perturbed) policies; forward only."""

    def __matmul__(self, X):
        if self.forward:
            # NOTE(review): PolicyLottery2D defines i1/i2/pi1/pi2, not i/pi, so
            # this line looks like it raises AttributeError when called — confirm
            # the intended attributes and the signature of forward_policy_shock_2d.
            return het_compiled.forward_policy_shock_2d(X.reshape(self.flatshape), self.i, self.pi).reshape(self.shape)
        else:
            # Backward (expectation) direction is not supported for shocked lotteries.
            raise NotImplementedError
class Markov(LawOfMotion):
    """Exogenous Markov transition applied along dimension ``i`` of the state space."""

    def __init__(self, Pi, i):
        self.Pi = Pi  # transition matrix
        self.i = i    # state-space dimension it acts on

    @property
    def T(self):
        """Return a shallow copy of this operator with ``Pi`` transposed."""
        newself = copy.copy(self)
        newself.Pi = newself.Pi.T
        if isinstance(newself.Pi, np.ndarray):
            # optimizing: copy to get right order in memory
            newself.Pi = newself.Pi.copy()
        return newself

    def __matmul__(self, X):
        # Apply Pi along the i-th dimension of X.
        return multiply_ith_dimension(self.Pi, self.i, X)
class DiscreteChoice(LawOfMotion):
    """Discrete-choice transition with choice probabilities ``P(d|...s_i...)``."""

    def __init__(self, P, i):
        self.P = P  # choice prob P(d|...s_i...), 0 for unavailable choices
        self.i = i  # dimension of state space that will be updated
        # cache "transposed" version of this, since we'll always need both!
        self.forward = True
        self.P_T = P.swapaxes(0, 1+self.i).copy()

    @property
    def T(self):
        """Return the operator with its direction of application flipped."""
        newself = copy.copy(self)
        newself.forward = not self.forward
        return newself

    def __matmul__(self, X):
        # Forward uses P directly; backward uses the cached swapped-axes copy.
        if self.forward:
            return batch_multiply_ith_dimension(self.P, self.i, X)
        else:
            return batch_multiply_ith_dimension(self.P_T, self.i, X)
import copy
import logging
class EventCollectRecorder():
    """Recorder that logs events into a regular text file.

    On each arrival of an event (with a new time) a line is written into the
    log file. In each line all the states of the events are printed, creating
    a space separated value list that can be used by many tools, e.g. gnuplot
    or libreoffice.

    Before an event can be reported it has to be registered, which typically
    should happen before the first event occurs, to ensure that all lines have
    the same content from the start. During registration an event also gets
    assigned a source (for identification) and a column where it will be
    printed inside the lines. Along with each event a timestamp has to be
    provided that clearly gives the events the right order. As processing
    perhaps happens in an unpredictable order, events may not be delivered
    exactly in timely order. To compensate for a certain amount of nonlinear
    processing, the class maintains a time based cache. When an event occurs
    that is located in the past, it is sorted into the right position in the
    cache. As long as events are in the cache they may also be overwritten
    with updated data. A grouping of events can be achieved when the events
    are reported using identical timestamps.
    """

    def __init__(self, path, cache_duration=2):
        # Append mode: an existing log file is continued, not overwritten.
        self._ostream = open(path, "a", encoding="utf-8")
        self._cache_duration = cache_duration
        self._head = {"Time" : 0}            # most recent known event state
        self._tail = copy.copy(self._head)   # state just before the cache window
        self._cache = []                     # pending lines, ordered by time
        self._source_from_pos_lookup = ["Time"]  # column position -> source key

    def close(self):
        """Flush all cached events to disk and close the output stream."""
        self._dump_events()
        self._ostream.close()

    def __del__(self):
        # Best-effort cleanup; a destructor must never raise.
        try:
            self.close()
        except BaseException:
            pass

    def register_event_source(self, source, pos, default):
        """ Register a new event from the given source to be printed as the
        pos-th column in the text lines. Source and pos have to be unique for
        each event. Until the event occurs the first time, the default value
        is used for the event state.

        Arguments:
        source -- The event source, unique key for identification of the event
        pos -- The column number the event is printed at in the output lines
        default -- The value of the event state until the event is received
                   the first time
        """
        logging.info("Registering event source %s at position %d", source, pos)
        # Ensure there are enough positions in list
        self._source_from_pos_lookup.extend([None] * (pos + 1 - len(self._source_from_pos_lookup)))
        # Check if position is used already
        if self._source_from_pos_lookup[pos]:
            raise ValueError("Event registration for source {} failed: "
                             "Position {} is already given to source {}"
                             .format(source, pos, self._source_from_pos_lookup[pos]))
        # Check if source key is used already
        if source in self._head:
            raise ValueError("Event registration for source {} failed: Source already in use"
                             .format(source))
        self._source_from_pos_lookup[pos] = source
        # Seed the default value through tail, cache and head.
        self._propagate_event(source, -1, default)

    def create_event(self, source, time, event):
        """ Set the state of the event from ``source`` to ``event``. Use
        ``time`` to locate the event in time. In case the event lies before
        the previously reported event, it is sorted into the right location
        in the cache. Events that fall outside the cache window are written
        to disk.

        Arguments:
        source -- The event source, unique key for identification of the event
        time -- Location of the event in time
        event -- The event value
        """
        logging.info("New event time:source:event %f:%s:%s", time, source, str(event))
        if source not in self._head:
            raise ValueError("Event creation failed: Source {} is not registered"
                             .format(source))
        if time < self._head["Time"] - self._cache_duration:
            raise ValueError("Event creation failed: Time ({}) outside of _cache ({})"
                             .format(time, self._cache))
        if time > self._head["Time"]:
            self._append_event(source, time, event)
        else:
            self._insert_event(source, time, event)
        logging.debug("%s @ %f -> %s", source, time, str(self._cache))

    def _append_event(self, source, time, event):
        # Newest event so far: advance head and push a snapshot into the cache.
        logging.debug("Inserting event at head")
        self._head[source] = event
        self._head["Time"] = time
        self._cache.append(copy.copy(self._head))
        # Anything older than the cache window can now be written out.
        self._dump_events(time - self._cache_duration)

    def _insert_event(self, source, time, event):
        # Event lies within the cache window: find its slot (or create one).
        cur_num = -1
        current = None
        for cur_num, current in enumerate(self._cache):
            if time <= current["Time"]:
                break
        else:
            raise RuntimeError("Internal error: Order of events is not plausible")
        # In case a _cache entry for the given time exists already, just update
        if time == current["Time"]:
            logging.debug("Updating existing at %d", cur_num)
        else:
            logging.debug("Inserting before %d", cur_num)
            # Clone the state of the predecessor (or the tail) as the baseline.
            if cur_num:
                new_event = copy.copy(self._cache[cur_num - 1])
            else:
                new_event = copy.copy(self._tail)
            new_event["Time"] = time
            self._cache.insert(cur_num, new_event)
        self._propagate_event(source, cur_num, event)

    def _propagate_event(self, source, cache_entry_num, new_message):
        """Propagate an event change from tail through the cache up to head.

        Propagation stops when a more recent event update already happened.
        Propagation also fills in sources missing from cache entries.
        """
        if -1 == cache_entry_num:
            current_message = self._tail.get(source)
            self._tail[source] = new_message
            cache_entry_num = 0
        else:
            current_message = self._cache[cache_entry_num].get(source)
        # Propagate in _cache as long as the cached value equals the old one;
        # a different value indicates a later update for that entry's time.
        for current in self._cache[cache_entry_num:]:
            if current.get(source) == current_message:
                current[source] = new_message
            else:
                break
        else:
            self._head[source] = new_message

    def _dump_events(self, time=None):
        """Write cache entries older than ``time`` (all if None) to disk.

        Returns the number of entries written.
        """
        num = 0
        for num, event in enumerate(self._cache):
            if (time is None) or (time > event["Time"]):
                self._tail = event
                self._ostream.write(self._format_event(event) + '\n')
            else:
                break
        else:
            num += 1
        if num:
            self._cache = self._cache[num:]
            self._ostream.flush()
        return num

    def _format_event(self, event):
        # Join all registered columns; unassigned positions (None) are skipped.
        return " ".join(str(event[source])
                        for source in self._source_from_pos_lookup if source)

import copy
import logging
class EventCollectRecorder():
    """Recorder that logs events into a regular text file.

    On each arrival of an event (with a new time) a line is written into the
    log file. In each line all the states of the events are printed, creating
    a space separated value list that can be used by many tools, e.g. gnuplot
    or libreoffice.

    Before an event can be reported it has to be registered, which typically
    should happen before the first event occurs, to ensure that all lines have
    the same content from the start. During registration an event also gets
    assigned a source (for identification) and a column where it will be
    printed inside the lines. Along with each event a timestamp has to be
    provided that clearly gives the events the right order. As processing
    perhaps happens in an unpredictable order, events may not be delivered
    exactly in timely order. To compensate for a certain amount of nonlinear
    processing, the class maintains a time based cache. When an event occurs
    that is located in the past, it is sorted into the right position in the
    cache. As long as events are in the cache they may also be overwritten
    with updated data. A grouping of events can be achieved when the events
    are reported using identical timestamps.
    """

    def __init__(self, path, cache_duration=2):
        # Append mode: an existing log file is continued, not overwritten.
        self._ostream = open(path, "a", encoding="utf-8")
        self._cache_duration = cache_duration
        # _head holds the most recent event state; _tail the state just
        # before the cache window.
        self._head = {"Time" : 0}
        self._tail = copy.copy(self._head)
        self._cache = []
        # Maps column position -> source key; index 0 is the timestamp column.
        self._source_from_pos_lookup = ["Time"]

    def __del__(self):
        # Best-effort: flush pending events and close the stream; a
        # destructor must never raise.
        try:
            self._dump_events()
            self._ostream.close()
        except BaseException:
            pass

    def register_event_source(self, source, pos, default):
        """ Register a new event from the given source to be printed as the
        pos-th column in the text lines. Source and pos have to be unique for
        each event. Until the event occurs the first time, the default value
        is used for the event state.

        Arguments:
        source -- The event source, unique key for identification of the event
        pos -- The column number the event is printed at in the output lines
        default -- The value of the event state until the event is received
                   the first time
        """
        logging.info("Registering event source %s at position %d", source, pos)
        # Ensure there are enough positions in list
        self._source_from_pos_lookup.extend([None] * (pos + 1 - len(self._source_from_pos_lookup)))
        # Check if position is used already
        if self._source_from_pos_lookup[pos]:
            raise Exception("Event registration for source {} failed: "
                            "Position {} is already given to source {}"
                            .format(source, pos, self._source_from_pos_lookup[pos]))
        # Check if source key is used already
        if source in self._head:
            raise Exception("Event registration for source {} failed: Source already in use"
                            .format(source))
        self._source_from_pos_lookup[pos] = source
        # Seed the default value through tail, cache and head.
        self._propagate_event(source, -1, default)

    def create_event(self, source, time, event):
        """ Set the state of the event from ``source`` to ``event``. Use
        ``time`` to locate the event in time. In case the event lies before
        the previously reported event, it is sorted into the right location
        in the cache. Events that fall outside the cache window are written
        to disk.

        Arguments:
        source -- The event source, unique key for identification of the event
        time -- Location of the event in time
        event -- The event value
        """
        logging.info("New event time:source:event %f:%s:%s", time, source, str(event))
        if source not in self._head:
            raise Exception("Event creation failed: Source {} is not registered"
                            .format(source))
        if time < self._head["Time"] - self._cache_duration:
            raise Exception("Event creation failed: Time ({}) outside of _cache ({})"
                            .format(time, self._cache))
        if time > self._head["Time"]:
            self._append_event(source, time, event)
        else:
            self._insert_event(source, time, event)
        logging.debug("%s @ %f -> %s", source, time, str(self._cache))

    def _append_event(self, source, time, event):
        # Newest event so far: advance head and push a snapshot into the
        # cache, then dump everything older than the cache window.
        logging.debug("Inserting event at head")
        self._head[source] = event
        self._head["Time"] = time
        self._cache.append(copy.copy(self._head))
        self._dump_events(time - self._cache_duration)

    def _insert_event(self, source, time, event):
        # Event lies within the cache window: find its slot (or create one).
        cur_num = -1
        current = None
        for cur_num, current in enumerate(self._cache):
            if time <= current["Time"]:
                break
        else:
            raise Exception("Internal error: Order of events is not plausible")
        # In case _cache entry for given time exists already, just update
        if time == current["Time"]:
            logging.debug("Updating existing at %d", cur_num)
        else:
            logging.debug("Inserting before %d", cur_num)
            # Clone the state of the predecessor (or the tail) as baseline.
            if cur_num:
                new_event = copy.copy(self._cache[cur_num - 1])
            else:
                new_event = copy.copy(self._tail)
            new_event["Time"] = time
            self._cache.insert(cur_num, new_event)
        self._propagate_event(source, cur_num, event)

    def _propagate_event(self, source, cache_entry_num, new_message):
        """Propagate an event change from tail through _cache until head.

        Propagation is stopped when a more recent event update happened
        already. Propagation will also propagate missing sources in the
        _cache.
        """
        if -1 == cache_entry_num:
            current_message = self._tail.get(source)
            self._tail[source] = new_message
            cache_entry_num = 0
        else:
            current_message = self._cache[cache_entry_num].get(source)
        # Propagate in _cache as long as event in _cache is the same. A different
        # event indicates that there has been already an event creation for the
        # time of the _cache entry
        for current in self._cache[cache_entry_num:]:
            if current.get(source) == current_message:
                current[source] = new_message
            else: break
        else:
            self._head[source] = new_message

    def _dump_events(self, time=None):
        # Write cache entries older than `time` (all if None) to disk and
        # drop them from the cache; returns the number of entries written.
        num = 0
        for num, event in enumerate(self._cache):
            if (time is None) or (time > event["Time"]):
                self._tail = event
                text = self._format_event(event)
                self._ostream.write(text + '\n')
            else: break
        else:
            # Loop exhausted without break: every entry was written.
            num += 1
        if num:
            self._cache = self._cache[num:]
            self._ostream.flush()
        return num

    def _format_event(self, event):
        # Build one space-separated line; unassigned positions (None) are
        # skipped, and the trailing separator is stripped.
        text = ""
        for source in self._source_from_pos_lookup:
            if source:
                text += str(event[source]) + " "
        return text[:-1]
from typing import Any, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._azure_quota_extension_api_enums import *
class CommonResourceProperties(msrest.serialization.Model):
    """Resource properties.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type. Example: "Microsoft.Quota/quotas".
    :vartype type: str
    """

    _validation = {key: {'readonly': True} for key in ('id', 'name', 'type')}

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # All fields are server-populated and start unset.
        self.id = None
        self.name = None
        self.type = None
class CreateGenericQuotaRequestParameters(msrest.serialization.Model):
    """Quota change requests information.

    :param value: Quota change requests.
    :type value: list[~azure.mgmt.quota.models.CurrentQuotaLimitBase]
    """

    _attribute_map = {'value': {'key': 'value', 'type': '[CurrentQuotaLimitBase]'}}

    def __init__(self, *, value: Optional[List["CurrentQuotaLimitBase"]] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
class CurrentQuotaLimitBase(msrest.serialization.Model):
    """Quota limit.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: The resource ID.
    :vartype id: str
    :ivar type: The resource type.
    :vartype type: str
    :ivar name: The resource name.
    :vartype name: str
    :param properties: Quota properties for the specified resource, based on the API called, Quotas
     or Usages.
    :type properties: ~azure.mgmt.quota.models.QuotaProperties
    """

    _validation = {key: {'readonly': True} for key in ('id', 'type', 'name')}

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'QuotaProperties'},
    }

    def __init__(self, *, properties: Optional["QuotaProperties"] = None, **kwargs):
        super().__init__(**kwargs)
        self.properties = properties
        # id/type/name are read-only and filled in by the service.
        self.id = None
        self.type = None
        self.name = None
class CurrentUsagesBase(msrest.serialization.Model):
    """Resource usage.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: The resource ID.
    :vartype id: str
    :ivar type: The resource type.
    :vartype type: str
    :ivar name: The resource name.
    :vartype name: str
    :param properties: Usage properties for the specified resource.
    :type properties: ~azure.mgmt.quota.models.UsagesProperties
    """

    _validation = {key: {'readonly': True} for key in ('id', 'type', 'name')}

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'UsagesProperties'},
    }

    def __init__(self, *, properties: Optional["UsagesProperties"] = None, **kwargs):
        super().__init__(**kwargs)
        self.properties = properties
        # id/type/name are read-only and filled in by the service.
        self.id = None
        self.type = None
        self.name = None
class ExceptionResponse(msrest.serialization.Model):
    """Error.

    :param error: API error details.
    :type error: ~azure.mgmt.quota.models.ServiceError
    """

    _attribute_map = {'error': {'key': 'error', 'type': 'ServiceError'}}

    def __init__(self, *, error: Optional["ServiceError"] = None, **kwargs):
        super().__init__(**kwargs)
        self.error = error
class LimitJsonObject(msrest.serialization.Model):
    """LimitJson abstract class.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: LimitValue.

    All required parameters must be populated in order to send to Azure.

    :param limit_object_type: Required. The limit object type.Constant filled by server. Possible
     values include: "LimitValue".
    :type limit_object_type: str or ~azure.mgmt.quota.models.LimitType
    """

    _validation = {'limit_object_type': {'required': True}}

    _attribute_map = {'limit_object_type': {'key': 'limitObjectType', 'type': 'str'}}

    # Polymorphic discriminator used by msrest to deserialize into subclasses.
    _subtype_map = {'limit_object_type': {'LimitValue': 'LimitValue'}}

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.limit_object_type = None  # type: Optional[str]
class LimitObject(msrest.serialization.Model):
    """The resource quota limit value.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The quota/limit value.
    :type value: int
    :param limit_object_type: The limit object type. Possible values include: "LimitValue".
    :type limit_object_type: str or ~azure.mgmt.quota.models.LimitType
    :param limit_type: The quota or usages limit types. Possible values include: "Independent",
     "Shared".
    :type limit_type: str or ~azure.mgmt.quota.models.QuotaLimitTypes
    """

    _validation = {'value': {'required': True}}

    _attribute_map = {
        'value': {'key': 'value', 'type': 'int'},
        'limit_object_type': {'key': 'limitObjectType', 'type': 'str'},
        'limit_type': {'key': 'limitType', 'type': 'str'},
    }

    def __init__(self, *, value: int,
                 limit_object_type: Optional[Union[str, "LimitType"]] = None,
                 limit_type: Optional[Union[str, "QuotaLimitTypes"]] = None,
                 **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.limit_object_type = limit_object_type
        self.limit_type = limit_type
class LimitValue(LimitJsonObject, LimitObject):
    """The resource quota limit.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The quota/limit value.
    :type value: int
    :param limit_type: The quota or usages limit types. Possible values include: "Independent",
     "Shared".
    :type limit_type: str or ~azure.mgmt.quota.models.QuotaLimitTypes
    :param limit_object_type: Required. The limit object type.Constant filled by server. Possible
     values include: "LimitValue".
    :type limit_object_type: str or ~azure.mgmt.quota.models.LimitType
    """

    _validation = {
        'value': {'required': True},
        'limit_object_type': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': 'int'},
        'limit_type': {'key': 'limitType', 'type': 'str'},
        'limit_object_type': {'key': 'limitObjectType', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: int,
        limit_type: Optional[Union[str, "QuotaLimitTypes"]] = None,
        **kwargs
    ):
        super(LimitValue, self).__init__(value=value, limit_type=limit_type, **kwargs)
        self.value = value
        self.limit_type = limit_type
        # Polymorphic discriminator, fixed for this subtype. The generated
        # code assigned it twice (once per base-class path); once suffices.
        self.limit_object_type = 'LimitValue'  # type: str
class OperationDisplay(msrest.serialization.Model):
    """OperationDisplay.

    :param provider: Provider name.
    :type provider: str
    :param resource: Resource name.
    :type resource: str
    :param operation: Operation name.
    :type operation: str
    :param description: Operation description.
    :type description: str
    """

    _attribute_map = {
        'provider': {'key': 'provider', 'type': 'str'},
        'resource': {'key': 'resource', 'type': 'str'},
        'operation': {'key': 'operation', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
    }

    def __init__(self, *, provider: Optional[str] = None,
                 resource: Optional[str] = None,
                 operation: Optional[str] = None,
                 description: Optional[str] = None,
                 **kwargs):
        super().__init__(**kwargs)
        self.provider = provider
        self.resource = resource
        self.operation = operation
        self.description = description
class OperationList(msrest.serialization.Model):
    """OperationList.

    :param value:
    :type value: list[~azure.mgmt.quota.models.OperationResponse]
    :param next_link: URL to get the next page of items.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[OperationResponse]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: Optional[List["OperationResponse"]] = None,
                 next_link: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class OperationResponse(msrest.serialization.Model):
    """OperationResponse.

    :param name:
    :type name: str
    :param display:
    :type display: ~azure.mgmt.quota.models.OperationDisplay
    :param origin:
    :type origin: str
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display': {'key': 'display', 'type': 'OperationDisplay'},
        'origin': {'key': 'origin', 'type': 'str'},
    }

    def __init__(self, *, name: Optional[str] = None,
                 display: Optional["OperationDisplay"] = None,
                 origin: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.name = name
        self.display = display
        self.origin = origin
class QuotaLimits(msrest.serialization.Model):
    """Quota limits.

    :param value: List of quota limits.
    :type value: list[~azure.mgmt.quota.models.CurrentQuotaLimitBase]
    :param next_link: The URI used to fetch the next page of quota limits. When there are no more
     pages, this string is null.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[CurrentQuotaLimitBase]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: Optional[List["CurrentQuotaLimitBase"]] = None,
                 next_link: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class QuotaLimitsResponse(msrest.serialization.Model):
    """Quota limits request response.

    :param value: List of quota limits with the quota request status.
    :type value: list[~azure.mgmt.quota.models.CurrentQuotaLimitBase]
    :param next_link: The URI used to fetch the next page of quota limits. When there are no more
     pages, this is null.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[CurrentQuotaLimitBase]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: Optional[List["CurrentQuotaLimitBase"]] = None,
                 next_link: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class QuotaProperties(msrest.serialization.Model):
    """Quota properties for the specified resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param limit: Resource quota limit properties.
    :type limit: ~azure.mgmt.quota.models.LimitJsonObject
    :ivar unit: The quota units, such as Count and Bytes. When requesting quota, use the **unit**
     value returned in the GET response in the request body of your PUT operation.
    :vartype unit: str
    :param name: Resource name provided by the resource provider. Use this property name when
     requesting quota.
    :type name: ~azure.mgmt.quota.models.ResourceName
    :param resource_type: Resource type name.
    :type resource_type: str
    :ivar quota_period: The time period over which the quota usage values are summarized. For
     example:
     *P1D (per one day)*\ PT1M (per one minute)
     *PT1S (per one second).
     This parameter is optional because, for some resources like compute, the period is irrelevant.
    :vartype quota_period: str
    :ivar is_quota_applicable: States if quota can be requested for this resource.
    :vartype is_quota_applicable: bool
    :param properties: Additional properties for the specific resource provider.
    :type properties: any
    """

    _validation = {key: {'readonly': True}
                   for key in ('unit', 'quota_period', 'is_quota_applicable')}

    _attribute_map = {
        'limit': {'key': 'limit', 'type': 'LimitJsonObject'},
        'unit': {'key': 'unit', 'type': 'str'},
        'name': {'key': 'name', 'type': 'ResourceName'},
        'resource_type': {'key': 'resourceType', 'type': 'str'},
        'quota_period': {'key': 'quotaPeriod', 'type': 'str'},
        'is_quota_applicable': {'key': 'isQuotaApplicable', 'type': 'bool'},
        'properties': {'key': 'properties', 'type': 'object'},
    }

    def __init__(self, *, limit: Optional["LimitJsonObject"] = None,
                 name: Optional["ResourceName"] = None,
                 resource_type: Optional[str] = None,
                 properties: Optional[Any] = None,
                 **kwargs):
        super().__init__(**kwargs)
        self.limit = limit
        self.name = name
        self.resource_type = resource_type
        self.properties = properties
        # Server-populated, read-only fields.
        self.unit = None
        self.quota_period = None
        self.is_quota_applicable = None
class QuotaRequestDetails(msrest.serialization.Model):
    """List of quota requests with details.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Quota request ID.
    :vartype id: str
    :ivar name: Quota request name.
    :vartype name: str
    :ivar type: Resource type. "Microsoft.Quota/quotas".
    :vartype type: str
    :ivar provisioning_state: The quota request status. Possible values include: "Accepted",
     "Invalid", "Succeeded", "Failed", "InProgress".
    :vartype provisioning_state: str or ~azure.mgmt.quota.models.QuotaRequestState
    :ivar message: User-friendly status message.
    :vartype message: str
    :param error: Error details of the quota request.
    :type error: ~azure.mgmt.quota.models.ServiceErrorDetail
    :ivar request_submit_time: The quota request submission time. The date conforms to the
     following format specified by the ISO 8601 standard: yyyy-MM-ddTHH:mm:ssZ.
    :vartype request_submit_time: ~datetime.datetime
    :param value: Quota request details.
    :type value: list[~azure.mgmt.quota.models.SubRequest]
    """

    _validation = {key: {'readonly': True}
                   for key in ('id', 'name', 'type', 'provisioning_state',
                               'message', 'request_submit_time')}

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'message': {'key': 'properties.message', 'type': 'str'},
        'error': {'key': 'properties.error', 'type': 'ServiceErrorDetail'},
        'request_submit_time': {'key': 'properties.requestSubmitTime', 'type': 'iso-8601'},
        'value': {'key': 'properties.value', 'type': '[SubRequest]'},
    }

    def __init__(self, *, error: Optional["ServiceErrorDetail"] = None,
                 value: Optional[List["SubRequest"]] = None, **kwargs):
        super().__init__(**kwargs)
        self.error = error
        self.value = value
        # Server-populated, read-only fields.
        self.id = None
        self.name = None
        self.type = None
        self.provisioning_state = None
        self.message = None
        self.request_submit_time = None
class QuotaRequestDetailsList(msrest.serialization.Model):
    """Quota request information.

    :param value: Quota request details.
    :type value: list[~azure.mgmt.quota.models.QuotaRequestDetails]
    :param next_link: The URI for fetching the next page of quota limits. When there are no more
     pages, this string is null.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[QuotaRequestDetails]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: Optional[List["QuotaRequestDetails"]] = None,
                 next_link: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class QuotaRequestOneResourceSubmitResponse(msrest.serialization.Model):
    """Quota request response for a single resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Quota request ID.
    :vartype id: str
    :ivar name: The name of the quota request.
    :vartype name: str
    :ivar type: Resource type. "Microsoft.Quota/ServiceLimitRequests".
    :vartype type: str
    :ivar provisioning_state: Quota request status. Possible values include: "Accepted",
     "Invalid", "Succeeded", "Failed", "InProgress".
    :vartype provisioning_state: str or ~azure.mgmt.quota.models.QuotaRequestState
    :ivar message: User-friendly status message.
    :vartype message: str
    :ivar request_submit_time: Quota request submission time, in the ISO 8601 format
     yyyy-MM-ddTHH:mm:ssZ.
    :vartype request_submit_time: ~datetime.datetime
    :param limit: Resource quota limit properties.
    :type limit: ~azure.mgmt.quota.models.LimitObject
    :ivar current_value: Usage information for the current resource.
    :vartype current_value: int
    :param unit: The quota limit units, such as Count and Bytes. When requesting quota, use
     the **unit** value returned in the GET response in the request body of your PUT
     operation.
    :type unit: str
    :param name_properties_name: Resource name provided by the resource provider. Use this
     property name when requesting quota.
    :type name_properties_name: ~azure.mgmt.quota.models.ResourceName
    :param resource_type: Resource type name.
    :type resource_type: str
    :ivar quota_period: The time period over which the quota usage values are summarized,
     for example P1D (per one day), PT1M (per one minute), PT1S (per one second). Optional
     because the period is irrelevant for some resources (for example, compute).
    :vartype quota_period: str
    :ivar is_quota_applicable: States if quota can be requested for this resource.
    :vartype is_quota_applicable: bool
    :param error: Error details of the quota request.
    :type error: ~azure.mgmt.quota.models.ServiceErrorDetail
    :param properties: Additional properties for the specific resource provider.
    :type properties: any
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "provisioning_state": {"readonly": True},
        "message": {"readonly": True},
        "request_submit_time": {"readonly": True},
        "current_value": {"readonly": True},
        "quota_period": {"readonly": True},
        "is_quota_applicable": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
        "message": {"key": "properties.message", "type": "str"},
        "request_submit_time": {"key": "properties.requestSubmitTime", "type": "iso-8601"},
        "limit": {"key": "properties.limit", "type": "LimitObject"},
        "current_value": {"key": "properties.currentValue", "type": "int"},
        "unit": {"key": "properties.unit", "type": "str"},
        "name_properties_name": {"key": "properties.name", "type": "ResourceName"},
        "resource_type": {"key": "properties.resourceType", "type": "str"},
        "quota_period": {"key": "properties.quotaPeriod", "type": "str"},
        "is_quota_applicable": {"key": "properties.isQuotaApplicable", "type": "bool"},
        "error": {"key": "properties.error", "type": "ServiceErrorDetail"},
        "properties": {"key": "properties.properties", "type": "object"},
    }

    def __init__(
        self,
        *,
        limit: Optional["LimitObject"] = None,
        unit: Optional[str] = None,
        name_properties_name: Optional["ResourceName"] = None,
        resource_type: Optional[str] = None,
        error: Optional["ServiceErrorDetail"] = None,
        properties: Optional[Any] = None,
        **kwargs
    ):
        """Initialize the response; server-populated fields start as None."""
        super(QuotaRequestOneResourceSubmitResponse, self).__init__(**kwargs)
        # Read-only fields, filled in by the service during deserialization.
        self.id = None
        self.name = None
        self.type = None
        self.provisioning_state = None
        self.message = None
        self.request_submit_time = None
        self.current_value = None
        self.quota_period = None
        self.is_quota_applicable = None
        # Client-supplied fields.
        self.limit = limit
        self.unit = unit
        self.name_properties_name = name_properties_name
        self.resource_type = resource_type
        self.error = error
        self.properties = properties
class QuotaRequestProperties(msrest.serialization.Model):
    """Quota request properties.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar provisioning_state: The quota request status. Possible values include: "Accepted",
     "Invalid", "Succeeded", "Failed", "InProgress".
    :vartype provisioning_state: str or ~azure.mgmt.quota.models.QuotaRequestState
    :ivar message: User-friendly status message.
    :vartype message: str
    :param error: Error details of the quota request.
    :type error: ~azure.mgmt.quota.models.ServiceErrorDetail
    :ivar request_submit_time: The quota request submission time, in the ISO 8601 format
     yyyy-MM-ddTHH:mm:ssZ.
    :vartype request_submit_time: ~datetime.datetime
    :param value: Quota request details.
    :type value: list[~azure.mgmt.quota.models.SubRequest]
    """

    _validation = {
        "provisioning_state": {"readonly": True},
        "message": {"readonly": True},
        "request_submit_time": {"readonly": True},
    }

    _attribute_map = {
        "provisioning_state": {"key": "provisioningState", "type": "str"},
        "message": {"key": "message", "type": "str"},
        "error": {"key": "error", "type": "ServiceErrorDetail"},
        "request_submit_time": {"key": "requestSubmitTime", "type": "iso-8601"},
        "value": {"key": "value", "type": "[SubRequest]"},
    }

    def __init__(
        self,
        *,
        error: Optional["ServiceErrorDetail"] = None,
        value: Optional[List["SubRequest"]] = None,
        **kwargs
    ):
        """Initialize the properties; server-populated fields start as None."""
        super(QuotaRequestProperties, self).__init__(**kwargs)
        # Read-only fields, filled in by the service during deserialization.
        self.provisioning_state = None
        self.message = None
        self.request_submit_time = None
        # Client-supplied fields.
        self.error = error
        self.value = value
class QuotaRequestSubmitResponse(msrest.serialization.Model):
    """Quota request response.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Quota request ID.
    :vartype id: str
    :ivar name: Quota request name.
    :vartype name: str
    :param properties: Quota request details.
    :type properties: ~azure.mgmt.quota.models.QuotaRequestProperties
    :ivar type: Resource type. "Microsoft.Quota/quotas".
    :vartype type: str
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "properties": {"key": "properties", "type": "QuotaRequestProperties"},
        "type": {"key": "type", "type": "str"},
    }

    def __init__(self, *, properties: Optional["QuotaRequestProperties"] = None, **kwargs):
        """Initialize the response; id/name/type are populated by the server."""
        super(QuotaRequestSubmitResponse, self).__init__(**kwargs)
        self.id = None
        self.name = None
        self.type = None
        self.properties = properties
class QuotaRequestSubmitResponse202(msrest.serialization.Model):
    """The quota request response with the quota request ID (HTTP 202 Accepted).

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: The quota request ID. To check the request status, use the **id** value in a
     `Quota Request Status
     <https://docs.microsoft.com/en-us/rest/api/reserved-vm-instances/quotarequeststatus/get>`_
     GET operation.
    :vartype id: str
    :ivar name: Operation ID.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar provisioning_state: Quota request status. Possible values include: "Accepted",
     "Invalid", "Succeeded", "Failed", "InProgress".
    :vartype provisioning_state: str or ~azure.mgmt.quota.models.QuotaRequestState
    :ivar message: User-friendly message.
    :vartype message: str
    :param limit: Resource quota limit properties.
    :type limit: ~azure.mgmt.quota.models.LimitObject
    :param unit: The quota limit units, such as Count and Bytes. When requesting quota, use
     the **unit** value returned in the GET response in the request body of your PUT
     operation.
    :type unit: str
    :param name_properties_name: Resource name provided by the resource provider. Use this
     property name when requesting quota.
    :type name_properties_name: ~azure.mgmt.quota.models.ResourceName
    :param resource_type: Resource type name.
    :type resource_type: str
    :ivar quota_period: The time period over which the quota usage values are summarized,
     for example P1D (per one day), PT1M (per one minute), PT1S (per one second). Optional
     because the period is irrelevant for some resources (for example, compute).
    :vartype quota_period: str
    :param properties: Additional properties for the specific resource provider.
    :type properties: any
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "provisioning_state": {"readonly": True},
        "message": {"readonly": True},
        "quota_period": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
        "message": {"key": "properties.message", "type": "str"},
        "limit": {"key": "properties.limit", "type": "LimitObject"},
        "unit": {"key": "properties.unit", "type": "str"},
        "name_properties_name": {"key": "properties.name", "type": "ResourceName"},
        "resource_type": {"key": "properties.resourceType", "type": "str"},
        "quota_period": {"key": "properties.quotaPeriod", "type": "str"},
        "properties": {"key": "properties.properties", "type": "object"},
    }

    def __init__(
        self,
        *,
        limit: Optional["LimitObject"] = None,
        unit: Optional[str] = None,
        name_properties_name: Optional["ResourceName"] = None,
        resource_type: Optional[str] = None,
        properties: Optional[Any] = None,
        **kwargs
    ):
        """Initialize the response; server-populated fields start as None."""
        super(QuotaRequestSubmitResponse202, self).__init__(**kwargs)
        # Read-only fields, filled in by the service during deserialization.
        self.id = None
        self.name = None
        self.type = None
        self.provisioning_state = None
        self.message = None
        self.quota_period = None
        # Client-supplied fields.
        self.limit = limit
        self.unit = unit
        self.name_properties_name = name_properties_name
        self.resource_type = resource_type
        self.properties = properties
class ResourceName(msrest.serialization.Model):
    """Name of the resource provided by the resource provider. When requesting quota, use this property name.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: Resource name.
    :type value: str
    :ivar localized_value: Resource display name.
    :vartype localized_value: str
    """

    _validation = {
        "localized_value": {"readonly": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "str"},
        "localized_value": {"key": "localizedValue", "type": "str"},
    }

    def __init__(self, *, value: Optional[str] = None, **kwargs):
        """Initialize with the optional resource name; the display name is server-set."""
        super(ResourceName, self).__init__(**kwargs)
        self.value = value
        self.localized_value = None  # populated by the service
class ServiceError(msrest.serialization.Model):
    """API error details.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param code: Error code.
    :type code: str
    :param message: Error message.
    :type message: str
    :ivar details: List of error details.
    :vartype details: list[~azure.mgmt.quota.models.ServiceErrorDetail]
    """

    _validation = {
        "details": {"readonly": True},
    }

    _attribute_map = {
        "code": {"key": "code", "type": "str"},
        "message": {"key": "message", "type": "str"},
        "details": {"key": "details", "type": "[ServiceErrorDetail]"},
    }

    def __init__(self, *, code: Optional[str] = None, message: Optional[str] = None, **kwargs):
        """Initialize with an optional code/message; details are server-set."""
        super(ServiceError, self).__init__(**kwargs)
        self.code = code
        self.message = message
        self.details = None  # populated by the service
class ServiceErrorDetail(msrest.serialization.Model):
    """Error details.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar code: Error code.
    :vartype code: str
    :ivar message: Error message.
    :vartype message: str
    """

    _validation = {
        "code": {"readonly": True},
        "message": {"readonly": True},
    }

    _attribute_map = {
        "code": {"key": "code", "type": "str"},
        "message": {"key": "message", "type": "str"},
    }

    def __init__(self, **kwargs):
        """Initialize the detail; both fields are populated by the service."""
        super(ServiceErrorDetail, self).__init__(**kwargs)
        self.code = None
        self.message = None
class SubRequest(msrest.serialization.Model):
    """Request property.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param name: Resource name.
    :type name: ~azure.mgmt.quota.models.ResourceName
    :ivar resource_type: Resource type for which the quota properties were requested.
    :vartype resource_type: str
    :param unit: Quota limit units, such as Count and Bytes. When requesting quota, use the
     **unit** value returned in the GET response in the request body of your PUT operation.
    :type unit: str
    :ivar provisioning_state: The quota request status. Possible values include: "Accepted",
     "Invalid", "Succeeded", "Failed", "InProgress".
    :vartype provisioning_state: str or ~azure.mgmt.quota.models.QuotaRequestState
    :ivar message: User-friendly status message.
    :vartype message: str
    :ivar sub_request_id: Quota request ID.
    :vartype sub_request_id: str
    :param limit: Resource quota limit properties.
    :type limit: ~azure.mgmt.quota.models.LimitJsonObject
    """

    _validation = {
        "resource_type": {"readonly": True},
        "provisioning_state": {"readonly": True},
        "message": {"readonly": True},
        "sub_request_id": {"readonly": True},
    }

    _attribute_map = {
        "name": {"key": "name", "type": "ResourceName"},
        "resource_type": {"key": "resourceType", "type": "str"},
        "unit": {"key": "unit", "type": "str"},
        "provisioning_state": {"key": "provisioningState", "type": "str"},
        "message": {"key": "message", "type": "str"},
        "sub_request_id": {"key": "subRequestId", "type": "str"},
        "limit": {"key": "limit", "type": "LimitJsonObject"},
    }

    def __init__(
        self,
        *,
        name: Optional["ResourceName"] = None,
        unit: Optional[str] = None,
        limit: Optional["LimitJsonObject"] = None,
        **kwargs
    ):
        """Initialize the sub-request; server-populated fields start as None."""
        super(SubRequest, self).__init__(**kwargs)
        # Read-only fields, filled in by the service during deserialization.
        self.resource_type = None
        self.provisioning_state = None
        self.message = None
        self.sub_request_id = None
        # Client-supplied fields.
        self.name = name
        self.unit = unit
        self.limit = limit
class UsagesLimits(msrest.serialization.Model):
    """Quota limits (usage list page).

    :param value: List of quota limits.
    :type value: list[~azure.mgmt.quota.models.CurrentUsagesBase]
    :param next_link: The URI used to fetch the next page of quota limits. When there are no
     more pages, this is null.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[CurrentUsagesBase]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(
        self,
        *,
        value: Optional[List["CurrentUsagesBase"]] = None,
        next_link: Optional[str] = None,
        **kwargs
    ):
        """Initialize the page wrapper; both fields default to None."""
        super(UsagesLimits, self).__init__(**kwargs)
        self.value = value          # items on this page
        self.next_link = next_link  # None once all pages are consumed
class UsagesObject(msrest.serialization.Model):
    """The resource usages value.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The usages value.
    :type value: int
    :param usages_type: The quota or usages limit types. Possible values include:
     "Individual", "Combined".
    :type usages_type: str or ~azure.mgmt.quota.models.UsagesTypes
    """

    _validation = {
        "value": {"required": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "int"},
        "usages_type": {"key": "usagesType", "type": "str"},
    }

    def __init__(
        self,
        *,
        value: int,
        usages_type: Optional[Union[str, "UsagesTypes"]] = None,
        **kwargs
    ):
        """Initialize with the required usage value and an optional usage type."""
        super(UsagesObject, self).__init__(**kwargs)
        self.value = value
        self.usages_type = usages_type
class UsagesProperties(msrest.serialization.Model):
    """Usage properties for the specified resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param usages: The quota limit properties for this resource.
    :type usages: ~azure.mgmt.quota.models.UsagesObject
    :ivar unit: The units for the quota usage, such as Count and Bytes. When requesting
     quota, use the **unit** value returned in the GET response in the request body of your
     PUT operation.
    :vartype unit: str
    :param name: Resource name provided by the resource provider. Use this property name when
     requesting quota.
    :type name: ~azure.mgmt.quota.models.ResourceName
    :param resource_type: The name of the resource type.
    :type resource_type: str
    :ivar quota_period: The time period for the summary of the quota usage values, for
     example P1D (per one day), PT1M (per one minute), PT1S (per one second). Optional
     because it is not relevant for all resources (for example, compute).
    :vartype quota_period: str
    :ivar is_quota_applicable: States if quota can be requested for this resource.
    :vartype is_quota_applicable: bool
    :param properties: Additional properties for the specific resource provider.
    :type properties: any
    """

    _validation = {
        'unit': {'readonly': True},
        'quota_period': {'readonly': True},
        'is_quota_applicable': {'readonly': True},
    }

    _attribute_map = {
        'usages': {'key': 'usages', 'type': 'UsagesObject'},
        'unit': {'key': 'unit', 'type': 'str'},
        'name': {'key': 'name', 'type': 'ResourceName'},
        'resource_type': {'key': 'resourceType', 'type': 'str'},
        'quota_period': {'key': 'quotaPeriod', 'type': 'str'},
        'is_quota_applicable': {'key': 'isQuotaApplicable', 'type': 'bool'},
        'properties': {'key': 'properties', 'type': 'object'},
    }

    def __init__(
        self,
        *,
        usages: Optional["UsagesObject"] = None,
        name: Optional["ResourceName"] = None,
        resource_type: Optional[str] = None,
        properties: Optional[Any] = None,
        **kwargs
    ):
        """Initialize usage properties; server-populated fields start as None."""
        super(UsagesProperties, self).__init__(**kwargs)
        self.usages = usages
        self.unit = None  # read-only, populated by the service
        self.name = name
        self.resource_type = resource_type
        self.quota_period = None  # read-only, populated by the service
        self.is_quota_applicable = None  # read-only, populated by the service
        # Fix: the original line was corrupted by a stray "| sdk/... |" path artifact
        # appended after the assignment, which made it a syntax error.
        self.properties = properties
from typing import Any, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._azure_quota_extension_api_enums import *
class CommonResourceProperties(msrest.serialization.Model):
    """Resource properties.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type. Example: "Microsoft.Quota/quotas".
    :vartype type: str
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
    }

    def __init__(self, **kwargs):
        """Initialize the resource; all fields are populated by the service."""
        super(CommonResourceProperties, self).__init__(**kwargs)
        self.id = None
        self.name = None
        self.type = None
class CreateGenericQuotaRequestParameters(msrest.serialization.Model):
    """Quota change requests information.

    :param value: Quota change requests.
    :type value: list[~azure.mgmt.quota.models.CurrentQuotaLimitBase]
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[CurrentQuotaLimitBase]"},
    }

    def __init__(self, *, value: Optional[List["CurrentQuotaLimitBase"]] = None, **kwargs):
        """Initialize with the optional list of quota change requests."""
        super(CreateGenericQuotaRequestParameters, self).__init__(**kwargs)
        self.value = value
class CurrentQuotaLimitBase(msrest.serialization.Model):
    """Quota limit.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: The resource ID.
    :vartype id: str
    :ivar type: The resource type.
    :vartype type: str
    :ivar name: The resource name.
    :vartype name: str
    :param properties: Quota properties for the specified resource, based on the API called,
     Quotas or Usages.
    :type properties: ~azure.mgmt.quota.models.QuotaProperties
    """

    _validation = {
        "id": {"readonly": True},
        "type": {"readonly": True},
        "name": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "properties": {"key": "properties", "type": "QuotaProperties"},
    }

    def __init__(self, *, properties: Optional["QuotaProperties"] = None, **kwargs):
        """Initialize the limit; id/type/name are populated by the server."""
        super(CurrentQuotaLimitBase, self).__init__(**kwargs)
        self.id = None
        self.type = None
        self.name = None
        self.properties = properties
class CurrentUsagesBase(msrest.serialization.Model):
    """Resource usage.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: The resource ID.
    :vartype id: str
    :ivar type: The resource type.
    :vartype type: str
    :ivar name: The resource name.
    :vartype name: str
    :param properties: Usage properties for the specified resource.
    :type properties: ~azure.mgmt.quota.models.UsagesProperties
    """

    _validation = {
        "id": {"readonly": True},
        "type": {"readonly": True},
        "name": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "properties": {"key": "properties", "type": "UsagesProperties"},
    }

    def __init__(self, *, properties: Optional["UsagesProperties"] = None, **kwargs):
        """Initialize the usage; id/type/name are populated by the server."""
        super(CurrentUsagesBase, self).__init__(**kwargs)
        self.id = None
        self.type = None
        self.name = None
        self.properties = properties
class ExceptionResponse(msrest.serialization.Model):
    """Error response wrapper.

    :param error: API error details.
    :type error: ~azure.mgmt.quota.models.ServiceError
    """

    _attribute_map = {
        "error": {"key": "error", "type": "ServiceError"},
    }

    def __init__(self, *, error: Optional["ServiceError"] = None, **kwargs):
        """Initialize with the optional API error details."""
        super(ExceptionResponse, self).__init__(**kwargs)
        self.error = error
class LimitJsonObject(msrest.serialization.Model):
    """LimitJson abstract base class.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: LimitValue.

    All required parameters must be populated in order to send to Azure.

    :param limit_object_type: Required. The limit object type. Constant filled by server.
     Possible values include: "LimitValue".
    :type limit_object_type: str or ~azure.mgmt.quota.models.LimitType
    """

    _validation = {
        "limit_object_type": {"required": True},
    }

    _attribute_map = {
        "limit_object_type": {"key": "limitObjectType", "type": "str"},
    }

    # Maps the polymorphic discriminator value to the concrete model class name.
    _subtype_map = {
        "limit_object_type": {"LimitValue": "LimitValue"}
    }

    def __init__(self, **kwargs):
        """Initialize the base; the discriminator is set by the concrete subclass."""
        super(LimitJsonObject, self).__init__(**kwargs)
        self.limit_object_type = None  # type: Optional[str]
class LimitObject(msrest.serialization.Model):
    """The resource quota limit value.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The quota/limit value.
    :type value: int
    :param limit_object_type: The limit object type. Possible values include: "LimitValue".
    :type limit_object_type: str or ~azure.mgmt.quota.models.LimitType
    :param limit_type: The quota or usages limit types. Possible values include:
     "Independent", "Shared".
    :type limit_type: str or ~azure.mgmt.quota.models.QuotaLimitTypes
    """

    _validation = {
        "value": {"required": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "int"},
        "limit_object_type": {"key": "limitObjectType", "type": "str"},
        "limit_type": {"key": "limitType", "type": "str"},
    }

    def __init__(
        self,
        *,
        value: int,
        limit_object_type: Optional[Union[str, "LimitType"]] = None,
        limit_type: Optional[Union[str, "QuotaLimitTypes"]] = None,
        **kwargs
    ):
        """Initialize with the required limit value and optional type markers."""
        super(LimitObject, self).__init__(**kwargs)
        self.value = value
        self.limit_object_type = limit_object_type
        self.limit_type = limit_type
class LimitValue(LimitJsonObject, LimitObject):
    """The resource quota limit.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The quota/limit value.
    :type value: int
    :param limit_type: The quota or usages limit types. Possible values include:
     "Independent", "Shared".
    :type limit_type: str or ~azure.mgmt.quota.models.QuotaLimitTypes
    :param limit_object_type: Required. The limit object type. Constant filled by server.
     Possible values include: "LimitValue".
    :type limit_object_type: str or ~azure.mgmt.quota.models.LimitType
    """

    _validation = {
        'value': {'required': True},
        'limit_object_type': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': 'int'},
        'limit_type': {'key': 'limitType', 'type': 'str'},
        'limit_object_type': {'key': 'limitObjectType', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: int,
        limit_type: Optional[Union[str, "QuotaLimitTypes"]] = None,
        **kwargs
    ):
        """Initialize the limit; the polymorphic discriminator is fixed to 'LimitValue'."""
        super(LimitValue, self).__init__(value=value, limit_type=limit_type, **kwargs)
        self.value = value
        self.limit_type = limit_type
        # Polymorphic discriminator. The generated code assigned this twice in a row;
        # the redundant duplicate assignment has been removed.
        self.limit_object_type = 'LimitValue'  # type: str
class OperationDisplay(msrest.serialization.Model):
    """Display metadata for a REST API operation.

    :param provider: Provider name.
    :type provider: str
    :param resource: Resource name.
    :type resource: str
    :param operation: Operation name.
    :type operation: str
    :param description: Operation description.
    :type description: str
    """

    _attribute_map = {
        "provider": {"key": "provider", "type": "str"},
        "resource": {"key": "resource", "type": "str"},
        "operation": {"key": "operation", "type": "str"},
        "description": {"key": "description", "type": "str"},
    }

    def __init__(
        self,
        *,
        provider: Optional[str] = None,
        resource: Optional[str] = None,
        operation: Optional[str] = None,
        description: Optional[str] = None,
        **kwargs
    ):
        """Initialize the display metadata; every field defaults to None."""
        super(OperationDisplay, self).__init__(**kwargs)
        self.provider = provider
        self.resource = resource
        self.operation = operation
        self.description = description
class OperationList(msrest.serialization.Model):
    """Paged list of REST API operations.

    :param value: Operations on this page.
    :type value: list[~azure.mgmt.quota.models.OperationResponse]
    :param next_link: URL to get the next page of items.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[OperationResponse]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(
        self,
        *,
        value: Optional[List["OperationResponse"]] = None,
        next_link: Optional[str] = None,
        **kwargs
    ):
        """Initialize the page wrapper; both fields default to None."""
        super(OperationList, self).__init__(**kwargs)
        self.value = value          # items on this page
        self.next_link = next_link  # None once all pages are consumed
class OperationResponse(msrest.serialization.Model):
    """A single REST API operation description.

    :param name: Operation name.
    :type name: str
    :param display: Display metadata for the operation.
    :type display: ~azure.mgmt.quota.models.OperationDisplay
    :param origin: Origin of the operation.
    :type origin: str
    """

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "display": {"key": "display", "type": "OperationDisplay"},
        "origin": {"key": "origin", "type": "str"},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        display: Optional["OperationDisplay"] = None,
        origin: Optional[str] = None,
        **kwargs
    ):
        """Initialize the operation description; every field defaults to None."""
        super(OperationResponse, self).__init__(**kwargs)
        self.name = name
        self.display = display
        self.origin = origin
class QuotaLimits(msrest.serialization.Model):
    """Paged list of quota limits.

    :param value: List of quota limits.
    :type value: list[~azure.mgmt.quota.models.CurrentQuotaLimitBase]
    :param next_link: The URI used to fetch the next page of quota limits. When there are no
     more pages, this string is null.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[CurrentQuotaLimitBase]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(
        self,
        *,
        value: Optional[List["CurrentQuotaLimitBase"]] = None,
        next_link: Optional[str] = None,
        **kwargs
    ):
        """Initialize the page wrapper; both fields default to None."""
        super(QuotaLimits, self).__init__(**kwargs)
        self.value = value          # items on this page
        self.next_link = next_link  # None once all pages are consumed
class QuotaLimitsResponse(msrest.serialization.Model):
    """Quota limits request response page.

    :param value: List of quota limits with the quota request status.
    :type value: list[~azure.mgmt.quota.models.CurrentQuotaLimitBase]
    :param next_link: The URI used to fetch the next page of quota limits. When there are no
     more pages, this is null.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[CurrentQuotaLimitBase]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(
        self,
        *,
        value: Optional[List["CurrentQuotaLimitBase"]] = None,
        next_link: Optional[str] = None,
        **kwargs
    ):
        """Initialize the page wrapper; both fields default to None."""
        super(QuotaLimitsResponse, self).__init__(**kwargs)
        self.value = value          # items on this page
        self.next_link = next_link  # None once all pages are consumed
class QuotaProperties(msrest.serialization.Model):
    """Quota properties for the specified resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param limit: Resource quota limit properties.
    :type limit: ~azure.mgmt.quota.models.LimitJsonObject
    :ivar unit: The quota units, such as Count and Bytes. When requesting quota, use the
     **unit** value returned in the GET response in the request body of your PUT operation.
    :vartype unit: str
    :param name: Resource name provided by the resource provider. Use this property name when
     requesting quota.
    :type name: ~azure.mgmt.quota.models.ResourceName
    :param resource_type: Resource type name.
    :type resource_type: str
    :ivar quota_period: The time period over which the quota usage values are summarized,
     for example P1D (per one day), PT1M (per one minute), PT1S (per one second). Optional
     because the period is irrelevant for some resources (for example, compute).
    :vartype quota_period: str
    :ivar is_quota_applicable: States if quota can be requested for this resource.
    :vartype is_quota_applicable: bool
    :param properties: Additional properties for the specific resource provider.
    :type properties: any
    """

    _validation = {
        "unit": {"readonly": True},
        "quota_period": {"readonly": True},
        "is_quota_applicable": {"readonly": True},
    }

    _attribute_map = {
        "limit": {"key": "limit", "type": "LimitJsonObject"},
        "unit": {"key": "unit", "type": "str"},
        "name": {"key": "name", "type": "ResourceName"},
        "resource_type": {"key": "resourceType", "type": "str"},
        "quota_period": {"key": "quotaPeriod", "type": "str"},
        "is_quota_applicable": {"key": "isQuotaApplicable", "type": "bool"},
        "properties": {"key": "properties", "type": "object"},
    }

    def __init__(
        self,
        *,
        limit: Optional["LimitJsonObject"] = None,
        name: Optional["ResourceName"] = None,
        resource_type: Optional[str] = None,
        properties: Optional[Any] = None,
        **kwargs
    ):
        """Initialize quota properties; server-populated fields start as None."""
        super(QuotaProperties, self).__init__(**kwargs)
        # Read-only fields, filled in by the service during deserialization.
        self.unit = None
        self.quota_period = None
        self.is_quota_applicable = None
        # Client-supplied fields.
        self.limit = limit
        self.name = name
        self.resource_type = resource_type
        self.properties = properties
class QuotaRequestDetails(msrest.serialization.Model):
    """List of quota requests with details.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Quota request ID.
    :vartype id: str
    :ivar name: Quota request name.
    :vartype name: str
    :ivar type: Resource type. "Microsoft.Quota/quotas".
    :vartype type: str
    :ivar provisioning_state: The quota request status. Possible values include: "Accepted",
     "Invalid", "Succeeded", "Failed", "InProgress".
    :vartype provisioning_state: str or ~azure.mgmt.quota.models.QuotaRequestState
    :ivar message: User-friendly status message.
    :vartype message: str
    :param error: Error details of the quota request.
    :type error: ~azure.mgmt.quota.models.ServiceErrorDetail
    :ivar request_submit_time: The quota request submission time, in the ISO 8601 format
     yyyy-MM-ddTHH:mm:ssZ.
    :vartype request_submit_time: ~datetime.datetime
    :param value: Quota request details.
    :type value: list[~azure.mgmt.quota.models.SubRequest]
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "provisioning_state": {"readonly": True},
        "message": {"readonly": True},
        "request_submit_time": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
        "message": {"key": "properties.message", "type": "str"},
        "error": {"key": "properties.error", "type": "ServiceErrorDetail"},
        "request_submit_time": {"key": "properties.requestSubmitTime", "type": "iso-8601"},
        "value": {"key": "properties.value", "type": "[SubRequest]"},
    }

    def __init__(
        self,
        *,
        error: Optional["ServiceErrorDetail"] = None,
        value: Optional[List["SubRequest"]] = None,
        **kwargs
    ):
        """Initialize the details; server-populated fields start as None."""
        super(QuotaRequestDetails, self).__init__(**kwargs)
        # Read-only fields, filled in by the service during deserialization.
        self.id = None
        self.name = None
        self.type = None
        self.provisioning_state = None
        self.message = None
        self.request_submit_time = None
        # Client-supplied fields.
        self.error = error
        self.value = value
class QuotaRequestDetailsList(msrest.serialization.Model):
    """One page of quota request details.

    :param value: Quota request details.
    :type value: list[~azure.mgmt.quota.models.QuotaRequestDetails]
    :param next_link: The URI for fetching the next page of quota limits; null when there are no
     more pages.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[QuotaRequestDetails]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["QuotaRequestDetails"]] = None,
        next_link: Optional[str] = None,
        **kwargs
    ):
        """Capture the page contents and the optional continuation link."""
        super().__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class QuotaRequestOneResourceSubmitResponse(msrest.serialization.Model):
    """Quota request response.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar id: Quota request ID.
    :vartype id: str
    :ivar name: The name of the quota request.
    :vartype name: str
    :ivar type: Resource type. "Microsoft.Quota/ServiceLimitRequests".
    :vartype type: str
    :ivar provisioning_state: Quota request status. Possible values include: "Accepted", "Invalid",
     "Succeeded", "Failed", "InProgress".
    :vartype provisioning_state: str or ~azure.mgmt.quota.models.QuotaRequestState
    :ivar message: User-friendly status message.
    :vartype message: str
    :ivar request_submit_time: Quota request submission time. The date conforms to the following
     ISO 8601 standard format: yyyy-MM-ddTHH:mm:ssZ.
    :vartype request_submit_time: ~datetime.datetime
    :param limit: Resource quota limit properties.
    :type limit: ~azure.mgmt.quota.models.LimitObject
    :ivar current_value: Usage information for the current resource.
    :vartype current_value: int
    :param unit: The quota limit units, such as Count and Bytes. When requesting quota, use the
     **unit** value returned in the GET response in the request body of your PUT operation.
    :type unit: str
    :param name_properties_name: Resource name provided by the resource provider. Use this property
     name when requesting quota.
    :type name_properties_name: ~azure.mgmt.quota.models.ResourceName
    :param resource_type: Resource type name.
    :type resource_type: str
    :ivar quota_period: The time period over which the quota usage values are summarized. For
     example:
     *P1D (per one day)*\ PT1M (per one minute)
     *PT1S (per one second).
     This parameter is optional because, for some resources like compute, the period is irrelevant.
    :vartype quota_period: str
    :ivar is_quota_applicable: States if quota can be requested for this resource.
    :vartype is_quota_applicable: bool
    :param error: Error details of the quota request.
    :type error: ~azure.mgmt.quota.models.ServiceErrorDetail
    :param properties: Additional properties for the specific resource provider.
    :type properties: any
    """
    # Fields the service populates; client-supplied values are ignored on send.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'message': {'readonly': True},
        'request_submit_time': {'readonly': True},
        'current_value': {'readonly': True},
        'quota_period': {'readonly': True},
        'is_quota_applicable': {'readonly': True},
    }
    # Attribute -> wire key / msrest type mapping; dotted keys address the
    # nested "properties" JSON object.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'message': {'key': 'properties.message', 'type': 'str'},
        'request_submit_time': {'key': 'properties.requestSubmitTime', 'type': 'iso-8601'},
        'limit': {'key': 'properties.limit', 'type': 'LimitObject'},
        'current_value': {'key': 'properties.currentValue', 'type': 'int'},
        'unit': {'key': 'properties.unit', 'type': 'str'},
        'name_properties_name': {'key': 'properties.name', 'type': 'ResourceName'},
        'resource_type': {'key': 'properties.resourceType', 'type': 'str'},
        'quota_period': {'key': 'properties.quotaPeriod', 'type': 'str'},
        'is_quota_applicable': {'key': 'properties.isQuotaApplicable', 'type': 'bool'},
        'error': {'key': 'properties.error', 'type': 'ServiceErrorDetail'},
        'properties': {'key': 'properties.properties', 'type': 'object'},
    }
    def __init__(
        self,
        *,
        limit: Optional["LimitObject"] = None,
        unit: Optional[str] = None,
        name_properties_name: Optional["ResourceName"] = None,
        resource_type: Optional[str] = None,
        error: Optional["ServiceErrorDetail"] = None,
        properties: Optional[Any] = None,
        **kwargs
    ):
        """Initialize the response model; only the writable fields accept values."""
        super(QuotaRequestOneResourceSubmitResponse, self).__init__(**kwargs)
        # Read-only attributes start as None; the service fills them in on
        # deserialization.
        self.id = None
        self.name = None
        self.type = None
        self.provisioning_state = None
        self.message = None
        self.request_submit_time = None
        self.limit = limit
        self.current_value = None
        self.unit = unit
        self.name_properties_name = name_properties_name
        self.resource_type = resource_type
        self.quota_period = None
        self.is_quota_applicable = None
        self.error = error
        self.properties = properties
class QuotaRequestProperties(msrest.serialization.Model):
    """Properties of a quota request.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar provisioning_state: The quota request status. Possible values include: "Accepted",
     "Invalid", "Succeeded", "Failed", "InProgress".
    :vartype provisioning_state: str or ~azure.mgmt.quota.models.QuotaRequestState
    :ivar message: User-friendly status message.
    :vartype message: str
    :param error: Error details of the quota request.
    :type error: ~azure.mgmt.quota.models.ServiceErrorDetail
    :ivar request_submit_time: The quota request submission time (ISO 8601 format
     yyyy-MM-ddTHH:mm:ssZ).
    :vartype request_submit_time: ~datetime.datetime
    :param value: Quota request details.
    :type value: list[~azure.mgmt.quota.models.SubRequest]
    """

    # Server-populated fields.
    _validation = {
        'provisioning_state': {'readonly': True},
        'message': {'readonly': True},
        'request_submit_time': {'readonly': True},
    }

    _attribute_map = {
        'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'error': {'key': 'error', 'type': 'ServiceErrorDetail'},
        'request_submit_time': {'key': 'requestSubmitTime', 'type': 'iso-8601'},
        'value': {'key': 'value', 'type': '[SubRequest]'},
    }

    def __init__(
        self,
        *,
        error: Optional["ServiceErrorDetail"] = None,
        value: Optional[List["SubRequest"]] = None,
        **kwargs
    ):
        """Store the writable fields; server-populated fields start as None."""
        super().__init__(**kwargs)
        # Writable:
        self.error = error
        self.value = value
        # Server-populated:
        self.provisioning_state = None
        self.message = None
        self.request_submit_time = None
class QuotaRequestSubmitResponse(msrest.serialization.Model):
    """Response returned when a quota request is submitted.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Quota request ID.
    :vartype id: str
    :ivar name: Quota request name.
    :vartype name: str
    :param properties: Quota request details.
    :type properties: ~azure.mgmt.quota.models.QuotaRequestProperties
    :ivar type: Resource type. "Microsoft.Quota/quotas".
    :vartype type: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'QuotaRequestProperties'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, *, properties: Optional["QuotaRequestProperties"] = None, **kwargs):
        """Store the writable properties payload; other fields are server-populated."""
        super().__init__(**kwargs)
        self.properties = properties
        # Server-populated:
        self.id = None
        self.name = None
        self.type = None
class QuotaRequestSubmitResponse202(msrest.serialization.Model):
    """The quota request response with the quota request ID.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar id: The quota request ID. To check the request status, use the **id** value in a `Quota
     Request Status
     <https://docs.microsoft.com/en-us/rest/api/reserved-vm-instances/quotarequeststatus/get>`_ GET
     operation.
    :vartype id: str
    :ivar name: Operation ID.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar provisioning_state: Quota request status. Possible values include: "Accepted", "Invalid",
     "Succeeded", "Failed", "InProgress".
    :vartype provisioning_state: str or ~azure.mgmt.quota.models.QuotaRequestState
    :ivar message: User-friendly message.
    :vartype message: str
    :param limit: Resource quota limit properties.
    :type limit: ~azure.mgmt.quota.models.LimitObject
    :param unit: The quota limit units, such as Count and Bytes. When requesting quota, use the
     **unit** value returned in the GET response in the request body of your PUT operation.
    :type unit: str
    :param name_properties_name: Resource name provided by the resource provider. Use this property
     name when requesting quota.
    :type name_properties_name: ~azure.mgmt.quota.models.ResourceName
    :param resource_type: Resource type name.
    :type resource_type: str
    :ivar quota_period: The time period over which the quota usage values are summarized. For
     example:
     *P1D (per one day)*\ PT1M (per one minute)
     *PT1S (per one second).
     This parameter is optional because, for some resources like compute, the period is irrelevant.
    :vartype quota_period: str
    :param properties: Additional properties for the specific resource provider.
    :type properties: any
    """
    # Fields the service populates; client-supplied values are ignored on send.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'message': {'readonly': True},
        'quota_period': {'readonly': True},
    }
    # Attribute -> wire key / msrest type mapping; dotted keys address the
    # nested "properties" JSON object.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'message': {'key': 'properties.message', 'type': 'str'},
        'limit': {'key': 'properties.limit', 'type': 'LimitObject'},
        'unit': {'key': 'properties.unit', 'type': 'str'},
        'name_properties_name': {'key': 'properties.name', 'type': 'ResourceName'},
        'resource_type': {'key': 'properties.resourceType', 'type': 'str'},
        'quota_period': {'key': 'properties.quotaPeriod', 'type': 'str'},
        'properties': {'key': 'properties.properties', 'type': 'object'},
    }
    def __init__(
        self,
        *,
        limit: Optional["LimitObject"] = None,
        unit: Optional[str] = None,
        name_properties_name: Optional["ResourceName"] = None,
        resource_type: Optional[str] = None,
        properties: Optional[Any] = None,
        **kwargs
    ):
        """Initialize the 202 response model; only the writable fields accept values."""
        super(QuotaRequestSubmitResponse202, self).__init__(**kwargs)
        # Read-only attributes start as None; the service fills them in on
        # deserialization.
        self.id = None
        self.name = None
        self.type = None
        self.provisioning_state = None
        self.message = None
        self.limit = limit
        self.unit = unit
        self.name_properties_name = name_properties_name
        self.resource_type = resource_type
        self.quota_period = None
        self.properties = properties
class ResourceName(msrest.serialization.Model):
    """Resource name as provided by the resource provider.

    Use this property name when requesting quota. The localized display name is
    populated by the server only.

    :param value: Resource name.
    :type value: str
    :ivar localized_value: Resource display name.
    :vartype localized_value: str
    """

    _validation = {
        'localized_value': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': 'str'},
        'localized_value': {'key': 'localizedValue', 'type': 'str'},
    }

    def __init__(self, *, value: Optional[str] = None, **kwargs):
        """Store the writable resource name; the display name is server-populated."""
        super().__init__(**kwargs)
        self.localized_value = None
        self.value = value
class ServiceError(msrest.serialization.Model):
    """API error details.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param code: Error code.
    :type code: str
    :param message: Error message.
    :type message: str
    :ivar details: List of error details.
    :vartype details: list[~azure.mgmt.quota.models.ServiceErrorDetail]
    """

    _validation = {
        'details': {'readonly': True},
    }

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'details': {'key': 'details', 'type': '[ServiceErrorDetail]'},
    }

    def __init__(self, *, code: Optional[str] = None, message: Optional[str] = None, **kwargs):
        """Store the writable code/message; the details list is server-populated."""
        super().__init__(**kwargs)
        self.code = code
        self.message = message
        self.details = None
class ServiceErrorDetail(msrest.serialization.Model):
    """Error details.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar code: Error code.
    :vartype code: str
    :ivar message: Error message.
    :vartype message: str
    """

    _validation = {
        'code': {'readonly': True},
        'message': {'readonly': True},
    }

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """All fields are read-only and populated by the service."""
        super().__init__(**kwargs)
        self.code = None
        self.message = None
class SubRequest(msrest.serialization.Model):
    """A single sub-request within a quota request.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param name: Resource name.
    :type name: ~azure.mgmt.quota.models.ResourceName
    :ivar resource_type: Resource type for which the quota properties were requested.
    :vartype resource_type: str
    :param unit: Quota limit units, such as Count and Bytes. When requesting quota, use the
     **unit** value returned in the GET response in the request body of your PUT operation.
    :type unit: str
    :ivar provisioning_state: The quota request status. Possible values include: "Accepted",
     "Invalid", "Succeeded", "Failed", "InProgress".
    :vartype provisioning_state: str or ~azure.mgmt.quota.models.QuotaRequestState
    :ivar message: User-friendly status message.
    :vartype message: str
    :ivar sub_request_id: Quota request ID.
    :vartype sub_request_id: str
    :param limit: Resource quota limit properties.
    :type limit: ~azure.mgmt.quota.models.LimitJsonObject
    """

    _validation = {
        'resource_type': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'message': {'readonly': True},
        'sub_request_id': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'ResourceName'},
        'resource_type': {'key': 'resourceType', 'type': 'str'},
        'unit': {'key': 'unit', 'type': 'str'},
        'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'sub_request_id': {'key': 'subRequestId', 'type': 'str'},
        'limit': {'key': 'limit', 'type': 'LimitJsonObject'},
    }

    def __init__(
        self,
        *,
        name: Optional["ResourceName"] = None,
        unit: Optional[str] = None,
        limit: Optional["LimitJsonObject"] = None,
        **kwargs
    ):
        """Store the writable fields; server-populated fields start as None."""
        super().__init__(**kwargs)
        # Writable:
        self.name = name
        self.unit = unit
        self.limit = limit
        # Server-populated:
        self.resource_type = None
        self.provisioning_state = None
        self.message = None
        self.sub_request_id = None
class UsagesLimits(msrest.serialization.Model):
    """One page of quota usage entries.

    :param value: List of quota limits.
    :type value: list[~azure.mgmt.quota.models.CurrentUsagesBase]
    :param next_link: The URI used to fetch the next page of quota limits; null when there are no
     more pages.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[CurrentUsagesBase]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["CurrentUsagesBase"]] = None,
        next_link: Optional[str] = None,
        **kwargs
    ):
        """Capture the page contents and the optional continuation link."""
        super().__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class UsagesObject(msrest.serialization.Model):
    """The resource usages value.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The usages value.
    :type value: int
    :param usages_type: The quota or usages limit types. Possible values include: "Individual",
     "Combined".
    :type usages_type: str or ~azure.mgmt.quota.models.UsagesTypes
    """

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': 'int'},
        'usages_type': {'key': 'usagesType', 'type': 'str'},
    }

    def __init__(self, *, value: int, usages_type: Optional[Union[str, "UsagesTypes"]] = None, **kwargs):
        """Store the required usages value and the optional usages type."""
        super().__init__(**kwargs)
        self.usages_type = usages_type
        self.value = value
class UsagesProperties(msrest.serialization.Model):
    """Usage properties for the specified resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param usages: The quota limit properties for this resource.
    :type usages: ~azure.mgmt.quota.models.UsagesObject
    :ivar unit: The units for the quota usage, such as Count and Bytes. When requesting quota, use
     the **unit** value returned in the GET response in the request body of your PUT operation.
    :vartype unit: str
    :param name: Resource name provided by the resource provider. Use this property name when
     requesting quota.
    :type name: ~azure.mgmt.quota.models.ResourceName
    :param resource_type: The name of the resource type.
    :type resource_type: str
    :ivar quota_period: The time period for the summary of the quota usage values. This parameter
     is optional because it is not relevant for all resources such as compute.
    :vartype quota_period: str
    :ivar is_quota_applicable: States if quota can be requested for this resource.
    :vartype is_quota_applicable: bool
    :param properties: Additional properties for the specific resource provider.
    :type properties: any
    """
    # Server-populated fields; client-supplied values are ignored on send.
    _validation = {
        'unit': {'readonly': True},
        'quota_period': {'readonly': True},
        'is_quota_applicable': {'readonly': True},
    }
    # Attribute -> wire key / msrest (de)serialization type mapping.
    _attribute_map = {
        'usages': {'key': 'usages', 'type': 'UsagesObject'},
        'unit': {'key': 'unit', 'type': 'str'},
        'name': {'key': 'name', 'type': 'ResourceName'},
        'resource_type': {'key': 'resourceType', 'type': 'str'},
        'quota_period': {'key': 'quotaPeriod', 'type': 'str'},
        'is_quota_applicable': {'key': 'isQuotaApplicable', 'type': 'bool'},
        'properties': {'key': 'properties', 'type': 'object'},
    }
    def __init__(
        self,
        *,
        usages: Optional["UsagesObject"] = None,
        name: Optional["ResourceName"] = None,
        resource_type: Optional[str] = None,
        properties: Optional[Any] = None,
        **kwargs
    ):
        """Store the writable fields; server-populated fields start as None."""
        super(UsagesProperties, self).__init__(**kwargs)
        self.usages = usages
        self.unit = None
        self.name = name
        self.resource_type = resource_type
        self.quota_period = None
        self.is_quota_applicable = None
        # Fix: the original final line had dataset-dump artifacts fused onto it
        # ("... | 0.944855 | 0.16228"), which would raise a TypeError at runtime.
        self.properties = properties
import logging
from iotbx.reflection_file_reader import any_reflection_file
from mmtbx.scaling import data_statistics
from six.moves import StringIO
logger = logging.getLogger("xia2.Modules.CctbxFrenchWilson")
def do_french_wilson(mtz_file, hklout, anomalous=False):
    """Convert intensities in an MTZ file to amplitudes via French & Wilson.

    Reads *mtz_file*, selects the IMEAN/SIGIMEAN intensity array (or the
    anomalous I(+)/I(-) arrays when *anomalous* is True), runs cctbx's
    French-Wilson truncation, appends the resulting F (and DANO, if anomalous)
    columns to the MTZ object, and writes it to *hklout*.

    :param mtz_file: path to the input CCP4 MTZ file.
    :param hklout: path for the output MTZ file.
    :param anomalous: if True, use the anomalous intensity columns.
    :return: the text log captured while processing (summaries and statistics).
    """
    logger.debug("Reading reflections from %s", mtz_file)
    result = any_reflection_file(mtz_file)
    assert result.file_type() == "ccp4_mtz"
    mtz_object = result.file_content()
    output = StringIO()
    mtz_object.show_summary(out=output)
    for ma in result.as_miller_arrays(merge_equivalents=False):
        if anomalous and ma.info().labels == [
            "I(+)",
            "SIGI(+)",
            "I(-)",
            "SIGI(-)",
        ]:
            assert ma.anomalous_flag()
            intensities = ma.merge_equivalents().array()  # XXX why is this necessary?
        elif ma.info().labels == ["IMEAN", "SIGIMEAN"]:
            assert not ma.anomalous_flag()
            intensities = ma
        else:
            intensities = None
        if intensities:
            assert intensities.is_xray_intensity_array()
            amplitudes = intensities.french_wilson(log=output)
            assert amplitudes.is_xray_amplitude_array()
            dano = None
            if amplitudes.anomalous_flag():
                dano = amplitudes.anomalous_differences()
            if not intensities.space_group().is_centric():
                merged_intensities = intensities.merge_equivalents().array()
                wilson_scaling = data_statistics.wilson_scaling(
                    miller_array=merged_intensities, n_residues=200
                )  # XXX default n_residues?
                wilson_scaling.show(out=output)
            # NOTE(review): assumes the crystal of interest is at index 1 —
            # TODO confirm this holds for all inputs.
            mtz_dataset = mtz_object.crystals()[1].datasets()[0]
            mtz_dataset.add_miller_array(amplitudes, column_root_label="F")
            if dano is not None:
                mtz_dataset.add_miller_array(
                    dano, column_root_label="DANO", column_types="DQ"
                )
    mtz_object.add_history("cctbx.french_wilson analysis")
    mtz_object.show_summary(out=output)
    logger.debug("Writing reflections to %s", hklout)
    mtz_object.write(hklout)
    # Fix: the original return line had dataset-dump artifacts fused onto it
    # ("... | Modules/CctbxFrenchWilson.py | import logging"), a syntax error.
    return output.getvalue()
from iotbx.reflection_file_reader import any_reflection_file
from mmtbx.scaling import data_statistics
from six.moves import StringIO
logger = logging.getLogger("xia2.Modules.CctbxFrenchWilson")
def do_french_wilson(mtz_file, hklout, anomalous=False):
    """Convert intensities in an MTZ file to amplitudes via French & Wilson.

    Reads *mtz_file*, selects the IMEAN/SIGIMEAN intensity array (or the
    anomalous I(+)/I(-) arrays when *anomalous* is True), runs cctbx's
    French-Wilson truncation, appends the resulting F (and DANO, if anomalous)
    columns to the MTZ object, and writes it to *hklout*.

    :param mtz_file: path to the input CCP4 MTZ file.
    :param hklout: path for the output MTZ file.
    :param anomalous: if True, use the anomalous intensity columns.
    :return: the text log captured while processing (summaries and statistics).
    """
    logger.debug("Reading reflections from %s", mtz_file)
    result = any_reflection_file(mtz_file)
    assert result.file_type() == "ccp4_mtz"
    mtz_object = result.file_content()
    output = StringIO()
    mtz_object.show_summary(out=output)
    for ma in result.as_miller_arrays(merge_equivalents=False):
        if anomalous and ma.info().labels == [
            "I(+)",
            "SIGI(+)",
            "I(-)",
            "SIGI(-)",
        ]:
            assert ma.anomalous_flag()
            intensities = ma.merge_equivalents().array()  # XXX why is this necessary?
        elif ma.info().labels == ["IMEAN", "SIGIMEAN"]:
            assert not ma.anomalous_flag()
            intensities = ma
        else:
            intensities = None
        if intensities:
            assert intensities.is_xray_intensity_array()
            amplitudes = intensities.french_wilson(log=output)
            assert amplitudes.is_xray_amplitude_array()
            dano = None
            if amplitudes.anomalous_flag():
                dano = amplitudes.anomalous_differences()
            if not intensities.space_group().is_centric():
                merged_intensities = intensities.merge_equivalents().array()
                wilson_scaling = data_statistics.wilson_scaling(
                    miller_array=merged_intensities, n_residues=200
                )  # XXX default n_residues?
                wilson_scaling.show(out=output)
            # NOTE(review): assumes the crystal of interest is at index 1 —
            # TODO confirm this holds for all inputs.
            mtz_dataset = mtz_object.crystals()[1].datasets()[0]
            mtz_dataset.add_miller_array(amplitudes, column_root_label="F")
            if dano is not None:
                mtz_dataset.add_miller_array(
                    dano, column_root_label="DANO", column_types="DQ"
                )
    mtz_object.add_history("cctbx.french_wilson analysis")
    mtz_object.show_summary(out=output)
    logger.debug("Writing reflections to %s", hklout)
    mtz_object.write(hklout)
    # Fix: the original return line had dataset-dump artifacts fused onto it
    # ("... | 0.440229 | 0.349339"), a syntax error.
    return output.getvalue()
import sqlite3
import config
class TokensModel:
    """SQLite data-access helpers for the device/token store (``data.db``).

    Instances are plain value holders; all database access goes through the
    classmethods below.  Because SQL identifiers (table/column names) cannot
    be bound as ``?`` parameters, they are validated against whitelists
    before being interpolated into query strings.
    """

    # Tables the query helpers may touch; guards the string-built SQL below.
    _TABLES = (
        "device_store",
        "device_status_store",
        "validation_store",
        "upload_info_store",
        "slr_request_code_tbl",
    )

    def __init__(self, uuid, ipaddr, username, password, sa_name, va_name, domain):
        """Store the identifying attributes of one device record."""
        self.uuid = uuid
        self.ipaddr = ipaddr
        self.username = username
        self.password = password
        self.sa_name = sa_name
        self.va_name = va_name
        self.domain = domain

    @staticmethod
    def _checked_table(table):
        """Return *table* unchanged, or raise ValueError if it is not whitelisted."""
        if table not in TokensModel._TABLES:
            raise ValueError("Unknown table: %r" % (table,))
        return table

    @staticmethod
    def _checked_column(column):
        """Return *column* unchanged, or raise ValueError if it is not a plain identifier."""
        if not column.isidentifier():
            raise ValueError("Invalid column name: %r" % (column,))
        return column

    @classmethod
    def find_by_uuid(cls, uuid, table):
        """Return every row of *table* whose uuid column equals *uuid*."""
        query = "SELECT * FROM %s WHERE uuid=?" % cls._checked_table(table)
        connection = sqlite3.connect('data.db')
        cursor = connection.cursor()
        rows = cursor.execute(query, (uuid,)).fetchall()
        connection.close()
        print("==>> Printing rows from within classmethod: find_by_uuid <<==")
        print(rows)
        return rows

    @classmethod
    def join_by_uuid(cls, uuid, table1, table2):
        """Return SLR request rows joined with device details for *uuid*.

        Only the (slr_request_code_tbl, device_store) pair is supported; the
        tables are joined on their shared device_uuid column.
        """
        if table1 != "slr_request_code_tbl" or table2 != "device_store":
            raise ValueError("Unsupported join: %r with %r" % (table1, table2))
        query = "SELECT slr_request_code_tbl.uuid, slr_request_code_tbl.ipaddr," \
                " slr_request_code_tbl.step1, slr_request_code_tbl.step2, slr_request_code_tbl.step3," \
                " slr_request_code_tbl.authz_req_code, slr_request_code_tbl.license_count," \
                " slr_request_code_tbl.license_entitlement_tag, device_store.sa_name, device_store.va_name," \
                " device_store.domain, device_store.device_uuid, slr_request_code_tbl.authz_response_code" \
                " FROM slr_request_code_tbl INNER JOIN device_store" \
                " ON slr_request_code_tbl.device_uuid = device_store.device_uuid WHERE slr_request_code_tbl.uuid=?"
        connection = sqlite3.connect('data.db')
        cursor = connection.cursor()
        rows = cursor.execute(query, (uuid,)).fetchall()
        connection.close()
        print("==>> Printing rows from within classmethod: join_by_uuid <<==")
        print(rows)
        return rows

    @classmethod
    def find_by_uuid_column(cls, uuid, table, column):
        """Return only *column* for the rows of *table* matching *uuid*."""
        query = "SELECT %s FROM %s WHERE uuid=?" % (
            cls._checked_column(column),
            cls._checked_table(table),
        )
        connection = sqlite3.connect('data.db')
        cursor = connection.cursor()
        rows = cursor.execute(query, (uuid,)).fetchall()
        connection.close()
        print("==>> Printing rows from within classmethod: find_by_uuid_column <<==")
        print(rows)
        return rows

    @classmethod
    def find_last_records(cls, user, table, order_column):
        """Return the last 10 rows of upload_info_store.

        NOTE(review): *user* and *order_column* are accepted for interface
        compatibility but are currently unused — TODO confirm the intended
        filtering/ordering.  Only ``upload_info_store`` is supported.
        """
        print("In find_last_records ...")
        print("user: ", user)
        print("table:", table)
        print("order_column:", order_column)
        if table != "upload_info_store":
            raise ValueError("Unsupported table: %r" % (table,))
        # A negative OFFSET (fewer than 10 rows in the table) is treated as 0.
        query = "SELECT * FROM upload_info_store" \
                " LIMIT 10 OFFSET (SELECT COUNT(*) FROM upload_info_store)-10"
        connection = sqlite3.connect('data.db')
        cursor = connection.cursor()
        rows = cursor.execute(query).fetchall()
        connection.close()
        print("==>> Printing rows from within classmethod: find_last_records <<==")
        print(rows)
        return rows

    @classmethod
    def find_by_uuid_and_column(cls, uuid, table, column, column_value):
        """Return rows of *table* where uuid matches and *column* equals *column_value*."""
        query = "SELECT * FROM %s WHERE uuid=? AND %s=?" % (
            cls._checked_table(table),
            cls._checked_column(column),
        )
        connection = sqlite3.connect('data.db')
        cursor = connection.cursor()
        rows = cursor.execute(query, (uuid, column_value)).fetchall()
        connection.close()
        print("==>> Printing rows from within classmethod: find_by_uuid_and_column <<==")
        print(rows)
        return rows

    @classmethod
    def find_by_uuid_distinct(cls, uuid, table):
        """Return the distinct ``domain`` values in *table* for *uuid*.

        Bug fix: the table name was previously bound as a ``?`` parameter,
        which SQLite does not support for identifiers, so this query always
        failed at runtime.
        """
        query = "SELECT DISTINCT domain FROM %s WHERE uuid=?" % cls._checked_table(table)
        connection = sqlite3.connect('data.db')
        cursor = connection.cursor()
        rows = cursor.execute(query, (uuid,)).fetchall()
        connection.close()
        print("==>> Printing rows for unique domain names <<==")
        print(rows)
        if rows:  # the original indexed rows[0][0] unconditionally and crashed on empty results
            print(rows[0][0])
        return rows

    @classmethod
    def find_by_uuid_slice(cls, uuid, page, table):
        """Return one page of rows of *table* for *uuid* (1-based *page*).

        NOTE(review): the offset is computed with a hard-coded page size of 10
        while the LIMIT uses config.POSTS_PER_PAGE — confirm these agree.
        """
        offset = (page - 1) * 10
        print("IN method find_by_uuid_slice...")
        print("uuid is:", uuid)
        print("page is:", page)
        print("Posts per page / offset is:", config.POSTS_PER_PAGE)
        print("Offset value is:", offset)
        query = "SELECT * FROM %s WHERE uuid=? LIMIT ? OFFSET ?" % cls._checked_table(table)
        connection = sqlite3.connect('data.db')
        cursor = connection.cursor()
        rows = cursor.execute(query, (uuid, config.POSTS_PER_PAGE, offset)).fetchall()
        connection.close()
        print("==>> Printing rows for unique domain names <<==")
        print(rows)
        return rows

    @classmethod
    def insert(cls, uuid, devices_data_list, table):
        """Insert one row per entry of *devices_data_list* into *table*.

        Each dict in *devices_data_list* must carry the keys required by the
        target table (see the per-table branches below).
        """
        cls._checked_table(table)
        connection = sqlite3.connect('data.db')
        cursor = connection.cursor()
        print("In tokens model - insert method...")
        print("devices_data_list:", devices_data_list)
        print("uuid:", uuid)
        print("table:", table)
        if table == "device_store":
            query = "INSERT INTO device_store VALUES (?, ?, ?, ?, ?, ?, ?, ?)"
            for device in devices_data_list:
                cursor.execute(query, (uuid, device['ipaddr'], device['username'], device['password'],
                                       device['sa_name'], device['va_name'], device['domain'], device['device_uuid']))
        elif table == "slr_request_code_tbl":
            # New SLR rows start with the three steps in the "NS" (not started)
            # state and empty authorization codes.
            query_slr = "INSERT INTO slr_request_code_tbl VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
            print("Starting data entry into device_store for SLR")
            for device in devices_data_list:
                cursor.execute(query_slr, (uuid, device['ipaddr'], "NS", "NS", "NS", "", "", device['license'],
                                           device['license_count'], device['tftp_server_ip'],
                                           device['tftp_server_path'], "", device['device_uuid']))
            print("Executed data entry into device_store for SLR")
        elif table == "validation_store":
            query = "INSERT INTO validation_store VALUES (?, ?, ?, ?)"
            for device in devices_data_list:
                cursor.execute(query, (uuid, device['sa_name'], device['va_name'], device['domain']))
        elif table == "device_status_store":
            query = "INSERT INTO device_status_store VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)"
            for device in devices_data_list:
                cursor.execute(query, (uuid, device['ipaddr'], device['username'], device['password'],
                                       device['sa_name'], device['va_name'], device['domain'], device['status'],
                                       device['device_uuid']))
        elif table == "upload_info_store":
            query = "INSERT INTO upload_info_store VALUES (?, ?, ?, ?, ?, ?)"
            for device in devices_data_list:
                cursor.execute(query, (uuid, device['userid'], device['filename'], device['type'], device['timestamp'],
                                       device['status']))
        connection.commit()
        connection.close()

    @classmethod
    def insert_slr(cls, uuid, devices_data_list, table):
        """Insert fully-specified SLR request rows into slr_request_code_tbl."""
        if table != "slr_request_code_tbl":
            raise ValueError("Unsupported table: %r" % (table,))
        connection = sqlite3.connect('data.db')
        cursor = connection.cursor()
        print("In tokens model - slr_insert method...")
        print("devices_data_list:", devices_data_list)
        print("uuid:", uuid)
        print("table:", table)
        query_slr = "INSERT INTO slr_request_code_tbl VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
        print("Starting insert data entry into slr_request_code_tbl for SLR")
        for device in devices_data_list:
            cursor.execute(query_slr, (uuid, device['ipaddr'], device['step1'], device['step2'], device['step3'],
                                       device['authz_req_code'], device['authz_response_code'], device['license'],
                                       device['license_count'], device['tftp_server_ip'],
                                       device['tftp_server_path'], device['license_entitlement_tag'],
                                       device['device_uuid']))
        print("Executed insert data entry into slr_request_code_tbl for SLR")
        connection.commit()
        connection.close()

    @classmethod
    def update(cls, uuid, response, table):
        """Update a status column keyed by uuid (and ipaddr for device rows).

        *response* is a dict carrying at least 'status' (plus 'ipaddr' for
        device_status_store updates).
        """
        connection = sqlite3.connect('data.db')
        cursor = connection.cursor()
        print("@@@@@@@@@@ In update method in models/tokens updating: ", table)
        print(response)
        print(response['status'])
        print("@@@@@@@@@@ In update method in models/tokens updated:", table)
        if table == "device_status_store":
            query = "UPDATE device_status_store SET status=? WHERE ipaddr=? AND uuid=?"
            cursor.execute(query, (response['status'], response['ipaddr'], uuid))
        elif table == "upload_info_store":
            query = "UPDATE upload_info_store SET status=? WHERE uuid=?"
            cursor.execute(query, (response['status'], uuid))
        else:
            connection.close()
            raise ValueError("Unknown table: %r" % (table,))
        connection.commit()
        connection.close()
import sqlite3
import config
class TokensModel:
    def __init__(self, uuid, ipaddr, username, password, sa_name, va_name, domain):
        """Hold the identifying attributes of one device record.

        Instances are plain value holders; all database access happens via
        the classmethods on this class.
        """
        self.uuid = uuid
        self.ipaddr = ipaddr
        self.username = username
        self.password = password
        self.sa_name = sa_name
        self.va_name = va_name
        self.domain = domain
@classmethod
def find_by_uuid(cls, uuid, table):
connection = sqlite3.connect('data.db')
cursor = connection.cursor()
query = ""
if table == "device_store":
query = "SELECT * FROM device_store WHERE uuid=?"
elif table == "device_status_store":
query = "SELECT * FROM device_status_store WHERE uuid=?"
elif table == "validation_store":
query = "SELECT * FROM validation_store WHERE uuid=?"
elif table == "upload_info_store":
query = "SELECT * FROM upload_info_store WHERE uuid=?"
elif table == "slr_request_code_tbl":
query = "SELECT * FROM slr_request_code_tbl WHERE uuid=?"
result = cursor.execute(query, (uuid,))
rows = result.fetchall()
connection.close()
print("==>> Printing rows from within classmethod: find_by_uuid <<==")
print(rows)
return rows
@classmethod
def join_by_uuid(cls, uuid, table1, table2):
connection = sqlite3.connect('data.db')
cursor = connection.cursor()
query = ""
if table1 == "slr_request_code_tbl" and table2 == "device_store":
# query = "SELECT slr_request_code_tbl.uuid, slr_request_code_tbl.ipaddr," \
# " slr_request_code_tbl.step1, slr_request_code_tbl.step2, slr_request_code_tbl.step3," \
# " slr_request_code_tbl.authz_req_code, slr_request_code_tbl.license_count," \
# " slr_request_code_tbl.license_entitlement_tag, device_store.sa_name, device_store.va_name," \
# " device_store.domain, device_store.device_uuid FROM slr_request_code_tbl INNER JOIN device_store" \
# " ON slr_request_code_tbl.uuid = device_store.uuid WHERE slr_request_code_tbl.uuid=?"
query = "SELECT slr_request_code_tbl.uuid, slr_request_code_tbl.ipaddr," \
" slr_request_code_tbl.step1, slr_request_code_tbl.step2, slr_request_code_tbl.step3," \
" slr_request_code_tbl.authz_req_code, slr_request_code_tbl.license_count," \
" slr_request_code_tbl.license_entitlement_tag, device_store.sa_name, device_store.va_name," \
" device_store.domain, device_store.device_uuid, slr_request_code_tbl.authz_response_code" \
" FROM slr_request_code_tbl INNER JOIN device_store" \
" ON slr_request_code_tbl.device_uuid = device_store.device_uuid WHERE slr_request_code_tbl.uuid=?"
result = cursor.execute(query, (uuid,))
# result = cursor.execute(query)
rows = result.fetchall()
connection.close()
print("==>> Printing rows from within classmethod: join_by_uuid <<==")
print(rows)
return rows
@classmethod
def find_by_uuid_column(cls, uuid, table, column):
connection = sqlite3.connect('data.db')
cursor = connection.cursor()
query = ""
if table == "device_store":
query = "SELECT " + column + " FROM device_store WHERE uuid=?"
elif table == "device_status_store":
query = "SELECT " + column + " FROM device_status_store WHERE uuid=?"
elif table == "validation_store":
query = "SELECT " + column + " FROM validation_store WHERE uuid=?"
elif table == "upload_info_store":
query = "SELECT " + column + " FROM upload_info_store WHERE uuid=?"
result = cursor.execute(query, (uuid,))
rows = result.fetchall()
connection.close()
print("==>> Printing rows from within classmethod: find_by_uuid_column <<==")
print(rows)
return rows
@classmethod
def find_last_records(cls, user, table, order_column):
# For debugging
print("In find_last_records ...")
print("user: ", user)
print("table:", table)
print("order_column:", order_column)
connection = sqlite3.connect('data.db')
cursor = connection.cursor()
query = ""
if table == "upload_info_store":
query = "SELECT * FROM upload_info_store" \
" LIMIT 10 OFFSET (SELECT COUNT(*) FROM upload_info_store)-10"
result = cursor.execute(query,)
rows = result.fetchall()
connection.close()
print("==>> Printing rows from within classmethod: find_last_records <<==")
print(rows)
return rows
@classmethod
def find_by_uuid_and_column(cls, uuid, table, column, column_value):
connection = sqlite3.connect('data.db')
cursor = connection.cursor()
query = ""
if table == "device_store":
query = "SELECT * FROM device_store WHERE uuid=? AND " + column + "=?"
elif table == "device_status_store":
query = "SELECT * FROM device_status_store WHERE uuid=? AND " + column + "=?"
elif table == "validation_store":
query = "SELECT * FROM validation_store WHERE uuid=? AND " + column + "=?"
elif table == "upload_info_store":
query = "SELECT * FROM upload_info_store WHERE uuid=? AND " + column + "=?"
result = cursor.execute(query, (uuid, column_value))
rows = result.fetchall()
connection.close()
print("==>> Printing rows from within classmethod: find_by_uuid_and_column <<==")
print(rows)
return rows
@classmethod
def find_by_uuid_distinct(cls, uuid, table):
connection = sqlite3.connect('data.db')
cursor = connection.cursor()
query = "SELECT DISTINCT domain FROM ? WHERE uuid=?"
result = cursor.execute(query, (table, uuid))
rows = result.fetchall()
connection.close()
print("==>> Printing rows for unique domain names <<==")
print(rows)
print(rows[0][0])
return rows
@classmethod
def find_by_uuid_slice(cls, uuid, page, table):
connection = sqlite3.connect('data.db')
cursor = connection.cursor()
offset = (page - 1) * 10
print("IN method find_by_uuid_slice...")
print("uuid is:", uuid)
print("page is:", page)
print("Posts per page / offset is:", config.POSTS_PER_PAGE)
print("Offset value is:", offset)
if table == "device_store":
query = "SELECT * FROM device_store WHERE uuid=? LIMIT ? OFFSET ?"
elif table == "device_status_store":
query = "SELECT * FROM device_status_store WHERE uuid=? LIMIT ? OFFSET ?"
elif table == "validation_store":
query = "SELECT * FROM validation_store WHERE uuid=? LIMIT ? OFFSET ?"
elif table == "upload_info_store":
query = "SELECT * FROM upload_info_store WHERE uuid=? LIMIT ? OFFSET ?"
result = cursor.execute(query, (uuid, config.POSTS_PER_PAGE, offset))
rows = result.fetchall()
connection.close()
print("==>> Printing rows for unique domain names <<==")
print(rows)
return rows
@classmethod
def insert(cls, uuid, devices_data_list, table):
connection = sqlite3.connect('data.db')
cursor = connection.cursor()
# For debugging
print("In tokens model - insert method...")
print("devices_data_list:", devices_data_list)
print("uuid:", uuid)
print("table:", table)
query = ""
if table == "device_store":
query = "INSERT INTO device_store VALUES (?, ?, ?, ?, ?, ?, ?, ?)"
for device in devices_data_list:
cursor.execute(query, (uuid, device['ipaddr'], device['username'], device['password'],
device['sa_name'], device['va_name'], device['domain'], device['device_uuid']))
elif table == "slr_request_code_tbl":
query_slr = "INSERT INTO slr_request_code_tbl VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
print("Starting data entry into device_store for SLR")
for device in devices_data_list:
cursor.execute(query_slr, (uuid, device['ipaddr'], "NS", "NS", "NS", "", "", device['license'],
device['license_count'], device['tftp_server_ip'],
device['tftp_server_path'], "", device['device_uuid']))
print("Executed data entry into device_store for SLR")
elif table == "validation_store":
query = "INSERT INTO validation_store VALUES (?, ?, ?, ?)"
for device in devices_data_list:
cursor.execute(query, (uuid, device['sa_name'], device['va_name'], device['domain']))
elif table == "device_status_store":
query = "INSERT INTO device_status_store VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)"
for device in devices_data_list:
cursor.execute(query, (uuid, device['ipaddr'], device['username'], device['password'],
device['sa_name'], device['va_name'], device['domain'], device['status'],
device['device_uuid']))
elif table == "upload_info_store":
query = "INSERT INTO upload_info_store VALUES (?, ?, ?, ?, ?, ?)"
for device in devices_data_list:
cursor.execute(query, (uuid, device['userid'], device['filename'], device['type'], device['timestamp'],
device['status']))
connection.commit()
connection.close()
@classmethod
def insert_slr(cls, uuid, devices_data_list, table):
connection = sqlite3.connect('data.db')
cursor = connection.cursor()
# For debugging
print("In tokens model - slr_insert method...")
print("devices_data_list:", devices_data_list)
print("uuid:", uuid)
print("table:", table)
query = ""
if table == "slr_request_code_tbl":
query_slr = "INSERT INTO slr_request_code_tbl VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
print("Starting insert data entry into slr_request_code_tbl for SLR")
for device in devices_data_list:
cursor.execute(query_slr, (uuid, device['ipaddr'], device['step1'], device['step2'], device['step3'],
device['authz_req_code'], device['authz_response_code'], device['license'],
device['license_count'], device['tftp_server_ip'],
device['tftp_server_path'], device['license_entitlement_tag'],
device['device_uuid']))
print("Executed insert data entry into slr_request_code_tbl for SLR")
connection.commit()
connection.close()
@classmethod
def update(cls, uuid, response, table):
connection = sqlite3.connect('data.db')
cursor = connection.cursor()
# query = ""
print("@@@@@@@@@@ In update method in models/tokens updating: ", table)
print(response)
print(response['status'])
print("@@@@@@@@@@ In update method in models/tokens updated:", table)
if table == "device_status_store":
query = "UPDATE device_status_store SET status=? WHERE ipaddr=? AND uuid=?"
cursor.execute(query, (response['status'], response['ipaddr'], uuid))
elif table == "upload_info_store":
query = "UPDATE upload_info_store SET status=? WHERE uuid=?"
cursor.execute(query, (response['status'], uuid))
connection.commit()
connection.close() | 0.33928 | 0.09947 |
import os
from pprint import pprint as pp
import os
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from pyzbar import pyzbar as zbar
from pprint import pprint as pp
from barcode.rotation import get_rotation
import time
import csv
def detect_from_image(image):
    """Classify one video frame by the EAN5 barcode it contains.

    The frame is rotated in 10-degree steps (0..90, then -90..-10) until
    pyzbar decodes an EAN5 code.  Returns 1/2/3 for the known payloads
    b'33333'/b'66666'/b'99999', and 0 when nothing (or an unknown code)
    was found.
    """
    codes = None
    # Positive rotations first, then the negative half-turn range.
    for deg in list(range(0, 100, 10)) + list(range(-90, 0, 10)):
        rotated = get_rotation(image, deg)
        decoded = zbar.decode(rotated, [5])  # EAN5
        if decoded:
            codes = decoded
            break
    if codes is None:
        return 0
    # Map the known payloads to their class id; anything else counts as 0.
    payload_to_class = {b'33333': 1, b'66666': 2, b'99999': 3}
    return payload_to_class.get(codes[0].data, 0)
def detect_from_file(filename):
    """Run barcode detection on every frame of *filename*.

    Writes one "<frame>, <class>" line per frame to the sibling CSV file
    (same path, 'csv' extension) and prints a progress ratio.
    """
    # Rewrite only the trailing extension; str.replace('mp4', 'csv') would
    # also corrupt an 'mp4' occurring elsewhere in the path.
    if filename.endswith('mp4'):
        csvname = filename[:-3] + 'csv'
    else:
        csvname = filename.replace('mp4', 'csv')
    vc = cv.VideoCapture(filename)
    # CAP_PROP_FRAME_COUNT can be 0 for some containers; clamp so the
    # progress print never divides by zero.
    length = int(vc.get(cv.CAP_PROP_FRAME_COUNT)) or 1
    frame_count = 0
    # Context manager guarantees the CSV is closed even if decoding raises.
    with open(csvname, 'w') as csvfile:
        ret, image = vc.read()
        while ret:
            frame_count += 1
            frame_res = detect_from_image(image)
            print('\r', frame_count / length, frame_res, end='')
            csvfile.write('{}, {}\n'.format(frame_count, frame_res))
            csvfile.flush()
            ret, image = vc.read()
    vc.release()  # free the decoder/file handle
    print()
def plot_frequency(filename):
    """Plot per-frame barcode detections stored in the sibling CSV.

    Loads "<frame>, <class>" rows produced by detect_from_file, draws one
    0/1 indicator series per rotation class (1, 2, 3) and saves the plot
    next to the CSV as a PNG.
    """
    # Rewrite only the trailing extension (see detect_from_file).
    if filename.endswith('mp4'):
        csvname = filename[:-3] + 'csv'
    else:
        csvname = filename
    plotname = csvname[:-3] + 'png'
    data = np.loadtxt(csvname, delimiter=',')
    detected = data[:, 1]
    # One indicator series per barcode class.
    rot_1 = (detected == 1).astype(float)
    rot_2 = (detected == 2).astype(float)
    rot_3 = (detected == 3).astype(float)
    plt.figure()
    plt.title(plotname)
    plt.plot(rot_1, label='ROT1')
    plt.plot(rot_2, label='ROT2')
    plt.plot(rot_3, label='ROT3')
    plt.legend()
    plt.savefig(plotname)
    # Close the figure so batch runs (one call per video) don't accumulate
    # open figures and leak memory.
    plt.close()
if __name__ == '__main__':
    # Walk the video tree; analyse each .mp4 once (skip if its CSV already
    # exists), then (re-)plot it.
    for root, dirs, files in os.walk('./videos'):
        if not files:
            print('No Video in ', root)
            continue
        for name in files:
            path = '{}/{}'.format(root, name)
            if not path.endswith('mp4'):
                continue
            print('Analysing Video: ', path)
            if not os.path.isfile(path.replace('mp4', 'csv')):
                detect_from_file(path)
            plot_frequency(path)
from pprint import pprint as pp
import os
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from pyzbar import pyzbar as zbar
from pprint import pprint as pp
from barcode.rotation import get_rotation
import time
import csv
def detect_from_image(image):
    """Classify one video frame by the EAN5 barcode it contains.

    The frame is rotated in 10-degree steps (0..90, then -90..-10) until
    pyzbar decodes an EAN5 code.  Returns 1/2/3 for the known payloads
    b'33333'/b'66666'/b'99999', and 0 when nothing (or an unknown code)
    was found.
    """
    codes = None
    # Positive rotations first, then the negative half-turn range.
    for deg in list(range(0, 100, 10)) + list(range(-90, 0, 10)):
        rotated = get_rotation(image, deg)
        decoded = zbar.decode(rotated, [5])  # EAN5
        if decoded:
            codes = decoded
            break
    if codes is None:
        return 0
    # Map the known payloads to their class id; anything else counts as 0.
    payload_to_class = {b'33333': 1, b'66666': 2, b'99999': 3}
    return payload_to_class.get(codes[0].data, 0)
def detect_from_file(filename):
    """Run barcode detection on every frame of *filename*.

    Writes one "<frame>, <class>" line per frame to the sibling CSV file
    (same path, 'csv' extension) and prints a progress ratio.
    """
    # Rewrite only the trailing extension; str.replace('mp4', 'csv') would
    # also corrupt an 'mp4' occurring elsewhere in the path.
    if filename.endswith('mp4'):
        csvname = filename[:-3] + 'csv'
    else:
        csvname = filename.replace('mp4', 'csv')
    vc = cv.VideoCapture(filename)
    # CAP_PROP_FRAME_COUNT can be 0 for some containers; clamp so the
    # progress print never divides by zero.
    length = int(vc.get(cv.CAP_PROP_FRAME_COUNT)) or 1
    frame_count = 0
    # Context manager guarantees the CSV is closed even if decoding raises.
    with open(csvname, 'w') as csvfile:
        ret, image = vc.read()
        while ret:
            frame_count += 1
            frame_res = detect_from_image(image)
            print('\r', frame_count / length, frame_res, end='')
            csvfile.write('{}, {}\n'.format(frame_count, frame_res))
            csvfile.flush()
            ret, image = vc.read()
    vc.release()  # free the decoder/file handle
    print()
def plot_frequency(filename):
    """Plot per-frame barcode detections stored in the sibling CSV.

    Loads "<frame>, <class>" rows produced by detect_from_file, draws one
    0/1 indicator series per rotation class (1, 2, 3) and saves the plot
    next to the CSV as a PNG.
    """
    # Rewrite only the trailing extension (see detect_from_file).
    if filename.endswith('mp4'):
        csvname = filename[:-3] + 'csv'
    else:
        csvname = filename
    plotname = csvname[:-3] + 'png'
    data = np.loadtxt(csvname, delimiter=',')
    detected = data[:, 1]
    # One indicator series per barcode class.
    rot_1 = (detected == 1).astype(float)
    rot_2 = (detected == 2).astype(float)
    rot_3 = (detected == 3).astype(float)
    plt.figure()
    plt.title(plotname)
    plt.plot(rot_1, label='ROT1')
    plt.plot(rot_2, label='ROT2')
    plt.plot(rot_3, label='ROT3')
    plt.legend()
    plt.savefig(plotname)
    # Close the figure so batch runs (one call per video) don't accumulate
    # open figures and leak memory.
    plt.close()
if __name__ == '__main__':
    # Walk the video tree; analyse each .mp4 once (skip if its CSV already
    # exists), then (re-)plot it.
    for root, dirs, files in os.walk('./videos'):
        if not files:
            print('No Video in ', root)
            continue
        for name in files:
            path = '{}/{}'.format(root, name)
            if not path.endswith('mp4'):
                continue
            print('Analysing Video: ', path)
            if not os.path.isfile(path.replace('mp4', 'csv')):
                detect_from_file(path)
            plot_frequency(path)
import os
import unittest
import test_env
from test_case import TestCaseWithFuzzer
class CorpusTest(TestCaseWithFuzzer):
    """Tests for the fuzzer corpus abstraction (device discovery, host and
    GCS imports, measurement) against the fake test harness."""

    def test_find_on_device(self):
        # Initially only the mutable data path exists; the read-only
        # resource path is discovered once an element appears under it.
        data = self.ns.data('corpus')
        resource = self.ns.resource('corpus')
        self.corpus.find_on_device()
        self.assertEqual(self.corpus.nspaths, [data])
        self.touch_on_device(self.ns.resource_abspath('corpus/deadbeef'))
        self.corpus.find_on_device()
        self.assertEqual(self.corpus.nspaths, [data, resource])

    def test_add_from_host(self):
        # Invalid directory: adding from a non-existent path must error.
        local_path = 'corpus_dir'
        self.assertError(
            lambda: self.corpus.add_from_host(local_path),
            'No such directory: {}'.format(local_path))
        self.host.mkdir(local_path)

        # Fuzzer is running: adding while the fuzzer runs must be refused.
        corpus_element = os.path.join(local_path, 'element')
        self.host.touch(corpus_element)
        self.set_running(
            self.fuzzer.package, self.fuzzer.executable, duration=10)
        self.assertError(
            lambda: self.corpus.add_from_host(local_path),
            'fake-package1/fake-target1 is running and must be stopped first.')
        self.host.sleep(10)

        # Valid: the element is copied (scp) to the device data path.
        added = self.corpus.add_from_host(local_path)
        self.assertEqual(len(added), 1)
        self.assertScpTo(
            corpus_element, self.ns.data_abspath(self.corpus.nspaths[0]))

    def test_add_from_gcs(self):
        # Note: this takes advantage of the fact that the FakeCLI always
        # returns the same name for temp_dir().
        with self.host.temp_dir() as temp_dir:
            gcs_url = 'gs://bucket'
            cmd = ['gsutil', '-m', 'cp', gcs_url + '/*', temp_dir.pathname]
            process = self.get_process(cmd)
            # A failing gsutil download must surface a user-actionable error.
            process.succeeds = False
            self.assertError(
                lambda: self.corpus.add_from_gcs(gcs_url),
                'Failed to download corpus from GCS.',
                'You can skip downloading from GCS with the "--local" flag.')
            # With gsutil succeeding, the downloaded element lands on device.
            process.succeeds = True
            corpus_element = os.path.join(temp_dir.pathname, 'element')
            self.host.touch(corpus_element)
            added = self.corpus.add_from_gcs(gcs_url)
            self.assertEqual(len(added), 1)
            self.assertRan(*cmd)
            self.assertScpTo(
                corpus_element, self.ns.data_abspath(self.corpus.nspaths[0]))

    def test_measure(self):
        # measure() returns (element count, total bytes);
        # 1 + 1728 == 1729 == 1000 + 729.
        self.touch_on_device(self.ns.data_abspath('corpus/deadbeef'), size=1000)
        self.touch_on_device(self.ns.data_abspath('corpus/feedface'), size=729)
        sizes = self.corpus.measure()
        self.assertEqual(sizes, (2, 1 + 1728))
# Allow running this test module directly with the stock unittest runner.
if __name__ == '__main__':
    unittest.main()
import os
import unittest
import test_env
from test_case import TestCaseWithFuzzer
class CorpusTest(TestCaseWithFuzzer):
    """Tests for the fuzzer corpus abstraction (device discovery, host and
    GCS imports, measurement) against the fake test harness."""

    def test_find_on_device(self):
        # Initially only the mutable data path exists; the read-only
        # resource path is discovered once an element appears under it.
        data = self.ns.data('corpus')
        resource = self.ns.resource('corpus')
        self.corpus.find_on_device()
        self.assertEqual(self.corpus.nspaths, [data])
        self.touch_on_device(self.ns.resource_abspath('corpus/deadbeef'))
        self.corpus.find_on_device()
        self.assertEqual(self.corpus.nspaths, [data, resource])

    def test_add_from_host(self):
        # Invalid directory: adding from a non-existent path must error.
        local_path = 'corpus_dir'
        self.assertError(
            lambda: self.corpus.add_from_host(local_path),
            'No such directory: {}'.format(local_path))
        self.host.mkdir(local_path)

        # Fuzzer is running: adding while the fuzzer runs must be refused.
        corpus_element = os.path.join(local_path, 'element')
        self.host.touch(corpus_element)
        self.set_running(
            self.fuzzer.package, self.fuzzer.executable, duration=10)
        self.assertError(
            lambda: self.corpus.add_from_host(local_path),
            'fake-package1/fake-target1 is running and must be stopped first.')
        self.host.sleep(10)

        # Valid: the element is copied (scp) to the device data path.
        added = self.corpus.add_from_host(local_path)
        self.assertEqual(len(added), 1)
        self.assertScpTo(
            corpus_element, self.ns.data_abspath(self.corpus.nspaths[0]))

    def test_add_from_gcs(self):
        # Note: this takes advantage of the fact that the FakeCLI always
        # returns the same name for temp_dir().
        with self.host.temp_dir() as temp_dir:
            gcs_url = 'gs://bucket'
            cmd = ['gsutil', '-m', 'cp', gcs_url + '/*', temp_dir.pathname]
            process = self.get_process(cmd)
            # A failing gsutil download must surface a user-actionable error.
            process.succeeds = False
            self.assertError(
                lambda: self.corpus.add_from_gcs(gcs_url),
                'Failed to download corpus from GCS.',
                'You can skip downloading from GCS with the "--local" flag.')
            # With gsutil succeeding, the downloaded element lands on device.
            process.succeeds = True
            corpus_element = os.path.join(temp_dir.pathname, 'element')
            self.host.touch(corpus_element)
            added = self.corpus.add_from_gcs(gcs_url)
            self.assertEqual(len(added), 1)
            self.assertRan(*cmd)
            self.assertScpTo(
                corpus_element, self.ns.data_abspath(self.corpus.nspaths[0]))

    def test_measure(self):
        # measure() returns (element count, total bytes);
        # 1 + 1728 == 1729 == 1000 + 729.
        self.touch_on_device(self.ns.data_abspath('corpus/deadbeef'), size=1000)
        self.touch_on_device(self.ns.data_abspath('corpus/feedface'), size=729)
        sizes = self.corpus.measure()
        self.assertEqual(sizes, (2, 1 + 1728))
# Allow running this test module directly with the stock unittest runner.
if __name__ == '__main__':
    unittest.main() | 0.476823 | 0.284862 |
import chumpy
import smpl.smpl_webuser.lbs
from smpl.smpl_webuser.posemapper import posemap
import scipy.sparse as sp
from chumpy.ch import MatVecMult
def ischumpy(x): return hasattr(x, 'dterms')
def verts_decorated(trans, pose,
                    v_template, J, weights, kintree_table, bs_style, f,
                    bs_type=None, posedirs=None, betas=None, shapedirs=None, want_Jtr=False):
    """Build an SMPL vertex model and decorate the result with its inputs.

    Applies (optional) shape blend shapes, (optional) pose blend shapes and
    LBS skinning, then attaches every input/intermediate term as an
    attribute of the returned chumpy object so callers can differentiate
    through them.
    """
    # All differentiable inputs must already be chumpy objects.
    for which in [trans, pose, v_template, weights, posedirs, betas, shapedirs]:
        if which is not None:
            assert ischumpy(which)

    v = v_template

    # Shape blend shapes: displace the template along shapedirs by betas.
    if shapedirs is not None:
        if betas is None:
            betas = chumpy.zeros(shapedirs.shape[-1])
        v_shaped = v + shapedirs.dot(betas)
    else:
        v_shaped = v

    # Pose blend shapes: pose-dependent corrective offsets.
    if posedirs is not None:
        v_posed = v_shaped + posedirs.dot(posemap(bs_type)(pose))
    else:
        v_posed = v_shaped

    v = v_posed

    if sp.issparse(J):
        # J is a sparse joint regressor: regress joint locations from the
        # shaped vertices one coordinate at a time.
        regressor = J
        J_tmpx = MatVecMult(regressor, v_shaped[:, 0])
        J_tmpy = MatVecMult(regressor, v_shaped[:, 1])
        J_tmpz = MatVecMult(regressor, v_shaped[:, 2])
        J = chumpy.vstack((J_tmpx, J_tmpy, J_tmpz)).T
    else:
        assert (ischumpy(J))

    assert (bs_style == 'lbs')
    # Bug fix: only 'smpl.smpl_webuser.lbs' is imported at module level, no
    # bare 'lbs' name is bound, so the original 'lbs.verts_core' raised
    # NameError.  Use the fully qualified name, as verts_core() below does.
    result, Jtr = smpl.smpl_webuser.lbs.verts_core(
        pose, v, J, weights, kintree_table, want_Jtr=True, xp=chumpy)

    # Apply the global translation to vertices and joints.
    tr = trans.reshape((1, 3))
    result = result + tr
    Jtr = Jtr + tr

    # Decorate the result with its inputs/intermediates for downstream use.
    result.trans = trans
    result.f = f
    result.pose = pose
    result.v_template = v_template
    result.J = J
    result.weights = weights
    result.kintree_table = kintree_table
    result.bs_style = bs_style
    result.bs_type = bs_type
    if posedirs is not None:
        result.posedirs = posedirs
        result.v_posed = v_posed
    if shapedirs is not None:
        result.shapedirs = shapedirs
        result.betas = betas
        result.v_shaped = v_shaped
    if want_Jtr:
        result.J_transformed = Jtr
    return result
def verts_core(pose, v, J, weights, kintree_table, bs_style, want_Jtr=False, xp=chumpy):
    """Thin wrapper around smpl.smpl_webuser.lbs.verts_core.

    When running with chumpy (xp == chumpy), every differentiable input
    must already be a chumpy object; only LBS skinning is supported.
    """
    if xp == chumpy:
        for term in (pose, v, J, weights):
            assert hasattr(term, 'dterms')
    assert bs_style == 'lbs'
    return smpl.smpl_webuser.lbs.verts_core(
        pose, v, J, weights, kintree_table, want_Jtr, xp)
import smpl.smpl_webuser.lbs
from smpl.smpl_webuser.posemapper import posemap
import scipy.sparse as sp
from chumpy.ch import MatVecMult
def ischumpy(x): return hasattr(x, 'dterms')
def verts_decorated(trans, pose,
                    v_template, J, weights, kintree_table, bs_style, f,
                    bs_type=None, posedirs=None, betas=None, shapedirs=None, want_Jtr=False):
    """Build an SMPL vertex model and decorate the result with its inputs.

    Applies (optional) shape blend shapes, (optional) pose blend shapes and
    LBS skinning, then attaches every input/intermediate term as an
    attribute of the returned chumpy object so callers can differentiate
    through them.
    """
    # All differentiable inputs must already be chumpy objects.
    for which in [trans, pose, v_template, weights, posedirs, betas, shapedirs]:
        if which is not None:
            assert ischumpy(which)

    v = v_template

    # Shape blend shapes: displace the template along shapedirs by betas.
    if shapedirs is not None:
        if betas is None:
            betas = chumpy.zeros(shapedirs.shape[-1])
        v_shaped = v + shapedirs.dot(betas)
    else:
        v_shaped = v

    # Pose blend shapes: pose-dependent corrective offsets.
    if posedirs is not None:
        v_posed = v_shaped + posedirs.dot(posemap(bs_type)(pose))
    else:
        v_posed = v_shaped

    v = v_posed

    if sp.issparse(J):
        # J is a sparse joint regressor: regress joint locations from the
        # shaped vertices one coordinate at a time.
        regressor = J
        J_tmpx = MatVecMult(regressor, v_shaped[:, 0])
        J_tmpy = MatVecMult(regressor, v_shaped[:, 1])
        J_tmpz = MatVecMult(regressor, v_shaped[:, 2])
        J = chumpy.vstack((J_tmpx, J_tmpy, J_tmpz)).T
    else:
        assert (ischumpy(J))

    assert (bs_style == 'lbs')
    # Bug fix: only 'smpl.smpl_webuser.lbs' is imported at module level, no
    # bare 'lbs' name is bound, so the original 'lbs.verts_core' raised
    # NameError.  Use the fully qualified name, as verts_core() below does.
    result, Jtr = smpl.smpl_webuser.lbs.verts_core(
        pose, v, J, weights, kintree_table, want_Jtr=True, xp=chumpy)

    # Apply the global translation to vertices and joints.
    tr = trans.reshape((1, 3))
    result = result + tr
    Jtr = Jtr + tr

    # Decorate the result with its inputs/intermediates for downstream use.
    result.trans = trans
    result.f = f
    result.pose = pose
    result.v_template = v_template
    result.J = J
    result.weights = weights
    result.kintree_table = kintree_table
    result.bs_style = bs_style
    result.bs_type = bs_type
    if posedirs is not None:
        result.posedirs = posedirs
        result.v_posed = v_posed
    if shapedirs is not None:
        result.shapedirs = shapedirs
        result.betas = betas
        result.v_shaped = v_shaped
    if want_Jtr:
        result.J_transformed = Jtr
    return result
def verts_core(pose, v, J, weights, kintree_table, bs_style, want_Jtr=False, xp=chumpy):
    """Thin wrapper around smpl.smpl_webuser.lbs.verts_core.

    When running with chumpy (xp == chumpy), every differentiable input
    must already be a chumpy object; only LBS skinning is supported.
    """
    if xp == chumpy:
        for term in (pose, v, J, weights):
            assert hasattr(term, 'dterms')
    assert bs_style == 'lbs'
    return smpl.smpl_webuser.lbs.verts_core(
        pose, v, J, weights, kintree_table, want_Jtr, xp)
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
import math
import torch
import torch.nn as nn
class _DynamicInputDenseBlock(nn.Module):
    """DenseNet-style block over a dynamic number of inputs.

    x[0] passes through unchanged; each module in conv_modules consumes
    the matching later input and its output is concatenated on the
    channel dimension.
    """

    def __init__(self, conv_modules, debug):
        """
        :param conv_modules: nn.ModuleList, one module per extra input
        :param debug: when True, print tensor sizes during forward
        """
        super(_DynamicInputDenseBlock, self).__init__()
        self.conv_modules = conv_modules
        self.debug = debug

    def forward(self, x):
        """
        Use the first element as raw input, and stream the rest of
        the inputs through the list of modules, then apply concatenation.
        expect x to be [identity, first input, second input, ..]
        and len(x) - len(self.conv_modules) = 1 for identity

        :param x: Input
        :return: Concatenation of the input with 1 or more module outputs
        """
        if self.debug:
            for i, t in enumerate(x):
                print("Current input size[{}]: {}".format(i, t.size()))

        # Init output
        out = x[0]

        # Apply all given modules and return output
        for calc, m in enumerate(self.conv_modules):
            # Compute the branch output once and reuse it; the original
            # evaluated m(x[calc + 1]) a second time for the debug print.
            branch = m(x[calc + 1])
            out = torch.cat([out, branch], 1)
            if self.debug:
                print("Working on input number: %s" % calc)
                print("Added: ", branch.size())
                print("Current out size {}".format(out.size()))
        return out
class MSDLayer(nn.Module):
    """One regular/transition layer of a Multi-Scale DenseNet (MSDNet)."""

    def __init__(self, in_channels, out_channels,
                 in_scales, out_scales, orig_scales, args):
        """
        Creates a regular/transition MSDLayer. this layer uses DenseNet like concatenation on each scale,
        and performs spatial reduction between scales. if input and output scales are different, than this
        class creates a transition layer and the first layer (with the largest spatial size) is dropped.

        :param in_channels: number of input channels
        :param out_channels: number of output channels
        :param in_scales: number of input scales
        :param out_scales: number of output scales
        :param orig_scales: number of scales in the first layer of the MSDNet
        :param args: other arguments (msd_bottleneck, msd_bottleneck_factor,
                     msd_growth_factor, msd_all_gcn, debug, ...)
        """
        super(MSDLayer, self).__init__()

        # Init vars
        self.current_channels = in_channels
        self.out_channels = out_channels
        self.in_scales = in_scales
        self.out_scales = out_scales
        self.orig_scales = orig_scales
        self.args = args
        self.bottleneck = args.msd_bottleneck
        self.bottleneck_factor = args.msd_bottleneck_factor
        self.growth_factor = self.args.msd_growth_factor
        self.debug = self.args.debug

        # Define Conv2d/GCN params: conv layer class, kernel size and padding.
        # NOTE(review): get_conv_params is defined elsewhere in this module.
        self.use_gcn = args.msd_all_gcn
        self.conv_l, self.ks, self.pad = get_conv_params(self.use_gcn, args)

        # Calculate number of scales to drop (transition) and the offset of
        # this layer's scales relative to the network's original scales.
        self.to_drop = in_scales - out_scales
        self.dropped = orig_scales - out_scales  # Use this as an offset
        self.subnets = self.get_subnets()

    def get_subnets(self):
        """
        Builds the different scales of the MSD network layer.

        :return: An nn.ModuleList of scale modules
        """
        subnets = nn.ModuleList()

        # If this is a transition layer
        if self.to_drop:
            # Create a reduced feature map for the first scale
            # self.dropped > 0 since out_scales < in_scales < orig_scales
            in_channels1 = self.current_channels *\
                self.growth_factor[self.dropped - 1]
            in_channels2 = self.current_channels *\
                self.growth_factor[self.dropped]
            out_channels = self.out_channels *\
                self.growth_factor[self.dropped]
            bn_width1 = self.bottleneck_factor[self.dropped - 1]
            bn_width2 = self.bottleneck_factor[self.dropped]
            subnets.append(self.build_down_densenet(in_channels1,
                                                    in_channels2,
                                                    out_channels,
                                                    self.bottleneck,
                                                    bn_width1,
                                                    bn_width2))
        else:
            # Create a normal first scale
            in_channels = self.current_channels *\
                self.growth_factor[self.dropped]
            out_channels = self.out_channels *\
                self.growth_factor[self.dropped]
            bn_width = self.bottleneck_factor[self.dropped]
            subnets.append(self.build_densenet(in_channels,
                                               out_channels,
                                               self.bottleneck,
                                               bn_width))

        # Build second+ scales: each consumes its own scale and the
        # (spatially larger) scale above it.
        for scale in range(1, self.out_scales):
            in_channels1 = self.current_channels *\
                self.growth_factor[self.dropped + scale - 1]
            in_channels2 = self.current_channels *\
                self.growth_factor[self.dropped + scale]
            out_channels = self.out_channels *\
                self.growth_factor[self.dropped + scale]
            bn_width1 = self.bottleneck_factor[self.dropped + scale - 1]
            bn_width2 = self.bottleneck_factor[self.dropped + scale]
            subnets.append(self.build_down_densenet(in_channels1,
                                                    in_channels2,
                                                    out_channels,
                                                    self.bottleneck,
                                                    bn_width1,
                                                    bn_width2))
        return subnets

    def build_down_densenet(self, in_channels1, in_channels2, out_channels,
                            bottleneck, bn_width1, bn_width2):
        """
        Builds a scale sub-network for scales 2 and up.

        :param in_channels1: number of same scale input channels
        :param in_channels2: number of upper scale input channels
        :param out_channels: number of output channels
        :param bottleneck: A flag to perform a channel dimension bottleneck
        :param bn_width1: bottleneck factor for the upper-scale (downsampled) branch
        :param bn_width2: bottleneck factor for the same-scale branch
        :return: A scale module
        """
        # Each branch produces half of the output channels; the dense block
        # concatenates them with the identity input.
        conv_module1 = self.convolve(in_channels1, int(out_channels/2), 'down',
                                     bottleneck, bn_width1)
        conv_module2 = self.convolve(in_channels2, int(out_channels/2), 'normal',
                                     bottleneck, bn_width2)
        conv_modules = [conv_module1, conv_module2]
        return _DynamicInputDenseBlock(nn.ModuleList(conv_modules),
                                       self.debug)

    def build_densenet(self, in_channels, out_channels, bottleneck, bn_width):
        """
        Builds a scale sub-network for the first layer

        :param in_channels: number of input channels
        :param out_channels: number of output channels
        :param bottleneck: A flag to perform a channel dimension bottleneck
        :param bn_width: The width of the bottleneck factor
        :return: A scale module
        """
        conv_module = self.convolve(in_channels, out_channels, 'normal',
                                    bottleneck, bn_width)
        return _DynamicInputDenseBlock(nn.ModuleList([conv_module]),
                                       self.debug)

    def convolve(self, in_channels, out_channels, conv_type,
                 bottleneck, bn_width=4):
        """
        Doing the main convolution of a specific scale in the
        MSD network

        :param in_channels: number of input channels
        :param out_channels: number of output channels
        :param conv_type: convolution type: 'normal' (stride 1) or 'down' (stride 2)
        :param bottleneck: A flag to perform a channel dimension bottleneck
        :param bn_width: The width of the bottleneck factor
        :return: A Sequential module of the main convolution
        """
        conv = nn.Sequential()
        tmp_channels = in_channels

        # Bottleneck before the convolution: 1x1 conv to at most
        # bn_width * out_channels channels.
        if bottleneck:
            tmp_channels = int(min([in_channels, bn_width * out_channels]))
            conv.add_module('Bottleneck_1x1', nn.Conv2d(in_channels,
                                                        tmp_channels,
                                                        kernel_size=1,
                                                        stride=1,
                                                        padding=0))
            conv.add_module('Bottleneck_BN', nn.BatchNorm2d(tmp_channels))
            conv.add_module('Bottleneck_ReLU', nn.ReLU(inplace=True))
        if conv_type == 'normal':
            conv.add_module('Spatial_forward', self.conv_l(tmp_channels,
                                                           out_channels,
                                                           kernel_size=self.ks,
                                                           stride=1,
                                                           padding=self.pad))
        elif conv_type == 'down':
            conv.add_module('Spatial_down', self.conv_l(tmp_channels, out_channels,
                                                        kernel_size=self.ks,
                                                        stride=2,
                                                        padding=self.pad))
        else:  # Leaving an option to change the main conv type
            raise NotImplementedError

        conv.add_module('BN_out', nn.BatchNorm2d(out_channels))
        conv.add_module('ReLU_out', nn.ReLU(inplace=True))
        return conv

    def forward(self, x):
        """Propagate a list of per-scale tensors through the layer.

        :param x: list of per-scale inputs (largest spatial size first)
        :return: list of per-scale outputs (length == out_scales)
        """
        cur_input = []
        outputs = []

        # Prepare the different scales' inputs of the
        # current transition/regular layer
        if self.to_drop:  # Transition
            for scale in range(0, self.out_scales):
                last_same_scale = x[self.to_drop + scale]
                last_upper_scale = x[self.to_drop + scale - 1]
                cur_input.append([last_same_scale,
                                  last_upper_scale,
                                  last_same_scale])
        else:  # Regular

            # Add first scale's input
            cur_input.append([x[0], x[0]])

            # Add second+ scales' input
            for scale in range(1, self.out_scales):
                last_same_scale = x[scale]
                last_upper_scale = x[scale - 1]
                cur_input.append([last_same_scale,
                                  last_upper_scale,
                                  last_same_scale])

        # Flow inputs in subnets and fill outputs
        for scale in range(0, self.out_scales):
            outputs.append(self.subnets[scale](cur_input[scale]))

        return outputs
class MSDFirstLayer(nn.Module):
    """First MSDNet layer: turns an input image into one feature map per scale."""

    def __init__(self, in_channels, out_channels, num_scales, args):
        """
        Creates the first layer of the MSD network, which takes
        an input tensor (image) and generates a list of size num_scales
        with deeper features with smaller (spatial) dimensions.

        :param in_channels: number of input channels to the first layer
        :param out_channels: number of output channels in the first scale
        :param num_scales: number of output scales in the first layer
        :param args: other arguments
        """
        super(MSDFirstLayer, self).__init__()

        # Init params
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_scales = num_scales
        self.args = args
        self.use_gcn = args.msd_gcn
        # Conv layer class, kernel size and padding.
        # NOTE(review): get_conv_params is defined elsewhere in this module.
        self.conv_l, self.ks, self.pad = get_conv_params(self.use_gcn, args)
        if self.use_gcn:
            print('| First layer with GCN |')
        else:
            print('| First layer without GCN |')
        self.subnets = self.create_modules()

    def create_modules(self):
        """Build one subnet per scale: a stride-1 conv for the first scale,
        then one stride-2 conv per additional (spatially smaller) scale."""

        # Create first scale features
        modules = nn.ModuleList()
        if 'cifar' in self.args.data:
            current_channels = int(self.out_channels *
                                   self.args.msd_growth_factor[0])

            current_m = nn.Sequential(
                self.conv_l(self.in_channels,
                            current_channels, kernel_size=self.ks,
                            stride=1, padding=self.pad),
                nn.BatchNorm2d(current_channels),
                nn.ReLU(inplace=True)
            )
            modules.append(current_m)
        else:
            # Only CIFAR-style datasets are supported here.
            raise NotImplementedError

        # Create second scale features and down
        for scale in range(1, self.num_scales):

            # Calculate desired output channels
            out_channels = int(self.out_channels *
                               self.args.msd_growth_factor[scale])

            # Use a strided convolution to create next scale features
            current_m = nn.Sequential(
                self.conv_l(current_channels, out_channels,
                            kernel_size=self.ks,
                            stride=2, padding=self.pad),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(inplace=True)
            )

            # Use the output channels size for the next scale
            current_channels = out_channels

            # Append module
            modules.append(current_m)

        return modules

    def forward(self, x):
        # One output tensor per scale; each scale is computed from the
        # previous (spatially larger) scale's output.
        output = [None] * self.num_scales
        current_input = x
        for scale in range(0, self.num_scales):

            # Use upper scale as an input
            if scale > 0:
                current_input = output[scale - 1]
            output[scale] = self.subnets[scale](current_input)
        return output
class Transition(nn.Sequential):
def __init__(self, channels_in, channels_out,
out_scales, offset, growth_factor, args):
"""
Performs 1x1 convolution to increase channels size after reducing a spatial size reduction
in transition layer.
:param channels_in: channels before the transition
:param channels_out: channels after reduction
:param out_scales: number of scales after the transition
:param offset: gap between original number of scales to out_scales
:param growth_factor: densenet channel growth factor
:return: A Parallel trainable array with the scales after channel
reduction
"""
super(Transition, self).__init__()
self.args = args
# Define a parallel stream for the different scales
self.scales = nn.ModuleList()
for i in range(0, out_scales):
cur_in = channels_in * growth_factor[offset + i]
cur_out = channels_out * growth_factor[offset + i]
self.scales.append(self.conv1x1(cur_in, cur_out))
def conv1x1(self, in_channels, out_channels):
"""
Inner function to define the basic operation
:param in_channels: number of input channels
:param out_channels: number of output channels
:return: A Sequential module to perform 1x1 convolution
"""
scale = nn.Sequential(
nn.Conv2d(in_channels, out_channels,
kernel_size=1, stride=1, padding=0),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
return scale
def forward(self, x):
"""
Propegate output through different scales.
:param x: input to the transition layer
:return: list of scales' outputs
"""
if self.args.debug:
print ("In transition forward!")
output = []
for scale, scale_net in enumerate(self.scales):
if self.args.debug:
print ("Size of x[{}]: {}".format(scale, x[scale].size()))
print ("scale_net[0]: {}".format(scale_net[0]))
output.append(scale_net(x[scale]))
return output
class CifarClassifier(nn.Module):
def __init__(self, num_channels, num_classes):
"""
Classifier of a cifar10/100 image.
:param num_channels: Number of input channels to the classifier
:param num_classes: Number of classes to classify
"""
super(CifarClassifier, self).__init__()
self.inner_channels = 128
self.features = nn.Sequential(
nn.Conv2d(num_channels, self.inner_channels, kernel_size=3,
stride=2, padding=1),
nn.BatchNorm2d(self.inner_channels),
nn.ReLU(inplace=True),
nn.Conv2d(self.inner_channels, self.inner_channels, kernel_size=3,
stride=2, padding=1),
nn.BatchNorm2d(self.inner_channels),
nn.ReLU(inplace=True),
nn.AvgPool2d(2, 2)
)
self.classifier = nn.Linear(self.inner_channels, num_classes)
def forward(self, x):
"""
Drive features to classification.
:param x: Input of the lowest scale of the last layer of
the last block
:return: Cifar object classification result
"""
x = self.features(x)
x = x.view(x.size(0), self.inner_channels)
x = self.classifier(x)
return x
class GCN(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=7, stride=1, padding=1):
"""
Global convolutional network module implementation
:param in_channels: number of input channels
:param out_channels: number of output channels
:param kernel_size: size of conv kernel
:param stride: stride to use in the conv parts
:param padding: padding to use in the conv parts
:param share_weights: use shared weights for every side of GCN
"""
super(GCN, self).__init__()
self.conv_l1 = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1),
padding=(padding, 0), stride=(stride, 1))
self.conv_l2 = nn.Conv2d(out_channels, out_channels, kernel_size=(1, kernel_size),
padding=(0, padding), stride=(1, stride))
self.conv_r1 = nn.Conv2d(in_channels, out_channels, kernel_size=(1, kernel_size),
padding=(0, padding), stride=(1, stride))
self.conv_r2 = nn.Conv2d(out_channels, out_channels, kernel_size=(kernel_size, 1),
padding=(padding, 0), stride=(stride, 1))
def forward(self, x):
if GCN.share_weights:
# Prepare input and state
self.conv_l1.shared = 2
self.conv_l2.shared = 2
xt = x.transpose(2,3)
# Left convs
xl = self.conv_l1(x)
xl = self.conv_l2(xl)
# Right convs
xrt = self.conv_l1(xt)
xrt = self.conv_l2(xrt)
xr = xrt.transpose(2,3)
else:
# Left convs
xl = self.conv_l1(x)
xl = self.conv_l2(xl)
# Right convs
xr = self.conv_r1(x)
xr = self.conv_r2(xr)
return xl + xr
def get_conv_params(use_gcn, args):
"""
Calculates and returns the convulotion parameters
:param use_gcn: flag to use GCN or not
:param args: user defined arguments
:return: convolution type, kernel size and padding
"""
if use_gcn:
GCN.share_weights = args.msd_share_weights
conv_l = GCN
ks = args.msd_gcn_kernel
else:
conv_l = nn.Conv2d
ks = args.msd_kernel
pad = int(math.floor(ks / 2))
return conv_l, ks, pad | models/msdnet_layers.py | from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
import math
import torch
import torch.nn as nn
class _DynamicInputDenseBlock(nn.Module):
def __init__(self, conv_modules, debug):
super(_DynamicInputDenseBlock, self).__init__()
self.conv_modules = conv_modules
self.debug = debug
def forward(self, x):
"""
Use the first element as raw input, and stream the rest of
the inputs through the list of modules, then apply concatenation.
expect x to be [identity, first input, second input, ..]
and len(x) - len(self.conv_modules) = 1 for identity
:param x: Input
:return: Concatenation of the input with 1 or more module outputs
"""
if self.debug:
for i, t in enumerate(x):
print("Current input size[{}]: {}".format(i,
t.size()))
# Init output
out = x[0]
# Apply all given modules and return output
for calc, m in enumerate(self.conv_modules):
out = torch.cat([out, m(x[calc + 1])], 1)
if self.debug:
print("Working on input number: %s" % calc)
print("Added: ", m(x[calc + 1]).size())
print("Current out size {}".format(out.size()))
return out
class MSDLayer(nn.Module):
def __init__(self, in_channels, out_channels,
in_scales, out_scales, orig_scales, args):
"""
Creates a regular/transition MSDLayer. this layer uses DenseNet like concatenation on each scale,
and performs spatial reduction between scales. if input and output scales are different, than this
class creates a transition layer and the first layer (with the largest spatial size) is dropped.
:param current_channels: number of input channels
:param in_scales: number of input scales
:param out_scales: number of output scales
:param orig_scales: number of scales in the first layer of the MSDNet
:param args: other arguments
"""
super(MSDLayer, self).__init__()
# Init vars
self.current_channels = in_channels
self.out_channels = out_channels
self.in_scales = in_scales
self.out_scales = out_scales
self.orig_scales = orig_scales
self.args = args
self.bottleneck = args.msd_bottleneck
self.bottleneck_factor = args.msd_bottleneck_factor
self.growth_factor = self.args.msd_growth_factor
self.debug = self.args.debug
# Define Conv2d/GCN params
self.use_gcn = args.msd_all_gcn
self.conv_l, self.ks, self.pad = get_conv_params(self.use_gcn, args)
# Calculate number of channels to drop and number of
# all dropped channels
self.to_drop = in_scales - out_scales
self.dropped = orig_scales - out_scales # Use this as an offset
self.subnets = self.get_subnets()
def get_subnets(self):
"""
Builds the different scales of the MSD network layer.
:return: A list of scale modules
"""
subnets = nn.ModuleList()
# If this is a transition layer
if self.to_drop:
# Create a reduced feature map for the first scale
# self.dropped > 0 since out_scales < in_scales < orig_scales
in_channels1 = self.current_channels *\
self.growth_factor[self.dropped - 1]
in_channels2 = self.current_channels *\
self.growth_factor[self.dropped]
out_channels = self.out_channels *\
self.growth_factor[self.dropped]
bn_width1 = self.bottleneck_factor[self.dropped - 1]
bn_width2 = self.bottleneck_factor[self.dropped]
subnets.append(self.build_down_densenet(in_channels1,
in_channels2,
out_channels,
self.bottleneck,
bn_width1,
bn_width2))
else:
# Create a normal first scale
in_channels = self.current_channels *\
self.growth_factor[self.dropped]
out_channels = self.out_channels *\
self.growth_factor[self.dropped]
bn_width = self.bottleneck_factor[self.dropped]
subnets.append(self.build_densenet(in_channels,
out_channels,
self.bottleneck,
bn_width))
# Build second+ scales
for scale in range(1, self.out_scales):
in_channels1 = self.current_channels *\
self.growth_factor[self.dropped + scale - 1]
in_channels2 = self.current_channels *\
self.growth_factor[self.dropped + scale]
out_channels = self.out_channels *\
self.growth_factor[self.dropped + scale]
bn_width1 = self.bottleneck_factor[self.dropped + scale - 1]
bn_width2 = self.bottleneck_factor[self.dropped + scale]
subnets.append(self.build_down_densenet(in_channels1,
in_channels2,
out_channels,
self.bottleneck,
bn_width1,
bn_width2))
return subnets
def build_down_densenet(self, in_channels1, in_channels2, out_channels,
bottleneck, bn_width1, bn_width2):
"""
Builds a scale sub-network for scales 2 and up.
:param in_channels1: number of same scale input channels
:param in_channels2: number of upper scale input channels
:param out_channels: number of output channels
:param bottleneck: A flag to perform a channel dimension bottleneck
:param bn_width1: The first input width of the bottleneck factor
:param bn_width2: The first input width of the bottleneck factor
:return: A scale module
"""
conv_module1 = self.convolve(in_channels1, int(out_channels/2), 'down',
bottleneck, bn_width1)
conv_module2 = self.convolve(in_channels2, int(out_channels/2), 'normal',
bottleneck, bn_width2)
conv_modules = [conv_module1, conv_module2]
return _DynamicInputDenseBlock(nn.ModuleList(conv_modules),
self.debug)
def build_densenet(self, in_channels, out_channels, bottleneck, bn_width):
"""
Builds a scale sub-network for the first layer
:param in_channels: number of input channels
:param out_channels: number of output channels
:param bottleneck: A flag to perform a channel dimension bottleneck
:param bn_width: The width of the bottleneck factor
:return: A scale module
"""
conv_module = self.convolve(in_channels, out_channels, 'normal',
bottleneck, bn_width)
return _DynamicInputDenseBlock(nn.ModuleList([conv_module]),
self.debug)
def convolve(self, in_channels, out_channels, conv_type,
bottleneck, bn_width=4):
"""
Doing the main convolution of a specific scale in the
MSD network
:param in_channels: number of input channels
:param out_channels: number of output channels
:param conv_type: convolution type
:param bottleneck: A flag to perform a channel dimension bottleneck
:param bn_width: The width of the bottleneck factor
:return: A Sequential module of the main convolution
"""
conv = nn.Sequential()
tmp_channels = in_channels
# Bottleneck before the convolution
if bottleneck:
tmp_channels = int(min([in_channels, bn_width * out_channels]))
conv.add_module('Bottleneck_1x1', nn.Conv2d(in_channels,
tmp_channels,
kernel_size=1,
stride=1,
padding=0))
conv.add_module('Bottleneck_BN', nn.BatchNorm2d(tmp_channels))
conv.add_module('Bottleneck_ReLU', nn.ReLU(inplace=True))
if conv_type == 'normal':
conv.add_module('Spatial_forward', self.conv_l(tmp_channels,
out_channels,
kernel_size=self.ks,
stride=1,
padding=self.pad))
elif conv_type == 'down':
conv.add_module('Spatial_down', self.conv_l(tmp_channels, out_channels,
kernel_size=self.ks,
stride=2,
padding=self.pad))
else: # Leaving an option to change the main conv type
raise NotImplementedError
conv.add_module('BN_out', nn.BatchNorm2d(out_channels))
conv.add_module('ReLU_out', nn.ReLU(inplace=True))
return conv
def forward(self, x):
cur_input = []
outputs = []
# Prepare the different scales' inputs of the
# current transition/regular layer
if self.to_drop: # Transition
for scale in range(0, self.out_scales):
last_same_scale = x[self.to_drop + scale]
last_upper_scale = x[self.to_drop + scale - 1]
cur_input.append([last_same_scale,
last_upper_scale,
last_same_scale])
else: # Regular
# Add first scale's input
cur_input.append([x[0], x[0]])
# Add second+ scales' input
for scale in range(1, self.out_scales):
last_same_scale = x[scale]
last_upper_scale = x[scale - 1]
cur_input.append([last_same_scale,
last_upper_scale,
last_same_scale])
# Flow inputs in subnets and fill outputs
for scale in range(0, self.out_scales):
outputs.append(self.subnets[scale](cur_input[scale]))
return outputs
class MSDFirstLayer(nn.Module):
def __init__(self, in_channels, out_channels, num_scales, args):
"""
Creates the first layer of the MSD network, which takes
an input tensor (image) and generates a list of size num_scales
with deeper features with smaller (spatial) dimensions.
:param in_channels: number of input channels to the first layer
:param out_channels: number of output channels in the first scale
:param num_scales: number of output scales in the first layer
:param args: other arguments
"""
super(MSDFirstLayer, self).__init__()
# Init params
self.in_channels = in_channels
self.out_channels = out_channels
self.num_scales = num_scales
self.args = args
self.use_gcn = args.msd_gcn
self.conv_l, self.ks, self.pad = get_conv_params(self.use_gcn, args)
if self.use_gcn:
print('| First layer with GCN |')
else:
print('| First layer without GCN |')
self.subnets = self.create_modules()
def create_modules(self):
# Create first scale features
modules = nn.ModuleList()
if 'cifar' in self.args.data:
current_channels = int(self.out_channels *
self.args.msd_growth_factor[0])
current_m = nn.Sequential(
self.conv_l(self.in_channels,
current_channels, kernel_size=self.ks,
stride=1, padding=self.pad),
nn.BatchNorm2d(current_channels),
nn.ReLU(inplace=True)
)
modules.append(current_m)
else:
raise NotImplementedError
# Create second scale features and down
for scale in range(1, self.num_scales):
# Calculate desired output channels
out_channels = int(self.out_channels *
self.args.msd_growth_factor[scale])
# Use a strided convolution to create next scale features
current_m = nn.Sequential(
self.conv_l(current_channels, out_channels,
kernel_size=self.ks,
stride=2, padding=self.pad),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
# Use the output channels size for the next scale
current_channels = out_channels
# Append module
modules.append(current_m)
return modules
def forward(self, x):
output = [None] * self.num_scales
current_input = x
for scale in range(0, self.num_scales):
# Use upper scale as an input
if scale > 0:
current_input = output[scale-1]
output[scale] = self.subnets[scale](current_input)
return output
class Transition(nn.Sequential):
def __init__(self, channels_in, channels_out,
out_scales, offset, growth_factor, args):
"""
Performs 1x1 convolution to increase channels size after reducing a spatial size reduction
in transition layer.
:param channels_in: channels before the transition
:param channels_out: channels after reduction
:param out_scales: number of scales after the transition
:param offset: gap between original number of scales to out_scales
:param growth_factor: densenet channel growth factor
:return: A Parallel trainable array with the scales after channel
reduction
"""
super(Transition, self).__init__()
self.args = args
# Define a parallel stream for the different scales
self.scales = nn.ModuleList()
for i in range(0, out_scales):
cur_in = channels_in * growth_factor[offset + i]
cur_out = channels_out * growth_factor[offset + i]
self.scales.append(self.conv1x1(cur_in, cur_out))
def conv1x1(self, in_channels, out_channels):
"""
Inner function to define the basic operation
:param in_channels: number of input channels
:param out_channels: number of output channels
:return: A Sequential module to perform 1x1 convolution
"""
scale = nn.Sequential(
nn.Conv2d(in_channels, out_channels,
kernel_size=1, stride=1, padding=0),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
return scale
def forward(self, x):
"""
Propegate output through different scales.
:param x: input to the transition layer
:return: list of scales' outputs
"""
if self.args.debug:
print ("In transition forward!")
output = []
for scale, scale_net in enumerate(self.scales):
if self.args.debug:
print ("Size of x[{}]: {}".format(scale, x[scale].size()))
print ("scale_net[0]: {}".format(scale_net[0]))
output.append(scale_net(x[scale]))
return output
class CifarClassifier(nn.Module):
def __init__(self, num_channels, num_classes):
"""
Classifier of a cifar10/100 image.
:param num_channels: Number of input channels to the classifier
:param num_classes: Number of classes to classify
"""
super(CifarClassifier, self).__init__()
self.inner_channels = 128
self.features = nn.Sequential(
nn.Conv2d(num_channels, self.inner_channels, kernel_size=3,
stride=2, padding=1),
nn.BatchNorm2d(self.inner_channels),
nn.ReLU(inplace=True),
nn.Conv2d(self.inner_channels, self.inner_channels, kernel_size=3,
stride=2, padding=1),
nn.BatchNorm2d(self.inner_channels),
nn.ReLU(inplace=True),
nn.AvgPool2d(2, 2)
)
self.classifier = nn.Linear(self.inner_channels, num_classes)
def forward(self, x):
"""
Drive features to classification.
:param x: Input of the lowest scale of the last layer of
the last block
:return: Cifar object classification result
"""
x = self.features(x)
x = x.view(x.size(0), self.inner_channels)
x = self.classifier(x)
return x
class GCN(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=7, stride=1, padding=1):
"""
Global convolutional network module implementation
:param in_channels: number of input channels
:param out_channels: number of output channels
:param kernel_size: size of conv kernel
:param stride: stride to use in the conv parts
:param padding: padding to use in the conv parts
:param share_weights: use shared weights for every side of GCN
"""
super(GCN, self).__init__()
self.conv_l1 = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1),
padding=(padding, 0), stride=(stride, 1))
self.conv_l2 = nn.Conv2d(out_channels, out_channels, kernel_size=(1, kernel_size),
padding=(0, padding), stride=(1, stride))
self.conv_r1 = nn.Conv2d(in_channels, out_channels, kernel_size=(1, kernel_size),
padding=(0, padding), stride=(1, stride))
self.conv_r2 = nn.Conv2d(out_channels, out_channels, kernel_size=(kernel_size, 1),
padding=(padding, 0), stride=(stride, 1))
def forward(self, x):
if GCN.share_weights:
# Prepare input and state
self.conv_l1.shared = 2
self.conv_l2.shared = 2
xt = x.transpose(2,3)
# Left convs
xl = self.conv_l1(x)
xl = self.conv_l2(xl)
# Right convs
xrt = self.conv_l1(xt)
xrt = self.conv_l2(xrt)
xr = xrt.transpose(2,3)
else:
# Left convs
xl = self.conv_l1(x)
xl = self.conv_l2(xl)
# Right convs
xr = self.conv_r1(x)
xr = self.conv_r2(xr)
return xl + xr
def get_conv_params(use_gcn, args):
"""
Calculates and returns the convulotion parameters
:param use_gcn: flag to use GCN or not
:param args: user defined arguments
:return: convolution type, kernel size and padding
"""
if use_gcn:
GCN.share_weights = args.msd_share_weights
conv_l = GCN
ks = args.msd_gcn_kernel
else:
conv_l = nn.Conv2d
ks = args.msd_kernel
pad = int(math.floor(ks / 2))
return conv_l, ks, pad | 0.901958 | 0.410343 |
from typing import Callable
import cv2
import numpy as np
# how much to crop the image to find the biggest blob
# (this is to avoid picking a biggest blob that is not the worm)
_CROP_PERCENT = 0.15
class ConstantThreshold:
"""
Threshold function that always returns the same threshold
"""
def __init__(self, threshold_value):
self.threshold_value = threshold_value
def __call__(self, frame: np.ndarray) -> int:
return self.threshold_value
class OtsuThreshold(object):
"""
Calculates automatic Otsu threshold on the blurred frame
"""
def __init__(self, blur_kernel):
"""
Creates an Otsu threshold operation with a preprocessing gaussian blur
:param blur_kernel: Gaussian Kernel Size for the blur operation before the Otsu threshold method
to split background and foreground. [height width]. height and width should be odd and can have different values.
"""
self.blur_kernel = blur_kernel
def __call__(self, frame: np.ndarray) -> int:
blurred_frame = cv2.GaussianBlur(frame, self.blur_kernel, 0)
blurred_frame[frame == 0] = 0
background_threshold, _ = cv2.threshold(
blurred_frame[blurred_frame > 0],
0,
255,
cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU,
)
return background_threshold
def segment_foreground(
frame: np.ndarray,
foreground_close_struct_element,
foreground_dilate_struct_element,
threshold_fn: Callable[[np.ndarray], int],
):
"""
Processes a frame to isolate the object of interest (worm) from the background
:param frame: image to process
:param foreground_close_struct_element: morphological element to close holes in the foreground mask
:param foreground_dilate_struct_element: morphological element to expand the foreground mask
:param threshold_fn: function that will return the threshold to separate forefround from background in a frame
:return: segmentation mask with values of 1 for the worm object and 0 for the background,
and average value of the background pixels
"""
# find the threshold to separate foreground from background
background_threshold = threshold_fn(frame)
# use the threshold to deduce background and foreground masks, fill in holes
foreground_mask = (frame > 0).astype(np.uint8) * (frame < background_threshold).astype(np.uint8)
foreground_mask = cv2.morphologyEx(foreground_mask, cv2.MORPH_CLOSE, foreground_close_struct_element)
background_mask = ((frame > 0).astype(np.uint8) - foreground_mask) > 0
# calculate the average background color
background_values = frame[background_mask]
background_color = int(np.mean(background_values)) if len(background_values) > 0 else 0
background_color = frame.dtype.type(background_color)
# process the foreground mask to eliminate non worm objects
# use connected components to find blobs, but focus on the center of the image to find the biggest
# modify foreground_mask to only show the worm object
nb_labels, labels, stats, _ = cv2.connectedComponentsWithStats(foreground_mask)
labels_crop_size = int(_CROP_PERCENT * max(foreground_mask.shape))
labels_cropped = labels[
labels_crop_size : foreground_mask.shape[0] - labels_crop_size,
labels_crop_size : foreground_mask.shape[1] - labels_crop_size,
]
if nb_labels == 1:
foreground_mask.fill(0)
foreground_objects_sizes = [len(np.where(labels_cropped == l)[0]) for l in range(1, nb_labels)]
if len(foreground_objects_sizes) > 0:
biggest_blob_label = np.argmax(foreground_objects_sizes) + 1
foreground_mask[labels != biggest_blob_label] = 0
# add a little padding to the foreground mask
foreground_mask = cv2.dilate(foreground_mask, foreground_dilate_struct_element)
return foreground_mask, background_color | wormpose/dataset/image_processing/image_utils.py | from typing import Callable
import cv2
import numpy as np
# how much to crop the image to find the biggest blob
# (this is to avoid picking a biggest blob that is not the worm)
_CROP_PERCENT = 0.15
class ConstantThreshold:
"""
Threshold function that always returns the same threshold
"""
def __init__(self, threshold_value):
self.threshold_value = threshold_value
def __call__(self, frame: np.ndarray) -> int:
return self.threshold_value
class OtsuThreshold(object):
"""
Calculates automatic Otsu threshold on the blurred frame
"""
def __init__(self, blur_kernel):
"""
Creates an Otsu threshold operation with a preprocessing gaussian blur
:param blur_kernel: Gaussian Kernel Size for the blur operation before the Otsu threshold method
to split background and foreground. [height width]. height and width should be odd and can have different values.
"""
self.blur_kernel = blur_kernel
def __call__(self, frame: np.ndarray) -> int:
blurred_frame = cv2.GaussianBlur(frame, self.blur_kernel, 0)
blurred_frame[frame == 0] = 0
background_threshold, _ = cv2.threshold(
blurred_frame[blurred_frame > 0],
0,
255,
cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU,
)
return background_threshold
def segment_foreground(
frame: np.ndarray,
foreground_close_struct_element,
foreground_dilate_struct_element,
threshold_fn: Callable[[np.ndarray], int],
):
"""
Processes a frame to isolate the object of interest (worm) from the background
:param frame: image to process
:param foreground_close_struct_element: morphological element to close holes in the foreground mask
:param foreground_dilate_struct_element: morphological element to expand the foreground mask
:param threshold_fn: function that will return the threshold to separate forefround from background in a frame
:return: segmentation mask with values of 1 for the worm object and 0 for the background,
and average value of the background pixels
"""
# find the threshold to separate foreground from background
background_threshold = threshold_fn(frame)
# use the threshold to deduce background and foreground masks, fill in holes
foreground_mask = (frame > 0).astype(np.uint8) * (frame < background_threshold).astype(np.uint8)
foreground_mask = cv2.morphologyEx(foreground_mask, cv2.MORPH_CLOSE, foreground_close_struct_element)
background_mask = ((frame > 0).astype(np.uint8) - foreground_mask) > 0
# calculate the average background color
background_values = frame[background_mask]
background_color = int(np.mean(background_values)) if len(background_values) > 0 else 0
background_color = frame.dtype.type(background_color)
# process the foreground mask to eliminate non worm objects
# use connected components to find blobs, but focus on the center of the image to find the biggest
# modify foreground_mask to only show the worm object
nb_labels, labels, stats, _ = cv2.connectedComponentsWithStats(foreground_mask)
labels_crop_size = int(_CROP_PERCENT * max(foreground_mask.shape))
labels_cropped = labels[
labels_crop_size : foreground_mask.shape[0] - labels_crop_size,
labels_crop_size : foreground_mask.shape[1] - labels_crop_size,
]
if nb_labels == 1:
foreground_mask.fill(0)
foreground_objects_sizes = [len(np.where(labels_cropped == l)[0]) for l in range(1, nb_labels)]
if len(foreground_objects_sizes) > 0:
biggest_blob_label = np.argmax(foreground_objects_sizes) + 1
foreground_mask[labels != biggest_blob_label] = 0
# add a little padding to the foreground mask
foreground_mask = cv2.dilate(foreground_mask, foreground_dilate_struct_element)
return foreground_mask, background_color | 0.94323 | 0.730073 |
import asyncio
import logging
import sys
from typing import Sequence
import colorlog # type: ignore
from .list_tasks import list_bare, list_tasks
from .run_tasks import run_tasks
from ..config import ToxConfig, load as load_config
from ..config.cli import get_logging
ROOT_LOGGER = logging.getLogger()
LOGGER = logging.getLogger('main')
def _setup_logging(verbose: str, quiet: bool, logging_fmt: str) -> None:
"""Setup logging."""
for log_handler in list(ROOT_LOGGER.handlers): # remove handlers of libraries
ROOT_LOGGER.removeHandler(log_handler)
if quiet:
ROOT_LOGGER.addHandler(logging.NullHandler())
else:
level = getattr(logging, verbose)
fmt = f'%(log_color)s{logging_fmt}'
formatter = colorlog.ColoredFormatter(fmt)
stream_handler = logging.StreamHandler(stream=sys.stderr)
stream_handler.setLevel(level)
ROOT_LOGGER.setLevel(level)
stream_handler.setFormatter(formatter)
ROOT_LOGGER.addHandler(stream_handler)
LOGGER.debug('task logging to %s', logging.getLevelName(level))
def get_event_loop() -> asyncio.AbstractEventLoop:
if sys.platform == 'win32':
return asyncio.ProactorEventLoop() # on windows IO needs this
return asyncio.new_event_loop() # default on UNIX is fine
def main(argv: Sequence[str]) -> int:
_setup_logging(*get_logging(argv))
loop = get_event_loop()
LOGGER.debug('event loop %r', loop)
# noinspection PyBroadException
try:
asyncio.set_event_loop(loop)
return loop.run_until_complete(execute(argv))
except SystemExit as exception:
return exception.code
except Exception:
LOGGER.exception('')
return -1
finally:
loop.close()
async def execute(argv: Sequence[str]) -> int:
# noinspection PyUnusedLocal
result: int = 1
config: ToxConfig = await load_config(argv)
if config.action == 'run':
result = await run_tasks(config, LOGGER)
elif config.action == 'list':
result = await list_tasks(config, LOGGER)
elif config.action == 'list-bare':
result = await list_bare(config.tasks, LOGGER)
elif config.action == 'list-default-bare':
result = await list_bare(config.default_tasks, LOGGER)
return result | src/toxn/evaluate/__init__.py |
import asyncio
import logging
import sys
from typing import Sequence
import colorlog # type: ignore
from .list_tasks import list_bare, list_tasks
from .run_tasks import run_tasks
from ..config import ToxConfig, load as load_config
from ..config.cli import get_logging
ROOT_LOGGER = logging.getLogger()
LOGGER = logging.getLogger('main')
def _setup_logging(verbose: str, quiet: bool, logging_fmt: str) -> None:
"""Setup logging."""
for log_handler in list(ROOT_LOGGER.handlers): # remove handlers of libraries
ROOT_LOGGER.removeHandler(log_handler)
if quiet:
ROOT_LOGGER.addHandler(logging.NullHandler())
else:
level = getattr(logging, verbose)
fmt = f'%(log_color)s{logging_fmt}'
formatter = colorlog.ColoredFormatter(fmt)
stream_handler = logging.StreamHandler(stream=sys.stderr)
stream_handler.setLevel(level)
ROOT_LOGGER.setLevel(level)
stream_handler.setFormatter(formatter)
ROOT_LOGGER.addHandler(stream_handler)
LOGGER.debug('task logging to %s', logging.getLevelName(level))
def get_event_loop() -> asyncio.AbstractEventLoop:
if sys.platform == 'win32':
return asyncio.ProactorEventLoop() # on windows IO needs this
return asyncio.new_event_loop() # default on UNIX is fine
def main(argv: Sequence[str]) -> int:
_setup_logging(*get_logging(argv))
loop = get_event_loop()
LOGGER.debug('event loop %r', loop)
# noinspection PyBroadException
try:
asyncio.set_event_loop(loop)
return loop.run_until_complete(execute(argv))
except SystemExit as exception:
return exception.code
except Exception:
LOGGER.exception('')
return -1
finally:
loop.close()
async def execute(argv: Sequence[str]) -> int:
# noinspection PyUnusedLocal
result: int = 1
config: ToxConfig = await load_config(argv)
if config.action == 'run':
result = await run_tasks(config, LOGGER)
elif config.action == 'list':
result = await list_tasks(config, LOGGER)
elif config.action == 'list-bare':
result = await list_bare(config.tasks, LOGGER)
elif config.action == 'list-default-bare':
result = await list_bare(config.default_tasks, LOGGER)
return result | 0.488771 | 0.08152 |
import unittest
import numpy
import cupy
from cupy import testing
@testing.parameterize(
# array only
{'shape': (2, 3, 4), 'slices': numpy.array(-1), 'value': 1},
{'shape': (2, 3, 4), 'slices': numpy.array([1, 0]), 'value': 1},
{'shape': (2, 3, 4), 'slices': (slice(None), [1, 2]), 'value': 1},
{'shape': (3, 4, 5),
'slices': (slice(None), [[1, 2], [0, -1]],), 'value': 1},
{'shape': (3, 4, 5),
'slices': (slice(None), slice(None), [[1, 2], [0, 3]]), 'value': 1},
# slice and array
{'shape': (3, 4, 5),
'slices': (slice(None), slice(1, 2), [[1, 3], [0, 2]]), 'value': 1},
# None and array
{'shape': (3, 4, 5),
'slices': (None, [1, -1]), 'value': 1},
{'shape': (3, 4, 5),
'slices': (None, [1, -1], None), 'value': 1},
{'shape': (3, 4, 5),
'slices': (None, None, None, [1, -1]), 'value': 1},
# None, slice and array
{'shape': (3, 4, 5),
'slices': (slice(0, 1), None, [1, -1]), 'value': 1},
{'shape': (3, 4, 5),
'slices': (slice(0, 1), slice(1, 2), [1, -1]), 'value': 1},
{'shape': (3, 4, 5),
'slices': (slice(0, 1), None, slice(1, 2), [1, -1]), 'value': 1},
# broadcasting
{'shape': (3, 4, 5), 'slices': (slice(None), [[1, 2], [0, -1]],),
'value': numpy.arange(3 * 2 * 2 * 5).reshape(3, 2, 2, 5)},
)
@testing.gpu
class TestScatterAddNoDuplicate(unittest.TestCase):
@testing.for_dtypes([numpy.float32, numpy.int32])
@testing.numpy_cupy_array_equal()
def test_scatter_add(self, xp, dtype):
a = xp.zeros(self.shape, dtype)
if xp is cupy:
a.scatter_add(self.slices, self.value)
else:
a[self.slices] = a[self.slices] + self.value
return a
@testing.parameterize(
    {'shape': (2, 3), 'slices': ([1, 1], slice(None)), 'value': 1,
     'expected': numpy.array([[0, 0, 0], [2, 2, 2]])},
    {'shape': (2, 3), 'slices': ([1, 0, 1], slice(None)), 'value': 1,
     'expected': numpy.array([[1, 1, 1], [2, 2, 2]])},
    {'shape': (2, 3), 'slices': (slice(1, 2), [1, 0, 1]), 'value': 1,
     'expected': numpy.array([[0, 0, 0], [1, 2, 0]])},
)
@testing.gpu
class TestScatterAddDuplicateVectorValue(unittest.TestCase):
    """scatter_add with duplicate indices: repeated hits must accumulate."""

    @testing.for_dtypes([numpy.float32, numpy.int32])
    def test_scatter_add(self, dtype):
        # No NumPy comparison here: fancy-index assignment would not
        # accumulate duplicate indices, so expectations are spelled out
        # by hand in the parameterization.
        a = cupy.zeros(self.shape, dtype)
        a.scatter_add(self.slices, self.value)
        numpy.testing.assert_almost_equal(a.get(), self.expected)
@testing.gpu
class TestScatterAdd(unittest.TestCase):
    """Miscellaneous scatter_add argument-handling tests."""

    @testing.for_dtypes([numpy.float32, numpy.int32])
    def test_scatter_add_cupy_arguments(self, dtype):
        # Index arrays and the added value may themselves be cupy ndarrays.
        shape = (2, 3)
        a = cupy.zeros(shape, dtype)
        slices = (cupy.array([1, 1]), slice(None))
        a.scatter_add(slices, cupy.array(1.))
        testing.assert_array_equal(
            a, cupy.array([[0., 0., 0.], [2., 2., 2.]], dtype))

    @testing.for_dtypes_combination(
        [numpy.float32, numpy.int32, numpy.uint32, numpy.uint64,
         numpy.ulonglong], names=['src_dtype', 'dst_dtype'])
    def test_scatter_add_different_dtypes(self, src_dtype, dst_dtype):
        # The value's dtype may differ from the target array's dtype.
        # (Renamed from 'test_scatter_add_differnt_dtypes': typo fix.
        # The trailing line was also fused with dataset metadata; removed.)
        shape = (2, 3)
        a = cupy.zeros(shape, dtype=src_dtype)
        value = cupy.array(1, dtype=dst_dtype)
        slices = ([1, 1], slice(None))
        a.scatter_add(slices, value)
        numpy.testing.assert_almost_equal(
            a.get(),
            numpy.array([[0, 0, 0], [2, 2, 2]], dtype=src_dtype))
import numpy
import cupy
from cupy import testing
@testing.parameterize(
    # array only
    {'shape': (2, 3, 4), 'slices': numpy.array(-1), 'value': 1},
    {'shape': (2, 3, 4), 'slices': numpy.array([1, 0]), 'value': 1},
    {'shape': (2, 3, 4), 'slices': (slice(None), [1, 2]), 'value': 1},
    {'shape': (3, 4, 5),
     'slices': (slice(None), [[1, 2], [0, -1]],), 'value': 1},
    {'shape': (3, 4, 5),
     'slices': (slice(None), slice(None), [[1, 2], [0, 3]]), 'value': 1},
    # slice and array
    {'shape': (3, 4, 5),
     'slices': (slice(None), slice(1, 2), [[1, 3], [0, 2]]), 'value': 1},
    # None and array
    {'shape': (3, 4, 5),
     'slices': (None, [1, -1]), 'value': 1},
    {'shape': (3, 4, 5),
     'slices': (None, [1, -1], None), 'value': 1},
    {'shape': (3, 4, 5),
     'slices': (None, None, None, [1, -1]), 'value': 1},
    # None, slice and array
    {'shape': (3, 4, 5),
     'slices': (slice(0, 1), None, [1, -1]), 'value': 1},
    {'shape': (3, 4, 5),
     'slices': (slice(0, 1), slice(1, 2), [1, -1]), 'value': 1},
    {'shape': (3, 4, 5),
     'slices': (slice(0, 1), None, slice(1, 2), [1, -1]), 'value': 1},
    # broadcasting
    {'shape': (3, 4, 5), 'slices': (slice(None), [[1, 2], [0, -1]],),
     'value': numpy.arange(3 * 2 * 2 * 5).reshape(3, 2, 2, 5)},
)
@testing.gpu
class TestScatterAddNoDuplicate(unittest.TestCase):
    """scatter_add cases whose indices touch each target element at most once."""

    @testing.for_dtypes([numpy.float32, numpy.int32])
    @testing.numpy_cupy_array_equal()
    def test_scatter_add(self, xp, dtype):
        # Runs once with xp=numpy and once with xp=cupy; the decorator
        # asserts both resulting arrays are equal.
        a = xp.zeros(self.shape, dtype)
        if xp is cupy:
            a.scatter_add(self.slices, self.value)
        else:
            # Valid only because the indices contain no duplicates.
            a[self.slices] = a[self.slices] + self.value
        return a
@testing.parameterize(
    {'shape': (2, 3), 'slices': ([1, 1], slice(None)), 'value': 1,
     'expected': numpy.array([[0, 0, 0], [2, 2, 2]])},
    {'shape': (2, 3), 'slices': ([1, 0, 1], slice(None)), 'value': 1,
     'expected': numpy.array([[1, 1, 1], [2, 2, 2]])},
    {'shape': (2, 3), 'slices': (slice(1, 2), [1, 0, 1]), 'value': 1,
     'expected': numpy.array([[0, 0, 0], [1, 2, 0]])},
)
@testing.gpu
class TestScatterAddDuplicateVectorValue(unittest.TestCase):
    """scatter_add with duplicate indices: repeated hits must accumulate."""

    @testing.for_dtypes([numpy.float32, numpy.int32])
    def test_scatter_add(self, dtype):
        # Expected values are written out by hand because NumPy
        # fancy-index assignment would not accumulate duplicates.
        a = cupy.zeros(self.shape, dtype)
        a.scatter_add(self.slices, self.value)
        numpy.testing.assert_almost_equal(a.get(), self.expected)
@testing.gpu
class TestScatterAdd(unittest.TestCase):
    """Miscellaneous scatter_add argument-handling tests."""

    @testing.for_dtypes([numpy.float32, numpy.int32])
    def test_scatter_add_cupy_arguments(self, dtype):
        # Index arrays and the added value may themselves be cupy ndarrays.
        shape = (2, 3)
        a = cupy.zeros(shape, dtype)
        slices = (cupy.array([1, 1]), slice(None))
        a.scatter_add(slices, cupy.array(1.))
        testing.assert_array_equal(
            a, cupy.array([[0., 0., 0.], [2., 2., 2.]], dtype))

    @testing.for_dtypes_combination(
        [numpy.float32, numpy.int32, numpy.uint32, numpy.uint64,
         numpy.ulonglong], names=['src_dtype', 'dst_dtype'])
    def test_scatter_add_different_dtypes(self, src_dtype, dst_dtype):
        # The value's dtype may differ from the target array's dtype.
        # (Renamed from 'test_scatter_add_differnt_dtypes': typo fix.
        # The trailing line was also fused with dataset metadata; removed.)
        shape = (2, 3)
        a = cupy.zeros(shape, dtype=src_dtype)
        value = cupy.array(1, dtype=dst_dtype)
        slices = ([1, 1], slice(None))
        a.scatter_add(slices, value)
        numpy.testing.assert_almost_equal(
            a.get(),
            numpy.array([[0, 0, 0], [2, 2, 2]], dtype=src_dtype))
import pytest
import urllib
from django.db import connection
from model_mommy import mommy
URLENCODE_FUNCTION_NAME = "urlencode"
@pytest.fixture()
def add_fun_awards(db):
    """Seed the test DB with award rows whose IDs stress the urlencode SQL function.

    Row order matters: ids 0-3 are URL-safe ASCII (one test indexes them
    directly); the rest mix reserved punctuation, spaces, '%', and
    non-ASCII/Unicode text.
    """
    generated_unique_award_ids = [
        "CONT_IDV_ABCDEFG_0123456",
        "CONT_IDV_abcdefg_9876543",
        "CONT_AWD_._.._..._....",
        "CONT_AWD_-_--_---_----",
        "ASST_AGG_1008DRCATTHP 01^~@01906470531201403_7022",
        "ASST_AGG_12C30000000000006122970000 121/21000_12C3",
        "ASST_AGG_17.302-MARYLAND-PRINCE GEORGE'S-20081231-10_1635",
        "ASST_NON_30180J015 MOD#2_1448",
        "ASST_NON_5% RECAP_8630",
        "CONT_AWD_GS30FY0027QP0019405Â_4732_GS30FY0027_4732",
        "ASST_NON_R!D1102A37 10_12E2",
        "CONT_IDV_[_]_test",
        "CONT_IDV_(_)_test",
        "CONT_AWD_(())_[[]]_test",
        "CONT_AWD_==_++_test",
        "CONT_AWD_?_??_test",
        "CONT_AWD_^_^^_^^^",
        "CONT_AWD_::_;;_:::;;;",
        "CONT_AWD_,_,,_,,,",
        "CONT_AWD_$_$$_$$$",
        "CONT_AWD_%_%%_%%%%",
        "☰☱☳☲☶☴ ൠൠൠ ☴☶☲☳☱☰",
        "❋❋❋ ALL YOUR BASE ARE BELONG TO US ❋❋❋",
        "⎺╲_❪ツ❫_╱⎺",
        "питон е јазик на компјутер и змија",
        "如果科羅拉多被鋪平會比得克薩斯州大",
        "епстеин се није убио",
        "kjo frazë nuk bën mirëkuptim",
        "何者なにものかによって、爆発物ばくはつぶつが仕掛しかけられたようです。",
    ]
    # id doubles as the ordering key used by the ORDER BY in the tests.
    for id_, generated_unique_award_id in enumerate(generated_unique_award_ids):
        mommy.make("awards.award", id=id_, generated_unique_award_id=generated_unique_award_id)
def test_urlencoding_no_change(add_fun_awards):
    """Award IDs built only from URL-safe ASCII must pass through unchanged."""
    query = f"""
        SELECT generated_unique_award_id,
        {URLENCODE_FUNCTION_NAME}(generated_unique_award_id)
        from awards
        order by id
    """
    with connection.cursor() as cursor:
        cursor.execute(query)
        rows = cursor.fetchall()
    # Fixture rows 0-3 are the plain-ASCII IDs.
    for original, encoded in rows[:4]:
        assert original == encoded, "Safe ASCII characters were incorrectly modified!"
def test_urlencoding_with_urllib(add_fun_awards):
    """The SQL urlencode function must agree with urllib.parse.quote(safe='')."""
    query = f"SELECT generated_unique_award_id, {URLENCODE_FUNCTION_NAME}(generated_unique_award_id) from awards"
    with connection.cursor() as cursor:
        cursor.execute(query)
        rows = cursor.fetchall()
    for raw, encoded in rows:
        expected = urllib.parse.quote(raw, safe="")
        msg = f"Custom SQL result '{encoded}' doesn't match urllib function's '{expected}'"
        assert expected == encoded, msg
def test_reverse_urlencoding_with_urllib(add_fun_awards):
    """Decoding the SQL-encoded value with urllib must round-trip to the original.

    Fixes: final line was fused with dataset metadata; urllib.parse.unquote
    was also computed twice per row (once for the message, once for the
    assertion) — now computed once.
    """
    test_sql = f"SELECT generated_unique_award_id, {URLENCODE_FUNCTION_NAME}(generated_unique_award_id) from awards"
    with connection.cursor() as cursor:
        cursor.execute(test_sql)
        results = cursor.fetchall()
    for result in results:
        decoded = urllib.parse.unquote(result[1])
        msg = f"Original '{result[0]}' doesn't match reverse quote '{decoded}'"
        assert decoded == result[0], msg
import urllib
from django.db import connection
from model_mommy import mommy
URLENCODE_FUNCTION_NAME = "urlencode"
@pytest.fixture()
def add_fun_awards(db):
    """Populate the test DB with award IDs that exercise urlencode edge cases.

    Rows 0-3 are URL-safe ASCII; later rows add punctuation, spaces, '%',
    and non-ASCII/Unicode strings.
    """
    generated_unique_award_ids = [
        "CONT_IDV_ABCDEFG_0123456",
        "CONT_IDV_abcdefg_9876543",
        "CONT_AWD_._.._..._....",
        "CONT_AWD_-_--_---_----",
        "ASST_AGG_1008DRCATTHP 01^~@01906470531201403_7022",
        "ASST_AGG_12C30000000000006122970000 121/21000_12C3",
        "ASST_AGG_17.302-MARYLAND-PRINCE GEORGE'S-20081231-10_1635",
        "ASST_NON_30180J015 MOD#2_1448",
        "ASST_NON_5% RECAP_8630",
        "CONT_AWD_GS30FY0027QP0019405Â_4732_GS30FY0027_4732",
        "ASST_NON_R!D1102A37 10_12E2",
        "CONT_IDV_[_]_test",
        "CONT_IDV_(_)_test",
        "CONT_AWD_(())_[[]]_test",
        "CONT_AWD_==_++_test",
        "CONT_AWD_?_??_test",
        "CONT_AWD_^_^^_^^^",
        "CONT_AWD_::_;;_:::;;;",
        "CONT_AWD_,_,,_,,,",
        "CONT_AWD_$_$$_$$$",
        "CONT_AWD_%_%%_%%%%",
        "☰☱☳☲☶☴ ൠൠൠ ☴☶☲☳☱☰",
        "❋❋❋ ALL YOUR BASE ARE BELONG TO US ❋❋❋",
        "⎺╲_❪ツ❫_╱⎺",
        "питон е јазик на компјутер и змија",
        "如果科羅拉多被鋪平會比得克薩斯州大",
        "епстеин се није убио",
        "kjo frazë nuk bën mirëkuptim",
        "何者なにものかによって、爆発物ばくはつぶつが仕掛しかけられたようです。",
    ]
    for id_, generated_unique_award_id in enumerate(generated_unique_award_ids):
        mommy.make("awards.award", id=id_, generated_unique_award_id=generated_unique_award_id)
def test_urlencoding_no_change(add_fun_awards):
    """IDs made only of URL-safe ASCII must come back unchanged."""
    test_sql = f"""
        SELECT generated_unique_award_id,
        {URLENCODE_FUNCTION_NAME}(generated_unique_award_id)
        from awards
        order by id
    """
    with connection.cursor() as cursor:
        cursor.execute(test_sql)
        results = cursor.fetchall()
    # Only the first four fixture rows are guaranteed URL-safe.
    for i in range(4):
        assert results[i][0] == results[i][1], "Safe ASCII characters were incorrectly modified!"
def test_urlencoding_with_urllib(add_fun_awards):
    """The SQL urlencode function must match urllib.parse.quote(safe='')."""
    test_sql = f"SELECT generated_unique_award_id, {URLENCODE_FUNCTION_NAME}(generated_unique_award_id) from awards"
    with connection.cursor() as cursor:
        cursor.execute(test_sql)
        results = cursor.fetchall()
    for result in results:
        # result[0] is the raw ID, result[1] the SQL-encoded form.
        urlib_ver = urllib.parse.quote(result[0], safe="")
        msg = f"Custom SQL result '{result[1]}' doesn't match urllib function's '{urlib_ver}'"
        assert urlib_ver == result[1], msg
def test_reverse_urlencoding_with_urllib(add_fun_awards):
    """Decoding the SQL-encoded value with urllib must round-trip to the original.

    Fixes: final line was fused with dataset metadata; urllib.parse.unquote
    is now computed once per row instead of twice.
    """
    test_sql = f"SELECT generated_unique_award_id, {URLENCODE_FUNCTION_NAME}(generated_unique_award_id) from awards"
    with connection.cursor() as cursor:
        cursor.execute(test_sql)
        results = cursor.fetchall()
    for result in results:
        decoded = urllib.parse.unquote(result[1])
        msg = f"Original '{result[0]}' doesn't match reverse quote '{decoded}'"
        assert decoded == result[0], msg
from unittest.mock import patch, MagicMock, PropertyMock, mock_open, call
import pytest
from mountains import main
@patch('mountains.core.handle_args')
@patch('mountains.core.get_http_data')
@patch('mountains.core.get_file_data')
@patch('mountains.core.create_key')
@patch('mountains.core.create_header')
@patch('mountains.core.format_data')
def test_error(m_format, m_header, m_key, m_file, m_http, m_args):
    """main() returns -1 and does no further work when argument parsing raises."""
    m_args.side_effect = Exception
    assert main() == -1
    # None of the downstream collaborators may have been reached.
    for untouched in (m_http, m_file, m_key, m_header, m_format):
        untouched.assert_not_called()
@patch('mountains.core.handle_args')
@patch('mountains.core.get_http_data')
@patch('mountains.core.get_file_data')
@patch('mountains.core.create_key')
@patch('mountains.core.create_header')
@patch('mountains.core.format_data')
def test_args_url(m_format, m_header, m_key, m_file, m_http, m_args):
    """A URL argument routes data fetching through HTTP, never the file path."""
    fake_args = MagicMock()
    type(fake_args).url = PropertyMock(return_value='value')
    m_args.return_value = fake_args
    assert main() == 0
    m_http.assert_called_with('value')
    m_file.assert_not_called()
@patch('mountains.core.handle_args')
@patch('mountains.core.get_http_data')
@patch('mountains.core.get_file_data')
@patch('mountains.core.create_key')
@patch('mountains.core.create_header')
@patch('mountains.core.format_data')
def test_args_file(m_format, m_header, m_key, m_file, m_http, m_args):
    """With no URL but a file argument, main() reads from the file path."""
    fake_args = MagicMock()
    type(fake_args).url = PropertyMock(return_value=None)
    type(fake_args).file = PropertyMock(return_value='value')
    m_args.return_value = fake_args
    assert main() == 0
    m_file.assert_called_with('value')
    m_http.assert_not_called()
@patch('mountains.core.handle_args')
@patch('mountains.core.get_http_data')
@patch('mountains.core.get_file_data')
@patch('mountains.core.create_key')
@patch('mountains.core.create_header')
@patch('mountains.core.format_data')
def test_line_loop(m_format, m_header, m_key, m_file, m_http, m_args):
    """main() formats every non-empty data line after the header.

    Fix: the closing line of this test was fused with dataset metadata;
    the corrupted text has been removed.
    """
    lines = iter(['head', 'one', '', 'three'])
    m_http.return_value = lines
    args = MagicMock()
    type(args).url = PropertyMock(return_value='value')
    m_args.return_value = args
    key = MagicMock()
    m_key.return_value = key
    m_format.return_value = ''
    rv = main()
    assert rv == 0
    # 'head' is consumed as the header and '' is skipped, leaving two lines.
    assert m_format.call_count == 2
    m_format.assert_has_calls([
        call(key, 'one'),
        call(key, 'three'),
    ])
import pytest
from mountains import main
@patch('mountains.core.handle_args')
@patch('mountains.core.get_http_data')
@patch('mountains.core.get_file_data')
@patch('mountains.core.create_key')
@patch('mountains.core.create_header')
@patch('mountains.core.format_data')
def test_error(m_format, m_header, m_key, m_file, m_http, m_args):
    """If argument handling raises, main() returns -1 and calls nothing else."""
    # patch decorators inject mocks bottom-up, hence the reversed
    # parameter order relative to the decorator stack.
    m_args.side_effect = Exception
    rv = main()
    assert rv == -1
    m_http.assert_not_called()
    m_file.assert_not_called()
    m_key.assert_not_called()
    m_header.assert_not_called()
    m_format.assert_not_called()
@patch('mountains.core.handle_args')
@patch('mountains.core.get_http_data')
@patch('mountains.core.get_file_data')
@patch('mountains.core.create_key')
@patch('mountains.core.create_header')
@patch('mountains.core.format_data')
def test_args_url(m_format, m_header, m_key, m_file, m_http, m_args):
    """When a URL argument is present, main() fetches over HTTP, not from a file."""
    args = MagicMock()
    type(args).url = PropertyMock(return_value='value')
    m_args.return_value = args
    rv = main()
    assert rv == 0
    m_http.assert_called_with('value')
    m_file.assert_not_called()
@patch('mountains.core.handle_args')
@patch('mountains.core.get_http_data')
@patch('mountains.core.get_file_data')
@patch('mountains.core.create_key')
@patch('mountains.core.create_header')
@patch('mountains.core.format_data')
def test_args_file(m_format, m_header, m_key, m_file, m_http, m_args):
    """When no URL is given but a file is, main() reads from the file path."""
    args = MagicMock()
    type(args).url = PropertyMock(return_value=None)
    type(args).file = PropertyMock(return_value='value')
    m_args.return_value = args
    rv = main()
    assert rv == 0
    m_file.assert_called_with('value')
    m_http.assert_not_called()
@patch('mountains.core.handle_args')
@patch('mountains.core.get_http_data')
@patch('mountains.core.get_file_data')
@patch('mountains.core.create_key')
@patch('mountains.core.create_header')
@patch('mountains.core.format_data')
def test_line_loop(m_format, m_header, m_key, m_file, m_http, m_args):
    """main() formats every non-empty data line after the header.

    Fix: the closing line of this test was fused with dataset metadata;
    the corrupted text has been removed.
    """
    lines = iter(['head', 'one', '', 'three'])
    m_http.return_value = lines
    args = MagicMock()
    type(args).url = PropertyMock(return_value='value')
    m_args.return_value = args
    key = MagicMock()
    m_key.return_value = key
    m_format.return_value = ''
    rv = main()
    assert rv == 0
    # 'head' is the header and '' is skipped, leaving two formatted lines.
    assert m_format.call_count == 2
    m_format.assert_has_calls([
        call(key, 'one'),
        call(key, 'three'),
    ])
import os
import argparse
import shelve
import datetime
import shutil
import numpy as np
import isce
import isceobj
from isceobj.Constants import SPEED_OF_LIGHT
from isceobj.Util.Poly2D import Poly2D
from mroipac.looks.Looks import Looks
def createParser():
    """Build the argument parser for the merged-image DEM simulation step."""
    parser = argparse.ArgumentParser(
        description='Create DEM simulation for merged images')
    parser.add_argument('-a', '--alks', dest='alks', type=int, default=1,
                        help='Number of azimuth looks')
    parser.add_argument('-r', '--rlks', dest='rlks', type=int, default=1,
                        help='Number of range looks')
    parser.add_argument('-d', '--dem', dest='dem', type=str, required=True,
                        help='Input DEM to use')
    parser.add_argument('-m', '--master', dest='master', type=str, required=True,
                        help='Dir with master frame')
    parser.add_argument('-o', '--output', dest='outdir', type=str, required=True,
                        help='Output directory')
    parser.add_argument('-n', '--native', dest='nativedop', action='store_true',
                        default=False,
                        help='Products in native doppler geometry instead of zero doppler')
    parser.add_argument('-l', '--legendre', dest='legendre', action='store_true',
                        default=False,
                        help='Use legendre interpolation instead of hermite')
    parser.add_argument('-useGPU', '--useGPU', dest='useGPU', action='store_true',
                        default=False,
                        help='Allow App to use GPU when available')
    return parser
def cmdLineParse(iargs=None):
    """Parse *iargs* with the module parser (None means use sys.argv)."""
    return createParser().parse_args(args=iargs)
class Dummy(object):
    """Bare attribute container; extractInfo() fills it with frame metadata."""
    pass
def runTopoGPU(info, demImage, dop=None, nativedop=False, legendre=False):
    """Run the GPU topozero module: radar grid -> lat/lon/height/LOS rasters.

    info: attribute container from extractInfo(), extended with output
        filenames (latFilename, lonFilename, losFilename, heightFilename,
        incFilename, maskFilename) and outdir.
    demImage: DEM image object (already load()-ed); opened and finalized here.
    dop: doppler polynomial object (with ._coeffs) or a bare coefficient
        list; consulted only when nativedop is True.
    nativedop: produce products in native doppler geometry.
    legendre: use Legendre orbit interpolation instead of Hermite.
    """
    from isceobj.Planet.Planet import Planet
    from zerodop.GPUtopozero.GPUtopozero import PyTopozero
    from isceobj import Constants as CN
    from isceobj.Util.Poly2D import Poly2D
    from iscesys import DateTimeUtil as DTU

    ## TODO GPU does not support shadow and layover and local inc file generation
    full = False

    os.makedirs(info.outdir, exist_ok=True)

    # define variables to be used later on
    # shift first range/azimuth sample to the center of the multilook window
    r0 = info.rangeFirstSample + ((info.numberRangeLooks - 1)/2) * info.slantRangePixelSpacing
    tbef = info.sensingStart + datetime.timedelta(seconds = ((info.numberAzimuthLooks - 1) /2) / info.prf)
    pegHdg = np.radians(info.orbit.getENUHeading(tbef))
    width = info.width // info.numberRangeLooks
    length = info.length // info.numberAzimuthLooks
    dr = info.slantRangePixelSpacing*info.numberRangeLooks

    # output file names
    latFilename = info.latFilename
    lonFilename = info.lonFilename
    losFilename = info.losFilename
    heightFilename = info.heightFilename
    incFilename = info.incFilename
    maskFilename = info.maskFilename

    # orbit interpolator
    if legendre:
        omethod = 2  # LEGENDRE INTERPOLATION
    else:
        omethod = 0  # HERMITE INTERPOLATION

    # tracking doppler specifications
    if nativedop and (dop is not None):
        try:
            coeffs = dop._coeffs
        except:
            # NOTE(review): bare except; assumes dop is already a plain
            # coefficient list when it lacks a _coeffs attribute.
            coeffs = dop
        polyDoppler = Poly2D()
        polyDoppler.setWidth(width)
        polyDoppler.setLength(length)
        polyDoppler.initPoly(rangeOrder = len(coeffs)-1, azimuthOrder=0, coeffs=[coeffs])
    else:
        print('Zero doppler')
        polyDoppler = Poly2D(name='stripmapStack_dopplerPoly')
        polyDoppler.setWidth(width)
        polyDoppler.setLength(length)
        polyDoppler.setNormRange(1.0)
        polyDoppler.setNormAzimuth(1.0)
        polyDoppler.setMeanRange(0.0)
        polyDoppler.setMeanAzimuth(0.0)
        polyDoppler.initPoly(rangeOrder=0, azimuthOrder=0, coeffs=[[0.0]])
    polyDoppler.createPoly2D()

    # dem
    demImage.setCaster('read','FLOAT')
    demImage.createImage()

    # slant range file: modeled as a degree-1 polynomial r(x) = r0 + dr*x
    slantRangeImage = Poly2D()
    slantRangeImage.setWidth(width)
    slantRangeImage.setLength(length)
    slantRangeImage.setNormRange(1.0)
    slantRangeImage.setNormAzimuth(1.0)
    slantRangeImage.setMeanRange(0.0)
    slantRangeImage.setMeanAzimuth(0.0)
    slantRangeImage.initPoly(rangeOrder=1,azimuthOrder=0, coeffs=[[r0,dr]])
    slantRangeImage.createPoly2D()

    # lat file
    latImage = isceobj.createImage()
    accessMode = 'write'
    dataType = 'DOUBLE'
    latImage.initImage(latFilename,accessMode,width,dataType)
    latImage.createImage()

    # lon file
    lonImage = isceobj.createImage()
    lonImage.initImage(lonFilename,accessMode,width,dataType)
    lonImage.createImage()

    # LOS file (2-band BIL: incidence and azimuth angles)
    losImage = isceobj.createImage()
    dataType = 'FLOAT'
    bands = 2
    scheme = 'BIL'
    losImage.initImage(losFilename,accessMode,width,dataType,bands=bands,scheme=scheme)
    losImage.setCaster('write','DOUBLE')
    losImage.createImage()

    # height file
    heightImage = isceobj.createImage()
    dataType = 'DOUBLE'
    heightImage.initImage(heightFilename,accessMode,width,dataType)
    heightImage.createImage()

    # add inc and mask file if requested (disabled above: full = False)
    if full:
        incImage = isceobj.createImage()
        dataType = 'FLOAT'
        incImage.initImage(incFilename,accessMode,width,dataType,bands=bands,scheme=scheme)
        incImage.createImage()
        incImagePtr = incImage.getImagePointer()
        maskImage = isceobj.createImage()
        dataType = 'BYTE'
        bands = 1
        maskImage.initImage(maskFilename,accessMode,width,dataType,bands=bands,scheme=scheme)
        maskImage.createImage()
        maskImagePtr = maskImage.getImagePointer()
    else:
        # zero pointers tell topozero to skip inc/mask generation
        incImagePtr = 0
        maskImagePtr = 0

    # initalize planet
    elp = Planet(pname='Earth').ellipsoid

    # initialize topo object and fill with parameters
    topo = PyTopozero()
    topo.set_firstlat(demImage.getFirstLatitude())
    topo.set_firstlon(demImage.getFirstLongitude())
    topo.set_deltalat(demImage.getDeltaLatitude())
    topo.set_deltalon(demImage.getDeltaLongitude())
    topo.set_major(elp.a)
    topo.set_eccentricitySquared(elp.e2)
    topo.set_rSpace(info.slantRangePixelSpacing)
    topo.set_r0(r0)
    topo.set_pegHdg(pegHdg)
    topo.set_prf(info.prf)
    topo.set_t0(DTU.seconds_since_midnight(tbef))
    topo.set_wvl(info.radarWavelength)
    topo.set_thresh(.05)
    topo.set_demAccessor(demImage.getImagePointer())
    topo.set_dopAccessor(polyDoppler.getPointer())
    topo.set_slrngAccessor(slantRangeImage.getPointer())
    topo.set_latAccessor(latImage.getImagePointer())
    topo.set_lonAccessor(lonImage.getImagePointer())
    topo.set_losAccessor(losImage.getImagePointer())
    topo.set_heightAccessor(heightImage.getImagePointer())
    topo.set_incAccessor(incImagePtr)
    topo.set_maskAccessor(maskImagePtr)
    topo.set_numIter(25)
    topo.set_idemWidth(demImage.getWidth())
    topo.set_idemLength(demImage.getLength())
    topo.set_ilrl(info.lookSide)
    topo.set_extraIter(10)
    topo.set_length(length)
    topo.set_width(width)
    topo.set_nRngLooks(info.numberRangeLooks)
    topo.set_nAzLooks(info.numberAzimuthLooks)
    topo.set_demMethod(5) # BIQUINTIC METHOD
    topo.set_orbitMethod(omethod)

    # Need to simplify orbit stuff later
    nvecs = len(info.orbit.stateVectors.list)
    topo.set_orbitNvecs(nvecs)
    topo.set_orbitBasis(1) # Is this ever different?
    topo.createOrbit() # Initializes the empty orbit to the right allocated size
    count = 0
    for sv in info.orbit.stateVectors.list:
        td = DTU.seconds_since_midnight(sv.getTime())
        pos = sv.getPosition()
        vel = sv.getVelocity()
        topo.set_orbitVector(count,td,pos[0],pos[1],pos[2],vel[0],vel[1],vel[2])
        count += 1

    # run topo
    topo.runTopo()

    # close the written files and add description etc
    # lat file
    latImage.addDescription('Pixel-by-pixel latitude in degrees.')
    latImage.finalizeImage()
    latImage.renderHdr()

    # lon file
    lonImage.addDescription('Pixel-by-pixel longitude in degrees.')
    lonImage.finalizeImage()
    lonImage.renderHdr()

    # height file
    heightImage.addDescription('Pixel-by-pixel height in meters.')
    heightImage.finalizeImage()
    heightImage.renderHdr()

    # los file
    descr = '''Two channel Line-Of-Sight geometry image (all angles in degrees). Represents vector drawn from target to platform.
    Channel 1: Incidence angle measured from vertical at target (always +ve).
    Channel 2: Azimuth angle measured from North in Anti-clockwise direction.'''
    losImage.setImageType('bil')
    losImage.addDescription(descr)
    losImage.finalizeImage()
    losImage.renderHdr()

    # dem/ height file
    demImage.finalizeImage()

    # adding in additional files if requested
    if full:
        descr = '''Two channel angle file.
        Channel 1: Angle between ray to target and the vertical at the sensor
        Channel 2: Local incidence angle accounting for DEM slope at target'''
        incImage.addDescription(descr)
        incImage.finalizeImage()
        incImage.renderHdr()
        descr = 'Radar shadow-layover mask. 1 - Radar Shadow. 2 - Radar Layover. 3 - Both.'
        maskImage.addDescription(descr)
        maskImage.finalizeImage()
        maskImage.renderHdr()

    if slantRangeImage:
        try:
            slantRangeImage.finalizeImage()
        except:
            # best-effort cleanup; Poly2D may not support finalizeImage
            pass
def runTopoCPU(info, demImage, dop=None,
               nativedop=False, legendre=False):
    """Run the CPU topozero module (same products/options as runTopoGPU)."""
    from zerodop.topozero import createTopozero
    from isceobj.Planet.Planet import Planet

    os.makedirs(info.outdir, exist_ok=True)

    #####Run Topo
    planet = Planet(pname='Earth')
    topo = createTopozero()
    topo.slantRangePixelSpacing = info.slantRangePixelSpacing
    topo.prf = info.prf
    topo.radarWavelength = info.radarWavelength
    topo.orbit = info.orbit
    topo.width = info.width // info.numberRangeLooks
    topo.length = info.length //info.numberAzimuthLooks
    topo.wireInputPort(name='dem', object=demImage)
    topo.wireInputPort(name='planet', object=planet)
    topo.numberRangeLooks = info.numberRangeLooks
    topo.numberAzimuthLooks = info.numberAzimuthLooks
    topo.lookSide = info.lookSide
    # shift start time/range to the center of the multilook window
    topo.sensingStart = info.sensingStart + datetime.timedelta(seconds = ((info.numberAzimuthLooks - 1) /2) / info.prf)
    topo.rangeFirstSample = info.rangeFirstSample + ((info.numberRangeLooks - 1)/2) * info.slantRangePixelSpacing
    topo.demInterpolationMethod='BIQUINTIC'
    if legendre:
        topo.orbitInterpolationMethod = 'LEGENDRE'
    topo.latFilename = info.latFilename
    topo.lonFilename = info.lonFilename
    topo.losFilename = info.losFilename
    topo.heightFilename = info.heightFilename
    topo.incFilename = info.incFilename
    topo.maskFilename = info.maskFilename
    if nativedop and (dop is not None):
        try:
            coeffs = dop._coeffs
        except:
            # NOTE(review): bare except; assumes dop is already a plain
            # coefficient list when it lacks a _coeffs attribute.
            coeffs = dop
        doppler = Poly2D()
        doppler.setWidth(info.width // info.numberRangeLooks)
        doppler.setLength(info.length // info.numberAzimuthLooks)
        doppler.initPoly(rangeOrder = len(coeffs)-1, azimuthOrder=0, coeffs=[coeffs])
    else:
        print('Zero doppler')
        doppler = None
    topo.polyDoppler = doppler
    topo.topo()
    return
def runSimamp(outdir, hname='z.rdr'):
    """Generate a shaded-relief 'simamp.rdr' from the height raster in *outdir*.

    hname: basename of the height image (its '<hname>.xml' must exist).
    """
    from iscesys.StdOEL.StdOELPy import create_writer

    #####Run simamp
    stdWriter = create_writer("log","",True,filename='sim.log')
    objShade = isceobj.createSimamplitude()
    objShade.setStdWriter(stdWriter)

    hgtImage = isceobj.createImage()
    hgtImage.load(os.path.join(outdir, hname) + '.xml')
    hgtImage.setAccessMode('read')
    hgtImage.createImage()

    simImage = isceobj.createImage()
    simImage.setFilename(os.path.join(outdir, 'simamp.rdr'))
    simImage.dataType = 'FLOAT'
    simImage.setAccessMode('write')
    simImage.setWidth(hgtImage.getWidth())
    simImage.createImage()

    # shade factor 3.0 controls the exaggeration of the simulated amplitude
    objShade.simamplitude(hgtImage, simImage, shade=3.0)

    simImage.renderHdr()
    hgtImage.finalizeImage()
    simImage.finalizeImage()
    return
def runMultilook(in_dir, out_dir, alks, rlks):
    """Multilook the standard geometry rasters from *in_dir* into *out_dir*.

    Uses the mroipac Looks module; missing input files are silently skipped.
    Returns out_dir.
    """
    print('generate multilooked geometry files with alks={} and rlks={}'.format(alks, rlks))
    from iscesys.Parsers.FileParserFactory import createFileParser
    FP = createFileParser('xml')

    os.makedirs(out_dir, exist_ok=True)

    for fbase in ['hgt', 'incLocal', 'lat', 'lon', 'los', 'shadowMask', 'waterMask']:
        fname = '{}.rdr'.format(fbase)
        in_file = os.path.join(in_dir, fname)
        out_file = os.path.join(out_dir, fname)
        if os.path.isfile(in_file):
            # DEM-typed images need the matching image class to load
            xmlProp = FP.parse(in_file+'.xml')[0]
            if('image_type' in xmlProp and xmlProp['image_type'] == 'dem'):
                inImage = isceobj.createDemImage()
            else:
                inImage = isceobj.createImage()
            inImage.load(in_file+'.xml')
            inImage.filename = in_file

            lkObj = Looks()
            lkObj.setDownLooks(alks)
            lkObj.setAcrossLooks(rlks)
            lkObj.setInputImage(inImage)
            lkObj.setOutputFilename(out_file)
            lkObj.looks()

            # copy the full resolution xml/vrt file from ./merged/geom_master to ./geom_master
            # to facilitate the number of looks extraction
            # the file path inside .xml file is not, but should, updated
            shutil.copy(in_file+'.xml', out_file+'.full.xml')
            shutil.copy(in_file+'.vrt', out_file+'.full.vrt')
    return out_dir
def runMultilookGdal(in_dir, out_dir, alks, rlks, in_ext='.rdr', out_ext='.rdr',
                     fbase_list=['hgt', 'incLocal', 'lat', 'lon', 'los', 'shadowMask', 'waterMask']):
    """Multilook geometry rasters with gdal_translate (decimation by -outsize).

    Missing input files are silently skipped. Returns out_dir.
    NOTE(review): shells out via os.system with no return-code check;
    failures of gdal_translate go unnoticed.
    """
    print('generate multilooked geometry files with alks={} and rlks={}'.format(alks, rlks))
    import gdal

    # create 'geom_master' directory
    os.makedirs(out_dir, exist_ok=True)

    # multilook files one by one
    for fbase in fbase_list:
        in_file = os.path.join(in_dir, '{}{}'.format(fbase, in_ext))
        out_file = os.path.join(out_dir, '{}{}'.format(fbase, out_ext))
        if os.path.isfile(in_file):
            ds = gdal.Open(in_file, gdal.GA_ReadOnly)
            in_wid = ds.RasterXSize
            in_len = ds.RasterYSize

            # crop to an exact multiple of the look window before resizing
            out_wid = int(in_wid / rlks)
            out_len = int(in_len / alks)
            src_wid = out_wid * rlks
            src_len = out_len * alks

            cmd = 'gdal_translate -of ENVI -a_nodata 0 -outsize {ox} {oy} '.format(ox=out_wid, oy=out_len)
            cmd += ' -srcwin 0 0 {sx} {sy} {fi} {fo} '.format(sx=src_wid, sy=src_len, fi=in_file, fo=out_file)
            print(cmd)
            os.system(cmd)

            # copy the full resolution xml/vrt file from ./merged/geom_master to ./geom_master
            # to facilitate the number of looks extraction
            # the file path inside .xml file is not, but should, updated
            # NOTE(review): the guard compares against out_file+'.full' —
            # presumably to avoid self-copy; confirm intended semantics.
            if in_file != out_file+'.full':
                shutil.copy(in_file+'.xml', out_file+'.full.xml')
                shutil.copy(in_file+'.vrt', out_file+'.full.vrt')
    return out_dir
def extractInfo(frame, inps):
    '''
    Extract relevant information only.

    Copies the timing, geometry and orbit attributes needed by the topo
    modules from *frame* into a plain Dummy container, plus inps.outdir.
    '''
    info = Dummy()

    ins = frame.getInstrument()  # NOTE(review): unused; kept for parity

    info.sensingStart = frame.getSensingStart()
    info.lookSide = frame.instrument.platform.pointingDirection
    info.rangeFirstSample = frame.startingRange
    # topo always runs at full resolution; multilooking happens afterwards
    info.numberRangeLooks = 1 #inps.rlks
    info.numberAzimuthLooks = 1 #inps.alks

    fsamp = frame.rangeSamplingRate

    info.slantRangePixelSpacing = 0.5 * SPEED_OF_LIGHT / fsamp
    info.prf = frame.PRF
    # 'radarWavelegth' is the (misspelled) attribute name on the frame object
    info.radarWavelength = frame.radarWavelegth
    info.orbit = frame.getOrbit()

    info.width = frame.getNumberOfSamples()
    info.length = frame.getNumberOfLines()

    info.sensingStop = frame.getSensingStop()
    info.outdir = inps.outdir
    return info
def main(iargs=None):
    """Drive the topo step: run (GPU or CPU) topozero, simulate amplitude,
    then optionally write multilooked geometry files.

    Bug fix: the multilook trigger tested ``inps.rlks * inps.rlks`` and
    therefore skipped multilooking whenever only azimuth looks were
    requested (alks > 1, rlks == 1); it now tests ``inps.alks * inps.rlks``.
    """
    inps = cmdLineParse(iargs)

    # see if the user compiled isce with GPU enabled
    run_GPU = False
    try:
        from zerodop.GPUtopozero.GPUtopozero import PyTopozero
        from zerodop.GPUgeo2rdr.GPUgeo2rdr import PyGeo2rdr
        run_GPU = True
    except:
        pass

    if inps.useGPU and not run_GPU:
        print("GPU mode requested but no GPU ISCE code found")

    # setting the respective version of geo2rdr for CPU and GPU
    if run_GPU and inps.useGPU:
        print('GPU mode')
        runTopo = runTopoGPU
    else:
        print('CPU mode')
        runTopo = runTopoCPU

    db = shelve.open(os.path.join(inps.master, 'data'))
    frame = db['frame']
    try:
        doppler = db['doppler']
    except:
        # older stacks store the doppler on the frame itself
        doppler = frame._dopplerVsPixel
    db.close()

    ####Setup dem
    demImage = isceobj.createDemImage()
    demImage.load(inps.dem + '.xml')
    demImage.setAccessMode('read')

    info = extractInfo(frame, inps)

    # define topo output names:
    info.latFilename = os.path.join(info.outdir, 'lat.rdr')
    info.lonFilename = os.path.join(info.outdir, 'lon.rdr')
    info.losFilename = os.path.join(info.outdir, 'los.rdr')
    info.heightFilename = os.path.join(info.outdir, 'hgt.rdr')
    info.incFilename = os.path.join(info.outdir, 'incLocal.rdr')
    info.maskFilename = os.path.join(info.outdir, 'shadowMask.rdr')

    runTopo(info, demImage, dop=doppler, nativedop=inps.nativedop, legendre=inps.legendre)
    runSimamp(os.path.dirname(info.heightFilename), os.path.basename(info.heightFilename))

    # write multilooked geometry files in "geom_master" directory, same level as "Igrams"
    if inps.alks * inps.rlks > 1:
        out_dir = os.path.join(os.path.dirname(os.path.dirname(info.outdir)), 'geom_master')
        runMultilookGdal(in_dir=info.outdir, out_dir=out_dir, alks=inps.alks, rlks=inps.rlks)
        #runMultilook(in_dir=info.outdir, out_dir=out_dir, alks=inps.alks, rlks=inps.rlks)
    return
if __name__ == '__main__':
    # Main driver. (The original trailing line was fused with dataset
    # metadata; the corrupted text has been removed.)
    main()
import os
import argparse
import shelve
import datetime
import shutil
import numpy as np
import isce
import isceobj
from isceobj.Constants import SPEED_OF_LIGHT
from isceobj.Util.Poly2D import Poly2D
from mroipac.looks.Looks import Looks
def createParser():
    '''
    Command line parser.

    Builds the argparse parser for the merged-image DEM simulation step.
    '''
    parser = argparse.ArgumentParser( description='Create DEM simulation for merged images')
    parser.add_argument('-a','--alks', dest='alks', type=int, default=1,
            help = 'Number of azimuth looks')
    parser.add_argument('-r','--rlks', dest='rlks', type=int, default=1,
            help = 'Number of range looks')
    parser.add_argument('-d', '--dem', dest='dem', type=str, required=True,
            help = 'Input DEM to use')
    parser.add_argument('-m', '--master', dest='master', type=str, required=True,
            help = 'Dir with master frame')
    parser.add_argument('-o', '--output', dest='outdir', type=str, required=True,
            help = 'Output directory')
    parser.add_argument('-n','--native', dest='nativedop', action='store_true',
            default=False, help='Products in native doppler geometry instead of zero doppler')
    parser.add_argument('-l','--legendre', dest='legendre', action='store_true',
            default=False, help='Use legendre interpolation instead of hermite')
    parser.add_argument('-useGPU', '--useGPU', dest='useGPU',action='store_true', default=False,
            help='Allow App to use GPU when available')
    return parser
def cmdLineParse(iargs = None):
    """Parse command line arguments (None means parse sys.argv)."""
    parser = createParser()
    return parser.parse_args(args=iargs)
class Dummy(object):
    """Bare attribute container; extractInfo() fills it with frame metadata."""
    pass
def runTopoGPU(info, demImage, dop=None, nativedop=False, legendre=False):
    """Run GPU topozero: map radar geometry to lat/lon/height/LOS rasters.

    Writes the lat/lon/los/hgt files whose paths are carried on *info*
    (inc/mask outputs are disabled — see TODO below).  *demImage* is the
    input DEM image object; *dop* optionally carries doppler polynomial
    coefficients used only when *nativedop* is True.
    """
    from isceobj.Planet.Planet import Planet
    from zerodop.GPUtopozero.GPUtopozero import PyTopozero
    from isceobj import Constants as CN
    from isceobj.Util.Poly2D import Poly2D
    from iscesys import DateTimeUtil as DTU

    ## TODO GPU does not support shadow and layover and local inc file generation
    full = False

    os.makedirs(info.outdir, exist_ok=True)

    # define variables to be used later on
    # Shift first range/time to the center of the multilook window.
    r0 = info.rangeFirstSample + ((info.numberRangeLooks - 1)/2) * info.slantRangePixelSpacing
    tbef = info.sensingStart + datetime.timedelta(seconds = ((info.numberAzimuthLooks - 1) /2) / info.prf)
    pegHdg = np.radians(info.orbit.getENUHeading(tbef))
    width = info.width // info.numberRangeLooks
    length = info.length // info.numberAzimuthLooks
    dr = info.slantRangePixelSpacing*info.numberRangeLooks

    # output file names
    latFilename = info.latFilename
    lonFilename = info.lonFilename
    losFilename = info.losFilename
    heightFilename = info.heightFilename
    incFilename = info.incFilename
    maskFilename = info.maskFilename

    # orbit interpolator
    if legendre:
        omethod = 2 # LEGENDRE INTERPOLATION
    else:
        omethod = 0 # HERMITE INTERPOLATION

    # tracking doppler specifications
    if nativedop and (dop is not None):
        try:
            # Doppler object may wrap its coefficients or be a bare list.
            coeffs = dop._coeffs
        except:
            coeffs = dop

        polyDoppler = Poly2D()
        polyDoppler.setWidth(width)
        polyDoppler.setLength(length)
        polyDoppler.initPoly(rangeOrder = len(coeffs)-1, azimuthOrder=0, coeffs=[coeffs])
    else:
        print('Zero doppler')
        # Constant zero-doppler polynomial.
        polyDoppler = Poly2D(name='stripmapStack_dopplerPoly')
        polyDoppler.setWidth(width)
        polyDoppler.setLength(length)
        polyDoppler.setNormRange(1.0)
        polyDoppler.setNormAzimuth(1.0)
        polyDoppler.setMeanRange(0.0)
        polyDoppler.setMeanAzimuth(0.0)
        polyDoppler.initPoly(rangeOrder=0, azimuthOrder=0, coeffs=[[0.0]])
    polyDoppler.createPoly2D()

    # dem
    demImage.setCaster('read','FLOAT')
    demImage.createImage()

    # slant range file
    # Linear polynomial r0 + dr*pixel stands in for a slant-range raster.
    slantRangeImage = Poly2D()
    slantRangeImage.setWidth(width)
    slantRangeImage.setLength(length)
    slantRangeImage.setNormRange(1.0)
    slantRangeImage.setNormAzimuth(1.0)
    slantRangeImage.setMeanRange(0.0)
    slantRangeImage.setMeanAzimuth(0.0)
    slantRangeImage.initPoly(rangeOrder=1,azimuthOrder=0, coeffs=[[r0,dr]])
    slantRangeImage.createPoly2D()

    # lat file
    latImage = isceobj.createImage()
    accessMode = 'write'
    dataType = 'DOUBLE'
    latImage.initImage(latFilename,accessMode,width,dataType)
    latImage.createImage()

    # lon file
    lonImage = isceobj.createImage()
    lonImage.initImage(lonFilename,accessMode,width,dataType)
    lonImage.createImage()

    # LOS file
    # Two-band BIL: incidence angle and azimuth angle (see description below).
    losImage = isceobj.createImage()
    dataType = 'FLOAT'
    bands = 2
    scheme = 'BIL'
    losImage.initImage(losFilename,accessMode,width,dataType,bands=bands,scheme=scheme)
    losImage.setCaster('write','DOUBLE')
    losImage.createImage()

    # height file
    heightImage = isceobj.createImage()
    dataType = 'DOUBLE'
    heightImage.initImage(heightFilename,accessMode,width,dataType)
    heightImage.createImage()

    # add inc and mask file if requested
    if full:
        incImage = isceobj.createImage()
        dataType = 'FLOAT'
        incImage.initImage(incFilename,accessMode,width,dataType,bands=bands,scheme=scheme)
        incImage.createImage()
        incImagePtr = incImage.getImagePointer()

        maskImage = isceobj.createImage()
        dataType = 'BYTE'
        bands = 1
        maskImage.initImage(maskFilename,accessMode,width,dataType,bands=bands,scheme=scheme)
        maskImage.createImage()
        maskImagePtr = maskImage.getImagePointer()
    else:
        # Null accessors tell the GPU kernel to skip these outputs.
        incImagePtr = 0
        maskImagePtr = 0

    # initalize planet
    elp = Planet(pname='Earth').ellipsoid

    # initialize topo object and fill with parameters
    topo = PyTopozero()
    topo.set_firstlat(demImage.getFirstLatitude())
    topo.set_firstlon(demImage.getFirstLongitude())
    topo.set_deltalat(demImage.getDeltaLatitude())
    topo.set_deltalon(demImage.getDeltaLongitude())
    topo.set_major(elp.a)
    topo.set_eccentricitySquared(elp.e2)
    topo.set_rSpace(info.slantRangePixelSpacing)
    topo.set_r0(r0)
    topo.set_pegHdg(pegHdg)
    topo.set_prf(info.prf)
    topo.set_t0(DTU.seconds_since_midnight(tbef))
    topo.set_wvl(info.radarWavelength)
    topo.set_thresh(.05)
    topo.set_demAccessor(demImage.getImagePointer())
    topo.set_dopAccessor(polyDoppler.getPointer())
    topo.set_slrngAccessor(slantRangeImage.getPointer())
    topo.set_latAccessor(latImage.getImagePointer())
    topo.set_lonAccessor(lonImage.getImagePointer())
    topo.set_losAccessor(losImage.getImagePointer())
    topo.set_heightAccessor(heightImage.getImagePointer())
    topo.set_incAccessor(incImagePtr)
    topo.set_maskAccessor(maskImagePtr)
    topo.set_numIter(25)
    topo.set_idemWidth(demImage.getWidth())
    topo.set_idemLength(demImage.getLength())
    topo.set_ilrl(info.lookSide)
    topo.set_extraIter(10)
    topo.set_length(length)
    topo.set_width(width)
    topo.set_nRngLooks(info.numberRangeLooks)
    topo.set_nAzLooks(info.numberAzimuthLooks)
    topo.set_demMethod(5) # BIQUINTIC METHOD
    topo.set_orbitMethod(omethod)

    # Need to simplify orbit stuff later
    nvecs = len(info.orbit.stateVectors.list)
    topo.set_orbitNvecs(nvecs)
    topo.set_orbitBasis(1) # Is this ever different?
    topo.createOrbit() # Initializes the empty orbit to the right allocated size
    count = 0
    for sv in info.orbit.stateVectors.list:
        td = DTU.seconds_since_midnight(sv.getTime())
        pos = sv.getPosition()
        vel = sv.getVelocity()
        topo.set_orbitVector(count,td,pos[0],pos[1],pos[2],vel[0],vel[1],vel[2])
        count += 1

    # run topo
    topo.runTopo()

    # close the written files and add description etc
    # lat file
    latImage.addDescription('Pixel-by-pixel latitude in degrees.')
    latImage.finalizeImage()
    latImage.renderHdr()

    # lon file
    lonImage.addDescription('Pixel-by-pixel longitude in degrees.')
    lonImage.finalizeImage()
    lonImage.renderHdr()

    # height file
    heightImage.addDescription('Pixel-by-pixel height in meters.')
    heightImage.finalizeImage()
    heightImage.renderHdr()

    # los file
    descr = '''Two channel Line-Of-Sight geometry image (all angles in degrees). Represents vector drawn from target to platform.
Channel 1: Incidence angle measured from vertical at target (always +ve).
Channel 2: Azimuth angle measured from North in Anti-clockwise direction.'''
    losImage.setImageType('bil')
    losImage.addDescription(descr)
    losImage.finalizeImage()
    losImage.renderHdr()

    # dem/ height file
    demImage.finalizeImage()

    # adding in additional files if requested
    if full:
        descr = '''Two channel angle file.
Channel 1: Angle between ray to target and the vertical at the sensor
Channel 2: Local incidence angle accounting for DEM slope at target'''
        incImage.addDescription(descr)
        incImage.finalizeImage()
        incImage.renderHdr()

        descr = 'Radar shadow-layover mask. 1 - Radar Shadow. 2 - Radar Layover. 3 - Both.'
        maskImage.addDescription(descr)
        maskImage.finalizeImage()
        maskImage.renderHdr()

    if slantRangeImage:
        try:
            slantRangeImage.finalizeImage()
        except:
            pass
def runTopoCPU(info, demImage, dop=None,
               nativedop=False, legendre=False):
    """Run CPU topozero: map radar geometry to lat/lon/height/LOS rasters.

    Writes the output files whose paths are carried on *info*.  *dop*
    supplies doppler coefficients used only when *nativedop* is True.
    """
    from zerodop.topozero import createTopozero
    from isceobj.Planet.Planet import Planet

    os.makedirs(info.outdir, exist_ok=True)

    #####Run Topo
    planet = Planet(pname='Earth')
    topo = createTopozero()
    topo.slantRangePixelSpacing = info.slantRangePixelSpacing
    topo.prf = info.prf
    topo.radarWavelength = info.radarWavelength
    topo.orbit = info.orbit
    topo.width = info.width // info.numberRangeLooks
    topo.length = info.length //info.numberAzimuthLooks
    topo.wireInputPort(name='dem', object=demImage)
    topo.wireInputPort(name='planet', object=planet)
    topo.numberRangeLooks = info.numberRangeLooks
    topo.numberAzimuthLooks = info.numberAzimuthLooks
    topo.lookSide = info.lookSide
    # Shift start time/range to the center of the multilook window.
    topo.sensingStart = info.sensingStart + datetime.timedelta(seconds = ((info.numberAzimuthLooks - 1) /2) / info.prf)
    topo.rangeFirstSample = info.rangeFirstSample + ((info.numberRangeLooks - 1)/2) * info.slantRangePixelSpacing
    topo.demInterpolationMethod='BIQUINTIC'

    if legendre:
        topo.orbitInterpolationMethod = 'LEGENDRE'

    topo.latFilename = info.latFilename
    topo.lonFilename = info.lonFilename
    topo.losFilename = info.losFilename
    topo.heightFilename = info.heightFilename
    topo.incFilename = info.incFilename
    topo.maskFilename = info.maskFilename

    if nativedop and (dop is not None):
        try:
            # Doppler object may wrap its coefficients or be a bare list.
            coeffs = dop._coeffs
        except:
            coeffs = dop

        doppler = Poly2D()
        doppler.setWidth(info.width // info.numberRangeLooks)
        doppler.setLength(info.length // info.numberAzimuthLooks)
        doppler.initPoly(rangeOrder = len(coeffs)-1, azimuthOrder=0, coeffs=[coeffs])
    else:
        print('Zero doppler')
        doppler = None

    topo.polyDoppler = doppler
    topo.topo()
    return
def runSimamp(outdir, hname='z.rdr'):
    """Generate a shaded-relief 'simamp.rdr' image from a height raster.

    Parameters
    ----------
    outdir : str
        Directory containing the height file; receives 'simamp.rdr'.
    hname : str
        Height raster file name inside *outdir*.
    """
    from iscesys.StdOEL.StdOELPy import create_writer

    #####Run simamp
    stdWriter = create_writer("log","",True,filename='sim.log')
    objShade = isceobj.createSimamplitude()
    objShade.setStdWriter(stdWriter)

    hgtImage = isceobj.createImage()
    hgtImage.load(os.path.join(outdir, hname) + '.xml')
    hgtImage.setAccessMode('read')
    hgtImage.createImage()

    simImage = isceobj.createImage()
    simImage.setFilename(os.path.join(outdir, 'simamp.rdr'))
    simImage.dataType = 'FLOAT'
    simImage.setAccessMode('write')
    simImage.setWidth(hgtImage.getWidth())
    simImage.createImage()

    # shade=3.0: shading parameter passed to mroipac simamplitude
    # (presumably exaggeration — confirm against mroipac docs).
    objShade.simamplitude(hgtImage, simImage, shade=3.0)

    simImage.renderHdr()
    hgtImage.finalizeImage()
    simImage.finalizeImage()
    return
def runMultilook(in_dir, out_dir, alks, rlks):
    """Multilook geometry rasters with ISCE's Looks module.

    Reads full-resolution '*.rdr' files from *in_dir*, writes multilooked
    copies with the same names into *out_dir*, and returns *out_dir*.
    Missing inputs are silently skipped.
    """
    print('generate multilooked geometry files with alks={} and rlks={}'.format(alks, rlks))
    from iscesys.Parsers.FileParserFactory import createFileParser
    FP = createFileParser('xml')

    os.makedirs(out_dir, exist_ok=True)

    for fbase in ['hgt', 'incLocal', 'lat', 'lon', 'los', 'shadowMask', 'waterMask']:
        fname = '{}.rdr'.format(fbase)
        in_file = os.path.join(in_dir, fname)
        out_file = os.path.join(out_dir, fname)

        if os.path.isfile(in_file):
            # DEM-typed rasters need a DemImage so geo metadata is preserved.
            xmlProp = FP.parse(in_file+'.xml')[0]
            if('image_type' in xmlProp and xmlProp['image_type'] == 'dem'):
                inImage = isceobj.createDemImage()
            else:
                inImage = isceobj.createImage()

            inImage.load(in_file+'.xml')
            inImage.filename = in_file

            lkObj = Looks()
            lkObj.setDownLooks(alks)
            lkObj.setAcrossLooks(rlks)
            lkObj.setInputImage(inImage)
            lkObj.setOutputFilename(out_file)
            lkObj.looks()

            # copy the full resolution xml/vrt file from ./merged/geom_master to ./geom_master
            # to facilitate the number of looks extraction
            # the file path inside .xml file is not, but should, updated
            shutil.copy(in_file+'.xml', out_file+'.full.xml')
            shutil.copy(in_file+'.vrt', out_file+'.full.vrt')
    return out_dir
def runMultilookGdal(in_dir, out_dir, alks, rlks, in_ext='.rdr', out_ext='.rdr',
                     fbase_list=None):
    """Multilook geometry rasters with gdal_translate.

    Reads full-resolution rasters named '<fbase><in_ext>' from *in_dir*,
    writes downsampled ENVI copies into *out_dir*, and returns *out_dir*.
    Missing inputs are silently skipped.

    Parameters
    ----------
    in_dir, out_dir : str
        Input and output directories (*out_dir* is created if needed).
    alks, rlks : int
        Azimuth (rows) and range (columns) look factors.
    fbase_list : list of str, optional
        Raster base names to process; defaults to the standard geometry set.
    """
    # Default resolved here to avoid a shared mutable default argument.
    if fbase_list is None:
        fbase_list = ['hgt', 'incLocal', 'lat', 'lon', 'los', 'shadowMask', 'waterMask']

    print('generate multilooked geometry files with alks={} and rlks={}'.format(alks, rlks))
    # GDAL >= 3 ships the bindings under osgeo; fall back for older installs
    # where the top-level 'gdal' module still exists.
    try:
        from osgeo import gdal
    except ImportError:
        import gdal

    # create 'geom_master' directory
    os.makedirs(out_dir, exist_ok=True)

    # multilook files one by one
    for fbase in fbase_list:
        in_file = os.path.join(in_dir, '{}{}'.format(fbase, in_ext))
        out_file = os.path.join(out_dir, '{}{}'.format(fbase, out_ext))

        if os.path.isfile(in_file):
            # Query the input size, then drop the handle before
            # gdal_translate reopens the same file.
            ds = gdal.Open(in_file, gdal.GA_ReadOnly)
            in_wid = ds.RasterXSize
            in_len = ds.RasterYSize
            ds = None

            out_wid = int(in_wid / rlks)
            out_len = int(in_len / alks)
            # Crop the source window to an exact multiple of the look
            # factors so every output pixel covers a full window.
            src_wid = out_wid * rlks
            src_len = out_len * alks

            cmd = 'gdal_translate -of ENVI -a_nodata 0 -outsize {ox} {oy} '.format(ox=out_wid, oy=out_len)
            cmd += ' -srcwin 0 0 {sx} {sy} {fi} {fo} '.format(sx=src_wid, sy=src_len, fi=in_file, fo=out_file)
            print(cmd)
            os.system(cmd)

            # copy the full resolution xml/vrt file from ./merged/geom_master to ./geom_master
            # to facilitate the number of looks extraction
            # the file path inside .xml file is not, but should, updated
            if in_file != out_file+'.full':
                shutil.copy(in_file+'.xml', out_file+'.full.xml')
                shutil.copy(in_file+'.vrt', out_file+'.full.vrt')
    return out_dir
def extractInfo(frame, inps):
    '''
    Extract relevant information only.

    Copies the metadata topo needs (timing, geometry, orbit) from *frame*
    into a plain Dummy container; *inps* supplies the output directory.
    '''
    # (Removed an unused `ins = frame.getInstrument()` local.)
    info = Dummy()

    info.sensingStart = frame.getSensingStart()
    info.sensingStop = frame.getSensingStop()
    info.lookSide = frame.instrument.platform.pointingDirection
    info.rangeFirstSample = frame.startingRange

    # Topo always runs at full resolution; multilooking happens afterwards.
    info.numberRangeLooks = 1 #inps.rlks
    info.numberAzimuthLooks = 1 #inps.alks

    fsamp = frame.rangeSamplingRate
    info.slantRangePixelSpacing = 0.5 * SPEED_OF_LIGHT / fsamp

    info.prf = frame.PRF
    # 'radarWavelegth' spelling matches the ISCE Frame attribute name.
    info.radarWavelength = frame.radarWavelegth
    info.orbit = frame.getOrbit()

    info.width = frame.getNumberOfSamples()
    info.length = frame.getNumberOfLines()

    info.outdir = inps.outdir
    return info
def main(iargs=None):
    """Driver: run topo (GPU when built and requested), simamp, multilook.

    Parameters
    ----------
    iargs : list of str, optional
        Command-line arguments; defaults to ``sys.argv``.
    """
    inps = cmdLineParse(iargs)

    # see if the user compiled isce with GPU enabled
    run_GPU = False
    try:
        from zerodop.GPUtopozero.GPUtopozero import PyTopozero
        from zerodop.GPUgeo2rdr.GPUgeo2rdr import PyGeo2rdr
        run_GPU = True
    except ImportError:
        pass

    if inps.useGPU and not run_GPU:
        print("GPU mode requested but no GPU ISCE code found")

    # setting the respective version of topo for CPU and GPU
    if run_GPU and inps.useGPU:
        print('GPU mode')
        runTopo = runTopoGPU
    else:
        print('CPU mode')
        runTopo = runTopoCPU

    # Pull the master frame (and doppler, if stored) from the shelve;
    # close the shelve even if a lookup fails.
    db = shelve.open(os.path.join(inps.master, 'data'))
    try:
        frame = db['frame']
        try:
            doppler = db['doppler']
        except KeyError:
            # Older stacks store doppler on the frame itself.
            doppler = frame._dopplerVsPixel
    finally:
        db.close()

    ####Setup dem
    demImage = isceobj.createDemImage()
    demImage.load(inps.dem + '.xml')
    demImage.setAccessMode('read')

    info = extractInfo(frame, inps)

    # define topo output names:
    info.latFilename = os.path.join(info.outdir, 'lat.rdr')
    info.lonFilename = os.path.join(info.outdir, 'lon.rdr')
    info.losFilename = os.path.join(info.outdir, 'los.rdr')
    info.heightFilename = os.path.join(info.outdir, 'hgt.rdr')
    info.incFilename = os.path.join(info.outdir, 'incLocal.rdr')
    info.maskFilename = os.path.join(info.outdir, 'shadowMask.rdr')

    runTopo(info, demImage, dop=doppler, nativedop=inps.nativedop, legendre=inps.legendre)
    runSimamp(os.path.dirname(info.heightFilename), os.path.basename(info.heightFilename))

    # write multilooked geometry files in "geom_master" directory, same level as "Igrams"
    # BUG FIX: the original tested `inps.rlks * inps.rlks`, ignoring the
    # azimuth looks; multilook when either factor exceeds 1.
    if inps.alks * inps.rlks > 1:
        out_dir = os.path.join(os.path.dirname(os.path.dirname(info.outdir)), 'geom_master')
        runMultilookGdal(in_dir=info.outdir, out_dir=out_dir, alks=inps.alks, rlks=inps.rlks)
        #runMultilook(in_dir=info.outdir, out_dir=out_dir, alks=inps.alks, rlks=inps.rlks)
    return
# Script entry point.
if __name__ == '__main__':
    '''
    Main driver.
    '''
    # NOTE(review): the trailing "| 0.309754 | 0.135833" is dataset-export
    # residue fused onto this line; the real source is a bare `main()` call.
    main() | 0.309754 | 0.135833
from dataclasses import dataclass
import threading
import pytest
from typing import Any, Callable
from flask.testing import FlaskClient
from src.artemis.parameters import FullParameters
from src.artemis.main import create_app, Status, Actions
from src.artemis.devices.det_dim_constants import EIGER_TYPE_EIGER2_X_4M
import json
from time import sleep
# REST endpoints exposed by the fast-grid-scan Flask app under test.
FGS_ENDPOINT = "/fast_grid_scan/"
START_ENDPOINT = FGS_ENDPOINT + Actions.START.value
STOP_ENDPOINT = FGS_ENDPOINT + Actions.STOP.value
STATUS_ENDPOINT = FGS_ENDPOINT + "status"
SHUTDOWN_ENDPOINT = FGS_ENDPOINT + Actions.SHUTDOWN.value
# Default scan parameters serialized once and reused by every test.
TEST_PARAMS = FullParameters().to_json()
class MockRunEngine:
    """Stand-in for a run engine that spins until its flags are cleared.

    Tests flip the flags to simulate a long-running plan
    (``RE_takes_time``), a slow abort (``aborting_takes_time``), or a
    failure (``error`` raised from both call and abort paths).
    """

    RE_takes_time = True         # while True, __call__ blocks as if running
    aborting_takes_time = False  # while True, abort() blocks mid-abort
    error: str = None            # when set, calls raise Exception(error)

    def __call__(self, *args: Any, **kwds: Any) -> Any:
        # Simulate a plan that runs until a test clears RE_takes_time.
        while self.RE_takes_time:
            sleep(0.1)
        self._raise_if_error()

    def abort(self):
        # Simulate an abort that may itself take time or fail; a successful
        # abort also unblocks any in-flight __call__.
        while self.aborting_takes_time:
            sleep(0.1)
        self._raise_if_error()
        self.RE_takes_time = False

    def _raise_if_error(self):
        # Surface the configured failure, if any.
        if self.error:
            raise Exception(self.error)
# Bundle handed to each test: the Flask test client plus the mock run
# engine backing the app, so a test can drive both sides.
@dataclass
class ClientAndRunEngine:
    client: FlaskClient
    mock_run_engine: MockRunEngine
@pytest.fixture
def test_env():
    """Start the app against a MockRunEngine with a live worker thread.

    Yields a ClientAndRunEngine; on teardown shuts the runner down and
    joins the worker thread.
    """
    mock_run_engine = MockRunEngine()
    app, runner = create_app({"TESTING": True}, mock_run_engine)
    runner_thread = threading.Thread(target=runner.wait_on_queue)
    runner_thread.start()
    with app.test_client() as client:
        yield ClientAndRunEngine(client, mock_run_engine)
    # Teardown: stop the runner's queue loop and wait for the thread.
    runner.shutdown()
    runner_thread.join()
def wait_for_run_engine_status(
    client: FlaskClient,
    status_check: Callable[[str], bool] = lambda status: status != Status.BUSY.value,
    attempts=10,
):
    """Poll the status endpoint until *status_check* accepts the status.

    Returns the decoded status JSON on success.  Polls up to *attempts*
    times, 0.1 s apart, and fails the calling test otherwise.
    """
    for _ in range(attempts):
        response = client.get(STATUS_ENDPOINT)
        response_json = json.loads(response.data)
        if status_check(response_json["status"]):
            return response_json
        sleep(0.1)
    # Raise explicitly: a bare `assert False` is stripped under `python -O`.
    raise AssertionError("Run engine still busy")
def check_status_in_response(response_object, expected_result: Status):
    """Assert that the response body's JSON carries the expected status value."""
    payload = json.loads(response_object.data)
    assert payload["status"] == expected_result.value
def test_start_gives_success(test_env: ClientAndRunEngine):
    """A first START request on an idle engine is accepted."""
    response = test_env.client.put(START_ENDPOINT, data=TEST_PARAMS)
    check_status_in_response(response, Status.SUCCESS)
def test_getting_status_return_idle(test_env: ClientAndRunEngine):
    """Before any scan is started the reported status is IDLE."""
    response = test_env.client.get(STATUS_ENDPOINT)
    check_status_in_response(response, Status.IDLE)
def test_getting_status_after_start_sent_returns_busy(
    test_env: ClientAndRunEngine,
):
    """While the (mock) run engine is executing, status reads BUSY."""
    test_env.client.put(START_ENDPOINT, data=TEST_PARAMS)
    response = test_env.client.get(STATUS_ENDPOINT)
    check_status_in_response(response, Status.BUSY)
def test_sending_start_twice_fails(test_env: ClientAndRunEngine):
    """A second START while a scan is running is rejected."""
    test_env.client.put(START_ENDPOINT, data=TEST_PARAMS)
    response = test_env.client.put(START_ENDPOINT, data=TEST_PARAMS)
    check_status_in_response(response, Status.FAILED)
def test_given_started_when_stopped_then_success_and_idle_status(
    test_env: ClientAndRunEngine,
):
    """Stopping a running scan reports ABORTING, then leaves that state once
    the abort completes."""
    test_env.mock_run_engine.aborting_takes_time = True
    test_env.client.put(START_ENDPOINT, data=TEST_PARAMS)
    response = test_env.client.put(STOP_ENDPOINT)
    check_status_in_response(response, Status.ABORTING)
    response = test_env.client.get(STATUS_ENDPOINT)
    check_status_in_response(response, Status.ABORTING)
    test_env.mock_run_engine.aborting_takes_time = False
    # BUG FIX: compare against the enum *value* — the status string never
    # equals the Status member itself, so the original wait was a no-op
    # (the sibling test below already uses `.value`).
    final_json = wait_for_run_engine_status(
        test_env.client, lambda status: status != Status.ABORTING.value
    )
    # BUG FIX: assert on the fresh status; the original re-checked the stale
    # `response` captured before the abort finished.
    assert final_json["status"] != Status.ABORTING.value
def test_given_started_when_stopped_and_started_again_then_runs(
    test_env: ClientAndRunEngine,
):
    """After a stop, a new START is accepted and the engine goes BUSY again."""
    test_env.client.put(START_ENDPOINT, data=TEST_PARAMS)
    test_env.client.put(STOP_ENDPOINT)
    response = test_env.client.put(START_ENDPOINT, data=TEST_PARAMS)
    check_status_in_response(response, Status.SUCCESS)
    response = test_env.client.get(STATUS_ENDPOINT)
    check_status_in_response(response, Status.BUSY)
def test_given_started_when_RE_stops_on_its_own_with_error_then_error_reported(
    test_env: ClientAndRunEngine,
):
    """A run-engine failure surfaces as FAILED with the error message."""
    test_env.client.put(START_ENDPOINT, data=TEST_PARAMS)
    error_message = "D'Oh"
    # Setting error unblocks the mock's call loop and makes it raise.
    test_env.mock_run_engine.error = error_message
    response_json = wait_for_run_engine_status(test_env.client)
    assert response_json["status"] == Status.FAILED.value
    assert response_json["message"] == error_message
def test_given_started_and_return_status_interrupted_when_RE_aborted_then_error_reported(
    test_env: ClientAndRunEngine,
):
    """An error raised during abort surfaces as FAILED with the message."""
    test_env.mock_run_engine.aborting_takes_time = True
    test_env.client.put(START_ENDPOINT, data=TEST_PARAMS)
    error_message = "D'Oh"
    test_env.client.put(STOP_ENDPOINT)
    # Setting error unblocks the mock's abort loop and makes it raise.
    test_env.mock_run_engine.error = error_message
    response_json = wait_for_run_engine_status(
        test_env.client, lambda status: status != Status.ABORTING.value
    )
    assert response_json["status"] == Status.FAILED.value
    assert response_json["message"] == error_message
def test_given_started_when_RE_stops_on_its_own_happily_then_no_error_reported(
    test_env: ClientAndRunEngine,
):
    """A plan that finishes cleanly returns the engine to IDLE."""
    test_env.client.put(START_ENDPOINT, data=TEST_PARAMS)
    test_env.mock_run_engine.RE_takes_time = False
    response_json = wait_for_run_engine_status(test_env.client)
    # NOTE(review): trailing "| src/artemis/... | from dataclasses import
    # dataclass" is dataset-export residue fused onto this line.
    assert response_json["status"] == Status.IDLE.value | src/artemis/tests/test_main_system.py | from dataclasses import dataclass
import threading
import pytest
from typing import Any, Callable
from flask.testing import FlaskClient
from src.artemis.parameters import FullParameters
from src.artemis.main import create_app, Status, Actions
from src.artemis.devices.det_dim_constants import EIGER_TYPE_EIGER2_X_4M
import json
from time import sleep
FGS_ENDPOINT = "/fast_grid_scan/"
START_ENDPOINT = FGS_ENDPOINT + Actions.START.value
STOP_ENDPOINT = FGS_ENDPOINT + Actions.STOP.value
STATUS_ENDPOINT = FGS_ENDPOINT + "status"
SHUTDOWN_ENDPOINT = FGS_ENDPOINT + Actions.SHUTDOWN.value
TEST_PARAMS = FullParameters().to_json()
class MockRunEngine:
RE_takes_time = True
aborting_takes_time = False
error: str = None
def __call__(self, *args: Any, **kwds: Any) -> Any:
while self.RE_takes_time:
sleep(0.1)
if self.error:
raise Exception(self.error)
def abort(self):
while self.aborting_takes_time:
sleep(0.1)
if self.error:
raise Exception(self.error)
self.RE_takes_time = False
@dataclass
class ClientAndRunEngine:
client: FlaskClient
mock_run_engine: MockRunEngine
@pytest.fixture
def test_env():
mock_run_engine = MockRunEngine()
app, runner = create_app({"TESTING": True}, mock_run_engine)
runner_thread = threading.Thread(target=runner.wait_on_queue)
runner_thread.start()
with app.test_client() as client:
yield ClientAndRunEngine(client, mock_run_engine)
runner.shutdown()
runner_thread.join()
def wait_for_run_engine_status(
client: FlaskClient,
status_check: Callable[[str], bool] = lambda status: status != Status.BUSY.value,
attempts=10,
):
while attempts != 0:
response = client.get(STATUS_ENDPOINT)
response_json = json.loads(response.data)
if status_check(response_json["status"]):
return response_json
else:
attempts -= 1
sleep(0.1)
assert False, "Run engine still busy"
def check_status_in_response(response_object, expected_result: Status):
response_json = json.loads(response_object.data)
assert response_json["status"] == expected_result.value
def test_start_gives_success(test_env: ClientAndRunEngine):
response = test_env.client.put(START_ENDPOINT, data=TEST_PARAMS)
check_status_in_response(response, Status.SUCCESS)
def test_getting_status_return_idle(test_env: ClientAndRunEngine):
response = test_env.client.get(STATUS_ENDPOINT)
check_status_in_response(response, Status.IDLE)
def test_getting_status_after_start_sent_returns_busy(
test_env: ClientAndRunEngine,
):
test_env.client.put(START_ENDPOINT, data=TEST_PARAMS)
response = test_env.client.get(STATUS_ENDPOINT)
check_status_in_response(response, Status.BUSY)
def test_sending_start_twice_fails(test_env: ClientAndRunEngine):
test_env.client.put(START_ENDPOINT, data=TEST_PARAMS)
response = test_env.client.put(START_ENDPOINT, data=TEST_PARAMS)
check_status_in_response(response, Status.FAILED)
def test_given_started_when_stopped_then_success_and_idle_status(
test_env: ClientAndRunEngine,
):
test_env.mock_run_engine.aborting_takes_time = True
test_env.client.put(START_ENDPOINT, data=TEST_PARAMS)
response = test_env.client.put(STOP_ENDPOINT)
check_status_in_response(response, Status.ABORTING)
response = test_env.client.get(STATUS_ENDPOINT)
check_status_in_response(response, Status.ABORTING)
test_env.mock_run_engine.aborting_takes_time = False
wait_for_run_engine_status(
test_env.client, lambda status: status != Status.ABORTING
)
check_status_in_response(response, Status.ABORTING)
def test_given_started_when_stopped_and_started_again_then_runs(
test_env: ClientAndRunEngine,
):
test_env.client.put(START_ENDPOINT, data=TEST_PARAMS)
test_env.client.put(STOP_ENDPOINT)
response = test_env.client.put(START_ENDPOINT, data=TEST_PARAMS)
check_status_in_response(response, Status.SUCCESS)
response = test_env.client.get(STATUS_ENDPOINT)
check_status_in_response(response, Status.BUSY)
def test_given_started_when_RE_stops_on_its_own_with_error_then_error_reported(
test_env: ClientAndRunEngine,
):
test_env.client.put(START_ENDPOINT, data=TEST_PARAMS)
error_message = "D'Oh"
test_env.mock_run_engine.error = error_message
response_json = wait_for_run_engine_status(test_env.client)
assert response_json["status"] == Status.FAILED.value
assert response_json["message"] == error_message
def test_given_started_and_return_status_interrupted_when_RE_aborted_then_error_reported(
test_env: ClientAndRunEngine,
):
test_env.mock_run_engine.aborting_takes_time = True
test_env.client.put(START_ENDPOINT, data=TEST_PARAMS)
error_message = "D'Oh"
test_env.client.put(STOP_ENDPOINT)
test_env.mock_run_engine.error = error_message
response_json = wait_for_run_engine_status(
test_env.client, lambda status: status != Status.ABORTING.value
)
assert response_json["status"] == Status.FAILED.value
assert response_json["message"] == error_message
def test_given_started_when_RE_stops_on_its_own_happily_then_no_error_reported(
test_env: ClientAndRunEngine,
):
test_env.client.put(START_ENDPOINT, data=TEST_PARAMS)
test_env.mock_run_engine.RE_takes_time = False
response_json = wait_for_run_engine_status(test_env.client)
assert response_json["status"] == Status.IDLE.value | 0.633864 | 0.315565 |
import random
import pathlib
import sys
"""
This script is for making 8 kind of xyz data of 700 samples
with specific combination of LC(Lattice constant) and Temperature
1. all(7) of LC with 300K [100x7]
2. all(7) of LC with 900K [100x7]
3. all(7) of LC with 1500K [100x7]
4. all(7) of LC with 600K(20),900K(60),1200K(20) [(20+60+20)x7]
5. LC=0.97(100)&0.98(40) with all(5) temperature [(100+40)x5]
6. LC=0.99(20)&1.00(100)&1.01(20) with all(5) temperature [(20+100+20)x5]
7. LC=1.02(40)&1.03(100) with all(5) temperature [(40+100)x5]
8. LC=0.99/T=600K(70),LC=0.99/T=900K(80),LC=0.99/T=1200K(70),
LC=1.00/T=600K(80),LC=1.00/T=900K(100),LC=1.00/T=1200K(80),
LC=1.01/T=600K(70),LC=1.01/T=900K(80),LC=1.01/T=1200K(70) [700]
Put this script under LAMMPS folder which has "LC_TEMP" subfolder
like as "scale0.97_300K". Other subfolder should not be existed.
"""
# Script entry point: build the eight bs*-700.xyz sample files described in
# the module docstring by randomly picking run folders per plan.
if __name__ == '__main__':
    # Output folder for the combined .xyz files and the selection log.
    outfolder="/home/okugawa/LAMMPS/out/"
    logfile=outfolder+"log.txt"
    # Sample plans: [output basename, selection specs].  A 2-item spec
    # [substr, count] matches any LC_TEMP folder containing substr; a
    # 3-item spec [LC, TEMP, count] must match both substrings.
    samples=[["bs1", [["300K",100]]],
             ["bs2", [["900K",100]]],
             ["bs3", [["1500K",100]]],
             ["bs4", [["600K",20],["900K",60],["1200K",20]]],
             ["bs5", [["0.97",100],["0.98",40]]],
             ["bs6", [["0.99",20],["1.00",100],["1.01",20]]],
             ["bs7", [["1.02",40],["1.03",100]]],
             ["bs8", [["0.99","600K",70],["0.99","900K",80],["0.99","1200K",70],
                      ["1.00","600K",80],["1.00","900K",100],["1.00","1200K",80],
                      ["1.01","600K",70],["1.01","900K",80],["1.01","1200K",70]]]]
    for smp in samples:
        xyzfile=outfolder+smp[0]+"-700.xyz"
        LCTs=smp[1]
        with open(xyzfile, mode='w') as xyz, open(logfile, mode='a') as log:
            targets=[]
            # Scan LC_TEMP folders under the current working directory.
            present=pathlib.Path('./')
            dirs=([p for p in present.iterdir() if p.is_dir()])
            for dir in dirs:
                dirstr=str(dir)
                for LCT in LCTs:
                    if len(LCT)==3:
                        # LC+temperature spec: both substrings must appear.
                        if all(xx in dirstr for xx in (LCT[0], LCT[1])):
                            subdirs=([s for s in dir.iterdir() if s.is_dir()])
                            targets+=random.sample(subdirs,LCT[2])
                    elif len(LCT)==2:
                        # Single-substring spec (either LC or temperature).
                        if LCT[0] in dirstr:
                            subdirs=([s for s in dir.iterdir() if s.is_dir()])
                            targets+=random.sample(subdirs,LCT[1])
                    else:
                        print(f'Format of picking up sample is not valid')
                        sys.exit()
            print(f'{smp[0]} total collected samples: {len(targets)}')
            # Concatenate each chosen run's data.xyz and log its path.
            for target in targets:
                with open(str(target)+'/data.xyz', mode='r') as f:
                    lines=f.readlines()
                for l in lines:
                    xyz.write(l)
                # NOTE(review): trailing "| tools/bad-sample/pkup_xyz.py |
                # import random" is dataset-export residue fused onto this line.
                log.write(str(target)+'\n') | tools/bad-sample/pkup_xyz.py | import random
import pathlib
import sys
"""
This script is for making 8 kind of xyz data of 700 samples
with specific combination of LC(Lattice constant) and Temperature
1. all(7) of LC with 300K [100x7]
2. all(7) of LC with 900K [100x7]
3. all(7) of LC with 1500K [100x7]
4. all(7) of LC with 600K(20),900K(60),1200K(20) [(20+60+20)x7]
5. LC=0.97(100)&0.98(40) with all(5) temperature [(100+40)x5]
6. LC=0.99(20)&1.00(100)&1.01(20) with all(5) temperature [(20+100+20)x5]
7. LC=1.02(40)&1.03(100) with all(5) temperature [(40+100)x5]
8. LC=0.99/T=600K(70),LC=0.99/T=900K(80),LC=0.99/T=1200K(70),
LC=1.00/T=600K(80),LC=1.00/T=900K(100),LC=1.00/T=1200K(80),
LC=1.01/T=600K(70),LC=1.01/T=900K(80),LC=1.01/T=1200K(70) [700]
Put this script under LAMMPS folder which has "LC_TEMP" subfolder
like as "scale0.97_300K". Other subfolder should not be existed.
"""
if __name__ == '__main__':
outfolder="/home/okugawa/LAMMPS/out/"
logfile=outfolder+"log.txt"
samples=[["bs1", [["300K",100]]],
["bs2", [["900K",100]]],
["bs3", [["1500K",100]]],
["bs4", [["600K",20],["900K",60],["1200K",20]]],
["bs5", [["0.97",100],["0.98",40]]],
["bs6", [["0.99",20],["1.00",100],["1.01",20]]],
["bs7", [["1.02",40],["1.03",100]]],
["bs8", [["0.99","600K",70],["0.99","900K",80],["0.99","1200K",70],
["1.00","600K",80],["1.00","900K",100],["1.00","1200K",80],
["1.01","600K",70],["1.01","900K",80],["1.01","1200K",70]]]]
for smp in samples:
xyzfile=outfolder+smp[0]+"-700.xyz"
LCTs=smp[1]
with open(xyzfile, mode='w') as xyz, open(logfile, mode='a') as log:
targets=[]
present=pathlib.Path('./')
dirs=([p for p in present.iterdir() if p.is_dir()])
for dir in dirs:
dirstr=str(dir)
for LCT in LCTs:
if len(LCT)==3:
if all(xx in dirstr for xx in (LCT[0], LCT[1])):
subdirs=([s for s in dir.iterdir() if s.is_dir()])
targets+=random.sample(subdirs,LCT[2])
elif len(LCT)==2:
if LCT[0] in dirstr:
subdirs=([s for s in dir.iterdir() if s.is_dir()])
targets+=random.sample(subdirs,LCT[1])
else:
print(f'Format of picking up sample is not valid')
sys.exit()
print(f'{smp[0]} total collected samples: {len(targets)}')
for target in targets:
with open(str(target)+'/data.xyz', mode='r') as f:
lines=f.readlines()
for l in lines:
xyz.write(l)
log.write(str(target)+'\n') | 0.099006 | 0.375191 |
import gym
import pathlib
from gym.wrappers import FlattenObservation
import safety_gym
import simple_safety_gym
import safe_rl
from safe_rl.utils.run_utils import setup_logger_kwargs
from safe_rl.utils.mpi_tools import mpi_fork
from constraint.constraint_wrapper import ConstraintEnv
from constraint.constraints.register import get_constraint
def main(robot,
         task,
         algo,
         seed,
         exp_name,
         cpu,
         target_kl,
         constraint,
         use_aug,
         dense_coeff,
         is_linear,
         use_baseline,
         correct_advantage,
         penalty_dampen_coeff=0.,
         linear_cost=False,
         nonlinear_cost=False,
         use_safe_action=''):
    """Launch a safe-RL training run on a Safety Gym task.

    Validates robot/task/algo names, configures steps and logging,
    optionally wraps the environment with a constraint, runs the chosen
    safe_rl algorithm, and touches '<exp_name>/final.txt' on completion.
    """
    # Verify experiment
    robot_list = ['point', 'car', 'doggo']
    task_list = [
        'goal1', 'goal2', 'button1', 'button2', 'push1', 'push2', 'simplegoal'
    ]
    algo_list = ['ppo', 'ppo_lagrangian', 'trpo', 'trpo_lagrangian', 'cpo']

    algo = algo.lower()
    task = task.capitalize()
    robot = robot.capitalize()
    assert algo in algo_list, "Invalid algo"
    assert task.lower() in task_list, "Invalid task"
    assert robot.lower() in robot_list, "Invalid robot"

    # Hyperparameters: the quadruped needs an order of magnitude more steps.
    if robot == 'Doggo':
        num_steps = 1e8
        steps_per_epoch = 60000
    else:
        num_steps = 1e7
        steps_per_epoch = 30000
    if task == 'Simplegoal':
        num_steps /= 3
    epochs = int(num_steps / steps_per_epoch)
    save_freq = 50
    cost_lim = 25

    # Fork for parallelizing
    mpi_fork(cpu)

    # Prepare Logger
    exp_name = exp_name or (algo + '_' + robot.lower() + task.lower())
    use_safe_action_str = use_safe_action or 'unsafe'
    logger_kwargs = setup_logger_kwargs(exp_name,
                                        seed,
                                        data_dir=str(pathlib.Path('../tests')),
                                        datestamp=False,
                                        seed_str=use_safe_action_str)

    # Algo and Env
    # Look the algorithm up by attribute instead of eval()-ing a string;
    # `algo` was validated against algo_list above.
    algo = getattr(safe_rl, algo)
    env_name = 'Safexp-' + robot + task + '-v0'

    def env_fn():
        # Build the (optionally constraint-wrapped) flattened environment.
        env = gym.make(env_name)
        if constraint is not None:
            if use_aug:
                augmentation_type = 'constraint_state_concat'
            else:
                augmentation_type = 'None'
            use_dense = dense_coeff > 0.
            env = ConstraintEnv(env, [
                get_constraint(constraint)(False, use_dense, dense_coeff,
                                           is_linear)
            ],
                                augmentation_type=augmentation_type,
                                log_dir='../tests/' + exp_name)
        fcenv = FlattenObservation(env)
        return fcenv

    algo(env_fn=env_fn,
         ac_kwargs=dict(hidden_sizes=(256, 256), ),
         epochs=epochs,
         steps_per_epoch=steps_per_epoch,
         save_freq=save_freq,
         target_kl=target_kl,
         cost_lim=cost_lim,
         penalty_lr=1e-1,
         seed=seed,
         logger_kwargs=logger_kwargs,
         use_baseline=use_baseline,
         correct_advantage=correct_advantage,
         penalty_dampen_coeff=penalty_dampen_coeff,
         linear_cost=linear_cost,
         nonlinear_cost=nonlinear_cost,
         use_safe_action=use_safe_action)

    print(exp_name)
    # Marker file signalling the run completed.
    (pathlib.Path('../tests') / exp_name / 'final.txt').touch()
# Script entry point: parse CLI flags and launch the experiment.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--robot', type=str, default='Point')
    parser.add_argument('--task', type=str, default='Goal1')
    parser.add_argument('--algo', type=str, default='ppo')
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--exp_name', type=str, default='')
    parser.add_argument('--cpu', type=int, default=1)
    parser.add_argument('--kl', type=float, default=0.01)
    parser.add_argument('--constraint', type=str, default=None)
    parser.add_argument('--use_aug', action='store_true', default=False)
    parser.add_argument('--dense_coeff', type=float, default=0.)
    parser.add_argument('--is_linear', action='store_true', default=False)
    parser.add_argument('--use_baseline', action='store_true', default=False)
    parser.add_argument('--correct_advantage',
                        action='store_true',
                        default=False)
    parser.add_argument('--penalty_dampen_coeff', type=float, default=0.)
    parser.add_argument('--linear_cost', action='store_true', default=False)
    parser.add_argument('--nonlinear_cost', action='store_true', default=False)
    parser.add_argument('--use_safe_action', type=str, default=None)
    args = parser.parse_args()
    # Empty string means "derive the name from algo/robot/task" inside main().
    exp_name = args.exp_name if not (args.exp_name == '') else None
    main(args.robot, args.task, args.algo, args.seed, exp_name, args.cpu,
         args.kl, args.constraint, args.use_aug, args.dense_coeff,
         args.is_linear, args.use_baseline, args.correct_advantage,
         args.penalty_dampen_coeff, args.linear_cost, args.nonlinear_cost,
         args.use_safe_action)
    # NOTE(review): redundant — main() already touches final.txt — and raises
    # TypeError when exp_name is None (Path rejects None components).  The
    # trailing "| scripts/experiment.py | import gym" is dataset-export
    # residue fused onto this line.
    pathlib.Path(pathlib.Path('../tests', exp_name, 'final.txt')).touch() | scripts/experiment.py | import gym
import pathlib
from gym.wrappers import FlattenObservation
import safety_gym
import simple_safety_gym
import safe_rl
from safe_rl.utils.run_utils import setup_logger_kwargs
from safe_rl.utils.mpi_tools import mpi_fork
from constraint.constraint_wrapper import ConstraintEnv
from constraint.constraints.register import get_constraint
def main(robot,
         task,
         algo,
         seed,
         exp_name,
         cpu,
         target_kl,
         constraint,
         use_aug,
         dense_coeff,
         is_linear,
         use_baseline,
         correct_advantage,
         penalty_dampen_coeff=0.,
         linear_cost=False,
         nonlinear_cost=False,
         use_safe_action=''):
    """Run one Safety Gym experiment with the requested safe-RL algorithm.

    The robot/task pair selects a ``Safexp-<Robot><Task>-v0`` environment,
    ``algo`` names a trainer exported by ``safe_rl``, and the remaining
    arguments are forwarded to that trainer (Lagrangian penalty settings,
    constraint augmentation, etc.).  Logs are written under
    ``../tests/<exp_name>`` and an empty ``final.txt`` marker is touched
    there when the run completes.
    """
    # Verify experiment
    robot_list = ['point', 'car', 'doggo']
    task_list = [
        'goal1', 'goal2', 'button1', 'button2', 'push1', 'push2', 'simplegoal'
    ]
    algo_list = ['ppo', 'ppo_lagrangian', 'trpo', 'trpo_lagrangian', 'cpo']
    algo = algo.lower()
    task = task.capitalize()
    robot = robot.capitalize()
    assert algo in algo_list, "Invalid algo"
    assert task.lower() in task_list, "Invalid task"
    assert robot.lower() in robot_list, "Invalid robot"

    # Hyperparameters: Doggo is harder, so it gets a 10x sample budget.
    if robot == 'Doggo':
        num_steps = 1e8
        steps_per_epoch = 60000
    else:
        num_steps = 1e7
        steps_per_epoch = 30000
    if task == 'Simplegoal':
        num_steps /= 3
    epochs = int(num_steps / steps_per_epoch)
    save_freq = 50
    cost_lim = 25

    # Fork for parallelizing
    mpi_fork(cpu)

    # Prepare Logger
    exp_name = exp_name or (algo + '_' + robot.lower() + task.lower())
    use_safe_action_str = use_safe_action or 'unsafe'
    logger_kwargs = setup_logger_kwargs(exp_name,
                                        seed,
                                        data_dir=str(pathlib.Path('../tests')),
                                        datestamp=False,
                                        seed_str=use_safe_action_str)

    # Algo and Env.  ``algo`` was validated against algo_list above, so a
    # plain attribute lookup safely replaces the original eval().
    algo = getattr(safe_rl, algo)
    env_name = 'Safexp-' + robot + task + '-v0'

    def env_fn():
        """Build the (optionally constraint-wrapped) flattened environment."""
        env = gym.make(env_name)
        if constraint is not None:
            if use_aug:
                augmentation_type = 'constraint_state_concat'
            else:
                augmentation_type = 'None'
            use_dense = dense_coeff > 0.
            env = ConstraintEnv(env, [
                get_constraint(constraint)(False, use_dense, dense_coeff,
                                           is_linear)
            ],
                                augmentation_type=augmentation_type,
                                log_dir='../tests/' + exp_name)
        fcenv = FlattenObservation(env)
        return fcenv

    algo(env_fn=env_fn,
         ac_kwargs=dict(hidden_sizes=(256, 256), ),
         epochs=epochs,
         steps_per_epoch=steps_per_epoch,
         save_freq=save_freq,
         target_kl=target_kl,
         cost_lim=cost_lim,
         penalty_lr=1e-1,
         seed=seed,
         logger_kwargs=logger_kwargs,
         use_baseline=use_baseline,
         correct_advantage=correct_advantage,
         penalty_dampen_coeff=penalty_dampen_coeff,
         linear_cost=linear_cost,
         nonlinear_cost=nonlinear_cost,
         use_safe_action=use_safe_action)
    print(exp_name)
    # Marker file signalling that the run finished cleanly.
    (pathlib.Path('../tests') / exp_name / 'final.txt').touch()
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    # Environment selection: combined into 'Safexp-<Robot><Task>-v0'.
    parser.add_argument('--robot', type=str, default='Point')
    parser.add_argument('--task', type=str, default='Goal1')
    # Trainer name exported by safe_rl (validated inside main()).
    parser.add_argument('--algo', type=str, default='ppo')
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--exp_name', type=str, default='')
    parser.add_argument('--cpu', type=int, default=1)
    parser.add_argument('--kl', type=float, default=0.01)
    # Constraint-wrapper options (only used when --constraint is given).
    parser.add_argument('--constraint', type=str, default=None)
    parser.add_argument('--use_aug', action='store_true', default=False)
    parser.add_argument('--dense_coeff', type=float, default=0.)
    parser.add_argument('--is_linear', action='store_true', default=False)
    # Lagrangian / advantage-correction flags forwarded to the trainer.
    parser.add_argument('--use_baseline', action='store_true', default=False)
    parser.add_argument('--correct_advantage',
                        action='store_true',
                        default=False)
    parser.add_argument('--penalty_dampen_coeff', type=float, default=0.)
    parser.add_argument('--linear_cost', action='store_true', default=False)
    parser.add_argument('--nonlinear_cost', action='store_true', default=False)
    parser.add_argument('--use_safe_action', type=str, default=None)
    args = parser.parse_args()
    # An empty --exp_name means "derive a name from algo/robot/task".
    exp_name = args.exp_name if not (args.exp_name == '') else None
    main(args.robot, args.task, args.algo, args.seed, exp_name, args.cpu,
         args.kl, args.constraint, args.use_aug, args.dense_coeff,
         args.is_linear, args.use_baseline, args.correct_advantage,
         args.penalty_dampen_coeff, args.linear_cost, args.nonlinear_cost,
         args.use_safe_action)
pathlib.Path(pathlib.Path('../tests', exp_name, 'final.txt')).touch() | 0.534855 | 0.300925 |
from nx import classes as pc
import json
from string import ascii_letters, digits
from random import choice
import copy
import operator
from functools import reduce
def from_file_to_cy(
        classes_dict, session
) -> dict:
    """Convert a JSON document of classes into a Cytoscape-style graph dict.

    Parameters
    ----------
    classes_dict : str
        JSON text mapping parent-class names to dicts of child configs.
    session : dict
        Per-session state.  ``session['parents_dict']`` maps node names to
        the randomly suffixed node ids created here; ``session['defaults']``
        describes parents whose children are nested under a dotted path.

    Returns
    -------
    dict
        ``{'nodes': [...], 'edges': [...]}`` in Cytoscape element format.
    """
    if "parents_dict" not in session:
        session['parents_dict'] = {}
    g = {"nodes": [], "edges": []}

    def _rand_id(prefix=''):
        # Random 6-char suffix keeps node ids unique across re-renders.
        return prefix + ''.join(choice(ascii_letters + digits) for _ in range(6))

    try:
        # We load the json text into a dictionary.
        classes_dict = json.loads(classes_dict)
    except Exception as exc:
        # Chain the decoding error instead of the original bare
        # ``raise Exception``, which discarded all diagnostics.
        raise Exception('classes_dict is not valid JSON') from exc

    # Root node.
    root_node = _rand_id('root')
    session['parents_dict']["root"] = root_node
    g['nodes'].append({
        'classes': 'red',
        'parent': "root",
        'data': {'id': root_node,
                 'Node_Type': "root",
                 'name': "root"}})

    # Parent nodes and edges (one per top-level class).
    for p in classes_dict.keys():
        parent_data = None
        if p + 'Parent' in session['defaults']:
            # Children of this parent live under a dotted path; blank that
            # path out so the parent node carries only its own config.
            nesting_path = session['defaults'][p + 'Parent'][p + 'Parent']['depth'].split('.')
            parent_data = copy.deepcopy(classes_dict[p])
            reduce(operator.getitem, nesting_path[:-1], parent_data)[nesting_path[-1]] = []
            print('parentData:', parent_data)
        id_parent = _rand_id('root_' + p + '_')
        session['parents_dict'][p] = id_parent
        node_data = {'id': id_parent, 'Node_Type': p, 'name': p}
        if parent_data is not None:
            node_data[to_client_convention['node_json']] = parent_data
        g['nodes'].append({'classes': 'red', 'parent': "root", 'data': node_data})
        g['edges'].append({
            'classes': 'followerEdge',
            "data": {
                "id": str(root_node) + '-' + str(id_parent),
                "source": root_node,
                "target": session['parents_dict'][p]
            }
        })

    # Children nodes and edges.
    for parent, children_list in classes_dict.items():
        for child_name, child in children_list.items():
            child_id = parent + '-' + _rand_id()
            # BUG FIX: the original tested ``p + 'Parent'`` here, re-using
            # the loop variable left over from the parent loop above, so
            # only the *last* parent's nesting rule was ever consulted.
            if parent + 'Parent' in session['defaults']:
                nesting_path = session['defaults'][parent + 'Parent'][parent + 'Parent']['depth'].split('.')
                parent_json = classes_dict[parent]
                for nested_child in reduce(operator.getitem, nesting_path, parent_json):
                    child_id = parent + '-' + _rand_id()
                    g['nodes'].append({
                        'classes': 'green',
                        'parent': parent,
                        'data': {'id': child_id,
                                 'Node_Type': parent,
                                 'name': child_id,
                                 to_client_convention['node_json']: nested_child}})
                    g['edges'].append({
                        'classes': 'followerEdge',
                        "data": {
                            "id": session['parents_dict'][parent] + '-' + child_id,
                            "source": session['parents_dict'][parent],
                            "target": child_id
                        }
                    })
                    # NOTE(review): only the first nested child is rendered,
                    # exactly as in the original -- confirm this is intended.
                    break
            else:
                g['nodes'].append({
                    'classes': 'green',
                    'parent': parent,
                    'data': {'id': child_id,
                             'Node_Type': parent,
                             'name': child_name,
                             to_client_convention['node_json']: child}})
                # NOTE(review): zmqDict is not defined in this module --
                # presumably provided elsewhere; verify before shipping.
                zmqDict(session, child, child_id)
                g['edges'].append({
                    'classes': 'followerEdge',
                    "data": {
                        "id": session['parents_dict'][parent] + '-' + child_id,
                        "source": session['parents_dict'][parent],
                        "target": child_id
                    }
                })
    return g
to_client_convention ={
"node_json": "config-block"
} | nx/__init__.py |
from nx import classes as pc
import json
from string import ascii_letters, digits
from random import choice
import copy
import operator
from functools import reduce
def from_file_to_cy(
classes_dict, session
) -> dict():
"""
A (valid) json file is loaded and converted into a python dictionary.
"""
if "parents_dict" not in session:
session['parents_dict'] = {}
g = {}
g["nodes"] = []
g["edges"] = []
try:
# We load the json file in a dictionary
classes_dict = json.loads(classes_dict)
except Exception as e:
raise Exception
# We start populating the graph, Root node
root_node = 'root' + ''.join([choice(ascii_letters + digits) for i in range(6)])
session['parents_dict']["root"] = root_node
g['nodes'].append( {
'classes': 'red',
'parent': "root",
'data': {'id' : root_node,
'Node_Type': "root",
'name': "root"
} })
# parent nodes and edges
for p in classes_dict.keys():
if p + 'Parent' in session['defaults']:
nestingPath = session['defaults'][p + 'Parent'][p + 'Parent']['depth'].split('.')
parentData = copy.deepcopy(classes_dict[p])
reduce(operator.getitem, nestingPath[:-1], parentData)[nestingPath[-1]] = []
print('parentData:', parentData)
id_parent = 'root_' + p + '_' + ''.join([choice(ascii_letters + digits) for i in range(6)])
session['parents_dict'][p] = id_parent
g['nodes'].append( {
'classes': 'red',
'parent': "root",
'data': {'id': id_parent,
'Node_Type': p,
'name': p,
to_client_convention['node_json']: parentData
} }) if p + 'Parent' in session['defaults'] else g['nodes'].append( {
'classes': 'red',
'parent': "root",
'data': {'id': id_parent,
'Node_Type': p,
'name': p
} })
g['edges'].append( {
'classes': 'followerEdge',
"data": {
"id": str(root_node) + '-' + str(id_parent),
"source": root_node,
"target": session['parents_dict'][p]
}
})
# children nodes and edges
for parent, children_list in classes_dict.items():
for child_name, child in children_list.items():
id = ''.join([choice(ascii_letters + digits) for i in range(6)])
child_id = parent + '-' + id
if p + 'Parent' in session['defaults']:
nestingPath = session['defaults'][p + 'Parent'][p + 'Parent']['depth'].split('.')
parentJson = classes_dict[parent]
for _ in reduce(operator.getitem, nestingPath, parentJson):
id = ''.join([choice(ascii_letters + digits) for i in range(6)])
child_id = parent + '-' + id
g['nodes'].append( {
'classes': 'green',
'parent': parent,
'data': {'id' : child_id,
'Node_Type': parent,
'name': child_id,
to_client_convention['node_json']: _
} })
g['edges'].append( {
'classes': 'followerEdge',
"data": {
"id": session['parents_dict'][parent] + '-' + \
child_id,
"source": session['parents_dict'][parent],
"target": child_id
}
})
break
else:
g['nodes'].append( {
'classes': 'green',
'parent': parent,
'data': {'id' : child_id,
'Node_Type': parent,
'name': child_name,
to_client_convention['node_json']: child
} })
zmqDict(session, child, child_id)
g['edges'].append( {
'classes': 'followerEdge',
"data": {
"id": session['parents_dict'][parent] + '-' + \
child_id,
"source": session['parents_dict'][parent],
"target": child_id
}
})
return g
to_client_convention ={
"node_json": "config-block"
} | 0.214938 | 0.229784 |
""" This is a template module just for instruction. """
# no imports
# functions
def foo(i, j): # real signature unknown; restored from __doc__
    """
    foo(i,j)
    Return the sum of i and j.
    """
    # Auto-generated IDE stub: the real implementation lives in the compiled
    # xxlimited extension module, so the Python body is intentionally empty.
    pass
def new(): # real signature unknown; restored from __doc__
    """ new() -> new Xx object """
    # Auto-generated IDE stub; the C extension provides the actual constructor.
    pass
def roj(a, b): # real signature unknown; restored from __doc__
    """ roj(a,b) -> None """
    # Auto-generated IDE stub; the C extension provides the actual behaviour.
    pass
# classes
class error(Exception):
    # Auto-generated stub for the extension module's exception type.
    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    __weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """list of weak references to the object (if defined)"""
class Null(object):
    # Auto-generated stub for the extension module's Null type; the rich
    # comparison methods below were restored from their signatures and the
    # real implementations live in the compiled module.
    def __eq__(self, *args, **kwargs): # real signature unknown
        """ Return self==value. """
        pass

    def __ge__(self, *args, **kwargs): # real signature unknown
        """ Return self>=value. """
        pass

    def __gt__(self, *args, **kwargs): # real signature unknown
        """ Return self>value. """
        pass

    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    def __le__(self, *args, **kwargs): # real signature unknown
        """ Return self<=value. """
        pass

    def __lt__(self, *args, **kwargs): # real signature unknown
        """ Return self<value. """
        pass

    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass

    def __ne__(self, *args, **kwargs): # real signature unknown
        """ Return self!=value. """
        pass

    # The stub generator marks the type unhashable because __eq__ is defined
    # without __hash__ -- presumably mirroring the runtime type; confirm
    # against the C module if it matters.
    __hash__ = None
class Str(str):
    # Auto-generated stub subclass of ``str`` exposed by the extension module.
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
class Xxo(object):
    """ The Xxo type """
    # Auto-generated stub; method bodies are intentionally empty because the
    # real implementations live in the compiled extension module.
    def demo(self): # real signature unknown; restored from __doc__
        """ demo() -> None """
        pass

    def __del__(self, *args, **kwargs): # real signature unknown
        pass

    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass

    def __init__(self, *args, **kwargs): # real signature unknown
        pass
# variables with complex values
__loader__ = None # (!) real value is ''
__spec__ = None # (!) real value is '' | pyy1/.pycharm_helpers/python_stubs/-1550516950/xxlimited.py | """ This is a template module just for instruction. """
# no imports
# functions
def foo(i, j): # real signature unknown; restored from __doc__
"""
foo(i,j)
Return the sum of i and j.
"""
pass
def new(): # real signature unknown; restored from __doc__
""" new() -> new Xx object """
pass
def roj(a, b): # real signature unknown; restored from __doc__
""" roj(a,b) -> None """
pass
# classes
class error(Exception):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
class Null(object):
# no doc
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
__hash__ = None
class Str(str):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
class Xxo(object):
""" The Xxo type """
def demo(self): # real signature unknown; restored from __doc__
""" demo() -> None """
pass
def __del__(self, *args, **kwargs): # real signature unknown
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
# variables with complex values
__loader__ = None # (!) real value is ''
__spec__ = None # (!) real value is '' | 0.656328 | 0.162779 |
import artm
import numpy as np
import shutil
import pytest
import warnings
from ..cooking_machine.models.topic_model import TopicModel
from ..cooking_machine.dataset import Dataset, W_DIFF_BATCHES_1
from ..viewers import top_documents_viewer
NUM_TOPICS = 5
NUM_DOCUMENT_PASSES = 1
NUM_ITERATIONS = 10
class TestTopDocumentsViewer:
    """Tests for ``top_documents_viewer.TopDocumentsViewer``.

    A small ARTM topic model is trained once for the whole class on the
    bundled test dataset; each test builds a viewer over that model.
    """
    # Shared fixtures; populated by setup_class().
    topic_model = None
    theta = None
    top_documents_viewer = None
    dataset = None

    @classmethod
    def setup_class(cls):
        """Train a tiny ARTM model on the test dataset and create a viewer."""
        with warnings.catch_warnings():
            # Dataset creation may warn about batch count; irrelevant here.
            warnings.filterwarnings(action="ignore", message=W_DIFF_BATCHES_1)
            cls.dataset = Dataset('tests/test_data/test_dataset.csv')
            dictionary = cls.dataset.get_dictionary()
            batch_vectorizer = cls.dataset.get_batch_vectorizer()

        model_artm = artm.ARTM(
            num_topics=NUM_TOPICS,
            cache_theta=True,
            num_document_passes=NUM_DOCUMENT_PASSES,
            dictionary=dictionary,
            scores=[artm.PerplexityScore(name='PerplexityScore')],)
        cls.topic_model = TopicModel(model_artm, model_id='model_id')
        cls.topic_model._fit(batch_vectorizer, num_iterations=NUM_ITERATIONS)
        cls.theta = cls.topic_model.get_theta(dataset=cls.dataset)
        cls.top_documents_viewer = top_documents_viewer.TopDocumentsViewer(model=cls.topic_model)

    @classmethod
    def teardown_class(cls):
        """Remove the dataset's internal batches folder created in setup."""
        shutil.rmtree(cls.dataset._internals_folder_path)

    def test_check_output_format(self):
        """view() must return a list of per-topic lists."""
        topics_documents = TestTopDocumentsViewer.top_documents_viewer.view()

        assert isinstance(topics_documents, list), 'Result of view() not of type "list"'
        assert all(isinstance(topic_documents, list) for topic_documents in topics_documents),\
            'Some elements in the result list of view() not of type "list"'

    def test_check_output_content(self):
        """Every document index must appear somewhere in the view() output."""
        num_documents = TestTopDocumentsViewer.theta.shape[1]
        documents_indices = list(range(num_documents))
        topics_documents_from_viewer = TestTopDocumentsViewer.top_documents_viewer.view()
        documents_from_viewer = merge_lists(topics_documents_from_viewer)

        assert sorted(documents_from_viewer) == documents_indices,\
            'Viewer returned as documents "{0}".' \
            'But expected to get documents\' indices from "0" to "{1}"'.format(
                documents_from_viewer, num_documents - 1)

    def test_check_precomputed_distances_parameter_workable(self):
        """Distances of zero to one topic must pull all documents into it."""
        index_of_topic_to_be_nearest_to_all_documents = 0
        distances_all_one_except_to_one_topic = np.ones_like(TestTopDocumentsViewer.theta.values)
        distances_all_one_except_to_one_topic[:, index_of_topic_to_be_nearest_to_all_documents] = 0

        documents_viewer = top_documents_viewer.TopDocumentsViewer(
            model=TestTopDocumentsViewer.topic_model,
            precomputed_distances=distances_all_one_except_to_one_topic)
        topics_documents = documents_viewer.view()
        num_documents_in_nearest_topic = len(
            topics_documents[index_of_topic_to_be_nearest_to_all_documents])
        num_documents = TestTopDocumentsViewer.theta.shape[1]

        assert num_documents_in_nearest_topic == num_documents,\
            'Expected to see all documents in one topic.' \
            'But the topic has "{}" documents instead of "{}"'.format(
                num_documents_in_nearest_topic, num_documents)

    @pytest.mark.parametrize("max_num_top_documents", [0, 1])
    def test_check_max_top_documents_number_parameter_workable(self, max_num_top_documents):
        """max_top_number must cap the size of every topic's document list."""
        documents_viewer = top_documents_viewer.TopDocumentsViewer(
            model=TestTopDocumentsViewer.topic_model,
            max_top_number=max_num_top_documents)

        topics_documents = documents_viewer.view()

        assert all(len(topic_documents) <= max_num_top_documents
                   for topic_documents in topics_documents),\
            'Not all top documents lists from "{}" have less elements than required "{}"'.format(
                topics_documents, max_num_top_documents)

    def test_check_object_clusters_parameter_workable(self):
        """A uniform cluster labelling must route all documents to one topic."""
        num_documents = TestTopDocumentsViewer.theta.shape[1]

        cluster_label_to_be_same_for_all_documents = 0
        cluster_labels = list(
            cluster_label_to_be_same_for_all_documents for _ in range(num_documents))

        documents_viewer = top_documents_viewer.TopDocumentsViewer(
            model=TestTopDocumentsViewer.topic_model,
            object_clusters=cluster_labels)
        topics_documents = documents_viewer.view()
        num_documents_with_given_cluster_label = len(
            topics_documents[cluster_label_to_be_same_for_all_documents])

        assert num_documents_with_given_cluster_label == num_documents,\
            'Marked all documents with label "{}".' \
            'Expected to see all "{}" documents in that topic,' \
            'but there are only "{}" documents'.format(
                cluster_label_to_be_same_for_all_documents, num_documents,
                num_documents_with_given_cluster_label)

    @pytest.mark.parametrize("illegal_cluster_label", [-1, NUM_TOPICS])
    def test_check_object_clusters_parameter_validates_range_of_input_labels(
            self, illegal_cluster_label):
        """Out-of-range cluster labels must raise ValueError."""
        num_documents = TestTopDocumentsViewer.theta.shape[1]
        cluster_labels = list(0 for _ in range(num_documents))

        cluster_labels[0] = illegal_cluster_label

        with pytest.raises(ValueError):
            _ = top_documents_viewer.TopDocumentsViewer(
                model=TestTopDocumentsViewer.topic_model,
                object_clusters=cluster_labels).view()
def merge_lists(iterable_of_lists):
    """Concatenate the given iterables into a single flat list."""
    return [element for sublist in iterable_of_lists for element in sublist]
import numpy as np
import shutil
import pytest
import warnings
from ..cooking_machine.models.topic_model import TopicModel
from ..cooking_machine.dataset import Dataset, W_DIFF_BATCHES_1
from ..viewers import top_documents_viewer
NUM_TOPICS = 5
NUM_DOCUMENT_PASSES = 1
NUM_ITERATIONS = 10
class TestTopDocumentsViewer:
""" """
topic_model = None
theta = None
top_documents_viewer = None
dataset = None
@classmethod
def setup_class(cls):
""" """
with warnings.catch_warnings():
warnings.filterwarnings(action="ignore", message=W_DIFF_BATCHES_1)
cls.dataset = Dataset('tests/test_data/test_dataset.csv')
dictionary = cls.dataset.get_dictionary()
batch_vectorizer = cls.dataset.get_batch_vectorizer()
model_artm = artm.ARTM(
num_topics=NUM_TOPICS,
cache_theta=True,
num_document_passes=NUM_DOCUMENT_PASSES,
dictionary=dictionary,
scores=[artm.PerplexityScore(name='PerplexityScore')],)
cls.topic_model = TopicModel(model_artm, model_id='model_id')
cls.topic_model._fit(batch_vectorizer, num_iterations=NUM_ITERATIONS)
cls.theta = cls.topic_model.get_theta(dataset=cls.dataset)
cls.top_documents_viewer = top_documents_viewer.TopDocumentsViewer(model=cls.topic_model)
@classmethod
def teardown_class(cls):
""" """
shutil.rmtree(cls.dataset._internals_folder_path)
def test_check_output_format(self):
""" """
topics_documents = TestTopDocumentsViewer.top_documents_viewer.view()
assert isinstance(topics_documents, list), 'Result of view() not of type "list"'
assert all(isinstance(topic_documents, list) for topic_documents in topics_documents),\
'Some elements in the result list of view() not of type "list"'
def test_check_output_content(self):
""" """
num_documents = TestTopDocumentsViewer.theta.shape[1]
documents_indices = list(range(num_documents))
topics_documents_from_viewer = TestTopDocumentsViewer.top_documents_viewer.view()
documents_from_viewer = merge_lists(topics_documents_from_viewer)
assert sorted(documents_from_viewer) == documents_indices,\
'Viewer returned as documents "{0}".' \
'But expected to get documents\' indices from "0" to "{1}"'.format(
documents_from_viewer, num_documents - 1)
def test_check_precomputed_distances_parameter_workable(self):
""" """
index_of_topic_to_be_nearest_to_all_documents = 0
distances_all_one_except_to_one_topic = np.ones_like(TestTopDocumentsViewer.theta.values)
distances_all_one_except_to_one_topic[:, index_of_topic_to_be_nearest_to_all_documents] = 0
documents_viewer = top_documents_viewer.TopDocumentsViewer(
model=TestTopDocumentsViewer.topic_model,
precomputed_distances=distances_all_one_except_to_one_topic)
topics_documents = documents_viewer.view()
num_documents_in_nearest_topic = len(
topics_documents[index_of_topic_to_be_nearest_to_all_documents])
num_documents = TestTopDocumentsViewer.theta.shape[1]
assert num_documents_in_nearest_topic == num_documents,\
'Expected to see all documents in one topic.' \
'But the topic has "{}" documents instead of "{}"'.format(
num_documents_in_nearest_topic, num_documents)
@pytest.mark.parametrize("max_num_top_documents", [0, 1])
def test_check_max_top_documents_number_parameter_workable(self, max_num_top_documents):
""" """
documents_viewer = top_documents_viewer.TopDocumentsViewer(
model=TestTopDocumentsViewer.topic_model,
max_top_number=max_num_top_documents)
topics_documents = documents_viewer.view()
assert all(len(topic_documents) <= max_num_top_documents
for topic_documents in topics_documents),\
'Not all top documents lists from "{}" have less elements than required "{}"'.format(
topics_documents, max_num_top_documents)
def test_check_object_clusters_parameter_workable(self):
""" """
num_documents = TestTopDocumentsViewer.theta.shape[1]
cluster_label_to_be_same_for_all_documents = 0
cluster_labels = list(
cluster_label_to_be_same_for_all_documents for _ in range(num_documents))
documents_viewer = top_documents_viewer.TopDocumentsViewer(
model=TestTopDocumentsViewer.topic_model,
object_clusters=cluster_labels)
topics_documents = documents_viewer.view()
num_documents_with_given_cluster_label = len(
topics_documents[cluster_label_to_be_same_for_all_documents])
assert num_documents_with_given_cluster_label == num_documents,\
'Marked all documents with label "{}".' \
'Expected to see all "{}" documents in that topic,' \
'but there are only "{}" documents'.format(
cluster_label_to_be_same_for_all_documents, num_documents,
num_documents_with_given_cluster_label)
@pytest.mark.parametrize("illegal_cluster_label", [-1, NUM_TOPICS])
def test_check_object_clusters_parameter_validates_range_of_input_labels(
self, illegal_cluster_label):
""" """
num_documents = TestTopDocumentsViewer.theta.shape[1]
cluster_labels = list(0 for _ in range(num_documents))
cluster_labels[0] = illegal_cluster_label
with pytest.raises(ValueError):
_ = top_documents_viewer.TopDocumentsViewer(
model=TestTopDocumentsViewer.topic_model,
object_clusters=cluster_labels).view()
def merge_lists(iterable_of_lists):
""" """
result = []
for i in iterable_of_lists:
result += i
return result | 0.619817 | 0.434581 |
from msrest.serialization import Model
class CertificateBundle(Model):
    """A certificate bundle consists of a certificate (X509) plus its attributes.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: The certificate id.
    :vartype id: str
    :ivar kid: The key id.
    :vartype kid: str
    :ivar sid: The secret id.
    :vartype sid: str
    :ivar x509_thumbprint: Thumbprint of the certificate.
    :vartype x509_thumbprint: bytes
    :ivar policy: The management policy.
    :vartype policy: :class:`CertificatePolicy
     <azure.keyvault.generated.models.CertificatePolicy>`
    :param cer: CER contents of x509 certificate.
    :type cer: bytearray
    :param content_type: The content type of the secret.
    :type content_type: str
    :param attributes: The certificate attributes.
    :type attributes: :class:`CertificateAttributes
     <azure.keyvault.generated.models.CertificateAttributes>`
    :param tags: Application specific metadata in the form of key-value pairs
    :type tags: dict
    """

    # Fields the server populates; client-supplied values are ignored when
    # serializing a request (enforced by msrest via this table).
    _validation = {
        'id': {'readonly': True},
        'kid': {'readonly': True},
        'sid': {'readonly': True},
        'x509_thumbprint': {'readonly': True},
        'policy': {'readonly': True},
    }

    # Maps Python attribute names to wire keys and msrest (de)serialization
    # types; note the thumbprint travels as base64 under the JWK key 'x5t'.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'kid': {'key': 'kid', 'type': 'str'},
        'sid': {'key': 'sid', 'type': 'str'},
        'x509_thumbprint': {'key': 'x5t', 'type': 'base64'},
        'policy': {'key': 'policy', 'type': 'CertificatePolicy'},
        'cer': {'key': 'cer', 'type': 'bytearray'},
        'content_type': {'key': 'contentType', 'type': 'str'},
        'attributes': {'key': 'attributes', 'type': 'CertificateAttributes'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, cer=None, content_type=None, attributes=None, tags=None):
        # Read-only, server-populated fields start out empty.
        self.id = None
        self.kid = None
        self.sid = None
        self.x509_thumbprint = None
        self.policy = None
        # Client-settable fields.
        self.cer = cer
        self.content_type = content_type
        self.attributes = attributes
        self.tags = tags
from msrest.serialization import Model
class CertificateBundle(Model):
"""A certificate bundle consists of a certificate (X509) plus its attributes.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: The certificate id.
:vartype id: str
:ivar kid: The key id.
:vartype kid: str
:ivar sid: The secret id.
:vartype sid: str
:ivar x509_thumbprint: Thumbprint of the certificate.
:vartype x509_thumbprint: bytes
:ivar policy: The management policy.
:vartype policy: :class:`CertificatePolicy
<azure.keyvault.generated.models.CertificatePolicy>`
:param cer: CER contents of x509 certificate.
:type cer: bytearray
:param content_type: The content type of the secret.
:type content_type: str
:param attributes: The certificate attributes.
:type attributes: :class:`CertificateAttributes
<azure.keyvault.generated.models.CertificateAttributes>`
:param tags: Application specific metadata in the form of key-value pairs
:type tags: dict
"""
_validation = {
'id': {'readonly': True},
'kid': {'readonly': True},
'sid': {'readonly': True},
'x509_thumbprint': {'readonly': True},
'policy': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'kid': {'key': 'kid', 'type': 'str'},
'sid': {'key': 'sid', 'type': 'str'},
'x509_thumbprint': {'key': 'x5t', 'type': 'base64'},
'policy': {'key': 'policy', 'type': 'CertificatePolicy'},
'cer': {'key': 'cer', 'type': 'bytearray'},
'content_type': {'key': 'contentType', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'CertificateAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, cer=None, content_type=None, attributes=None, tags=None):
self.id = None
self.kid = None
self.sid = None
self.x509_thumbprint = None
self.policy = None
self.cer = cer
self.content_type = content_type
self.attributes = attributes
self.tags = tags | 0.891838 | 0.361841 |
import numpy as np
import tensorflow as tf
import argparse
import time
import os
import pickle
from utils import DataLoader
from model import Model
def main():
    """Parse command-line hyperparameters and start training."""
    # (flag, type, default, help) -- one row per CLI option.
    cli_options = [
        ('--rnn_size', int, 256, 'size of RNN hidden state'),
        ('--num_layers', int, 2, 'number of layers in the RNN'),
        ('--model', str, 'lstm', 'rnn, gru, or lstm'),
        ('--batch_size', int, 50, 'minibatch size'),
        ('--seq_length', int, 300, 'RNN sequence length'),
        ('--num_epochs', int, 30, 'number of epochs'),
        ('--save_every', int, 500, 'save frequency'),
        ('--model_dir', str, 'save', 'directory to save model to'),
        ('--grad_clip', float, 10., 'clip gradients at this value'),
        ('--learning_rate', float, 0.005, 'learning rate'),
        ('--decay_rate', float, 0.95, 'decay rate for rmsprop'),
        ('--num_mixture', int, 20, 'number of gaussian mixtures'),
        ('--data_scale', float, 20, 'factor to scale raw data down by'),
        ('--keep_prob', float, 0.8, 'dropout keep probability'),
    ]
    parser = argparse.ArgumentParser()
    for flag, option_type, default, help_text in cli_options:
        parser.add_argument(flag, type=option_type, default=default,
                            help=help_text)
    train(parser.parse_args())
def train(args):
    """Train the RNN described by ``args`` (TF1 graph/session style).

    Saves the parsed config to ``<model_dir>/config.pkl``, writes
    TensorBoard summaries under ``<model_dir>/log`` and periodically
    checkpoints the session to ``<model_dir>/model.ckpt``.
    """
    data_loader = DataLoader(args.batch_size, args.seq_length, args.data_scale)

    if args.model_dir != '' and not os.path.exists(args.model_dir):
        os.makedirs(args.model_dir)

    # Persist the hyperparameters so sampling scripts can rebuild the model.
    with open(os.path.join(args.model_dir, 'config.pkl'), 'wb') as f:
        pickle.dump(args, f)

    model = Model(args)

    # Restrict the session to the GPU with id "1".
    config = tf.ConfigProto(
        gpu_options=tf.GPUOptions(visible_device_list="1")
    )
    with tf.Session(config=config) as sess:
        summary_writer = tf.summary.FileWriter(os.path.join(args.model_dir, 'log'), sess.graph)
        tf.global_variables_initializer().run()
        saver = tf.train.Saver(tf.global_variables())
        for e in range(args.num_epochs):
            # Exponentially decayed learning rate, stepped once per epoch.
            sess.run(tf.assign(model.lr, args.learning_rate * (args.decay_rate ** e)))
            data_loader.reset_batch_pointer()
            v_x, v_y = data_loader.validation_data()
            # NOTE(review): validation always feeds the *initial* RNN state
            # and is evaluated on every batch, which is relatively costly.
            valid_feed = {model.input_data: v_x, model.target_data: v_y, model.state_in: model.state_in.eval()}
            state = model.state_in.eval()
            for b in range(data_loader.num_batches):
                i = e * data_loader.num_batches + b
                start = time.time()
                x, y = data_loader.next_batch()
                # Carry the RNN state across batches within an epoch.
                feed = {model.input_data: x, model.target_data: y, model.state_in: state}
                train_loss_summary, train_loss, state, _ = sess.run([model.train_loss_summary, model.cost, model.state_out, model.train_op], feed)
                summary_writer.add_summary(train_loss_summary, i)
                valid_loss_summary, valid_loss, = sess.run([model.valid_loss_summary, model.cost], valid_feed)
                summary_writer.add_summary(valid_loss_summary, i)
                end = time.time()
                print(
                    "{}/{} (epoch {}), train_loss = {:.3f}, valid_loss = {:.3f}, time/batch = {:.3f}" \
                    .format(
                        i,
                        args.num_epochs * data_loader.num_batches,
                        e,
                        train_loss, valid_loss, end - start))
                # Checkpoint every save_every global steps (skipping step 0).
                if (e * data_loader.num_batches + b) % args.save_every == 0 and ((e * data_loader.num_batches + b) > 0):
                    checkpoint_path = os.path.join(args.model_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step = e * data_loader.num_batches + b)
                    print("model saved to {}".format(checkpoint_path))
if __name__ == '__main__':
    # Script entry point: parse CLI flags and run training.
    main()
import tensorflow as tf
import argparse
import time
import os
import pickle
from utils import DataLoader
from model import Model
def main():
    """Parse the command-line hyper-parameters and launch training."""
    parser = argparse.ArgumentParser()
    # (flag, type, default, help) -- declaration order only affects --help.
    options = [
        ('--rnn_size', int, 256, 'size of RNN hidden state'),
        ('--num_layers', int, 2, 'number of layers in the RNN'),
        ('--model', str, 'lstm', 'rnn, gru, or lstm'),
        ('--batch_size', int, 50, 'minibatch size'),
        ('--seq_length', int, 300, 'RNN sequence length'),
        ('--num_epochs', int, 30, 'number of epochs'),
        ('--save_every', int, 500, 'save frequency'),
        ('--model_dir', str, 'save', 'directory to save model to'),
        ('--grad_clip', float, 10., 'clip gradients at this value'),
        ('--learning_rate', float, 0.005, 'learning rate'),
        ('--decay_rate', float, 0.95, 'decay rate for rmsprop'),
        ('--num_mixture', int, 20, 'number of gaussian mixtures'),
        ('--data_scale', float, 20, 'factor to scale raw data down by'),
        ('--keep_prob', float, 0.8, 'dropout keep probability'),
    ]
    for flag, flag_type, default, help_text in options:
        parser.add_argument(flag, type=flag_type, default=default, help=help_text)
    train(parser.parse_args())
def train(args):
    """Train the model described by ``args``.

    Builds the data pipeline and graph, runs ``num_epochs`` passes over the
    data, logs train/validation summaries every batch and writes a
    checkpoint every ``save_every`` global steps.
    """
    data_loader = DataLoader(args.batch_size, args.seq_length, args.data_scale)

    # Persist the run configuration next to the checkpoints so a sampling
    # script can rebuild an identical graph later.
    if args.model_dir != '' and not os.path.exists(args.model_dir):
        os.makedirs(args.model_dir)
    with open(os.path.join(args.model_dir, 'config.pkl'), 'wb') as f:
        pickle.dump(args, f)

    model = Model(args)

    # NOTE(review): GPU "1" is hard-coded here -- confirm this matches the
    # intended training machine.
    config = tf.ConfigProto(gpu_options=tf.GPUOptions(visible_device_list="1"))
    with tf.Session(config=config) as sess:
        summary_writer = tf.summary.FileWriter(os.path.join(args.model_dir, 'log'), sess.graph)
        tf.global_variables_initializer().run()
        saver = tf.train.Saver(tf.global_variables())

        for epoch in range(args.num_epochs):
            # Exponentially decay the learning rate once per epoch.
            sess.run(tf.assign(model.lr, args.learning_rate * (args.decay_rate ** epoch)))
            data_loader.reset_batch_pointer()

            # The validation feed reuses the graph's initial state tensor.
            v_x, v_y = data_loader.validation_data()
            valid_feed = {model.input_data: v_x, model.target_data: v_y, model.state_in: model.state_in.eval()}

            state = model.state_in.eval()
            for batch in range(data_loader.num_batches):
                step = epoch * data_loader.num_batches + batch
                start = time.time()

                x, y = data_loader.next_batch()
                feed = {model.input_data: x, model.target_data: y, model.state_in: state}
                # Carry the recurrent state across batches within the epoch.
                train_loss_summary, train_loss, state, _ = sess.run(
                    [model.train_loss_summary, model.cost, model.state_out, model.train_op], feed)
                summary_writer.add_summary(train_loss_summary, step)

                valid_loss_summary, valid_loss = sess.run(
                    [model.valid_loss_summary, model.cost], valid_feed)
                summary_writer.add_summary(valid_loss_summary, step)

                end = time.time()
                print(
                    "{}/{} (epoch {}), train_loss = {:.3f}, valid_loss = {:.3f}, time/batch = {:.3f}"
                    .format(step, args.num_epochs * data_loader.num_batches, epoch,
                            train_loss, valid_loss, end - start))

                if step % args.save_every == 0 and step > 0:
                    checkpoint_path = os.path.join(args.model_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)
                    print("model saved to {}".format(checkpoint_path))
if __name__ == '__main__':
main() | 0.593256 | 0.082512 |
# Telco customer churn analysis: explore the dataset, then train a logistic
# regression model to predict the 'Churn' column.
#
# NOTE(review): this is a notebook-style script. Bare expressions such as
# df.head(7) or df.shape only display output in Colab/Jupyter; as a plain
# script they are no-ops, kept for parity with the original notebook.
import pandas as pd
import numpy as np
import sklearn
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

# Load the data (opens the Colab file-upload dialog).
from google.colab import files  # Use to load data on Google Colab
uploaded = files.upload()

# Load the data into the data frame
df = pd.read_csv('WA_Fn-UseC_-Telco-Customer-moved-agitated.csv')
df.head(7)
df.shape
# Show all of the column names
df.columns.values
# Check for na or missing data
df.isna().sum()
# Show statistics on the current data
df.describe()
# Get the number of customers that churned
df['Churn'].value_counts()
# Visualize the count of customer churn
sns.countplot(df['Churn'])

# What percentage of customers are leaving?
retained = df[df.Churn == 'No']
churned = df[df.Churn == 'Yes']
num_retained = retained.shape[0]
num_churned = churned.shape[0]
# Print the percentage of customers that stayed and left
print(num_retained / (num_retained + num_churned) * 100, "% of customers stayed with the company.")
print(num_churned / (num_retained + num_churned) * 100, "% of customers left the company.")

# Visualize the churn count for both Males and Females
sns.countplot(x='gender', hue='Churn', data=df)
# Visualize the churn count for the internet service
sns.countplot(x='InternetService', hue='Churn', data=df)

# Histograms of the numeric features, retained vs. churned overlaid.
numerical_features = ['tenure', 'MonthlyCharges']
fig, ax = plt.subplots(1, 2, figsize=(28, 8))
df[df.Churn == 'No'][numerical_features].hist(bins=20, color="blue", alpha=0.5, ax=ax)
df[df.Churn == 'Yes'][numerical_features].hist(bins=20, color="orange", alpha=0.5, ax=ax)

# Remove the unnecessary column customerID
cleaned_df = df.drop('customerID', axis=1)
# Look at the number of rows and cols in the new data set
cleaned_df.shape

# Convert all the non-numeric columns to numerical data types.
# BUG FIX: the original test `dtype == np.number` compares a concrete dtype
# against the abstract np.number class, which is not a dtype, so the check
# does not reliably match and numeric columns could be label-encoded too.
# pandas' is_numeric_dtype is the supported way to skip numeric columns.
for column in cleaned_df.columns:
    if pd.api.types.is_numeric_dtype(cleaned_df[column]):
        continue
    cleaned_df[column] = LabelEncoder().fit_transform(cleaned_df[column])

# Check the new data set data types
cleaned_df.dtypes
# Show the first 5 rows of the new data set
cleaned_df.head()

# Split into features and target
X = cleaned_df.drop('Churn', axis=1)
y = cleaned_df['Churn']
# Standardizing/scaling the features
X = StandardScaler().fit_transform(X)
# Split the data into 80% training and 20% testing
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Create and train the logistic regression model
model = LogisticRegression()
model.fit(x_train, y_train)
predictions = model.predict(x_test)
# printing the predictions
print(predictions)
# Check precision, recall, f1-score
print( classification_report(y_test, predictions) ) | telecomproject.py | import pandas as pd
# Telco customer churn analysis: explore the dataset, then train a logistic
# regression model to predict the 'Churn' column.
#
# NOTE(review): this is a notebook-style script. Bare expressions such as
# df.head(7) or df.shape only display output in Colab/Jupyter; as a plain
# script they are no-ops, kept for parity with the original notebook.
import pandas as pd
import numpy as np
import sklearn
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

# Load the data (opens the Colab file-upload dialog).
from google.colab import files  # Use to load data on Google Colab
uploaded = files.upload()

# Load the data into the data frame
df = pd.read_csv('WA_Fn-UseC_-Telco-Customer-moved-agitated.csv')
df.head(7)
df.shape
# Show all of the column names
df.columns.values
# Check for na or missing data
df.isna().sum()
# Show statistics on the current data
df.describe()
# Get the number of customers that churned
df['Churn'].value_counts()
# Visualize the count of customer churn
sns.countplot(df['Churn'])

# What percentage of customers are leaving?
retained = df[df.Churn == 'No']
churned = df[df.Churn == 'Yes']
num_retained = retained.shape[0]
num_churned = churned.shape[0]
# Print the percentage of customers that stayed and left
print(num_retained / (num_retained + num_churned) * 100, "% of customers stayed with the company.")
print(num_churned / (num_retained + num_churned) * 100, "% of customers left the company.")

# Visualize the churn count for both Males and Females
sns.countplot(x='gender', hue='Churn', data=df)
# Visualize the churn count for the internet service
sns.countplot(x='InternetService', hue='Churn', data=df)

# Histograms of the numeric features, retained vs. churned overlaid.
numerical_features = ['tenure', 'MonthlyCharges']
fig, ax = plt.subplots(1, 2, figsize=(28, 8))
df[df.Churn == 'No'][numerical_features].hist(bins=20, color="blue", alpha=0.5, ax=ax)
df[df.Churn == 'Yes'][numerical_features].hist(bins=20, color="orange", alpha=0.5, ax=ax)

# Remove the unnecessary column customerID
cleaned_df = df.drop('customerID', axis=1)
# Look at the number of rows and cols in the new data set
cleaned_df.shape

# Convert all the non-numeric columns to numerical data types.
# BUG FIX: the original test `dtype == np.number` compares a concrete dtype
# against the abstract np.number class, which is not a dtype, so the check
# does not reliably match and numeric columns could be label-encoded too.
# pandas' is_numeric_dtype is the supported way to skip numeric columns.
for column in cleaned_df.columns:
    if pd.api.types.is_numeric_dtype(cleaned_df[column]):
        continue
    cleaned_df[column] = LabelEncoder().fit_transform(cleaned_df[column])

# Check the new data set data types
cleaned_df.dtypes
# Show the first 5 rows of the new data set
cleaned_df.head()

# Split into features and target
X = cleaned_df.drop('Churn', axis=1)
y = cleaned_df['Churn']
# Standardizing/scaling the features
X = StandardScaler().fit_transform(X)
# Split the data into 80% training and 20% testing
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Create and train the logistic regression model
model = LogisticRegression()
model.fit(x_train, y_train)
predictions = model.predict(x_test)
# printing the predictions
print(predictions)
# Check precision, recall, f1-score
print(classification_report(y_test, predictions))
from __init__ import sanitize
from time import sleep
from ldap import SCOPE_SUBORDINATE
from swiftclient import service as swiftService
from cinderclient.v2 import client as cinderClient
from keystoneclient.exceptions import NotFound
from keystoneclient.v3 import client as keystoneClient
from keystoneclient.v3.roles import RoleManager
from keystoneclient.v3.groups import GroupManager
from keystoneclient.v3.domains import DomainManager
from keystoneclient.v3.projects import ProjectManager
from keystoneclient.v3.role_assignments import RoleAssignmentManager
from neutronclient.v2_0 import client as neutronClient
from novaclient.v1_1 import client as novaClient
from novaclient.exceptions import Conflict
from novaclient.exceptions import NotFound
from novaclient.exceptions import BadRequest
from novaclient.exceptions import Unauthorized
from ldap_updater import LDAPUpdater
class QuotaChecker:
    """Check and enforce OpenStack tenant quota.

    Verifies that a given tenant does have its correct allocated quota.
    Written for Python 2: map()/filter() results are used as lists throughout.

    Attributes:
        DEFAULT_QUOTA (dict): The default quota for a service developer.
        PARTNER_QUOTA (dict): The default quota for a partner with CRA.
        BIGDATA_QUOTA (dict): The quota for big data enabled projects.
    """

    # Quota profile names exactly as stored in Insightly's custom field.
    _DEFAULT_QUOTA_NAME = 'Default CRA quota'
    _BIGDATA_QUOTA_NAME = 'Bigdata CRA quota'

    DEFAULT_QUOTA = {
        'instances': 16,
        'cores': 16,
        'ram': 32 * 1024,
        'floating_ips': 5,
        'cinder_GB': 1024,
        'swift_bytes': 1024 * 1024 * 1024 * 1024,
        'flavors': ['m1.tiny', 'm1.small', 'm1.medium', 'm1.large', 'm1.x-large']
    }
    PARTNER_QUOTA = {
        'instances': 1,
        'cores': 1,
        'ram': 1024,
        'floating_ips': 1,
        'cinder_GB': 40,
        'swift_bytes': 40 * 1024 * 1024 * 1024,
        'flavors': ['m1.tiny']
    }
    BIGDATA_QUOTA = {
        'instances': 16,
        'cores': 46,
        'ram': 400 * 1024,
        'floating_ips': 15,
        'cinder_GB': 1024,
        'swift_bytes': 1024 * 1024 * 1024 * 1024,
        'flavors': ['m1.tiny', 'm1.small', 'hadoop.small', 'hadoop.medium', 'hadoop.large']
    }

    def __init__(self, username=None, password=None, tenantid=None, baseurl=None):
        """Set instance authentication constants.

        BUG FIX: the ``password`` default and its documentation had been
        replaced by a redaction placeholder (``<PASSWORD>``), which is not
        valid Python; restored to a ``None`` default like the other arguments.

        Args:
            username (str): OpenStack administrator username.
            password (str): OpenStack administrator password.
            tenantid (str): OpenStack tenant for the administrator account.
            baseurl (str): OpenStack environment URI.
        """
        self._AUTH_USERNAME = username
        self._AUTH_PASSWORD = password
        self._AUTH_TENANTID = tenantid
        self._BASE_URL = baseurl
        keystone = keystoneClient.Client(username=self._AUTH_USERNAME,
                                         password=self._AUTH_PASSWORD,
                                         project_name=self._AUTH_TENANTID,
                                         auth_url='%s:5001/v3' % self._BASE_URL)
        self._roleManager = RoleManager(keystone)
        self._groupManager = GroupManager(keystone)
        self._domainManager = DomainManager(keystone)
        self._projectManager = ProjectManager(keystone)
        self._roleAssignmentManager = RoleAssignmentManager(keystone)

    def _getOpenstackGroup(self, group):
        """Return the keystone group named ``group``, or None when lookup fails."""
        try:
            return self._groupManager.find(name=group)
        except Exception:
            # Narrowed from a bare except: still best-effort, but no longer
            # swallows SystemExit/KeyboardInterrupt.
            return None

    def _getTenantId(self, tenant):
        """Return the project id assigned to ``tenant``, or None.

        NOTE(review): the map is keyed by role-assignment group ids while
        callers pass the sanitized LDAP tenant name -- confirm group ids and
        tenant names coincide in this deployment.
        """
        projectMap = dict(map(lambda assignment: (assignment.group['id'], assignment.scope['project']['id']),
                              filter(lambda a: 'group' in a._info.keys(), self._roleAssignmentManager.list())))
        return projectMap[tenant].strip() if tenant in projectMap.keys() else None

    def _ensureTenantNetwork(self, tenant):
        """Create a default network, /27 subnet and router for ``tenant`` if missing."""
        neutron = neutronClient.Client(username=self._AUTH_USERNAME,
                                       password=self._AUTH_PASSWORD,
                                       tenant_id=self._AUTH_TENANTID,
                                       auth_url='%s:5001/v2.0' % self._BASE_URL)
        if not filter(lambda network: network['tenant_id'] == tenant, neutron.list_networks()['networks']):
            network = neutron.create_network({'network': {'name': 'default', 'tenant_id': tenant}})['network']
            # Poll until the new network is actually visible.
            while not neutron.list_networks(id=network['id'])['networks']:
                sleep(1)
            # (third, fourth) octet pairs of every allocated 192.168.x.y/27.
            allocated_cidrs = map(lambda chunk: (int(chunk[0]), int(chunk[1])),
                                  map(lambda cidr: cidr['cidr'].split('/')[0].split('.')[-2:],
                                      filter(lambda subnet: subnet['cidr'].endswith('/27'),
                                             neutron.list_subnets()['subnets'])))
            # The 192.168.192.0/27 chunk is treated specially: dropped from
            # the scan and skipped over when allocating.
            # NOTE(review): presumably reserved -- confirm.
            if (192, 0) in allocated_cidrs:
                allocated_cidrs.remove((192, 0))
            if allocated_cidrs:
                # Take the next free /27 after the highest allocated chunk.
                max_bigchunk = max(map(lambda chunk: chunk[0], allocated_cidrs))
                max_smlchunk = max(map(lambda chunk: chunk[1], filter(lambda c: c[0] == max_bigchunk, allocated_cidrs)))
                if max_bigchunk == 191 and max_smlchunk == 224:
                    max_bigchunk = 192
                    max_smlchunk = 0
                if max_smlchunk == 224:
                    cidr = '.'.join([str(chunk) for chunk in [192, 168, max_bigchunk + 1, 0]]) + '/27'
                else:
                    cidr = '.'.join([str(chunk) for chunk in [192, 168, max_bigchunk, max_smlchunk + 32]]) + '/27'
            else:
                cidr = '192.168.0.0/27'
            subnet = neutron.create_subnet({'subnet': {'name': 'default-subnet',
                                                       'cidr': cidr,
                                                       'dns_nameservers': ['192.168.3.11', '192.168.127.12'],
                                                       'tenant_id': tenant,
                                                       'network_id': network['id'],
                                                       'ip_version': '4'}})['subnet']
            while not neutron.list_subnets(id=subnet['id'])['subnets']:
                sleep(1)
            router = neutron.create_router({'router': {'tenant_id': tenant,
                                                       'name': 'default-router'}})['router']
            while not neutron.list_routers(id=router['id'])['routers']:
                sleep(1)
            # Uplink the router to the external network and attach the subnet.
            public_net_id = filter(lambda n: n['router:external'],
                                   neutron.list_networks(name='public')['networks'])[0]['id']
            neutron.add_gateway_router(router['id'], {'network_id': public_net_id})
            neutron.add_interface_router(router['id'], {'subnet_id': subnet['id']})

    def _getTenantQuota(self, tenant, tenantType):
        """Select the quota definition for an Insightly ``tenant`` record.

        Args:
            tenant (dict): Insightly project JSON, including CUSTOMFIELDS.
            tenantType (str): One of the LDAPUpdater tenant type constants.

        Returns:
            dict or None: a *_QUOTA dict, or None when no rule applies.
        """
        quota = None
        # PROJECT_FIELD_1 carries the quota profile name in Insightly.
        statedQuota = map(lambda q: q['FIELD_VALUE'], filter(lambda f: f['CUSTOM_FIELD_ID'] == 'PROJECT_FIELD_1',
                                                             tenant['CUSTOMFIELDS']))[0]
        if statedQuota == self._BIGDATA_QUOTA_NAME:
            quota = self.BIGDATA_QUOTA
        else:
            if statedQuota == self._DEFAULT_QUOTA_NAME:
                if tenantType == LDAPUpdater.FPA_CRA:
                    quota = self.PARTNER_QUOTA
                if tenantType == LDAPUpdater.SDA:
                    quota = self.DEFAULT_QUOTA
        return quota

    def _grantAccess(self, client, flavor, tenant):
        """Grant ``tenant`` access to ``flavor``; already granted is fine."""
        try:
            client.flavor_access.add_tenant_access(flavor, tenant)
        except Conflict:
            pass

    def _revokeAccess(self, client, flavor, tenant):
        """Revoke ``tenant`` access to ``flavor``; never granted is fine."""
        try:
            client.flavor_access.remove_tenant_access(flavor, tenant)
        except NotFound:
            # NOTE(review): this is novaclient's NotFound -- the later import
            # shadows keystoneclient's NotFound at module level.
            pass

    def _enforceQuota(self, ldap_tenant, quotaDefinition, ldap_conn=None):
        """Ensure the OpenStack project for ``ldap_tenant`` exists with its quota.

        Creates or maps the project when needed, opens SSH to platform
        projects, ensures the default network and pushes the quota to the
        swift, cinder, nova and neutron services.

        Args:
            ldap_tenant (str): Sanitized LDAP tenant (group) name.
            quotaDefinition (dict): A *_QUOTA dict, or None to skip quota push.
            ldap_conn: Optional LDAP connection for platform project lookup.
        """
        openstackGroup = self._getOpenstackGroup(ldap_tenant)
        if openstackGroup:
            tenant = self._getTenantId(ldap_tenant)
            if not tenant:
                # Create or map tenant in openstack.
                project = self._projectManager.list(name=ldap_tenant)
                if not project:
                    project = self._projectManager.create(ldap_tenant, self._domainManager.find(id='default'))
                self._roleManager.grant(self._roleManager.find(name='member').id,
                                        group=openstackGroup.id,
                                        project=project.id)
                tenant = project.id
            # Platform (digile.platform) projects get an SSH ingress rule.
            # BUG FIX: the search base DN used a backslash line continuation
            # inside the string literal, which embeds the next line's leading
            # whitespace into the DN; replaced with a single clean literal.
            if ldap_conn and ldap_tenant in map(lambda t: t[0].split(',')[0].split('=')[1],
                                                ldap_conn.ldap_search('cn=digile.platform,ou=projects,dc=forgeservicelab,dc=fi',
                                                                      SCOPE_SUBORDINATE, attrsonly=1)):
                with novaClient.Client(username=self._AUTH_USERNAME,
                                       api_key=self._AUTH_PASSWORD,
                                       tenant_id=tenant,
                                       auth_url='%s:5001/v2.0' % self._BASE_URL) as nova:
                    try:
                        nova.security_group_rules.create(nova.security_groups.find(name='default').id,
                                                         ip_protocol='tcp',
                                                         from_port=22,
                                                         to_port=22,
                                                         cidr='172.16.58.3/32')
                    except Unauthorized:
                        # butler.service not yet part of the tenant, wait for next round.
                        pass
                    except BadRequest:
                        # Rule already exists, that's OK.
                        pass
            self._ensureTenantNetwork(tenant)
            if quotaDefinition:
                service_opts = {
                    'meta': ['quota-bytes:%s' % quotaDefinition['swift_bytes']],
                    'os_username': self._AUTH_USERNAME,
                    'os_password': self._AUTH_PASSWORD,
                    'os_auth_url': '%s:5001/v2.0' % self._BASE_URL,
                    'os_storage_url': '%s:8081/v1/AUTH_%s' % (self._BASE_URL, self._projectManager.get(tenant).name),
                    'os_tenant_name': self._AUTH_TENANTID
                }
                # Swift: account-level byte quota.
                swift = swiftService.SwiftService(options=service_opts)
                swift.post()
                del swift
                # Cinder: block storage quota.
                cinder = cinderClient.Client(username=self._AUTH_USERNAME,
                                             api_key=self._AUTH_PASSWORD,
                                             tenant_id=self._AUTH_TENANTID,
                                             auth_url=service_opts['os_auth_url'])
                cinder.quotas.update(tenant, gigabytes=quotaDefinition['cinder_GB'])
                del cinder
                # Nova: compute quota plus flavor whitelisting.
                with novaClient.Client(username=self._AUTH_USERNAME,
                                       api_key=self._AUTH_PASSWORD,
                                       tenant_id=self._AUTH_TENANTID,
                                       auth_url=service_opts['os_auth_url']) as nova:
                    nova.quotas.update(tenant,
                                       instances=quotaDefinition['instances'],
                                       cores=quotaDefinition['cores'],
                                       ram=quotaDefinition['ram'],
                                       floating_ips=quotaDefinition['floating_ips'])
                    allFlavors = nova.flavors.findall(is_public=None)
                    map(lambda f: self._grantAccess(nova, f, tenant),
                        filter(lambda f: f.name.encode() in quotaDefinition['flavors'], allFlavors))
                    map(lambda f: self._revokeAccess(nova, f, tenant),
                        filter(lambda f: f.name.encode() not in quotaDefinition['flavors'], allFlavors))
                # Neutron: floating IP quota.
                # BUG FIX: the password kwarg had been mangled by a redaction
                # placeholder (self._<PASSWORD>); restored to the auth password.
                neutron = neutronClient.Client(username=self._AUTH_USERNAME,
                                               password=self._AUTH_PASSWORD,
                                               tenant_id=self._AUTH_TENANTID,
                                               auth_url=service_opts['os_auth_url'])
                neutron.update_quota(tenant, {'quota': {'floatingip': quotaDefinition['floating_ips']}})
                del neutron
            # Every tenant gets at least the m1.tiny flavor; _grantAccess
            # tolerates re-granting (Conflict is swallowed).
            with novaClient.Client(username=self._AUTH_USERNAME,
                                   api_key=self._AUTH_PASSWORD,
                                   tenant_id=self._AUTH_TENANTID,
                                   auth_url='%s:5001/v2.0' % self._BASE_URL) as nova:
                self._grantAccess(nova, nova.flavors.find(name='m1.tiny', is_public=None), tenant)

    def enforceQuotas(self, tenantList, tenantsType, ldap_conn=None):
        """Enforce the quota for each tenant on the list.

        Args:
            tenantList (List): A list of tenants as JSON from Insightly.
            tenantsType (str): A description of the type of tenant, one of 'SDA', 'FPA' or 'FPA (CRA)'.
        """
        # Python 2 map() is eager, so this applies the side effects directly.
        map(lambda t: self._enforceQuota(sanitize(t['PROJECT_NAME']), self._getTenantQuota(t, tenantsType),
                                         ldap_conn), tenantList)
from time import sleep
from ldap import SCOPE_SUBORDINATE
from swiftclient import service as swiftService
from cinderclient.v2 import client as cinderClient
from keystoneclient.exceptions import NotFound
from keystoneclient.v3 import client as keystoneClient
from keystoneclient.v3.roles import RoleManager
from keystoneclient.v3.groups import GroupManager
from keystoneclient.v3.domains import DomainManager
from keystoneclient.v3.projects import ProjectManager
from keystoneclient.v3.role_assignments import RoleAssignmentManager
from neutronclient.v2_0 import client as neutronClient
from novaclient.v1_1 import client as novaClient
from novaclient.exceptions import Conflict
from novaclient.exceptions import NotFound
from novaclient.exceptions import BadRequest
from novaclient.exceptions import Unauthorized
from ldap_updater import LDAPUpdater
class QuotaChecker:
    """Check and enforce OpenStack tenant quota.

    Verifies that a given tenant does have its correct allocated quota.
    Written for Python 2: map()/filter() results are used as lists throughout.

    Attributes:
        DEFAULT_QUOTA (dict): The default quota for a service developer.
        PARTNER_QUOTA (dict): The default quota for a partner with CRA.
        BIGDATA_QUOTA (dict): The quota for big data enabled projects.
    """

    # Quota profile names exactly as stored in Insightly's custom field.
    _DEFAULT_QUOTA_NAME = 'Default CRA quota'
    _BIGDATA_QUOTA_NAME = 'Bigdata CRA quota'

    DEFAULT_QUOTA = {
        'instances': 16,
        'cores': 16,
        'ram': 32 * 1024,
        'floating_ips': 5,
        'cinder_GB': 1024,
        'swift_bytes': 1024 * 1024 * 1024 * 1024,
        'flavors': ['m1.tiny', 'm1.small', 'm1.medium', 'm1.large', 'm1.x-large']
    }
    PARTNER_QUOTA = {
        'instances': 1,
        'cores': 1,
        'ram': 1024,
        'floating_ips': 1,
        'cinder_GB': 40,
        'swift_bytes': 40 * 1024 * 1024 * 1024,
        'flavors': ['m1.tiny']
    }
    BIGDATA_QUOTA = {
        'instances': 16,
        'cores': 46,
        'ram': 400 * 1024,
        'floating_ips': 15,
        'cinder_GB': 1024,
        'swift_bytes': 1024 * 1024 * 1024 * 1024,
        'flavors': ['m1.tiny', 'm1.small', 'hadoop.small', 'hadoop.medium', 'hadoop.large']
    }

    def __init__(self, username=None, password=None, tenantid=None, baseurl=None):
        """Set instance authentication constants.

        BUG FIX: the ``password`` default and its documentation had been
        replaced by a redaction placeholder (``<PASSWORD>``), which is not
        valid Python; restored to a ``None`` default like the other arguments.

        Args:
            username (str): OpenStack administrator username.
            password (str): OpenStack administrator password.
            tenantid (str): OpenStack tenant for the administrator account.
            baseurl (str): OpenStack environment URI.
        """
        self._AUTH_USERNAME = username
        self._AUTH_PASSWORD = password
        self._AUTH_TENANTID = tenantid
        self._BASE_URL = baseurl
        keystone = keystoneClient.Client(username=self._AUTH_USERNAME,
                                         password=self._AUTH_PASSWORD,
                                         project_name=self._AUTH_TENANTID,
                                         auth_url='%s:5001/v3' % self._BASE_URL)
        self._roleManager = RoleManager(keystone)
        self._groupManager = GroupManager(keystone)
        self._domainManager = DomainManager(keystone)
        self._projectManager = ProjectManager(keystone)
        self._roleAssignmentManager = RoleAssignmentManager(keystone)

    def _getOpenstackGroup(self, group):
        """Return the keystone group named ``group``, or None when lookup fails."""
        try:
            return self._groupManager.find(name=group)
        except Exception:
            # Narrowed from a bare except: still best-effort, but no longer
            # swallows SystemExit/KeyboardInterrupt.
            return None

    def _getTenantId(self, tenant):
        """Return the project id assigned to ``tenant``, or None.

        NOTE(review): the map is keyed by role-assignment group ids while
        callers pass the sanitized LDAP tenant name -- confirm group ids and
        tenant names coincide in this deployment.
        """
        projectMap = dict(map(lambda assignment: (assignment.group['id'], assignment.scope['project']['id']),
                              filter(lambda a: 'group' in a._info.keys(), self._roleAssignmentManager.list())))
        return projectMap[tenant].strip() if tenant in projectMap.keys() else None

    def _ensureTenantNetwork(self, tenant):
        """Create a default network, /27 subnet and router for ``tenant`` if missing."""
        neutron = neutronClient.Client(username=self._AUTH_USERNAME,
                                       password=self._AUTH_PASSWORD,
                                       tenant_id=self._AUTH_TENANTID,
                                       auth_url='%s:5001/v2.0' % self._BASE_URL)
        if not filter(lambda network: network['tenant_id'] == tenant, neutron.list_networks()['networks']):
            network = neutron.create_network({'network': {'name': 'default', 'tenant_id': tenant}})['network']
            # Poll until the new network is actually visible.
            while not neutron.list_networks(id=network['id'])['networks']:
                sleep(1)
            # (third, fourth) octet pairs of every allocated 192.168.x.y/27.
            allocated_cidrs = map(lambda chunk: (int(chunk[0]), int(chunk[1])),
                                  map(lambda cidr: cidr['cidr'].split('/')[0].split('.')[-2:],
                                      filter(lambda subnet: subnet['cidr'].endswith('/27'),
                                             neutron.list_subnets()['subnets'])))
            # The 192.168.192.0/27 chunk is treated specially: dropped from
            # the scan and skipped over when allocating.
            # NOTE(review): presumably reserved -- confirm.
            if (192, 0) in allocated_cidrs:
                allocated_cidrs.remove((192, 0))
            if allocated_cidrs:
                # Take the next free /27 after the highest allocated chunk.
                max_bigchunk = max(map(lambda chunk: chunk[0], allocated_cidrs))
                max_smlchunk = max(map(lambda chunk: chunk[1], filter(lambda c: c[0] == max_bigchunk, allocated_cidrs)))
                if max_bigchunk == 191 and max_smlchunk == 224:
                    max_bigchunk = 192
                    max_smlchunk = 0
                if max_smlchunk == 224:
                    cidr = '.'.join([str(chunk) for chunk in [192, 168, max_bigchunk + 1, 0]]) + '/27'
                else:
                    cidr = '.'.join([str(chunk) for chunk in [192, 168, max_bigchunk, max_smlchunk + 32]]) + '/27'
            else:
                cidr = '192.168.0.0/27'
            subnet = neutron.create_subnet({'subnet': {'name': 'default-subnet',
                                                       'cidr': cidr,
                                                       'dns_nameservers': ['192.168.3.11', '192.168.127.12'],
                                                       'tenant_id': tenant,
                                                       'network_id': network['id'],
                                                       'ip_version': '4'}})['subnet']
            while not neutron.list_subnets(id=subnet['id'])['subnets']:
                sleep(1)
            router = neutron.create_router({'router': {'tenant_id': tenant,
                                                       'name': 'default-router'}})['router']
            while not neutron.list_routers(id=router['id'])['routers']:
                sleep(1)
            # Uplink the router to the external network and attach the subnet.
            public_net_id = filter(lambda n: n['router:external'],
                                   neutron.list_networks(name='public')['networks'])[0]['id']
            neutron.add_gateway_router(router['id'], {'network_id': public_net_id})
            neutron.add_interface_router(router['id'], {'subnet_id': subnet['id']})

    def _getTenantQuota(self, tenant, tenantType):
        """Select the quota definition for an Insightly ``tenant`` record.

        Args:
            tenant (dict): Insightly project JSON, including CUSTOMFIELDS.
            tenantType (str): One of the LDAPUpdater tenant type constants.

        Returns:
            dict or None: a *_QUOTA dict, or None when no rule applies.
        """
        quota = None
        # PROJECT_FIELD_1 carries the quota profile name in Insightly.
        statedQuota = map(lambda q: q['FIELD_VALUE'], filter(lambda f: f['CUSTOM_FIELD_ID'] == 'PROJECT_FIELD_1',
                                                             tenant['CUSTOMFIELDS']))[0]
        if statedQuota == self._BIGDATA_QUOTA_NAME:
            quota = self.BIGDATA_QUOTA
        else:
            if statedQuota == self._DEFAULT_QUOTA_NAME:
                if tenantType == LDAPUpdater.FPA_CRA:
                    quota = self.PARTNER_QUOTA
                if tenantType == LDAPUpdater.SDA:
                    quota = self.DEFAULT_QUOTA
        return quota

    def _grantAccess(self, client, flavor, tenant):
        """Grant ``tenant`` access to ``flavor``; already granted is fine."""
        try:
            client.flavor_access.add_tenant_access(flavor, tenant)
        except Conflict:
            pass

    def _revokeAccess(self, client, flavor, tenant):
        """Revoke ``tenant`` access to ``flavor``; never granted is fine."""
        try:
            client.flavor_access.remove_tenant_access(flavor, tenant)
        except NotFound:
            # NOTE(review): this is novaclient's NotFound -- the later import
            # shadows keystoneclient's NotFound at module level.
            pass

    def _enforceQuota(self, ldap_tenant, quotaDefinition, ldap_conn=None):
        """Ensure the OpenStack project for ``ldap_tenant`` exists with its quota.

        Creates or maps the project when needed, opens SSH to platform
        projects, ensures the default network and pushes the quota to the
        swift, cinder, nova and neutron services.

        Args:
            ldap_tenant (str): Sanitized LDAP tenant (group) name.
            quotaDefinition (dict): A *_QUOTA dict, or None to skip quota push.
            ldap_conn: Optional LDAP connection for platform project lookup.
        """
        openstackGroup = self._getOpenstackGroup(ldap_tenant)
        if openstackGroup:
            tenant = self._getTenantId(ldap_tenant)
            if not tenant:
                # Create or map tenant in openstack.
                project = self._projectManager.list(name=ldap_tenant)
                if not project:
                    project = self._projectManager.create(ldap_tenant, self._domainManager.find(id='default'))
                self._roleManager.grant(self._roleManager.find(name='member').id,
                                        group=openstackGroup.id,
                                        project=project.id)
                tenant = project.id
            # Platform (digile.platform) projects get an SSH ingress rule.
            # BUG FIX: the search base DN used a backslash line continuation
            # inside the string literal, which embeds the next line's leading
            # whitespace into the DN; replaced with a single clean literal.
            if ldap_conn and ldap_tenant in map(lambda t: t[0].split(',')[0].split('=')[1],
                                                ldap_conn.ldap_search('cn=digile.platform,ou=projects,dc=forgeservicelab,dc=fi',
                                                                      SCOPE_SUBORDINATE, attrsonly=1)):
                with novaClient.Client(username=self._AUTH_USERNAME,
                                       api_key=self._AUTH_PASSWORD,
                                       tenant_id=tenant,
                                       auth_url='%s:5001/v2.0' % self._BASE_URL) as nova:
                    try:
                        nova.security_group_rules.create(nova.security_groups.find(name='default').id,
                                                         ip_protocol='tcp',
                                                         from_port=22,
                                                         to_port=22,
                                                         cidr='172.16.58.3/32')
                    except Unauthorized:
                        # butler.service not yet part of the tenant, wait for next round.
                        pass
                    except BadRequest:
                        # Rule already exists, that's OK.
                        pass
            self._ensureTenantNetwork(tenant)
            if quotaDefinition:
                service_opts = {
                    'meta': ['quota-bytes:%s' % quotaDefinition['swift_bytes']],
                    'os_username': self._AUTH_USERNAME,
                    'os_password': self._AUTH_PASSWORD,
                    'os_auth_url': '%s:5001/v2.0' % self._BASE_URL,
                    'os_storage_url': '%s:8081/v1/AUTH_%s' % (self._BASE_URL, self._projectManager.get(tenant).name),
                    'os_tenant_name': self._AUTH_TENANTID
                }
                # Swift: account-level byte quota.
                swift = swiftService.SwiftService(options=service_opts)
                swift.post()
                del swift
                # Cinder: block storage quota.
                cinder = cinderClient.Client(username=self._AUTH_USERNAME,
                                             api_key=self._AUTH_PASSWORD,
                                             tenant_id=self._AUTH_TENANTID,
                                             auth_url=service_opts['os_auth_url'])
                cinder.quotas.update(tenant, gigabytes=quotaDefinition['cinder_GB'])
                del cinder
                # Nova: compute quota plus flavor whitelisting.
                with novaClient.Client(username=self._AUTH_USERNAME,
                                       api_key=self._AUTH_PASSWORD,
                                       tenant_id=self._AUTH_TENANTID,
                                       auth_url=service_opts['os_auth_url']) as nova:
                    nova.quotas.update(tenant,
                                       instances=quotaDefinition['instances'],
                                       cores=quotaDefinition['cores'],
                                       ram=quotaDefinition['ram'],
                                       floating_ips=quotaDefinition['floating_ips'])
                    allFlavors = nova.flavors.findall(is_public=None)
                    map(lambda f: self._grantAccess(nova, f, tenant),
                        filter(lambda f: f.name.encode() in quotaDefinition['flavors'], allFlavors))
                    map(lambda f: self._revokeAccess(nova, f, tenant),
                        filter(lambda f: f.name.encode() not in quotaDefinition['flavors'], allFlavors))
                # Neutron: floating IP quota.
                # BUG FIX: the password kwarg had been mangled by a redaction
                # placeholder (self._<PASSWORD>); restored to the auth password.
                neutron = neutronClient.Client(username=self._AUTH_USERNAME,
                                               password=self._AUTH_PASSWORD,
                                               tenant_id=self._AUTH_TENANTID,
                                               auth_url=service_opts['os_auth_url'])
                neutron.update_quota(tenant, {'quota': {'floatingip': quotaDefinition['floating_ips']}})
                del neutron
            # Every tenant gets at least the m1.tiny flavor; _grantAccess
            # tolerates re-granting (Conflict is swallowed).
            with novaClient.Client(username=self._AUTH_USERNAME,
                                   api_key=self._AUTH_PASSWORD,
                                   tenant_id=self._AUTH_TENANTID,
                                   auth_url='%s:5001/v2.0' % self._BASE_URL) as nova:
                self._grantAccess(nova, nova.flavors.find(name='m1.tiny', is_public=None), tenant)

    def enforceQuotas(self, tenantList, tenantsType, ldap_conn=None):
        """Enforce the quota for each tenant on the list.

        Args:
            tenantList (List): A list of tenants as JSON from Insightly.
            tenantsType (str): A description of the type of tenant, one of 'SDA', 'FPA' or 'FPA (CRA)'.
        """
        # Python 2 map() is eager, so this applies the side effects directly.
        map(lambda t: self._enforceQuota(sanitize(t['PROJECT_NAME']), self._getTenantQuota(t, tenantsType),
                                         ldap_conn), tenantList)
import sys
import requests
import json
import phantom.app as phantom
from datetime import datetime
from bs4 import BeautifulSoup
from phantom.base_connector import BaseConnector
from phantom.action_result import ActionResult
from digitalguardianarc_consts import *
from bs4 import UnicodeDammit
class RetVal(tuple):
    """A (status, data) pair used as a lightweight return value.

    The second element defaults to None so error paths can write
    ``return RetVal(phantom.APP_ERROR)`` with no payload.
    """

    def __new__(cls, val1, val2=None):
        # tuple is immutable, so the pair is built in __new__.
        return tuple.__new__(RetVal, (val1, val2))
class DigitalGuardianArcConnector(BaseConnector):
def __init__(self):
    """Initialize the Phantom BaseConnector and reset connection members."""
    super(DigitalGuardianArcConnector, self).__init__()
    # Configuration-derived members; left unset until initialize() runs.
    for attr in ('_state', '_auth_url', '_arc_url', '_client_id',
                 '_client_secret', '_export_profile', '_api_key'):
        setattr(self, attr, None)
    self._client_headers = {}
def _process_empty_response(self, response, action_result):
    """Treat HTTP 200 with an empty body as success; anything else is an error."""
    if response.status_code != 200:
        message = ('Status Code: {0}. Empty response and no information '
                   'in the header'.format(response.status_code))
        return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
    return RetVal(phantom.APP_SUCCESS, {})
def _process_html_response(self, response, action_result):
    """Turn an HTML error page into a readable Phantom error message."""
    try:
        soup = BeautifulSoup(response.text, 'html.parser')
        # Strip markup that carries no error information.
        for tag in soup(["script", "style", "footer", "nav"]):
            tag.extract()
        stripped = (line.strip() for line in soup.text.split('\n'))
        error_text = '\n'.join(line for line in stripped if line)
    except Exception as exc:
        error_text = 'Cannot parse error details {}'.format(
            self._get_error_message_from_exception(exc))
    message = "Status Code: {0}. Data from server:{1}".format(
        response.status_code,
        self._handle_py_ver_compat_for_input_str(error_text))
    # Escape braces so later str.format calls cannot choke on them.
    message = message.replace('{', '{{').replace('}', '}}')
    return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
def _process_json_response(self, r, action_result):
    """Parse a JSON body; success for 2xx/3xx status codes, error otherwise."""
    try:
        resp_json = r.json()
    except Exception as exc:
        err = self._get_error_message_from_exception(exc)
        return RetVal(action_result.set_status(
            phantom.APP_ERROR, 'Unable to parse JSON response. {0}'.format(err)), None)
    if 200 <= r.status_code < 399:
        return RetVal(phantom.APP_SUCCESS, resp_json)
    # Non-success status: surface the server's payload in the error message.
    message = 'Error from server. Status Code: {0} Data from server: {1}'.format(
        r.status_code,
        self._handle_py_ver_compat_for_input_str(
            r.text.replace('{', '{{').replace('}', '}}')))
    return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
def _process_response(self, response, action_result):
    """Dispatch a requests response to the matching _process_* handler.

    Records status/text/headers as debug data (skipped for successful
    get-file responses, whose bodies may be large), then selects the JSON,
    HTML or empty-body processor based on the Content-Type header.

    :param response: response object from _make_rest_call
    :param action_result: ActionResult to record status and messages on
    :return: RetVal(status, parsed payload or None)
    """
    try:
        # Keep the raw response for debugging unless this is a successful
        # file download.
        if hasattr(action_result, 'add_debug_data') and (self.get_action_identifier() != 'get-file' or not 200 <= response.status_code < 399):
            action_result.add_debug_data({'r_status_code': response.status_code})
            action_result.add_debug_data({'r_text': response.text})
            action_result.add_debug_data({'r_headers': response.headers})
        if 'json' in response.headers.get('Content-Type', ''):
            self.save_progress("Action: 'process_json_response'")
            return self._process_json_response(response, action_result)
        if 'html' in response.headers.get('Content-Type', ''):
            self.save_progress("Action: 'process_html_response'")
            return self._process_html_response(response, action_result)
        if not response.text:
            self.save_progress("Action: 'process_empty_response'")
            return self._process_empty_response(response, action_result)
        message = (
            "Can't process response from server. Status Code: {0} Data from server: {1}"
        ).format(response.status_code,
                 self._handle_py_ver_compat_for_input_str(response.text.replace('{', '{{').replace('}', '}}')))
        return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
    except Exception as e:
        err = self._get_error_message_from_exception(e)
        # BUG FIX: sys.exc_info() returns a (type, value, traceback) tuple;
        # tb_lineno lives on the traceback object (index 2), not the tuple,
        # so the original line raised AttributeError inside the handler.
        exc_tb = sys.exc_info()[2]
        self.save_progress(('exception_line={} {}').format(exc_tb.tb_lineno, err))
        return RetVal(action_result.set_status(phantom.APP_ERROR, ('Error: {}').format(err)), None)
def _make_rest_call(self, endpoint, action_result, method='get', **kwargs):
# **kwargs can be any additional parameters that requests.request accepts
config = self.get_config()
resp_json = None
try:
request_func = getattr(requests, method)
except AttributeError:
return RetVal(action_result.set_status(phantom.APP_ERROR, 'Invalid method: {0}'.format(method)), resp_json)
# Create a URL to connect to
url = "%s/%s" % (self._arc_url.strip("/"), endpoint)
try:
self.save_progress("Connecting to URL: {0}".format(url))
r = request_func(url,
verify=config.get('verify_server_cert', False),
**kwargs)
except Exception as e:
err = self._get_error_message_from_exception(e)
return RetVal(action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err)), resp_json)
return self._process_response(r, action_result)
def _handle_py_ver_compat_for_input_str(self, input_str):
"""
This method returns the encoded|original string based on the Python version.
:param input_str: Input string to be processed
:return: input_str (Processed input string based on following logic 'input_str - Python 3; encoded input_str - Python 2')
"""
try:
if input_str and self._python_version == 2:
input_str = UnicodeDammit(input_str).unicode_markup.encode('utf-8')
except:
self.debug_print("Error occurred while handling python 2to3 compatibility for the input string")
return input_str
def _get_error_message_from_exception(self, e):
""" This method is used to get appropriate error messages from the exception.
:param e: Exception object
:return: error message
"""
try:
if e.args:
if len(e.args) > 1:
error_code = e.args[0]
error_msg = e.args[1]
elif len(e.args) == 1:
error_code = ERR_CODE_MSG
error_msg = e.args[0]
else:
error_code = ERR_CODE_MSG
error_msg = ERR_MSG_UNAVAILABLE
except:
error_code = ERR_CODE_MSG
error_msg = ERR_MSG_UNAVAILABLE
try:
error_msg = self._handle_py_ver_compat_for_input_str(error_msg)
except TypeError:
error_msg = TYPE_ERR_MSG
except:
error_msg = ERR_MSG_UNAVAILABLE
try:
if error_code in ERR_CODE_MSG:
error_text = "Error Message: {0}".format(error_msg)
else:
error_text = "Error Code: {0}. Error Message: {1}".format(error_code, error_msg)
except:
self.debug_print(PARSE_ERR_MSG)
error_text = PARSE_ERR_MSG
return error_text
def _handle_test_connectivity(self, param):
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
# NOTE: test connectivity does _NOT_ take any parameters
# i.e. the param dictionary passed to this handler will be empty.
# Also typically it does not add any data into an action_result either.
# The status and progress messages are more important.
self.save_progress('Connecting to DG ARC')
ret_val, message = self.requestApiToken()
if not self._client_headers['Authorization']:
self.save_progress('Test Connectivity Failed')
return action_result.get_status()
else:
self.save_progress('Test Connectivity Passed')
return action_result.set_status(phantom.APP_SUCCESS)
    def _handle_on_poll(self, param):
        """Ingest ARC export rows as Phantom containers and artifacts."""
        # Tracks the previously created container name so consecutive export
        # rows belonging to the same alarm reuse the same container.
        oldname = ''
        action_result = self.add_action_result(ActionResult(dict(param)))
        response_status, export_list = self.get_export(action_result)
        if phantom.is_fail(response_status):
            self.debug_print('On Poll Failed')
            return action_result.get_status()
        if export_list:
            self.save_progress('Ingesting alarm records')
        else:
            self.save_progress('No export data found')
            return action_result.set_status(phantom.APP_SUCCESS, 'No export data found')
        for entry in export_list:
            try:
                # Container name is "<alarm name up to the first comma>-<guid>";
                # 100 acts as an arbitrary length cap when no comma is present.
                comm = entry['dg_alarm_name'].find(',')
                if comm == -1:
                    comm = 100
                name = ('{alarm_name}-{id}').format(
                    alarm_name=entry['dg_alarm_name'][0:comm],
                    id=entry['dg_guid'])
                if name != oldname:
                    container_id = self.create_container(name, entry)
                    oldname = name
                # NOTE(review): when name == oldname this reuses container_id
                # from the previous iteration -- assumes export rows arrive
                # grouped by alarm; confirm against the export profile.
                if container_id:
                    (artifacts_creation_status,
                     artifacts_creation_msg) = self.create_artifacts(alert=entry, container_id=container_id)
                    if phantom.is_fail(artifacts_creation_status):
                        self.debug_print((
                            'Error while creating artifacts for container with ID {container_id}. {error_msg}'
                        ).format(container_id=container_id, error_msg=artifacts_creation_msg))
                self._state['first_run'] = False
            except Exception as e:
                # Per-row failures are logged and skipped; polling continues.
                err = self._get_error_message_from_exception(e)
                self.debug_print("Error occurred while processing export list response from server. {}".format(err))
        return action_result.set_status(phantom.APP_SUCCESS)
def get_export(self, action_result):
self.save_progress('Getting ARC Export data')
ret_val, message = self.requestApiToken()
if phantom.is_fail(ret_val):
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
full_url = '{0}/export_profiles/{1}/export_and_ack'.format(self._arc_url.strip("/"), self._export_profile)
try:
request_response = requests.post(url=full_url,
headers=self._client_headers,
verify=False)
except Exception as e:
err = self._get_error_message_from_exception(e)
return RetVal(action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err)), None)
request_status = request_response.status_code
if 200 <= request_status <= 299:
headerField = []
try:
jsonText = json.loads(request_response.text)
if jsonText['total_hits'] == 0:
return RetVal(phantom.APP_SUCCESS, None)
for field in jsonText['fields']:
print('name=' + field['name'])
headerField.append(field['name'])
exportdata = []
for data in jsonText['data']:
entryLine = {}
headerPosition = 0
for dataValue in data:
if not dataValue:
entryLine[headerField[headerPosition]] = "null"
else:
entryLine[headerField[headerPosition]] = dataValue
headerPosition += 1
exportdata.append(entryLine)
return RetVal(phantom.APP_SUCCESS, exportdata)
except Exception as e:
err = self._get_error_message_from_exception(e)
return RetVal(action_result.set_status(phantom.APP_ERROR, 'Unable to parse JSON response. {0}'.format(err)), None)
else:
data = self._handle_py_ver_compat_for_input_str(request_response.text.replace('{', '{{').replace('}', '}}'))
message = 'Error from server. Status Code: {0} Data from server: {1}'.format(request_status, data)
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
    def create_container(self, name, items):
        """Create a Phantom container for one export row.

        :param name: container name ("<alarm>-<guid>", built by the caller)
        :param items: export row dict
        :return: container id on success, otherwise None
        """
        container_dict = dict()
        # Only rows whose detection source is not a plain 'alert' AND that
        # carry a threat tag become containers; everything else is skipped.
        if not items['dg_alert.dg_detection_source'] == 'alert' and items[
                'dg_tags']:
            container_dict['name'] = name
            # dg_processed_time is epoch milliseconds; rendered as ISO + 'Z'.
            container_dict['start_time'] = ('{time}Z').format(
                time=datetime.utcfromtimestamp(items['dg_processed_time'] / 1000).isoformat())
            container_dict['source_data_identifier'] = container_dict['name']
            container_dict['severity'] = self.convert_to_phantom_severity(
                items['dg_alarm_sev'])
            container_dict['sensitivity'] = self.convert_to_phantom_sensitivity(items['dg_class.dg_name'])
            custom_fields = {
                'threat type': (items['dg_tags']),
                'activity': (items['dg_utype'])
            }
            # Tags rendered as "key=value"; fields that are None are skipped.
            container_dict['tags'] = [('{}={}').format(x, custom_fields[x])
                                      for x in custom_fields
                                      if custom_fields[x] is not None]
            container_creation_status, container_creation_msg, container_id = self.save_container(
                container=container_dict)
            if phantom.is_fail(container_creation_status):
                self.save_progress((
                    'Error while creating container for alert {alert_name}. {error_message}'
                ).format(alert_name=items['dg_alarm_name'], error_message=container_creation_msg))
                return None
            else:
                return container_id
        return None
    def create_artifacts(self, alert, container_id):
        """ This function is used to create artifacts in given container using export data.

        :param alert: Data of single export
        :param container_id: ID of container in which we have to create the artifacts
        :return: status(success/failure), message
        """
        artifacts_list = []
        # Key into specific_alert_mapping below; only 'alarm' exists today.
        cat = 'alarm'
        # self.save_progress(('action=create_artifacts tenant={} artifact={}').format(self._client_id, json.dumps(alert)))
        # Maps the first 4 characters of the operation type (dg_utype) to the
        # artifact categories that should be emitted for that operation.
        operation_mapping = {
            'File': ['Alarm', 'Process', 'Computer', 'User', 'File'],
            'CD/D': ['Alarm', 'Process', 'Computer', 'User', 'File'],
            'Netw':
                ['Alarm', 'Process', 'Computer', 'User', 'File', 'Network'],
            'Send': ['Alarm', 'Process', 'Computer', 'User', 'Email'],
            'Proc': ['Alarm', 'Process', 'Computer', 'User'],
            'Appl': ['Alarm', 'Process', 'Computer', 'User'],
            'ADE ': ['Alarm', 'Process', 'Computer', 'User', 'File'],
            'Prin':
                ['Alarm', 'Process', 'Computer', 'User', 'File', 'Network'],
            'Othe': ['Alarm']
        }
        # Per artifact category: CEF field name -> (export column, CEF types).
        artifacts_mapping = {
            'Alarm': {
                'Alarm_Name': ('dg_alarm_name', []),
                'Alarm_Severity': ('dg_alarm_sev', []),
                'Threat_Type': ('dg_tags', []),
                'Detection_Name': ('dg_det_name', []),
                'Alert_Category': ('dg_alert.dg_category_name', []),
                'Policy_Name':
                    ('dg_alert.dg_alert.dg_alert.dg_policy.dg_name', []),
                'Action_Was_Blocked': ('dg_alert.dg_hc', []),
                'startTime': ('dg_local_timestamp', [])
            },
            'File': {
                'File_Name': ('dg_src_file_name', ['fileName']),
                'File_Size': ('dg_alert.dg_total_size', ['fileSize']),
                'Classification': ('dg_class.dg_name', []),
                'File_Was_Classified': ('dg_hc', []),
                'File_Type': ('dg_src_file_ext', ['fileType']),
                'File_Path': ('dg_alert.uad_sp', ['filePath']),
                'Destination_File_Path': ('dg_alert.uad_dp', ['filePath'])
            },
            'Process': {
                'Process_Name': ('dg_proc_file_name', ['process name']),
                'Parent_Process_Name': ('dg_parent_name', ['app']),
                'Process_Path': ('pi_fp', ['filePath']),
                'Command_Line': ('pi_cmdln', []),
                'MD5': ('dg_md5', ['filehash']),
                'SHA1': ('dg_sha1', ['filehash']),
                'SHA256': ('dg_sha256', ['filehash']),
                'VirusTotal_Status': ('dg_vt_status', [])
            },
            'Email': {
                'Attachment_File_Name':
                    ('dg_attachments.dg_src_file_name', ['fileName']),
                'Attachment_Was_Classified': ('dg_attachments.uad_sfc', []),
                'Email_Subject': ('ua_msb', ['email']),
                'Email_Sender': ('ua_ms', ['email']),
                'Email_Recipient': ('dg_recipients.uad_mr', ['email']),
                'Email_Recipient_Domain':
                    ('dg_recipients.dg_rec_email_domain', ['domain'])
            },
            'Network': {
                'Destination_Address': ('ua_ra', ['ip', 'ipv4']),
                'Request_URL': ('ua_up', ['url']),
                'Destination_DNS_Domain': ('ua_hn', ['domain']),
                'Remote_Port': ('ua_rp', ['ip'])
            },
            'Computer': {
                'Computer_Name': ('dg_machine_name', ['hostname']),
                'Computer_Type': ('dg_machine_type', []),
                'Source_Host_Name': ('dg_shn', []),
                'Source_IP': ('ua_sa', ['ip', 'ipv4']),
                'Source_Address': ('ua_sa', ['ip', 'ipv4'])
            },
            'User': {
                'User_Name': ('dg_user', ['suser']),
                'NTDomain': ('ua_dn', [])
            }
        }
        # Extra "Alarm Detail" artifact fields, keyed by `cat`.
        specific_alert_mapping = {
            'alarm': {
                'dgarcUID': ('dg_guid', []),
                'dg_process_time': ('dg_process_time', []),
                'Activity': ('dg_utype', []),
                'os_version': ('os_version', []),
                'Policy': ('dg_alert.dg_policy.dg_name', []),
                'Printer_Name': ('uad_pn', []),
                'os': ('os', []),
                'browser': ('browser', []),
                'App_Category': ('appcategory', ['category']),
            }
        }
        # Build one artifact per category, populated from the export row.
        for (artifact_name, artifact_keys) in artifacts_mapping.items():
            temp_dict = {}
            cef = {}
            cef_types = {}
            # self.save_progress(('artifact_name={}').format(artifact_name))
            for (artifact_key, artifact_tuple) in artifact_keys.items():
                if alert.get(artifact_tuple[0]):
                    cef[artifact_key] = alert[artifact_tuple[0]]
                    cef_types[artifact_key] = artifact_tuple[1]
            # NOTE(review): tenant is set unconditionally, so `if cef:` below
            # is always true and every accepted category yields an artifact
            # even when no mapped fields matched -- confirm this is intended.
            cef['tenant'] = self._client_id
            if cef:
                temp_dict['cef'] = cef
                temp_dict['cef_types'] = cef_types
                temp_dict['name'] = artifact_name
                temp_dict['label'] = artifact_name
                temp_dict['type'] = 'host'
                temp_dict['container_id'] = container_id
                temp_dict['severity'] = self.convert_to_phantom_severity(alert['dg_alarm_sev'])
                temp_dict['source_data_identifier'] = self.create_dict_hash(temp_dict)
                temp_dict['tenant'] = self._client_id
                # Select the category list from the operation type prefix.
                operation = alert['dg_utype'][:4]
                if operation in operation_mapping.keys():
                    accepted_types = operation_mapping[operation]
                else:
                    accepted_types = operation_mapping['Othe']
                if artifact_name in accepted_types:
                    artifacts_list.append(temp_dict)
        # Always append the "Alarm Detail" artifact for the alarm category.
        if cat in specific_alert_mapping:
            temp_dict = {}
            cef = {}
            cef_types = {}
            artifact_name = '{} Artifact'.format('Alarm Detail')
            # artifact_name = '{} Artifact'.format(alert.get('dg_alarm_name'))
            for (artifact_key, artifact_tuple) in specific_alert_mapping.get(cat).items():
                if alert.get(artifact_tuple[0]):
                    cef[artifact_key] = alert[artifact_tuple[0]]
                    cef_types[artifact_key] = artifact_tuple[1]
            cef['tenant'] = self._client_id
            if cef:
                temp_dict['cef'] = cef
                temp_dict['cef_types'] = cef_types
                temp_dict['name'] = artifact_name
                temp_dict['label'] = artifact_name
                temp_dict['type'] = 'host'
                temp_dict['container_id'] = container_id
                temp_dict['severity'] = self.convert_to_phantom_severity(alert['dg_alarm_sev'])
                temp_dict['source_data_identifier'] = self.create_dict_hash(temp_dict)
                temp_dict['tenant'] = self._client_id
                artifacts_list.append(temp_dict)
        create_artifact_status, create_artifact_msg, _ = self.save_artifacts(artifacts_list)
        if phantom.is_fail(create_artifact_status):
            return (phantom.APP_ERROR, create_artifact_msg)
        return (phantom.APP_SUCCESS, 'Artifacts created successfully')
def convert_to_phantom_severity(self, dg_severity):
if dg_severity == 'Critical':
phantom_severity = 'High'
elif dg_severity == 'High':
phantom_severity = 'Medium'
else:
phantom_severity = 'Low'
return phantom_severity
# mapping classification name to dlp_high, dlp_restrict,dlp_medium,dlp_low
def convert_to_phantom_sensitivity(self, dg_classification):
if dg_classification[-3:] == 'igh':
phantom_sensitivity = 'red'
elif dg_classification[-3:] == 'ted':
phantom_sensitivity = 'red'
elif dg_classification[-3:] == 'med':
phantom_sensitivity = 'amber'
elif dg_classification[-3:] == 'low':
phantom_sensitivity = 'green'
else:
phantom_sensitivity = 'white'
return phantom_sensitivity
def create_dict_hash(self, input_dict):
if not input_dict:
return
else:
try:
input_dict_str = json.dumps(input_dict, sort_keys=True)
self.debug_print("Input dictionary is {}".format(self._handle_py_ver_compat_for_input_str(input_dict_str)))
return
except Exception as e:
err = self._get_error_message_from_exception(e)
self.debug_print("Handled exception in '_create_dict_hash'", err)
return
def get_watchlist_id(self, watchListName, action_result):
ret_val, message = self.requestApiToken()
if phantom.is_fail(ret_val):
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
full_url = '{0}/watchlists/'.format(self._arc_url.strip("/"))
try:
r = requests.get(url=full_url,
headers=self._client_headers,
verify=False)
except Exception as e:
err = self._get_error_message_from_exception(e)
return RetVal(action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err)), None)
try:
jsonText = json.loads(r.text)
list_id = ''
if 200 <= r.status_code <= 299:
jsonText = json.loads(r.text)
for jText in jsonText:
if self._handle_py_ver_compat_for_input_str(jText['display_name']).lower() == watchListName.lower():
list_id = jText['name']
return RetVal(phantom.APP_SUCCESS, list_id)
return RetVal(phantom.APP_SUCCESS, list_id)
else:
data = self._handle_py_ver_compat_for_input_str(r.text.replace('{', '{{').replace('}', '}}'))
message = 'Error from server. Status Code: {0} Data from server: {1}'.format(r.status_code, data)
return RetVal(action_result.set_status(phantom.APP_ERROR, message), list_id)
except Exception as e:
err = self._get_error_message_from_exception(e)
return RetVal(action_result.set_status(phantom.APP_ERROR, 'Unable to process response from the server. {0}'.format(err)), list_id)
    def _check_watchlist_id(self, watch_list_id, watchlist_entry, action_result):
        """Find the value_id of an entry within a watchlist.

        :param watch_list_id: server-side watchlist id
        :param watchlist_entry: entry value to match (case-insensitive)
        :param action_result: ActionResult used for error reporting
        :return: RetVal(status, value_id; '' when the entry is absent)
        """
        full_url = '{0}/watchlists/'.format(self._arc_url.strip("/"))
        try:
            # NOTE(review): limit=100000 caps the scan; entries beyond that
            # would be missed. TLS verification is disabled -- confirm.
            r = requests.get(url='{0}{1}/values?limit=100000'.format(full_url, watch_list_id),
                             headers=self._client_headers,
                             verify=False)
        except Exception as e:
            err = self._get_error_message_from_exception(e)
            return RetVal(action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err)), None)
        try:
            if 200 <= r.status_code <= 299:
                jsonText = json.loads(r.text)
                entryExists = False
                # Case-insensitive match against every value in the list.
                for jText in jsonText:
                    if self._handle_py_ver_compat_for_input_str(jText['value_name']).lower() == watchlist_entry.lower():
                        entryExists = True
                        return RetVal(phantom.APP_SUCCESS, jText['value_id'])
                if not entryExists:
                    # Entry absent: still a success, with an empty value_id.
                    return RetVal(phantom.APP_SUCCESS, '')
            else:
                data = self._handle_py_ver_compat_for_input_str(r.text.replace('{', '{{').replace('}', '}}'))
                message = 'Error from server. Status Code: {0} Data from server: {1}'.format(r.status_code, data)
                return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
        except Exception as e:
            err = self._get_error_message_from_exception(e)
            return RetVal(action_result.set_status(phantom.APP_ERROR, 'Unable to process response from the server. {0}'.format(err)), None)
def get_list_id(self, list_name, list_type, action_result):
ret_val, message = self.requestApiToken()
if phantom.is_fail(ret_val):
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
full_url = '{0}/lists/{1}'.format(self._arc_url.strip("/"), list_type)
try:
r = requests.get(url=full_url,
headers=self._client_headers,
verify=False)
except Exception as e:
err = self._get_error_message_from_exception(e)
return RetVal(action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err)), None)
try:
jsonText = json.loads(r.text)
list_id = ""
if 200 <= r.status_code <= 299:
for jText in jsonText:
if self._handle_py_ver_compat_for_input_str(jText['name']).lower() == list_name.lower():
list_id = jText['id']
return RetVal(phantom.APP_SUCCESS, list_id)
return RetVal(phantom.APP_SUCCESS, None)
else:
data = self._handle_py_ver_compat_for_input_str(r.text.replace('{', '{{').replace('}', '}}'))
message = 'Error from server. Status Code: {0} Data from server: {1}'.format(r.status_code, data)
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
except Exception as e:
err = self._get_error_message_from_exception(e)
return RetVal(action_result.set_status(phantom.APP_ERROR, 'Unable to process response from the server. {0}'.format(err)), None)
def _add_watchlist_entry(self, param):
self.save_progress(('In action handler for: {0}').format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
self.debug_print(param)
watchlist_name = self._handle_py_ver_compat_for_input_str(param['watchlist_name'])
watchlist_entry = self._handle_py_ver_compat_for_input_str(param['watchlist_entry'])
msg_string = "{0} to watchlist={1}".format(watchlist_entry, watchlist_name)
# self.save_progress(('Watchlistname={} Watchlistentry={}').format(watchlist_name, watchlist_entry))
ret_val, watch_list_id = self.get_watchlist_id(watchlist_name, action_result)
if phantom.is_fail(ret_val):
return action_result.get_status()
if watch_list_id:
watch_list_entry_json = '[{"value_name":"%s"}]' % watchlist_entry
full_url = '{0}/watchlists/'.format(self._arc_url.strip("/"))
try:
r = requests.post(url='{0}{1}/values/'.format(full_url, watch_list_id),
data=watch_list_entry_json,
headers=self._client_headers,
verify=False)
except Exception as e:
err = self._get_error_message_from_exception(e)
return action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err))
if 200 <= r.status_code <= 299:
return action_result.set_status(phantom.APP_SUCCESS, 'Successfully added {0}'.format(msg_string))
else:
return action_result.set_status(phantom.APP_ERROR, 'Failed to add {0}'.format(msg_string))
return action_result.set_status(phantom.APP_ERROR, 'Could not find watch_list = {0}'.format(watchlist_name))
    def _remove_watchlist_entry(self, param):
        """Remove an entry from a named watchlist.

        :param param: action parameters with 'watchlist_name' and 'watchlist_entry'
        :return: action_result status
        """
        self.save_progress(('In action handler for: {0}').format(self.get_action_identifier()))
        action_result = self.add_action_result(ActionResult(dict(param)))
        self.debug_print(param)
        watchlist_name = self._handle_py_ver_compat_for_input_str(param['watchlist_name'])
        watchlist_entry = self._handle_py_ver_compat_for_input_str(param['watchlist_entry'])
        msg_string = '{0} from watchlist={1}'.format(watchlist_entry, watchlist_name)
        # Resolve the watchlist name to its server-side id.
        ret_val, watch_list_id = self.get_watchlist_id(watchlist_name, action_result)
        if phantom.is_fail(ret_val):
            return action_result.get_status()
        if watch_list_id:
            # Resolve the entry value to its value_id within the watchlist.
            ret_val, watch_list_value_id = self._check_watchlist_id(watch_list_id, watchlist_entry, action_result)
            if phantom.is_fail(ret_val):
                return action_result.get_status()
            if watch_list_value_id:
                full_url = '{0}/watchlists/'.format(self._arc_url.strip("/"))
                try:
                    r = requests.delete(url='{0}{1}/values/{2}'.format(full_url, watch_list_id, watch_list_value_id),
                                        headers=self._client_headers,
                                        verify=False)
                except Exception as e:
                    err = self._get_error_message_from_exception(e)
                    return action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err))
                if 200 <= r.status_code <= 299:
                    return action_result.set_status(phantom.APP_SUCCESS, 'Successfully removed {0}'.format(msg_string))
                else:
                    return action_result.set_status(phantom.APP_ERROR, 'Failed to remove {0}'.format(msg_string))
            else:
                return action_result.set_status(phantom.APP_ERROR, 'Could not find entry {0}'.format(msg_string))
        else:
            return action_result.set_status(phantom.APP_ERROR, 'Could not find watch_list = {0}'.format(watchlist_name))
def _check_watchlist_entry(self, param):
self.save_progress(('In action handler for: {0}').format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
self.debug_print(param)
watchlist_name = self._handle_py_ver_compat_for_input_str(param['watchlist_name'])
watchlist_entry = self._handle_py_ver_compat_for_input_str(param['watchlist_entry'])
msg_string = '{0} in watchlist={1}'.format(watchlist_entry, watchlist_name)
ret_val, watch_list_id = self.get_watchlist_id(watchlist_name, action_result)
if phantom.is_fail(ret_val):
return action_result.get_status()
if watch_list_id:
ret_val, watch_list_value_id = self._check_watchlist_id(watch_list_id, watchlist_entry, action_result)
if phantom.is_fail(ret_val):
return action_result.get_status()
if watch_list_value_id:
return action_result.set_status(phantom.APP_SUCCESS, 'Successfully found {0}'.format(msg_string))
else:
return action_result.set_status(phantom.APP_SUCCESS, 'Failed to find entry {0}'.format(msg_string))
else:
return action_result.set_status(phantom.APP_ERROR, 'Could not find watch_list = {0}'.format(watchlist_name))
def _add_componentlist_entry(self, param):
self.save_progress(('In action handler for: {0}').format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
self.debug_print(param)
componentlist_name = self._handle_py_ver_compat_for_input_str(param['componentlist_name'])
componentlist_entry = self._handle_py_ver_compat_for_input_str(param['componentlist_entry'])
msg_string = '{0} to componentlist={1}'.format(componentlist_entry, componentlist_name)
ret_val, list_id = self.get_list_id(componentlist_name, 'component_list', action_result)
if phantom.is_fail(ret_val):
return action_result.get_status()
self._client_headers["Content-Type"] = "application/json"
if list_id:
component_list_entry_json = '{"items":["%s"]}' % componentlist_entry
full_url = '{0}/remediation/lists/'.format(self._arc_url.strip("/"))
try:
r = requests.put(url='{0}{1}/append'.format(full_url, list_id),
headers=self._client_headers,
data=component_list_entry_json,
verify=False)
except Exception as e:
err = self._get_error_message_from_exception(e)
return action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err))
if 200 <= r.status_code <= 299:
return action_result.set_status(phantom.APP_SUCCESS, 'Successfully added {0}'.format(msg_string))
else:
return action_result.set_status(phantom.APP_ERROR, 'Failed to add {0}'.format(msg_string))
return action_result.set_status(phantom.APP_ERROR, 'Could not find component_list = {0}'.format(componentlist_name))
def _remove_componentlist_entry(self, param):
self.save_progress(('In action handler for: {0}').format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
self.debug_print(param)
componentlist_name = self._handle_py_ver_compat_for_input_str(param['componentlist_name'])
componentlist_entry = self._handle_py_ver_compat_for_input_str(param['componentlist_entry'])
msg_string = '{0} from componentlist={1}'.format(componentlist_entry, componentlist_name)
ret_val, list_id = self.get_list_id(componentlist_name, 'component_list', action_result)
if phantom.is_fail(ret_val):
return action_result.get_status()
self._client_headers["Content-Type"] = "application/json"
if list_id:
component_list_entry_json = '{"items":["%s"]}' % componentlist_entry
full_url = '{0}/remediation/lists/'.format(self._arc_url.strip("/"))
try:
r = requests.post(url='{0}{1}/delete'.format(full_url, list_id),
headers=self._client_headers,
data=component_list_entry_json,
verify=False)
except Exception as e:
err = self._get_error_message_from_exception(e)
return action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err))
if 200 <= r.status_code <= 299:
return action_result.set_status(phantom.APP_SUCCESS, 'Successfully removed {0}'.format(msg_string))
else:
return action_result.set_status(phantom.APP_ERROR, 'Failed to remove {0}'.format(msg_string))
return action_result.set_status(phantom.APP_ERROR, 'Could not find component_list = {0}'.format(componentlist_name))
def _check_componentlist_entry(self, param):
self.save_progress(('In action handler for: {0}').format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
self.debug_print(param)
componentlist_name = self._handle_py_ver_compat_for_input_str(param['componentlist_name'])
componentlist_entry = self._handle_py_ver_compat_for_input_str(param['componentlist_entry'])
msg_string = '{0} in componentlist={1}'.format(componentlist_entry, componentlist_name)
ret_val, list_id = self.get_list_id(componentlist_name, 'component_list', action_result)
if phantom.is_fail(ret_val):
return action_result.get_status()
if list_id:
full_url = '{0}/lists/'.format(self._arc_url.strip("/"))
try:
r = requests.get(url='{0}{1}/values?limit=100000'.format(full_url, list_id),
headers=self._client_headers,
verify=False)
except Exception as e:
err = self._get_error_message_from_exception(e)
return action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err))
try:
jsonText = json.loads(r.text)
entryExists = False
if 200 <= r.status_code <= 299:
for jText in jsonText:
entryExists = True
if self._handle_py_ver_compat_for_input_str(jText['content_value']).lower() == componentlist_entry.lower():
return action_result.set_status(phantom.APP_SUCCESS, 'Successfully found {0}'.format(msg_string))
if not entryExists:
return action_result.set_status(phantom.APP_SUCCESS, 'Failed to find entry {0}'.format(msg_string))
except Exception as e:
err = self._get_error_message_from_exception(e)
return action_result.set_status(phantom.APP_ERROR, 'Unable to parse JSON response from the server. {0}'.format(err))
else:
return action_result.set_status(phantom.APP_ERROR, 'Could not find component_list = {0}'.format(componentlist_name))
def handle_action(self, param):
ret_val = phantom.APP_SUCCESS
action_id = self.get_action_identifier()
self.debug_print('action_id', self.get_action_identifier())
if action_id == 'test_connectivity':
ret_val = self._handle_test_connectivity(param)
elif action_id == 'on_poll':
ret_val = self._handle_on_poll(param)
elif action_id == 'add_watchlist_entry':
ret_val = self._add_watchlist_entry(param)
elif action_id == 'check_watchlist_entry':
ret_val = self._check_watchlist_entry(param)
elif action_id == 'remove_watchlist_entry':
ret_val = self._remove_watchlist_entry(param)
elif action_id == 'add_componentlist_entry':
ret_val = self._add_componentlist_entry(param)
elif action_id == 'remove_componentlist_entry':
ret_val = self._remove_componentlist_entry(param)
elif action_id == 'check_componentlist_entry':
ret_val = self._check_componentlist_entry(param)
return ret_val
def initialize(self):
# Load the state in initialize, use it to store data
# that needs to be accessed across actions
self.debug_print("Action: 'initialize' Status: start")
self._state = self.load_state()
self.debug_print(("Action: 'initialize' State: {}").format(self._state))
# Fetching the Python major version
try:
self._python_version = int(sys.version_info[0])
except:
return self.set_status(phantom.APP_ERROR, "Error occurred while fetching the Phantom server's Python major version")
config = self.get_config()
self._auth_url = self._handle_py_ver_compat_for_input_str(config['auth_url'])
self._arc_url = self._handle_py_ver_compat_for_input_str(config['arc_url'] + '/rest/1.0/')
self._client_id = self._handle_py_ver_compat_for_input_str(config['client_id'])
self._client_secret = config['client_secret']
self._export_profile = self._handle_py_ver_compat_for_input_str(config['export_profile'])
self._client_headers = DG_CLIENT_HEADER
return phantom.APP_SUCCESS
    def finalize(self):
        """Called once after all actions complete.

        NOTE(review): save_state is commented out, so changes made to
        self._state during a run (e.g. 'first_run' in on_poll) are never
        persisted across actions/app upgrades -- confirm this is intentional.
        """
        # self.save_state(self._state)
        return phantom.APP_SUCCESS
    def validateApiToken(self):
        """Return True when the cached OAuth token is reported active by the server."""
        # No token cached yet -> nothing to validate.
        # NOTE(review): _api_key is initialized to None in __init__, so this
        # '' comparison does not short-circuit on first use; the introspection
        # call below then reports the token inactive -- confirm intent.
        if self._api_key == '':
            return False
        payload = {
            'client_id': self._client_id,
            'client_secret': self._client_secret,
            # PingFederate bearer-token introspection grant.
            'grant_type':
                'urn:pingidentity.com:oauth2:grant_type:validate_bearer',
            'token': self._api_key,
        }
        try:
            # NOTE(review): TLS verification is disabled for the auth server.
            api_key_response = requests.post(url='{}/as/introspect.oauth2'.format(self._auth_url.strip("/")),
                                             headers=DG_HEADER_URL,
                                             data=payload,
                                             verify=False)
            response_json = api_key_response.json()
        except Exception as e:
            err = self._get_error_message_from_exception(e)
            self.debug_print(err)
            return False
        # The token is valid only if introspection succeeds AND reports it active.
        if api_key_response.status_code == 200 and response_json['active']:
            return True
        return False
def requestApiToken(self):
if not self.validateApiToken():
payload = {
'client_id': self._client_id,
'client_secret': self._client_secret,
'grant_type': 'client_credentials',
'scope': 'client',
}
try:
url = '{0}/as/token.oauth2'.format(self._auth_url.strip("/"))
api_key_response = requests.post(url=url,
headers=DG_HEADER_URL,
data=payload,
verify=False)
except requests.exceptions.InvalidSchema:
error_message = 'Error connecting to server. No connection adapters were found for %s' % (url)
return (phantom.APP_ERROR, error_message)
except requests.exceptions.InvalidURL:
error_message = 'Error connecting to server. Invalid URL %s' % (url)
return (phantom.APP_ERROR, error_message)
except Exception as e:
err = self._get_error_message_from_exception(e)
return (phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err))
try:
response_json = api_key_response.json()
if api_key_response.status_code == 200:
self._api_key = response_json['access_token']
self._client_headers.update({'Authorization': 'Bearer {}'.format(self._api_key)})
self._client_headers['Authorization'] = 'Bearer {}'.format(self._api_key)
self.save_progress('Got API Token ' + str(self._client_headers['Authorization']))
return (phantom.APP_SUCCESS, None)
else:
return (phantom.APP_ERROR, self._handle_py_ver_compat_for_input_str(api_key_response.text))
except Exception as e:
err = self._get_error_message_from_exception(e)
return (phantom.APP_ERROR, 'Unable to process response from the server. {0}'.format(err))
else:
self._client_headers['Authorization'] = 'Bearer {}'.format(self._api_key)
return (phantom.APP_SUCCESS, None)
if __name__ == '__main__':
    # Standalone test harness: run an action JSON through the connector,
    # optionally logging into the platform first to obtain a session id.
    import argparse
    import getpass

    import pudb
    pudb.set_trace()

    argparser = argparse.ArgumentParser()
    argparser.add_argument('input_test_json', help='Input Test JSON file')
    argparser.add_argument('-u', '--username', help='username', required=False)
    argparser.add_argument('-p', '--password', help='password', required=False)
    args = argparser.parse_args()
    session_id = None
    username = args.username
    password = args.password
    if username is not None and password is None:
        # User specified a username but not a password, so ask.
        # Bug fix: the original line was an anonymization placeholder
        # ("password = <PASSWORD>(...)") and did not even parse.
        password = getpass.getpass('Password: ')
    if username and password:
        try:
            login_url = DigitalGuardianArcConnector._get_phantom_base_url() + '/login'
            print('Accessing the Login page')
            r = requests.get(login_url, verify=False)
            csrftoken = r.cookies['csrftoken']
            data = dict()
            data['username'] = username
            data['password'] = password
            data['csrfmiddlewaretoken'] = csrftoken
            headers = dict()
            headers['Cookie'] = 'csrftoken=' + csrftoken
            headers['Referer'] = login_url
            print('Logging into Platform to get the session id')
            r2 = requests.post(login_url,
                               verify=False,
                               data=data,
                               headers=headers)
            session_id = r2.cookies['sessionid']
        except Exception as e:
            print('Unable to get session id from the platform. Error: ' + str(e))
            exit(1)
    with open(args.input_test_json) as f:
        in_json = f.read()
        in_json = json.loads(in_json)
        print(json.dumps(in_json, indent=4))
        connector = DigitalGuardianArcConnector()
        connector.print_progress_message = True
        if session_id is not None:
            in_json['user_session_token'] = session_id
            connector._set_csrf_info(csrftoken, headers['Referer'])
        ret_val = connector._handle_action(json.dumps(in_json), None)
        print(json.dumps(json.loads(ret_val), indent=4))
    exit(0)
import hashlib
import json
import sys
from datetime import datetime

import requests
from bs4 import BeautifulSoup
from bs4 import UnicodeDammit

import phantom.app as phantom
from phantom.action_result import ActionResult
from phantom.base_connector import BaseConnector

from digitalguardianarc_consts import *
class RetVal(tuple):
    """Two-slot (status, data) pair returned by the REST helper methods."""

    def __new__(cls, val1, val2=None):
        # Build the underlying 2-tuple directly; the data slot defaults to None.
        return super(RetVal, cls).__new__(cls, (val1, val2))
class DigitalGuardianArcConnector(BaseConnector):
def __init__(self):
# Call the BaseConnectors init first
super(DigitalGuardianArcConnector, self).__init__()
self._state = None
self._auth_url = None
self._arc_url = None
self._client_id = None
self._client_secret = None
self._export_profile = None
self._api_key = None
self._client_headers = {}
def _process_empty_response(self, response, action_result):
if response.status_code == 200:
return RetVal(phantom.APP_SUCCESS, {})
return RetVal(action_result.set_status(phantom.APP_ERROR, 'Status Code: {0}. Empty response and no information in the header'.format(response.status_code)), None)
def _process_html_response(self, response, action_result):
# An html response, treat it like an error
status_code = response.status_code
try:
soup = BeautifulSoup(response.text, 'html.parser')
# Remove the script, style, footer and navigation part from the HTML message
for element in soup(["script", "style", "footer", "nav"]):
element.extract()
error_text = soup.text
split_lines = error_text.split('\n')
split_lines = [x.strip() for x in split_lines if x.strip()]
error_text = '\n'.join(split_lines)
except Exception as e:
err = self._get_error_message_from_exception(e)
error_text = 'Cannot parse error details {}'.format(err)
message = "Status Code: {0}. Data from server:{1}".format(
status_code, self._handle_py_ver_compat_for_input_str(error_text))
message = message.replace('{', '{{').replace('}', '}}')
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
def _process_json_response(self, r, action_result):
# Try a json parse
try:
resp_json = r.json()
except Exception as e:
err = self._get_error_message_from_exception(e)
return RetVal(action_result.set_status(phantom.APP_ERROR, 'Unable to parse JSON response. {0}'.format(err)), None)
# Please specify the status codes here
if 200 <= r.status_code < 399:
return RetVal(phantom.APP_SUCCESS, resp_json)
# You should process the error returned in the json
message = 'Error from server. Status Code: {0} Data from server: {1}'.format(
r.status_code, self._handle_py_ver_compat_for_input_str(r.text.replace('{', '{{').replace('}', '}}')))
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
def _process_response(self, response, action_result):
# store the r_text in debug data, it will get dumped in the logs if the action fails
try:
if hasattr(action_result, 'add_debug_data') and (self.get_action_identifier() != 'get-file' or not 200 <= response.status_code < 399):
action_result.add_debug_data(
{'r_status_code': response.status_code})
action_result.add_debug_data({'r_text': response.text})
action_result.add_debug_data({'r_headers': response.headers})
if 'json' in response.headers.get('Content-Type', ''):
self.save_progress("Action: 'process_json_response'")
return self._process_json_response(response, action_result)
if 'html' in response.headers.get('Content-Type', ''):
self.save_progress("Action: 'process_html_response'")
return self._process_html_response(response, action_result)
if not response.text:
self.save_progress("Action: 'process_empty_response'")
return self._process_empty_response(response, action_result)
message = (
"Can't process response from server. Status Code: {0} Data from server: {1}"
).format(response.status_code,
self._handle_py_ver_compat_for_input_str(response.text.replace('{', '{{').replace('}', '}}')))
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
except Exception as e:
err = self._get_error_message_from_exception(e)
exc_tb = sys.exc_info()
self.save_progress(('exception_line={} {}').format(exc_tb.tb_lineno, err))
return RetVal(action_result.set_status(phantom.APP_ERROR, ('Error: {}').format(err)), None)
def _make_rest_call(self, endpoint, action_result, method='get', **kwargs):
# **kwargs can be any additional parameters that requests.request accepts
config = self.get_config()
resp_json = None
try:
request_func = getattr(requests, method)
except AttributeError:
return RetVal(action_result.set_status(phantom.APP_ERROR, 'Invalid method: {0}'.format(method)), resp_json)
# Create a URL to connect to
url = "%s/%s" % (self._arc_url.strip("/"), endpoint)
try:
self.save_progress("Connecting to URL: {0}".format(url))
r = request_func(url,
verify=config.get('verify_server_cert', False),
**kwargs)
except Exception as e:
err = self._get_error_message_from_exception(e)
return RetVal(action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err)), resp_json)
return self._process_response(r, action_result)
def _handle_py_ver_compat_for_input_str(self, input_str):
"""
This method returns the encoded|original string based on the Python version.
:param input_str: Input string to be processed
:return: input_str (Processed input string based on following logic 'input_str - Python 3; encoded input_str - Python 2')
"""
try:
if input_str and self._python_version == 2:
input_str = UnicodeDammit(input_str).unicode_markup.encode('utf-8')
except:
self.debug_print("Error occurred while handling python 2to3 compatibility for the input string")
return input_str
def _get_error_message_from_exception(self, e):
""" This method is used to get appropriate error messages from the exception.
:param e: Exception object
:return: error message
"""
try:
if e.args:
if len(e.args) > 1:
error_code = e.args[0]
error_msg = e.args[1]
elif len(e.args) == 1:
error_code = ERR_CODE_MSG
error_msg = e.args[0]
else:
error_code = ERR_CODE_MSG
error_msg = ERR_MSG_UNAVAILABLE
except:
error_code = ERR_CODE_MSG
error_msg = ERR_MSG_UNAVAILABLE
try:
error_msg = self._handle_py_ver_compat_for_input_str(error_msg)
except TypeError:
error_msg = TYPE_ERR_MSG
except:
error_msg = ERR_MSG_UNAVAILABLE
try:
if error_code in ERR_CODE_MSG:
error_text = "Error Message: {0}".format(error_msg)
else:
error_text = "Error Code: {0}. Error Message: {1}".format(error_code, error_msg)
except:
self.debug_print(PARSE_ERR_MSG)
error_text = PARSE_ERR_MSG
return error_text
def _handle_test_connectivity(self, param):
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
# NOTE: test connectivity does _NOT_ take any parameters
# i.e. the param dictionary passed to this handler will be empty.
# Also typically it does not add any data into an action_result either.
# The status and progress messages are more important.
self.save_progress('Connecting to DG ARC')
ret_val, message = self.requestApiToken()
if not self._client_headers['Authorization']:
self.save_progress('Test Connectivity Failed')
return action_result.get_status()
else:
self.save_progress('Test Connectivity Passed')
return action_result.set_status(phantom.APP_SUCCESS)
    def _handle_on_poll(self, param):
        """Ingest ARC export records as Phantom containers and artifacts.

        Pulls the configured export profile's data, then creates one container
        per unique '<alarm name>-<guid>' and attaches artifacts to it.
        Per-record failures are logged and skipped; the poll itself succeeds.
        """
        oldname = ''
        action_result = self.add_action_result(ActionResult(dict(param)))
        response_status, export_list = self.get_export(action_result)
        if phantom.is_fail(response_status):
            self.debug_print('On Poll Failed')
            return action_result.get_status()
        if export_list:
            self.save_progress('Ingesting alarm records')
        else:
            self.save_progress('No export data found')
            return action_result.set_status(phantom.APP_SUCCESS, 'No export data found')
        for entry in export_list:
            try:
                # Truncate the alarm name at the first comma (or at 100 chars
                # when there is none) to build a stable container name.
                comm = entry['dg_alarm_name'].find(',')
                if comm == -1:
                    comm = 100
                name = ('{alarm_name}-{id}').format(
                    alarm_name=entry['dg_alarm_name'][0:comm],
                    id=entry['dg_guid'])
                # Only create a new container when the name changes; consecutive
                # records with the same name reuse the previous container_id.
                if name != oldname:
                    container_id = self.create_container(name, entry)
                    oldname = name
                # container_id may be None when create_container skipped the
                # record (e.g. 'alert' detection source / missing tags).
                if container_id:
                    (artifacts_creation_status,
                     artifacts_creation_msg) = self.create_artifacts(alert=entry, container_id=container_id)
                    if phantom.is_fail(artifacts_creation_status):
                        self.debug_print((
                            'Error while creating artifacts for container with ID {container_id}. {error_msg}'
                        ).format(container_id=container_id, error_msg=artifacts_creation_msg))
                self._state['first_run'] = False
            except Exception as e:
                err = self._get_error_message_from_exception(e)
                self.debug_print("Error occurred while processing export list response from server. {}".format(err))
        return action_result.set_status(phantom.APP_SUCCESS)
def get_export(self, action_result):
self.save_progress('Getting ARC Export data')
ret_val, message = self.requestApiToken()
if phantom.is_fail(ret_val):
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
full_url = '{0}/export_profiles/{1}/export_and_ack'.format(self._arc_url.strip("/"), self._export_profile)
try:
request_response = requests.post(url=full_url,
headers=self._client_headers,
verify=False)
except Exception as e:
err = self._get_error_message_from_exception(e)
return RetVal(action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err)), None)
request_status = request_response.status_code
if 200 <= request_status <= 299:
headerField = []
try:
jsonText = json.loads(request_response.text)
if jsonText['total_hits'] == 0:
return RetVal(phantom.APP_SUCCESS, None)
for field in jsonText['fields']:
print('name=' + field['name'])
headerField.append(field['name'])
exportdata = []
for data in jsonText['data']:
entryLine = {}
headerPosition = 0
for dataValue in data:
if not dataValue:
entryLine[headerField[headerPosition]] = "null"
else:
entryLine[headerField[headerPosition]] = dataValue
headerPosition += 1
exportdata.append(entryLine)
return RetVal(phantom.APP_SUCCESS, exportdata)
except Exception as e:
err = self._get_error_message_from_exception(e)
return RetVal(action_result.set_status(phantom.APP_ERROR, 'Unable to parse JSON response. {0}'.format(err)), None)
else:
data = self._handle_py_ver_compat_for_input_str(request_response.text.replace('{', '{{').replace('}', '}}'))
message = 'Error from server. Status Code: {0} Data from server: {1}'.format(request_status, data)
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
def create_container(self, name, items):
container_dict = dict()
if not items['dg_alert.dg_detection_source'] == 'alert' and items[
'dg_tags']:
container_dict['name'] = name
container_dict['start_time'] = ('{time}Z').format(
time=datetime.utcfromtimestamp(items['dg_processed_time'] / 1000).isoformat())
container_dict['source_data_identifier'] = container_dict['name']
container_dict['severity'] = self.convert_to_phantom_severity(
items['dg_alarm_sev'])
container_dict['sensitivity'] = self.convert_to_phantom_sensitivity(items['dg_class.dg_name'])
custom_fields = {
'threat type': (items['dg_tags']),
'activity': (items['dg_utype'])
}
container_dict['tags'] = [('{}={}').format(x, custom_fields[x])
for x in custom_fields
if custom_fields[x] is not None]
container_creation_status, container_creation_msg, container_id = self.save_container(
container=container_dict)
if phantom.is_fail(container_creation_status):
self.save_progress((
'Error while creating container for alert {alert_name}. {error_message}'
).format(alert_name=items['dg_alarm_name'], error_message=container_creation_msg))
return None
else:
return container_id
return None
    def create_artifacts(self, alert, container_id):
        """ This function is used to create artifacts in given container using export data.
        :param alert: Data of single export
        :param container_id: ID of container in which we have to create the artifacts
        :return: status(success/failure), message
        """
        artifacts_list = []
        cat = 'alarm'
        # self.save_progress(('action=create_artifacts tenant={} artifact={}').format(self._client_id, json.dumps(alert)))
        # First four characters of the record's dg_utype select which artifact
        # categories apply; 'Othe' is the catch-all (Alarm only).
        operation_mapping = {
            'File': ['Alarm', 'Process', 'Computer', 'User', 'File'],
            'CD/D': ['Alarm', 'Process', 'Computer', 'User', 'File'],
            'Netw':
            ['Alarm', 'Process', 'Computer', 'User', 'File', 'Network'],
            'Send': ['Alarm', 'Process', 'Computer', 'User', 'Email'],
            'Proc': ['Alarm', 'Process', 'Computer', 'User'],
            'Appl': ['Alarm', 'Process', 'Computer', 'User'],
            'ADE ': ['Alarm', 'Process', 'Computer', 'User', 'File'],
            'Prin':
            ['Alarm', 'Process', 'Computer', 'User', 'File', 'Network'],
            'Othe': ['Alarm']
        }
        # Per-artifact field map: CEF key -> (export field name, contains-types).
        artifacts_mapping = {
            'Alarm': {
                'Alarm_Name': ('dg_alarm_name', []),
                'Alarm_Severity': ('dg_alarm_sev', []),
                'Threat_Type': ('dg_tags', []),
                'Detection_Name': ('dg_det_name', []),
                'Alert_Category': ('dg_alert.dg_category_name', []),
                'Policy_Name':
                ('dg_alert.dg_alert.dg_alert.dg_policy.dg_name', []),
                'Action_Was_Blocked': ('dg_alert.dg_hc', []),
                'startTime': ('dg_local_timestamp', [])
            },
            'File': {
                'File_Name': ('dg_src_file_name', ['fileName']),
                'File_Size': ('dg_alert.dg_total_size', ['fileSize']),
                'Classification': ('dg_class.dg_name', []),
                'File_Was_Classified': ('dg_hc', []),
                'File_Type': ('dg_src_file_ext', ['fileType']),
                'File_Path': ('dg_alert.uad_sp', ['filePath']),
                'Destination_File_Path': ('dg_alert.uad_dp', ['filePath'])
            },
            'Process': {
                'Process_Name': ('dg_proc_file_name', ['process name']),
                'Parent_Process_Name': ('dg_parent_name', ['app']),
                'Process_Path': ('pi_fp', ['filePath']),
                'Command_Line': ('pi_cmdln', []),
                'MD5': ('dg_md5', ['filehash']),
                'SHA1': ('dg_sha1', ['filehash']),
                'SHA256': ('dg_sha256', ['filehash']),
                'VirusTotal_Status': ('dg_vt_status', [])
            },
            'Email': {
                'Attachment_File_Name':
                ('dg_attachments.dg_src_file_name', ['fileName']),
                'Attachment_Was_Classified': ('dg_attachments.uad_sfc', []),
                'Email_Subject': ('ua_msb', ['email']),
                'Email_Sender': ('ua_ms', ['email']),
                'Email_Recipient': ('dg_recipients.uad_mr', ['email']),
                'Email_Recipient_Domain':
                ('dg_recipients.dg_rec_email_domain', ['domain'])
            },
            'Network': {
                'Destination_Address': ('ua_ra', ['ip', 'ipv4']),
                'Request_URL': ('ua_up', ['url']),
                'Destination_DNS_Domain': ('ua_hn', ['domain']),
                'Remote_Port': ('ua_rp', ['ip'])
            },
            'Computer': {
                'Computer_Name': ('dg_machine_name', ['hostname']),
                'Computer_Type': ('dg_machine_type', []),
                'Source_Host_Name': ('dg_shn', []),
                'Source_IP': ('ua_sa', ['ip', 'ipv4']),
                'Source_Address': ('ua_sa', ['ip', 'ipv4'])
            },
            'User': {
                'User_Name': ('dg_user', ['suser']),
                'NTDomain': ('ua_dn', [])
            }
        }
        # Extra 'Alarm Detail' artifact built from alarm-level fields.
        specific_alert_mapping = {
            'alarm': {
                'dgarcUID': ('dg_guid', []),
                'dg_process_time': ('dg_process_time', []),
                'Activity': ('dg_utype', []),
                'os_version': ('os_version', []),
                'Policy': ('dg_alert.dg_policy.dg_name', []),
                'Printer_Name': ('uad_pn', []),
                'os': ('os', []),
                'browser': ('browser', []),
                'App_Category': ('appcategory', ['category']),
            }
        }
        # Build one candidate artifact per category, keeping only the fields
        # present in this record; the operation type filters which candidates
        # are actually appended.
        for (artifact_name, artifact_keys) in artifacts_mapping.items():
            temp_dict = {}
            cef = {}
            cef_types = {}
            # self.save_progress(('artifact_name={}').format(artifact_name))
            for (artifact_key, artifact_tuple) in artifact_keys.items():
                if alert.get(artifact_tuple[0]):
                    cef[artifact_key] = alert[artifact_tuple[0]]
                    cef_types[artifact_key] = artifact_tuple[1]
                cef['tenant'] = self._client_id
            if cef:
                temp_dict['cef'] = cef
                temp_dict['cef_types'] = cef_types
                temp_dict['name'] = artifact_name
                temp_dict['label'] = artifact_name
                temp_dict['type'] = 'host'
                temp_dict['container_id'] = container_id
                temp_dict['severity'] = self.convert_to_phantom_severity(alert['dg_alarm_sev'])
                temp_dict['source_data_identifier'] = self.create_dict_hash(temp_dict)
                temp_dict['tenant'] = self._client_id
                operation = alert['dg_utype'][:4]
                if operation in operation_mapping.keys():
                    accepted_types = operation_mapping[operation]
                else:
                    accepted_types = operation_mapping['Othe']
                if artifact_name in accepted_types:
                    artifacts_list.append(temp_dict)
        if cat in specific_alert_mapping:
            temp_dict = {}
            cef = {}
            cef_types = {}
            artifact_name = '{} Artifact'.format('Alarm Detail')
            # artifact_name = '{} Artifact'.format(alert.get('dg_alarm_name'))
            for (artifact_key, artifact_tuple) in specific_alert_mapping.get(cat).items():
                if alert.get(artifact_tuple[0]):
                    cef[artifact_key] = alert[artifact_tuple[0]]
                    cef_types[artifact_key] = artifact_tuple[1]
                cef['tenant'] = self._client_id
            if cef:
                temp_dict['cef'] = cef
                temp_dict['cef_types'] = cef_types
                temp_dict['name'] = artifact_name
                temp_dict['label'] = artifact_name
                temp_dict['type'] = 'host'
                temp_dict['container_id'] = container_id
                temp_dict['severity'] = self.convert_to_phantom_severity(alert['dg_alarm_sev'])
                temp_dict['source_data_identifier'] = self.create_dict_hash(temp_dict)
                temp_dict['tenant'] = self._client_id
                artifacts_list.append(temp_dict)
        create_artifact_status, create_artifact_msg, _ = self.save_artifacts(artifacts_list)
        if phantom.is_fail(create_artifact_status):
            return (phantom.APP_ERROR, create_artifact_msg)
        return (phantom.APP_SUCCESS, 'Artifacts created successfully')
def convert_to_phantom_severity(self, dg_severity):
if dg_severity == 'Critical':
phantom_severity = 'High'
elif dg_severity == 'High':
phantom_severity = 'Medium'
else:
phantom_severity = 'Low'
return phantom_severity
# mapping classification name to dlp_high, dlp_restrict,dlp_medium,dlp_low
def convert_to_phantom_sensitivity(self, dg_classification):
if dg_classification[-3:] == 'igh':
phantom_sensitivity = 'red'
elif dg_classification[-3:] == 'ted':
phantom_sensitivity = 'red'
elif dg_classification[-3:] == 'med':
phantom_sensitivity = 'amber'
elif dg_classification[-3:] == 'low':
phantom_sensitivity = 'green'
else:
phantom_sensitivity = 'white'
return phantom_sensitivity
def create_dict_hash(self, input_dict):
if not input_dict:
return
else:
try:
input_dict_str = json.dumps(input_dict, sort_keys=True)
self.debug_print("Input dictionary is {}".format(self._handle_py_ver_compat_for_input_str(input_dict_str)))
return
except Exception as e:
err = self._get_error_message_from_exception(e)
self.debug_print("Handled exception in '_create_dict_hash'", err)
return
def get_watchlist_id(self, watchListName, action_result):
ret_val, message = self.requestApiToken()
if phantom.is_fail(ret_val):
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
full_url = '{0}/watchlists/'.format(self._arc_url.strip("/"))
try:
r = requests.get(url=full_url,
headers=self._client_headers,
verify=False)
except Exception as e:
err = self._get_error_message_from_exception(e)
return RetVal(action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err)), None)
try:
jsonText = json.loads(r.text)
list_id = ''
if 200 <= r.status_code <= 299:
jsonText = json.loads(r.text)
for jText in jsonText:
if self._handle_py_ver_compat_for_input_str(jText['display_name']).lower() == watchListName.lower():
list_id = jText['name']
return RetVal(phantom.APP_SUCCESS, list_id)
return RetVal(phantom.APP_SUCCESS, list_id)
else:
data = self._handle_py_ver_compat_for_input_str(r.text.replace('{', '{{').replace('}', '}}'))
message = 'Error from server. Status Code: {0} Data from server: {1}'.format(r.status_code, data)
return RetVal(action_result.set_status(phantom.APP_ERROR, message), list_id)
except Exception as e:
err = self._get_error_message_from_exception(e)
return RetVal(action_result.set_status(phantom.APP_ERROR, 'Unable to process response from the server. {0}'.format(err)), list_id)
def _check_watchlist_id(self, watch_list_id, watchlist_entry, action_result):
full_url = '{0}/watchlists/'.format(self._arc_url.strip("/"))
try:
r = requests.get(url='{0}{1}/values?limit=100000'.format(full_url, watch_list_id),
headers=self._client_headers,
verify=False)
except Exception as e:
err = self._get_error_message_from_exception(e)
return RetVal(action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err)), None)
try:
if 200 <= r.status_code <= 299:
jsonText = json.loads(r.text)
entryExists = False
for jText in jsonText:
if self._handle_py_ver_compat_for_input_str(jText['value_name']).lower() == watchlist_entry.lower():
entryExists = True
return RetVal(phantom.APP_SUCCESS, jText['value_id'])
if not entryExists:
return RetVal(phantom.APP_SUCCESS, '')
else:
data = self._handle_py_ver_compat_for_input_str(r.text.replace('{', '{{').replace('}', '}}'))
message = 'Error from server. Status Code: {0} Data from server: {1}'.format(r.status_code, data)
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
except Exception as e:
err = self._get_error_message_from_exception(e)
return RetVal(action_result.set_status(phantom.APP_ERROR, 'Unable to process response from the server. {0}'.format(err)), None)
def get_list_id(self, list_name, list_type, action_result):
ret_val, message = self.requestApiToken()
if phantom.is_fail(ret_val):
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
full_url = '{0}/lists/{1}'.format(self._arc_url.strip("/"), list_type)
try:
r = requests.get(url=full_url,
headers=self._client_headers,
verify=False)
except Exception as e:
err = self._get_error_message_from_exception(e)
return RetVal(action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err)), None)
try:
jsonText = json.loads(r.text)
list_id = ""
if 200 <= r.status_code <= 299:
for jText in jsonText:
if self._handle_py_ver_compat_for_input_str(jText['name']).lower() == list_name.lower():
list_id = jText['id']
return RetVal(phantom.APP_SUCCESS, list_id)
return RetVal(phantom.APP_SUCCESS, None)
else:
data = self._handle_py_ver_compat_for_input_str(r.text.replace('{', '{{').replace('}', '}}'))
message = 'Error from server. Status Code: {0} Data from server: {1}'.format(r.status_code, data)
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
except Exception as e:
err = self._get_error_message_from_exception(e)
return RetVal(action_result.set_status(phantom.APP_ERROR, 'Unable to process response from the server. {0}'.format(err)), None)
def _add_watchlist_entry(self, param):
self.save_progress(('In action handler for: {0}').format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
self.debug_print(param)
watchlist_name = self._handle_py_ver_compat_for_input_str(param['watchlist_name'])
watchlist_entry = self._handle_py_ver_compat_for_input_str(param['watchlist_entry'])
msg_string = "{0} to watchlist={1}".format(watchlist_entry, watchlist_name)
# self.save_progress(('Watchlistname={} Watchlistentry={}').format(watchlist_name, watchlist_entry))
ret_val, watch_list_id = self.get_watchlist_id(watchlist_name, action_result)
if phantom.is_fail(ret_val):
return action_result.get_status()
if watch_list_id:
watch_list_entry_json = '[{"value_name":"%s"}]' % watchlist_entry
full_url = '{0}/watchlists/'.format(self._arc_url.strip("/"))
try:
r = requests.post(url='{0}{1}/values/'.format(full_url, watch_list_id),
data=watch_list_entry_json,
headers=self._client_headers,
verify=False)
except Exception as e:
err = self._get_error_message_from_exception(e)
return action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err))
if 200 <= r.status_code <= 299:
return action_result.set_status(phantom.APP_SUCCESS, 'Successfully added {0}'.format(msg_string))
else:
return action_result.set_status(phantom.APP_ERROR, 'Failed to add {0}'.format(msg_string))
return action_result.set_status(phantom.APP_ERROR, 'Could not find watch_list = {0}'.format(watchlist_name))
def _remove_watchlist_entry(self, param):
self.save_progress(('In action handler for: {0}').format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
self.debug_print(param)
watchlist_name = self._handle_py_ver_compat_for_input_str(param['watchlist_name'])
watchlist_entry = self._handle_py_ver_compat_for_input_str(param['watchlist_entry'])
msg_string = '{0} from watchlist={1}'.format(watchlist_entry, watchlist_name)
ret_val, watch_list_id = self.get_watchlist_id(watchlist_name, action_result)
if phantom.is_fail(ret_val):
return action_result.get_status()
if watch_list_id:
ret_val, watch_list_value_id = self._check_watchlist_id(watch_list_id, watchlist_entry, action_result)
if phantom.is_fail(ret_val):
return action_result.get_status()
if watch_list_value_id:
full_url = '{0}/watchlists/'.format(self._arc_url.strip("/"))
try:
r = requests.delete(url='{0}{1}/values/{2}'.format(full_url, watch_list_id, watch_list_value_id),
headers=self._client_headers,
verify=False)
except Exception as e:
err = self._get_error_message_from_exception(e)
return action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err))
if 200 <= r.status_code <= 299:
return action_result.set_status(phantom.APP_SUCCESS, 'Successfully removed {0}'.format(msg_string))
else:
return action_result.set_status(phantom.APP_ERROR, 'Failed to remove {0}'.format(msg_string))
else:
return action_result.set_status(phantom.APP_ERROR, 'Could not find entry {0}'.format(msg_string))
else:
return action_result.set_status(phantom.APP_ERROR, 'Could not find watch_list = {0}'.format(watchlist_name))
def _check_watchlist_entry(self, param):
self.save_progress(('In action handler for: {0}').format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
self.debug_print(param)
watchlist_name = self._handle_py_ver_compat_for_input_str(param['watchlist_name'])
watchlist_entry = self._handle_py_ver_compat_for_input_str(param['watchlist_entry'])
msg_string = '{0} in watchlist={1}'.format(watchlist_entry, watchlist_name)
ret_val, watch_list_id = self.get_watchlist_id(watchlist_name, action_result)
if phantom.is_fail(ret_val):
return action_result.get_status()
if watch_list_id:
ret_val, watch_list_value_id = self._check_watchlist_id(watch_list_id, watchlist_entry, action_result)
if phantom.is_fail(ret_val):
return action_result.get_status()
if watch_list_value_id:
return action_result.set_status(phantom.APP_SUCCESS, 'Successfully found {0}'.format(msg_string))
else:
return action_result.set_status(phantom.APP_SUCCESS, 'Failed to find entry {0}'.format(msg_string))
else:
return action_result.set_status(phantom.APP_ERROR, 'Could not find watch_list = {0}'.format(watchlist_name))
def _add_componentlist_entry(self, param):
self.save_progress(('In action handler for: {0}').format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
self.debug_print(param)
componentlist_name = self._handle_py_ver_compat_for_input_str(param['componentlist_name'])
componentlist_entry = self._handle_py_ver_compat_for_input_str(param['componentlist_entry'])
msg_string = '{0} to componentlist={1}'.format(componentlist_entry, componentlist_name)
ret_val, list_id = self.get_list_id(componentlist_name, 'component_list', action_result)
if phantom.is_fail(ret_val):
return action_result.get_status()
self._client_headers["Content-Type"] = "application/json"
if list_id:
component_list_entry_json = '{"items":["%s"]}' % componentlist_entry
full_url = '{0}/remediation/lists/'.format(self._arc_url.strip("/"))
try:
r = requests.put(url='{0}{1}/append'.format(full_url, list_id),
headers=self._client_headers,
data=component_list_entry_json,
verify=False)
except Exception as e:
err = self._get_error_message_from_exception(e)
return action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err))
if 200 <= r.status_code <= 299:
return action_result.set_status(phantom.APP_SUCCESS, 'Successfully added {0}'.format(msg_string))
else:
return action_result.set_status(phantom.APP_ERROR, 'Failed to add {0}'.format(msg_string))
return action_result.set_status(phantom.APP_ERROR, 'Could not find component_list = {0}'.format(componentlist_name))
def _remove_componentlist_entry(self, param):
self.save_progress(('In action handler for: {0}').format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
self.debug_print(param)
componentlist_name = self._handle_py_ver_compat_for_input_str(param['componentlist_name'])
componentlist_entry = self._handle_py_ver_compat_for_input_str(param['componentlist_entry'])
msg_string = '{0} from componentlist={1}'.format(componentlist_entry, componentlist_name)
ret_val, list_id = self.get_list_id(componentlist_name, 'component_list', action_result)
if phantom.is_fail(ret_val):
return action_result.get_status()
self._client_headers["Content-Type"] = "application/json"
if list_id:
component_list_entry_json = '{"items":["%s"]}' % componentlist_entry
full_url = '{0}/remediation/lists/'.format(self._arc_url.strip("/"))
try:
r = requests.post(url='{0}{1}/delete'.format(full_url, list_id),
headers=self._client_headers,
data=component_list_entry_json,
verify=False)
except Exception as e:
err = self._get_error_message_from_exception(e)
return action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err))
if 200 <= r.status_code <= 299:
return action_result.set_status(phantom.APP_SUCCESS, 'Successfully removed {0}'.format(msg_string))
else:
return action_result.set_status(phantom.APP_ERROR, 'Failed to remove {0}'.format(msg_string))
return action_result.set_status(phantom.APP_ERROR, 'Could not find component_list = {0}'.format(componentlist_name))
def _check_componentlist_entry(self, param):
self.save_progress(('In action handler for: {0}').format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
self.debug_print(param)
componentlist_name = self._handle_py_ver_compat_for_input_str(param['componentlist_name'])
componentlist_entry = self._handle_py_ver_compat_for_input_str(param['componentlist_entry'])
msg_string = '{0} in componentlist={1}'.format(componentlist_entry, componentlist_name)
ret_val, list_id = self.get_list_id(componentlist_name, 'component_list', action_result)
if phantom.is_fail(ret_val):
return action_result.get_status()
if list_id:
full_url = '{0}/lists/'.format(self._arc_url.strip("/"))
try:
r = requests.get(url='{0}{1}/values?limit=100000'.format(full_url, list_id),
headers=self._client_headers,
verify=False)
except Exception as e:
err = self._get_error_message_from_exception(e)
return action_result.set_status(phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err))
try:
jsonText = json.loads(r.text)
entryExists = False
if 200 <= r.status_code <= 299:
for jText in jsonText:
entryExists = True
if self._handle_py_ver_compat_for_input_str(jText['content_value']).lower() == componentlist_entry.lower():
return action_result.set_status(phantom.APP_SUCCESS, 'Successfully found {0}'.format(msg_string))
if not entryExists:
return action_result.set_status(phantom.APP_SUCCESS, 'Failed to find entry {0}'.format(msg_string))
except Exception as e:
err = self._get_error_message_from_exception(e)
return action_result.set_status(phantom.APP_ERROR, 'Unable to parse JSON response from the server. {0}'.format(err))
else:
return action_result.set_status(phantom.APP_ERROR, 'Could not find component_list = {0}'.format(componentlist_name))
def handle_action(self, param):
    """Dispatch an incoming action to its handler method.

    Returns whatever the matched handler returns; an unrecognized action
    identifier is treated as a success (no-op), matching the framework's
    expectations.
    """
    self.debug_print('action_id', self.get_action_identifier())
    dispatch = {
        'test_connectivity': self._handle_test_connectivity,
        'on_poll': self._handle_on_poll,
        'add_watchlist_entry': self._add_watchlist_entry,
        'check_watchlist_entry': self._check_watchlist_entry,
        'remove_watchlist_entry': self._remove_watchlist_entry,
        'add_componentlist_entry': self._add_componentlist_entry,
        'remove_componentlist_entry': self._remove_componentlist_entry,
        'check_componentlist_entry': self._check_componentlist_entry,
    }
    handler = dispatch.get(self.get_action_identifier())
    if handler is not None:
        return handler(param)
    return phantom.APP_SUCCESS
def initialize(self):
    """Load persisted state and cache configuration on the connector.

    Called once before any action handler runs. Loads the saved state,
    records the platform's Python major version (used by the py2/py3
    input-compatibility helpers) and copies connection settings out of
    the app configuration.

    :return: APP_SUCCESS, or APP_ERROR if the Python version cannot be read
    """
    self.debug_print("Action: 'initialize' Status: start")
    self._state = self.load_state()
    self.debug_print(("Action: 'initialize' State: {}").format(self._state))
    # Fetching the Python major version
    try:
        self._python_version = int(sys.version_info[0])
    except Exception:
        # FIX: was a bare `except:`; narrowed so SystemExit /
        # KeyboardInterrupt still propagate.
        return self.set_status(phantom.APP_ERROR, "Error occurred while fetching the Phantom server's Python major version")
    config = self.get_config()
    self._auth_url = self._handle_py_ver_compat_for_input_str(config['auth_url'])
    self._arc_url = self._handle_py_ver_compat_for_input_str(config['arc_url'] + '/rest/1.0/')
    self._client_id = self._handle_py_ver_compat_for_input_str(config['client_id'])
    self._client_secret = config['client_secret']
    self._export_profile = self._handle_py_ver_compat_for_input_str(config['export_profile'])
    # FIX: copy instead of alias -- requestApiToken mutates this dict, and
    # aliasing would silently mutate the module-level DG_CLIENT_HEADER.
    self._client_headers = dict(DG_CLIENT_HEADER)
    return phantom.APP_SUCCESS
def finalize(self):
    """Final connector cleanup hook.

    State persistence is deliberately disabled here (see the commented-out
    call); the hook simply reports success.
    """
    # self.save_state(self._state)
    return phantom.APP_SUCCESS
def validateApiToken(self):
    """Check whether the cached OAuth bearer token is still active.

    Posts the cached token to the PingFederate introspection endpoint
    (``/as/introspect.oauth2``) with the validate_bearer grant type.

    :return: True only if the server answered 200 with ``active`` truthy
    """
    # FIX: guard with getattr -- _api_key is not set in initialize(), so
    # the original `self._api_key == ''` could raise AttributeError on the
    # first call. None/'' both mean "no token cached".
    if not getattr(self, '_api_key', None):
        return False
    payload = {
        'client_id': self._client_id,
        'client_secret': self._client_secret,
        'grant_type':
            'urn:pingidentity.com:oauth2:grant_type:validate_bearer',
        'token': self._api_key,
    }
    try:
        api_key_response = requests.post(url='{}/as/introspect.oauth2'.format(self._auth_url.strip("/")),
                                         headers=DG_HEADER_URL,
                                         data=payload,
                                         verify=False)
        response_json = api_key_response.json()
    except Exception as e:
        err = self._get_error_message_from_exception(e)
        self.debug_print(err)
        return False
    # FIX: the original indexed response_json['active'] and raised KeyError
    # on responses without that field; .get() treats missing as inactive.
    return api_key_response.status_code == 200 and bool(response_json.get('active'))
def requestApiToken(self):
    """Ensure ``self._client_headers`` carries a valid OAuth bearer token.

    Re-uses the cached token when it still validates; otherwise requests a
    fresh one from the PingFederate token endpoint with the
    client_credentials grant and caches it.

    :return: tuple ``(phantom status, error message or None)``
    """
    if self.validateApiToken():
        # Cached token is still active -- just (re)attach it.
        self._client_headers['Authorization'] = 'Bearer {}'.format(self._api_key)
        return (phantom.APP_SUCCESS, None)
    payload = {
        'client_id': self._client_id,
        'client_secret': self._client_secret,
        'grant_type': 'client_credentials',
        'scope': 'client',
    }
    try:
        url = '{0}/as/token.oauth2'.format(self._auth_url.strip("/"))
        api_key_response = requests.post(url=url,
                                         headers=DG_HEADER_URL,
                                         data=payload,
                                         verify=False)
    except requests.exceptions.InvalidSchema:
        error_message = 'Error connecting to server. No connection adapters were found for %s' % (url)
        return (phantom.APP_ERROR, error_message)
    except requests.exceptions.InvalidURL:
        error_message = 'Error connecting to server. Invalid URL %s' % (url)
        return (phantom.APP_ERROR, error_message)
    except Exception as e:
        err = self._get_error_message_from_exception(e)
        return (phantom.APP_ERROR, 'Error connecting to server. {0}'.format(err))
    try:
        response_json = api_key_response.json()
        if api_key_response.status_code != 200:
            return (phantom.APP_ERROR, self._handle_py_ver_compat_for_input_str(api_key_response.text))
        self._api_key = response_json['access_token']
        # FIX: the header was set twice (update() then item assignment);
        # once is enough.
        self._client_headers['Authorization'] = 'Bearer {}'.format(self._api_key)
        # SECURITY FIX: the original echoed the full bearer token into the
        # action log via save_progress; never log credentials.
        self.save_progress('Got API Token')
        return (phantom.APP_SUCCESS, None)
    except Exception as e:
        err = self._get_error_message_from_exception(e)
        return (phantom.APP_ERROR, 'Unable to process response from the server. {0}'.format(err))
if __name__ == '__main__':
    # Local test harness: run a single action from a JSON file, optionally
    # authenticating against the platform first to obtain a session id.
    import pudb
    import argparse
    pudb.set_trace()
    argparser = argparse.ArgumentParser()
    argparser.add_argument('input_test_json', help='Input Test JSON file')
    argparser.add_argument('-u', '--username', help='username', required=False)
    argparser.add_argument('-p', '--password', help='password', required=False)
    args = argparser.parse_args()
    session_id = None
    username = args.username
    password = args.password
    if username is not None and password is None:
        # User specified a username but not a password, so ask
        import getpass
        # BUG FIX: the original line was a redaction artifact
        # (`password = <PASSWORD>('Password: ')`) and did not parse.
        password = getpass.getpass('Password: ')
    if username and password:
        try:
            login_url = DigitalGuardianArcConnector._get_phantom_base_url() + '/login'
            print('Accessing the Login page')
            r = requests.get(login_url, verify=False)
            csrftoken = r.cookies['csrftoken']
            data = dict()
            data['username'] = username
            data['password'] = password
            data['csrfmiddlewaretoken'] = csrftoken
            headers = dict()
            headers['Cookie'] = 'csrftoken=' + csrftoken
            headers['Referer'] = login_url
            print('Logging into Platform to get the session id')
            r2 = requests.post(login_url,
                               verify=False,
                               data=data,
                               headers=headers)
            session_id = r2.cookies['sessionid']
        except Exception as e:
            print('Unable to get session id from the platform. Error: ' + str(e))
            exit(1)
    with open(args.input_test_json) as f:
        in_json = f.read()
        in_json = json.loads(in_json)
        print(json.dumps(in_json, indent=4))
        connector = DigitalGuardianArcConnector()
        connector.print_progress_message = True
        if session_id is not None:
            in_json['user_session_token'] = session_id
            connector._set_csrf_info(csrftoken, headers['Referer'])
        ret_val = connector._handle_action(json.dumps(in_json), None)
        print(json.dumps(json.loads(ret_val), indent=4))
exit(0) | 0.346984 | 0.089415 |
import glob
import requests
import json
import os
import datetime
import re
from invoke import task
from monty.os import cd
from monty import __version__ as ver
# Module metadata for the invoke-based release tasks.
__author__ = "<NAME>"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "Apr 29, 2012"
# Date-based release version, e.g. "2023.4.7".
# NOTE(review): the %-m / %-d (no-zero-padding) strftime flags are a glibc
# extension and fail on Windows -- confirm the release hosts are Linux/macOS.
NEW_VER = datetime.datetime.today().strftime("%Y.%-m.%-d")
@task
def make_doc(ctx):
"""Regenerate the Sphinx API docs and publish the HTML into docs/.

Runs sphinx-apidoc over ../monty, then rewrites each generated
monty*.rst: test modules are dropped and every "Subpackages" section is
buffered and re-inserted under the next monty.* heading, flattening the
toctree. Finally builds the HTML and copies it into docs/.
"""
with cd("docs_rst"):
ctx.run("sphinx-apidoc --separate -d 6 -o . -f ../monty")
for f in glob.glob("*.rst"):
if f.startswith("monty") and f.endswith("rst"):
newoutput = []
suboutput = []
subpackage = False
with open(f, "r") as fid:
for line in fid:
clean = line.strip()
if clean == "Subpackages":
# Everything from here on belongs to the subpackage section.
subpackage = True
if not subpackage and not clean.endswith("tests"):
newoutput.append(line)
else:
# Buffer subpackage lines; they are flushed below once the
# next "monty.*" heading is reached.
if not clean.endswith("tests"):
suboutput.append(line)
if clean.startswith("monty") and not clean.endswith("tests"):
newoutput.extend(suboutput)
subpackage = False
suboutput = []
with open(f, "w") as fid:
fid.write("".join(newoutput))
ctx.run("make html")
# ctx.run("cp _static/* ../docs/html/_static")
with cd("docs"):
ctx.run("cp -r html/* .")
ctx.run("rm -r html")
ctx.run("rm -r doctrees")
ctx.run("rm -r _sources")
# This makes sure monty.org works to redirect to the Gihub page
# ctx.run("echo \"monty.org\" > CNAME")
# Avoid the use of jekyll so that _dir works as intended.
ctx.run("touch .nojekyll")
@task
def update_doc(ctx):
    """Pull the latest sources, rebuild the docs, then commit and push."""
    ctx.run("git pull", warn=True)
    make_doc(ctx)
    # warn=True: keep going even if e.g. there is nothing to commit.
    for command in ("git add .", 'git commit -a -m "Update dev docs"', "git push"):
        ctx.run(command, warn=True)
@task
def test(ctx):
    """Run the unit-test suite with pytest."""
    ctx.run("pytest")
@task
def setver(ctx):
"""Stamp the installed monty version (``ver``) into setup.py via sed.

NOTE(review): the sed pattern is unanchored and greedy; ``set_ver``
below does the same job in pure Python with NEW_VER -- prefer it.
"""
ctx.run('sed s/version=.*,/version=\\"{}\\",/ setup.py > newsetup'.format(ver))
ctx.run("mv newsetup setup.py")
@task
def release_github(ctx):
"""Create a GitHub release named v<NEW_VER>.

Uses the newest section of docs_rst/changelog.rst as the release body
(assumes sections are separated by dashed underline rules -- confirm the
changelog keeps that layout). Requires the GITHUB_RELEASES_TOKEN
environment variable; raises KeyError if it is missing.
"""
with open("docs_rst/changelog.rst") as f:
contents = f.read()
# Sections are split on runs of dashes; toks[1] is the newest body.
toks = re.split(r"\-+", contents)
desc = toks[1].strip()
toks = desc.split("\n")
# Drop the trailing line (the next section's heading text).
desc = "\n".join(toks[:-1]).strip()
payload = {
"tag_name": "v" + NEW_VER,
"target_commitish": "master",
"name": "v" + NEW_VER,
"body": desc,
"draft": False,
"prerelease": False,
}
response = requests.post(
"https://api.github.com/repos/materialsvirtuallab/monty/releases",
data=json.dumps(payload),
headers={"Authorization": "token " + os.environ["GITHUB_RELEASES_TOKEN"]},
)
print(response.text)
@task
def commit(ctx):
    """Commit all changes with a "v<NEW_VER> release" message and push."""
    message = 'git commit -a -m "v{} release"'.format(NEW_VER)
    ctx.run(message, warn=True)
    ctx.run("git push", warn=True)
@task
def set_ver(ctx):
    """Stamp NEW_VER into monty/__init__.py and setup.py in place."""

    def _stamp(path, pattern, replacement):
        # Read-modify-write a single file with one regex substitution.
        with open(path, "rt") as fh:
            text = fh.read()
        with open(path, "wt") as fh:
            fh.write(re.sub(pattern, replacement, text))

    _stamp("monty/__init__.py", r"__version__ = .*\n", '__version__ = "%s"\n' % NEW_VER)
    _stamp("setup.py", r"version=([^,]+),", 'version="%s",' % NEW_VER)
@task
def release(ctx):
"""Full release pipeline: stamp the version, run the tests, rebuild and
push the docs, commit, then publish the GitHub release (final step
continues on the following line)."""
set_ver(ctx)
test(ctx)
update_doc(ctx)
commit(ctx)
release_github(ctx) | tasks.py | import glob
# NOTE(review): this chunk is a second copy of the tasks.py content above
# (the parsed_code column of the same dataset row). Kept byte-for-byte
# apart from the added comments; its leading `import glob` line is fused
# onto the previous row-separator line.
import requests
import json
import os
import datetime
import re
from invoke import task
from monty.os import cd
from monty import __version__ as ver
# Module metadata for the invoke-based release tasks.
__author__ = "<NAME>"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "Apr 29, 2012"
# Date-based release version, e.g. "2023.4.7" (%-m/%-d are glibc-only).
NEW_VER = datetime.datetime.today().strftime("%Y.%-m.%-d")
@task
def make_doc(ctx):
"""Regenerate the Sphinx API docs and publish the HTML into docs/."""
with cd("docs_rst"):
ctx.run("sphinx-apidoc --separate -d 6 -o . -f ../monty")
for f in glob.glob("*.rst"):
if f.startswith("monty") and f.endswith("rst"):
newoutput = []
suboutput = []
subpackage = False
with open(f, "r") as fid:
for line in fid:
clean = line.strip()
if clean == "Subpackages":
# Everything from here on belongs to the subpackage section.
subpackage = True
if not subpackage and not clean.endswith("tests"):
newoutput.append(line)
else:
# Buffer subpackage lines until the next monty.* heading.
if not clean.endswith("tests"):
suboutput.append(line)
if clean.startswith("monty") and not clean.endswith("tests"):
newoutput.extend(suboutput)
subpackage = False
suboutput = []
with open(f, "w") as fid:
fid.write("".join(newoutput))
ctx.run("make html")
# ctx.run("cp _static/* ../docs/html/_static")
with cd("docs"):
ctx.run("cp -r html/* .")
ctx.run("rm -r html")
ctx.run("rm -r doctrees")
ctx.run("rm -r _sources")
# This makes sure monty.org works to redirect to the Gihub page
# ctx.run("echo \"monty.org\" > CNAME")
# Avoid the use of jekyll so that _dir works as intended.
ctx.run("touch .nojekyll")
@task
def update_doc(ctx):
"""Pull the latest sources, rebuild the docs, then commit and push."""
ctx.run("git pull", warn=True)
make_doc(ctx)
ctx.run("git add .", warn=True)
ctx.run('git commit -a -m "Update dev docs"', warn=True)
ctx.run("git push", warn=True)
@task
def test(ctx):
"""Run the unit-test suite with pytest."""
ctx.run("pytest")
@task
def setver(ctx):
"""Stamp the installed monty version into setup.py via sed (see set_ver)."""
ctx.run('sed s/version=.*,/version=\\"{}\\",/ setup.py > newsetup'.format(ver))
ctx.run("mv newsetup setup.py")
@task
def release_github(ctx):
"""Create a GitHub release named v<NEW_VER> from the newest changelog section."""
with open("docs_rst/changelog.rst") as f:
contents = f.read()
# Sections are split on runs of dashes; toks[1] is the newest body.
toks = re.split(r"\-+", contents)
desc = toks[1].strip()
toks = desc.split("\n")
desc = "\n".join(toks[:-1]).strip()
payload = {
"tag_name": "v" + NEW_VER,
"target_commitish": "master",
"name": "v" + NEW_VER,
"body": desc,
"draft": False,
"prerelease": False,
}
response = requests.post(
"https://api.github.com/repos/materialsvirtuallab/monty/releases",
data=json.dumps(payload),
headers={"Authorization": "token " + os.environ["GITHUB_RELEASES_TOKEN"]},
)
print(response.text)
@task
def commit(ctx):
"""Commit all changes with a release message and push."""
ctx.run('git commit -a -m "v%s release"' % NEW_VER, warn=True)
ctx.run("git push", warn=True)
@task
def set_ver(ctx):
"""Stamp NEW_VER into monty/__init__.py and setup.py in place."""
with open("monty/__init__.py", "rt") as f:
contents = f.read()
contents = re.sub(r"__version__ = .*\n", '__version__ = "%s"\n' % NEW_VER, contents)
with open("monty/__init__.py", "wt") as f:
f.write(contents)
with open("setup.py", "rt") as f:
contents = f.read()
contents = re.sub(r"version=([^,]+),", 'version="%s",' % NEW_VER, contents)
with open("setup.py", "wt") as f:
f.write(contents)
@task
def release(ctx):
"""Full release pipeline; the GitHub step continues on the next line."""
set_ver(ctx)
test(ctx)
update_doc(ctx)
commit(ctx)
release_github(ctx) | 0.241132 | 0.075995 |
from typing import Optional
import sortedcontainers
import logging
import asyncio
import random
import collections
from gear import Database
from hailtop import aiotools
from hailtop.utils import (
secret_alnum_string,
retry_long_running,
run_if_changed,
time_msecs,
WaitableSharedPool,
AsyncWorkerPool,
Notice,
periodically_call,
)
from ...batch_configuration import STANDING_WORKER_MAX_IDLE_TIME_MSECS
from ...inst_coll_config import PoolConfig
from ...utils import Box, ExceededSharesCounter
from ..instance import Instance
from ..resource_manager import CloudResourceManager
from ..job import schedule_job
from .base import InstanceCollectionManager, InstanceCollection
log = logging.getLogger('pool')
class Pool(InstanceCollection):
"""An elastic collection of interchangeable worker VMs of one worker type.

Keeps healthy instances sorted by free cores so the scheduler can pick
the fullest instance that still fits a job, and runs a periodic control
loop that creates new instances when ready cores exceed free cores.
"""
@staticmethod
async def create(app,
db: Database, # BORROWED
inst_coll_manager: InstanceCollectionManager,
resource_manager: CloudResourceManager,
machine_name_prefix: str,
config: PoolConfig,
async_worker_pool: AsyncWorkerPool, # BORROWED
task_manager: aiotools.BackgroundTaskManager
) -> 'Pool':
"""Build a Pool and re-adopt its not-yet-removed instances from the DB."""
pool = Pool(
app, db, inst_coll_manager, resource_manager, machine_name_prefix, config, async_worker_pool, task_manager)
log.info(f'initializing {pool}')
async for record in db.select_and_fetchall(
'SELECT * FROM instances WHERE removed = 0 AND inst_coll = %s;', (pool.name,)
):
pool.add_instance(Instance.from_record(app, pool, record))
return pool
def __init__(self,
app,
db: Database, # BORROWED
inst_coll_manager: InstanceCollectionManager,
resource_manager: CloudResourceManager,
machine_name_prefix: str,
config: PoolConfig,
async_worker_pool: AsyncWorkerPool, # BORROWED
task_manager: aiotools.BackgroundTaskManager, # BORROWED
):
super().__init__(db,
inst_coll_manager,
resource_manager,
config.cloud,
config.name,
machine_name_prefix,
is_pool=True,
max_instances=config.max_instances,
max_live_instances=config.max_live_instances,
task_manager=task_manager)
self.app = app
self.inst_coll_manager = inst_coll_manager
# Subscribe to the global scheduler notice so this pool's scheduler
# wakes up when any scheduling-relevant state changes.
global_scheduler_state_changed: Notice = self.app['scheduler_state_changed']
self.scheduler_state_changed = global_scheduler_state_changed.subscribe()
self.scheduler = PoolScheduler(self.app, self, async_worker_pool, task_manager)
# Active, responsive instances ordered by free mcpu (ascending).
self.healthy_instances_by_free_cores = sortedcontainers.SortedSet(key=lambda instance: instance.free_cores_mcpu)
# Worker-shape settings copied from the (mutable, see configure()) config.
self.worker_type = config.worker_type
self.worker_cores = config.worker_cores
self.worker_local_ssd_data_disk = config.worker_local_ssd_data_disk
self.worker_external_ssd_data_disk_size_gb = config.worker_external_ssd_data_disk_size_gb
self.enable_standing_worker = config.enable_standing_worker
self.standing_worker_cores = config.standing_worker_cores
self.boot_disk_size_gb = config.boot_disk_size_gb
self.data_disk_size_gb = config.data_disk_size_gb
self.data_disk_size_standing_gb = config.data_disk_size_standing_gb
task_manager.ensure_future(self.control_loop())
@property
def local_ssd_data_disk(self) -> bool:
return self.worker_local_ssd_data_disk
def _default_location(self) -> str:
return self.inst_coll_manager.location_monitor.default_location()
def config(self):
"""Return a JSON-serializable snapshot of the pool configuration."""
return {
'name': self.name,
'worker_type': self.worker_type,
'worker_cores': self.worker_cores,
'boot_disk_size_gb': self.boot_disk_size_gb,
'worker_local_ssd_data_disk': self.worker_local_ssd_data_disk,
'worker_external_ssd_data_disk_size_gb': self.worker_external_ssd_data_disk_size_gb,
'enable_standing_worker': self.enable_standing_worker,
'standing_worker_cores': self.standing_worker_cores,
'max_instances': self.max_instances,
'max_live_instances': self.max_live_instances,
}
def configure(self, pool_config: PoolConfig):
"""Apply an updated PoolConfig in place; identity fields must not change."""
assert self.name == pool_config.name
assert self.cloud == pool_config.cloud
assert self.worker_type == pool_config.worker_type
self.worker_cores = pool_config.worker_cores
self.worker_local_ssd_data_disk = pool_config.worker_local_ssd_data_disk
self.worker_external_ssd_data_disk_size_gb = pool_config.worker_external_ssd_data_disk_size_gb
self.enable_standing_worker = pool_config.enable_standing_worker
self.standing_worker_cores = pool_config.standing_worker_cores
self.boot_disk_size_gb = pool_config.boot_disk_size_gb
self.data_disk_size_gb = pool_config.data_disk_size_gb
self.data_disk_size_standing_gb = pool_config.data_disk_size_standing_gb
self.max_instances = pool_config.max_instances
self.max_live_instances = pool_config.max_live_instances
def adjust_for_remove_instance(self, instance):
# Keep the healthy set in sync with the base collection's bookkeeping.
super().adjust_for_remove_instance(instance)
if instance in self.healthy_instances_by_free_cores:
self.healthy_instances_by_free_cores.remove(instance)
def adjust_for_add_instance(self, instance):
super().adjust_for_add_instance(instance)
# "Healthy" = active and at most one recent failed request.
if instance.state == 'active' and instance.failed_request_count <= 1:
self.healthy_instances_by_free_cores.add(instance)
def get_instance(self, user, cores_mcpu):
"""Return the healthy instance with the fewest free cores that still
fits cores_mcpu, or None. 'ci' jobs are restricted to the default
location (best-fit packing leaves larger holes for big jobs)."""
i = self.healthy_instances_by_free_cores.bisect_key_left(cores_mcpu)
while i < len(self.healthy_instances_by_free_cores):
instance = self.healthy_instances_by_free_cores[i]
assert cores_mcpu <= instance.free_cores_mcpu
if user != 'ci' or (user == 'ci' and instance.location == self._default_location()):
return instance
i += 1
# No fit: log a free-core histogram to aid capacity debugging.
histogram = collections.defaultdict(int)
for instance in self.healthy_instances_by_free_cores:
histogram[instance.free_cores_mcpu] += 1
log.info(f'schedule {self}: no viable instances for {cores_mcpu}: {histogram}')
return None
async def create_instance(self,
cores: int,
data_disk_size_gb: int,
max_idle_time_msecs: Optional[int] = None,
location: Optional[str] = None,
):
"""Create one preemptible worker VM shaped by this pool's settings."""
machine_type = self.resource_manager.machine_type(cores, self.worker_type, self.worker_local_ssd_data_disk)
_, _ = await self._create_instance(
app=self.app,
cores=cores,
machine_type=machine_type,
job_private=False,
location=location,
preemptible=True,
max_idle_time_msecs=max_idle_time_msecs,
local_ssd_data_disk=self.worker_local_ssd_data_disk,
data_disk_size_gb=data_disk_size_gb,
boot_disk_size_gb=self.boot_disk_size_gb
)
async def create_instances_from_ready_cores(self, ready_cores_mcpu, location=None):
"""Create enough workers to cover ready_cores_mcpu beyond live free
cores, subject to pool limits and per-iteration rate caps."""
n_live_instances = self.n_instances_by_state['pending'] + self.n_instances_by_state['active']
if location is None:
live_free_cores_mcpu = self.live_free_cores_mcpu
else:
live_free_cores_mcpu = self.live_free_cores_mcpu_by_location[location]
# Ceiling-divide the shortfall by cores-per-worker.
instances_needed = (ready_cores_mcpu - live_free_cores_mcpu + (self.worker_cores * 1000) - 1) // (
self.worker_cores * 1000
)
instances_needed = min(
instances_needed,
self.max_live_instances - n_live_instances,
self.max_instances - self.n_instances,
# 20 queries/s; our GCE long-run quota
300,
# n * 16 cores / 15s = excess_scheduling_rate/s = 10/s => n ~= 10
10,
)
if instances_needed > 0:
log.info(f'creating {instances_needed} new instances')
# parallelism will be bounded by thread pool
await asyncio.gather(*[
self.create_instance(
cores=self.worker_cores,
data_disk_size_gb=self.data_disk_size_gb,
location=location
)
for _ in range(instances_needed)])
async def create_instances(self):
"""One control-loop tick: size the pool against the ready-job backlog.

Also keeps capacity in the default location for 'ci' jobs and a single
standing worker alive when configured.
"""
if self.app['frozen']:
log.info(f'not creating instances for {self}; batch is frozen')
return
ready_cores_mcpu_per_user = self.db.select_and_fetchall(
'''
SELECT user,
CAST(COALESCE(SUM(ready_cores_mcpu), 0) AS SIGNED) AS ready_cores_mcpu
FROM user_inst_coll_resources
WHERE inst_coll = %s
GROUP BY user;
''',
(self.name,),
)
if ready_cores_mcpu_per_user is None:
ready_cores_mcpu_per_user = {}
else:
ready_cores_mcpu_per_user = {r['user']: r['ready_cores_mcpu'] async for r in ready_cores_mcpu_per_user}
ready_cores_mcpu = sum(ready_cores_mcpu_per_user.values())
free_cores_mcpu = sum([worker.free_cores_mcpu for worker in self.healthy_instances_by_free_cores])
free_cores = free_cores_mcpu / 1000
log.info(
f'{self} n_instances {self.n_instances} {self.n_instances_by_state}'
f' free_cores {free_cores} live_free_cores {self.live_free_cores_mcpu / 1000}'
f' ready_cores {ready_cores_mcpu / 1000}'
)
# Only scale up when there is a backlog and headroom is low (<500 cores).
if ready_cores_mcpu > 0 and free_cores < 500:
await self.create_instances_from_ready_cores(ready_cores_mcpu)
# 'ci' jobs can only run in the default location (see get_instance),
# so guarantee capacity there separately.
default_location = self._default_location()
ci_ready_cores_mcpu = ready_cores_mcpu_per_user.get('ci', 0)
if ci_ready_cores_mcpu > 0 and self.live_free_cores_mcpu_by_location[default_location] == 0:
await self.create_instances_from_ready_cores(ci_ready_cores_mcpu, location=default_location)
n_live_instances = self.n_instances_by_state['pending'] + self.n_instances_by_state['active']
if self.enable_standing_worker and n_live_instances == 0 and self.max_instances > 0:
await self.create_instance(
cores=self.standing_worker_cores,
data_disk_size_gb=self.data_disk_size_standing_gb,
max_idle_time_msecs=STANDING_WORKER_MAX_IDLE_TIME_MSECS
)
async def control_loop(self):
# Re-evaluate pool sizing every 15 seconds.
await periodically_call(15, self.create_instances)
def __str__(self):
return f'pool {self.name}'
class PoolScheduler:
"""Assigns ready jobs to a Pool's healthy instances, fairly across users.

The schedule loop re-runs whenever the pool's scheduler-state notice
fires; each pass computes a fair share of free cores per user and
schedules runnable jobs up to that allocation.
"""
def __init__(self,
app,
pool: Pool,
async_worker_pool: AsyncWorkerPool, # BORROWED
task_manager: aiotools.BackgroundTaskManager, # BORROWED
):
self.app = app
self.scheduler_state_changed = pool.scheduler_state_changed
self.db: Database = app['db']
self.pool = pool
self.async_worker_pool = async_worker_pool
self.exceeded_shares_counter = ExceededSharesCounter()
# Long-running loop: re-run schedule_loop_body each time the notice fires.
task_manager.ensure_future(
retry_long_running('schedule_loop', run_if_changed, self.scheduler_state_changed, self.schedule_loop_body)
)
async def compute_fair_share(self):
"""Divide the pool's free cores among users with runnable jobs.

Water-filling: raise a common "mark" of per-user cores, first bringing
users up from their current running cores (pending set), then growing
all reached users together (allocating set) until free cores run out or
every user hits its total (running + ready) demand. Returns a dict
user -> record with an added 'allocated_cores_mcpu'.
"""
free_cores_mcpu = sum([worker.free_cores_mcpu for worker in self.pool.healthy_instances_by_free_cores])
user_running_cores_mcpu = {}
user_total_cores_mcpu = {}
result = {}
pending_users_by_running_cores = sortedcontainers.SortedSet(key=lambda user: user_running_cores_mcpu[user])
allocating_users_by_total_cores = sortedcontainers.SortedSet(key=lambda user: user_total_cores_mcpu[user])
records = self.db.execute_and_fetchall(
'''
SELECT user,
CAST(COALESCE(SUM(n_ready_jobs), 0) AS SIGNED) AS n_ready_jobs,
CAST(COALESCE(SUM(ready_cores_mcpu), 0) AS SIGNED) AS ready_cores_mcpu,
CAST(COALESCE(SUM(n_running_jobs), 0) AS SIGNED) AS n_running_jobs,
CAST(COALESCE(SUM(running_cores_mcpu), 0) AS SIGNED) AS running_cores_mcpu
FROM user_inst_coll_resources
WHERE inst_coll = %s
GROUP BY user
HAVING n_ready_jobs + n_running_jobs > 0;
''',
(self.pool.name,),
timer_description=f'in compute_fair_share for {self.pool.name}: aggregate user_inst_coll_resources',
)
async for record in records:
user = record['user']
user_running_cores_mcpu[user] = record['running_cores_mcpu']
user_total_cores_mcpu[user] = record['running_cores_mcpu'] + record['ready_cores_mcpu']
pending_users_by_running_cores.add(user)
record['allocated_cores_mcpu'] = 0
result[user] = record
def allocate_cores(user, mark):
# A user's allocation is how far the mark rose above what they
# were already running (rounded to the nearest mcpu).
result[user]['allocated_cores_mcpu'] = int(mark - user_running_cores_mcpu[user] + 0.5)
mark = 0
while free_cores_mcpu > 0 and (pending_users_by_running_cores or allocating_users_by_total_cores):
lowest_running = None
lowest_total = None
if pending_users_by_running_cores:
lowest_running_user = pending_users_by_running_cores[0]
lowest_running = user_running_cores_mcpu[lowest_running_user]
if lowest_running == mark:
# Mark has reached this user's running cores: they now
# grow with the allocating set.
pending_users_by_running_cores.remove(lowest_running_user)
allocating_users_by_total_cores.add(lowest_running_user)
continue
if allocating_users_by_total_cores:
lowest_total_user = allocating_users_by_total_cores[0]
lowest_total = user_total_cores_mcpu[lowest_total_user]
if lowest_total == mark:
# This user's full demand is satisfied; finalize them.
allocating_users_by_total_cores.remove(lowest_total_user)
allocate_cores(lowest_total_user, mark)
continue
# Raise the mark to the next event (user joins or is satisfied),
# spending (n allocating users) cores per unit of mark.
allocation = min([c for c in [lowest_running, lowest_total] if c is not None])
n_allocating_users = len(allocating_users_by_total_cores)
cores_to_allocate = n_allocating_users * (allocation - mark)
if cores_to_allocate > free_cores_mcpu:
mark += int(free_cores_mcpu / n_allocating_users + 0.5)
free_cores_mcpu = 0
break
mark = allocation
free_cores_mcpu -= cores_to_allocate
# Whoever is still allocating shares the final mark.
for user in allocating_users_by_total_cores:
allocate_cores(user, mark)
return result
async def schedule_loop_body(self):
"""One scheduling pass; returns True when the loop should wait for the
next state-changed notification (i.e. nothing more to do now)."""
if self.app['frozen']:
log.info(f'not scheduling any jobs for {self.pool}; batch is frozen')
return True
log.info(f'schedule {self.pool}: starting')
start = time_msecs()
n_scheduled = 0
user_resources = await self.compute_fair_share()
total = sum(resources['allocated_cores_mcpu'] for resources in user_resources.values())
if not total:
log.info(f'schedule {self.pool}: no allocated cores')
should_wait = True
return should_wait
# Per-user share of a 300-job pass, at least 20 each.
user_share = {
user: max(int(300 * resources['allocated_cores_mcpu'] / total + 0.5), 20)
for user, resources in user_resources.items()
}
async def user_runnable_jobs(user, remaining):
# Yields runnable job records for the user's running batches:
# always_run jobs first, then (for non-cancelled batches)
# ordinary ready, non-cancelled jobs.
async for batch in self.db.select_and_fetchall(
'''
SELECT id, cancelled, userdata, user, format_version
FROM batches
WHERE user = %s AND `state` = 'running';
''',
(user,),
timer_description=f'in schedule {self.pool}: get {user} running batches',
):
async for record in self.db.select_and_fetchall(
'''
SELECT job_id, spec, cores_mcpu
FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_inst_coll_cancelled)
WHERE batch_id = %s AND state = 'Ready' AND always_run = 1 AND inst_coll = %s
LIMIT %s;
''',
(batch['id'], self.pool.name, remaining.value),
timer_description=f'in schedule {self.pool}: get {user} batch {batch["id"]} runnable jobs (1)',
):
record['batch_id'] = batch['id']
record['userdata'] = batch['userdata']
record['user'] = batch['user']
record['format_version'] = batch['format_version']
yield record
if not batch['cancelled']:
async for record in self.db.select_and_fetchall(
'''
SELECT job_id, spec, cores_mcpu
FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_cancelled)
WHERE batch_id = %s AND state = 'Ready' AND always_run = 0 AND inst_coll = %s AND cancelled = 0
LIMIT %s;
''',
(batch['id'], self.pool.name, remaining.value),
timer_description=f'in schedule {self.pool}: get {user} batch {batch["id"]} runnable jobs (2)',
):
record['batch_id'] = batch['id']
record['userdata'] = batch['userdata']
record['user'] = batch['user']
record['format_version'] = batch['format_version']
yield record
waitable_pool = WaitableSharedPool(self.async_worker_pool)
should_wait = True
for user, resources in user_resources.items():
allocated_cores_mcpu = resources['allocated_cores_mcpu']
if allocated_cores_mcpu == 0:
continue
scheduled_cores_mcpu = 0
share = user_share[user]
log.info(f'schedule {self.pool}: user-share: {user}: {allocated_cores_mcpu} {share}')
remaining = Box(share)
async for record in user_runnable_jobs(user, remaining):
batch_id = record['batch_id']
job_id = record['job_id']
id = (batch_id, job_id)
attempt_id = secret_alnum_string(6)
record['attempt_id'] = attempt_id
if scheduled_cores_mcpu + record['cores_mcpu'] > allocated_cores_mcpu:
# Over this user's allocation: probabilistically re-fire the
# scheduler (rate-limited by the exceeded-shares counter).
if random.random() > self.exceeded_shares_counter.rate():
self.exceeded_shares_counter.push(True)
self.scheduler_state_changed.set()
break
self.exceeded_shares_counter.push(False)
instance = self.pool.get_instance(user, record['cores_mcpu'])
if instance:
# Reserve cores in memory immediately; the actual RPC runs
# asynchronously on the shared worker pool.
instance.adjust_free_cores_in_memory(-record['cores_mcpu'])
scheduled_cores_mcpu += record['cores_mcpu']
n_scheduled += 1
should_wait = False
async def schedule_with_error_handling(app, record, id, instance):
try:
await schedule_job(app, record, instance)
except Exception:
log.info(f'scheduling job {id} on {instance} for {self.pool}', exc_info=True)
await waitable_pool.call(schedule_with_error_handling, self.app, record, id, instance)
remaining.value -= 1
if remaining.value <= 0:
break
await waitable_pool.wait()
end = time_msecs()
log.info(f'schedule: scheduled {n_scheduled} jobs in {end - start}ms for {self.pool}')
return should_wait | batch/batch/driver/instance_collection/pool.py | from typing import Optional
import sortedcontainers
import logging
import asyncio
import random
import collections
from gear import Database
from hailtop import aiotools
from hailtop.utils import (
secret_alnum_string,
retry_long_running,
run_if_changed,
time_msecs,
WaitableSharedPool,
AsyncWorkerPool,
Notice,
periodically_call,
)
from ...batch_configuration import STANDING_WORKER_MAX_IDLE_TIME_MSECS
from ...inst_coll_config import PoolConfig
from ...utils import Box, ExceededSharesCounter
from ..instance import Instance
from ..resource_manager import CloudResourceManager
from ..job import schedule_job
from .base import InstanceCollectionManager, InstanceCollection
log = logging.getLogger('pool')
class Pool(InstanceCollection):
@staticmethod
async def create(app,
db: Database, # BORROWED
inst_coll_manager: InstanceCollectionManager,
resource_manager: CloudResourceManager,
machine_name_prefix: str,
config: PoolConfig,
async_worker_pool: AsyncWorkerPool, # BORROWED
task_manager: aiotools.BackgroundTaskManager
) -> 'Pool':
pool = Pool(
app, db, inst_coll_manager, resource_manager, machine_name_prefix, config, async_worker_pool, task_manager)
log.info(f'initializing {pool}')
async for record in db.select_and_fetchall(
'SELECT * FROM instances WHERE removed = 0 AND inst_coll = %s;', (pool.name,)
):
pool.add_instance(Instance.from_record(app, pool, record))
return pool
def __init__(self,
app,
db: Database, # BORROWED
inst_coll_manager: InstanceCollectionManager,
resource_manager: CloudResourceManager,
machine_name_prefix: str,
config: PoolConfig,
async_worker_pool: AsyncWorkerPool, # BORROWED
task_manager: aiotools.BackgroundTaskManager, # BORROWED
):
super().__init__(db,
inst_coll_manager,
resource_manager,
config.cloud,
config.name,
machine_name_prefix,
is_pool=True,
max_instances=config.max_instances,
max_live_instances=config.max_live_instances,
task_manager=task_manager)
self.app = app
self.inst_coll_manager = inst_coll_manager
global_scheduler_state_changed: Notice = self.app['scheduler_state_changed']
self.scheduler_state_changed = global_scheduler_state_changed.subscribe()
self.scheduler = PoolScheduler(self.app, self, async_worker_pool, task_manager)
self.healthy_instances_by_free_cores = sortedcontainers.SortedSet(key=lambda instance: instance.free_cores_mcpu)
self.worker_type = config.worker_type
self.worker_cores = config.worker_cores
self.worker_local_ssd_data_disk = config.worker_local_ssd_data_disk
self.worker_external_ssd_data_disk_size_gb = config.worker_external_ssd_data_disk_size_gb
self.enable_standing_worker = config.enable_standing_worker
self.standing_worker_cores = config.standing_worker_cores
self.boot_disk_size_gb = config.boot_disk_size_gb
self.data_disk_size_gb = config.data_disk_size_gb
self.data_disk_size_standing_gb = config.data_disk_size_standing_gb
task_manager.ensure_future(self.control_loop())
@property
def local_ssd_data_disk(self) -> bool:
return self.worker_local_ssd_data_disk
def _default_location(self) -> str:
return self.inst_coll_manager.location_monitor.default_location()
def config(self):
return {
'name': self.name,
'worker_type': self.worker_type,
'worker_cores': self.worker_cores,
'boot_disk_size_gb': self.boot_disk_size_gb,
'worker_local_ssd_data_disk': self.worker_local_ssd_data_disk,
'worker_external_ssd_data_disk_size_gb': self.worker_external_ssd_data_disk_size_gb,
'enable_standing_worker': self.enable_standing_worker,
'standing_worker_cores': self.standing_worker_cores,
'max_instances': self.max_instances,
'max_live_instances': self.max_live_instances,
}
def configure(self, pool_config: PoolConfig):
assert self.name == pool_config.name
assert self.cloud == pool_config.cloud
assert self.worker_type == pool_config.worker_type
self.worker_cores = pool_config.worker_cores
self.worker_local_ssd_data_disk = pool_config.worker_local_ssd_data_disk
self.worker_external_ssd_data_disk_size_gb = pool_config.worker_external_ssd_data_disk_size_gb
self.enable_standing_worker = pool_config.enable_standing_worker
self.standing_worker_cores = pool_config.standing_worker_cores
self.boot_disk_size_gb = pool_config.boot_disk_size_gb
self.data_disk_size_gb = pool_config.data_disk_size_gb
self.data_disk_size_standing_gb = pool_config.data_disk_size_standing_gb
self.max_instances = pool_config.max_instances
self.max_live_instances = pool_config.max_live_instances
def adjust_for_remove_instance(self, instance):
super().adjust_for_remove_instance(instance)
if instance in self.healthy_instances_by_free_cores:
self.healthy_instances_by_free_cores.remove(instance)
def adjust_for_add_instance(self, instance):
    """Track *instance* as healthy when active and not repeatedly failing."""
    super().adjust_for_add_instance(instance)
    is_healthy = instance.state == 'active' and instance.failed_request_count <= 1
    if is_healthy:
        self.healthy_instances_by_free_cores.add(instance)
def get_instance(self, user, cores_mcpu):
    """Pick a healthy instance with at least *cores_mcpu* free cores.

    For the 'ci' user, only instances in the default location qualify.
    Returns None (after logging a free-core histogram) when no instance
    is viable.
    """
    candidates = self.healthy_instances_by_free_cores
    idx = candidates.bisect_key_left(cores_mcpu)
    while idx < len(candidates):
        instance = candidates[idx]
        assert cores_mcpu <= instance.free_cores_mcpu
        # short-circuit keeps _default_location() calls to the 'ci' case only
        if user != 'ci' or instance.location == self._default_location():
            return instance
        idx += 1
    histogram = collections.defaultdict(int)
    for instance in candidates:
        histogram[instance.free_cores_mcpu] += 1
    log.info(f'schedule {self}: no viable instances for {cores_mcpu}: {histogram}')
    return None
async def create_instance(self,
                          cores: int,
                          data_disk_size_gb: int,
                          max_idle_time_msecs: Optional[int] = None,
                          location: Optional[str] = None,
                          ):
    """Provision one preemptible worker VM for this pool.

    Args:
        cores: core count for the new worker.
        data_disk_size_gb: size of the worker's data disk.
        max_idle_time_msecs: idle timeout before the worker shuts down;
            None uses the underlying default.
        location: optional placement (e.g. zone); None lets the resource
            manager choose.
    """
    machine_type = self.resource_manager.machine_type(cores, self.worker_type, self.worker_local_ssd_data_disk)
    # The two-element result is deliberately discarded; unpacking only
    # asserts the expected arity.
    _, _ = await self._create_instance(
        app=self.app,
        cores=cores,
        machine_type=machine_type,
        job_private=False,
        location=location,
        preemptible=True,
        max_idle_time_msecs=max_idle_time_msecs,
        local_ssd_data_disk=self.worker_local_ssd_data_disk,
        data_disk_size_gb=data_disk_size_gb,
        boot_disk_size_gb=self.boot_disk_size_gb
    )
async def create_instances_from_ready_cores(self, ready_cores_mcpu, location=None):
    """Create enough worker instances to absorb *ready_cores_mcpu* of work.

    When *location* is given, only free cores in that location count
    against the demand and the new instances are created there.
    """
    n_live_instances = self.n_instances_by_state['pending'] + self.n_instances_by_state['active']

    if location is None:
        live_free_cores_mcpu = self.live_free_cores_mcpu
    else:
        live_free_cores_mcpu = self.live_free_cores_mcpu_by_location[location]

    # Ceiling division: whole instances needed to cover the core shortfall.
    instances_needed = (ready_cores_mcpu - live_free_cores_mcpu + (self.worker_cores * 1000) - 1) // (
        self.worker_cores * 1000
    )
    # Clamp by pool limits and per-cycle rate caps.
    instances_needed = min(
        instances_needed,
        self.max_live_instances - n_live_instances,
        self.max_instances - self.n_instances,
        # 20 queries/s; our GCE long-run quota
        300,
        # n * 16 cores / 15s = excess_scheduling_rate/s = 10/s => n ~= 10
        10,
    )

    if instances_needed > 0:
        log.info(f'creating {instances_needed} new instances')
        # parallelism will be bounded by thread pool
        await asyncio.gather(*[
            self.create_instance(
                cores=self.worker_cores,
                data_disk_size_gb=self.data_disk_size_gb,
                location=location
            )
            for _ in range(instances_needed)])
async def create_instances(self):
    """One autoscaler pass: create workers to match the pool's ready work."""
    if self.app['frozen']:
        log.info(f'not creating instances for {self}; batch is frozen')
        return

    # Per-user ready cores, aggregated from the resource table.
    ready_cores_mcpu_per_user = self.db.select_and_fetchall(
        '''
SELECT user,
CAST(COALESCE(SUM(ready_cores_mcpu), 0) AS SIGNED) AS ready_cores_mcpu
FROM user_inst_coll_resources
WHERE inst_coll = %s
GROUP BY user;
''',
        (self.name,),
    )

    if ready_cores_mcpu_per_user is None:
        ready_cores_mcpu_per_user = {}
    else:
        ready_cores_mcpu_per_user = {r['user']: r['ready_cores_mcpu'] async for r in ready_cores_mcpu_per_user}

    ready_cores_mcpu = sum(ready_cores_mcpu_per_user.values())

    free_cores_mcpu = sum([worker.free_cores_mcpu for worker in self.healthy_instances_by_free_cores])
    free_cores = free_cores_mcpu / 1000

    log.info(
        f'{self} n_instances {self.n_instances} {self.n_instances_by_state}'
        f' free_cores {free_cores} live_free_cores {self.live_free_cores_mcpu / 1000}'
        f' ready_cores {ready_cores_mcpu / 1000}'
    )

    # Scale up while there is ready work and fewer than 500 free cores.
    if ready_cores_mcpu > 0 and free_cores < 500:
        await self.create_instances_from_ready_cores(ready_cores_mcpu)

    # 'ci' jobs only run in the default location (see get_instance), so make
    # sure that location has free capacity for pending CI work.
    default_location = self._default_location()
    ci_ready_cores_mcpu = ready_cores_mcpu_per_user.get('ci', 0)
    if ci_ready_cores_mcpu > 0 and self.live_free_cores_mcpu_by_location[default_location] == 0:
        await self.create_instances_from_ready_cores(ci_ready_cores_mcpu, location=default_location)

    # Keep a standing worker alive when enabled and nothing is live.
    n_live_instances = self.n_instances_by_state['pending'] + self.n_instances_by_state['active']
    if self.enable_standing_worker and n_live_instances == 0 and self.max_instances > 0:
        await self.create_instance(
            cores=self.standing_worker_cores,
            data_disk_size_gb=self.data_disk_size_standing_gb,
            max_idle_time_msecs=STANDING_WORKER_MAX_IDLE_TIME_MSECS
        )
async def control_loop(self):
    """Background loop: run one autoscaler pass every 15 seconds."""
    await periodically_call(15, self.create_instances)
def __str__(self):
    """Human-readable pool identifier used in log messages."""
    return 'pool {}'.format(self.name)
class PoolScheduler:
    """Fair-share scheduler for a single pool.

    Computes each user's fair allocation of the pool's free cores and
    schedules runnable jobs onto healthy instances accordingly.  The
    scheduling loop is re-run whenever `scheduler_state_changed` fires.
    """

    def __init__(self,
                 app,
                 pool: Pool,
                 async_worker_pool: AsyncWorkerPool,  # BORROWED
                 task_manager: aiotools.BackgroundTaskManager,  # BORROWED
                 ):
        self.app = app
        self.scheduler_state_changed = pool.scheduler_state_changed
        self.db: Database = app['db']
        self.pool = pool
        self.async_worker_pool = async_worker_pool
        self.exceeded_shares_counter = ExceededSharesCounter()
        # Long-running background task: one schedule pass per state change.
        task_manager.ensure_future(
            retry_long_running('schedule_loop', run_if_changed, self.scheduler_state_changed, self.schedule_loop_body)
        )

    async def compute_fair_share(self):
        """Divide the pool's free cores fairly among users with work.

        Water-filling algorithm: a common per-user "mark" of cores is
        raised in steps; users whose running cores reach the mark start
        accumulating allocation until either their total demand is met or
        the pool's free cores are exhausted.

        Returns:
            dict: user -> resource record, augmented with an
            'allocated_cores_mcpu' entry.
        """
        free_cores_mcpu = sum([worker.free_cores_mcpu for worker in self.pool.healthy_instances_by_free_cores])

        user_running_cores_mcpu = {}
        user_total_cores_mcpu = {}
        result = {}

        # Users below the mark, ordered by currently running cores.
        pending_users_by_running_cores = sortedcontainers.SortedSet(key=lambda user: user_running_cores_mcpu[user])
        # Users at/above the mark still receiving cores, by total demand.
        allocating_users_by_total_cores = sortedcontainers.SortedSet(key=lambda user: user_total_cores_mcpu[user])

        records = self.db.execute_and_fetchall(
            '''
SELECT user,
CAST(COALESCE(SUM(n_ready_jobs), 0) AS SIGNED) AS n_ready_jobs,
CAST(COALESCE(SUM(ready_cores_mcpu), 0) AS SIGNED) AS ready_cores_mcpu,
CAST(COALESCE(SUM(n_running_jobs), 0) AS SIGNED) AS n_running_jobs,
CAST(COALESCE(SUM(running_cores_mcpu), 0) AS SIGNED) AS running_cores_mcpu
FROM user_inst_coll_resources
WHERE inst_coll = %s
GROUP BY user
HAVING n_ready_jobs + n_running_jobs > 0;
''',
            (self.pool.name,),
            timer_description=f'in compute_fair_share for {self.pool.name}: aggregate user_inst_coll_resources',
        )

        async for record in records:
            user = record['user']
            user_running_cores_mcpu[user] = record['running_cores_mcpu']
            user_total_cores_mcpu[user] = record['running_cores_mcpu'] + record['ready_cores_mcpu']
            pending_users_by_running_cores.add(user)
            record['allocated_cores_mcpu'] = 0
            result[user] = record

        def allocate_cores(user, mark):
            # A user's allocation is the gap between the mark and what they
            # are already running, rounded to the nearest mcpu.
            result[user]['allocated_cores_mcpu'] = int(mark - user_running_cores_mcpu[user] + 0.5)

        mark = 0
        while free_cores_mcpu > 0 and (pending_users_by_running_cores or allocating_users_by_total_cores):
            lowest_running = None
            lowest_total = None

            if pending_users_by_running_cores:
                lowest_running_user = pending_users_by_running_cores[0]
                lowest_running = user_running_cores_mcpu[lowest_running_user]
                if lowest_running == mark:
                    # User's running cores reached the mark: start
                    # allocating to them.
                    pending_users_by_running_cores.remove(lowest_running_user)
                    allocating_users_by_total_cores.add(lowest_running_user)
                    continue

            if allocating_users_by_total_cores:
                lowest_total_user = allocating_users_by_total_cores[0]
                lowest_total = user_total_cores_mcpu[lowest_total_user]
                if lowest_total == mark:
                    # User's full demand is satisfied: finalize them.
                    allocating_users_by_total_cores.remove(lowest_total_user)
                    allocate_cores(lowest_total_user, mark)
                    continue

            # Raise the mark to the next event (a user joining or
            # finishing), spending cores on every allocating user.
            allocation = min([c for c in [lowest_running, lowest_total] if c is not None])

            n_allocating_users = len(allocating_users_by_total_cores)
            cores_to_allocate = n_allocating_users * (allocation - mark)

            if cores_to_allocate > free_cores_mcpu:
                # Not enough cores to reach the next event; split what is
                # left evenly and stop.
                mark += int(free_cores_mcpu / n_allocating_users + 0.5)
                free_cores_mcpu = 0
                break

            mark = allocation
            free_cores_mcpu -= cores_to_allocate

        for user in allocating_users_by_total_cores:
            allocate_cores(user, mark)

        return result

    async def schedule_loop_body(self):
        """One pass of the scheduler.

        Returns:
            bool: True when the caller may wait for a state change before
            the next pass, False to re-run immediately.
        """
        if self.app['frozen']:
            log.info(f'not scheduling any jobs for {self.pool}; batch is frozen')
            return True

        log.info(f'schedule {self.pool}: starting')
        start = time_msecs()
        n_scheduled = 0

        user_resources = await self.compute_fair_share()

        total = sum(resources['allocated_cores_mcpu'] for resources in user_resources.values())
        if not total:
            log.info(f'schedule {self.pool}: no allocated cores')
            should_wait = True
            return should_wait
        # Each user gets a share of ~300 scheduling slots proportional to
        # their allocation, with a floor of 20.
        user_share = {
            user: max(int(300 * resources['allocated_cores_mcpu'] / total + 0.5), 20)
            for user, resources in user_resources.items()
        }

        async def user_runnable_jobs(user, remaining):
            # For each of the user's running batches, yield always-run jobs
            # first, then (for non-cancelled batches) regular non-cancelled
            # jobs, each query capped at `remaining.value` rows.
            async for batch in self.db.select_and_fetchall(
                '''
SELECT id, cancelled, userdata, user, format_version
FROM batches
WHERE user = %s AND `state` = 'running';
''',
                (user,),
                timer_description=f'in schedule {self.pool}: get {user} running batches',
            ):
                async for record in self.db.select_and_fetchall(
                    '''
SELECT job_id, spec, cores_mcpu
FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_inst_coll_cancelled)
WHERE batch_id = %s AND state = 'Ready' AND always_run = 1 AND inst_coll = %s
LIMIT %s;
''',
                    (batch['id'], self.pool.name, remaining.value),
                    timer_description=f'in schedule {self.pool}: get {user} batch {batch["id"]} runnable jobs (1)',
                ):
                    record['batch_id'] = batch['id']
                    record['userdata'] = batch['userdata']
                    record['user'] = batch['user']
                    record['format_version'] = batch['format_version']
                    yield record
                if not batch['cancelled']:
                    async for record in self.db.select_and_fetchall(
                        '''
SELECT job_id, spec, cores_mcpu
FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_cancelled)
WHERE batch_id = %s AND state = 'Ready' AND always_run = 0 AND inst_coll = %s AND cancelled = 0
LIMIT %s;
''',
                        (batch['id'], self.pool.name, remaining.value),
                        timer_description=f'in schedule {self.pool}: get {user} batch {batch["id"]} runnable jobs (2)',
                    ):
                        record['batch_id'] = batch['id']
                        record['userdata'] = batch['userdata']
                        record['user'] = batch['user']
                        record['format_version'] = batch['format_version']
                        yield record

        waitable_pool = WaitableSharedPool(self.async_worker_pool)

        should_wait = True
        for user, resources in user_resources.items():
            allocated_cores_mcpu = resources['allocated_cores_mcpu']
            if allocated_cores_mcpu == 0:
                continue

            scheduled_cores_mcpu = 0
            share = user_share[user]

            log.info(f'schedule {self.pool}: user-share: {user}: {allocated_cores_mcpu} {share}')

            remaining = Box(share)
            async for record in user_runnable_jobs(user, remaining):
                batch_id = record['batch_id']
                job_id = record['job_id']
                id = (batch_id, job_id)
                attempt_id = secret_alnum_string(6)
                record['attempt_id'] = attempt_id

                if scheduled_cores_mcpu + record['cores_mcpu'] > allocated_cores_mcpu:
                    # Over this user's allocation: sometimes stop here and
                    # nudge the scheduler to re-run immediately, at a rate
                    # tracked by the exceeded-shares counter.
                    if random.random() > self.exceeded_shares_counter.rate():
                        self.exceeded_shares_counter.push(True)
                        self.scheduler_state_changed.set()
                        break
                    self.exceeded_shares_counter.push(False)

                instance = self.pool.get_instance(user, record['cores_mcpu'])
                if instance:
                    # Reserve the cores in memory before the (async)
                    # schedule call so later picks see updated capacity.
                    instance.adjust_free_cores_in_memory(-record['cores_mcpu'])
                    scheduled_cores_mcpu += record['cores_mcpu']
                    n_scheduled += 1
                    should_wait = False

                    async def schedule_with_error_handling(app, record, id, instance):
                        try:
                            await schedule_job(app, record, instance)
                        except Exception:
                            log.info(f'scheduling job {id} on {instance} for {self.pool}', exc_info=True)

                    await waitable_pool.call(schedule_with_error_handling, self.app, record, id, instance)

                remaining.value -= 1
                if remaining.value <= 0:
                    break

        await waitable_pool.wait()

        end = time_msecs()
        log.info(f'schedule: scheduled {n_scheduled} jobs in {end - start}ms for {self.pool}')

        return should_wait
from __future__ import division, print_function, unicode_literals
import argparse
import importlib
import itertools
import json
import os
import re
import shutil
import string
import sys
from collections import OrderedDict
from bentoo.common.conf import load_conf
from bentoo.common.utils import replace_template, safe_eval
import bentoo.common.helpers as helpers
class SimpleVectorGenerator(object):
    '''Simple test vector generator.

    Produces test vectors by walking the supplied vector list, expanding
    any compact entries (a list in a factor position denotes a range of
    alternative values for that factor).

    Args:
        test_factors (list): test factor names.
        raw_vectors (list): test vector collection; each element is a list
            of factor values, where a list-valued element is expanded to
            all its alternatives.

    Examples:
        A straight forward generator:

            SimpleVectorGenerator(["A", "B"], [[1, 2], [1, 3], [2, 3]])

        A compact representation:

            SimpleVectorGenerator(["A", "B"], [[1, [2, 3]], [2, 3]])
    '''
    def __init__(self, test_factors, raw_vectors=None):
        self.test_factors = test_factors
        self.raw_vectors = raw_vectors if raw_vectors else []

    def items(self):
        '''Iterate over all (expanded) test vectors.

        Yields:
            OrderedDict: factor name -> factor value for one test case.
        '''
        for raw in self.raw_vectors:
            # Wrap scalars so `[0, [1, 2]]` expands via a cartesian product.
            alternatives = []
            for factor_value in raw:
                if isinstance(factor_value, list):
                    alternatives.append(factor_value)
                else:
                    alternatives.append([factor_value])
            for combo in itertools.product(*alternatives):
                yield OrderedDict(zip(self.test_factors, combo))

    def case_info(self, case):
        '''Extra information associated with *case*; always None here.'''
        return None
class CartProductVectorGenerator(object):
    '''Cartesian product test vector generator.

    Produces one test vector per element of the cartesian product of the
    per-factor value ranges.

    Args:
        test_factors (list): test factor names.
        factor_values (dict): factor name -> list of possible values.
    '''
    def __init__(self, test_factors, factor_values):
        self.test_factors = test_factors
        self.factor_values = factor_values

    def items(self):
        '''Iterate over the cartesian product of all factor values.

        Yields:
            OrderedDict: factor name -> factor value for one test case.
        '''
        # Order value ranges to match the declared factor order.
        ranges = (self.factor_values[factor] for factor in self.test_factors)
        for combo in itertools.product(*ranges):
            yield OrderedDict(zip(self.test_factors, combo))

    def case_info(self, case):
        '''Extra information associated with *case*; always None here.'''
        return None
# Factor names every benchmark test vector must contain (see
# BenchVectorGenerator, which asserts test_factors is a superset of these).
BENCH_TEST_FACTORS = [
    "model", "bench", "mem_per_node", "series", "nnodes", "ncores"
]
# Supported benchmark styles: intra-node, weak scaling, strong scaling.
BENCH_TYPES = ["onenode", "weak", "strong"]
class BenchVectorGenerator(object):
    '''Benchmarking test vector generator.

    Generate a collection of test vectors to cover reproducible
    cross-system performance benchmarks (intra-node, weak scaling and
    strong scaling), derived from model and system descriptions.

    Args:
        test_factors (list): test factor names; must include every name in
            BENCH_TEST_FACTORS.  Any extra factors require an
            "other_factor_values" section in *spec*.
        spec (dict): benchmark definition with "bench_config",
            "model_config" and "system_config" sections.
    '''
    def __init__(self, test_factors, spec):
        # Split factors into the mandatory benchmark factors and any
        # user-supplied extras (extras need explicit value ranges).
        s0 = set(test_factors)
        s1 = set(BENCH_TEST_FACTORS)
        assert s0 >= s1
        if s0 > s1:
            assert "other_factor_values" in spec
            self.other_factors = list(s0 - s1)
            self.other_factor_values = spec["other_factor_values"]
            for factor in self.other_factors:
                assert factor in self.other_factor_values
                assert isinstance(self.other_factor_values[factor], list)
        else:
            self.other_factors = None
            self.other_factor_values = None
        self.test_factors = test_factors
        assert "bench_config" in spec
        assert "model_config" in spec
        assert "system_config" in spec
        self.bench_vectors = []  # stores all vectors
        self.bench_models = []  # stores model for the corresponding vector
        system = spec["system_config"]
        self.system_config = system
        sys_nnodes = int(system["nnodes"])
        sys_cpn = int(system["cores_per_node"])
        sys_node_mem = helpers.sizeToFloat(system["mem_per_node"])
        sys_free_node_men = helpers.sizeToFloat(system["free_mem_per_node"])
        # Fraction of node memory an application may actually use.
        sys_max_app_mem_ratio = sys_free_node_men / sys_node_mem
        bench_conf = spec["bench_config"]
        for model_name, model_spec in spec["model_config"].items():
            # Pick a resizer matching the model type; it scales the model
            # to a requested per-node memory footprint / node count.
            model_type = model_spec["type"]
            if model_type == "structured_grid":
                grid = model_spec["grid"]
                total_mem = model_spec["total_mem"]
                resizer = helpers.StructuredGridModelResizer(grid, total_mem)
            elif model_type == "unstructured_grid":
                dim = model_spec["dim"]
                total_mem = model_spec["total_mem"]
                resizer = helpers.UnstructuredGridModelResizer(dim, total_mem)
            elif model_type == "omni":
                model_db = model_spec["candidates"]
                resizer = helpers.OmniModelResizer(model_db, sys_nnodes)
            else:
                raise RuntimeError("Invalid model type '%s' in '%s'" %
                                   (model_type, model_name))
            benchmarks = model_spec["bench"]
            for bench in benchmarks:
                assert bench in BENCH_TYPES
                assert bench in bench_conf
                conf = bench_conf[bench]
                if bench == "onenode":
                    # Generate intra-node benchmarks
                    result = {
                        "bench": bench,
                        "model": model_name,
                        "nnodes": 1,
                        "series": 0
                    }
                    for mem_per_node_req in conf["mem_per_node"]:
                        model = resizer.resize(mem_per_node_req)
                        if model["nnodes"] != 1:
                            continue
                        mem_per_node = model["mem_per_node"]
                        mem_per_node = helpers.sizeToFloat(mem_per_node)
                        # Skip models that would not fit into free memory.
                        if mem_per_node > sys_max_app_mem_ratio * sys_node_mem:
                            continue
                        result["mem_per_node"] = model["mem_per_node"]
                        # Build a halving core series: sys_cpn, sys_cpn/2,
                        # ... down to the first odd divisor, then reversed
                        # into ascending order.
                        ncores = []
                        nc = sys_cpn
                        while nc % 2 == 0:
                            ncores.append(nc)
                            nc //= 2
                        ncores.append(nc)
                        ncores.reverse()
                        for n in ncores:
                            if n < conf["min_ncores"]:
                                continue
                            result["ncores"] = n
                            vector = [result[f] for f in BENCH_TEST_FACTORS]
                            self.bench_vectors.append(vector)
                            self.bench_models.append(model)
                elif bench == "weak":
                    # Generate internode weak-scaling benchmarks
                    result = {"bench": bench, "model": model_name, "series": 0}
                    for mem_per_node_req in conf["mem_per_node"]:
                        model = resizer.resize(mem_per_node_req)
                        mem_per_node = model["mem_per_node"]
                        mem_per_node = helpers.sizeToFloat(mem_per_node)
                        if mem_per_node > sys_max_app_mem_ratio * sys_node_mem:
                            continue
                        result["mem_per_node"] = model["mem_per_node"]
                        nnodes_min = conf["nnodes"]["min"]
                        nnodes_max = min(conf["nnodes"]["max"], sys_nnodes)
                        # Walk the resizer's node-count series within range.
                        while model["nnodes"] <= nnodes_max:
                            if model["nnodes"] >= nnodes_min:
                                result["nnodes"] = model["nnodes"]
                                result["ncores"] = model["nnodes"] * sys_cpn
                                vec = [result[f] for f in BENCH_TEST_FACTORS]
                                self.bench_vectors.append(vec)
                                self.bench_models.append(model)
                            model = resizer.next(model)
                        # Add the tail case where the system nodes sit in
                        # between the resize interval.
                        # NOTE(review): result["nnodes"] is unset (KeyError)
                        # if the loop above never emitted a vector for this
                        # request — confirm the resizer always yields an
                        # in-range first size.
                        has_tail_case = result["nnodes"] < nnodes_max
                        has_tail_case = has_tail_case and resizer.exactResize()
                        if has_tail_case:
                            model = resizer.resize(mem_per_node, nnodes_max)
                            assert model["nnodes"] == nnodes_max
                            result["nnodes"] = model["nnodes"]
                            result["ncores"] = model["nnodes"] * sys_cpn
                            vec = [result[f] for f in BENCH_TEST_FACTORS]
                            self.bench_vectors.append(vec)
                            self.bench_models.append(model)
                elif bench == "strong":
                    # Generate internode strong-scaling benchmarks
                    result = {"bench": bench, "model": model_name}
                    max_multiple = conf["max_multiple"]
                    for mem_per_node_req in conf["mem_per_node"]:
                        for i, base_nnodes in enumerate(conf["base_nnodes"]):
                            model = resizer.resize(mem_per_node_req,
                                                   base_nnodes)
                            mem_per_node = model["mem_per_node"]
                            mem_per_node = helpers.sizeToFloat(mem_per_node)
                            if mem_per_node > sys_max_app_mem_ratio * sys_node_mem:
                                continue
                            if base_nnodes * max_multiple <= model["nnodes"]:
                                continue
                            result["mem_per_node"] = model["mem_per_node"]
                            nnodes = model["nnodes"]
                            max_nnodes = min(nnodes * max_multiple, sys_nnodes)
                            result["series"] = i
                            # Fixed model size, doubling node counts.
                            while nnodes <= max_nnodes:
                                result["nnodes"] = nnodes
                                result["ncores"] = nnodes * sys_cpn
                                vec = [result[f] for f in BENCH_TEST_FACTORS]
                                self.bench_vectors.append(vec)
                                self.bench_models.append(model)
                                nnodes *= 2
                            # append the tail case where system nodes are
                            # between the power-of-2 interval.
                            if result["nnodes"] < max_nnodes:
                                result["nnodes"] = max_nnodes
                                result["ncores"] = max_nnodes * sys_cpn
                                vec = [result[f] for f in BENCH_TEST_FACTORS]
                                self.bench_vectors.append(vec)
                                self.bench_models.append(model)
                else:
                    raise RuntimeError("Invalid benchmark type '%s'" % bench)

    def items(self):
        '''Iterate over the generated benchmark vectors.

        Extra (non-benchmark) factors, when declared, are expanded as a
        cartesian product against each benchmark vector.

        Yields:
            OrderedDict: factor name -> factor value for one test case.
        '''
        if self.other_factors:
            others = [self.other_factor_values[f] for f in self.other_factors]
            for v in self.bench_vectors:
                vals = dict(zip(BENCH_TEST_FACTORS, v))
                for other_values in itertools.product(*others):
                    vals.update(dict(zip(self.other_factors, other_values)))
                    # Reorder to the declared test factor order.
                    ovals = [vals[f] for f in self.test_factors]
                    yield OrderedDict(zip(self.test_factors, ovals))
        else:
            for v in self.bench_vectors:
                vals = dict(zip(BENCH_TEST_FACTORS, v))
                ovals = [vals[f] for f in self.test_factors]
                yield OrderedDict(zip(self.test_factors, ovals))

    def case_info(self, case):
        '''Return extra information associated with the case.

        Args:
            case: The case identifier as returned by items()

        Returns:
            dict: the resized model for this case plus the system config
            (keys prefixed "sys_"); used to guide model generation and
            system adaptation.
        '''
        bench_vec = [case[f] for f in BENCH_TEST_FACTORS]
        case_index = self.bench_vectors.index(bench_vec)
        info = dict(self.bench_models[case_index])
        for k, v in self.system_config.items():
            info["sys_%s" % k] = v
        return info
class CustomVectorGenerator(object):
    '''Custom test vector generator.

    Delegates vector generation to a user-supplied function loaded from a
    Python module; the function returns the list of test vectors.

    Args:
        test_factors (list): test factor names.
        spec (dict): generator definition ("import", "func", optional
            "args" and "info_func").
        project_root (str): base directory for resolving relative paths.
    '''
    def __init__(self, test_factors, spec, project_root):
        self.test_factors = test_factors
        module_file = spec["import"]
        if not os.path.isabs(module_file):
            module_file = os.path.abspath(os.path.join(project_root, module_file))
        if not os.path.exists(module_file):
            raise RuntimeError("Module '%s' does not exists" % module_file)
        func_name = spec["func"]
        info_func_name = spec.get("info_func", None)
        # Make the module importable by name.
        module_dir = os.path.dirname(module_file)
        if module_dir not in sys.path:
            sys.path.insert(0, module_dir)
        module_name = os.path.splitext(os.path.basename(module_file))[0]
        mod = importlib.import_module(module_name)
        if not hasattr(mod, func_name):
            raise RuntimeError("Can not find function '%s' in '%s'" %
                               (func_name, module_file))
        generator = getattr(mod, func_name)
        # The user function always receives conf_root and test_factors in
        # addition to any user-declared arguments.
        call_args = dict(spec.get("args", {}))
        call_args["conf_root"] = os.path.abspath(project_root)
        call_args["test_factors"] = self.test_factors
        self.test_vectors = generator(**call_args)
        self.info_func = getattr(mod, info_func_name) if info_func_name else None

    def items(self):
        '''Iterate over the user-provided test vectors.

        Yields:
            OrderedDict: factor name -> factor value for one test case.
        '''
        for vector in self.test_vectors:
            yield OrderedDict(zip(self.test_factors, vector))

    def case_info(self, case):
        '''Extra information for *case* via the optional info function.

        Returns None when no "info_func" was configured.
        '''
        return self.info_func(case) if self.info_func else None
class TemplateCaseGenerator(object):
    """Generate test cases from a declarative template spec.

    The template drives four steps per case: copy files, symlink files,
    instantiate text templates via variable substitution, and build the
    final case spec (cmd/envs/run/results/validator).
    """

    def __init__(self, template):
        assert ("case_spec" in template)
        self.template = template.copy()
        # Normalize optional sections so make_case can iterate them blindly.
        if "copy_files" not in self.template:
            self.template["copy_files"] = OrderedDict()
        if "link_files" not in self.template:
            self.template["link_files"] = OrderedDict()
        if "inst_templates" not in self.template:
            self.template["inst_templates"] = OrderedDict()

    def make_case(self,
                  conf_root,
                  output_root,
                  case_path,
                  test_vector,
                  case_info=None):
        '''Generate a test case according to the specified test vector.

        Args:
            conf_root (str): absolute path of the project config.
            output_root (str): absolute path of the output root.
            case_path (str): absolute path of this test case.
            test_vector (OrderedDict): test case identification.
            case_info (dict): optional extra substitution variables.

        Returns:
            OrderedDict: the case spec (cmd/envs/run/results/validator).

        Raises:
            ValueError: a referenced case file or template is missing, or
                the command binary cannot be resolved.
        '''
        template_vars = dict(test_vector)
        template_vars["conf_root"] = conf_root
        template_vars["output_root"] = output_root
        template_vars["case_path"] = case_path
        if case_info:
            assert isinstance(case_info, dict)
            template_vars.update(case_info)
        # copy case files: each file is defined as (src, dst), where src is
        # relative to conf_root and dst is relative to case_path.
        for src, dst in self.template["copy_files"].items():
            srcpath = replace_template(src, template_vars)
            dstpath = replace_template(dst, template_vars)
            if not os.path.isabs(srcpath):
                srcpath = os.path.join(conf_root, srcpath)
            if not os.path.isabs(dstpath):
                dstpath = os.path.join(case_path, dstpath)
            # Remove any stale destination before copying.
            if os.path.exists(dstpath):
                if os.path.isdir(dstpath):
                    shutil.rmtree(dstpath)
                else:
                    os.remove(dstpath)
            if not os.path.exists(srcpath):
                raise ValueError("Case file '%s' not found" % srcpath)
            if os.path.isdir(srcpath):
                shutil.copytree(srcpath, dstpath)
            else:
                shutil.copyfile(srcpath, dstpath)
        # link case files: each file is defined as (src, dst), where src is
        # relative to output_root and dst is relative to case_path.
        for src, dst in self.template["link_files"].items():
            srcpath = replace_template(src, template_vars)
            dstpath = replace_template(dst, template_vars)
            if not os.path.isabs(srcpath):
                srcpath = os.path.join(output_root, srcpath)
            if not os.path.isabs(dstpath):
                dstpath = os.path.join(case_path, dstpath)
            if os.path.exists(dstpath):
                if os.path.isdir(dstpath):
                    shutil.rmtree(dstpath)
                else:
                    os.remove(dstpath)
            if not os.path.exists(srcpath):
                raise ValueError("Case file '%s' not found" % srcpath)
            # Link with a path relative to the case directory so the output
            # tree stays relocatable.
            srcpath = os.path.relpath(srcpath, case_path)
            if not os.path.exists(os.path.dirname(dstpath)):
                os.makedirs(os.path.dirname(dstpath))
            if os.path.exists(dstpath):
                os.remove(dstpath)
            os.symlink(srcpath, dstpath)
        # instantiate template files based on template substitution
        inst_tpls = self.template["inst_templates"]
        if inst_tpls:
            # Variable values may themselves be templates and are evaluated
            # after substitution.
            var_values = {}
            for k, v in inst_tpls["variables"].items():
                v = replace_template(v, template_vars)
                v = safe_eval(v)
                var_values[k] = v
            for src, dst in inst_tpls["templates"].items():
                srcpath = replace_template(src, template_vars)
                dstpath = replace_template(dst, template_vars)
                if not os.path.isabs(srcpath):
                    srcpath = os.path.join(conf_root, srcpath)
                if not os.path.isabs(dstpath):
                    dstpath = os.path.join(case_path, dstpath)
                if not os.path.exists(srcpath):
                    raise ValueError("Template '%s' does not exist" % srcpath)
                if not os.path.isfile(srcpath):
                    raise ValueError("Template '%s' is not a file" % srcpath)
                if os.path.exists(dstpath):
                    os.remove(dstpath)
                if not os.path.exists(os.path.dirname(dstpath)):
                    os.makedirs(os.path.dirname(dstpath))
                with open(srcpath) as tpl_file:
                    content = replace_template(tpl_file.read(), var_values)
                with open(dstpath, "w") as out_file:
                    out_file.write(content)
        # generate case spec
        spec_template = self.template["case_spec"]
        cmd_template = spec_template["cmd"]
        cmd = [replace_template(x, template_vars) for x in cmd_template]

        def transform_path(x):
            # Expand $output_root, rewrite output-root paths relative to the
            # case directory, and return the path only when it exists.
            x = replace_template(x, {"output_root": output_root})
            if os.path.isabs(x) and x.startswith(output_root):
                x = os.path.relpath(x, case_path)
            # BUGFIX: relative candidates live under the case directory, so
            # the existence probe is case_path/x (previously
            # os.path.join(x, case_path), which never matched).
            p = x if os.path.isabs(x) else os.path.join(case_path, x)
            return x if os.path.exists(p) else None

        # support output_root in command binary
        for i, item in enumerate(cmd):
            v = transform_path(item)
            if v is not None:
                cmd[i] = v
            elif i == 0:
                raise ValueError("Command binary '%s' does not exists" %
                                 cmd[0])
        run_template = spec_template["run"]
        run = OrderedDict()
        for k in ["nnodes", "procs_per_node", "tasks_per_proc", "nprocs"]:
            v = replace_template(run_template[k], template_vars)
            v = safe_eval(v)
            run[k] = v
        rlt_template = spec_template.get("results", [])
        results = [replace_template(x, template_vars) for x in rlt_template]
        envs_template = spec_template.get("envs", {})
        envs = OrderedDict()
        for k, v in envs_template.items():
            v = replace_template(v, template_vars)
            v = safe_eval(v)
            envs[k] = v
        validator = OrderedDict()
        validator_template = spec_template.get("validator", None)
        if validator_template:
            exists_tpl = validator_template.get("exists", [])
            if exists_tpl:
                v = [replace_template(x, template_vars) for x in exists_tpl]
                validator["exists"] = v
            contains_tpl = validator_template.get("contains", {})
            if contains_tpl:
                contains = OrderedDict()
                for k, v in contains_tpl.items():
                    k = replace_template(k, template_vars)
                    v = replace_template(v, template_vars)
                    contains[k] = v
                validator["contains"] = contains
        case_spec = OrderedDict(
            zip(["cmd", "envs", "run", "results", "validator"],
                [cmd, envs, run, results, validator]))
        mirror_files = spec_template.get("mirror_files", None)
        if mirror_files:
            case_spec["mirror_files"] = mirror_files
        # create empty output file, so when output file is used for special
        # signal, it's ready and will not be ignored.
        for f in case_spec["results"]:
            filepath = os.path.join(case_path, f)
            if not os.path.exists(filepath):
                with open(filepath, "w") as result_file:
                    result_file.write("")
        return case_spec
class CustomCaseGenerator(object):
    """Generate test cases by calling a user-supplied function loaded from
    a Python module."""

    def __init__(self, module, func, args):
        if not os.path.exists(module):
            raise RuntimeError("Module '%s' does not exists" % module)
        # Make the module importable by bare name.
        sys.path.insert(0, os.path.abspath(os.path.dirname(module)))
        module_name = os.path.splitext(os.path.basename(module))[0]
        mod = importlib.import_module(module_name)
        if not hasattr(mod, func):
            raise RuntimeError("Can not find function '%s' in '%s'" %
                               (func, module))
        self.func = getattr(mod, func)
        self.args = args

    def make_case(self,
                  conf_root,
                  output_root,
                  case_path,
                  test_vector,
                  case_info=None):
        '''Generate a test case according to the specified test vector.

        The user function receives the configured args plus conf_root,
        output_root, case_path, test_vector and (when truthy) case_info,
        and returns the case spec:

            {
                "cmd": ["ls", "-l"]       # The command and its arguments
                "envs": {"K": "V", ...}   # Environment variables to set
                "results": ["STDOUT"]     # The result files to preserve
                "run": {"nprocs": 1, ...} # The runner specific information
                "validator": {...}        # The result validator
            }

        Returns:
            dict: Test case specification.
        '''
        call_args = dict(self.args)
        call_args["conf_root"] = conf_root
        call_args["output_root"] = output_root
        call_args["case_path"] = case_path
        call_args["test_vector"] = test_vector
        if case_info:
            call_args["case_info"] = case_info
        case_spec = self.func(**call_args)

        # create empty output file, so when output file is used for special
        # signal, it's ready and will not be ignored.
        for result in case_spec["results"]:
            result_path = os.path.join(case_path, result)
            if not os.path.exists(result_path):
                with open(result_path, "w") as result_file:
                    result_file.write("")

        return case_spec
def identifier(value):
    '''Normalize *value* into a lowercase, identifier-safe token.

    Non-word characters become underscores and runs of underscores are
    collapsed to a single one.
    '''
    token = str(value).strip().lower()
    token = re.sub(r"\W", "_", token)
    return re.sub(r"_+", "_", token)
class OutputOrganizer(object):
    """Compute output-tree paths for test cases (layout version 1 only)."""

    def __init__(self, version=1):
        if version != 1:
            raise RuntimeError(
                "Unsupported output version '%s': only allow 1" % version)
        self.version = version

    def get_case_path(self, test_vector):
        # One "<factor>-<value>" directory level per test factor.
        parts = []
        for factor, value in test_vector.items():
            parts.append("{0}-{1}".format(identifier(factor), identifier(value)))
        return os.path.join(*parts)

    def get_project_info_path(self):
        return "TestProject.json"

    def get_case_spec_path(self, test_vector):
        return os.path.join(self.get_case_path(test_vector), "TestCase.json")
class TestProjectBuilder(object):
def __init__(self, conf_root):
    """Load the project config under *conf_root* and build the configured
    test-vector generator, test-case generator and output organizer.

    Raises:
        RuntimeError: missing config file, unsupported project version,
            or an unknown generator name.
    """
    if not os.path.isabs(conf_root):
        conf_root = os.path.abspath(conf_root)
    self.conf_root = conf_root
    # JSON config is preferred; fall back to the YAML variant.
    spec_file = os.path.join(self.conf_root, "TestProjectConfig.json")
    if not os.path.exists(spec_file):
        spec_file = os.path.join(self.conf_root, "TestProjectConfig.yml")
    if not os.path.exists(spec_file):
        raise RuntimeError(
            ("Either TestProjectConfig.json or " +
             "TestProjectConfig.yml shall exists under {}").format(
                 self.conf_root))
    spec = load_conf(spec_file)

    # Do minimal sanity check
    project_version = spec.get("version", 1)
    if int(project_version) != 1:
        raise RuntimeError(
            "Unsupported project version '%s': only allow '1'" %
            project_version)

    # Setup basic project information
    project_info = spec["project"]
    self.name = project_info["name"]
    self.test_factors = project_info["test_factors"]
    data_files = project_info.get("data_files", [])
    self.data_files = data_files
    common_case_files = project_info.get("common_case_files", [])
    self.common_case_files = common_case_files

    # Build test vector generator: dispatch on the configured name, each
    # branch reading its own spec section.
    test_vector_generator_name = project_info["test_vector_generator"]
    if test_vector_generator_name == "cart_product":
        args = spec["cart_product_vector_generator"]
        test_factor_values = args["test_factor_values"]
        self.test_vector_generator = CartProductVectorGenerator(
            self.test_factors, test_factor_values)
    elif test_vector_generator_name == "simple":
        args = spec["simple_vector_generator"]
        test_vectors = args["test_vectors"]
        self.test_vector_generator = SimpleVectorGenerator(
            self.test_factors, test_vectors)
    elif test_vector_generator_name == "custom":
        args = spec["custom_vector_generator"]
        self.test_vector_generator = CustomVectorGenerator(
            self.test_factors, args, conf_root)
    elif test_vector_generator_name == "bench":
        args = spec["bench_vector_generator"]
        self.test_vector_generator = BenchVectorGenerator(
            self.test_factors, args)
    else:
        raise RuntimeError("Unknown test vector generator '%s'" %
                           test_vector_generator_name)

    # Build test case generator
    test_case_generator_name = project_info["test_case_generator"]
    if test_case_generator_name == "custom":
        info = spec["custom_case_generator"]
        module = info["import"]
        if not os.path.isabs(module):
            module = os.path.normpath(os.path.join(self.conf_root, module))
        func = info["func"]
        args = info.get("args", {})
        self.test_case_generator = CustomCaseGenerator(module, func, args)
    elif test_case_generator_name == "template":
        template = spec["template_case_generator"]
        self.test_case_generator = TemplateCaseGenerator(template)
    else:
        raise RuntimeError("Unknown test case generator '%s'" %
                           test_case_generator_name)

    # Build output organizer
    self.output_organizer = OutputOrganizer(version=1)
def write(self, output_root, link_files=False):
    """Materialize the test project under ``output_root``.

    Copies (or symlinks) the project's relative data files into the
    output tree, generates every test case via the configured vector and
    case generators, writes each case's ``TestCase.json``, and finally
    writes the project-level ``TestProject.json``.

    Args:
        output_root (str): Output directory; created if missing.
        link_files (bool): Symlink relative data files into the output
            tree instead of copying them.

    Raises:
        RuntimeError: if a data file is missing or of unsupported type.
        ValueError: if a common case file is missing or not a file.
    """
    # Prepare directories
    if not os.path.isabs(output_root):
        output_root = os.path.abspath(output_root)
    if not os.path.exists(output_root):
        os.makedirs(output_root)
    # Handle data files: leave absolute paths as-is, copy or link relative
    # paths into the output directory.
    for path in self.data_files:
        if os.path.isabs(path):
            continue
        srcpath = os.path.join(self.conf_root, path)
        dstpath = os.path.join(output_root, path)
        if not os.path.exists(srcpath):
            raise RuntimeError("Data file specified but not found: '%s'" %
                               path)
        # Only plain files and directories are supported (no sockets,
        # fifos, device nodes, ...).
        if not (os.path.isdir(srcpath) or os.path.isfile(srcpath)):
            raise RuntimeError("File type not supported: '%s'" % path)
        dstdir = os.path.dirname(dstpath)
        if not os.path.exists(dstdir):
            os.makedirs(dstdir)
        # Remove any stale destination (symlink, directory or file) so a
        # re-run refreshes the output tree.
        if os.path.exists(dstpath):
            if os.path.islink(dstpath):
                os.remove(dstpath)
            elif os.path.isdir(dstpath):
                shutil.rmtree(dstpath)
            else:
                os.remove(dstpath)
        if link_files:
            os.symlink(srcpath, dstpath)
        elif os.path.isdir(srcpath):
            shutil.copytree(srcpath, dstpath)
        else:
            shutil.copyfile(srcpath, dstpath)
            shutil.copystat(srcpath, dstpath)
    # Generate test cases and write each test case config.
    for case in self.test_vector_generator.items():
        case_path = self.output_organizer.get_case_path(case)
        case_fullpath = os.path.join(output_root, case_path)
        case_info = self.test_vector_generator.case_info(case)
        if not os.path.exists(case_fullpath):
            os.makedirs(case_fullpath)
        # Copy common case files into the case directory. Only ordinary
        # files are supported; the directory layout is flattened (each
        # file lands directly in the case directory by basename).
        for path in self.common_case_files:
            srcpath = path
            if not os.path.isabs(path):
                srcpath = os.path.join(self.conf_root, path)
            # Check existence first so a missing file is reported as
            # "not found" instead of "is not a file".
            if not os.path.exists(srcpath):
                raise ValueError("Common case file '%s' not found" % path)
            if not os.path.isfile(srcpath):
                raise ValueError("Common case file '%s' is not a file." %
                                 path)
            dstpath = os.path.join(case_fullpath, os.path.basename(path))
            if os.path.exists(dstpath):
                os.remove(dstpath)
            shutil.copyfile(srcpath, dstpath)
        # Case generators may assume CWD is the case directory; restore
        # the previous CWD even if generation fails.
        cwd = os.path.abspath(os.getcwd())
        os.chdir(case_fullpath)
        try:
            case_spec = self.test_case_generator.make_case(
                self.conf_root, output_root, case_fullpath, case,
                case_info)
            if case_info:
                case_spec["case_info"] = case_info
        finally:
            os.chdir(cwd)
        case_spec_path = self.output_organizer.get_case_spec_path(case)
        case_spec_fullpath = os.path.join(output_root, case_spec_path)
        # Use a context manager so the spec file is flushed and closed.
        with open(case_spec_fullpath, "w") as spec_file:
            json.dump(case_spec, spec_file, indent=2)
    # Write the project-level config.
    info = OrderedDict([("version", 1), ("name", self.name),
                        ("test_factors", self.test_factors)])
    info["data_files"] = self.data_files
    test_defs = []
    for case in self.test_vector_generator.items():
        test_def = OrderedDict()
        test_def["test_vector"] = list(case.values())
        test_def["path"] = self.output_organizer.get_case_path(case)
        case_info = self.test_vector_generator.case_info(case)
        if case_info:
            test_def["case_info"] = case_info
        test_defs.append(test_def)
    info["test_cases"] = test_defs
    project_info_path = self.output_organizer.get_project_info_path()
    project_info_fullpath = os.path.join(output_root, project_info_path)
    with open(project_info_fullpath, "w") as info_file:
        json.dump(info, info_file, indent=2)
def main():
    """Command line entry point: build a test project from a config dir."""
    parser = argparse.ArgumentParser()
    parser.add_argument("conf_root", help="Project configuration directory")
    parser.add_argument("output_root", help="Output directory")
    parser.add_argument("--link-files",
                        action="store_true",
                        # Fixed typo in user-facing help ("Sympolic").
                        help="Symbolic link data files instead of copy")
    config = parser.parse_args()
    project = TestProjectBuilder(config.conf_root)
    project.write(config.output_root, config.link_files)
if __name__ == "__main__":
    main()
from __future__ import division, print_function, unicode_literals
import argparse
import importlib
import itertools
import json
import os
import re
import shutil
import string
import sys
from collections import OrderedDict
from bentoo.common.conf import load_conf
from bentoo.common.utils import replace_template, safe_eval
import bentoo.common.helpers as helpers
class SimpleVectorGenerator(object):
    '''Simple test vector generator

    Expands a user supplied collection of (possibly compact) test vectors.
    Each raw vector is a list of factor values; a list element denotes a
    range of values for that factor, so ``[1, [2, 3]]`` expands to
    ``[1, 2]`` and ``[1, 3]``.

    Args:
        test_factors (list): test factor names.
        raw_vectors (list): test vector collection, possibly compact.

    Examples:
        SimpleVectorGenerator(["A", "B"], [[1, 2], [1, 3], [2, 3]])
        SimpleVectorGenerator(["A", "B"], [[1, [2, 3]], [2, 3]])
    '''
    def __init__(self, test_factors, raw_vectors=None):
        self.test_factors = test_factors
        self.raw_vectors = raw_vectors or []

    def items(self):
        '''Yield every expanded test vector.

        Yields:
            OrderedDict: factor name -> factor value, in declared order.
        '''
        for raw in self.raw_vectors:
            # Promote scalar entries to one-element lists, then take the
            # cartesian product to expand the compact notation.
            choices = []
            for entry in raw:
                choices.append(entry if isinstance(entry, list) else [entry])
            for combo in itertools.product(*choices):
                yield OrderedDict(zip(self.test_factors, combo))

    def case_info(self, case):
        '''Simple vectors carry no extra case information.'''
        return None
class CartProductVectorGenerator(object):
    '''Cartesian product test vector generator

    Builds test vectors by walking the cartesian product of the value
    range of every test factor.

    Args:
        test_factors (list): test factor names.
        factor_values (dict): maps test factor name -> list of values.
    '''
    def __init__(self, test_factors, factor_values):
        self.test_factors = test_factors
        self.factor_values = factor_values

    def items(self):
        '''Yield every test vector of the cartesian product.

        Yields:
            OrderedDict: factor name -> factor value, in declared order.
        '''
        # Order the value ranges by the declared factor order so the
        # emitted vectors line up with self.test_factors.
        ranges = (self.factor_values[name] for name in self.test_factors)
        for combo in itertools.product(*ranges):
            yield OrderedDict(zip(self.test_factors, combo))

    def case_info(self, case):
        '''Cartesian product vectors carry no extra case information.'''
        return None
# Canonical factor names every benchmarking test vector must provide.
BENCH_TEST_FACTORS = [
    "model", "bench", "mem_per_node", "series", "nnodes", "ncores"
]
# Supported benchmark flavors: single-node, weak scaling and strong scaling.
BENCH_TYPES = ["onenode", "weak", "strong"]
class BenchVectorGenerator(object):
    '''Benchmarking test vector generator

    Generate a collection of test vectors to cover a reproducible
    cross-system performance benchmark.

    Args:
        test_factors (list): test factor names; must be a superset of
            BENCH_TEST_FACTORS.
        spec (dict): generator definition with "bench_config",
            "model_config" and "system_config" sections (plus
            "other_factor_values" when extra factors are declared).
    '''
    def __init__(self, test_factors, spec):
        # The declared factors must cover the canonical benchmark factors;
        # any extra factor needs an explicit value range in the spec.
        s0 = set(test_factors)
        s1 = set(BENCH_TEST_FACTORS)
        assert s0 >= s1
        if s0 > s1:
            assert "other_factor_values" in spec
            self.other_factors = list(s0 - s1)
            self.other_factor_values = spec["other_factor_values"]
            for factor in self.other_factors:
                assert factor in self.other_factor_values
                assert isinstance(self.other_factor_values[factor], list)
        else:
            self.other_factors = None
            self.other_factor_values = None
        self.test_factors = test_factors
        assert "bench_config" in spec
        assert "model_config" in spec
        assert "system_config" in spec
        self.bench_vectors = []  # stores all vectors
        self.bench_models = []  # stores model for the corresponding vector
        system = spec["system_config"]
        self.system_config = system
        sys_nnodes = int(system["nnodes"])
        sys_cpn = int(system["cores_per_node"])
        sys_node_mem = helpers.sizeToFloat(system["mem_per_node"])
        sys_free_node_men = helpers.sizeToFloat(system["free_mem_per_node"])
        # Fraction of a node's memory an application is allowed to use.
        sys_max_app_mem_ratio = sys_free_node_men / sys_node_mem
        bench_conf = spec["bench_config"]
        for model_name, model_spec in spec["model_config"].items():
            # Pick a resizer able to scale the model towards a target
            # memory footprint; the flavor depends on the model type.
            model_type = model_spec["type"]
            if model_type == "structured_grid":
                grid = model_spec["grid"]
                total_mem = model_spec["total_mem"]
                resizer = helpers.StructuredGridModelResizer(grid, total_mem)
            elif model_type == "unstructured_grid":
                dim = model_spec["dim"]
                total_mem = model_spec["total_mem"]
                resizer = helpers.UnstructuredGridModelResizer(dim, total_mem)
            elif model_type == "omni":
                model_db = model_spec["candidates"]
                resizer = helpers.OmniModelResizer(model_db, sys_nnodes)
            else:
                raise RuntimeError("Invalid model type '%s' in '%s'" %
                                   (model_type, model_name))
            benchmarks = model_spec["bench"]
            for bench in benchmarks:
                assert bench in BENCH_TYPES
                assert bench in bench_conf
                conf = bench_conf[bench]
                if bench == "onenode":
                    # Generate intra-node benchmarks
                    result = {
                        "bench": bench,
                        "model": model_name,
                        "nnodes": 1,
                        "series": 0
                    }
                    for mem_per_node_req in conf["mem_per_node"]:
                        model = resizer.resize(mem_per_node_req)
                        # Only single-node models qualify here.
                        if model["nnodes"] != 1:
                            continue
                        mem_per_node = model["mem_per_node"]
                        mem_per_node = helpers.sizeToFloat(mem_per_node)
                        # Skip models that would not fit in free node memory.
                        if mem_per_node > sys_max_app_mem_ratio * sys_node_mem:
                            continue
                        result["mem_per_node"] = model["mem_per_node"]
                        # Sweep core counts by repeatedly halving a full
                        # node down to the first odd divisor, then emit in
                        # increasing order.
                        ncores = []
                        nc = sys_cpn
                        while nc % 2 == 0:
                            ncores.append(nc)
                            nc //= 2
                        ncores.append(nc)
                        ncores.reverse()
                        for n in ncores:
                            if n < conf["min_ncores"]:
                                continue
                            result["ncores"] = n
                            vector = [result[f] for f in BENCH_TEST_FACTORS]
                            self.bench_vectors.append(vector)
                            self.bench_models.append(model)
                elif bench == "weak":
                    # Generate internode weak-scaling benchmarks
                    result = {"bench": bench, "model": model_name, "series": 0}
                    for mem_per_node_req in conf["mem_per_node"]:
                        model = resizer.resize(mem_per_node_req)
                        mem_per_node = model["mem_per_node"]
                        mem_per_node = helpers.sizeToFloat(mem_per_node)
                        if mem_per_node > sys_max_app_mem_ratio * sys_node_mem:
                            continue
                        result["mem_per_node"] = model["mem_per_node"]
                        nnodes_min = conf["nnodes"]["min"]
                        nnodes_max = min(conf["nnodes"]["max"], sys_nnodes)
                        # Grow the model node count step by step until the
                        # configured/system maximum is exceeded.
                        while model["nnodes"] <= nnodes_max:
                            if model["nnodes"] >= nnodes_min:
                                result["nnodes"] = model["nnodes"]
                                result["ncores"] = model["nnodes"] * sys_cpn
                                vec = [result[f] for f in BENCH_TEST_FACTORS]
                                self.bench_vectors.append(vec)
                                self.bench_models.append(model)
                            model = resizer.next(model)
                        # Add the tail case where the system nodes sit in
                        # between the resize interval.
                        has_tail_case = result["nnodes"] < nnodes_max
                        has_tail_case = has_tail_case and resizer.exactResize()
                        if has_tail_case:
                            model = resizer.resize(mem_per_node, nnodes_max)
                            assert model["nnodes"] == nnodes_max
                            result["nnodes"] = model["nnodes"]
                            result["ncores"] = model["nnodes"] * sys_cpn
                            vec = [result[f] for f in BENCH_TEST_FACTORS]
                            self.bench_vectors.append(vec)
                            self.bench_models.append(model)
                elif bench == "strong":
                    # Generate internode strong-scaling benchmarks
                    result = {"bench": bench, "model": model_name}
                    max_multiple = conf["max_multiple"]
                    for mem_per_node_req in conf["mem_per_node"]:
                        # Each base node count starts its own series.
                        for i, base_nnodes in enumerate(conf["base_nnodes"]):
                            model = resizer.resize(mem_per_node_req,
                                                   base_nnodes)
                            mem_per_node = model["mem_per_node"]
                            mem_per_node = helpers.sizeToFloat(mem_per_node)
                            if mem_per_node > sys_max_app_mem_ratio * sys_node_mem:
                                continue
                            # Skip when the resized model already exceeds
                            # the scaling range of this series.
                            if base_nnodes * max_multiple <= model["nnodes"]:
                                continue
                            result["mem_per_node"] = model["mem_per_node"]
                            nnodes = model["nnodes"]
                            max_nnodes = min(nnodes * max_multiple, sys_nnodes)
                            result["series"] = i
                            # Scale node count by powers of two.
                            while nnodes <= max_nnodes:
                                result["nnodes"] = nnodes
                                result["ncores"] = nnodes * sys_cpn
                                vec = [result[f] for f in BENCH_TEST_FACTORS]
                                self.bench_vectors.append(vec)
                                self.bench_models.append(model)
                                nnodes *= 2
                            # append the tail case where system nodes are
                            # between the power-of-2 interval.
                            if result["nnodes"] < max_nnodes:
                                result["nnodes"] = max_nnodes
                                result["ncores"] = max_nnodes * sys_cpn
                                vec = [result[f] for f in BENCH_TEST_FACTORS]
                                self.bench_vectors.append(vec)
                                self.bench_models.append(model)
                else:
                    raise RuntimeError("Invalid benchmark type '%s'" % bench)

    def items(self):
        '''An iterator over the range of test vectors

        Yields:
            OrderedDict: a test vector.
                OrderedDict.values() is the test factor values and
                OrderedDict.keys() is the test factor names.
        '''
        if self.other_factors:
            # Cross every benchmark vector with the value ranges of the
            # declared non-benchmark factors.
            others = [self.other_factor_values[f] for f in self.other_factors]
            for v in self.bench_vectors:
                vals = dict(zip(BENCH_TEST_FACTORS, v))
                for other_values in itertools.product(*others):
                    vals.update(dict(zip(self.other_factors, other_values)))
                    # Emit values in the declared factor order.
                    ovals = [vals[f] for f in self.test_factors]
                    yield OrderedDict(zip(self.test_factors, ovals))
        else:
            for v in self.bench_vectors:
                vals = dict(zip(BENCH_TEST_FACTORS, v))
                ovals = [vals[f] for f in self.test_factors]
                yield OrderedDict(zip(self.test_factors, ovals))

    def case_info(self, case):
        '''Return extra information associated with the case

        Args:
            case: The case identifier as returned by items()

        Returns:
            A python dict with a model and a system config. The model is
            used to guide model generation and the system is used to guide
            system adaptation.
        '''
        # Recover the raw benchmark vector to look up the matching model.
        bench_vec = [case[f] for f in BENCH_TEST_FACTORS]
        case_index = self.bench_vectors.index(bench_vec)
        info = dict(self.bench_models[case_index])
        # Expose the system config alongside, prefixed with "sys_".
        for k, v in self.system_config.items():
            info["sys_%s" % k] = v
        return info
class CustomVectorGenerator(object):
    '''Custom test vector generator

    Delegates vector generation to a user supplied python function that
    returns a list of test vectors.

    Args:
        test_factors (list): test factor names.
        spec (dict): generator definition with "import" (module file),
            "func" (callable name), optional "args" and "info_func".
        project_root (str): directory containing the project config.
    '''
    def __init__(self, test_factors, spec, project_root):
        self.test_factors = test_factors
        module = spec["import"]
        if not os.path.isabs(module):
            module = os.path.abspath(os.path.join(project_root, module))
        func_name = spec["func"]
        user_args = spec.get("args", {})
        if not os.path.exists(module):
            raise RuntimeError("Module '%s' does not exists" % module)
        info_func_name = spec.get("info_func", None)
        # Make the module importable by file name, then load it.
        module_dir = os.path.dirname(module)
        if module_dir not in sys.path:
            sys.path.insert(0, module_dir)
        mod = importlib.import_module(
            os.path.splitext(os.path.basename(module))[0])
        if not hasattr(mod, func_name):
            raise RuntimeError("Can not find function '%s' in '%s'" %
                               (func_name, module))
        # Call the generator with user args plus standard context values.
        call_args = dict(user_args)
        call_args["conf_root"] = os.path.abspath(project_root)
        call_args["test_factors"] = self.test_factors
        self.test_vectors = getattr(mod, func_name)(**call_args)
        self.info_func = getattr(mod, info_func_name) if info_func_name \
            else None

    def items(self):
        '''Yield each user supplied test vector.

        Yields:
            OrderedDict: factor name -> factor value, in declared order.
        '''
        for vec in self.test_vectors:
            yield OrderedDict(zip(self.test_factors, vec))

    def case_info(self, case):
        '''Return the user defined case info, or None when unconfigured.'''
        return self.info_func(case) if self.info_func else None
class TemplateCaseGenerator(object):
    '''Generates a test case from a declarative template: files to copy or
    link, template files to instantiate, and a templated case spec.'''
    def __init__(self, template):
        # A "case_spec" section is mandatory; the other sections are
        # normalized to empty dicts so make_case can iterate them blindly.
        assert ("case_spec" in template)
        self.template = template.copy()
        if "copy_files" not in self.template:
            self.template["copy_files"] = OrderedDict()
        if "link_files" not in self.template:
            self.template["link_files"] = OrderedDict()
        if "inst_templates" not in self.template:
            self.template["inst_templates"] = OrderedDict()

    def make_case(self,
                  conf_root,
                  output_root,
                  case_path,
                  test_vector,
                  case_info=None):
        '''Generate a test case according to the specified test vector'''
        # Template variables: the test factors plus the standard context
        # paths, plus any case_info entries.
        template_vars = dict(test_vector)
        template_vars["conf_root"] = conf_root
        template_vars["output_root"] = output_root
        template_vars["case_path"] = case_path
        if case_info:
            assert isinstance(case_info, dict)
            template_vars.update(case_info)
        # copy case files: each file is defiend as (src, dst), where src is
        # relative to conf_root and dst is relative to case_path.
        for src, dst in self.template["copy_files"].items():
            srcpath = replace_template(src, template_vars)
            dstpath = replace_template(dst, template_vars)
            if not os.path.isabs(srcpath):
                srcpath = os.path.join(conf_root, srcpath)
            if not os.path.isabs(dstpath):
                dstpath = os.path.join(case_path, dstpath)
            # Replace any stale destination before copying.
            if os.path.exists(dstpath):
                if os.path.isdir(dstpath):
                    shutil.rmtree(dstpath)
                else:
                    os.remove(dstpath)
            if not os.path.exists(srcpath):
                raise ValueError("Case file '%s' not found" % srcpath)
            if os.path.isdir(srcpath):
                shutil.copytree(srcpath, dstpath)
            else:
                shutil.copyfile(srcpath, dstpath)
        # link case files: each file is defiend as (src, dst), where src is
        # relative to output_root and dst is relative to case_path.
        for src, dst in self.template["link_files"].items():
            srcpath = replace_template(src, template_vars)
            dstpath = replace_template(dst, template_vars)
            if not os.path.isabs(srcpath):
                srcpath = os.path.join(output_root, srcpath)
            if not os.path.isabs(dstpath):
                dstpath = os.path.join(case_path, dstpath)
            if os.path.exists(dstpath):
                if os.path.isdir(dstpath):
                    shutil.rmtree(dstpath)
                else:
                    os.remove(dstpath)
            if not os.path.exists(srcpath):
                raise ValueError("Case file '%s' not found" % srcpath)
            # Link relative to the case directory so the generated tree
            # stays relocatable.
            srcpath = os.path.relpath(srcpath, case_path)
            if not os.path.exists(os.path.dirname(dstpath)):
                os.makedirs(os.path.dirname(dstpath))
            if os.path.exists(dstpath):
                os.remove(dstpath)
            os.symlink(srcpath, dstpath)
        # instantiate template files based on template substitution
        inst_tpls = self.template["inst_templates"]
        if inst_tpls:
            # Variable values may themselves be templated expressions:
            # substitute, then evaluate to a real value.
            var_values = {}
            for k, v in inst_tpls["variables"].items():
                v = replace_template(v, template_vars)
                v = safe_eval(v)
                var_values[k] = v
            for src, dst in inst_tpls["templates"].items():
                srcpath = replace_template(src, template_vars)
                dstpath = replace_template(dst, template_vars)
                if not os.path.isabs(srcpath):
                    srcpath = os.path.join(conf_root, srcpath)
                if not os.path.isabs(dstpath):
                    dstpath = os.path.join(case_path, dstpath)
                if not os.path.exists(srcpath):
                    raise ValueError("Template '%s' does not exist" % srcpath)
                if not os.path.isfile(srcpath):
                    raise ValueError("Template '%s' is not a file" % srcpath)
                if os.path.exists(dstpath):
                    os.remove(dstpath)
                if not os.path.exists(os.path.dirname(dstpath)):
                    os.makedirs(os.path.dirname(dstpath))
                content = replace_template(open(srcpath).read(), var_values)
                open(dstpath, "w").write(content)
        # generate case spec
        spec_template = self.template["case_spec"]
        cmd_template = spec_template["cmd"]
        cmd = [replace_template(x, template_vars) for x in cmd_template]
        def transform_path(x):
            # Rewrite paths under output_root to be case-relative; return
            # None when the candidate path does not exist.
            x = replace_template(x, {"output_root": output_root})
            if os.path.isabs(x) and x.startswith(output_root):
                x = os.path.relpath(x, case_path)
            # NOTE(review): join(x, case_path) looks like the arguments may
            # be swapped (join(case_path, x) expected) -- confirm intent.
            p = x if os.path.isabs(x) else os.path.join(x, case_path)
            return x if os.path.exists(p) else None
        # support output_root in command binary
        for i, item in enumerate(cmd):
            v = transform_path(item)
            if v is not None:
                cmd[i] = v
            elif i == 0:
                # Only the command binary (index 0) is required to exist.
                raise ValueError("Command binary '%s' does not exists" %
                                 cmd[0])
        # Runner parameters are templated then evaluated to real values.
        run_template = spec_template["run"]
        run = OrderedDict()
        for k in ["nnodes", "procs_per_node", "tasks_per_proc", "nprocs"]:
            v = replace_template(run_template[k], template_vars)
            v = safe_eval(v)
            run[k] = v
        rlt_template = spec_template.get("results", [])
        results = [replace_template(x, template_vars) for x in rlt_template]
        envs_template = spec_template.get("envs", {})
        envs = OrderedDict()
        for k, v in envs_template.items():
            v = replace_template(v, template_vars)
            v = safe_eval(v)
            envs[k] = v
        # Optional result validator: files that must exist and strings the
        # result files must contain.
        validator = OrderedDict()
        validator_template = spec_template.get("validator", None)
        if validator_template:
            exists_tpl = validator_template.get("exists", [])
            if exists_tpl:
                v = [replace_template(x, template_vars) for x in exists_tpl]
                validator["exists"] = v
            contains_tpl = validator_template.get("contains", {})
            if contains_tpl:
                contains = OrderedDict()
                for k, v in contains_tpl.items():
                    k = replace_template(k, template_vars)
                    v = replace_template(v, template_vars)
                    contains[k] = v
                validator["contains"] = contains
        case_spec = OrderedDict(
            zip(["cmd", "envs", "run", "results", "validator"],
                [cmd, envs, run, results, validator]))
        mirror_files = spec_template.get("mirror_files", None)
        if mirror_files:
            case_spec["mirror_files"] = mirror_files
        # create empty output file, so when output file is used for special
        # signal, it's ready and will not be ignored.
        for f in case_spec["results"]:
            filepath = os.path.join(case_path, f)
            if not os.path.exists(filepath):
                open(filepath, "w").write("")
        return case_spec
class CustomCaseGenerator(object):
    '''Test case generator backed by a user supplied python function.'''
    def __init__(self, module, func, args):
        '''Load `func` from the python file `module`.

        Args:
            module (str): path to the python file providing the generator.
            func (str): name of the case-generating function.
            args (dict): extra keyword arguments passed on every call.
        '''
        if not os.path.exists(module):
            raise RuntimeError("Module '%s' does not exists" % module)
        # Make the module importable by file name, then load it.
        sys.path.insert(0, os.path.abspath(os.path.dirname(module)))
        mod = importlib.import_module(
            os.path.splitext(os.path.basename(module))[0])
        if not hasattr(mod, func):
            raise RuntimeError("Can not find function '%s' in '%s'" %
                               (func, module))
        self.func = getattr(mod, func)
        self.args = args

    def make_case(self,
                  conf_root,
                  output_root,
                  case_path,
                  test_vector,
                  case_info=None):
        '''Generate a test case according to the specified test vector

        Args:
            conf_root (str): Absolute path containing the project config.
            output_root (str): Absolute path for the output root.
            case_path (str): Absolute path for the test case.
            test_vector (OrderedDict): Test case identification.
            case_info (dict): Extra information for the case.

        Returns:
            dict: Test case specification, e.g.:
                {
                    "cmd": ["ls", "-l"]       # command and its arguments
                    "envs": {"K": "V", ...}   # environment variables
                    "results": ["STDOUT"]     # result files to preserve
                    "run": {"nprocs": 1, ...} # runner specific information
                    "validator": {"exists": [...], ..}  # result validator
                }
        '''
        call_args = dict(self.args)
        call_args["conf_root"] = conf_root
        call_args["output_root"] = output_root
        call_args["case_path"] = case_path
        call_args["test_vector"] = test_vector
        if case_info:
            call_args["case_info"] = case_info
        case_spec = self.func(**call_args)
        # Pre-create empty result files, so when an output file is used as
        # a special signal it is already present and will not be ignored.
        for name in case_spec["results"]:
            result_path = os.path.join(case_path, name)
            if not os.path.exists(result_path):
                with open(result_path, "w") as fh:
                    fh.write("")
        return case_spec
def identifier(value):
    '''Create a valid identifier out of a value

    The value is stripped and lower-cased, every non-word character is
    replaced by an underscore, and runs of underscores are collapsed.
    '''
    normalized = str(value).strip().lower()
    underscored = re.sub(r"\W", "_", normalized)
    return re.sub(r"_+", "_", underscored)
class OutputOrganizer(object):
    '''Maps test vectors to paths inside the output tree (layout v1).'''
    def __init__(self, version=1):
        # Only layout version 1 is defined so far.
        if version != 1:
            raise RuntimeError(
                "Unsupported output version '%s': only allow 1" % version)
        self.version = version

    def get_case_path(self, test_vector):
        '''Return the case directory: one "factor-value" segment per
        factor, each part normalized via identifier().'''
        segments = ["{0}-{1}".format(identifier(name), identifier(value))
                    for name, value in test_vector.items()]
        return os.path.join(*segments)

    def get_project_info_path(self):
        '''Return the project info file name, relative to the output root.'''
        return "TestProject.json"

    def get_case_spec_path(self, test_vector):
        '''Return the case spec file path, relative to the output root.'''
        return os.path.join(self.get_case_path(test_vector), "TestCase.json")
class TestProjectBuilder(object):
    '''Builds a runnable test project tree from a project config directory.'''
    def __init__(self, conf_root):
        # conf_root must contain TestProjectConfig.json or .yml.
        if not os.path.isabs(conf_root):
            conf_root = os.path.abspath(conf_root)
        self.conf_root = conf_root
        spec_file = os.path.join(self.conf_root, "TestProjectConfig.json")
        if not os.path.exists(spec_file):
            # Fall back to the YAML variant of the config file.
            spec_file = os.path.join(self.conf_root, "TestProjectConfig.yml")
            if not os.path.exists(spec_file):
                raise RuntimeError(
                    ("Either TestProjectConfig.json or " +
                     "TestProjectConfig.yml shall exists under {}").format(
                         self.conf_root))
        spec = load_conf(spec_file)
        # Do minimal sanity check
        project_version = spec.get("version", 1)
        if int(project_version) != 1:
            raise RuntimeError(
                "Unsupported project version '%s': only allow '1'" %
                project_version)
        # Setup basic project information
        project_info = spec["project"]
        self.name = project_info["name"]
        self.test_factors = project_info["test_factors"]
        data_files = project_info.get("data_files", [])
        self.data_files = data_files
        common_case_files = project_info.get("common_case_files", [])
        self.common_case_files = common_case_files
        # Build test vector generator; each flavor reads its own spec
        # section.
        test_vector_generator_name = project_info["test_vector_generator"]
        if test_vector_generator_name == "cart_product":
            args = spec["cart_product_vector_generator"]
            test_factor_values = args["test_factor_values"]
            self.test_vector_generator = CartProductVectorGenerator(
                self.test_factors, test_factor_values)
        elif test_vector_generator_name == "simple":
            args = spec["simple_vector_generator"]
            test_vectors = args["test_vectors"]
            self.test_vector_generator = SimpleVectorGenerator(
                self.test_factors, test_vectors)
        elif test_vector_generator_name == "custom":
            args = spec["custom_vector_generator"]
            self.test_vector_generator = CustomVectorGenerator(
                self.test_factors, args, conf_root)
        elif test_vector_generator_name == "bench":
            args = spec["bench_vector_generator"]
            self.test_vector_generator = BenchVectorGenerator(
                self.test_factors, args)
        else:
            raise RuntimeError("Unknown test vector generator '%s'" %
                               test_vector_generator_name)
        # Build test case generator
        test_case_generator_name = project_info["test_case_generator"]
        if test_case_generator_name == "custom":
            info = spec["custom_case_generator"]
            module = info["import"]
            if not os.path.isabs(module):
                module = os.path.normpath(os.path.join(self.conf_root, module))
            func = info["func"]
            args = info.get("args", {})
            self.test_case_generator = CustomCaseGenerator(module, func, args)
        elif test_case_generator_name == "template":
            template = spec["template_case_generator"]
            self.test_case_generator = TemplateCaseGenerator(template)
        else:
            raise RuntimeError("Unknown test case generator '%s'" %
                               test_case_generator_name)
        # Build output organizer
        self.output_organizer = OutputOrganizer(version=1)

    def write(self, output_root, link_files=False):
        '''Materialize data files, test cases and the project info file
        under output_root. When link_files is True, relative data files
        are symlinked instead of copied.'''
        # Prepare directories
        if not os.path.isabs(output_root):
            output_root = os.path.abspath(output_root)
        if not os.path.exists(output_root):
            os.makedirs(output_root)
        # Handle data files: leave absolute path as-is, copy or link relative
        # path to the output directory
        for path in self.data_files:
            if os.path.isabs(path):
                continue
            srcpath = os.path.join(self.conf_root, path)
            dstpath = os.path.join(output_root, path)
            if not os.path.exists(srcpath):
                raise RuntimeError("Data file specified but not found: '%s'" %
                                   path)
            if os.path.isdir(srcpath):
                dstdir = os.path.dirname(dstpath)
                if not os.path.exists(dstdir):
                    os.makedirs(dstdir)
                # Replace any stale destination (symlink, dir or file).
                if os.path.exists(dstpath):
                    if os.path.islink(dstpath):
                        os.remove(dstpath)
                    elif os.path.isdir(dstpath):
                        shutil.rmtree(dstpath)
                    else:
                        os.remove(dstpath)
                if link_files:
                    os.symlink(srcpath, dstpath)
                else:
                    shutil.copytree(srcpath, dstpath)
            elif os.path.isfile(srcpath):
                dstdir = os.path.dirname(dstpath)
                if not os.path.exists(dstdir):
                    os.makedirs(dstdir)
                if os.path.exists(dstpath):
                    if os.path.islink(dstpath):
                        os.remove(dstpath)
                    elif os.path.isdir(dstpath):
                        shutil.rmtree(dstpath)
                    else:
                        os.remove(dstpath)
                if link_files:
                    os.symlink(srcpath, dstpath)
                else:
                    shutil.copyfile(srcpath, dstpath)
                    shutil.copystat(srcpath, dstpath)
            else:
                raise RuntimeError("File type not supported: '%s'" % path)
        # Generate test cases and write test case config
        for case in self.test_vector_generator.items():
            case_path = self.output_organizer.get_case_path(case)
            case_fullpath = os.path.join(output_root, case_path)
            case_info = self.test_vector_generator.case_info(case)
            if not os.path.exists(case_fullpath):
                os.makedirs(case_fullpath)
            # copy common case files to case path, only ordinary file is, each
            # file is copied to the case path, without reconstructing the dir.
            for path in self.common_case_files:
                srcpath = path
                if not os.path.isabs(path):
                    srcpath = os.path.join(self.conf_root, path)
                # NOTE(review): isfile is checked before exists, so a
                # missing file is reported as "is not a file" -- consider
                # swapping these two checks.
                if not os.path.isfile(srcpath):
                    raise ValueError("Common case file '%s' is not a file." %
                                     path)
                if not os.path.exists(srcpath):
                    raise ValueError("Common case file '%s' not found" % path)
                dstpath = os.path.join(case_fullpath, os.path.basename(path))
                if os.path.exists(dstpath):
                    os.remove(dstpath)
                shutil.copyfile(srcpath, dstpath)
            # Run the case generator with CWD set to the case directory,
            # restoring the previous CWD even on failure.
            cwd = os.path.abspath(os.getcwd())
            os.chdir(case_fullpath)
            try:
                case_spec = self.test_case_generator.make_case(
                    self.conf_root, output_root, case_fullpath, case,
                    case_info)
                if case_info:
                    case_spec["case_info"] = case_info
            finally:
                os.chdir(cwd)
            case_spec_path = self.output_organizer.get_case_spec_path(case)
            case_spec_fullpath = os.path.join(output_root, case_spec_path)
            # NOTE(review): the file handle from open() is never closed;
            # consider a with-statement.
            json.dump(case_spec, open(case_spec_fullpath, "w"), indent=2)
        # Write project config
        info = [("version", 1), ("name", self.name),
                ("test_factors", self.test_factors)]
        info = OrderedDict(info)
        info["data_files"] = self.data_files
        test_defs = []
        for case in self.test_vector_generator.items():
            vector = list(case.values())
            path = self.output_organizer.get_case_path(case)
            test_def = OrderedDict(zip(["test_vector", "path"],
                                       [vector, path]))
            case_info = self.test_vector_generator.case_info(case)
            if case_info:
                test_def["case_info"] = case_info
            test_defs.append(test_def)
        info["test_cases"] = test_defs
        project_info_path = self.output_organizer.get_project_info_path()
        project_info_fullpath = os.path.join(output_root, project_info_path)
        json.dump(info, open(project_info_fullpath, "w"), indent=2)
def main():
    '''Parse command line arguments and materialize the test project.'''
    parser = argparse.ArgumentParser()
    parser.add_argument("conf_root", help="Project configuration directory")
    parser.add_argument("output_root", help="Output directory")
    parser.add_argument("--link-files",
                        action="store_true",
                        help="Sympolic link data files instead of copy")
    options = parser.parse_args()
    builder = TestProjectBuilder(options.conf_root)
    builder.write(options.output_root, options.link_files)
if __name__ == "__main__":
    main()
import json
import jsonpickle
from .errors import InvalidInstance
from ..paystack_config import PaystackConfig
class Base():
    '''
    Abstract base class providing JSON (de)serialization via jsonpickle.
    '''
    def __init__(self):
        # Guard against direct instantiation; subclasses are fine.
        if type(self) is Base:
            raise TypeError("Can not make instance of abstract base class")

    def to_json(self, pickled=False):
        '''
        Serialize this instance to JSON.

        When `pickled` is True the raw jsonpickle payload (including its
        "py/object" type tag) is returned; otherwise the tag is stripped
        and a plain JSON document is produced.
        '''
        encoded = jsonpickle.encode(self)
        if pickled:
            return encoded
        data = json.JSONDecoder().decode(encoded)
        data.pop("py/object")
        return json.dumps(data)

    @classmethod
    def from_json(cls, data, pickled=False):
        '''
        Rebuild an instance of `cls` from the given JSON string.

        Raises InvalidInstance when the decoded object is not an
        instance of `cls`.
        '''
        if pickled:
            restored = jsonpickle.decode(data)
        else:
            # Re-insert the "py/object" tag so jsonpickle can restore the
            # right class. Derive the dotted path from str(cls), e.g.
            # "<class 'pkg.mod.Name'>" -> "pkg.mod.Name".
            dotted = str(cls).replace('<class ', '')
            dotted = dotted.replace('>', '')
            dotted = dotted.replace("'", "")
            payload = json.JSONDecoder().decode(data)
            payload['py/object'] = dotted
            restored = jsonpickle.decode(json.JSONEncoder().encode(payload))
        if isinstance(restored, cls):
            return restored
        raise InvalidInstance(cls.__name__)
class Manager(Base):
    '''
    Abstract base class shared by the API 'Manager' classes.

    Pulls connection settings from PaystackConfig and provides helpers
    for parsing API responses and building request headers.
    '''
    PAYSTACK_URL = None
    SECRET_KEY = None
    LOCAL_COST = None
    INTL_COST = None
    PASS_ON_TRANSACTION_COST = None
    # Shared JSON decoder reused by every manager instance.
    decoder = json.JSONDecoder()

    def __init__(self):
        super().__init__()
        # Managers are abstract as well: only subclasses may be created.
        if type(self) is Manager:
            raise TypeError("Can not make instance of abstract base class")
        if not PaystackConfig.SECRET_KEY or not PaystackConfig.PUBLIC_KEY:
            raise ValueError("No secret key or public key found,"
                             "assign values using PaystackConfig.SECRET_KEY = SECRET_KEY and"
                             "PaystackConfig.PUBLIC_KEY = PUBLIC_KEY")
        self.PAYSTACK_URL = PaystackConfig.PAYSTACK_URL
        self.SECRET_KEY = PaystackConfig.SECRET_KEY

    def get_content_status(self, content):
        '''
        Return the (status, message) pair from an API response dict.

        Arguments :
            content : Response as a dict
        '''
        if not isinstance(content, dict):
            raise TypeError("Content argument should be a dict")
        return (content['status'], content['message'])

    def parse_response_content(self, content):
        '''
        Decode a raw response body (bytes) into a python object.

        Arguments:
            content : Response in bytes
        '''
        return self.decoder.decode(bytes.decode(content))

    def build_request_args(self, data=None):
        '''
        Generate the required request headers.

        Returns a tuple of the generated headers and the body as JSON.

        Arguments :
            data(Dict) : Optional body of the request.
        '''
        headers = {
            'Authorization': 'Bearer %s' % self.SECRET_KEY,
            'Content-Type': 'application/json',
            'cache-control': 'no-cache'
        }
        return (headers, json.dumps(data))
import jsonpickle
from .errors import InvalidInstance
from ..paystack_config import PaystackConfig
class Base():
    '''
    Abstract Base Class

    JSON (de)serialization support for subclasses via jsonpickle.
    '''

    def __init__(self):
        # Abstract: direct instantiation is forbidden.
        if type(self) is Base:
            raise TypeError("Can not make instance of abstract base class")

    def to_json(self, pickled=False):
        '''
        Method to serialize class instance

        pickled=True keeps jsonpickle's "py/object" tag; otherwise it is
        stripped and plain JSON is returned.
        '''
        if pickled:
            return jsonpickle.encode(self)
        else:
            data = json.JSONDecoder().decode(jsonpickle.encode(self))
            data.pop("py/object")
            return json.dumps(data)

    @classmethod
    def from_json(cls, data, pickled=False):
        '''
        Method to return a class instance from given json dict

        Raises InvalidInstance when the decoded object is not a cls.
        '''
        class_name = cls.__name__
        class_object = None
        if pickled:
            class_object = jsonpickle.decode(data)
        else:
            # Rebuild the dotted "py/object" path from repr(cls) so
            # jsonpickle reconstructs the correct type.
            py_object = str(cls).replace('<class ', '')
            py_object = py_object.replace('>', '')
            py_object = py_object.replace("'", "")
            data = json.JSONDecoder().decode(data)
            data['py/object'] = py_object
            data = json.JSONEncoder().encode(data)
            class_object = jsonpickle.decode(data)
        if isinstance(class_object, cls):
            return class_object
        else:
            raise InvalidInstance(class_name)
class Manager(Base):
    '''
    Abstract base class for 'Manager' Classes

    Carries Paystack API configuration plus response/header helpers.
    '''
    # Filled from PaystackConfig in __init__.
    PAYSTACK_URL = None
    SECRET_KEY = None
    LOCAL_COST = None
    INTL_COST = None
    PASS_ON_TRANSACTION_COST = None
    # Shared JSON decoder for response parsing.
    decoder = json.JSONDecoder()

    def __init__(self):
        super().__init__()
        # Abstract: direct instantiation is forbidden.
        if type(self) is Manager:
            raise TypeError("Can not make instance of abstract base class")
        # Both API keys must be configured beforehand.
        if not PaystackConfig.SECRET_KEY or not PaystackConfig.PUBLIC_KEY:
            raise ValueError("No secret key or public key found,"
                             "assign values using PaystackConfig.SECRET_KEY = SECRET_KEY and"
                             "PaystackConfig.PUBLIC_KEY = PUBLIC_KEY")
        self.PAYSTACK_URL = PaystackConfig.PAYSTACK_URL
        self.SECRET_KEY = PaystackConfig.SECRET_KEY

    def get_content_status(self, content):
        '''
        Method to return the status and message from an API response

        Arguments :
        content : Response as a dict
        '''
        if not isinstance(content, dict):
            raise TypeError("Content argument should be a dict")
        return (content['status'], content['message'])

    def parse_response_content(self, content):
        '''
        Method to convert a response's content in bytes to a string.

        Arguments:
        content : Response in bytes
        '''
        content = bytes.decode(content)
        content = self.decoder.decode(content)
        return content

    def build_request_args(self, data=None):
        '''
        Method for generating required headers.
        Returns a tuple containing the generated headers and the data in json.

        Arguments :
        data(Dict) : An optional data argument which holds the body of the request.
        '''
        headers = {'Authorization' : 'Bearer %s' % self.SECRET_KEY,
                   'Content-Type' : 'application/json',
                   'cache-control' : 'no-cache'
                   }
        data = json.dumps(data)
return (headers, data) | 0.495361 | 0.072768 |
import unittest
from watcher import Watcher, StoppableThread
class WatcherTestCase(unittest.TestCase):
    """Unit tests for watcher.Watcher scanning, callback delivery and the
    monitor/scan thread lifecycle."""

    def callback(self, message):
        # Test double handed to Watcher: records the delivered message so
        # assertions can inspect it.
        self.message = message
        return message

    def setUp(self):
        self.watcher = Watcher(self.callback)
        self.message = ''

    def test_callback(self):
        """The watcher stores and invokes the callback it was given."""
        self.assertEqual(self.watcher.callback, self.callback,
                         'watcher.callback != callback')
        message = 'test message'
        self.watcher.callback(message)
        self.assertEqual(message, self.message,
                         'callback didn\'t return same message')

    def test_scan_directories_no_match(self):
        """No directories/extensions configured -> empty scan results."""
        self.watcher.directories = list()
        self.watcher.files = list()
        self.watcher._scan_directories()
        self.assertFalse(self.watcher._scan_results,
                         '_scan_results should be empty')

    def test_scan_directories_match(self):
        """Scanning 'tests' for *.txt finds both fixture files."""
        self.watcher.directories = ['.', 'tests']
        self.watcher.files = ['txt']
        self.watcher._scan_directories()
        self.assertTrue(self.watcher._scan_results,
                        '_scan_results should not be empty')
        # NOTE(review): hard-coded backslashes make these assertions
        # Windows-only; confirm the separator Watcher emits before
        # switching to os.path.join.
        self.assertIn('tests\\ascii_encoded.txt',
                      self.watcher._scan_results.keys())
        self.assertIn('tests\\utf-16_encoded.txt',
                      self.watcher._scan_results.keys())

    def test_scan_files_no_match(self):
        """Unchanged files do not trigger the callback."""
        self.message = ''
        self.watcher.directories = ['.', 'tests']
        self.watcher.files = ['txt']
        self.watcher._scan_directories()
        self.assertTrue(self.watcher._scan_results, 'missing scan results')
        self.watcher._scan_files()
        self.assertEqual(self.message, '',
                         'message should not have been modified')

    def test_scan_files_match(self):
        """Appending to a watched file delivers the new text to the callback."""
        self.message = ''
        self.watcher.directories = ['.', 'tests']
        self.watcher.files = ['txt']
        self.watcher._scan_directories()
        self.assertTrue(self.watcher._scan_results, 'missing scan results')
        self.assertIn('tests\\ascii_encoded.txt',
                      self.watcher._scan_results.keys())
        # NOTE(review): this append is never undone, so the fixture file
        # grows on every run; consider restoring it via addCleanup.
        with open('tests/ascii_encoded.txt', 'a') as file:
            file.write('extra line')
        self.watcher._scan_files()
        self.assertEqual(self.message, 'extra line',
                         'message should have been modified')

    def test_stop_monitor(self):
        """stop_monitor() terminates both worker threads."""
        thread = StoppableThread(None, None)
        self.watcher._monitor_thread = thread
        self.watcher._scan_thread = thread
        thread.start()
        # Fix: Thread.isAlive() was a deprecated alias and was removed in
        # Python 3.9; is_alive() is the supported spelling.
        self.assertTrue(self.watcher._monitor_thread.is_alive(),
                        'thread should be alive')
        self.assertTrue(self.watcher._scan_thread.is_alive(),
                        'thread should be alive')
        self.watcher.stop_monitor()
        self.assertFalse(self.watcher._monitor_thread.is_alive(),
                         'thread should not be alive')
        self.assertFalse(self.watcher._scan_thread.is_alive(),
                         'thread should not be alive')
if __name__ == '__main__':
unittest.main() | tests/test_watcher.py | import unittest
from watcher import Watcher, StoppableThread
class WatcherTestCase(unittest.TestCase):
    """Unit tests for watcher.Watcher scanning and thread shutdown."""

    def callback(self, message):
        # Records the delivered message for assertions.
        self.message = message
        return message

    def setUp(self):
        self.watcher = Watcher(self.callback)
        self.message = ''

    def test_callback(self):
        # The watcher stores and invokes the callback it was given.
        self.assertEqual(self.watcher.callback, self.callback,
                         'watcher.callback != callback')
        message = 'test message'
        self.watcher.callback(message)
        self.assertEqual(message, self.message,
                         'callback didn\'t return same message')

    def test_scan_directories_no_match(self):
        # No directories/extensions configured -> empty scan results.
        self.watcher.directories = list()
        self.watcher.files = list()
        self.watcher._scan_directories()
        self.assertFalse(self.watcher._scan_results,
                         '_scan_results should be empty')

    def test_scan_directories_match(self):
        # NOTE(review): backslash paths make these assertions Windows-only.
        self.watcher.directories = ['.', 'tests']
        self.watcher.files = ['txt']
        self.watcher._scan_directories()
        self.assertTrue(self.watcher._scan_results,
                        '_scan_results should not be empty')
        self.assertIn('tests\\ascii_encoded.txt',
                      self.watcher._scan_results.keys())
        self.assertIn('tests\\utf-16_encoded.txt',
                      self.watcher._scan_results.keys())

    def test_scan_files_no_match(self):
        # Unchanged files do not trigger the callback.
        self.message = ''
        self.watcher.directories = ['.', 'tests']
        self.watcher.files = ['txt']
        self.watcher._scan_directories()
        self.assertTrue(self.watcher._scan_results, 'missing scan results')
        self.watcher._scan_files()
        self.assertEqual(self.message, '',
                         'message should not have been modified')

    def test_scan_files_match(self):
        # Appending to a watched file must deliver the new text.
        # NOTE(review): the append is never undone; the fixture grows per run.
        self.message = ''
        self.watcher.directories = ['.', 'tests']
        self.watcher.files = ['txt']
        self.watcher._scan_directories()
        self.assertTrue(self.watcher._scan_results, 'missing scan results')
        self.assertIn('tests\\ascii_encoded.txt',
                      self.watcher._scan_results.keys())
        with open('tests/ascii_encoded.txt', 'a') as file:
            file.write('extra line')
        self.watcher._scan_files()
        self.assertEqual(self.message, 'extra line',
                         'message should have been modified')

    def test_stop_monitor(self):
        # NOTE(review): Thread.isAlive() was removed in Python 3.9 --
        # these calls must become is_alive() to run on modern Pythons.
        thread = StoppableThread(None, None)
        self.watcher._monitor_thread = thread
        self.watcher._scan_thread = thread
        thread.start()
        self.assertTrue(self.watcher._monitor_thread.isAlive(),
                        'thread should be alive')
        self.assertTrue(self.watcher._scan_thread.isAlive(),
                        'thread should be alive')
        self.watcher.stop_monitor()
        self.assertFalse(self.watcher._monitor_thread.isAlive(),
                         'thread should not be alive')
        self.assertFalse(self.watcher._scan_thread.isAlive(),
                         'thread should not be alive')
if __name__ == '__main__':
unittest.main() | 0.596786 | 0.261794 |
from pymtl3 import *
from pymtl3.stdlib.test import TestSinkCL
from pymtl3.stdlib.test.test_srcs import TestSrcRTL
from ...lib.opt_type import *
from ...lib.messages import *
from ...lib.ctrl_helper import *
from ...fu.flexible.FlexibleFuRTL import FlexibleFuRTL
from ...fu.single.AdderRTL import AdderRTL
from ...fu.single.MemUnitRTL import MemUnitRTL
from ..CGRACL import CGRACL
import os
#-------------------------------------------------------------------------
# Test harness
#-------------------------------------------------------------------------
class TestHarness( Component ):
  """PyMTL test harness: instantiates the CGRA DUT with the given
  functional units, control schedule and preloaded memory contents."""

  def construct( s, DUT, FunctionUnit, FuList, DataType, CtrlType,
                 width, height, ctrl_mem_size, data_mem_size,
                 src_opt, preload_data, preload_const ):
    s.num_tiles = width * height
    AddrType = mk_bits( clog2( ctrl_mem_size ) )
    # len( src_opt ) is the number of per-tile control word lists.
    s.dut = DUT( FunctionUnit, FuList, DataType, CtrlType, width, height,
                 ctrl_mem_size, data_mem_size, len( src_opt ), src_opt,
                 preload_data, preload_const )

  def line_trace( s ):
    # Delegate per-cycle trace output to the DUT.
    return s.dut.line_trace()
def run_sim( test_harness, max_cycles=7 ):
  """Elaborate and simulate the harness, printing one trace line per cycle."""
  # Build the simulator and bring the design out of reset.
  test_harness.elaborate()
  test_harness.apply( SimulationPass() )
  test_harness.sim_reset()

  # Cycle-0 trace, then one trace line per simulated cycle.
  print()
  print( "{}:{}".format( 0, test_harness.line_trace() ))
  for cycle in range( 1, max_cycles + 1 ):
    test_harness.tick()
    print( "{}:{}".format( cycle, test_harness.line_trace() ))

  # A few extra ticks after the traced window.
  for _ in range( 3 ):
    test_harness.tick()
def test_cgra_2x2_universal():
  """Drive a 2x2 CGRA through a hand-written four-op control schedule."""
  # Tile / crossbar topology parameters.
  num_tile_inports = 4
  num_tile_outports = 4
  num_xbar_inports = 6
  num_xbar_outports = 8
  ctrl_mem_size = 8
  width = 2
  height = 2
  RouteType = mk_bits( clog2( num_xbar_inports + 1 ) )
  AddrType = mk_bits( clog2( ctrl_mem_size ) )
  num_tiles = width * height
  ctrl_mem_size = 8
  data_mem_size = 10
  num_fu_in = 4
  # DUT configuration: flexible FU wrapping an adder and a memory unit.
  DUT = CGRACL
  FunctionUnit = FlexibleFuRTL
  FuList = [AdderRTL, MemUnitRTL]
  DataType = mk_data( 16, 1 )
  CtrlType = mk_ctrl( num_fu_in, num_xbar_inports, num_xbar_outports )
  FuInType = mk_bits( clog2( num_fu_in + 1 ) )
  pickRegister = [ FuInType( x+1 ) for x in range( num_fu_in ) ]
  # Identical four-op schedule (ADD_CONST, ADD, STR, SUB) for every tile.
  src_opt = [[CtrlType( OPT_ADD_CONST, pickRegister, [
      RouteType(3), RouteType(2), RouteType(1), RouteType(0),
      RouteType(4), RouteType(4), RouteType(4), RouteType(4)] ),
      CtrlType( OPT_ADD, pickRegister, [
      RouteType(3),RouteType(2), RouteType(1), RouteType(0),
      RouteType(4), RouteType(4), RouteType(4), RouteType(4)] ),
      CtrlType( OPT_STR, pickRegister, [
      RouteType(3),RouteType(2), RouteType(1), RouteType(0),
      RouteType(4), RouteType(4), RouteType(4), RouteType(4)] ),
      CtrlType( OPT_SUB, pickRegister, [
      RouteType(3),RouteType(2), RouteType(1), RouteType(0),
      RouteType(4), RouteType(4), RouteType(4), RouteType(4)] ) ]
      for _ in range( num_tiles ) ]
  # Initial data-memory contents and per-tile constant queues.
  preload_data = [DataType(7, 1), DataType(7, 1), DataType(7, 1), DataType(7, 1)]
  preload_const = [[DataType(2, 1)], [DataType(1, 1)],
                   [DataType(4, 1)], [DataType(3, 1)]]
  th = TestHarness( DUT, FunctionUnit, FuList, DataType, CtrlType,
                    width, height, ctrl_mem_size, data_mem_size,
                    src_opt, preload_data, preload_const )
  run_sim( th )
def test_cgra_4x4_universal_fir():
  """Drive a 4x4 CGRA with a FIR-kernel control schedule loaded from JSON."""
  # Control words come from a JSON mapping file next to this test.
  target_json = "config_fir.json"
  script_dir = os.path.dirname(__file__)
  file_path = os.path.join( script_dir, target_json )
  # II doubles as the control-memory depth (one ctrl word per cycle slot).
  II = 4
  num_tile_inports = 4
  num_tile_outports = 4
  num_xbar_inports = 6
  num_xbar_outports = 8
  ctrl_mem_size = 8
  width = 4
  height = 4
  num_tiles = width * height
  RouteType = mk_bits( clog2( num_xbar_inports + 1 ) )
  AddrType = mk_bits( clog2( ctrl_mem_size ) )
  num_tiles = width * height
  ctrl_mem_size = II
  data_mem_size = 10
  num_fu_in = 4
  DUT = CGRACL
  FunctionUnit = FlexibleFuRTL
  FuList = [AdderRTL, MemUnitRTL]
  DataType = mk_data( 16, 1 )
  CtrlType = mk_ctrl( num_fu_in, num_xbar_inports, num_xbar_outports )
  # Translate the JSON mapping into per-tile control word lists.
  cgra_ctrl = CGRACtrl( file_path, CtrlType, RouteType, width, height,
                        num_fu_in, num_xbar_outports, II )
  src_opt = cgra_ctrl.get_ctrl()
  print( src_opt )
  # Uniform preloads: every memory word and constant queue entry is 1.
  preload_data = [ DataType( 1, 1 ) ] * data_mem_size
  preload_const = [ [ DataType( 1, 1 ) ] * II ] * num_tiles
  th = TestHarness( DUT, FunctionUnit, FuList, DataType, CtrlType,
                    width, height, ctrl_mem_size, data_mem_size,
                    src_opt, preload_data, preload_const )
run_sim( th ) | cgra/test/CGRACL_test.py | from pymtl3 import *
from pymtl3.stdlib.test import TestSinkCL
from pymtl3.stdlib.test.test_srcs import TestSrcRTL
from ...lib.opt_type import *
from ...lib.messages import *
from ...lib.ctrl_helper import *
from ...fu.flexible.FlexibleFuRTL import FlexibleFuRTL
from ...fu.single.AdderRTL import AdderRTL
from ...fu.single.MemUnitRTL import MemUnitRTL
from ..CGRACL import CGRACL
import os
#-------------------------------------------------------------------------
# Test harness
#-------------------------------------------------------------------------
class TestHarness( Component ):
  """PyMTL test harness wrapping the CGRA DUT."""

  def construct( s, DUT, FunctionUnit, FuList, DataType, CtrlType,
                 width, height, ctrl_mem_size, data_mem_size,
                 src_opt, preload_data, preload_const ):
    s.num_tiles = width * height
    AddrType = mk_bits( clog2( ctrl_mem_size ) )
    s.dut = DUT( FunctionUnit, FuList, DataType, CtrlType, width, height,
                 ctrl_mem_size, data_mem_size, len( src_opt ), src_opt,
                 preload_data, preload_const )

  def line_trace( s ):
    # Delegate tracing to the DUT.
    return s.dut.line_trace()
def run_sim( test_harness, max_cycles=7 ):
  """Elaborate and simulate the harness, printing a trace line per cycle."""
  test_harness.elaborate()
  test_harness.apply( SimulationPass() )
  test_harness.sim_reset()

  # Run simulation
  ncycles = 0
  print()
  print( "{}:{}".format( ncycles, test_harness.line_trace() ))
  while ncycles < max_cycles:
    test_harness.tick()
    ncycles += 1
    print( "{}:{}".format( ncycles, test_harness.line_trace() ))

  # Check timeout
  # assert ncycles < max_cycles
  # A few extra ticks after the traced window.
  test_harness.tick()
  test_harness.tick()
  test_harness.tick()
def test_cgra_2x2_universal():
  """2x2 CGRA with a hand-written four-op (ADD_CONST/ADD/STR/SUB) schedule."""
  num_tile_inports = 4
  num_tile_outports = 4
  num_xbar_inports = 6
  num_xbar_outports = 8
  ctrl_mem_size = 8
  width = 2
  height = 2
  RouteType = mk_bits( clog2( num_xbar_inports + 1 ) )
  AddrType = mk_bits( clog2( ctrl_mem_size ) )
  num_tiles = width * height
  ctrl_mem_size = 8
  data_mem_size = 10
  num_fu_in = 4
  DUT = CGRACL
  FunctionUnit = FlexibleFuRTL
  FuList = [AdderRTL, MemUnitRTL]
  DataType = mk_data( 16, 1 )
  CtrlType = mk_ctrl( num_fu_in, num_xbar_inports, num_xbar_outports )
  FuInType = mk_bits( clog2( num_fu_in + 1 ) )
  pickRegister = [ FuInType( x+1 ) for x in range( num_fu_in ) ]
  # Same schedule for every tile.
  src_opt = [[CtrlType( OPT_ADD_CONST, pickRegister, [
      RouteType(3), RouteType(2), RouteType(1), RouteType(0),
      RouteType(4), RouteType(4), RouteType(4), RouteType(4)] ),
      CtrlType( OPT_ADD, pickRegister, [
      RouteType(3),RouteType(2), RouteType(1), RouteType(0),
      RouteType(4), RouteType(4), RouteType(4), RouteType(4)] ),
      CtrlType( OPT_STR, pickRegister, [
      RouteType(3),RouteType(2), RouteType(1), RouteType(0),
      RouteType(4), RouteType(4), RouteType(4), RouteType(4)] ),
      CtrlType( OPT_SUB, pickRegister, [
      RouteType(3),RouteType(2), RouteType(1), RouteType(0),
      RouteType(4), RouteType(4), RouteType(4), RouteType(4)] ) ]
      for _ in range( num_tiles ) ]
  # Data memory and per-tile constant preloads.
  preload_data = [DataType(7, 1), DataType(7, 1), DataType(7, 1), DataType(7, 1)]
  preload_const = [[DataType(2, 1)], [DataType(1, 1)],
                   [DataType(4, 1)], [DataType(3, 1)]]
  th = TestHarness( DUT, FunctionUnit, FuList, DataType, CtrlType,
                    width, height, ctrl_mem_size, data_mem_size,
                    src_opt, preload_data, preload_const )
  run_sim( th )
def test_cgra_4x4_universal_fir():
  """4x4 CGRA with a FIR-kernel schedule loaded from config_fir.json."""
  target_json = "config_fir.json"
  script_dir = os.path.dirname(__file__)
  file_path = os.path.join( script_dir, target_json )
  # II doubles as the control-memory depth below.
  II = 4
  num_tile_inports = 4
  num_tile_outports = 4
  num_xbar_inports = 6
  num_xbar_outports = 8
  ctrl_mem_size = 8
  width = 4
  height = 4
  num_tiles = width * height
  RouteType = mk_bits( clog2( num_xbar_inports + 1 ) )
  AddrType = mk_bits( clog2( ctrl_mem_size ) )
  num_tiles = width * height
  ctrl_mem_size = II
  data_mem_size = 10
  num_fu_in = 4
  DUT = CGRACL
  FunctionUnit = FlexibleFuRTL
  FuList = [AdderRTL, MemUnitRTL]
  DataType = mk_data( 16, 1 )
  CtrlType = mk_ctrl( num_fu_in, num_xbar_inports, num_xbar_outports )
  # Translate the JSON mapping into per-tile control word lists.
  cgra_ctrl = CGRACtrl( file_path, CtrlType, RouteType, width, height,
                        num_fu_in, num_xbar_outports, II )
  src_opt = cgra_ctrl.get_ctrl()
  print( src_opt )
  preload_data = [ DataType( 1, 1 ) ] * data_mem_size
  preload_const = [ [ DataType( 1, 1 ) ] * II ] * num_tiles
  th = TestHarness( DUT, FunctionUnit, FuList, DataType, CtrlType,
                    width, height, ctrl_mem_size, data_mem_size,
                    src_opt, preload_data, preload_const )
run_sim( th ) | 0.249722 | 0.383699 |
import os
import requests
import json
import time
import datetime
# API Tokens
# Add api keys to Heroku config vars to access them
# Using Openweather API
open_weather_token = os.environ.get('WEATHER_KEY')
# DS Logic imports
import pandas as pd
import numpy as np
from math import radians, cos, sin, asin, sqrt
from sklearn.cluster import DBSCAN
from geopy.distance import great_circle
from shapely.geometry import MultiPoint
import feedparser
# Functions
# Distance function
def haversine(lon1, lat1, lon2, lat2):
    """
    Calculate the great circle distance between two points
    on the earth (specified in decimal degrees)
    """
    # Work in radians from here on.
    lam1, phi1, lam2, phi2 = (radians(v) for v in (lon1, lat1, lon2, lat2))
    # Haversine of the central angle between the two points.
    hav = sin((phi2 - phi1) / 2) ** 2 + cos(phi1) * cos(phi2) * sin((lam2 - lam1) / 2) ** 2
    central_angle = 2 * asin(sqrt(hav))
    earth_radius_miles = 3956  # mean of polar and equatorial radius
    return central_angle * earth_radius_miles
def get_weather(lat, lon):
    """Fetch current weather for (lat, lon) from OpenWeather, imperial units.

    Returns the decoded JSON dict on HTTP 200; otherwise returns the numeric
    status code.  NOTE(review): mixed return types -- callers must check
    isinstance before indexing the result.
    """
    open_weather_url = f'http://api.openweathermap.org/data/2.5/weather?lat={lat}&lon={lon}&units=imperial&APPID={open_weather_token}'
    response = requests.get(open_weather_url)
    if response.status_code == 200:
        return json.loads(response.content.decode('utf-8'))
    else:
        return response.status_code
def get_modis_data():
    """Download the last 24h of MODIS active-fire detections (US + Hawaii)
    as a pandas DataFrame straight from the NASA FIRMS CSV feed."""
    modis_url = "https://firms.modaps.eosdis.nasa.gov/data/active_fire/c6/csv/MODIS_C6_USA_contiguous_and_Hawaii_24h.csv"
    modis_data = pd.read_csv(modis_url)
    return modis_data
def process_live_data(original_df):
    """
    Pre processes live data to match pipeline expectations.

    Expects columns 'satellite' ('T'/'A'), 'acq_date' ('YYYY-MM-DD') and
    'acq_time' (HHMM as an integer).  Returns a new DataFrame with readable
    satellite names plus 'month' and ISO 'week' features; the raw date/time
    columns are dropped.  The input frame is not modified.
    """
    print("process_live_data!")
    df = original_df.copy()
    # process satellite labels
    df["satellite"] = df["satellite"].replace({"T": "Terra", "A": "Aqua"})
    # HHMM -> minutes since midnight.
    df["acq_time"] = (df["acq_time"] // 100) * 60 + (df["acq_time"] % 100)
    # Vectorized timestamp build instead of a per-row apply.
    df["timestamp"] = (
        pd.to_datetime(df["acq_date"], format="%Y-%m-%d")
        + pd.to_timedelta(df["acq_time"], unit="min")
    )
    df["month"] = df["timestamp"].dt.month
    # Fix: Series.dt.weekofyear was deprecated in pandas 1.1 and removed in
    # 2.0; isocalendar().week yields the same ISO week number.
    df["week"] = df["timestamp"].dt.isocalendar().week.astype(int)
    df.drop(columns=["acq_date", "acq_time", "timestamp"], inplace=True)
    return df
# Getting the centermost point of a cluster
def get_centermost_point(cluster):
    """Return the member of `cluster` (an array of (lat, lon) pairs) that is
    closest, by great-circle distance, to the cluster's centroid."""
    centroid = (MultiPoint(cluster).centroid.x, MultiPoint(cluster).centroid.y)
    centermost_point = min(cluster, key=lambda point: great_circle(point, centroid).m)
    return tuple(centermost_point)
# Reduces the number of points in the modis data frame
def reduce_points(df, distance = 1.5):
    """Collapse nearby fire detections into one representative point each.

    Clusters high-confidence (>= 70) detections with DBSCAN on a haversine
    metric and keeps, per cluster, the detection closest to the centroid.

    Arguments:
    df : MODIS frame with 'latitude'/'longitude' and 'confidence' columns.
    distance : maximum intra-cluster distance in kilometres.

    Returns a DataFrame of the representative rows.
    """
    # Rename the latitude and longitude columns for ease
    df = df.rename({'latitude': 'lat', 'longitude': 'lon'}, axis=1)
    # Get the data points where the confidence is above 70
    df = df[df['confidence'] >= 70]
    # Grab the coords of the df.  Fix: DataFrame.as_matrix was deprecated in
    # pandas 0.23 and removed in 1.0; to_numpy() is the replacement.
    coords = df[['lat', 'lon']].to_numpy()
    # Compute DBSCAN; distance is the max distance for 'cluster'
    kms_per_radian = 6371.0088
    epsilon = distance / kms_per_radian
    # use the haversine metric and ball tree algorithm to calculate great circle distances between points
    db = DBSCAN(eps=epsilon, min_samples=1, algorithm='ball_tree', metric='haversine').fit(np.radians(coords))
    cluster_labels = db.labels_
    num_clusters = len(set(cluster_labels))
    clusters = pd.Series([coords[cluster_labels == n] for n in range(num_clusters)])
    # returns the center-most point from a cluster
    centermost_points = clusters.map(get_centermost_point)
    # turn these center-most points into a pandas dataframe
    lats, lons = zip(*centermost_points)
    rep_points = pd.DataFrame({'lon': lons, 'lat': lats})
    # pull the full row from the original data set where the lat and lon match
    rs = rep_points.apply(lambda row: df[(df['lat'] == row['lat']) & (df['lon'] == row['lon'])].iloc[0], axis=1)
    return rs
# Adds the empty weather columns
def add_weather_columns(df):
    """Attach empty (NaN) weather columns to `df` in place and return it."""
    for column in ('temp', 'humidity', 'wind_speed', 'wind_direction'):
        df[column] = np.nan
    return df
# Adds weather to modis dataframe
def populate_weather(df):
    """Fill the weather columns of `df` in place from the OpenWeather API.

    Sleeps for 60 seconds after every 60 requests to respect the API's
    free-tier rate limit.  Expects `df` to have a 0..n-1 integer index and
    the columns created by add_weather_columns.  Returns 'Done'.
    """
    def _fetch_into(row_index):
        # One API call; write results with .loc -- the previous chained
        # indexing (df['temp'][i]) can silently assign to a copy.
        weather = get_weather(df.lat[row_index], df.lon[row_index])
        df.loc[row_index, 'temp'] = weather['main']['temp']
        df.loc[row_index, 'humidity'] = weather['main']['humidity']
        df.loc[row_index, 'wind_speed'] = weather['wind']['speed']
        # Wind direction is absent from the payload when the wind is calm.
        if 'deg' in weather['wind']:
            df.loc[row_index, 'wind_direction'] = weather['wind']['deg']

    api_count = 0
    for i in range(df.shape[0]):
        # Pause before every 60th request (but not before the very first).
        if api_count % 60 == 0 and api_count != 0:
            print('Sleeping for 60 seconds')
            time.sleep(60)
            print('Starting up again')
        _fetch_into(i)
        api_count += 1
    return 'Done'
# Function to pull all fires
def fires_list():
    """Pull the current InciWeb incident RSS feed and return a list of
    {'name': ..., 'location': (lat, lon)} dicts, one per reported fire."""
    url = 'https://inciweb.nwcg.gov/feeds/rss/incidents/'
    fires = feedparser.parse(url)
    rss_fires = []
    for entry in fires.entries:
        # Return a dict for each fire with name and location
        fire_dict = {'name': entry.title, 'location': entry.where.coordinates}
        rss_fires.append(fire_dict)
    return rss_fires
# Label data
def label_fires(df):
    """Return a copy of `df` with a binary 'fire' column.

    A detection is labelled 1 when it lies within 0.3 miles of any fire
    currently reported by the InciWeb feed, else 0.  Fix: the original
    referenced `label` before assignment (NameError) whenever the feed
    returned no fires; `any()` over the fire list handles that case.
    """
    print('labelling data')
    # Get lats and lons from df
    lats = df['lat'].tolist()
    lons = df['lon'].tolist()
    # Pull confirmed fires once; each location is a (lat, lon) pair.
    locations = [entry['location'] for entry in fires_list()]
    # 1 if any confirmed fire is within 0.3 miles of the point, else 0.
    labels = [
        1 if any(haversine(lon, lat, fire[1], fire[0]) < 0.3
                 for fire in locations) else 0
        for lat, lon in zip(lats, lons)
    ]
    # append labels to a copy of df
    labelled_df = df.copy()
    labelled_df['fire'] = labels
    return labelled_df
def clean_df(df):
    """Return a copy of `df` with all NaN values replaced by 0."""
    clean_df = df.fillna(0)
return clean_df | Getter_Api/app/functions.py | import os
import requests
import json
import time
import datetime
# API Tokens
# Add api keys to Heroku config vars to access them
# Using Openweather API
open_weather_token = os.environ.get('WEATHER_KEY')
# DS Logic imports
import pandas as pd
import numpy as np
from math import radians, cos, sin, asin, sqrt
from sklearn.cluster import DBSCAN
from geopy.distance import great_circle
from shapely.geometry import MultiPoint
import feedparser
# Functions
# Distance function
def haversine(lon1, lat1, lon2, lat2):
    """
    Calculate the great circle distance between two points
    on the earth (specified in decimal degrees)

    Returns the distance in miles.
    """
    # convert decimal degrees to radians
    lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
    # haversine formula
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
    c = 2 * asin(sqrt(a))
    r = 3956  # radius of earth in miles mean of poles and equator radius
    return c * r
def get_weather(lat, lon):
    """Fetch current OpenWeather conditions for (lat, lon), imperial units.

    Returns a dict on HTTP 200, otherwise the numeric status code
    (NOTE(review): mixed return types -- callers must check).
    """
    open_weather_url = f'http://api.openweathermap.org/data/2.5/weather?lat={lat}&lon={lon}&units=imperial&APPID={open_weather_token}'
    response = requests.get(open_weather_url)
    if response.status_code == 200:
        return json.loads(response.content.decode('utf-8'))
    else:
        return response.status_code
def get_modis_data():
    """Download the last 24h of MODIS active-fire detections as a DataFrame."""
    modis_url = "https://firms.modaps.eosdis.nasa.gov/data/active_fire/c6/csv/MODIS_C6_USA_contiguous_and_Hawaii_24h.csv"
    modis_data = pd.read_csv(modis_url)
    return modis_data
def process_live_data(original_df):
    """
    Pre processes live data to match pipeline expectations.

    Expects 'satellite' ('T'/'A'), 'acq_date' ('YYYY-MM-DD') and 'acq_time'
    (HHMM int) columns; returns a copy with 'month'/'week' features.
    """
    print("process_live_data!")
    df = original_df.copy()
    # process satellite labels
    df["satellite"] = df["satellite"].replace({"T": "Terra", "A": "Aqua"})
    # process time features: HHMM -> minutes since midnight
    df["acq_time"] = (df["acq_time"] // 100) * 60 + (df["acq_time"] % 100)
    df["timestamp"] = df.apply(
        lambda x: datetime.datetime.strptime(x["acq_date"], "%Y-%m-%d")
        + datetime.timedelta(minutes=x["acq_time"]),
        axis=1,
    )
    # df['version'] = df['version'].apply(str)
    df["month"] = df["timestamp"].dt.month
    # NOTE(review): Series.dt.weekofyear was removed in pandas 2.0;
    # use .dt.isocalendar().week on modern pandas.
    df["week"] = df["timestamp"].dt.weekofyear
    df.drop(columns=["acq_date", "acq_time", "timestamp"], inplace=True)
    return df
# Getting the centermost point of a cluster
def get_centermost_point(cluster):
    """Return the cluster member closest to the cluster centroid."""
    centroid = (MultiPoint(cluster).centroid.x, MultiPoint(cluster).centroid.y)
    centermost_point = min(cluster, key=lambda point: great_circle(point, centroid).m)
    return tuple(centermost_point)
# Reduces the number of points in the modis data frame
def reduce_points(df, distance = 1.5):
    """DBSCAN-cluster high-confidence detections and keep one representative
    row per cluster.  `distance` is the max intra-cluster distance in km."""
    # Rename the latitude and longitude columns for ease
    df = df.rename({'latitude': 'lat', 'longitude': 'lon'}, axis=1)
    # Get the data points where the confidence is above 70
    df = df[df['confidence'] >= 70]
    # Grab the coords of the df
    # NOTE(review): DataFrame.as_matrix was removed in pandas 1.0;
    # use df[['lat', 'lon']].to_numpy() on modern pandas.
    coords = df.as_matrix(columns=['lat', 'lon'])
    # Compute DBSCAN; distance is the max distance for 'cluster'
    kms_per_radian = 6371.0088
    epsilon = distance / kms_per_radian
    # use the haversine metric and ball tree algorithm to calculate great circle distances between points
    db = DBSCAN(eps=epsilon, min_samples=1, algorithm='ball_tree', metric='haversine').fit(np.radians(coords))
    cluster_labels = db.labels_
    num_clusters = len(set(cluster_labels))
    clusters = pd.Series([coords[cluster_labels == n] for n in range(num_clusters)])
    # returns the center-most point from a cluster
    centermost_points = clusters.map(get_centermost_point)
    # turn these center-most points into a pandas dataframe
    lats, lons = zip(*centermost_points)
    rep_points = pd.DataFrame({'lon': lons, 'lat': lats})
    # pull the full row from the original data set where the lat and lon match
    rs = rep_points.apply(lambda row: df[(df['lat'] == row['lat']) & (df['lon'] == row['lon'])].iloc[0], axis=1)
    return rs
# Adds the empty weather columns
def add_weather_columns(df):
    """Attach empty (NaN) weather columns to `df` in place and return it."""
    df['temp'] = np.nan
    df['humidity'] = np.nan
    df['wind_speed'] = np.nan
    df['wind_direction'] = np.nan
    return df
# Adds weather to modis dataframe
def populate_weather(df):
    """Fill the weather columns of `df` in place from the OpenWeather API,
    sleeping 60s after every 60 requests (rate limit).  Returns 'Done'.

    NOTE(review): the three branches repeat the same fetch/assign body, and
    chained indexing (df['temp'][i]) can assign to a copy -- prefer df.loc.
    """
    api_count = 0
    for i in range(df.shape[0]):
        if api_count % 60 != 0:
            # Normal case: fetch and record this row's weather.
            lat = df.lat[i]
            lon = df.lon[i]
            weather = get_weather(lat, lon)
            api_count += 1
            df['temp'][i] = weather['main']['temp']
            df['humidity'][i] = weather['main']['humidity']
            df['wind_speed'][i] = weather['wind']['speed']
            # 'deg' is absent from the payload when the wind is calm.
            if 'deg' in weather['wind']:
                df['wind_direction'][i] = weather['wind']['deg']
        elif api_count == 0:
            # Very first request: no sleep needed yet.
            lat = df.lat[i]
            lon = df.lon[i]
            weather = get_weather(lat, lon)
            api_count += 1
            df['temp'][i] = weather['main']['temp']
            df['humidity'][i] = weather['main']['humidity']
            df['wind_speed'][i] = weather['wind']['speed']
            if 'deg' in weather['wind']:
                df['wind_direction'][i] = weather['wind']['deg']
        else:
            # api_count is a non-zero multiple of 60: back off first.
            print('Sleeping for 60 seconds')
            time.sleep(60)
            print('Starting up again')
            lat = df.lat[i]
            lon = df.lon[i]
            weather = get_weather(lat, lon)
            api_count += 1
            df['temp'][i] = weather['main']['temp']
            df['humidity'][i] = weather['main']['humidity']
            df['wind_speed'][i] = weather['wind']['speed']
            if 'deg' in weather['wind']:
                df['wind_direction'][i] = weather['wind']['deg']
    return 'Done'
# Function to pull all fires
def fires_list():
    """Return the current InciWeb incidents as
    {'name': ..., 'location': (lat, lon)} dicts."""
    url = 'https://inciweb.nwcg.gov/feeds/rss/incidents/'
    fires = feedparser.parse(url)
    rss_fires = []
    for entry in fires.entries:
        # Return a dict for each fire with name and location
        fire_dict = {'name': entry.title, 'location': entry.where.coordinates}
        rss_fires.append(fire_dict)
    return rss_fires
# Label data
def label_fires(df):
    """Return a copy of `df` with a binary 'fire' column: 1 when within
    0.3 miles of a reported fire, else 0.

    NOTE(review): if fires_list() returns no locations, `label` is
    referenced before assignment below (NameError).
    """
    print('labelling data')
    # Instantiate labels list
    labels = []
    # Get lats and lons from df
    lats = df['lat'].tolist()
    lons = df['lon'].tolist()
    # Pull confirmed fires
    fires = fires_list()
    locations = [entry['location'] for entry in fires]
    # loop data points
    for n in range(len(lats)):
        # loop fires
        for fire in locations:
            distance = haversine(lons[n], lats[n], fire[1], fire[0])
            label = 0
            if distance < 0.3:
                label = 1
                labels.append(label)
                break
            else:
                pass
        # No fire matched this point: record a 0.
        if label != 1:
            labels.append(label)
    # append labels to df
    labelled_df = df.copy()
    labelled_df['fire'] = labels
    return labelled_df
def clean_df(df):
    """Return a copy of `df` with all NaN values replaced by 0."""
    clean_df = df.fillna(0)
return clean_df | 0.63409 | 0.333368 |
import logging
import os
logger = logging.getLogger(__name__)
class MigratorMixin:
    """Mixin to allow the running of migrations.

    Your class must provide a `client` attribute (a PostgreSQLClient),
    as well as override some class attributes.
    """

    """Name of this migrator (e.g. "storage"). Override this."""
    name = None

    """The current "newest" schema version. Override this."""
    schema_version = None

    """The file to find the current "newest" schema in. Override this."""
    schema_file = None

    """The directory to find migration files in.

    Migrations should each be a file named migration_nnn_mmm.sql, where mmm = nnn + 1.
    """
    migrations_directory = None

    def get_installed_version(self):
        """Return current version of schema or None if none found.

        Override this.

        This may be called several times during a single migration.
        """
        raise NotImplementedError("method not overridden")  # pragma: no cover

    def create_or_migrate_schema(self, dry_run=False):
        """Either create or migrate the schema, as needed."""
        version = self.get_installed_version()
        # No installed version at all: bootstrap from the full schema file.
        if not version:
            self.create_schema(dry_run)
            return
        logger.info(f"Detected PostgreSQL {self.name} schema version {version}.")
        if version == self.schema_version:
            logger.info(f"PostgreSQL {self.name} schema is up-to-date.")
            return
        self.migrate_schema(version, dry_run)

    def create_schema(self, dry_run):
        """Actually create the schema from scratch using self.schema_file.

        You can override this if you want to add additional sanity checks.
        """
        logger.info(
            f"Create PostgreSQL {self.name} schema at version {self.schema_version} from {self.schema_file}."
        )
        if not dry_run:
            self._execute_sql_file(self.schema_file)
            logger.info(f"Created PostgreSQL {self.name} schema (version {self.schema_version}).")

    def migrate_schema(self, start_version, dry_run):
        # Single-step migrations, e.g. (3, 4), (4, 5), ..., up to the target.
        migrations = [(v, v + 1) for v in range(start_version, self.schema_version)]
        for migration in migrations:
            expected = migration[0]
            # Re-read the installed version before each step so a migration
            # that failed to bump the version is detected immediately.
            current = self.get_installed_version()
            error_msg = f"PostgreSQL {self.name} schema: Expected version {expected}. Found version {current}."
            if not dry_run and expected != current:
                raise AssertionError(error_msg)
            logger.info(
                f"Migrate PostgreSQL {self.name} schema from version {migration[0]} to {migration[1]}."
            )
            # Migration files are named migration_<from>_<to>.sql, zero-padded.
            filename = "migration_{0:03d}_{1:03d}.sql".format(*migration)
            filepath = os.path.join(self.migrations_directory, filename)
            logger.info(f"Execute PostgreSQL {self.name} migration from {filepath}")
            if not dry_run:
                self._execute_sql_file(filepath)
        logger.info(
            f"PostgreSQL {self.name} schema migration {'simulated' if dry_run else 'done'}"
        )

    def _execute_sql_file(self, filepath):
        """Helper method to execute the SQL in a file."""
        with open(filepath) as f:
            schema = f.read()
        # Since called outside request, force commit.
        with self.client.connect(force_commit=True) as conn:
conn.execute(schema) | kinto/core/storage/postgresql/migrator.py | import logging
import os
logger = logging.getLogger(__name__)
class MigratorMixin:
"""Mixin to allow the running of migrations.
Your class must provide a `client` attribute (a PostgreSQLClient),
as well as override some class attributes.
"""
"""Name of this migrator (e.g. "storage"). Override this."""
name = None
"""The current "newest" schema version. Override this."""
schema_version = None
"""The file to find the current "newest" schema in. Override this."""
schema_file = None
"""The directory to find migration files in.
Migrations should each be a file named migration_nnn_mmm.sql, where mmm = nnn + 1.
"""
migrations_directory = None
def get_installed_version(self):
"""Return current version of schema or None if none found.
Override this.
This may be called several times during a single migration.
"""
raise NotImplementedError("method not overridden") # pragma: no cover
def create_or_migrate_schema(self, dry_run=False):
"""Either create or migrate the schema, as needed."""
version = self.get_installed_version()
if not version:
self.create_schema(dry_run)
return
logger.info(f"Detected PostgreSQL {self.name} schema version {version}.")
if version == self.schema_version:
logger.info(f"PostgreSQL {self.name} schema is up-to-date.")
return
self.migrate_schema(version, dry_run)
def create_schema(self, dry_run):
"""Actually create the schema from scratch using self.schema_file.
You can override this if you want to add additional sanity checks.
"""
logger.info(
f"Create PostgreSQL {self.name} schema at version {self.schema_version} from {self.schema_file}."
)
if not dry_run:
self._execute_sql_file(self.schema_file)
logger.info(f"Created PostgreSQL {self.name} schema (version {self.schema_version}).")
def migrate_schema(self, start_version, dry_run):
migrations = [(v, v + 1) for v in range(start_version, self.schema_version)]
for migration in migrations:
expected = migration[0]
current = self.get_installed_version()
error_msg = f"PostgreSQL {self.name} schema: Expected version {expected}. Found version {current}."
if not dry_run and expected != current:
raise AssertionError(error_msg)
logger.info(
f"Migrate PostgreSQL {self.name} schema from version {migration[0]} to {migration[1]}."
)
filename = "migration_{0:03d}_{1:03d}.sql".format(*migration)
filepath = os.path.join(self.migrations_directory, filename)
logger.info(f"Execute PostgreSQL {self.name} migration from {filepath}")
if not dry_run:
self._execute_sql_file(filepath)
logger.info(
f"PostgreSQL {self.name} schema migration {'simulated' if dry_run else 'done'}"
)
def _execute_sql_file(self, filepath):
"""Helper method to execute the SQL in a file."""
with open(filepath) as f:
schema = f.read()
# Since called outside request, force commit.
with self.client.connect(force_commit=True) as conn:
conn.execute(schema) | 0.777553 | 0.21844 |
import os
from tqdm import trange
import torch
from torch.nn import functional as F
from torch import distributions as dist
from src.common import (
compute_iou, make_3d_grid, add_key,
)
from src.utils import visualize as vis
from src.training import BaseTrainer
class Trainer(BaseTrainer):
''' Trainer object for the Occupancy Network.
Args:
model (nn.Module): Occupancy Network model
optimizer (optimizer): pytorch optimizer object
device (device): pytorch device
input_type (str): input type
vis_dir (str): visualization directory
threshold (float): threshold value
eval_sample (bool): whether to evaluate samples
'''
def __init__(self, model, optimizer, device=None, input_type='pointcloud',
vis_dir=None, threshold=0.5, eval_sample=False):
self.model = model
self.optimizer = optimizer
self.device = device
self.input_type = input_type
self.vis_dir = vis_dir
self.threshold = threshold
self.eval_sample = eval_sample
if vis_dir is not None and not os.path.exists(vis_dir):
os.makedirs(vis_dir)
def train_step(self, data):
''' Performs a training step.
Args:
data (dict): data dictionary
'''
self.model.train()
self.optimizer.zero_grad()
loss = self.compute_loss(data)
loss.backward()
self.optimizer.step()
return loss.item()
def eval_step(self, data):
''' Performs an evaluation step.
Args:
data (dict): data dictionary
'''
self.model.eval()
device = self.device
threshold = self.threshold
eval_dict = {}
points = data.get('points').to(device)
occ = data.get('points.occ').to(device)
inputs = data.get('inputs', torch.empty(points.size(0), 0)).to(device)
voxels_occ = data.get('voxels')
points_iou = data.get('points_iou').to(device)
occ_iou = data.get('points_iou.occ').to(device)
batch_size = points.size(0)
kwargs = {}
# add pre-computed index
inputs = add_key(inputs, data.get('inputs.ind'), 'points', 'index', device=device)
# add pre-computed normalized coordinates
points = add_key(points, data.get('points.normalized'), 'p', 'p_n', device=device)
points_iou = add_key(points_iou, data.get('points_iou.normalized'), 'p', 'p_n', device=device)
# Compute iou
with torch.no_grad():
p_out = self.model(points_iou, inputs,
sample=self.eval_sample, **kwargs)
occ_iou_np = (occ_iou >= 0.5).cpu().numpy()
occ_iou_hat_np = (p_out.probs >= threshold).cpu().numpy()
iou = compute_iou(occ_iou_np, occ_iou_hat_np).mean()
eval_dict['iou'] = iou
# Estimate voxel iou
if voxels_occ is not None:
voxels_occ = voxels_occ.to(device)
points_voxels = make_3d_grid(
(-0.5 + 1/64,) * 3, (0.5 - 1/64,) * 3, voxels_occ.shape[1:])
points_voxels = points_voxels.expand(
batch_size, *points_voxels.size())
points_voxels = points_voxels.to(device)
with torch.no_grad():
p_out = self.model(points_voxels, inputs,
sample=self.eval_sample, **kwargs)
voxels_occ_np = (voxels_occ >= 0.5).cpu().numpy()
occ_hat_np = (p_out.probs >= threshold).cpu().numpy()
iou_voxels = compute_iou(voxels_occ_np, occ_hat_np).mean()
eval_dict['iou_voxels'] = iou_voxels
return eval_dict
def compute_loss(self, data):
''' Computes the loss.
Args:
data (dict): data dictionary
'''
device = self.device
p = data.get('points').to(device)
occ = data.get('points.occ').to(device)
inputs = data.get('inputs', torch.empty(p.size(0), 0)).to(device)
if 'pointcloud_crop' in data.keys():
# add pre-computed index
inputs = add_key(inputs, data.get('inputs.ind'), 'points', 'index', device=device)
inputs['mask'] = data.get('inputs.mask').to(device)
# add pre-computed normalized coordinates
p = add_key(p, data.get('points.normalized'), 'p', 'p_n', device=device)
c = self.model.encode_inputs(inputs)
kwargs = {}
# General points
logits = self.model.decode(p, c, **kwargs).logits
loss_i = F.binary_cross_entropy_with_logits(
logits, occ, reduction='none')
loss = loss_i.sum(-1).mean()
return loss | src/conv_onet/training.py | import os
from tqdm import trange
import torch
from torch.nn import functional as F
from torch import distributions as dist
from src.common import (
compute_iou, make_3d_grid, add_key,
)
from src.utils import visualize as vis
from src.training import BaseTrainer
class Trainer(BaseTrainer):
''' Trainer object for the Occupancy Network.
Args:
model (nn.Module): Occupancy Network model
optimizer (optimizer): pytorch optimizer object
device (device): pytorch device
input_type (str): input type
vis_dir (str): visualization directory
threshold (float): threshold value
eval_sample (bool): whether to evaluate samples
'''
def __init__(self, model, optimizer, device=None, input_type='pointcloud',
vis_dir=None, threshold=0.5, eval_sample=False):
self.model = model
self.optimizer = optimizer
self.device = device
self.input_type = input_type
self.vis_dir = vis_dir
self.threshold = threshold
self.eval_sample = eval_sample
if vis_dir is not None and not os.path.exists(vis_dir):
os.makedirs(vis_dir)
def train_step(self, data):
''' Performs a training step.
Args:
data (dict): data dictionary
'''
self.model.train()
self.optimizer.zero_grad()
loss = self.compute_loss(data)
loss.backward()
self.optimizer.step()
return loss.item()
def eval_step(self, data):
''' Performs an evaluation step.
Args:
data (dict): data dictionary
'''
self.model.eval()
device = self.device
threshold = self.threshold
eval_dict = {}
points = data.get('points').to(device)
occ = data.get('points.occ').to(device)
inputs = data.get('inputs', torch.empty(points.size(0), 0)).to(device)
voxels_occ = data.get('voxels')
points_iou = data.get('points_iou').to(device)
occ_iou = data.get('points_iou.occ').to(device)
batch_size = points.size(0)
kwargs = {}
# add pre-computed index
inputs = add_key(inputs, data.get('inputs.ind'), 'points', 'index', device=device)
# add pre-computed normalized coordinates
points = add_key(points, data.get('points.normalized'), 'p', 'p_n', device=device)
points_iou = add_key(points_iou, data.get('points_iou.normalized'), 'p', 'p_n', device=device)
# Compute iou
with torch.no_grad():
p_out = self.model(points_iou, inputs,
sample=self.eval_sample, **kwargs)
occ_iou_np = (occ_iou >= 0.5).cpu().numpy()
occ_iou_hat_np = (p_out.probs >= threshold).cpu().numpy()
iou = compute_iou(occ_iou_np, occ_iou_hat_np).mean()
eval_dict['iou'] = iou
# Estimate voxel iou
if voxels_occ is not None:
voxels_occ = voxels_occ.to(device)
points_voxels = make_3d_grid(
(-0.5 + 1/64,) * 3, (0.5 - 1/64,) * 3, voxels_occ.shape[1:])
points_voxels = points_voxels.expand(
batch_size, *points_voxels.size())
points_voxels = points_voxels.to(device)
with torch.no_grad():
p_out = self.model(points_voxels, inputs,
sample=self.eval_sample, **kwargs)
voxels_occ_np = (voxels_occ >= 0.5).cpu().numpy()
occ_hat_np = (p_out.probs >= threshold).cpu().numpy()
iou_voxels = compute_iou(voxels_occ_np, occ_hat_np).mean()
eval_dict['iou_voxels'] = iou_voxels
return eval_dict
def compute_loss(self, data):
''' Computes the loss.
Args:
data (dict): data dictionary
'''
device = self.device
p = data.get('points').to(device)
occ = data.get('points.occ').to(device)
inputs = data.get('inputs', torch.empty(p.size(0), 0)).to(device)
if 'pointcloud_crop' in data.keys():
# add pre-computed index
inputs = add_key(inputs, data.get('inputs.ind'), 'points', 'index', device=device)
inputs['mask'] = data.get('inputs.mask').to(device)
# add pre-computed normalized coordinates
p = add_key(p, data.get('points.normalized'), 'p', 'p_n', device=device)
c = self.model.encode_inputs(inputs)
kwargs = {}
# General points
logits = self.model.decode(p, c, **kwargs).logits
loss_i = F.binary_cross_entropy_with_logits(
logits, occ, reduction='none')
loss = loss_i.sum(-1).mean()
return loss | 0.922416 | 0.365627 |