"""Support for Xiaomi Mi Air Quality Monitor (PM2.5) and Humidifier."""
from __future__ import annotations
from dataclasses import dataclass
import logging
from miio import AirQualityMonitor, DeviceException
from miio.gateway.gateway import (
GATEWAY_MODEL_AC_V1,
GATEWAY_MODEL_AC_V2,
GATEWAY_MODEL_AC_V3,
GATEWAY_MODEL_AQARA,
GATEWAY_MODEL_EU,
GatewayException,
)
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
AREA_SQUARE_METERS,
ATTR_BATTERY_LEVEL,
ATTR_TEMPERATURE,
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONCENTRATION_PARTS_PER_MILLION,
CONF_HOST,
CONF_TOKEN,
LIGHT_LUX,
PERCENTAGE,
POWER_WATT,
PRESSURE_HPA,
TEMP_CELSIUS,
TIME_HOURS,
TIME_SECONDS,
VOLUME_CUBIC_METERS,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util import dt as dt_util
from . import VacuumCoordinatorDataAttributes
from .const import (
CONF_DEVICE,
CONF_FLOW_TYPE,
CONF_GATEWAY,
CONF_MODEL,
DOMAIN,
KEY_COORDINATOR,
KEY_DEVICE,
MODEL_AIRFRESH_A1,
MODEL_AIRFRESH_T2017,
MODEL_AIRFRESH_VA2,
MODEL_AIRHUMIDIFIER_CA1,
MODEL_AIRHUMIDIFIER_CB1,
MODEL_AIRPURIFIER_3C,
MODEL_AIRPURIFIER_PRO,
MODEL_AIRPURIFIER_PRO_V7,
MODEL_AIRPURIFIER_V2,
MODEL_AIRPURIFIER_V3,
MODEL_FAN_P5,
MODEL_FAN_V2,
MODEL_FAN_V3,
MODEL_FAN_ZA1,
MODEL_FAN_ZA3,
MODEL_FAN_ZA4,
MODEL_FAN_ZA5,
MODELS_AIR_QUALITY_MONITOR,
MODELS_HUMIDIFIER_MIIO,
MODELS_HUMIDIFIER_MIOT,
MODELS_HUMIDIFIER_MJJSQ,
MODELS_PURIFIER_MIIO,
MODELS_PURIFIER_MIOT,
MODELS_VACUUM,
ROBOROCK_GENERIC,
ROCKROBO_GENERIC,
)
from .device import XiaomiCoordinatedMiioEntity, XiaomiMiioEntity
from .gateway import XiaomiGatewayDevice
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Xiaomi Miio Sensor"
UNIT_LUMEN = "lm"
ATTR_ACTUAL_SPEED = "actual_speed"
ATTR_AIR_QUALITY = "air_quality"
ATTR_AQI = "aqi"
ATTR_BATTERY = "battery"
ATTR_CARBON_DIOXIDE = "co2"
ATTR_CHARGING = "charging"
ATTR_DISPLAY_CLOCK = "display_clock"
ATTR_FILTER_LIFE_REMAINING = "filter_life_remaining"
ATTR_FILTER_HOURS_USED = "filter_hours_used"
ATTR_FILTER_USE = "filter_use"
ATTR_HUMIDITY = "humidity"
ATTR_ILLUMINANCE = "illuminance"
ATTR_ILLUMINANCE_LUX = "illuminance_lux"
ATTR_LOAD_POWER = "load_power"
ATTR_MOTOR2_SPEED = "motor2_speed"
ATTR_MOTOR_SPEED = "motor_speed"
ATTR_NIGHT_MODE = "night_mode"
ATTR_NIGHT_TIME_BEGIN = "night_time_begin"
ATTR_NIGHT_TIME_END = "night_time_end"
ATTR_PM25 = "pm25"
ATTR_POWER = "power"
ATTR_PRESSURE = "pressure"
ATTR_PURIFY_VOLUME = "purify_volume"
ATTR_SENSOR_STATE = "sensor_state"
ATTR_USE_TIME = "use_time"
ATTR_WATER_LEVEL = "water_level"
ATTR_DND_START = "start"
ATTR_DND_END = "end"
ATTR_LAST_CLEAN_TIME = "duration"
ATTR_LAST_CLEAN_AREA = "area"
ATTR_STATUS_CLEAN_TIME = "clean_time"
ATTR_STATUS_CLEAN_AREA = "clean_area"
ATTR_LAST_CLEAN_START = "start"
ATTR_LAST_CLEAN_END = "end"
ATTR_CLEAN_HISTORY_TOTAL_DURATION = "total_duration"
ATTR_CLEAN_HISTORY_TOTAL_AREA = "total_area"
ATTR_CLEAN_HISTORY_COUNT = "count"
ATTR_CLEAN_HISTORY_DUST_COLLECTION_COUNT = "dust_collection_count"
ATTR_CONSUMABLE_STATUS_MAIN_BRUSH_LEFT = "main_brush_left"
ATTR_CONSUMABLE_STATUS_SIDE_BRUSH_LEFT = "side_brush_left"
ATTR_CONSUMABLE_STATUS_FILTER_LEFT = "filter_left"
ATTR_CONSUMABLE_STATUS_SENSOR_DIRTY_LEFT = "sensor_dirty_left"
@dataclass
class XiaomiMiioSensorDescription(SensorEntityDescription):
"""Class that holds device specific info for a xiaomi aqara or humidifier sensor."""
attributes: tuple = ()
parent_key: str | None = None
SENSOR_TYPES = {
ATTR_TEMPERATURE: XiaomiMiioSensorDescription(
key=ATTR_TEMPERATURE,
name="Temperature",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
),
ATTR_HUMIDITY: XiaomiMiioSensorDescription(
key=ATTR_HUMIDITY,
name="Humidity",
native_unit_of_measurement=PERCENTAGE,
device_class=SensorDeviceClass.HUMIDITY,
state_class=SensorStateClass.MEASUREMENT,
),
ATTR_PRESSURE: XiaomiMiioSensorDescription(
key=ATTR_PRESSURE,
name="Pressure",
native_unit_of_measurement=PRESSURE_HPA,
device_class=SensorDeviceClass.PRESSURE,
state_class=SensorStateClass.MEASUREMENT,
),
ATTR_LOAD_POWER: XiaomiMiioSensorDescription(
key=ATTR_LOAD_POWER,
name="Load Power",
native_unit_of_measurement=POWER_WATT,
device_class=SensorDeviceClass.POWER,
),
ATTR_WATER_LEVEL: XiaomiMiioSensorDescription(
key=ATTR_WATER_LEVEL,
name="Water Level",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:water-check",
state_class=SensorStateClass.MEASUREMENT,
entity_category=EntityCategory.DIAGNOSTIC,
),
ATTR_ACTUAL_SPEED: XiaomiMiioSensorDescription(
key=ATTR_ACTUAL_SPEED,
name="Actual Speed",
native_unit_of_measurement="rpm",
icon="mdi:fast-forward",
state_class=SensorStateClass.MEASUREMENT,
entity_category=EntityCategory.DIAGNOSTIC,
),
ATTR_MOTOR_SPEED: XiaomiMiioSensorDescription(
key=ATTR_MOTOR_SPEED,
name="Motor Speed",
native_unit_of_measurement="rpm",
icon="mdi:fast-forward",
state_class=SensorStateClass.MEASUREMENT,
entity_category=EntityCategory.DIAGNOSTIC,
),
ATTR_MOTOR2_SPEED: XiaomiMiioSensorDescription(
key=ATTR_MOTOR2_SPEED,
name="Second Motor Speed",
native_unit_of_measurement="rpm",
icon="mdi:fast-forward",
state_class=SensorStateClass.MEASUREMENT,
entity_category=EntityCategory.DIAGNOSTIC,
),
ATTR_USE_TIME: XiaomiMiioSensorDescription(
key=ATTR_USE_TIME,
name="Use Time",
native_unit_of_measurement=TIME_SECONDS,
icon="mdi:progress-clock",
state_class=SensorStateClass.TOTAL_INCREASING,
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
),
ATTR_ILLUMINANCE: XiaomiMiioSensorDescription(
key=ATTR_ILLUMINANCE,
name="Illuminance",
native_unit_of_measurement=UNIT_LUMEN,
device_class=SensorDeviceClass.ILLUMINANCE,
state_class=SensorStateClass.MEASUREMENT,
),
ATTR_ILLUMINANCE_LUX: XiaomiMiioSensorDescription(
key=ATTR_ILLUMINANCE,
name="Illuminance",
native_unit_of_measurement=LIGHT_LUX,
device_class=SensorDeviceClass.ILLUMINANCE,
state_class=SensorStateClass.MEASUREMENT,
),
    ATTR_AIR_QUALITY: XiaomiMiioSensorDescription(
        key=ATTR_AIR_QUALITY,
        name="Air Quality",
        native_unit_of_measurement="AQI",
        icon="mdi:cloud",
        state_class=SensorStateClass.MEASUREMENT,
    ),
ATTR_PM25: XiaomiMiioSensorDescription(
key=ATTR_AQI,
name="PM2.5",
native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
device_class=SensorDeviceClass.PM25,
state_class=SensorStateClass.MEASUREMENT,
),
ATTR_FILTER_LIFE_REMAINING: XiaomiMiioSensorDescription(
key=ATTR_FILTER_LIFE_REMAINING,
name="Filter Life Remaining",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:air-filter",
state_class=SensorStateClass.MEASUREMENT,
attributes=("filter_type",),
entity_category=EntityCategory.DIAGNOSTIC,
),
ATTR_FILTER_USE: XiaomiMiioSensorDescription(
key=ATTR_FILTER_HOURS_USED,
name="Filter Use",
native_unit_of_measurement=TIME_HOURS,
icon="mdi:clock-outline",
state_class=SensorStateClass.MEASUREMENT,
entity_category=EntityCategory.DIAGNOSTIC,
),
ATTR_CARBON_DIOXIDE: XiaomiMiioSensorDescription(
key=ATTR_CARBON_DIOXIDE,
name="Carbon Dioxide",
native_unit_of_measurement=CONCENTRATION_PARTS_PER_MILLION,
device_class=SensorDeviceClass.CO2,
state_class=SensorStateClass.MEASUREMENT,
),
ATTR_PURIFY_VOLUME: XiaomiMiioSensorDescription(
key=ATTR_PURIFY_VOLUME,
name="Purify Volume",
native_unit_of_measurement=VOLUME_CUBIC_METERS,
device_class=SensorDeviceClass.GAS,
state_class=SensorStateClass.TOTAL_INCREASING,
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
),
ATTR_BATTERY: XiaomiMiioSensorDescription(
key=ATTR_BATTERY,
name="Battery",
native_unit_of_measurement=PERCENTAGE,
device_class=SensorDeviceClass.BATTERY,
state_class=SensorStateClass.MEASUREMENT,
entity_category=EntityCategory.DIAGNOSTIC,
),
}
HUMIDIFIER_MIIO_SENSORS = (
ATTR_HUMIDITY,
ATTR_TEMPERATURE,
ATTR_USE_TIME,
ATTR_WATER_LEVEL,
)
HUMIDIFIER_CA1_CB1_SENSORS = (
ATTR_HUMIDITY,
ATTR_TEMPERATURE,
ATTR_MOTOR_SPEED,
ATTR_USE_TIME,
ATTR_WATER_LEVEL,
)
HUMIDIFIER_MIOT_SENSORS = (
ATTR_ACTUAL_SPEED,
ATTR_HUMIDITY,
ATTR_TEMPERATURE,
ATTR_USE_TIME,
ATTR_WATER_LEVEL,
)
HUMIDIFIER_MJJSQ_SENSORS = (ATTR_HUMIDITY, ATTR_TEMPERATURE)
PURIFIER_MIIO_SENSORS = (
ATTR_FILTER_LIFE_REMAINING,
ATTR_FILTER_USE,
ATTR_HUMIDITY,
ATTR_MOTOR_SPEED,
ATTR_PM25,
ATTR_TEMPERATURE,
ATTR_USE_TIME,
)
PURIFIER_MIOT_SENSORS = (
ATTR_FILTER_LIFE_REMAINING,
ATTR_FILTER_USE,
ATTR_HUMIDITY,
ATTR_MOTOR_SPEED,
ATTR_PM25,
ATTR_PURIFY_VOLUME,
ATTR_TEMPERATURE,
ATTR_USE_TIME,
)
PURIFIER_3C_SENSORS = (
ATTR_FILTER_LIFE_REMAINING,
ATTR_FILTER_USE,
ATTR_MOTOR_SPEED,
ATTR_PM25,
)
PURIFIER_V2_SENSORS = (
ATTR_FILTER_LIFE_REMAINING,
ATTR_FILTER_USE,
ATTR_HUMIDITY,
ATTR_MOTOR_SPEED,
ATTR_PM25,
ATTR_PURIFY_VOLUME,
ATTR_TEMPERATURE,
ATTR_USE_TIME,
)
PURIFIER_V3_SENSORS = (
ATTR_FILTER_LIFE_REMAINING,
ATTR_FILTER_USE,
ATTR_ILLUMINANCE_LUX,
ATTR_MOTOR2_SPEED,
ATTR_MOTOR_SPEED,
ATTR_PM25,
ATTR_PURIFY_VOLUME,
ATTR_USE_TIME,
)
PURIFIER_PRO_SENSORS = (
ATTR_FILTER_LIFE_REMAINING,
ATTR_FILTER_USE,
ATTR_HUMIDITY,
ATTR_ILLUMINANCE_LUX,
ATTR_MOTOR2_SPEED,
ATTR_MOTOR_SPEED,
ATTR_PM25,
ATTR_PURIFY_VOLUME,
ATTR_TEMPERATURE,
ATTR_USE_TIME,
)
PURIFIER_PRO_V7_SENSORS = (
ATTR_FILTER_LIFE_REMAINING,
ATTR_FILTER_USE,
ATTR_HUMIDITY,
ATTR_ILLUMINANCE_LUX,
ATTR_MOTOR2_SPEED,
ATTR_MOTOR_SPEED,
ATTR_PM25,
ATTR_TEMPERATURE,
ATTR_USE_TIME,
)
AIRFRESH_SENSORS = (
ATTR_CARBON_DIOXIDE,
ATTR_FILTER_LIFE_REMAINING,
ATTR_FILTER_USE,
ATTR_HUMIDITY,
ATTR_PM25,
ATTR_TEMPERATURE,
ATTR_USE_TIME,
)
AIRFRESH_SENSORS_A1 = (
ATTR_CARBON_DIOXIDE,
ATTR_TEMPERATURE,
)
AIRFRESH_SENSORS_T2017 = (
ATTR_CARBON_DIOXIDE,
ATTR_TEMPERATURE,
)
FAN_V2_V3_SENSORS = (
ATTR_BATTERY,
ATTR_HUMIDITY,
ATTR_TEMPERATURE,
)
FAN_ZA5_SENSORS = (ATTR_HUMIDITY, ATTR_TEMPERATURE)
MODEL_TO_SENSORS_MAP = {
MODEL_AIRFRESH_A1: AIRFRESH_SENSORS_A1,
MODEL_AIRFRESH_VA2: AIRFRESH_SENSORS,
MODEL_AIRFRESH_T2017: AIRFRESH_SENSORS_T2017,
MODEL_AIRHUMIDIFIER_CA1: HUMIDIFIER_CA1_CB1_SENSORS,
MODEL_AIRHUMIDIFIER_CB1: HUMIDIFIER_CA1_CB1_SENSORS,
MODEL_AIRPURIFIER_3C: PURIFIER_3C_SENSORS,
MODEL_AIRPURIFIER_PRO: PURIFIER_PRO_SENSORS,
MODEL_AIRPURIFIER_PRO_V7: PURIFIER_PRO_V7_SENSORS,
MODEL_AIRPURIFIER_V2: PURIFIER_V2_SENSORS,
MODEL_AIRPURIFIER_V3: PURIFIER_V3_SENSORS,
MODEL_FAN_V2: FAN_V2_V3_SENSORS,
MODEL_FAN_V3: FAN_V2_V3_SENSORS,
MODEL_FAN_ZA5: FAN_ZA5_SENSORS,
}
VACUUM_SENSORS = {
f"dnd_{ATTR_DND_START}": XiaomiMiioSensorDescription(
key=ATTR_DND_START,
icon="mdi:minus-circle-off",
name="DnD Start",
device_class=SensorDeviceClass.TIMESTAMP,
parent_key=VacuumCoordinatorDataAttributes.dnd_status,
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
),
f"dnd_{ATTR_DND_END}": XiaomiMiioSensorDescription(
key=ATTR_DND_END,
icon="mdi:minus-circle-off",
name="DnD End",
device_class=SensorDeviceClass.TIMESTAMP,
parent_key=VacuumCoordinatorDataAttributes.dnd_status,
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
),
f"last_clean_{ATTR_LAST_CLEAN_START}": XiaomiMiioSensorDescription(
key=ATTR_LAST_CLEAN_START,
icon="mdi:clock-time-twelve",
name="Last Clean Start",
device_class=SensorDeviceClass.TIMESTAMP,
parent_key=VacuumCoordinatorDataAttributes.last_clean_details,
entity_category=EntityCategory.DIAGNOSTIC,
),
f"last_clean_{ATTR_LAST_CLEAN_END}": XiaomiMiioSensorDescription(
key=ATTR_LAST_CLEAN_END,
icon="mdi:clock-time-twelve",
device_class=SensorDeviceClass.TIMESTAMP,
parent_key=VacuumCoordinatorDataAttributes.last_clean_details,
name="Last Clean End",
entity_category=EntityCategory.DIAGNOSTIC,
),
f"last_clean_{ATTR_LAST_CLEAN_TIME}": XiaomiMiioSensorDescription(
native_unit_of_measurement=TIME_SECONDS,
icon="mdi:timer-sand",
key=ATTR_LAST_CLEAN_TIME,
parent_key=VacuumCoordinatorDataAttributes.last_clean_details,
name="Last Clean Duration",
entity_category=EntityCategory.DIAGNOSTIC,
),
f"last_clean_{ATTR_LAST_CLEAN_AREA}": XiaomiMiioSensorDescription(
native_unit_of_measurement=AREA_SQUARE_METERS,
icon="mdi:texture-box",
key=ATTR_LAST_CLEAN_AREA,
parent_key=VacuumCoordinatorDataAttributes.last_clean_details,
name="Last Clean Area",
entity_category=EntityCategory.DIAGNOSTIC,
),
f"current_{ATTR_STATUS_CLEAN_TIME}": XiaomiMiioSensorDescription(
native_unit_of_measurement=TIME_SECONDS,
icon="mdi:timer-sand",
key=ATTR_STATUS_CLEAN_TIME,
parent_key=VacuumCoordinatorDataAttributes.status,
name="Current Clean Duration",
entity_category=EntityCategory.DIAGNOSTIC,
),
f"current_{ATTR_LAST_CLEAN_AREA}": XiaomiMiioSensorDescription(
native_unit_of_measurement=AREA_SQUARE_METERS,
icon="mdi:texture-box",
key=ATTR_STATUS_CLEAN_AREA,
parent_key=VacuumCoordinatorDataAttributes.status,
entity_category=EntityCategory.DIAGNOSTIC,
name="Current Clean Area",
),
f"clean_history_{ATTR_CLEAN_HISTORY_TOTAL_DURATION}": XiaomiMiioSensorDescription(
native_unit_of_measurement=TIME_SECONDS,
icon="mdi:timer-sand",
key=ATTR_CLEAN_HISTORY_TOTAL_DURATION,
parent_key=VacuumCoordinatorDataAttributes.clean_history_status,
name="Total duration",
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
),
f"clean_history_{ATTR_CLEAN_HISTORY_TOTAL_AREA}": XiaomiMiioSensorDescription(
native_unit_of_measurement=AREA_SQUARE_METERS,
icon="mdi:texture-box",
key=ATTR_CLEAN_HISTORY_TOTAL_AREA,
parent_key=VacuumCoordinatorDataAttributes.clean_history_status,
name="Total Clean Area",
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
),
f"clean_history_{ATTR_CLEAN_HISTORY_COUNT}": XiaomiMiioSensorDescription(
native_unit_of_measurement="",
icon="mdi:counter",
state_class=SensorStateClass.TOTAL_INCREASING,
key=ATTR_CLEAN_HISTORY_COUNT,
parent_key=VacuumCoordinatorDataAttributes.clean_history_status,
name="Total Clean Count",
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
),
f"clean_history_{ATTR_CLEAN_HISTORY_DUST_COLLECTION_COUNT}": XiaomiMiioSensorDescription(
native_unit_of_measurement="",
icon="mdi:counter",
state_class="total_increasing",
key=ATTR_CLEAN_HISTORY_DUST_COLLECTION_COUNT,
parent_key=VacuumCoordinatorDataAttributes.clean_history_status,
name="Total Dust Collection Count",
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
),
f"consumable_{ATTR_CONSUMABLE_STATUS_MAIN_BRUSH_LEFT}": XiaomiMiioSensorDescription(
native_unit_of_measurement=TIME_SECONDS,
icon="mdi:brush",
key=ATTR_CONSUMABLE_STATUS_MAIN_BRUSH_LEFT,
parent_key=VacuumCoordinatorDataAttributes.consumable_status,
name="Main Brush Left",
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
),
f"consumable_{ATTR_CONSUMABLE_STATUS_SIDE_BRUSH_LEFT}": XiaomiMiioSensorDescription(
native_unit_of_measurement=TIME_SECONDS,
icon="mdi:brush",
key=ATTR_CONSUMABLE_STATUS_SIDE_BRUSH_LEFT,
parent_key=VacuumCoordinatorDataAttributes.consumable_status,
name="Side Brush Left",
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
),
f"consumable_{ATTR_CONSUMABLE_STATUS_FILTER_LEFT}": XiaomiMiioSensorDescription(
native_unit_of_measurement=TIME_SECONDS,
icon="mdi:air-filter",
key=ATTR_CONSUMABLE_STATUS_FILTER_LEFT,
parent_key=VacuumCoordinatorDataAttributes.consumable_status,
name="Filter Left",
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
),
f"consumable_{ATTR_CONSUMABLE_STATUS_SENSOR_DIRTY_LEFT}": XiaomiMiioSensorDescription(
native_unit_of_measurement=TIME_SECONDS,
icon="mdi:eye-outline",
key=ATTR_CONSUMABLE_STATUS_SENSOR_DIRTY_LEFT,
parent_key=VacuumCoordinatorDataAttributes.consumable_status,
name="Sensor Dirty Left",
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
),
}
def _setup_vacuum_sensors(hass, config_entry, async_add_entities):
"""Set up the Xiaomi vacuum sensors."""
device = hass.data[DOMAIN][config_entry.entry_id].get(KEY_DEVICE)
coordinator = hass.data[DOMAIN][config_entry.entry_id][KEY_COORDINATOR]
entities = []
for sensor, description in VACUUM_SENSORS.items():
parent_key_data = getattr(coordinator.data, description.parent_key)
if getattr(parent_key_data, description.key, None) is None:
_LOGGER.debug(
"It seems the %s does not support the %s as the initial value is None",
config_entry.data[CONF_MODEL],
description.key,
)
continue
entities.append(
XiaomiGenericSensor(
f"{config_entry.title} {description.name}",
device,
config_entry,
f"{sensor}_{config_entry.unique_id}",
coordinator,
description,
)
)
async_add_entities(entities)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the Xiaomi sensor from a config entry."""
entities = []
if config_entry.data[CONF_FLOW_TYPE] == CONF_GATEWAY:
gateway = hass.data[DOMAIN][config_entry.entry_id][CONF_GATEWAY]
# Gateway illuminance sensor
if gateway.model not in [
GATEWAY_MODEL_AC_V1,
GATEWAY_MODEL_AC_V2,
GATEWAY_MODEL_AC_V3,
GATEWAY_MODEL_AQARA,
GATEWAY_MODEL_EU,
]:
description = SENSOR_TYPES[ATTR_ILLUMINANCE]
entities.append(
XiaomiGatewayIlluminanceSensor(
gateway, config_entry.title, config_entry.unique_id, description
)
)
# Gateway sub devices
sub_devices = gateway.devices
coordinator = hass.data[DOMAIN][config_entry.entry_id][KEY_COORDINATOR]
for sub_device in sub_devices.values():
for sensor, description in SENSOR_TYPES.items():
if sensor not in sub_device.status:
continue
entities.append(
XiaomiGatewaySensor(
coordinator, sub_device, config_entry, description
)
)
elif config_entry.data[CONF_FLOW_TYPE] == CONF_DEVICE:
host = config_entry.data[CONF_HOST]
token = config_entry.data[CONF_TOKEN]
model: str = config_entry.data[CONF_MODEL]
if model in (MODEL_FAN_ZA1, MODEL_FAN_ZA3, MODEL_FAN_ZA4, MODEL_FAN_P5):
return
if model in MODELS_AIR_QUALITY_MONITOR:
unique_id = config_entry.unique_id
name = config_entry.title
_LOGGER.debug("Initializing with host %s (token %s...)", host, token[:5])
device = AirQualityMonitor(host, token)
description = SENSOR_TYPES[ATTR_AIR_QUALITY]
entities.append(
XiaomiAirQualityMonitor(
name, device, config_entry, unique_id, description
)
)
else:
device = hass.data[DOMAIN][config_entry.entry_id][KEY_DEVICE]
sensors = []
if model in MODEL_TO_SENSORS_MAP:
sensors = MODEL_TO_SENSORS_MAP[model]
elif model in MODELS_HUMIDIFIER_MIOT:
sensors = HUMIDIFIER_MIOT_SENSORS
elif model in MODELS_HUMIDIFIER_MJJSQ:
sensors = HUMIDIFIER_MJJSQ_SENSORS
elif model in MODELS_HUMIDIFIER_MIIO:
sensors = HUMIDIFIER_MIIO_SENSORS
elif model in MODELS_PURIFIER_MIIO:
sensors = PURIFIER_MIIO_SENSORS
elif model in MODELS_PURIFIER_MIOT:
sensors = PURIFIER_MIOT_SENSORS
elif (
model in MODELS_VACUUM
or model.startswith(ROBOROCK_GENERIC)
or model.startswith(ROCKROBO_GENERIC)
):
return _setup_vacuum_sensors(hass, config_entry, async_add_entities)
for sensor, description in SENSOR_TYPES.items():
if sensor not in sensors:
continue
entities.append(
XiaomiGenericSensor(
f"{config_entry.title} {description.name}",
device,
config_entry,
f"{sensor}_{config_entry.unique_id}",
hass.data[DOMAIN][config_entry.entry_id][KEY_COORDINATOR],
description,
)
)
async_add_entities(entities)
class XiaomiGenericSensor(XiaomiCoordinatedMiioEntity, SensorEntity):
"""Representation of a Xiaomi generic sensor."""
entity_description: XiaomiMiioSensorDescription
def __init__(self, name, device, entry, unique_id, coordinator, description):
"""Initialize the entity."""
super().__init__(name, device, entry, unique_id, coordinator)
self.entity_description = description
self._attr_unique_id = unique_id
self._attr_native_value = self._determine_native_value()
self._attr_extra_state_attributes = self._extract_attributes(coordinator.data)
@callback
def _extract_attributes(self, data):
"""Return state attributes with valid values."""
return {
attr: value
for attr in self.entity_description.attributes
if hasattr(data, attr)
and (value := self._extract_value_from_attribute(data, attr)) is not None
}
@callback
def _handle_coordinator_update(self):
"""Fetch state from the device."""
native_value = self._determine_native_value()
# Sometimes (quite rarely) the device returns None as the sensor value so we
# check that the value is not None before updating the state.
if native_value is not None:
self._attr_native_value = native_value
self._attr_extra_state_attributes = self._extract_attributes(
self.coordinator.data
)
self.async_write_ha_state()
def _determine_native_value(self):
"""Determine native value."""
if self.entity_description.parent_key is not None:
native_value = self._extract_value_from_attribute(
getattr(self.coordinator.data, self.entity_description.parent_key),
self.entity_description.key,
)
else:
native_value = self._extract_value_from_attribute(
self.coordinator.data, self.entity_description.key
)
if (
self.device_class == SensorDeviceClass.TIMESTAMP
and native_value is not None
and (native_datetime := dt_util.parse_datetime(str(native_value)))
is not None
):
return native_datetime.astimezone(dt_util.UTC)
return native_value
class XiaomiAirQualityMonitor(XiaomiMiioEntity, SensorEntity):
"""Representation of a Xiaomi Air Quality Monitor."""
def __init__(self, name, device, entry, unique_id, description):
"""Initialize the entity."""
super().__init__(name, device, entry, unique_id)
self._available = None
self._state = None
self._state_attrs = {
ATTR_POWER: None,
ATTR_BATTERY_LEVEL: None,
ATTR_CHARGING: None,
ATTR_DISPLAY_CLOCK: None,
ATTR_NIGHT_MODE: None,
ATTR_NIGHT_TIME_BEGIN: None,
ATTR_NIGHT_TIME_END: None,
ATTR_SENSOR_STATE: None,
}
self.entity_description = description
@property
def available(self):
"""Return true when state is known."""
return self._available
@property
def native_value(self):
"""Return the state of the device."""
return self._state
@property
def extra_state_attributes(self):
"""Return the state attributes of the device."""
return self._state_attrs
async def async_update(self):
"""Fetch state from the miio device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.aqi
self._state_attrs.update(
{
ATTR_POWER: state.power,
ATTR_CHARGING: state.usb_power,
ATTR_BATTERY_LEVEL: state.battery,
ATTR_DISPLAY_CLOCK: state.display_clock,
ATTR_NIGHT_MODE: state.night_mode,
ATTR_NIGHT_TIME_BEGIN: state.night_time_begin,
ATTR_NIGHT_TIME_END: state.night_time_end,
ATTR_SENSOR_STATE: state.sensor_state,
}
)
except DeviceException as ex:
if self._available:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
class XiaomiGatewaySensor(XiaomiGatewayDevice, SensorEntity):
"""Representation of a XiaomiGatewaySensor."""
def __init__(self, coordinator, sub_device, entry, description):
"""Initialize the XiaomiSensor."""
super().__init__(coordinator, sub_device, entry)
self._unique_id = f"{sub_device.sid}-{description.key}"
self._name = f"{description.key} ({sub_device.sid})".capitalize()
self.entity_description = description
@property
def native_value(self):
"""Return the state of the sensor."""
return self._sub_device.status[self.entity_description.key]
class XiaomiGatewayIlluminanceSensor(SensorEntity):
"""Representation of the gateway device's illuminance sensor."""
def __init__(self, gateway_device, gateway_name, gateway_device_id, description):
"""Initialize the entity."""
self._attr_name = f"{gateway_name} {description.name}"
self._attr_unique_id = f"{gateway_device_id}-{description.key}"
self._attr_device_info = {"identifiers": {(DOMAIN, gateway_device_id)}}
self._gateway = gateway_device
self.entity_description = description
self._available = False
self._state = None
@property
def available(self):
"""Return true when state is known."""
return self._available
@property
def native_value(self):
"""Return the state of the device."""
return self._state
async def async_update(self):
"""Fetch state from the device."""
try:
self._state = await self.hass.async_add_executor_job(
self._gateway.get_illumination
)
self._available = True
except GatewayException as ex:
if self._available:
self._available = False
_LOGGER.error(
"Got exception while fetching the gateway illuminance state: %s", ex
)
# ---------------------------------------------------------------------------
import subprocess
import sys
import getopt
import os
from datetime import datetime, time
def substring(s, debut, fin):
pos = s.find(debut)
if pos >= 0:
if fin == "":
return s[pos+len(debut):]
else:
pos2 = s.find(fin, pos)
if pos2 >= 0:
return s[pos+len(debut):pos2]
else:
return s[pos+len(debut):]
else:
return ""
def decoupe_stdout(s):
resultat = {
"duree_build": "",
"duree_run": ""
}
s2 = substring(s, "Build ...", "Run ...")
if s2 != "":
s02 = substring(s2, "system", "elapsed")
s02 = s02.strip()
resultat["duree_build"] = s02
s3 = substring(s, "Run ...", "")
if s3 != "":
s03 = substring(s3, "system", "elapsed")
s03 = s03.strip()
resultat["duree_run"] = s03
return resultat
def getNano(t):
    # Despite the name, this returns *microseconds*: the "%M:%S.%f" elapsed
    # times extracted from the `time` output are flattened to one integer.
    t2 = datetime.strptime(t, "%M:%S.%f")
    return t2.minute*60*1000000 + t2.second*1000000 + t2.microsecond
def nanoToDatetime(nanos):
    # Converts the microsecond count back to a datetime.time object. The local
    # names say "nanos"/"milli", but the values are microseconds, which is what
    # time() expects as its fourth argument.
    heure = nanos//(60*60*1000000)
    minute = (nanos//(60*1000000)) % 60
    secondes = (nanos//(1000000)) % 60
    milli = nanos % 1000000
    return time(heure, minute, secondes, milli)
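# Example (illustrative): getNano("1:02.500000") -> 62500000 microseconds,
# and nanoToDatetime(62500000) -> datetime.time(0, 1, 2, 500000).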
def convToTime(s):
if s!="":
t=getNano(s)
return nanoToDatetime(t)
else:
return None
def build_run(param):
list_langage = param["langage"]
affiche_stdout = param["affiche_stdout"]
nb_operation = param["nb_operation"]
for lang in list_langage:
print("run:"+lang)
for nbop in nb_operation:
print("nb op:"+nbop)
for i in range(0, param["nbrun"]):
param_run = ["python", "build.py",
"--langage="+lang, "--nbop="+nbop]
if i > 0:
param_run.append("--norebuild")
list_files = subprocess.run(
param_run, capture_output=True)
print("The exit code was: %d" % list_files.returncode)
                if list_files.stdout is not None:
if affiche_stdout:
print("res="+list_files.stdout.decode('utf-8'))
p = decoupe_stdout(list_files.stdout.decode('utf-8'))
print("build:"+p["duree_build"])
print("run:"+p["duree_run"])
time_build = convToTime(p["duree_build"])
time_run = convToTime(p["duree_run"])
#n=time.fromisoformat(p["duree_run"])
#print("time0:",n)
print("time:",time_build, time_run)
#print("time2:",nanoToDatetime(time_build), nanoToDatetime(time_run))
else:
if affiche_stdout:
print("res=")
def main(argv):
try:
opts, args = getopt.getopt(
argv, "hl:a:n:d", ["langage=", "action=", "nbop=", "debug", "nbrun=", "affiche_stdout"])
param = {
"langage": ["c"],
"action": "sort",
"nb_operation": ["100"],
"debug": False,
"nbrun": 5,
"affiche_stdout": False
}
for opt, arg in opts:
if opt == '-h':
                print('usage: build.py -l <langage> -a <action> -n <nbop> [-d] [--nbrun N] [--affiche_stdout]')
sys.exit()
elif opt in ("-l", "--langage"):
if "," in arg:
param["langage"] = arg.split(",")
else:
param["langage"] = [arg]
elif opt in ("-a", "--action"):
param["action"] = arg
elif opt in ("-n", "--nbop"):
if "," in arg:
param["nb_operation"] = arg.split(",")
else:
param["nb_operation"] = [arg]
elif opt in ("-d", "--debug"):
param["debug"] = True
elif opt in ("--nbrun"):
param["nbrun"] = int(arg)
elif opt in ("--affiche_stdout"):
param["affiche_stdout"] = True
build_run(param)
except getopt.GetoptError:
        print('usage: build.py -l <langage> -a <action> -n <nbop> [-d] [--nbrun N] [--affiche_stdout]')
sys.exit(1)
if __name__ == "__main__":
main(sys.argv[1:])
# ---------------------------------------------------------------------------
from twitchstream.outputvideo import TwitchBufferedOutputStream
import argparse
import time
import numpy as np
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
required = parser.add_argument_group('required arguments')
required.add_argument('-s', '--streamkey',
help='twitch streamkey',
required=True)
args = parser.parse_args()
    # open one buffered output stream to send video (and audio) to Twitch;
    # the chat-interaction stream from the original twitchstream example is
    # not used in this script
with TwitchBufferedOutputStream(
twitch_stream_key=args.streamkey,
width=640,
height=480,
fps=30.,
enable_audio=True,
verbose=False) as videostream:
frame = np.zeros((480, 640, 3))
frequency = 100
last_phase = 0
# The main loop to create videos
while True:
            # Each iteration tops up whichever buffer (video or audio) is
            # running low; audio and video are buffered separately, so both
            # must be fed continuously to stay in sync.
# If there are not enough video frames left,
# add some more.
if videostream.get_video_frame_buffer_state() < 30:
videostream.send_video_frame(frame)
# If there are not enough audio fragments left,
# add some more, but take care to stay in sync with
# the video! Audio and video buffer separately,
# so they will go out of sync if the number of video
# frames does not match the number of audio samples!
elif videostream.get_audio_buffer_state() < 30:
x = np.linspace(last_phase,
last_phase +
frequency*2*np.pi/videostream.fps,
int(44100 / videostream.fps) + 1)
last_phase = x[-1]
audio = np.sin(x[:-1])
videostream.send_audio(audio, audio)
# If nothing is happening, it is okay to sleep for a while
# and take some pressure of the CPU. But not too long, if
# the buffers run dry, audio and video will go out of sync.
else:
time.sleep(.001)
# ---------------------------------------------------------------------------
from __future__ import unicode_literals
from .base import Base
from trustar2.base import fluent, ParamsSerializer, Param, get_timestamp
from trustar2.trustar_enums import AttributeTypes, ObservableTypes
@fluent
class Entity(Base):
FIELD_METHOD_MAPPING = {
"validFrom": "set_valid_from",
"validTo": "set_valid_to",
"confidenceScore": "set_confidence_score",
}
VALID_TYPES = list(AttributeTypes.members()) + list(ObservableTypes.members())
def __init__(self, validator, entity_type, value, alias='entity'):
self.params = ParamsSerializer()
self.validator = validator
if entity_type not in validator.members():
            raise AttributeError(
                "Entity type should be one of the following: {}".format(
                    list(validator.members())
                ))
self.key = alias
self.set_custom_param(self.key, {"value": value, "type": entity_type})
def __repr__(self):
entity = "Observable" if isinstance(self.validator, ObservableTypes) else "Attribute"
return "{}(type={}, value={})".format(entity, self.type, self.value)
@classmethod
def attribute(cls, entity_type, value):
return Entity(AttributeTypes, entity_type, value)
@classmethod
def observable(cls, entity_type, value):
return Entity(ObservableTypes, entity_type, value)
@property
def type(self):
return self.params.get(self.key).get("type")
@property
def value(self):
return self.params.get(self.key).get("value")
@property
def valid_to(self):
return self.params.get("validTo")
@property
def valid_from(self):
return self.params.get("validFrom")
@property
def malicious_score(self):
return self.params.get("maliciousScore")
@property
def confidence_score(self):
return self.params.get("confidenceScore")
@property
def properties(self):
return self.params.get("properties")
def set_valid_from(self, valid_from):
if valid_from is not None:
valid_from = get_timestamp(valid_from) if not isinstance(valid_from, int) else valid_from
self.set_custom_param("validFrom", valid_from)
def set_valid_to(self, valid_to):
if valid_to is not None:
valid_to = get_timestamp(valid_to) if not isinstance(valid_to, int) else valid_to
self.set_custom_param("validTo", valid_to)
def set_confidence_score(self, confidence_score):
if confidence_score is not None:
self.set_custom_param("confidenceScore", confidence_score)
def set_malicious_score(self, malicious_score):
self.set_custom_param("maliciousScore", malicious_score)
def set_properties(self, properties):
if len(properties) > 20:
raise AttributeError("properties field can not have more than 20 elements")
for k, v in properties.items():
if not isinstance(k, type("")) or not isinstance(v, type("")): # py2 / py3 compatibility
raise AttributeError("Both key and value of the properties should be strings.")
self.set_custom_param("properties", properties)
def set_custom_param(self, key, value):
param = Param(key=key, value=value)
self.params.add(param)
def serialize(self):
return self.params.serialize()
@classmethod
def _get_entity_obj(cls, entity_type, entity):
return (
cls.attribute(entity_type, entity.get("value"))
if entity_type in AttributeTypes.members()
else cls.observable(entity_type, entity.get("value"))
)
@classmethod
def from_dict(cls, entity_dict):
entity = entity_dict.pop("entity")
entity_type = entity.get("type")
if entity_type not in cls.VALID_TYPES:
raise AttributeError("Entity type does not correspond to a valid entity type")
entity_obj = cls._get_entity_obj(entity_type, entity)
for field, value in entity_dict.items():
method_name = cls.FIELD_METHOD_MAPPING.get(field)
if method_name:
method = getattr(entity_obj, method_name)
method(value)
return entity_obj
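# Example usage (illustrative; the concrete enum member names, e.g. "IP4",
# live in trustar2.trustar_enums and are assumed here):
#
#     obs = Entity.observable("IP4", "8.8.8.8")
#     obs.set_valid_from(1614900000000)   # epoch millis pass through unchanged
#     obs.set_confidence_score("HIGH")    # setters are fluent via @fluent
#     payload = obs.serialize()
#
#     # Round-trip from a serialized dict:
#     entity = Entity.from_dict({"entity": {"type": "IP4", "value": "8.8.8.8"}})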
# ---------------------------------------------------------------------------
# Copyright (c) 2019, Digi International, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import Any, Tuple
def calcsize(fmt: str) -> int:
"""
Returns the number of bytes needed to store the given ``fmt``.
:param fmt: Identifier of the typecode to get its size.
:return: The number of bytes needed.
"""
...
def pack(fmt: str, v1: Any, *vn: Any) -> bytes:
"""
Returns a bytes object containing the values v1, v2, ... packed according
to the format string ``fmt``.
:param fmt: Format string sequence of the values to pack.
:param v1: Value to pack.
:param vn: Additional values to pack.
:return: Bytes object with the values packed according to the given format.
"""
...
def pack_into(fmt: str, buff: Any, offset: int, v1: Any, *vn: Any) -> None:
"""
Packs the values v1, v2, ... according to the format string ``fmt`` and
writes the packed bytes into the writable buffer ``buf`` starting at
``offset``.
**Note**: The offset is a required argument.
:param fmt: Format string sequence of the values to pack.
:param buff: Buffer to write the packed values into.
:param offset: Starting offset to pack values within the buffer.
:param v1: Value to pack.
:param vn: Additional values to pack.
"""
...
def unpack(fmt: str, buffer: Any) -> Tuple:
"""
Returns a tuple containing values unpacked according to the format string
``fmt``. The buffer's size in bytes must be ``calcsize(fmt)``.
:param fmt: Format string sequence of the packed values.
:param buffer: Buffer containing the packed values to unpack.
:return: Tuple containing the unpacked values.
"""
...
def unpack_from(fmt: str, buffer: Any, offset: int = 0) -> Tuple:
"""
Returns a tuple containing values unpacked according to the format string
``fmt``. The buffer's size, minus ``offset``, must be at least
``calcsize(fmt)``.
:param fmt: Format string sequence of the packed values.
:param buffer: Buffer containing the packed values to unpack.
:param offset: Offset within buffer to start unpacking values.
:return: Tuple containing the unpacked values.
"""
...
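# Example usage (illustrative; standard CPython struct semantics assumed):
#
#     packed = pack(">hf", 1, 2.5)      # big-endian short + float
#     calcsize(">hf")                   # -> 6
#     unpack(">hf", packed)             # -> (1, 2.5)
#     buf = bytearray(8)
#     pack_into(">I", buf, 4, 0xDEADBEEF)
#     unpack_from(">I", buf, offset=4)  # -> (3735928559,)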
# ---------------------------------------------------------------------------
# Copyright 2017 The Wallaroo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
This is an example of a stateless application that takes a floating point
Celsius value and sends out a floating point Fahrenheit value.
"""
import struct
import wallaroo
def application_setup(args):
in_host, in_port = wallaroo.tcp_parse_input_addrs(args)[0]
out_host, out_port = wallaroo.tcp_parse_output_addrs(args)[0]
inputs = wallaroo.source("Celsius Conversion",
wallaroo.TCPSourceConfig(in_host, in_port, decoder))
pipeline = (inputs
.to(multiply)
.to(add)
.to_sink(wallaroo.TCPSinkConfig(out_host, out_port, encoder)))
return wallaroo.build_application("Celsius to Fahrenheit", pipeline)
@wallaroo.decoder(header_length=4, length_fmt=">I")
def decoder(bs):
return struct.unpack(">f", bs)[0]
@wallaroo.computation(name="multiply by 1.8")
def multiply(data):
return data * 1.8
@wallaroo.computation(name="add 32")
def add(data):
return data + 32
@wallaroo.encoder
def encoder(data):
return ("%.6f\n" % data).encode()
# ---------------------------------------------------------------------------
# -*- coding: utf-8 -*-
# Copyright (c) 2019 by University of Kassel, TU Dortmund, RWTH Aachen University and Fraunhofer
# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual
# contributors (see AUTHORS file for details). All rights reserved.
import numpy as np
import pandas as pd
from collections import OrderedDict
from pandapower.timeseries import DFData
from pandapower.control import ConstControl
from simbench import csv_tablenames, idx_in_2nd_array, merge_dataframes
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
__author__ = 'smeinecke'
def get_applied_profiles(net, profile_type):
""" Returns a list of unique profiles in element tables, e.g. net.sgen.profile.
profile_type must be in ["load", "renewables", "powerplants", "storage"]. """
applied_profiles = []
if profile_type in ["renewables", "powerplants"]:
phys_type = "RES" if profile_type == "renewables" else "PP"
fitting_elm = {"renewables": "sgen", "powerplants": "gen"}[profile_type]
for elm in ['sgen', 'gen', 'ext_grid']:
if 'profile' in net[elm].columns:
if "phys_type" in net[elm].columns:
idx = net[elm].index[net[elm].phys_type == phys_type]
else:
idx = net[elm].index if elm == fitting_elm else []
applied_profiles += list(net[elm].profile[idx].dropna().unique())
else:
if 'profile' in net[profile_type].columns:
applied_profiles += list(net[profile_type].profile.dropna().unique())
return applied_profiles
def get_available_profiles(net, profile_type, p_or_q=None, continue_on_missing=False):
""" Returns a list of unique profiles in net.profiles.
profile_type in ["load", "renewables", "powerplants", "storage"]
p_or_q can be None, "p", or "q" """
p_or_q = None if profile_type != "load" else p_or_q
if "profiles" in net.keys() and profile_type in net["profiles"].keys():
avail_prof = net["profiles"][profile_type].columns
avail_prof = avail_prof if "time" not in avail_prof else avail_prof.difference(["time"])
avail_prof = pd.Series(avail_prof)
if p_or_q is None:
return avail_prof
elif p_or_q == "p":
return avail_prof.loc[avail_prof.str.endswith("_pload")].str[:-6]
elif p_or_q == "q":
return avail_prof.loc[avail_prof.str.endswith("_qload")].str[:-6]
else:
raise ValueError(str(p_or_q) + " is unknown as 'p_or_q'.")
elif continue_on_missing:
logger.warning("%s is not in net['profiles'].keys()" % profile_type)
return pd.Series()
else:
raise ValueError("%s is not in net['profiles'].keys()" % profile_type)
def get_missing_profiles(net, profile_type, p_or_q=None):
""" Returns a set of profiles which miss in net.profiles compared to the profile columns in the
element tables. """
return set(get_applied_profiles(net, profile_type)) - set(get_available_profiles(
net, profile_type, p_or_q=p_or_q))
def dismantle_dict_values_to_deep_list(dict_):
""" returns a list of dict values even if the values of the dict are dicts again. """
dict_ = OrderedDict(sorted(dict_.items()))
return [val if not isinstance(val, dict) else dismantle_dict_values_to_deep_list(
val) for val in dict_.values()]
def dismantle_dict_values_to_list(dict_):
""" returns a list of dict values even if the values of the dict are dicts again. """
dict_ = OrderedDict(sorted(dict_.items()))
list_ = []
for val in dict_.values():
if not isinstance(val, dict):
list_.append(val)
else:
list_ += dismantle_dict_values_to_list(val)
return list_
def profiles_are_missing(net, return_as_bool=True):
""" Checks whether any of the used profiles (requested in net[elm].profile) misses in
net.profiles. """
profile_types = ["load", "renewables", "powerplants", "storage"]
return_ = dict.fromkeys(profile_types)
return_["load"] = {"p": "p", "q": "q"}
for profile_type in return_.keys():
if isinstance(return_[profile_type], dict):
for p_or_q in return_[profile_type].keys():
return_[profile_type][p_or_q] = get_missing_profiles(net, profile_type,
p_or_q=p_or_q)
else:
return_[profile_type] = get_missing_profiles(net, profile_type)
if return_as_bool:
return bool(len(set.union(*dismantle_dict_values_to_list(return_)).difference(
set([np.nan]))))
else:
return return_
def filter_unapplied_profiles(csv_data):
""" Filters unapplied profiles from csv_data. """
profile_tables = csv_tablenames('profiles')
element_tables = list(pd.Series(profile_tables).str.split("Profile", expand=True)[0])
for prof_tab, elm_tab in zip(profile_tables, element_tables):
applied_profiles = list(csv_data[elm_tab].profile.dropna().unique())
if elm_tab == "Load" and len(applied_profiles):
applied_profiles_p = pd.Series(applied_profiles) + "_pload"
applied_profiles_q = pd.Series(applied_profiles) + "_qload"
applied_profiles = list(applied_profiles_p) + list(applied_profiles_q)
applied_profiles.append("time")
unapplied_profiles = csv_data[prof_tab].columns.difference(applied_profiles)
logger.debug("These %ss are dropped: " % prof_tab + str(unapplied_profiles))
csv_data[prof_tab].drop(unapplied_profiles, axis=1, inplace=True)
def get_absolute_profiles_from_relative_profiles(
net, element, multiplying_column, relative_profiles=None, profile_column="profile",
profile_suffix=None, time_as_index=False, **kwargs):
"""
Returns a DataFrame with profiles for the given element (e.g. loads or sgens). The profiles
values are calculated by multiplying the relative profiles given by relative_profiles
(or if not given, from net["profiles"]) with the values in net[element][multiplying_column].
INPUT:
**net** (pandapowerNet) - pandapower net
**element** (str) - element type for which absolute profiles are calculated. Possible are
"load", "gen", "sgen" or "storage".
**multiplying_column** (str) - column name within net[element].columns which should be
multiplied with the relative profiles. Usual multiply_columns are 'p_mw' or 'q_mvar'.
Additional Feature: If multiplying_column is not a string, the relative profiles are not
multiplied with net[element][multiplying_column] but with 'multiplying_column' itself.
OPTIONAL:
**relative_profiles** (DataFrame, None) - DataFrame of relative profiles as input. If None,
net["profiles"] is considered.
**profile_column** (str, "profile") - Name of the column which contains information about
which element is assigned to which profile. In SimBench grids, this information is
given in the column "profile". For that reason, 'profile' is the default.
**profile_suffix** (str, None) - For the case that different profiles are given for p and q,
these can be distinguished by a suffix. For loads this can be "_pload" and "_qload",
which will be automatically assumed, if profile_suffix is None.
**time_as_index** (bool, False) - If True, the returned DataFrame has
relative_profiles["time"] as index. If False, relative_profiles.index is used.
****kwargs** - key word arguments for merge_dataframes()
OUTPUT:
**output_profiles** (DataFrame) - calculated absolute profiles
"""
# --- use net.profiles if relative_profiles is None
if relative_profiles is None:
if element in ["load", "storage"]:
relative_profiles = net.profiles[element]
elif element in ["gen", "sgen"]:
# Since RES and Powerplants can be converted to pandapower as both, gen or sgen, both
# are considered together
relative_profiles = merge_dataframes(
[net.profiles["powerplants"], net.profiles["renewables"]], **kwargs)
else:
raise ValueError("element %s is unknown." % str(element))
# --- set index
index = relative_profiles["time"] if time_as_index else relative_profiles.index
if "time" in relative_profiles:
del relative_profiles["time"]
# --- do profile_suffix assumptions if profile_suffix is None
if profile_suffix is None:
if element == "load":
if multiplying_column == "p_mw":
profile_suffix = "_pload"
elif multiplying_column == "q_mvar":
profile_suffix = "_qload"
profile_suffix = "" if profile_suffix is None else profile_suffix
# --- get relative profiles with respect to each element index
if profile_column in net[element].columns:
applied_profiles = net[element][profile_column] + profile_suffix
else: # missing profile column
logger.warning("In %s table, profile column '%s' is missing. Scalings of 1 are assumed." % (
element, profile_column))
missing_col_handling = "missing_col_handling"
applied_profiles = pd.Series([missing_col_handling]*net[element].shape[0],
index=net[element].index)
relative_profiles[missing_col_handling] = 1
# nan profile handling
if applied_profiles.isnull().any():
logger.debug("There are nan profiles. Scalings of 1 are assumed.")
nan_profile_handling = "nan_profile_scaling"
assert nan_profile_handling not in relative_profiles.columns
applied_profiles.loc[applied_profiles.isnull()] = nan_profile_handling
relative_profiles[nan_profile_handling] = 1
relative_output_profiles = relative_profiles.values[:, idx_in_2nd_array(
applied_profiles.values, np.array(relative_profiles.columns))]
# --- get factor to multiply with (consider additional feature of 'multiplying_column')
if isinstance(multiplying_column, str):
if multiplying_column in net[element].columns:
factor = net[element][multiplying_column].values.reshape(1, -1)
else:
raise ValueError("'multiplying_column' %s is not net[%s].columns." % (
multiplying_column, element))
else:
factor = multiplying_column
# --- multiply relative profiles with factor and return results
output_profiles = pd.DataFrame(relative_output_profiles*factor, index=index,
columns=net[element].index)
return output_profiles
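# A minimal, self-contained sketch (invented values) of the lookup-and-multiply
# core of get_absolute_profiles_from_relative_profiles(). It assumes, as in the
# usage above, that idx_in_2nd_array() returns the position of each entry of its
# first argument within its second argument.
def _example_relative_to_absolute():
    relative = pd.DataFrame({"H0_pload": [0.5, 1.0], "G0_pload": [0.2, 0.4]})
    applied = pd.Series(["H0_pload", "G0_pload", "H0_pload"])  # one profile per element
    factor = np.array([[2.0, 10.0, 4.0]])  # e.g. p_mw of the three elements
    rel = relative.values[:, idx_in_2nd_array(
        applied.values, np.array(relative.columns))]
    return pd.DataFrame(rel * factor)  # rows: time steps, columns: elements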
def get_absolute_values(net, profiles_instead_of_study_cases, **kwargs):
"""
Is a convenience function using get_absolute_profiles_from_relative_profiles(). This function
returns a dict with all absolute values, calculated from scaling factors and maximum
active or reactive powers.
INPUT:
**net** (pandapowerNet) - pandapower net
**profiles_instead_of_study_cases** (bool) - Flag to decide whether profiles or loadcases
should be considered.
****kwargs** - key word arguments for get_absolute_profiles_from_relative_profiles()
(especially for merge_dataframes())
OUTPUT:
**abs_val** (dict) - absolute values calculated from relative scaling factors and maximum
active or reactive powers. The keys of this dict are tuples consisting of element and
parameter. The values are DataFrames with absolute power values.
"""
abs_val = dict()
if profiles_instead_of_study_cases: # use given profiles
for elm_col in [("load", "p_mw"), ("load", "q_mvar"), ("sgen", "p_mw"), ("gen", "p_mw"),
("storage", "p_mw")]:
abs_val[elm_col] = get_absolute_profiles_from_relative_profiles(
net, elm_col[0], elm_col[1], **kwargs)
else: # use predefined study cases
# --- voltage set point
slack_base_case = pd.DataFrame(net["ext_grid"]["vm_pu"].values.reshape(1, -1),
columns=net["ext_grid"].index, index=["bc"])
abs_val[("ext_grid", "vm_pu")] = pd.DataFrame(net.loadcases["Slack_vm"].values.reshape(
-1, 1).repeat(net["ext_grid"].shape[0], axis=1), columns=net["ext_grid"].index,
index=net.loadcases["Slack_vm"].index)
abs_val[("ext_grid", "vm_pu")] = pd.concat([slack_base_case,
abs_val[("ext_grid", "vm_pu")]])
# --- active and reactive scaling factors
for elm_col in [("load", "p_mw"), ("load", "q_mvar"), ("sgen", "p_mw")]:
loadcase_type = {"load": {"p_mw": "pload",
"q_mvar": "qload"},
"sgen": {"p_mw": ["Wind_p", "PV_p", "RES_p"]}}[elm_col[0]][elm_col[1]]
if isinstance(loadcase_type, list):
assert elm_col[0] == "sgen"
assert len(loadcase_type) == 3
Idx_wind = net.sgen.loc[(net.sgen.type.str.contains("Wind").fillna(False)) |
(net.sgen.type.str.contains("WP").fillna(False))].index
Idx_pv = net.sgen.loc[net.sgen.type.str.contains("PV").fillna(False)].index
Idx_sgen = net.sgen.index.difference(Idx_wind | Idx_pv)
net.sgen["loadcase_type"] = ""
                net.sgen.loc[Idx_wind, 'loadcase_type'] = loadcase_type[0]
                net.sgen.loc[Idx_pv, 'loadcase_type'] = loadcase_type[1]
                net.sgen.loc[Idx_sgen, 'loadcase_type'] = loadcase_type[2]
else:
net[elm_col[0]]["loadcase_type"] = loadcase_type
abs_val[elm_col] = get_absolute_profiles_from_relative_profiles(
net, elm_col[0], elm_col[1], profile_column="loadcase_type",
relative_profiles=net.loadcases, profile_suffix="", **kwargs)
base_case = pd.DataFrame(net[elm_col[0]][elm_col[1]].values.reshape(1, -1),
columns=net[elm_col[0]].index, index=["bc"])
abs_val[elm_col] = pd.concat([base_case, abs_val[elm_col]])
del net[elm_col[0]]["loadcase_type"]
return abs_val
def apply_const_controllers(net, absolute_profiles_values):
"""
    Applies ConstControl instances to the net. As a result, one can easily run time series with given
power values of e.g. loads, sgens, storages or gens.
INPUT:
**net** - pandapower net
**absolute_profiles_values** - dict of Dataframes with absolute values for the profiles,
keys should be tuples of length 2 (element and parameter), DataFrame size is
timesteps x number of elements
"""
n_time_steps = dict()
for (elm, param), values in absolute_profiles_values.items():
if values.shape[1]:
# check DataFrame shape[0] == time_steps
if elm in n_time_steps.keys():
if n_time_steps[elm] != values.shape[0]:
logger.warning("There are two profiles for %ss which have different " % elm +
"amount of time steps.")
else:
n_time_steps[elm] = values.shape[0]
# check DataFrame shape[1] == net[elm].index
unknown_idx = values.columns.difference(net[elm].index)
if len(unknown_idx):
logger.warning("In absolute_profiles_values[%s][%s], " % (elm, param) +
"there are indices additional & unknown to net[%s].index" % elm +
str(["%i" % i for i in unknown_idx]))
missing_idx = net[elm].index.difference(values.columns)
if len(missing_idx):
logger.warning("In absolute_profiles_values[%s][%s], " % (elm, param) +
"these indices are missing compared to net[%s].index" % elm +
str(["%i" % i for i in missing_idx]))
# apply const controllers
idx = list(net[elm].index.intersection(values.columns))
ConstControl(net, element=elm, variable=param,
element_index=idx, profile_name=idx,
data_source=DFData(absolute_profiles_values[(elm, param)][idx]))
# compare all DataFrame shape[0] == time_steps
if len(set(n_time_steps.values())) > 1:
logger.warning("The profiles have different amount of time steps:")
logger.warning(n_time_steps)
if __name__ == "__main__":
pass
# ---------------------------------------------------------------------------
"""Hacs models."""
# ---------------------------------------------------------------------------
# -*- encoding: utf-8 -*-
import json
import logging
import shlex
from aws_gate.constants import (
AWS_DEFAULT_PROFILE,
AWS_DEFAULT_REGION,
DEFAULT_OS_USER,
DEFAULT_SSH_PORT,
DEFAULT_KEY_ALGORITHM,
DEFAULT_KEY_SIZE,
PLUGIN_INSTALL_PATH,
DEBUG,
DEFAULT_GATE_KEY_PATH,
)
from aws_gate.decorators import (
plugin_required,
)
from aws_gate.query import query_instance
from aws_gate.ssh_proxy import SshProxySession
from aws_gate.ssh_common import SshKey, SshKeyUploader
from aws_gate.utils import (
fetch_instance_details_from_config,
execute,
)
logger = logging.getLogger(__name__)
class SshSession(SshProxySession):
def __init__(
self,
instance_id,
region_name=AWS_DEFAULT_REGION,
profile_name=AWS_DEFAULT_PROFILE,
port=DEFAULT_SSH_PORT,
user=DEFAULT_OS_USER,
command=None,
local_forward=None,
remote_forward=None,
dynamic_forward=None,
):
super().__init__(instance_id, region_name, profile_name, port, user)
self._command = command
self._local_forward = local_forward
self._remote_forward = remote_forward
self._dynamic_forward = dynamic_forward
self._ssh_cmd = None
def _build_ssh_command(self):
cmd = [
"ssh",
"-l",
self._user,
"-p",
str(self._port),
"-F",
"/dev/null",
"-i",
DEFAULT_GATE_KEY_PATH,
]
if self._local_forward or self._remote_forward or self._dynamic_forward:
cmd.append("-N")
if self._local_forward:
cmd.extend(["-L", self._local_forward])
if self._remote_forward:
cmd.extend(["-R", self._remote_forward])
if self._dynamic_forward:
cmd.extend(["-D", self._dynamic_forward])
if DEBUG:
cmd.append("-vv")
else:
cmd.append("-q")
proxy_command_args = [
PLUGIN_INSTALL_PATH,
json.dumps(self._response),
self._region_name,
"StartSession",
self._profile_name,
json.dumps(self._session_parameters),
self._ssm.meta.endpoint_url,
]
proxy_command = " ".join(shlex.quote(i) for i in proxy_command_args)
ssh_options = [
"IdentitiesOnly=yes",
"UserKnownHostsFile=/dev/null",
"StrictHostKeyChecking=no",
"ProxyCommand={}".format(proxy_command),
]
for ssh_option in ssh_options:
cmd.append("-o")
cmd.append(ssh_option)
cmd.append(self._instance_id)
if self._command:
cmd.append("--")
cmd.extend(self._command)
return cmd
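    # The assembled command looks roughly like this (values illustrative):
    #   ssh -l <user> -p 22 -F /dev/null -i <gate-key> -q \
    #       -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null \
    #       -o StrictHostKeyChecking=no -o "ProxyCommand=<plugin> <response-json> ..." \
    #       <instance-id> [-- <command>]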
def open(self):
self._ssh_cmd = self._build_ssh_command()
return execute(self._ssh_cmd[0], self._ssh_cmd[1:])
@plugin_required
def ssh(
config,
instance_name,
user=DEFAULT_OS_USER,
port=DEFAULT_SSH_PORT,
key_type=DEFAULT_KEY_ALGORITHM,
key_size=DEFAULT_KEY_SIZE,
profile_name=AWS_DEFAULT_PROFILE,
region_name=AWS_DEFAULT_REGION,
command=None,
local_forward=None,
remote_forward=None,
dynamic_forward=None,
):
instance, profile, region = fetch_instance_details_from_config(
config, instance_name, profile_name, region_name
)
instance_obj = query_instance(name=instance, region_name=region_name, profile_name=profile_name)
if instance_obj is None:
raise ValueError("No instance could be found for name: {}".format(instance_obj))
instance_id = instance_obj.instance_id
az = instance_obj.placement["AvailabilityZone"]
logger.info(
"Opening SSH session on instance %s (%s) via profile %s",
instance_id,
region,
profile,
)
if local_forward: # pragma: no cover
logger.info("SSH session will do a local port forwarding: %s", local_forward)
if remote_forward: # pragma: no cover
logger.info("SSH session will do a remote port forwarding: %s", remote_forward)
if dynamic_forward: # pragma: no cover
logger.info(
"SSH session will do a dynamic port forwarding: %s", dynamic_forward
)
with SshKey(key_type=key_type, key_size=key_size) as ssh_key:
with SshKeyUploader(
instance_id=instance_id, az=az, region_name=region, profile_name=profile, user=user, ssh_key=ssh_key
):
with SshSession(
instance_id,
region_name=region,
profile_name=profile,
port=port,
user=user,
command=command,
local_forward=local_forward,
remote_forward=remote_forward,
dynamic_forward=dynamic_forward,
) as ssh_session:
ssh_session.open()
# ---------------------------------------------------------------------------
import json
from flask import make_response
def get_response(status, body):
response = make_response(str(body), status)
response.headers['Content-Type'] = 'application/json'
response.headers['Access-Control-Allow-Origin'] = '*'
return response
def error_handler(message, status=400):
    # json.dumps() already returns text; passing utf-8 bytes through str(body)
    # in get_response() would render a literal "b'...'" in the response.
    return get_response(status, json.dumps(dict(status="error", message=message)))
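# Example usage in a Flask view (illustrative):
#
#     from flask import Flask
#     app = Flask(__name__)
#
#     @app.route('/ping')
#     def ping():
#         return get_response(200, json.dumps({"status": "ok"}))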
# ---------------------------------------------------------------------------
# Copyright (c) Fraunhofer MEVIS, Germany. All rights reserved.
# **InsertLicense** code
__author__ = 'gchlebus'
from data.cityscapes.cityscapes_labels import Label
labels = [
Label("background", 0, 0, "bg", 0, False, False, (0, 0, 0)),
Label("liver", 1, 1, "liver", 0, False, False, (255, 255, 255)),
]
# ---------------------------------------------------------------------------
import picamera
from time import sleep, time
import face_recognition
import os
# https://picamera.readthedocs.io/en/release-1.0/api.html
def get_number_faces():
time_now = int(time())
# take picture
camera = picamera.PiCamera()
# set resolution
camera.resolution = (1024, 768)
camera.start_preview()
sleep(3)
camera.capture('./camera/images/{}.jpg'.format(time_now))
    camera.stop_preview()
    # release the camera so repeated calls do not fail with the device busy
    camera.close()
# facial recognition
image = face_recognition.load_image_file('./camera/images/{}.jpg'.format(time_now))
face_locations = face_recognition.face_locations(image)
# faces in picture
num_faces = len(face_locations)
# save picture only has faces on it
pic_name = ''
if num_faces:
pic_name = '{0}_{1}_faces.jpg'.format(time_now, num_faces)
os.rename('./camera/images/{}.jpg'.format(time_now), './camera/images/{}'.format(pic_name))
else:
os.remove('./camera/images/{}.jpg'.format(time_now))
return num_faces, pic_name
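# Example usage (illustrative):
#
#     count, picture = get_number_faces()
#     if count:
#         print('Detected {} face(s), saved as {}'.format(count, picture))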
# ---------------------------------------------------------------------------
from rest_framework import serializers
from . import models
class LabeledImageSerializer(serializers.ModelSerializer):
class Meta:
model = models.LabeledImage
fields = '__all__'
class ImageSerializer(serializers.ModelSerializer):
labeled_images = LabeledImageSerializer(many=True, read_only=True)
class Meta:
model = models.Image
fields = '__all__'
class DemographicSerializer(serializers.ModelSerializer):
class Meta:
model = models.Demographic
fields = '__all__'
class AcquisitionSerializer(serializers.ModelSerializer):
images = ImageSerializer(many=True, read_only=True)
demographic = DemographicSerializer(many=False, read_only=True)
class Meta:
model = models.Acquisition
fields = '__all__'
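# A minimal usage sketch (assumption: an Acquisition row with related images
# and a demographic record already exists in the database):
#
#   acquisition = models.Acquisition.objects.first()
#   AcquisitionSerializer(acquisition).data
#   # -> nested dict with 'images' (each carrying 'labeled_images') and 'demographic'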
|
nilq/baby-python
|
python
|
from setuptools import setup, find_packages
setup(
version='0.6.3',
name='vinepy',
description='Python wrapper for the Vine Private API',
license='MIT',
author='David Gomez Urquiza',
author_email='david.gurquiza@gmail.com',
install_requires=['requests'],
url='https://github.com/davoclavo/vinepy',
keywords=['vine', 'library', 'api', 'wrapper'],
packages=find_packages(),
)
|
nilq/baby-python
|
python
|
from .jschemalite import match, sample_match, to_json_schema
__all__ = ['match','sample_match','to_json_schema']
|
nilq/baby-python
|
python
|
# Copyright 2022 Kaiyu Zheng
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sciex import Experiment, Trial, Event, Result
from mos3d.tests.experiments.runner import *
from mos3d.tests.experiments.experiment import make_domain, make_trial
from mos3d import *
import matplotlib.pyplot as plt
import os
import random
ABS_PATH = os.path.dirname(os.path.abspath(__file__))
VIZ = False
output_dir = os.path.join("results", "scalability-II")
prior_type = "uniform"
discount_factor = 0.99
detect_after_look = True
def main():
# Some arbitrary seeds for reproducible world generation.
# How many trials is enough? Suppose in a world of size
# 4x4x4 there are 5 objects. Then there are (4*4*4)^5,
# around 1 billion, possible such worlds. To have a 95+/-5
# confidence interval in the results, assuming the possible
# worlds distribute normally, we need to run 384 trials.
# For a 95+/-10 confidence interval, it is 96.
#
# For our purpose, we don't care about arbitrary worlds that
# much (we care to some extent); we care more about whether
# the algorithm works for a randomly chosen world, under
# various world settings. If 100 trials (across different settings)
# indicate that our approach is better, then we have pretty
# good confidence that our approach is better. For safety,
# we can bump that to 200. That means each setting takes
# about 25 trials; to round up, do 30.
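# (For reference, a sketch of the sample-size arithmetic above, assuming a
# normal approximation with worst-case proportion p = 0.5:
#     n = z^2 * p * (1 - p) / e^2
#     95 +/- 5  ->  n = 1.96^2 * 0.25 / 0.05^2 ~= 384
#     95 +/- 10 ->  n = 1.96^2 * 0.25 / 0.10^2 ~= 96)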
num_trials = 40
seeds = [random.randint(1, 1000000) for i in range(500)]
scenarios = [(4, 2, 3, 10, 3.0, 500, 120),
(4, 4, 3, 10, 3.0, 500, 120),
(4, 6, 3, 10, 3.0, 500, 120),
(8, 2, 4, 10, 3.0, 500, 240),
(8, 4, 4, 10, 3.0, 500, 240),
(8, 6, 4, 10, 3.0, 500, 240),
(16, 2, 7, 10, 3.0, 500, 360),
(16, 4, 7, 10, 3.0, 500, 360),
(16, 6, 7, 10, 3.0, 500, 360),
(32, 2, 16, 10, 3.0, 500, 480),
(32, 4, 16, 10, 3.0, 500, 480),
(32, 6, 16, 10, 3.0, 500, 480)]
random.shuffle(scenarios)
# Split the seeds into |scenarios| groups
splitted_seeds = []
for i in range(len(scenarios)):
if (i+1)*num_trials > len(seeds):
raise ValueError("Not enough seeds generated.")
splitted_seeds.append(seeds[i*num_trials:(i+1)*num_trials])
all_trials = []
for i in range(len(scenarios)): # n, k, d, max_depth, planning_time, max_steps, max_time
n, k, d, max_depth, planning_time, max_steps, max_time = scenarios[i]
for seed in splitted_seeds[i]:
random.seed(seed)
# Make trials
worldstr = make_domain(n, k, d)
## parameters
big = 1000
small = 1
exploration_const = 1000
alpha = ALPHA # ALPHA = 1e5
beta = BETA # BETA = 0
params = {"prior_type": prior_type,
"discount_factor": discount_factor,
"max_depth": max_depth,
"planning_time": planning_time,
"max_steps": max_steps,
"max_time": max_time,
"detect_after_look": detect_after_look,
"big": big,
"small": small,
"exploration_const": exploration_const,
"alpha": alpha,
"beta": beta}
if n == 4:
setting_hier = [(1,1,max_depth), (2,2,max_depth)]
setting_op = [(1,1,max_depth), (1,2,max_depth)]
elif n == 8:
setting_hier = [(1,1,max_depth), (2,2,max_depth), (4,4,max_depth)]
setting_op = [(1,1,max_depth), (1,2,max_depth), (1,4,max_depth)]
elif n == 16:
setting_hier = [(1,1,max_depth), (2,2,max_depth), (4,4,max_depth)]
setting_op = [(1,1,max_depth), (1,2,max_depth), (1,4,max_depth)]
alpha = 1e7
elif n == 32:
setting_hier = [(1,1,max_depth), (4,4,max_depth), (8,8,max_depth)]
setting_op = [(1,1,max_depth), (1,4,max_depth), (1,8,max_depth)]
alpha = 1e8
elif n == 64:
setting_hier = [(1,1,max_depth), (4,4,max_depth), (8,8,max_depth)]
setting_op = [(1,1,max_depth), (1,4,max_depth), (1,8,max_depth)]
alpha = 1e9
params['alpha'] = alpha
trial_name = "domain%s_%s" % (str(scenarios[i]).replace(", ", "-"), str(seed))
pouct_trial = make_trial(trial_name, worldstr,
"pouct", "octree", **params)
multires_trial = make_trial(trial_name, worldstr,
"hierarchical", "octree",
setting=setting_hier, **params)
options_trial = make_trial(trial_name, worldstr,
"options", "octree",
setting=setting_op, **params)
pomcp_trial = make_trial(trial_name, worldstr,
"pomcp", "particles",
num_particles=1000, **params)
random_trial = make_trial(trial_name, worldstr,
"purelyrandom", "octree", **params)
porollout_trial = make_trial(trial_name, worldstr,
"porollout", "octree",
porollout_policy=PolicyModel(detect_after_look=detect_after_look),
**params)
greedy_trial = make_trial(trial_name, worldstr,
"greedy", "octree",
**params)
bruteforce_trial = make_trial(trial_name, worldstr,
"bruteforce", "octree",
**params)
all_trials.extend([pouct_trial,
multires_trial,
options_trial,
pomcp_trial,
random_trial,
porollout_trial,
greedy_trial,
bruteforce_trial])
# Generate scripts to run experiments and gather results
exp = Experiment("ScalabilityYAgainQQ", all_trials, output_dir, verbose=True)
exp.generate_trial_scripts(split=400)
print("Find multiple computers to run these experiments.")
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
"""
Harness for GP Bandit Optimisation.
-- kandasamy@cs.cmu.edu
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=super-on-old-class
from argparse import Namespace
import numpy as np
# Local imports
from bo import acquisitions
from blackbox_optimiser import blackbox_opt_args, BlackboxOptimiser
from gp.kernel import SEKernel
from gp.gp_core import GP, mandatory_gp_args
from gp.gp_instances import SimpleGPFitter, all_simple_gp_args
from utils.optimisers import random_maximise
from utils.option_handler import get_option_specs, load_options
from utils.function_caller import get_function_caller_from_function
from utils.reporters import get_reporter
gp_bandit_args = [
# Acquisition
get_option_specs('acq', False, None,
'Which acquisition to use: TS, UCB, BUCB, UCBPE.'),
get_option_specs('acq_opt_criterion', False, 'rand',
'Which optimiser to use when maximising the acquisition function.'),
get_option_specs('acq_opt_max_evals', False, -1,
'Number of evaluations when maximising acquisition. If negative uses default value.'),
# The following are perhaps not so important.
get_option_specs('shrink_kernel_with_time', False, 0,
'If True, shrinks the kernel with time so that we don\'t get stuck.'),
get_option_specs('perturb_thresh', False, 1e-4,
('If the next point chosen is too close to an existing point by this times the '
'diameter, then we will perturb the point a little bit before querying. This is '
'mainly to avoid numerical stability issues.')),
get_option_specs('track_every_time_step', False, 0,
('If 1, it tracks every time step.')),
# TODO: implement code for next_pt_std_thresh
get_option_specs('next_pt_std_thresh', False, 0.005,
('If the std of the queried point falls below this times the kernel scale '
'frequently, we will reduce the bandwidth range.')),
]
all_gp_bandit_args = all_simple_gp_args + blackbox_opt_args + gp_bandit_args
# The GPBandit Class
# ========================================================================================
class GPBandit(BlackboxOptimiser):
""" GPBandit Class. """
# pylint: disable=attribute-defined-outside-init
# Constructor.
def __init__(self, func_caller, worker_manager, options=None, reporter=None):
""" Constructor. """
self.reporter = get_reporter(reporter)
if options is None:
options = load_options(all_gp_bandit_args, reporter=reporter)
super(GPBandit, self).__init__(func_caller, worker_manager, options, self.reporter)
def _child_set_up(self):
""" Some set up for the GPBandit class. """
# Set up acquisition optimisation
self._set_up_acq_opt()
self.method_name = 'GP-' + str(self.options.acq)
def _set_up_acq_opt(self):
""" Sets up optimisation for acquisition. """
# First set up function to get maximum evaluations.
if isinstance(self.options.acq_opt_max_evals, int):
if self.options.acq_opt_max_evals > 0:
self.get_acq_opt_max_evals = lambda t: self.options.acq_opt_max_evals
else:
self.get_acq_opt_max_evals = None
else: # In this case, the user likely passed a function here.
self.get_acq_opt_max_evals = self.options.acq_opt_max_evals
# Additional set up based on the specific optimisation procedure
if self.options.acq_opt_criterion == 'direct':
self._set_up_acq_opt_direct()
elif self.options.acq_opt_criterion == 'rand':
self._set_up_acq_opt_rand()
else:
raise NotImplementedError('Not implemented acquisition optimisation for %s yet.'%(
self.options.acq_opt_criterion))
def _set_up_acq_opt_direct(self):
""" Sets up optimisation for acquisition using direct. """
raise NotImplementedError('Not implemented DiRect yet.')
def _set_up_acq_opt_rand(self):
""" Sets up optimisation for acquisition using random search. """
def _random_max_wrap(*args):
""" A wrapper so as to only return optimal point."""
_, opt_pt = random_maximise(*args)
return opt_pt
# Set this up in acq_optimise
self.acq_optimise = lambda obj, max_evals: _random_max_wrap(obj, self.domain_bounds,
max_evals)
if self.get_acq_opt_max_evals is None:
lead_const = 10 * min(5, self.domain_dim)**2
self.get_acq_opt_max_evals = lambda t: np.clip(
lead_const * np.sqrt(min(t, 1000)), 2000, 3e4)
# Acquisition function should be evaluated via multiple evaluations
self.acq_query_type = 'multiple'
# Managing the GP ---------------------------------------------------------
def _child_build_new_model(self):
""" Builds a new model. """
self._build_new_gp()
def _build_new_gp(self):
""" Builds a GP with the data in history and stores in self.gp. """
if hasattr(self.func_caller, 'init_gp') and self.func_caller.init_gp is not None:
# If you know the true GP.
raise NotImplementedError('Not implemented passing given GP yet.')
else:
if self.options.shrink_kernel_with_time:
raise NotImplementedError('Not implemented kernel shrinking for the GP yet.')
else:
self.options.bandwidth_log_bounds = np.array([[0.0, 4.1]] * self.domain_dim)
# Invoke the GP fitter.
reg_X = np.concatenate((self.pre_eval_points, self.history.query_points), axis=0)
reg_Y = np.concatenate((self.pre_eval_vals, self.history.query_vals), axis=0)
gp_fitter = SimpleGPFitter(reg_X, reg_Y,
options=self.options, reporter=self.reporter)
self.gp, _ = gp_fitter.fit_gp()
gp_fit_report_str = ' -- Fitting GP (j=%d): %s'%(self.step_idx, str(self.gp))
self.reporter.writeln(gp_fit_report_str)
def _add_data_to_model(self, qinfos):
""" Add data to self.gp """
if len(qinfos) == 0:
return
new_points = np.empty((0, self.domain_dim))
new_vals = np.empty(0)
for i in range(len(qinfos)):
new_points = np.concatenate((new_points,
qinfos[i].point.reshape(-1, self.domain_dim)), axis=0)
new_vals = np.append(new_vals, [qinfos[i].val], axis=0)
if self.gp is not None:
self.gp.add_data(new_points, new_vals)
# Methods needed for initialisation ----------------------------------------
def _child_init(self):
""" Any initialisation for a child class. """
self._create_init_gp()
def _create_init_gp(self):
""" Creates an initial GP. """
reg_X = np.concatenate((self.pre_eval_points, self.history.query_points), axis=0)
reg_Y = np.concatenate((self.pre_eval_vals, self.history.query_vals), axis=0)
range_Y = reg_Y.max() - reg_Y.min()
mean_func = lambda x: np.array([np.median(reg_Y)] * len(x))
kernel = SEKernel(self.domain_dim, range_Y/4.0,
dim_bandwidths=0.05*np.sqrt(self.domain_dim))
noise_var = (reg_Y.std()**2)/10
self.gp = GP(reg_X, reg_Y, kernel, mean_func, noise_var)
# Methods needed for optimisation ----------------------------------------
def _get_ancillary_data_for_acquisition(self):
""" Returns ancillary data for the acquisitions. """
max_num_acq_opt_evals = self.get_acq_opt_max_evals(self.step_idx)
return Namespace(max_evals=max_num_acq_opt_evals,
t=self.step_idx,
curr_max_val=self.curr_opt_val,
evals_in_progress=self.eval_points_in_progress)
def _determine_next_eval_point(self):
""" Determine the next point for evaluation. """
anc_data = self._get_ancillary_data_for_acquisition()
acq_to_use = getattr(acquisitions.asy, self.options.acq.lower())
next_eval_point = acq_to_use(self.gp, self.acq_optimise, anc_data)
return next_eval_point
def _determine_next_batch_of_eval_points(self):
""" Determine the next batch of eavluation points. """
anc_data = self._get_ancillary_data_for_acquisition()
acq_to_use = getattr(acquisitions.syn, self.options.acq.lower())
next_batch_of_eval_points = acq_to_use(self.num_workers, self.gp, self.acq_optimise,
anc_data)
return next_batch_of_eval_points
def update_model(self):
""" Update the model. """
raise NotImplementedError('Implement in a child class.')
# GP Bandit class ends here
# =====================================================================================
# APIs for GP Bandit optimisation. ----------------------------------------------------
# 1. Optimisation from a FunctionCaller object.
def gpb_from_func_caller(func_caller, worker_manager, max_capital, mode=None, acq=None,
options=None, reporter='default'):
""" GP Bandit optimisation from a utils.function_caller.FunctionCaller instance. """
if options is None:
reporter = get_reporter(reporter)
options = load_options(all_gp_bandit_args, reporter=reporter)
options.acq = acq
options.mode = mode
return (GPBandit(func_caller, worker_manager, options, reporter)).optimise(max_capital)
# 2. Optimisation from all args.
def gpb_from_args(func, domain_bounds, max_capital, acq=None, options=None, reporter=None,
vectorised=False, **kwargs):
""" This function executes GP Bandit (Bayesian) Optimisation.
Input Arguments:
- func: The function to be optimised.
- domain_bounds: The bounds for the domain.
- max_capital: The maximum capital for optimisation.
- options: A namespace which gives other options.
- reporter: A reporter object to write outputs.
- vectorised: If true, it means func take matrix inputs. If
false, they take only single point inputs.
- true_opt_pt, true_opt_val: The true optimum point and value (if known). Mostly for
experimenting with synthetic problems.
- time_distro: The time distribution to be used when sampling.
- time_distro_params: parameters for the time distribution.
Returns: (gpb_opt_pt, gpb_opt_val, history)
- gpb_opt_pt, gpb_opt_val: The optimum point and value.
- history: A namespace which contains a history of all the previous queries.
"""
func_caller = get_function_caller_from_function(func, domain_bounds=domain_bounds,
vectorised=vectorised, **kwargs)
# NOTE: gpb_from_args does not construct a worker_manager, so None is passed
# explicitly; callers that need one should use gpb_from_func_caller directly.
return gpb_from_func_caller(func_caller, worker_manager=None, max_capital=max_capital,
acq=acq, options=options, reporter=reporter)
|
nilq/baby-python
|
python
|
from rec_to_nwb.processing.nwb.components.iterator.multi_thread_data_iterator import MultiThreadDataIterator
from rec_to_nwb.processing.nwb.components.iterator.multi_thread_timestamp_iterator import MultiThreadTimestampIterator
from rec_to_nwb.processing.nwb.components.position.old_pos_timestamp_manager import OldPosTimestampManager
from rec_to_nwb.processing.nwb.components.position.pos_data_manager import PosDataManager
from rec_to_nwb.processing.tools.beartype.beartype import beartype
class OldFlPositionExtractor:
@beartype
def __init__(self, datasets: list):
self.datasets = datasets
self.all_pos = self.__extract_data()
def __extract_data(self):
all_pos = []
for dataset in self.datasets:
data_from_current_dataset = [
dataset.get_data_path_from_dataset('pos') + pos_file for pos_file in
dataset.get_all_data_from_dataset('pos') if
(pos_file.endswith('.pos_online.dat'))]
all_pos.append(data_from_current_dataset)
return all_pos
def get_positions(self):
pos_datas = [
PosDataManager(directories=[single_pos])
for single_pos in self.all_pos
]
return [
MultiThreadDataIterator(pos_data)
for pos_data in pos_datas
]
def get_columns_labels(self):
pos_datas = [
PosDataManager(directories=[single_pos])
for single_pos in self.all_pos
]
return [
pos_data.get_column_labels_as_string()
for pos_data in pos_datas
]
def get_timestamps(self):
old_pos_timestamp_managers = [
OldPosTimestampManager(
directories=[single_pos],
)
for single_pos in self.all_pos
]
return [
MultiThreadTimestampIterator(old_pos_timestamp_manager)
for old_pos_timestamp_manager in old_pos_timestamp_managers
]
|
nilq/baby-python
|
python
|
import gmpy2
n = 6146024643941503757217715363256725297474582575057128830681803952150464985329239705861504172069973746764596350359462277397739134788481500502387716062571912861345331755396960400668616401300689786263797654804338789112750913548642482662809784602704174564885963722422299918304645125966515910080631257020529794610856299507980828520629245187681653190311198219403188372517508164871722474627810848320169613689716990022730088459821267951447201867517626158744944551445617408339432658443496118067189012595726036261168251749186085493288311314941584653172141498507582033165337666796171940245572657593635107816849481870784366174740265906662098222589242955869775789843661127411493630943226776741646463845546396213149027737171200372484413863565567390083316799725434855960709541328144058411807356607316377373917707720258565704707770352508576366053160404360862976120784192082599228536166245480722359263166146184992593735550019325337524138545418186493193366973466749752806880403086988489013389009843734224502284325825989
'''
Suppose p = a * 10^250 + b and q = b * 10^250 + a, with a, b of 250 digits.
Then
    n = pq = ab * 10^500 + (a^2 + b^2) * 10^250 + ab
           = x * 10^500 + y * 10^250 + x
where x = ab and y = a^2 + b^2.
The top 250 digits of n give the upper half of x (possibly inflated by a
carry c from the middle block, hence the loop over c below), and the bottom
250 digits give the lower half of x. Then:
    y = (n - x * 10^500 - x) / 10^250
    phi(n) = (p - 1)(q - 1) = n + 1 - (p + q)
    p + q = (a + b) * 10^250 + (a + b)
    a + b = sqrt((a + b)^2) = sqrt(2x + y)
'''
d250 = 10 ** 250
d500 = 10 ** 500
highx = int(str(n)[:250])
lowx = int(str(n)[-250:])
for c in [0, 1]:
x = (highx - c) * d250 + lowx
y = (n - x * d500 - x) // d250
a_plus_b, is_square = gmpy2.iroot(2 * x + y, 2)
if is_square:
ct = 3572030904528013180691184031825875018560018830056027446538585108046374607199842488138228426133620939067295245642162497675548656988031367698701161407333098336631469820625758165691216722102954230039803062571915807926805842311530808555825502457067483266045370081698397234434007948071948000301674260889742505705689105049976374758307610890478956315615270346544731420764623411884522772647227485422185741972880247913540403503772495257290866993158120540920089734332219140638231258380844037266185237491107152677366121632644100162619601924591268704611229987050199163281293994502948372872259033482851597923104208041748275169138684724529347356731689014177146308752441720676090362823472528200449780703866597108548404590800249980122989260948630061847682889941399385098680402067366390334436739269305750501804725143228482932118740926602413362231953728010397307348540059759689560081517028515279382023371274623802620886821099991568528927696544505357451279263250695311793770159474896431625763008081110926072287874375257
phiN = n + 1 - (a_plus_b * d250 + a_plus_b)
d = gmpy2.invert(65537, phiN)
m = int(gmpy2.powmod(ct, d, n))
print(m.to_bytes((m.bit_length() + 7) // 8, "big").decode())
break
|
nilq/baby-python
|
python
|
import pandas as pd
class ProjectSQLiteHandler:
def __init__(self, database='project_manager'):
import sqlite3 as lite
self.connection = lite.connect(database)
self.cursor = self.connection.cursor()
def closeDatabase(self):
self.cursor.close()
self.connection.close()
return
def getProjectPath(self):
if self.tableExists('project'):
projectPath = self.cursor.execute("select project_path from project").fetchone()
if projectPath is not None:
return projectPath[0]
def tableExists(self, table):
try:
self.cursor.execute("select * from " + table + " limit 1").fetchall()
except Exception:
return False
return True
#String, integer -> String
def getComponentData(self, column, row):
if column != '':
values = self.cursor.execute("select " + column + " from components limit ?", [row+1]).fetchall()
if (len(values) > row) :
value = values[row][0]
if value is not None:
return value
return
def createRefTable(self, tablename):
self.cursor.execute("DROP TABLE If EXISTS " + tablename)
self.cursor.execute("""
CREATE TABLE """ + tablename +
"""(
_id integer primary key,
sort_order integer,
code text unique,
description text);
"""
)
self.connection.commit()
#String, ListOfTuples -> None
def addRefValues(self, tablename, values):
self.cursor.executemany("INSERT INTO " + tablename + "(sort_order,code, description) VALUES (?,?,?)" ,values)
self.connection.commit()
#makes the default database associated with every new project.
def makeDatabase(self):
print('Making default database')
refTables = [
'ref_component_attribute',
'ref_component_type',
'ref_date_format',
'ref_time_format',
'ref_data_format',
'ref_power_units',
'ref_attributes',
'ref_time_units',
'ref_speed_units',
'ref_flow_units',
'ref_voltage_units',
'ref_current_units',
'ref_irradiation_units',
'ref_temperature_units',
'ref_true_false',
'ref_env_attributes',
'ref_frequency_units',
'ref_file_type'
]
for r in refTables:
self.createRefTable(r)
self.addRefValues('ref_file_type',[(0,'CSV','Comma Separated Values'), (1,'MET','MET text file'), (2,'TXT','Tab delimited')])
self.addRefValues('ref_current_units',[(0,'A','amps'),(1,'kA','kiloamps')])
self.addRefValues('ref_frequency_units',[(0, 'Hz','hertz')])
self.addRefValues('ref_temperature_units',[(0,'C','Celsius'),(1,'F','Fahrenheit'),(2,'K','Kelvin')])
self.addRefValues('ref_irradiation_units',[(0,'W/m2','Watts per square meter')])
self.addRefValues('ref_flow_units',[(0,'m3/s', 'cubic meter per second'),(1, 'L/s', 'liters per second'),
(2, 'cfm', 'cubic feet per minute'),(3,'gal/min','gallon per minute'),(4, 'kg/s', 'kilograms per second')])
self.addRefValues('ref_voltage_units',[(0,'V','volts'),(1, 'kV','kilovolts')])
self.addRefValues('ref_true_false',[(0,'T','True'),(1,'F','False')])
self.addRefValues('ref_speed_units', [(0, 'm/s','meters per second'),(1,'ft/s','feet per second'),
(2,'km/hr','kilometers per hour'),(3,'mi/hr','miles per hour')])
self.addRefValues('ref_time_units',[(0,'S','Seconds'),(1,'m','Minutes')])
self.addRefValues('ref_date_format',[(0,'MM/DD/YY','MM/DD/YY'),(1,'MM/DD/YYYY','MM/DD/YYYY'),
(2,'YYYY/MM/DD','YYYY/MM/DD'),(3,'DD/MM/YYYY','DD/MM/YYYY'),
(4, 'MM-DD-YY', 'MM-DD-YY'), (5, 'MM-DD-YYYY', 'MM-DD-YYYY'),
(6, 'YYYY-MM-DD', 'YYYY-MM-DD'), (7, 'DD-MM-YYYY', 'DD-MM-YYYY'),
(8, 'mon dd yyyy', 'mon dd yyyy'),
(9, 'days', 'days')
])
self.addRefValues('ref_time_format',[(0,'HH:MM:SS','HH:MM:SS'),(1,'HH:MM','HH:MM'),
(2,'hours','hours'),
(3,'minutes','minutes'),(4,'seconds','seconds')
])
self.addRefValues('ref_data_format',[(0,'components + MET', 'Load and component data are separate from wind data'),
(1,'components', 'All component, load and environmental data is within a single timeseries file'),
(2, 'component + load + environment', 'Separate files for load, component and wind data.')
])
self.addRefValues('ref_component_type' ,[(0,'wtg', 'windturbine'),
(1,'gen', 'diesel generator'), (2,'inv','inverter'),(3,'tes','thermal energy storage'),(4, 'ees','energy storage'),(5, 'load', 'total load')])
self.addRefValues('ref_power_units',[(0,'W', 'watts'), (1,'kW', 'Kilowatts'),(2,'MW','Megawatts'),
(3, 'var', 'vars'),(4,'kvar','kilovars'),(5,'Mvar','Megavars'),
(6, 'VA','volt-ampere'),(7,'kVA','kilovolt-ampere'),(8,'MVA','megavolt-ampere'),(9, 'pu',''),(10,'PU',''),(11,'PU*s','')])
self.addRefValues('ref_env_attributes', [(0,'WS', 'Windspeed'), (1,'IR', 'Solar Irradiation'),
(2,'WF','Waterflow'),(3,'Tamb','Ambient Temperature')])
self.addRefValues('ref_attributes' ,[(0,'P', 'Real Power'), (1,'Q','Reactive Power'),(2,'S','Apparent Power'),
(3,'PF','Power Factor'),(4,'V','Voltage'),(5, 'I', 'Current'),
(6, 'f', 'Frequency'), (7,'TStorage','Internal Temperature Thermal Storage'),
(8,'PAvail','Available Real Power'), (9,'QAvail','Available Reactive Power'),
(10,'SAvail','Available Apparent Power')])
#merge unit reference tables
self.cursor.execute("DROP TABLE IF EXISTS ref_units")
self.cursor.executescript("CREATE TABLE ref_units (_id integer primary key, code text unique, description text)")
unit_tables_tuple = self.cursor.execute("select name from sqlite_master where type = 'table' and name like '%units'").fetchall()
for u in unit_tables_tuple:
self.cursor.execute("INSERT INTO ref_units(code, description) SELECT code, description from " + u[0] + " Where code not in (select code from ref_units)")
self.connection.commit()
#project table
self.cursor.execute("DROP TABLE IF EXISTS project")
self.cursor.executescript("""CREATE TABLE project
(_id integer primary key,
project_path text,
project_name text);""")
#component table
self.cursor.execute("DROP TABLE IF EXISTS components")
self.cursor.executescript("""CREATE TABLE components
(_id integer primary key,
inputfiledir text,
original_field_name text,
component_type text,
component_name text,
units text,
scale numeric,
offset numeric,
attribute text,
tags text,
FOREIGN KEY (component_type) REFERENCES ref_component_type(code),
FOREIGN KEY (units) REFERENCES ref_universal_units(code),
FOREIGN KEY (attribute) REFERENCES ref_attributes(code)
);""")
self.connection.commit()
self.cursor.execute("DROP TABLE IF EXISTS sets")
self.cursor.executescript("""
CREATE TABLE IF NOT EXISTS sets
(_id integer primary key,
set_name text ,
component text ,
change_tag text,
to_value text);""")
self.cursor.execute("DROP TABLE IF EXISTS input_files")
self.cursor.executescript("""
CREATE TABLE IF NOT EXISTS input_files
(_id integer primary key,
inputfiletypevalue text ,
datatype text ,
inputfiledirvalue text,
timestep text,
datechannelvalue text,
datechannelformat text,
timechannelvalue text,
timechannelformat text,
includechannels text,
timezonevalue text,
usedstvalue text,
FOREIGN KEY (timechannelformat) REFERENCES ref_time_format(code),
FOREIGN KEY (datechannelformat) REFERENCES ref_date_format(code));""")
#The table optimize input only contains parameters that were changed from the default
self.cursor.execute("Drop TABLE IF EXISTS optimize_input")
self.cursor.executescript("""
CREATE TABLE IF NOT EXISTS optimizer_input
(_id integer primary key,
parameter text,
parameter_value text);""")
self.cursor.execute("DROP TABLE IF EXISTS runs")
self.cursor.executescript("""
CREATE TABLE IF NOT EXISTS runs
(_id integer primary key,
set_id text,
set_name text,
run_name text);""")
self.cursor.execute("DROP TABLE IF EXISTS setup")
self.cursor.executescript("""
CREATE TABLE IF NOT EXISTS setup
(_id integer primary key,
set_name unique,
date_start text,
date_end text,
timestep integer,
component_names text
);""")
self.cursor.execute("INSERT INTO setup (set_name,timestep,date_start,date_end) values('default',1,'2016-01-01','2016-12-31')")
self.cursor.execute("DROP TABLE IF EXISTS environment")
self.cursor.executescript("""CREATE TABLE IF NOT EXISTS environment
(_id integer primary key,
inputfiledir text,
original_field_name text,
component_name text unique,
units text,
scale numeric,
offset numeric,
attribute text,
tags text,
FOREIGN KEY (units) REFERENCES ref_universal_units(code),
FOREIGN KEY (attribute) REFERENCES ref_env_attributes(code)
);""")
self.connection.commit()
#get the set info for a specific set or default values if no set is specified
#String -> dictionary
def getSetInfo(self,set='default'):
setDict = {}
#get tuple
values = self.cursor.execute("select timestep, date_start, date_end, component_names from setup where set_name = '" + set + "'").fetchone()
if values is None:
values = self.cursor.execute(
"select timestep, date_start, date_end, component_names from setup where set_name = 'default'").fetchone()
setDict['timestep'] = values[0]
setDict['date_start'] = values[1]
setDict['date_end'] = values[2]
setDict['component_names'] = values[3]
values = self.cursor.execute("select date_start, date_end from setup where set_name = 'default'").fetchone()
setDict['min_date'] = values[0]
setDict['max_date'] = values[1]
if setDict.get('component_names') is None:
setDict['component_names'] = []
return setDict
#inserts a single record into a specified table given a list of fields to insert values into and a list of values
#String, ListOfString, ListOfString
def insertRecord(self, table, fields, values):
string_fields = ','.join(fields)
string_values = ','.join('?' * len(values))
try:
self.cursor.execute("INSERT INTO " + table + "(" + string_fields + ")" + "VALUES (" + string_values + ")", values)
self.connection.commit()
return True
except Exception as e:
print(e)
return False
# updates a single record in a specified table given a field to match, value to match, list of fields to insert values into and a list of values
# String, ListOfString, ListOfString, ListOfString, ListOfString
def updateRecord(self,table, keyField,keyValue,fields,values):
updateFields = ', '.join([a + " = '" + b + "'" for a,b in zip(fields,values)])
keyFields = ', '.join([a + " = '" + b + "'" for a,b in zip(keyField,keyValue)])
try:
self.cursor.execute("UPDATE " + table + " SET " + updateFields + " WHERE " + keyFields
)
self.connection.commit()
return True
except Exception as e:
print(e)
print(type(e))
return False
return
#returns a list of strings, each combining the code and description columns of a reference table into a single ' - ' separated string
#ListOfString -> ListOfString
def getRefInput(self, tables):
#table is a list of tables
# create list of values for a combo box
valueStrings = []
for t in tables:
values = pd.read_sql_query("SELECT code, description FROM " + t + " ORDER By sort_order", self.connection)
for v in range(len(values)):
valueStrings.append(values.loc[v, 'code'] + ' - ' + values.loc[v, 'description'])
return valueStrings
#returns the next numeric suffix for a component of a specific type within the component table
#String -> integer
def getTypeCount(self,componentType):
import re
#get the highest component name (biggest number)
finalName = self.cursor.execute("SELECT component_name FROM components where component_type = '" + componentType + "' ORDER BY component_name DESC").fetchone()
if finalName is not None and finalName[0] is not None:
finalName=finalName[0]
#extract the numbers in the name
count = re.findall(r'\d+',finalName)
#if there is more than one number use only the last number and add 1 to it
#if there aren't any other components of that type return 0
if len(count) > 0:
count = int(count[-1])
return count +1
return 0
def dataCheck(self,table):
import re
#get the highest component name (biggest number)
data = self.cursor.execute("SELECT * FROM " + table).fetchall()
return data
#returns a list of column names for a table
# String -> list
def getHeaders(self,table):
#Todo read from database
headers = self.cursor.execute("select sql from sqlite_master where name = " + table + " and type = 'table'")
return headers
#returns true if a field within the specified table has a reference constraint
#String, String -> Boolean
def hasRef(self,column, table):
sql = self.cursor.execute("SELECT sql FROM sqlite_master WHERE type = 'table' and name = '" + table + "'").fetchone()
if column + ') references ' in sql[0].lower():
return True
return False
#returns the name of a reference table for a specified column in a table
#String, String -> String
def getRef(self,column, table):
s1 = self.cursor.execute("SELECT sql FROM sqlite_master WHERE type = 'table' and name = '" + table + "'").fetchone()
s1 = s1[0].lower()
s2 = column + ") references "
table = s1[s1.find(s2) + len(s2):].replace("(code)", "")
table = table.replace(")","")
table = table.split(",")[0]
table = table.strip()
return table
#updates the component table with a key and values in a dictionary
#Dictionary -> None
def updateComponent(self, dict):
for k in dict.keys():
try:
self.cursor.execute("UPDATE components SET " + k + " = ? WHERE component_name = ?", [dict[k],dict['component_name']])
except:
print('%s column was not found in the data table' %k)
self.connection.commit()
#determines if a component record needs to be created or updated and implements the correct function
#returns true if the record is a new record and was added to the table
#dictionary -> Boolean
def writeComponent(self,componentDict):
if len(self.cursor.execute("SELECT * FROM components where component_name = ?", [componentDict['component_name']]).fetchall()) > 0:
self.updateComponent(componentDict)
else:
self.cursor.execute('INSERT INTO components (component_name) VALUES (?)', [componentDict['component_name']])
self.updateComponent(componentDict)
return True
return False
#returns a table of values for the code column in a a reference table
#String -> pandas.Series
def getCodes(self,table):
import pandas as pd
codes = pd.read_sql_query("select code from " + table + " ORDER BY sort_order", self.connection)
codes = (codes['code']).tolist()
return codes
#returns a list of components associated with a project
def getComponentNames(self):
names = self.cursor.execute("select component_name from components").fetchall()
if names is not None:
names = [''.join(i) for i in names if i is not None]
return pd.Series(names).tolist()
return []
def getComponentsTable(self, filter):
sql = """select component_name, component_type, original_field_name, units,attribute from components where inputfiledir = ?"""
df = pd.read_sql_query(sql,self.connection,params=[filter])
sql = """select component_name, 'env', original_field_name, units,attribute from environment where inputfiledir = ?"""
df = df.append(pd.read_sql_query(sql,self.connection,params=[filter]))
return df
def getInputPath(self, pathNum):
'''returns the file folder for the given input file number (corresponds to fileblock in setup page)'''
path = self.cursor.execute("select inputfiledirvalue from input_files where _id = " + pathNum).fetchone()
if path is not None:
return path[0]
return
def dataComplete(self):
required={'components':['original_field_name','component_type','component_name','units','attribute'],
'environment':['original_field_name','component_name','units','attribute'],
'project':['project_path']}
for k in required.keys():
condition = ' OR '.join(['{0} IS NULL'.format(x) for x in required[k]])
m = self.cursor.execute("select * from " + k + " where " + condition).fetchall()
if len(self.cursor.execute("select * from " + k + " where " + condition).fetchall()) > 1 :
return False
return True
'''gets a list of possible component types from the ref_component_type table'''
def getComponentTypes(self):
loT = pd.read_sql_query("select code from ref_component_type",self.connection)
loT = loT['code'].tolist()
return loT
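# A minimal usage sketch (assumption: run from a directory where creating the
# 'project_manager' SQLite file is acceptable; values are illustrative):
#
#   handler = ProjectSQLiteHandler()
#   handler.makeDatabase()
#   handler.insertRecord('project', ['project_path', 'project_name'],
#                        ['/tmp/demo', 'demo'])
#   handler.getSetInfo()   # -> dict with timestep, date_start, date_end, ...
#   handler.closeDatabase()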
|
nilq/baby-python
|
python
|
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Service Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.script
import typing
from abc import abstractmethod
from .x_storage_based_library_container import XStorageBasedLibraryContainer as XStorageBasedLibraryContainer_d72a1321
if typing.TYPE_CHECKING:
from ..document.x_storage_based_document import XStorageBasedDocument as XStorageBasedDocument_6b1310b2
class DocumentDialogLibraryContainer(XStorageBasedLibraryContainer_d72a1321):
"""
Service Class
defines a container of dialog libraries, which is to be made persistent in a sub storage of a document storage.
**since**
OOo 2.3
See Also:
`API DocumentDialogLibraryContainer <https://api.libreoffice.org/docs/idl/ref/servicecom_1_1sun_1_1star_1_1script_1_1DocumentDialogLibraryContainer.html>`_
"""
__ooo_ns__: str = 'com.sun.star.script'
__ooo_full_ns__: str = 'com.sun.star.script.DocumentDialogLibraryContainer'
__ooo_type_name__: str = 'service'
@abstractmethod
def create(self, Document: 'XStorageBasedDocument_6b1310b2') -> None:
"""
creates an instance of the DocumentDialogLibraryContainer, belonging to a document
The current storage of the document will be set as initial root storage (see XPersistentLibraryContainer.RootStorage) of the container.
Usually, you will only create a DocumentDialogLibraryContainer within the implementation of the document to which the container should belong.
Raises:
com.sun.star.lang.IllegalArgumentException: ``IllegalArgumentException``
"""
@abstractmethod
def createWithURL(self, URL: str) -> None:
"""
"""
__all__ = ['DocumentDialogLibraryContainer']
|
nilq/baby-python
|
python
|
# Specific imports.
import numpy as np
from ml_util import split_dataset_at_feature
# Public interface.
__all__ = ['ID3']
# Current version.
__version__ = '0.0.1'
# Author.
__author__ = "Michalis Vrettas, PhD - Email: michail.vrettas@gmail.com"
# ID3 class definition.
class ID3(object):
"""
Description:
TBD
"""
def __init__(self, data):
"""
Description:
Constructor for an ID3 object.
Args:
- data: (list of lists) input data-set, where the last column holds the class label.
"""
self._data = data
# _end_def_
@staticmethod
def calc_shannon_entropy(data):
"""
Description:
It computes the Shannon entropy of a data set. The more organized a data set is,
the lower the entropy value will be. Here we choose the base log2() function but
this is not very important at the moment.
Args:
- data: (list of lists) input data-set.
Note:
- We assume that the last column in the data contains the class label.
"""
# Sanity check.
if not data:
raise ValueError(" Input data set is empty.")
# _end_if_
# Label counter.
label_counts = {}
# Check all the entries in the data-set.
for record in data:
# Get the label of the input vector.
this_label = record[-1]
# If it is not in the dictionary add it.
if this_label not in label_counts:
label_counts[this_label] = 0
# _end_if_
# Increase counter.
label_counts[this_label] += 1
# _end_for_
# Define the entropy variable.
total_entropy = 0.0
# Get the total number of data vectors.
num_n = float(len(data))
# Compute the entropy.
for key in label_counts:
prob = float(label_counts[key])/num_n
total_entropy -= prob * np.log2(prob)
# _end_for_
return total_entropy
# _end_def_
def choose_best_feature(self, data):
"""
Description:
Selects the best feature to split the data set, using the entropy as a measure of goodness.
Args:
- data: (list of lists) input data-set.
Note:
- We assume that the last column in the data contains the class label.
"""
# Number of samples in the data set.
tot_n = len(data)
# Initial entropy of the data set.
entropy = self.calc_shannon_entropy(data)
# Information gain.
best_info_gain = 0.0
# Best feature.
best_feature = -1
# Go through all the features.
for i in range(len(data[0]) - 1):
# Split the data set on the feature 'i'.
sub_set = split_dataset_at_feature(data, i)
# Entropy for the current split.
split_entropy = 0.0
# Calculate the combined entropy of the split.
for j in sub_set:
split_entropy += (len(sub_set[j])/tot_n)*self.calc_shannon_entropy(sub_set[j])
# _end_for_
# Compute the information gain (w.r.t. the initial entropy).
split_info_gain = entropy - split_entropy
# If the split has reduced the entropy update the values.
if split_info_gain > best_info_gain:
best_info_gain = split_info_gain
best_feature = i
# _end_if_
# _end_for_
return best_feature
# _end_def_
# _end_class_
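# A minimal usage sketch (the toy records are illustrative; the last column is
# the class label, as the methods above assume):
#
#   toy = [[1, 1, 'yes'],
#          [1, 1, 'yes'],
#          [1, 0, 'no'],
#          [0, 1, 'no'],
#          [0, 1, 'no']]
#   ID3.calc_shannon_entropy(toy)      # ~= 0.971 bits (2 'yes' vs 3 'no')
#   ID3(toy).choose_best_feature(toy)  # index of the most informative feature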
|
nilq/baby-python
|
python
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for
# license information.
# -------------------------------------------------------------------------
from flask import (
Response,
Blueprint,
)
errors_api = Blueprint('errors_api', __name__)
@errors_api.route('/403', methods=['GET'])
def get_403():
return Response(status=403)
@errors_api.route('/500', methods=['GET'])
def get_500():
return Response(status=500)
@errors_api.route('/stream', methods=['GET'])
def get_stream():
class StreamingBody:
def __iter__(self):
yield b"Hello, "
yield b"world!"
return Response(StreamingBody(), status=500)
@errors_api.route('/short-data', methods=['GET'])
def get_short_data():
response = Response(b"X" * 4, status=200)
response.automatically_set_content_length = False
response.headers["Content-Length"] = "8"
return response
@errors_api.route('/non-odatav4-body', methods=['GET'])
def get_non_odata_v4_response_body():
return Response(
'{"code": 400, "error": {"global": ["MY-ERROR-MESSAGE-THAT-IS-COMING-FROM-THE-API"]}}',
status=400
)
@errors_api.route('/malformed-json', methods=['GET'])
def get_malformed_json():
return Response(
'{"code": 400, "error": {"global": ["MY-ERROR-MESSAGE-THAT-IS-COMING-FROM-THE-API"]',
status=400
)
@errors_api.route('/text', methods=['GET'])
def get_text_body():
return Response(
'I am throwing an error',
status=400
)
@errors_api.route('/odatav4', methods=['GET'])
def get_odatav4():
return Response(
'{"error": {"code": "501", "message": "Unsupported functionality", "target": "query", "details": [{"code": "301", "target": "$search", "message": "$search query option not supported"}], "innererror": {"trace": [], "context": {}}}}',
status=400
)
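# A minimal usage sketch (assumption: a test-only Flask app; the blueprint name
# is from this module):
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.register_blueprint(errors_api)
#   client = app.test_client()
#   client.get('/403').status_code                       # -> 403
#   client.get('/odatav4').get_json()['error']['code']   # -> '501'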
|
nilq/baby-python
|
python
|
"""Base class for deriving trainable modules."""
# local
from ivy.stateful.module import Module
class Sequential(Module):
def __init__(self, *sub_modules, device=None, v=None):
"""A sequential container. Modules will be added to it in the order they are
passed in the constructor.
:param sub_modules: Submodules to chain together into a sequence.
:type sub_modules: sequence of ivy.Module instances
:param device: device on which to create the layer's variables 'cuda:0',
'cuda:1', 'cpu' etc.
:type device: ivy.Device, optional
:param v: the variables for each submodule in the sequence, constructed
internally by default.
:type v: ivy container of variables, optional
"""
if v is not None:
for i, submod in enumerate(sub_modules):
try:
submod.v = v["submodules"]["v" + str(i)]
except KeyError:
if submod.v:
raise Exception(
"variables v passed to Sequential class must have key "
"chains in the form of `submodules/v{}`, where {} is an idx"
)
self._submodules = list(sub_modules)
Module.__init__(self, device, v)
def _forward(self, inputs):
"""Perform forward pass of the Linear layer.
:param inputs: Inputs to process.
:type inputs: array
:return: The outputs following the linear operation and bias addition.
"""
x = inputs
for i, submod in enumerate(self._submodules):
try:
x = submod(x, v=self.v.submodules["v" + str(i)])
except KeyError:
if submod.v:
raise Exception(
"variables v passed to Sequential class must have key chains in"
"the form of 'submodules/v{}', where {} is an idx"
)
x = submod(x)
return x
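# A minimal usage sketch (assumptions: ivy.Linear and ivy.random_uniform exist
# in the installed ivy version; shapes are illustrative):
#
#   import ivy
#   net = Sequential(ivy.Linear(4, 8), ivy.Linear(8, 1))
#   out = net(ivy.random_uniform(shape=(2, 4)))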
|
nilq/baby-python
|
python
|
import os
current_path = os.path.abspath(os.path.dirname(__file__))
filelist_path = current_path + "/filelist.txt"
filelist = open(filelist_path, "w")
for filename in os.listdir(current_path):
if filename.endswith(".mp4"):
if filename.find("[") != -1:
newname = filename[:filename.find("[")] + ".mp4"
os.rename(os.path.join(current_path, filename),
os.path.join(current_path, newname))
filelist.write("file " + newname + "\n")
else:
filelist.write("file " + filename + "\n")
filelist.close()
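# The "file <name>" lines follow ffmpeg's concat-demuxer list format, so the
# generated filelist.txt can be used to join the clips, e.g.:
#
#   ffmpeg -f concat -safe 0 -i filelist.txt -c copy joined.mp4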
|
nilq/baby-python
|
python
|
'''
Bank Transfer System
'''
__author__ = 'Dilmuratjohn'
import sys
import pymysql
class Transfer(object):
def __init__(self,connection):
self.connection = connection
def check_account(self,account):
print("checking account[%s]..." %(account))
cursor=self.connection.cursor()
try:
sql = "select * from account where accountID=%s" %(account)
cursor.execute(sql)
rows = cursor.fetchall()
if len(rows)!=1:
raise Exception("Account %s does not exist" %(account))
finally:
cursor.close()
print("account checked")
def check_balance(self,account,transfer_amount):
print("checking balance ...")
cursor=self.connection.cursor()
try:
sql = "select * from account where accountID=%s and balance>%s" %(account,transfer_amount)
cursor.execute(sql)
rows = cursor.fetchall()
if len(rows)!=1:
raise Exception("Account %s's balance is insufficient" %(account))
finally:
cursor.close()
print("balance checked")
def withdrawals(self,account,transfer_amount):
print("making withdrawals...")
cursor=self.connection.cursor()
try:
sql = "update account set balance = balance-%s where accountID=%s " %(transfer_amount,account)
cursor.execute(sql)
if cursor.rowcount !=1:
raise Exception("withdrawal failure on account %s" %(account))
finally:
cursor.close()
print("withdrawals accomplished")
def deposit(self,account,transfer_amount):
print("making deposit...")
cursor=self.connection.cursor()
try:
sql = "update account set balance = balance+%s where accountID=%s " %(transfer_amount,account)
cursor.execute(sql)
if cursor.rowcount !=1:
raise Exception("deposit failure on account %s" %(account))
finally:
cursor.close()
print("deposit accomplished")
def transfer(self,source_account,transfer_account,transfer_amount):
try:
self.check_account(source_account)
self.check_account(transfer_account)
self.check_balance(source_account,transfer_amount)
self.withdrawals(source_account,transfer_amount)
self.deposit(transfer_account,transfer_amount)
self.connection.commit()
except Exception as e:
self.connection.rollback()
raise e
if __name__=="__main__":
source_account = account1
transfer_account = account2
transfer_amount = aomunt_of_money
connection = pymysql.connect(
host='***.***.***.***',
port=3306, # placeholder: MySQL port
user='username',
password='******',
db='database',
charset='utf8')
Transfer_=Transfer(connection)
try:
Transfer_.transfer(source_account,transfer_account,transfer_amount)
except Exception as e:
print("Error:"+str(e))
finally:
connection.close()
|
nilq/baby-python
|
python
|
import graphene
from .. import type_
from .... import ops
##__________________________________________________________________||
class CommonInputFields:
"""Common input fields of mutations for creating and updating file paths"""
path = graphene.String()
note = graphene.String()
class CreateProductFilePathInput(graphene.InputObjectType, CommonInputFields):
product_id = graphene.Int()
class UpdateProductFilePathInput(graphene.InputObjectType, CommonInputFields):
pass
##__________________________________________________________________||
class CreateProductFilePath(graphene.Mutation):
class Arguments:
input = CreateProductFilePathInput(required=True)
ok = graphene.Boolean()
productFilePath = graphene.Field(lambda: type_.ProductFilePath)
def mutate(root, info, input):
model = ops.create_product_file_path(**input)
ops.commit()
ok = True
return CreateProductFilePath(productFilePath=model, ok=ok)
class UpdateProductFilePath(graphene.Mutation):
class Arguments:
path_id = graphene.Int()
input = UpdateProductFilePathInput(required=True)
ok = graphene.Boolean()
productFilePath = graphene.Field(lambda: type_.ProductFilePath)
def mutate(root, info, path_id, input):
model = ops.update_product_file_path(path_id, **input)
ops.commit()
ok = True
return UpdateProductFilePath(productFilePath=model, ok=ok)
class DeleteProductFilePath(graphene.Mutation):
class Arguments:
path_id = graphene.Int()
ok = graphene.Boolean()
def mutate(root, info, path_id):
ops.delete_product_file_path(path_id)
ops.commit()
ok = True
return DeleteProductFilePath(ok=ok)
##__________________________________________________________________||
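# A minimal usage sketch (assumptions: these mutations are mounted on a schema
# under the field name createProductFilePath; graphene camel-cases product_id
# to productId; values are illustrative):
#
#   mutation = '''
#     mutation {
#       createProductFilePath(input: {productId: 1, path: "/data/file.txt"}) {
#         ok
#         productFilePath { path }
#       }
#     }
#   '''
#   # schema.execute(mutation)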
|
nilq/baby-python
|
python
|
from django.conf import settings
from wq.db.patterns.models import LabelModel
if settings.WITH_GIS:
from django.contrib.gis.db import models
class GeometryModel(LabelModel):
name = models.CharField(max_length=255)
geometry = models.GeometryField(srid=settings.SRID)
class PointModel(LabelModel):
name = models.CharField(max_length=255)
geometry = models.PointField(srid=settings.SRID)
else:
GeometryModel = None
PointModel = None
|
nilq/baby-python
|
python
|
import collections
import event_model
import itertools
from bluesky.plans import count
from intake.catalog.utils import RemoteCatalogError
import numpy
import ophyd.sim
import os
import pytest
import time
import uuid
def normalize(gen):
"""
Convert any event/datum pages to single event/datum documents.
"""
for name, doc in gen:
if name == 'event_page':
for event in event_model.unpack_event_page(doc):
yield 'event', event
elif name == 'datum_page':
for datum in event_model.unpack_datum_page(doc):
yield 'datum', datum
else:
yield name, doc
def compare(a, b):
a = normalize(a)
b = normalize(b)
a_indexed = {}
b_indexed = {}
for name, doc in a:
if name == 'resource':
# Check for an extraneous duplicate key in old documents.
if 'id' in doc:
assert doc['id'] == doc['uid']
doc = doc.copy()
doc.pop('id')
if name == 'datum':
a_indexed[('datum', doc['datum_id'])] = doc
# v0 yields {'_name": 'RunStop'} if the stop doc is missing; v2 yields None.
elif name == 'stop' and doc is None or 'uid' not in doc:
a_indexed[(name, None)] = None
else:
a_indexed[(name, doc['uid'])] = doc
for name, doc in b:
if name == 'resource':
# Check for an extraneous duplicate key in old documents.
if 'id' in doc:
assert doc['id'] == doc['uid']
doc = doc.copy()
doc.pop('id')
if name == 'datum':
b_indexed[('datum', doc['datum_id'])] = doc
# v0 yields {'_name": 'RunStop'} if the stop doc is missing; v2 yields None.
elif name == 'stop' and doc is None or 'uid' not in doc:
b_indexed[(name, None)] = None
else:
b_indexed[(name, doc['uid'])] = doc
# Same number of each type of document?
a_counter = collections.Counter(name for name, uid in a_indexed)
b_counter = collections.Counter(name for name, uid in b_indexed)
assert a_counter == b_counter
# Same uids and names?
assert set(a_indexed) == set(b_indexed)
# Now delve into the documents themselves...
for (name, unique_id), a_doc in a_indexed.items():
b_doc = b_indexed[name, unique_id]
# Handle special case if 'stop' is None.
if name == 'stop' and unique_id is None:
assert b_doc is None or 'uid' not in b_doc
continue
# Same top-level keys?
assert set(a_doc) == set(b_doc)
# Same contents?
try:
assert a_doc == b_doc
except ValueError:
# We end up here if, for example, the dict contains numpy arrays;
# sanitizing converts them to lists so the comparison is well-defined.
assert event_model.sanitize_doc(a_doc) == event_model.sanitize_doc(b_doc)
def test_fixture(bundle):
"Simply open the Catalog created by the fixture."
def test_search(bundle):
"Test search and progressive (nested) search with Mongo queries."
cat = bundle.cat
# Make sure the Catalog is nonempty.
assert list(cat['xyz']())
# Null search should return full Catalog.
assert list(cat['xyz']()) == list(cat['xyz'].search({}))
# Progressive (i.e. nested) search:
result = (cat['xyz']
.search({'plan_name': 'scan'})
.search({'time': {'$gt': 0}}))
assert bundle.uid in result
def test_repr(bundle):
"Test that custom repr (with run uid) appears and is one line only."
entry = bundle.cat['xyz']()[bundle.uid]
assert bundle.uid in repr(entry)
run = entry()
assert bundle.uid in repr(run)
assert len(repr(run).splitlines()) == 1
def test_repr_pretty(bundle):
"Test the IPython _repr_pretty_ has uid and also stream names."
formatters = pytest.importorskip("IPython.core.formatters")
f = formatters.PlainTextFormatter()
entry = bundle.cat['xyz']()[bundle.uid]
assert bundle.uid in f(entry)
# Stream names should be displayed.
assert 'primary' in f(entry)
run = entry()
assert bundle.uid in f(run)
assert 'primary' in f(run)
def test_iteration(bundle):
cat = bundle.cat['xyz']()
list(cat)
def test_len(bundle):
"""
Test that Catalog implements __len__.
Otherwise intake will loop it as `sum(1 for _ in catalog)` which is likely
less efficient.
"""
cat = bundle.cat['xyz']()
len(cat) # If not implemented, will raise TypeError
def test_getitem_sugar(bundle):
cat = bundle.cat['xyz']()
# Test lookup by recency (e.g. -1 is latest)
cat[-1]
with pytest.raises((IndexError, RemoteCatalogError)):
cat[-(1 + len(cat))] # There aren't this many entries
# Test lookup by integer, not globally-unique, 'scan_id'.
expected = cat[bundle.uid]()
scan_id = expected.metadata['start']['scan_id']
actual = cat[scan_id]()
assert actual.metadata['start']['uid'] == expected.metadata['start']['uid']
with pytest.raises((KeyError, RemoteCatalogError)):
cat[234234234234234234] # This scan_id doesn't exist.
# Test lookup by partial uid.
expected = cat[bundle.uid]()
uid = bundle.uid
for j in itertools.count(8, len(uid)):
trunc_uid = uid[:j]
try:
int(trunc_uid)
except ValueError:
break
else:
continue
else:
raise pytest.skip(
"got an all int (!?) uid, can not truncate and retrieve "
"due to intake not respecting types in getitem across the network.")
actual = cat[trunc_uid]()
assert actual.metadata['start']['uid'] == expected.metadata['start']['uid']
def test_run_read_not_implemented(bundle):
"Test that custom repr (with run uid) appears."
run = bundle.cat['xyz']()[bundle.uid]
with pytest.raises(NotImplementedError):
run.read()
with pytest.raises(NotImplementedError):
run.to_dask()
def test_run_metadata(bundle):
"Find 'start' and 'stop' in the Entry metadata."
run = bundle.cat['xyz']()[bundle.uid]
for key in ('start', 'stop'):
assert key in run.metadata # entry
assert key in run().metadata # datasource
def test_canonical(bundle):
run = bundle.cat['xyz']()[bundle.uid]
filler = event_model.Filler({'NPY_SEQ': ophyd.sim.NumpySeqHandler},
inplace=False)
# Smoke test for back-compat alias
with pytest.warns(UserWarning):
next(run.read_canonical())
compare(run.canonical(fill='yes'),
(filler(name, doc) for name, doc in bundle.docs))
def test_canonical_unfilled(bundle):
run = bundle.cat['xyz']()[bundle.uid]
run.canonical(fill='no')
compare(run.canonical(fill='no'), bundle.docs)
# Passing the run through the filler to check resource and datum are
# received before corresponding event.
filler = event_model.Filler({'NPY_SEQ': ophyd.sim.NumpySeqHandler},
inplace=False)
for name, doc in run.canonical(fill='no'):
filler(name, doc)
def test_canonical_delayed(bundle):
run = bundle.cat['xyz']()[bundle.uid]
filler = event_model.Filler({'NPY_SEQ': ophyd.sim.NumpySeqHandler},
inplace=False)
if bundle.remote:
with pytest.raises(NotImplementedError):
next(run.canonical(fill='delayed'))
else:
compare(run.canonical(fill='delayed'),
(filler(name, doc) for name, doc in bundle.docs))
def test_canonical_duplicates(bundle):
run = bundle.cat['xyz']()[bundle.uid]
history = set()
run_start_uid = None
for name, doc in run.canonical(fill='no'):
if name == 'start':
run_start_uid = doc['uid']
elif name == 'datum':
assert doc['datum_id'] not in history
history.add(doc['datum_id'])
elif name == 'datum_page':
assert tuple(doc['datum_id']) not in history
history.add(tuple(doc['datum_id']))
elif name == 'event_page':
for uid in doc['uid']:
assert uid not in history
history.add(uid)
elif name == 'resource':
assert doc.get('run_start', run_start_uid) == run_start_uid
assert doc['uid'] not in history
history.add(doc['uid'])
else:
assert doc['uid'] not in history
history.add(doc['uid'])
def test_read(bundle):
run = bundle.cat['xyz']()[bundle.uid]()
entry = run['primary']
entry.read()
entry().to_dask()
entry().to_dask().load()
def test_dot_access(bundle):
run = bundle.cat['xyz']()[bundle.uid]()
entry = run['primary']
entry = getattr(run, 'primary')
def test_include_and_exclude(bundle):
run = bundle.cat['xyz']()[bundle.uid]()
entry = run['primary']
assert 'motor' in entry().read().variables
assert 'motor' not in entry(exclude=['motor']).read().variables
assert 'motor' in entry(exclude=['NONEXISTENT']).read().variables
expected = set(['time', 'uid', 'seq_num', 'motor'])
assert set(entry(include=['motor']).read().variables) == expected
expected = set(['time', 'uid', 'seq_num', 'motor:motor_velocity'])
assert set(entry(include=['motor:motor_velocity']).read().variables) == expected
def test_transforms(bundle):
run = bundle.cat['xyz_with_transforms']()[bundle.uid]
for name, doc in run.canonical(fill='no'):
if name in {'start', 'stop', 'resource', 'descriptor'}:
assert doc.get('test_key') == 'test_value'
def test_metadata_keys(bundle):
run = bundle.cat['xyz']()[bundle.uid]()
run_metadata = run.metadata
assert 'start' in run_metadata
assert 'stop' in run_metadata
stream_metadata = run['primary']().metadata
assert 'descriptors' in stream_metadata
def test_infinite_recursion_bug(bundle):
run = bundle.cat['xyz']()[bundle.uid]()
with pytest.raises(AttributeError):
        # used to raise RecursionError
run.does_not_exist
def test_items(bundle):
if bundle.remote:
pytest.xfail("Regression in intake 0.6.0 awaiting patch")
for uid, run in bundle.cat['xyz']().items():
assert hasattr(run, 'canonical')
'''
def test_catalog_update(bundle, RE, hw):
"""
    Check that a new run is accessible with -1 immediately after it is
finished being serialized.
"""
with bundle.serializer_partial() as serializer:
new_uid = RE(count([hw.img]), serializer)[0]
new_file = serializer.artifacts['all'][0]
name, start_doc = next(bundle.cat['xyz']()[-1].canonical(fill='no'))
assert start_doc['uid'] == new_uid
os.unlink(new_file)
bundle.cat['xyz'].force_reload()
    print(new_file)
'''
|
nilq/baby-python
|
python
|
from flask import Flask, request
import json
import webbrowser, random, threading
import base64
import io
import matplotlib.image as mpimg # TODO: remove matplotlib dependency
import numpy as np
from laserCAM import Project, Image, Engraving, Laser, Machine, Preprocessor
import os
app = Flask(__name__, static_url_path='')
project = Project()
import pickle  # TODO: remove
app._static_folder = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
@app.route("/")
def hello():
return app.send_static_file('index.html')
@app.route('/project/upload', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
        print('hey, here')
received = json.loads(request.get_data())
encoded = received['the_file'].split(',')
with open('receivedImage.pkl', 'wb') as fo: # TODO: remove
pickle.dump(received, fo)
imgData = encoded[1]
ext = encoded[0].split('/')[1].split(';')[0]
imgData = base64.b64decode(imgData)
with open("imageToSave." + ext, "wb") as fh: # TODO: remove if file never used
fh.write(imgData)
imgData = io.BytesIO(imgData)
imgData = mpimg.imread(imgData, format=ext) # TODO: check what extensions are valid
tmp = Image(imgData, ext)
project.image = tmp
        del tmp
return json.dumps([{'hey2': str(type(project.image.image_data))}])
return json.dumps([{'hey': 'hey'}])
@app.route("/project/settings", methods=['GET', 'POST'])
def project_settings():
if request.method == 'POST':
received = json.loads(request.get_data())
_engrave = received['engraving']
_laser = received['laser']
_machine = received['machine']
_preproc = received['preprocessing']
engrave = Engraving(pixel_width=_engrave['width'], pixel_height=_engrave['height'])
laser = Laser(power_low=_laser['powerLow'], power_high=_laser['powerHigh'], power_off=_laser['powerOff'],
power_band=_laser['powerBand'])
machine = Machine(units=_machine['units'], feed_rate=_machine['feedRate'],
overrun=_machine['overrun'])
preprocessor = Preprocessor(ignore_white=_preproc['ignoreWhite'], split_white=_preproc['splitWhite'],
split_white_value=_preproc['splitMin'], white_cutoff=_preproc['whiteCutoff'])
project.engraving = engrave
project.laser = laser
project.machine = machine
project.preprocessor = preprocessor
        del engrave, laser, machine, preprocessor  # A bit of manual cleanup
project.generate_gcode()
return json.dumps([{'testPower': list(project.laser.power_band_fn(np.asarray([0.0, .2, .5, 1.0])))}])
return json.dumps([{'project': 'settings'}])
if __name__ == "__main__":
port = 5000 + random.randint(0, 999)
url = "http://127.0.0.1:{0}".format(port)
threading.Timer(1.25, lambda: webbrowser.open(url)).start()
app.run(port=port, debug=False)
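# A minimal client-side sketch (hypothetical, not part of the app): it mirrors
# what the front end is expected to POST -- a JSON body whose 'the_file' field
# is a data URL. Assumes the `requests` package and a PNG on disk; the port is
# whatever random port the server printed at startup.
#
#   import base64, json, requests
#   with open("input.png", "rb") as f:
#       data_url = "data:image/png;base64," + base64.b64encode(f.read()).decode()
#   requests.post("http://127.0.0.1:%d/project/upload" % port,
#                 data=json.dumps({"the_file": data_url}))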
|
nilq/baby-python
|
python
|
from terrascript import _resource
class circonus_check(_resource): pass
check = circonus_check
class circonus_contact_group(_resource): pass
contact_group = circonus_contact_group
class circonus_graph(_resource): pass
graph = circonus_graph
class circonus_metric(_resource): pass
metric = circonus_metric
class circonus_metric_cluster(_resource): pass
metric_cluster = circonus_metric_cluster
class circonus_rule_set(_resource): pass
rule_set = circonus_rule_set
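# A minimal usage sketch (assumes the legacy terrascript 0.x API that this
# module targets; the provider key and resource arguments are illustrative):
#
#   from terrascript import Terrascript, provider
#   ts = Terrascript()
#   ts += provider("circonus", key="...")
#   ts += circonus_check("api_latency", ...)
#   print(ts.dump())  # emits Terraform JSON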
|
nilq/baby-python
|
python
|
from django.conf.urls import url, include
from rest_framework.urlpatterns import format_suffix_patterns
from .views import CreateView, DetailsView
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
url(r'^bucketlists/$', CreateView.as_view(), name="create"),
url(r'^bucketlists/(?P<pk>[0-9]+)/$',
DetailsView.as_view(), name="details"),
url(r'^auth/', include('rest_framework.urls',
namespace='rest_framework')),
url(r'^get-token/', obtain_auth_token),
]
urlpatterns = format_suffix_patterns(urlpatterns)
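# With format_suffix_patterns applied, each route also accepts an optional
# format suffix (e.g. a `.json` variant of the bucketlist detail URL), letting
# clients pin the renderer without an Accept header.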
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
#
# Modifications Copyright OpenSearch Contributors. See
# GitHub history for details.
#
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import gzip
import io
import json
import os
import re
import ssl
import warnings
from platform import python_version
import pytest
import urllib3
from mock import Mock, patch
from requests.auth import AuthBase
from urllib3._collections import HTTPHeaderDict
from opensearchpy import __versionstr__
from opensearchpy.compat import reraise_exceptions
from opensearchpy.connection import (
Connection,
RequestsHttpConnection,
Urllib3HttpConnection,
)
from opensearchpy.exceptions import (
ConflictError,
ConnectionError,
NotFoundError,
RequestError,
TransportError,
)
from .test_cases import SkipTest, TestCase
CLOUD_ID_PORT_443 = "cluster:d2VzdGV1cm9wZS5henVyZS5lbGFzdGljLWNsb3VkLmNvbTo0NDMkZTdkZTlmMTM0NWU0NDkwMjgzZDkwM2JlNWI2ZjkxOWUk"
CLOUD_ID_KIBANA = "cluster:d2VzdGV1cm9wZS5henVyZS5lbGFzdGljLWNsb3VkLmNvbSQ4YWY3ZWUzNTQyMGY0NThlOTAzMDI2YjQwNjQwODFmMiQyMDA2MTU1NmM1NDA0OTg2YmZmOTU3ZDg0YTZlYjUxZg=="
CLOUD_ID_PORT_AND_KIBANA = "cluster:d2VzdGV1cm9wZS5henVyZS5lbGFzdGljLWNsb3VkLmNvbTo5MjQzJGM2NjM3ZjMxMmM1MjQzY2RhN2RlZDZlOTllM2QyYzE5JA=="
CLOUD_ID_NO_PORT_OR_KIBANA = "cluster:d2VzdGV1cm9wZS5henVyZS5lbGFzdGljLWNsb3VkLmNvbSRlN2RlOWYxMzQ1ZTQ0OTAyODNkOTAzYmU1YjZmOTE5ZSQ="
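# A sanity-check sketch of what the fixtures above encode: a cloud_id is
# "<name>:<base64 of 'host$es_uuid[$kibana_uuid]'>", where the host part may
# carry an embedded ":port". The expected values below are decoded by hand
# from the constant, not taken from any opensearch-py API.
def test_cloud_id_fixture_encoding():
    import base64

    _, _, payload = CLOUD_ID_PORT_AND_KIBANA.partition(":")
    host, es_uuid, _kibana = base64.b64decode(payload).decode().split("$")
    assert host == "westeurope.azure.elastic-cloud.com:9243"
    assert es_uuid == "c6637f312c5243cda7ded6e99e3d2c19"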
def gzip_decompress(data):
buf = gzip.GzipFile(fileobj=io.BytesIO(data), mode="rb")
return buf.read()
class TestBaseConnection(TestCase):
def test_parse_cloud_id(self):
# Embedded port in cloud_id
con = Connection(cloud_id=CLOUD_ID_PORT_AND_KIBANA)
self.assertEqual(
con.host,
"https://c6637f312c5243cda7ded6e99e3d2c19.westeurope.azure.elastic-cloud.com:9243",
)
self.assertEqual(con.port, 9243)
self.assertEqual(
con.hostname,
"c6637f312c5243cda7ded6e99e3d2c19.westeurope.azure.elastic-cloud.com",
)
# Embedded port but overridden
con = Connection(
cloud_id=CLOUD_ID_PORT_AND_KIBANA,
port=443,
)
self.assertEqual(
con.host,
"https://c6637f312c5243cda7ded6e99e3d2c19.westeurope.azure.elastic-cloud.com:443",
)
self.assertEqual(con.port, 443)
self.assertEqual(
con.hostname,
"c6637f312c5243cda7ded6e99e3d2c19.westeurope.azure.elastic-cloud.com",
)
# Port is 443, removed by default.
con = Connection(cloud_id=CLOUD_ID_PORT_443)
self.assertEqual(
con.host,
"https://e7de9f1345e4490283d903be5b6f919e.westeurope.azure.elastic-cloud.com",
)
self.assertEqual(con.port, None)
self.assertEqual(
con.hostname,
"e7de9f1345e4490283d903be5b6f919e.westeurope.azure.elastic-cloud.com",
)
# No port, contains Kibana UUID
con = Connection(cloud_id=CLOUD_ID_KIBANA)
self.assertEqual(
con.host,
"https://8af7ee35420f458e903026b4064081f2.westeurope.azure.elastic-cloud.com",
)
self.assertEqual(con.port, None)
self.assertEqual(
con.hostname,
"8af7ee35420f458e903026b4064081f2.westeurope.azure.elastic-cloud.com",
)
def test_empty_warnings(self):
con = Connection()
with warnings.catch_warnings(record=True) as w:
con._raise_warnings(())
con._raise_warnings([])
self.assertEqual(w, [])
def test_raises_warnings(self):
con = Connection()
with warnings.catch_warnings(record=True) as warn:
con._raise_warnings(['299 OpenSearch-7.6.1-aa751 "this is deprecated"'])
self.assertEqual([str(w.message) for w in warn], ["this is deprecated"])
with warnings.catch_warnings(record=True) as warn:
con._raise_warnings(
[
'299 OpenSearch-7.6.1-aa751 "this is also deprecated"',
'299 OpenSearch-7.6.1-aa751 "this is also deprecated"',
'299 OpenSearch-7.6.1-aa751 "guess what? deprecated"',
]
)
self.assertEqual(
[str(w.message) for w in warn],
["this is also deprecated", "guess what? deprecated"],
)
def test_raises_warnings_when_folded(self):
con = Connection()
with warnings.catch_warnings(record=True) as warn:
con._raise_warnings(
[
'299 OpenSearch-7.6.1-aa751 "warning",'
'299 OpenSearch-7.6.1-aa751 "folded"',
]
)
self.assertEqual([str(w.message) for w in warn], ["warning", "folded"])
def test_ipv6_host_and_port(self):
for kwargs, expected_host in [
({"host": "::1"}, "http://[::1]:9200"),
({"host": "::1", "port": 443}, "http://[::1]:443"),
({"host": "::1", "use_ssl": True}, "https://[::1]:9200"),
({"host": "127.0.0.1", "port": 1234}, "http://127.0.0.1:1234"),
({"host": "localhost", "use_ssl": True}, "https://localhost:9200"),
]:
conn = Connection(**kwargs)
assert conn.host == expected_host
def test_compatibility_accept_header(self):
try:
conn = Connection()
assert "accept" not in conn.headers
os.environ["ELASTIC_CLIENT_APIVERSIONING"] = "0"
conn = Connection()
assert "accept" not in conn.headers
os.environ["ELASTIC_CLIENT_APIVERSIONING"] = "1"
conn = Connection()
assert (
conn.headers["accept"]
== "application/vnd.elasticsearch+json;compatible-with=7"
)
finally:
os.environ.pop("ELASTIC_CLIENT_APIVERSIONING")
class TestUrllib3Connection(TestCase):
def _get_mock_connection(self, connection_params={}, response_body=b"{}"):
con = Urllib3HttpConnection(**connection_params)
def _dummy_urlopen(*args, **kwargs):
dummy_response = Mock()
dummy_response.headers = HTTPHeaderDict({})
dummy_response.status = 200
dummy_response.data = response_body
_dummy_urlopen.call_args = (args, kwargs)
return dummy_response
con.pool.urlopen = _dummy_urlopen
return con
def test_ssl_context(self):
try:
context = ssl.create_default_context()
except AttributeError:
# if create_default_context raises an AttributeError Exception
# it means SSLContext is not available for that version of python
# and we should skip this test.
            raise SkipTest(
                "Test test_ssl_context is skipped because SSLContext is not available for this version of Python"
            )
con = Urllib3HttpConnection(use_ssl=True, ssl_context=context)
self.assertEqual(len(con.pool.conn_kw.keys()), 1)
self.assertIsInstance(con.pool.conn_kw["ssl_context"], ssl.SSLContext)
self.assertTrue(con.use_ssl)
def test_opaque_id(self):
con = Urllib3HttpConnection(opaque_id="app-1")
self.assertEqual(con.headers["x-opaque-id"], "app-1")
def test_http_cloud_id(self):
con = Urllib3HttpConnection(
cloud_id="cluster:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5NyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5Ng=="
)
self.assertTrue(con.use_ssl)
self.assertEqual(
con.host, "https://4fa8821e75634032bed1cf22110e2f97.us-east-1.aws.found.io"
)
self.assertEqual(con.port, None)
self.assertEqual(
con.hostname, "4fa8821e75634032bed1cf22110e2f97.us-east-1.aws.found.io"
)
self.assertTrue(con.http_compress)
con = Urllib3HttpConnection(
cloud_id="cluster:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5NyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5Ng==",
port=9243,
)
self.assertEqual(
con.host,
"https://4fa8821e75634032bed1cf22110e2f97.us-east-1.aws.found.io:9243",
)
self.assertEqual(con.port, 9243)
self.assertEqual(
con.hostname, "4fa8821e75634032bed1cf22110e2f97.us-east-1.aws.found.io"
)
def test_api_key_auth(self):
# test with tuple
con = Urllib3HttpConnection(
cloud_id="cluster:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5NyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5Ng==",
api_key=("elastic", "changeme1"),
)
self.assertEqual(
con.headers["authorization"], "ApiKey ZWxhc3RpYzpjaGFuZ2VtZTE="
)
self.assertEqual(
con.host, "https://4fa8821e75634032bed1cf22110e2f97.us-east-1.aws.found.io"
)
# test with base64 encoded string
con = Urllib3HttpConnection(
cloud_id="cluster:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5NyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5Ng==",
api_key="ZWxhc3RpYzpjaGFuZ2VtZTI=",
)
self.assertEqual(
con.headers["authorization"], "ApiKey ZWxhc3RpYzpjaGFuZ2VtZTI="
)
self.assertEqual(
con.host, "https://4fa8821e75634032bed1cf22110e2f97.us-east-1.aws.found.io"
)
def test_no_http_compression(self):
con = self._get_mock_connection()
self.assertFalse(con.http_compress)
self.assertNotIn("accept-encoding", con.headers)
con.perform_request("GET", "/")
(_, _, req_body), kwargs = con.pool.urlopen.call_args
self.assertFalse(req_body)
self.assertNotIn("accept-encoding", kwargs["headers"])
self.assertNotIn("content-encoding", kwargs["headers"])
def test_http_compression(self):
con = self._get_mock_connection({"http_compress": True})
self.assertTrue(con.http_compress)
self.assertEqual(con.headers["accept-encoding"], "gzip,deflate")
# 'content-encoding' shouldn't be set at a connection level.
# Should be applied only if the request is sent with a body.
self.assertNotIn("content-encoding", con.headers)
con.perform_request("GET", "/", body=b"{}")
(_, _, req_body), kwargs = con.pool.urlopen.call_args
self.assertEqual(gzip_decompress(req_body), b"{}")
self.assertEqual(kwargs["headers"]["accept-encoding"], "gzip,deflate")
self.assertEqual(kwargs["headers"]["content-encoding"], "gzip")
con.perform_request("GET", "/")
(_, _, req_body), kwargs = con.pool.urlopen.call_args
self.assertFalse(req_body)
self.assertEqual(kwargs["headers"]["accept-encoding"], "gzip,deflate")
self.assertNotIn("content-encoding", kwargs["headers"])
def test_cloud_id_http_compress_override(self):
# 'http_compress' will be 'True' by default for connections with
# 'cloud_id' set but should prioritize user-defined values.
con = Urllib3HttpConnection(
cloud_id="cluster:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5NyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5Ng==",
)
self.assertEqual(con.http_compress, True)
con = Urllib3HttpConnection(
cloud_id="cluster:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5NyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5Ng==",
http_compress=False,
)
self.assertEqual(con.http_compress, False)
con = Urllib3HttpConnection(
cloud_id="cluster:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5NyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5Ng==",
http_compress=True,
)
self.assertEqual(con.http_compress, True)
def test_default_user_agent(self):
con = Urllib3HttpConnection()
self.assertEqual(
con._get_default_user_agent(),
"opensearch-py/%s (Python %s)" % (__versionstr__, python_version()),
)
def test_timeout_set(self):
con = Urllib3HttpConnection(timeout=42)
self.assertEqual(42, con.timeout)
def test_keep_alive_is_on_by_default(self):
con = Urllib3HttpConnection()
self.assertEqual(
{
"connection": "keep-alive",
"content-type": "application/json",
"user-agent": con._get_default_user_agent(),
},
con.headers,
)
def test_http_auth(self):
con = Urllib3HttpConnection(http_auth="username:secret")
self.assertEqual(
{
"authorization": "Basic dXNlcm5hbWU6c2VjcmV0",
"connection": "keep-alive",
"content-type": "application/json",
"user-agent": con._get_default_user_agent(),
},
con.headers,
)
def test_http_auth_tuple(self):
con = Urllib3HttpConnection(http_auth=("username", "secret"))
self.assertEqual(
{
"authorization": "Basic dXNlcm5hbWU6c2VjcmV0",
"content-type": "application/json",
"connection": "keep-alive",
"user-agent": con._get_default_user_agent(),
},
con.headers,
)
def test_http_auth_list(self):
con = Urllib3HttpConnection(http_auth=["username", "secret"])
self.assertEqual(
{
"authorization": "Basic dXNlcm5hbWU6c2VjcmV0",
"content-type": "application/json",
"connection": "keep-alive",
"user-agent": con._get_default_user_agent(),
},
con.headers,
)
def test_uses_https_if_verify_certs_is_off(self):
with warnings.catch_warnings(record=True) as w:
con = Urllib3HttpConnection(use_ssl=True, verify_certs=False)
self.assertEqual(1, len(w))
self.assertEqual(
"Connecting to https://localhost:9200 using SSL with verify_certs=False is insecure.",
str(w[0].message),
)
self.assertIsInstance(con.pool, urllib3.HTTPSConnectionPool)
def test_nowarn_when_uses_https_if_verify_certs_is_off(self):
with warnings.catch_warnings(record=True) as w:
con = Urllib3HttpConnection(
use_ssl=True, verify_certs=False, ssl_show_warn=False
)
self.assertEqual(0, len(w))
self.assertIsInstance(con.pool, urllib3.HTTPSConnectionPool)
def test_doesnt_use_https_if_not_specified(self):
con = Urllib3HttpConnection()
self.assertIsInstance(con.pool, urllib3.HTTPConnectionPool)
def test_no_warning_when_using_ssl_context(self):
ctx = ssl.create_default_context()
with warnings.catch_warnings(record=True) as w:
Urllib3HttpConnection(ssl_context=ctx)
self.assertEqual(0, len(w))
def test_warns_if_using_non_default_ssl_kwargs_with_ssl_context(self):
for kwargs in (
{"ssl_show_warn": False},
{"ssl_show_warn": True},
{"verify_certs": True},
{"verify_certs": False},
{"ca_certs": "/path/to/certs"},
{"ssl_show_warn": True, "ca_certs": "/path/to/certs"},
):
kwargs["ssl_context"] = ssl.create_default_context()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
Urllib3HttpConnection(**kwargs)
self.assertEqual(1, len(w))
self.assertEqual(
"When using `ssl_context`, all other SSL related kwargs are ignored",
str(w[0].message),
)
@patch("opensearchpy.connection.base.logger")
def test_uncompressed_body_logged(self, logger):
con = self._get_mock_connection(connection_params={"http_compress": True})
con.perform_request("GET", "/", body=b'{"example": "body"}')
self.assertEqual(2, logger.debug.call_count)
req, resp = logger.debug.call_args_list
self.assertEqual('> {"example": "body"}', req[0][0] % req[0][1:])
self.assertEqual("< {}", resp[0][0] % resp[0][1:])
def test_surrogatepass_into_bytes(self):
buf = b"\xe4\xbd\xa0\xe5\xa5\xbd\xed\xa9\xaa"
con = self._get_mock_connection(response_body=buf)
status, headers, data = con.perform_request("GET", "/")
self.assertEqual(u"你好\uda6a", data)
@pytest.mark.skipif(
not reraise_exceptions, reason="RecursionError isn't defined in Python <3.5"
)
def test_recursion_error_reraised(self):
conn = Urllib3HttpConnection()
def urlopen_raise(*_, **__):
raise RecursionError("Wasn't modified!")
conn.pool.urlopen = urlopen_raise
with pytest.raises(RecursionError) as e:
conn.perform_request("GET", "/")
assert str(e.value) == "Wasn't modified!"
class TestRequestsConnection(TestCase):
def _get_mock_connection(
self, connection_params={}, status_code=200, response_body=b"{}"
):
con = RequestsHttpConnection(**connection_params)
def _dummy_send(*args, **kwargs):
dummy_response = Mock()
dummy_response.headers = {}
dummy_response.status_code = status_code
dummy_response.content = response_body
dummy_response.request = args[0]
dummy_response.cookies = {}
_dummy_send.call_args = (args, kwargs)
return dummy_response
con.session.send = _dummy_send
return con
def _get_request(self, connection, *args, **kwargs):
if "body" in kwargs:
kwargs["body"] = kwargs["body"].encode("utf-8")
status, headers, data = connection.perform_request(*args, **kwargs)
self.assertEqual(200, status)
self.assertEqual("{}", data)
timeout = kwargs.pop("timeout", connection.timeout)
args, kwargs = connection.session.send.call_args
self.assertEqual(timeout, kwargs["timeout"])
self.assertEqual(1, len(args))
return args[0]
def test_custom_http_auth_is_allowed(self):
auth = AuthBase()
c = RequestsHttpConnection(http_auth=auth)
self.assertEqual(auth, c.session.auth)
def test_timeout_set(self):
con = RequestsHttpConnection(timeout=42)
self.assertEqual(42, con.timeout)
def test_opaque_id(self):
con = RequestsHttpConnection(opaque_id="app-1")
self.assertEqual(con.headers["x-opaque-id"], "app-1")
def test_http_cloud_id(self):
con = RequestsHttpConnection(
cloud_id="cluster:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5NyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5Ng=="
)
self.assertTrue(con.use_ssl)
self.assertEqual(
con.host, "https://4fa8821e75634032bed1cf22110e2f97.us-east-1.aws.found.io"
)
self.assertEqual(con.port, None)
self.assertEqual(
con.hostname, "4fa8821e75634032bed1cf22110e2f97.us-east-1.aws.found.io"
)
self.assertTrue(con.http_compress)
con = RequestsHttpConnection(
cloud_id="cluster:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5NyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5Ng==",
port=9243,
)
self.assertEqual(
con.host,
"https://4fa8821e75634032bed1cf22110e2f97.us-east-1.aws.found.io:9243",
)
self.assertEqual(con.port, 9243)
self.assertEqual(
con.hostname, "4fa8821e75634032bed1cf22110e2f97.us-east-1.aws.found.io"
)
def test_api_key_auth(self):
# test with tuple
con = RequestsHttpConnection(
cloud_id="cluster:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5NyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5Ng==",
api_key=("elastic", "changeme1"),
)
self.assertEqual(
con.session.headers["authorization"], "ApiKey ZWxhc3RpYzpjaGFuZ2VtZTE="
)
self.assertEqual(
con.host, "https://4fa8821e75634032bed1cf22110e2f97.us-east-1.aws.found.io"
)
# test with base64 encoded string
con = RequestsHttpConnection(
cloud_id="cluster:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5NyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5Ng==",
api_key="ZWxhc3RpYzpjaGFuZ2VtZTI=",
)
self.assertEqual(
con.session.headers["authorization"], "ApiKey ZWxhc3RpYzpjaGFuZ2VtZTI="
)
self.assertEqual(
con.host, "https://4fa8821e75634032bed1cf22110e2f97.us-east-1.aws.found.io"
)
def test_no_http_compression(self):
con = self._get_mock_connection()
self.assertFalse(con.http_compress)
self.assertNotIn("content-encoding", con.session.headers)
con.perform_request("GET", "/")
req = con.session.send.call_args[0][0]
self.assertNotIn("content-encoding", req.headers)
self.assertNotIn("accept-encoding", req.headers)
def test_http_compression(self):
con = self._get_mock_connection(
{"http_compress": True},
)
self.assertTrue(con.http_compress)
# 'content-encoding' shouldn't be set at a session level.
# Should be applied only if the request is sent with a body.
self.assertNotIn("content-encoding", con.session.headers)
con.perform_request("GET", "/", body=b"{}")
req = con.session.send.call_args[0][0]
self.assertEqual(req.headers["content-encoding"], "gzip")
self.assertEqual(req.headers["accept-encoding"], "gzip,deflate")
con.perform_request("GET", "/")
req = con.session.send.call_args[0][0]
self.assertNotIn("content-encoding", req.headers)
self.assertEqual(req.headers["accept-encoding"], "gzip,deflate")
def test_cloud_id_http_compress_override(self):
# 'http_compress' will be 'True' by default for connections with
# 'cloud_id' set but should prioritize user-defined values.
con = RequestsHttpConnection(
cloud_id="cluster:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5NyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5Ng==",
)
self.assertEqual(con.http_compress, True)
con = RequestsHttpConnection(
cloud_id="cluster:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5NyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5Ng==",
http_compress=False,
)
self.assertEqual(con.http_compress, False)
con = RequestsHttpConnection(
cloud_id="cluster:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5NyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5Ng==",
http_compress=True,
)
self.assertEqual(con.http_compress, True)
def test_uses_https_if_verify_certs_is_off(self):
with warnings.catch_warnings(record=True) as w:
con = self._get_mock_connection(
{"use_ssl": True, "url_prefix": "url", "verify_certs": False}
)
self.assertEqual(1, len(w))
self.assertEqual(
"Connecting to https://localhost:9200 using SSL with verify_certs=False is insecure.",
str(w[0].message),
)
request = self._get_request(con, "GET", "/")
self.assertEqual("https://localhost:9200/url/", request.url)
self.assertEqual("GET", request.method)
self.assertEqual(None, request.body)
def test_nowarn_when_uses_https_if_verify_certs_is_off(self):
with warnings.catch_warnings(record=True) as w:
con = self._get_mock_connection(
{
"use_ssl": True,
"url_prefix": "url",
"verify_certs": False,
"ssl_show_warn": False,
}
)
self.assertEqual(0, len(w))
request = self._get_request(con, "GET", "/")
self.assertEqual("https://localhost:9200/url/", request.url)
self.assertEqual("GET", request.method)
self.assertEqual(None, request.body)
def test_merge_headers(self):
con = self._get_mock_connection(
connection_params={"headers": {"h1": "v1", "h2": "v2"}}
)
req = self._get_request(con, "GET", "/", headers={"h2": "v2p", "h3": "v3"})
self.assertEqual(req.headers["h1"], "v1")
self.assertEqual(req.headers["h2"], "v2p")
self.assertEqual(req.headers["h3"], "v3")
def test_default_headers(self):
con = self._get_mock_connection()
req = self._get_request(con, "GET", "/")
self.assertEqual(req.headers["content-type"], "application/json")
self.assertEqual(req.headers["user-agent"], con._get_default_user_agent())
def test_custom_headers(self):
con = self._get_mock_connection()
req = self._get_request(
con,
"GET",
"/",
headers={
"content-type": "application/x-ndjson",
"user-agent": "custom-agent/1.2.3",
},
)
self.assertEqual(req.headers["content-type"], "application/x-ndjson")
self.assertEqual(req.headers["user-agent"], "custom-agent/1.2.3")
def test_http_auth(self):
con = RequestsHttpConnection(http_auth="username:secret")
self.assertEqual(("username", "secret"), con.session.auth)
def test_http_auth_tuple(self):
con = RequestsHttpConnection(http_auth=("username", "secret"))
self.assertEqual(("username", "secret"), con.session.auth)
def test_http_auth_list(self):
con = RequestsHttpConnection(http_auth=["username", "secret"])
self.assertEqual(("username", "secret"), con.session.auth)
def test_repr(self):
con = self._get_mock_connection({"host": "opensearchpy.com", "port": 443})
self.assertEqual(
"<RequestsHttpConnection: http://opensearchpy.com:443>", repr(con)
)
def test_conflict_error_is_returned_on_409(self):
con = self._get_mock_connection(status_code=409)
self.assertRaises(ConflictError, con.perform_request, "GET", "/", {}, "")
def test_not_found_error_is_returned_on_404(self):
con = self._get_mock_connection(status_code=404)
self.assertRaises(NotFoundError, con.perform_request, "GET", "/", {}, "")
def test_request_error_is_returned_on_400(self):
con = self._get_mock_connection(status_code=400)
self.assertRaises(RequestError, con.perform_request, "GET", "/", {}, "")
@patch("opensearchpy.connection.base.logger")
def test_head_with_404_doesnt_get_logged(self, logger):
con = self._get_mock_connection(status_code=404)
self.assertRaises(NotFoundError, con.perform_request, "HEAD", "/", {}, "")
self.assertEqual(0, logger.warning.call_count)
@patch("opensearchpy.connection.base.tracer")
@patch("opensearchpy.connection.base.logger")
def test_failed_request_logs_and_traces(self, logger, tracer):
con = self._get_mock_connection(
response_body=b'{"answer": 42}', status_code=500
)
self.assertRaises(
TransportError,
con.perform_request,
"GET",
"/",
{"param": 42},
"{}".encode("utf-8"),
)
# trace request
self.assertEqual(1, tracer.info.call_count)
# trace response
self.assertEqual(1, tracer.debug.call_count)
# log url and duration
self.assertEqual(1, logger.warning.call_count)
self.assertTrue(
re.match(
r"^GET http://localhost:9200/\?param=42 \[status:500 request:0.[0-9]{3}s\]",
logger.warning.call_args[0][0] % logger.warning.call_args[0][1:],
)
)
@patch("opensearchpy.connection.base.tracer")
@patch("opensearchpy.connection.base.logger")
def test_success_logs_and_traces(self, logger, tracer):
con = self._get_mock_connection(response_body=b"""{"answer": "that's it!"}""")
status, headers, data = con.perform_request(
"GET",
"/",
{"param": 42},
"""{"question": "what's that?"}""".encode("utf-8"),
)
# trace request
self.assertEqual(1, tracer.info.call_count)
self.assertEqual(
"""curl -H 'Content-Type: application/json' -XGET 'http://localhost:9200/?pretty¶m=42' -d '{\n "question": "what\\u0027s that?"\n}'""",
tracer.info.call_args[0][0] % tracer.info.call_args[0][1:],
)
# trace response
self.assertEqual(1, tracer.debug.call_count)
self.assertTrue(
re.match(
r'#\[200\] \(0.[0-9]{3}s\)\n#{\n# "answer": "that\\u0027s it!"\n#}',
tracer.debug.call_args[0][0] % tracer.debug.call_args[0][1:],
)
)
# log url and duration
self.assertEqual(1, logger.info.call_count)
self.assertTrue(
re.match(
r"GET http://localhost:9200/\?param=42 \[status:200 request:0.[0-9]{3}s\]",
logger.info.call_args[0][0] % logger.info.call_args[0][1:],
)
)
# log request body and response
self.assertEqual(2, logger.debug.call_count)
req, resp = logger.debug.call_args_list
self.assertEqual('> {"question": "what\'s that?"}', req[0][0] % req[0][1:])
self.assertEqual('< {"answer": "that\'s it!"}', resp[0][0] % resp[0][1:])
@patch("opensearchpy.connection.base.logger")
def test_uncompressed_body_logged(self, logger):
con = self._get_mock_connection(connection_params={"http_compress": True})
con.perform_request("GET", "/", body=b'{"example": "body"}')
self.assertEqual(2, logger.debug.call_count)
req, resp = logger.debug.call_args_list
self.assertEqual('> {"example": "body"}', req[0][0] % req[0][1:])
self.assertEqual("< {}", resp[0][0] % resp[0][1:])
con = self._get_mock_connection(
connection_params={"http_compress": True},
status_code=500,
response_body=b'{"hello":"world"}',
)
with pytest.raises(TransportError):
con.perform_request("GET", "/", body=b'{"example": "body2"}')
self.assertEqual(4, logger.debug.call_count)
_, _, req, resp = logger.debug.call_args_list
self.assertEqual('> {"example": "body2"}', req[0][0] % req[0][1:])
self.assertEqual('< {"hello":"world"}', resp[0][0] % resp[0][1:])
def test_defaults(self):
con = self._get_mock_connection()
request = self._get_request(con, "GET", "/")
self.assertEqual("http://localhost:9200/", request.url)
self.assertEqual("GET", request.method)
self.assertEqual(None, request.body)
def test_params_properly_encoded(self):
con = self._get_mock_connection()
request = self._get_request(
con, "GET", "/", params={"param": "value with spaces"}
)
self.assertEqual("http://localhost:9200/?param=value+with+spaces", request.url)
self.assertEqual("GET", request.method)
self.assertEqual(None, request.body)
def test_body_attached(self):
con = self._get_mock_connection()
request = self._get_request(con, "GET", "/", body='{"answer": 42}')
self.assertEqual("http://localhost:9200/", request.url)
self.assertEqual("GET", request.method)
self.assertEqual('{"answer": 42}'.encode("utf-8"), request.body)
def test_http_auth_attached(self):
con = self._get_mock_connection({"http_auth": "username:secret"})
request = self._get_request(con, "GET", "/")
self.assertEqual(request.headers["authorization"], "Basic dXNlcm5hbWU6c2VjcmV0")
@patch("opensearchpy.connection.base.tracer")
def test_url_prefix(self, tracer):
con = self._get_mock_connection({"url_prefix": "/some-prefix/"})
request = self._get_request(
con, "GET", "/_search", body='{"answer": 42}', timeout=0.1
)
self.assertEqual("http://localhost:9200/some-prefix/_search", request.url)
self.assertEqual("GET", request.method)
self.assertEqual('{"answer": 42}'.encode("utf-8"), request.body)
# trace request
self.assertEqual(1, tracer.info.call_count)
self.assertEqual(
"curl -H 'Content-Type: application/json' -XGET 'http://localhost:9200/_search?pretty' -d '{\n \"answer\": 42\n}'",
tracer.info.call_args[0][0] % tracer.info.call_args[0][1:],
)
def test_surrogatepass_into_bytes(self):
buf = b"\xe4\xbd\xa0\xe5\xa5\xbd\xed\xa9\xaa"
con = self._get_mock_connection(response_body=buf)
status, headers, data = con.perform_request("GET", "/")
self.assertEqual(u"你好\uda6a", data)
@pytest.mark.skipif(
not reraise_exceptions, reason="RecursionError isn't defined in Python <3.5"
)
def test_recursion_error_reraised(self):
conn = RequestsHttpConnection()
def send_raise(*_, **__):
raise RecursionError("Wasn't modified!")
conn.session.send = send_raise
with pytest.raises(RecursionError) as e:
conn.perform_request("GET", "/")
assert str(e.value) == "Wasn't modified!"
class TestConnectionHttpbin:
"""Tests the HTTP connection implementations against a live server E2E"""
def httpbin_anything(self, conn, **kwargs):
status, headers, data = conn.perform_request("GET", "/anything", **kwargs)
data = json.loads(data)
data["headers"].pop(
"X-Amzn-Trace-Id", None
) # Remove this header as it's put there by AWS.
return (status, data)
def test_urllib3_connection(self):
# Defaults
conn = Urllib3HttpConnection("httpbin.org", port=443, use_ssl=True)
user_agent = conn._get_default_user_agent()
status, data = self.httpbin_anything(conn)
assert status == 200
assert data["method"] == "GET"
assert data["headers"] == {
"Accept-Encoding": "identity",
"Content-Type": "application/json",
"Host": "httpbin.org",
"User-Agent": user_agent,
}
# http_compress=False
conn = Urllib3HttpConnection(
"httpbin.org", port=443, use_ssl=True, http_compress=False
)
status, data = self.httpbin_anything(conn)
assert status == 200
assert data["method"] == "GET"
assert data["headers"] == {
"Accept-Encoding": "identity",
"Content-Type": "application/json",
"Host": "httpbin.org",
"User-Agent": user_agent,
}
# http_compress=True
conn = Urllib3HttpConnection(
"httpbin.org", port=443, use_ssl=True, http_compress=True
)
status, data = self.httpbin_anything(conn)
assert status == 200
assert data["headers"] == {
"Accept-Encoding": "gzip,deflate",
"Content-Type": "application/json",
"Host": "httpbin.org",
"User-Agent": user_agent,
}
# Headers
conn = Urllib3HttpConnection(
"httpbin.org",
port=443,
use_ssl=True,
http_compress=True,
headers={"header1": "value1"},
)
status, data = self.httpbin_anything(
conn, headers={"header2": "value2", "header1": "override!"}
)
assert status == 200
assert data["headers"] == {
"Accept-Encoding": "gzip,deflate",
"Content-Type": "application/json",
"Host": "httpbin.org",
"Header1": "override!",
"Header2": "value2",
"User-Agent": user_agent,
}
def test_urllib3_connection_error(self):
conn = Urllib3HttpConnection("not.a.host.name")
with pytest.raises(ConnectionError):
conn.perform_request("GET", "/")
def test_requests_connection(self):
# Defaults
conn = RequestsHttpConnection("httpbin.org", port=443, use_ssl=True)
user_agent = conn._get_default_user_agent()
status, data = self.httpbin_anything(conn)
assert status == 200
assert data["method"] == "GET"
assert data["headers"] == {
"Accept-Encoding": "identity",
"Content-Type": "application/json",
"Host": "httpbin.org",
"User-Agent": user_agent,
}
# http_compress=False
conn = RequestsHttpConnection(
"httpbin.org", port=443, use_ssl=True, http_compress=False
)
status, data = self.httpbin_anything(conn)
assert status == 200
assert data["method"] == "GET"
assert data["headers"] == {
"Accept-Encoding": "identity",
"Content-Type": "application/json",
"Host": "httpbin.org",
"User-Agent": user_agent,
}
# http_compress=True
conn = RequestsHttpConnection(
"httpbin.org", port=443, use_ssl=True, http_compress=True
)
status, data = self.httpbin_anything(conn)
assert status == 200
assert data["headers"] == {
"Accept-Encoding": "gzip,deflate",
"Content-Type": "application/json",
"Host": "httpbin.org",
"User-Agent": user_agent,
}
# Headers
conn = RequestsHttpConnection(
"httpbin.org",
port=443,
use_ssl=True,
http_compress=True,
headers={"header1": "value1"},
)
status, data = self.httpbin_anything(
conn, headers={"header2": "value2", "header1": "override!"}
)
assert status == 200
assert data["headers"] == {
"Accept-Encoding": "gzip,deflate",
"Content-Type": "application/json",
"Host": "httpbin.org",
"Header1": "override!",
"Header2": "value2",
"User-Agent": user_agent,
}
def test_requests_connection_error(self):
conn = RequestsHttpConnection("not.a.host.name")
with pytest.raises(ConnectionError):
conn.perform_request("GET", "/")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Sprites display
import harfang as hg
class Sprite:
tex0_program = None
spr_render_state = None
spr_model = None
vs_pos_tex0_decl = None
@classmethod
def init_system(cls):
cls.tex0_program = hg.LoadProgramFromAssets("shaders/sprite.vsb", "shaders/sprite.fsb")
cls.vs_pos_tex0_decl = hg.VertexLayout()
cls.vs_pos_tex0_decl.Begin()
cls.vs_pos_tex0_decl.Add(hg.A_Position, 3, hg.AT_Float)
cls.vs_pos_tex0_decl.Add(hg.A_TexCoord0, 3, hg.AT_Float)
cls.vs_pos_tex0_decl.End()
cls.spr_model = hg.CreatePlaneModel(cls.vs_pos_tex0_decl, 1, 1, 1, 1)
cls.spr_render_state = hg.ComputeRenderState(hg.BM_Alpha, hg.DT_Disabled, hg.FC_Disabled)
cls.vr_size = None
cls.vr_distance = 1
@classmethod
def setup_matrix_sprites2D(cls, vid, resolution: hg.Vec2):
vs = hg.ComputeOrthographicViewState(hg.TranslationMat4(hg.Vec3(resolution.x / 2, resolution.y / 2, 0)), resolution.y, 0.1, 100, hg.Vec2(resolution.x / resolution.y, 1))
hg.SetViewTransform(vid, vs.view, vs.proj)
def __init__(self, w, h, texture_path):
self.width = w
self.height = h
self.texture_path = texture_path
self.texture = hg.LoadTextureFromAssets(self.texture_path, 0)[0]
self.texture_uniform = hg.MakeUniformSetTexture("s_tex", self.texture, 0)
self.color = hg.Color(1, 1, 1, 1)
self.uniform_set_value_list = hg.UniformSetValueList()
self.uniform_set_texture_list = hg.UniformSetTextureList()
self.uniform_set_texture_list.push_back(self.texture_uniform)
self.color_set_value = hg.MakeUniformSetValue("color", hg.Vec4(self.color.r, self.color.g, self.color.b, self.color.a))
self.uv_scale = hg.Vec2(1, 1)
self.uv_scale_set_value = hg.MakeUniformSetValue("uv_scale", hg.Vec4(self.uv_scale.x, self.uv_scale.y, 0, 0))
self.position = hg.Vec3(0, 0, 2)
self.scale = hg.Vec3(self.width, 1, self.height)
self.rotation = hg.Vec3(0, 0, 0)
self.size = 1
def compute_matrix(self):
return hg.TransformationMat4(self.position, self.rotation) * hg.TransformationMat4(hg.Vec3(0, 0, 0), hg.Vec3(hg.Deg(90), 0, 0), self.scale * self.size)
def set_position(self, x, y):
self.position.x = x
self.position.y = y
def set_uv_scale(self, uv_scale: hg.Vec2):
self.uv_scale = uv_scale
self.uv_scale_set_value = hg.MakeUniformSetValue("uv_scale", hg.Vec4(self.uv_scale.x, self.uv_scale.y, 0, 0))
def set_size(self, size):
self.size = size
def set_color(self, color: hg.Color):
self.color = color
self.color_set_value = hg.MakeUniformSetValue("color", hg.Vec4(self.color.r, self.color.g, self.color.b, self.color.a))
def draw(self, v_id):
self.uniform_set_value_list.clear()
self.uniform_set_value_list.push_back(self.color_set_value)
self.uniform_set_value_list.push_back(self.uv_scale_set_value)
matrix = self.compute_matrix()
hg.DrawModel(v_id, Sprite.spr_model, Sprite.tex0_program, self.uniform_set_value_list, self.uniform_set_texture_list, matrix, Sprite.spr_render_state)
def draw_vr(self, v_id, vr_matrix, resolution, vr_hud):
pos_vr = hg.Vec3((self.position.x / resolution.x - 0.5) * vr_hud.x, (self.position.y / resolution.y - 0.5) * vr_hud.y, vr_hud.z)
scale_vr = hg.Vec3(self.scale.x / resolution.x * vr_hud.x, 1, self.scale.z / resolution.y * vr_hud.y)
matrix = vr_matrix * hg.TransformationMat4(pos_vr, self.rotation) * hg.TransformationMat4(hg.Vec3(0, 0, 0), hg.Vec3(hg.Deg(90), 0, 0), scale_vr * self.size)
self.uniform_set_value_list.clear()
self.uniform_set_value_list.push_back(self.color_set_value)
self.uniform_set_value_list.push_back(self.uv_scale_set_value)
hg.DrawModel(v_id, Sprite.spr_model, Sprite.tex0_program, self.uniform_set_value_list, self.uniform_set_texture_list, matrix, Sprite.spr_render_state)
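# A minimal usage sketch (hypothetical): assumes a Harfang window/pipeline is
# already initialized, assets are mounted, and `vid` is a valid view id; the
# texture path and resolution are illustrative.
#
#   Sprite.init_system()
#   logo = Sprite(256, 128, "sprites/logo.png")
#   Sprite.setup_matrix_sprites2D(vid, hg.Vec2(1280, 720))
#   logo.set_position(640, 360)
#   logo.draw(vid)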
|
nilq/baby-python
|
python
|
from transformers import AutoModelForSequenceClassification, AutoTokenizer
import torch
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
from transformers import glue_processors as processors
from transformers import glue_output_modes as output_modes
from transformers import glue_convert_examples_to_features as convert_examples_to_features
import os
import logging
import numpy as np
import random
from argparse import Namespace
from collections import defaultdict
logger = logging.getLogger(__name__)
def main():
model = AutoModelForSequenceClassification.from_pretrained("/home/jovyan/working/class_projects/nlp_11711_project/revisit-bert-finetuning/replicate/bert_output/model_test/reinit_debiased/RTE/SEED0/checkpoint-last")
test_set_path = "/home/jovyan/working/class_projects/nlp_11711_project/bert_finetuning_test/glue/glue_data/RTE/test.tsv"
device = torch.device("cuda")
tokenizer = AutoTokenizer.from_pretrained(
'bert-large-uncased',
do_lower_case=True,
cache_dir='/home/jovyan/working/class_projects/nlp_11711_project/bert_finetuning_test/cache',
)
args = Namespace(
local_rank=-1,
data_dir='/home/jovyan/working/class_projects/nlp_11711_project/bert_finetuning_test/glue/glue_data/RTE',
resplit_val=0,
model_name_or_path='bert-large-uncased',
max_seq_length=128,
overwrite_cache=False,
model_type="bert",
downsample_trainset=-1,
seed=0
)
task = "rte"
eval_dataset = load_and_cache_examples(args, task, tokenizer, evaluate=True)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=64)
for batch in eval_dataloader:
batch = tuple(t.to(device) for t in batch)
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3],
}
model(**inputs)
print("done")
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def load_and_cache_examples(args, task, tokenizer, evaluate=False):
if args.local_rank not in [-1, 0] and not evaluate:
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
processor = processors[task]()
output_mode = output_modes[task]
# Load data features from cache or dataset file
cached_features_file = os.path.join(
args.data_dir,
"cached_{}_{}_{}_{}".format(
"dev" if (evaluate and args.resplit_val <= 0) else "train",
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length),
str(task),
),
)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
label_list = processor.get_labels()
if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta", "xlmroberta"]:
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
examples = (
processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir)
)
features = convert_examples_to_features(
examples,
tokenizer,
label_list=label_list,
max_length=args.max_seq_length,
output_mode=output_mode,
pad_on_left=bool(args.model_type in ["xlnet"]), # pad on the left for xlnet
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0 and not evaluate:
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
if args.downsample_trainset > 0 and not evaluate:
assert (args.downsample_trainset + args.resplit_val) <= len(features)
if args.downsample_trainset > 0 or args.resplit_val > 0:
set_seed(0) # use the same seed for downsample
if output_mode == "classification":
label_to_idx = defaultdict(list)
for i, f in enumerate(features):
label_to_idx[f.label].append(i)
samples_per_class = args.resplit_val if evaluate else args.downsample_trainset
samples_per_class = samples_per_class // len(label_to_idx)
for k in label_to_idx:
label_to_idx[k] = np.array(label_to_idx[k])
np.random.shuffle(label_to_idx[k])
if evaluate:
if args.resplit_val > 0:
label_to_idx[k] = label_to_idx[k][-samples_per_class:]
else:
pass
else:
if args.resplit_val > 0 and args.downsample_trainset <= 0:
samples_per_class = len(label_to_idx[k]) - args.resplit_val // len(label_to_idx)
label_to_idx[k] = label_to_idx[k][:samples_per_class]
sampled_idx = np.concatenate(list(label_to_idx.values()))
else:
if args.downsample_trainset > 0:
sampled_idx = torch.randperm(len(features))[: args.downsample_trainset]
else:
raise NotImplementedError
set_seed(args.seed)
features = [features[i] for i in sampled_idx]
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
if output_mode == "classification":
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
elif output_mode == "regression":
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
return dataset
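# For the Namespace built in main() above, the cache path resolves to the dev
# split (evaluate=True, resplit_val=0):
#   <data_dir>/cached_dev_bert-large-uncased_128_rte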
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
''' watchsnmp.py
set snmp device(s) to monitor
set snmp oid list to monitor
get snmp info from devices using oid list
save snmp data
read saved snmp data
diff from prev snmp data
graph snmp data
send alert email
'''
from snmp_helper import snmp_get_oid_v3,snmp_extract
from watchdata import WatchData
import time
ip='184.105.247.70'
port=161
devicename='pynet-rtr1'
device=(ip,port)
a_user='pysnmp'
auth_key='galileo1'
encrypt_key='galileo1'
snmp_user=(a_user, auth_key, encrypt_key)
filename='snmpdata.dat'
polltime=300
endtime=3600
debugflag=True
oidlist=[
('ifDescr_fa4', '1.3.6.1.2.1.2.2.1.2.5'),
('ifInOctets_fa4', '1.3.6.1.2.1.2.2.1.10.5'),
('ifInUcastPkts_fa4', '1.3.6.1.2.1.2.2.1.11.5'),
('ifOutOctets_fa4', '1.3.6.1.2.1.2.2.1.16.5'),
('ifOutUcastPkts_fa4', '1.3.6.1.2.1.2.2.1.17.5')
]
''' data structures:
oid_nums [oid1, oid2, ...]
oid_names [oid1name, oid2name, ...]
oid_sets [ [oid1name, oid1], [oid2name, oid2], ...]
Uses the "WatchData" class
polltime (single value in seconds)
device IP list
Note that first reading is the initial values,
not graphed or reported if using differences
'''
def debug(msg):
    if debugflag:
        # str() so non-string values (e.g. lists) can be logged too
        print("Debug: " + str(msg))
watchobj=WatchData(filename,oidlist,debugflag=debugflag)
# polling loop
timer=0
while timer <= endtime:
# gather data
valuelist=[]
    for (oidname, oid) in oidlist:
        snmp_data = snmp_get_oid_v3(device, snmp_user, oid)
        data = snmp_extract(snmp_data)
        debug("valuelist before:")
        debug(valuelist)
        debug(oidname + " " + oid + " " + data)
        valuelist.append(data)
watchobj.add(valuelist)
    time.sleep(polltime)
    timer += polltime
|
nilq/baby-python
|
python
|
from . import mod_process
from . import sn_constant
from . import sn_phossite
from . import sn_result
from . import sn_utils
from .sn_lib import SpectronautLibrary
|
nilq/baby-python
|
python
|
import re
import logging
import munch
from . import shell
import os
LSPCI_D_REGEX = re.compile(r"(([0-9a-f]{4}):([0-9a-f]{2}):([0-9a-f]{2})\.([0-9a-f]))\s*")
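# Example `lspci -D` line (illustrative) and what the regex captures:
#   "0000:01:00.0 VGA compatible controller: NVIDIA Corporation ..."
# match.groups() -> ("0000:01:00.0", "0000", "01", "00", "0")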
class Device(munch.Munch):
def __init__(self, domain, bus, slot, function, info):
super().__init__(dict(domain=domain,
bus=bus,
slot=slot,
function=function,
info=info))
@staticmethod
def from_full_address(address):
match = LSPCI_D_REGEX.match(address)
if match is None:
raise Exception("Address %s is not a pci address" % address)
pci_info = match.groups()
info = device_info(address)
        return Device(pci_info[1], pci_info[2], pci_info[3], pci_info[4], info=info)
@property
def full_address(self):
return "%s:%s:%s.%s" % (self.domain, self.bus, self.slot, self.function)
def local_nvidia():
output = shell.run_cmd("sudo lspci -D").split('\n')
logging.debug("parsing lspci %s", output)
return parse_nvidia_lspci_output(output)
def device_info(pci_address):
info = {}
info_files = ("current_link_speed", "max_link_speed", "max_link_width",
"current_link_width", "local_cpulist")
for info_name in info_files:
with open("/sys/bus/pci/devices/%s/%s" % (pci_address, info_name)) as f:
info[info_name] = f.read().strip()
return info
def parse_nvidia_lspci_output(lspci_output):
nvidia_devices = {}
for line in lspci_output:
ignore_case_line = line.lower()
# check if this is an nvidia device but not sound device
if "nvidia" in ignore_case_line and "audio" not in ignore_case_line:
bus_function = LSPCI_D_REGEX.match(ignore_case_line)
if not bus_function:
logging.error("Unexpected output from pci device %s", line)
continue
pci_device_string = bus_function.groups()[0]
domain = bus_function.groups()[1]
bus = bus_function.groups()[2]
slot = bus_function.groups()[3]
function = bus_function.groups()[4]
info = device_info(pci_device_string)
logging.debug("Found device %s in %s", pci_device_string, line)
device = Device(domain=domain,
bus=bus,
slot=slot,
function=function,
info=info)
nvidia_devices[pci_device_string] = device
return nvidia_devices
def vfio_bind_pci_device(device):
logging.debug("vfio bind device %s", device)
shell.run_cmd(["/usr/local/bin/vfio-pci-bind.sh", device.full_address])
def device_driver(device):
path = f'/sys/bus/pci/devices/{device.full_address}/driver'
try:
driver_path = os.readlink(path)
return driver_path.split('/')[-1]
except OSError:
return None
def enable_count(device):
path = f'/sys/bus/pci/devices/{device.full_address}/enable'
with open(path, 'r') as f:
return int(f.read().strip())
def vfio_bind_pci_devices(devices):
logging.debug("Going to vfio bind devices %s", devices)
for device in devices:
vfio_bind_pci_device(device)
if __name__ == '__main__':
print(local_nvidia())
|
nilq/baby-python
|
python
|
import numpy as np
from PIL import Image as Image
from scipy.ndimage import median_filter as _median_filter
from skimage.restoration import denoise_tv_bregman as _denoise_tv_bregman
import tensorflow as tf
def _get_image_from_arr(img_arr):
return Image.fromarray(
np.asarray(img_arr, dtype='uint8'))
def median_filter(img_arr, size=3):
return _median_filter(img_arr, size=size)
def denoise_tv_bregman(img_arr, weight=30):
denoised = _denoise_tv_bregman(img_arr, weight=weight) * 255.
return np.array(denoised, dtype=img_arr.dtype)
def jpeg_compress(x, quality=75):
return tf.image.decode_jpeg(
tf.image.encode_jpeg(
x, format='rgb', quality=quality),
channels=3)
def slq(x, qualities=(20, 40, 60, 80), patch_size=8):
num_qualities = len(qualities)
with tf.name_scope('slq'):
one = tf.constant(1, name='one')
zero = tf.constant(0, name='zero')
x_shape = tf.shape(x)
n, m = x_shape[0], x_shape[1]
patch_n = tf.cast(n / patch_size, dtype=tf.int32) \
+ tf.cond(n % patch_size > 0, lambda: one, lambda: zero)
        patch_m = tf.cast(m / patch_size, dtype=tf.int32) \
            + tf.cond(m % patch_size > 0, lambda: one, lambda: zero)
R = tf.tile(tf.reshape(tf.range(n), (n, 1)), [1, m])
C = tf.reshape(tf.tile(tf.range(m), [n]), (n, m))
Z = tf.image.resize_nearest_neighbor(
[tf.random_uniform(
(patch_n, patch_m, 3),
0, num_qualities, dtype=tf.int32)],
(patch_n * patch_size, patch_m * patch_size),
name='random_layer_indices')[0, :, :, 0][:n, :m]
indices = tf.transpose(
tf.stack([Z, R, C]),
perm=[1, 2, 0],
name='random_layer_indices')
x_compressed_stack = tf.stack(
list(map(
lambda q: tf.image.decode_jpeg(tf.image.encode_jpeg(
x, format='rgb', quality=q), channels=3),
qualities)),
name='compressed_images')
x_slq = tf.gather_nd(x_compressed_stack, indices, name='final_image')
return x_slq
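# A minimal graph-mode usage sketch (hypothetical; assumes TF 1.x, matching
# the tf.image.resize_nearest_neighbor/Session-era API used above, and an
# HxWx3 uint8 image array `img`):
#
#   x = tf.placeholder(tf.uint8, shape=(None, None, 3))
#   y = slq(x)
#   with tf.Session() as sess:
#       out = sess.run(y, feed_dict={x: img})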
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import os
import sys
import matplotlib.pyplot as plt
import nibabel as nib
import nilearn.image as nimage
import numpy as np
import pandas as pd
import seaborn as sns
import scipy.linalg as la
from glob import glob
from budapestcode.utils import compute_tsnr
from budapestcode.viz import make_mosaic, plot_mosaic
if len(sys.argv) < 2:
print(f"Usage: {os.path.basename(__file__)} subject_id")
sys.exit(1)
subject = sys.argv[1]
if not subject.startswith('sub-'):
subject = f'sub-{subject}'
HERE = os.path.dirname(__file__)
OUTPUT_DIR = os.path.abspath(os.path.join(HERE, '../../outputs'))
INDIR = f"{OUTPUT_DIR}/fmriprep"
OUTDIR = f"{OUTPUT_DIR}/datapaper/tsnr"
func_fns = sorted(glob(f'{INDIR}/{subject}/func/*space-T1w_desc-preproc_bold.nii.gz'))
conf_fns = sorted(glob(f'{INDIR}/{subject}/func/*tsv'))
# compute tSNR for every run
tsnr_runs = []
print("Computing tSNR")
for f, c in zip(func_fns, conf_fns):
print(f" {f.split('/')[-1]}")
data = nib.load(f).get_fdata()
conf = pd.read_csv(c, sep='\t')
tsnr_runs.append(compute_tsnr(data, conf))
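# tSNR is conventionally the voxelwise temporal mean divided by the temporal
# standard deviation; how the confounds in `conf` enter (e.g. regressed out
# first) is defined by budapestcode.utils.compute_tsnr, not here.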
# make mosaics
mosaic_runs = [make_mosaic(t) for t in tsnr_runs]
# compute median tsnr
tsnr_median = np.median(tsnr_runs, 0)
mosaic_median_run = make_mosaic(tsnr_median)
IMGOUT = f'{OUTDIR}/figures/{subject}'
os.makedirs(IMGOUT, exist_ok=True)
# Save images
print("Saving images")
for i, mat in enumerate(mosaic_runs, 1):
    fig = plot_mosaic(mat, vmin=0, vmax=150, title=f'{subject}: run {i}')
plt.tight_layout()
fnout = f'{subject}_tsnr-mosaic_run-{i:02d}.png'
print(fnout)
fig.savefig(f'{IMGOUT}/{fnout}', dpi=150, bbox_inches='tight')
# median
fnout = f'{subject}_tsnr-mosaic_run-median.png'
print(fnout)
fig = plot_mosaic(mosaic_median_run, vmin=0, vmax=150, title=f'{subject}: median tSNR')
fig.savefig(f'{IMGOUT}/{fnout}', dpi=150, bbox_inches='tight')
# Now make violinplot
# first compute a conjunction brain mask
mask_fns = sorted(glob(f'{INDIR}/{subject}/func/*space-T1w_desc-brain_mask.nii.gz'))
# make a conjunction mask
brainmask = np.ones_like(tsnr_runs[0])
for mask_fn in mask_fns:
bm = nib.load(mask_fn).get_fdata()
brainmask *= bm
# plot it
mat_brainmask = make_mosaic(brainmask)
fig = plot_mosaic(mat_brainmask, vmin=0, vmax=1, title='Conjunction brainmask')
fnout = f'{subject}_brainmask-conjunction.png'
print(fnout)
fig.savefig(f'{IMGOUT}/{fnout}', dpi=150, bbox_inches='tight')
# mask the runs
tsnr_runs_masked = [t[brainmask.astype(bool)] for t in tsnr_runs]
# compute median
tsnr_median_masked = np.median(tsnr_runs_masked, 0)
tsnr_runs_masked.append(tsnr_median_masked)
# make a pretty plot please
fig, ax = plt.subplots(1, 1, figsize=(10, 6))
pos = [0, 1, 2, 3, 4, 5.5]
parts = ax.violinplot(tsnr_runs_masked, positions=pos, showmedians=True)
for pc in parts['bodies']:
pc.set_facecolor('gray')
pc.set_edgecolor('black')
pc.set_alpha(0.5)
for p in ['cbars', 'cmins', 'cmaxes', 'cmedians']:
parts[p].set_edgecolor('black')
ax.set_xticks(pos)
ax.set_xticklabels([f"Run {i}" for i in range(1, 6)] + ['Median tSNR'], fontsize=12)
ax.set_ylabel('tSNR', fontsize=12)
ax.set_title(subject, fontsize=14)
sns.despine()
plt.tight_layout()
fnout = f'{subject}_tsnr-violinplot.png'
print(fnout)
fig.savefig(f'{IMGOUT}/{fnout}', dpi=150, bbox_inches='tight')
# finally store the tSNR data so we can do group analyses
tsnr_tosave = tsnr_runs + [tsnr_median]
run_types = [f'{i:02d}' for i in range(1, 6)] + ['median']
OUTDIR = f"{OUTDIR}/{subject}"
os.makedirs(OUTDIR, exist_ok=True)
for run, t in zip(run_types, tsnr_tosave):
t_img = nimage.new_img_like(func_fns[0], t)
fnout = f'{subject}_task-movie_run-{run}_space-T1w_desc-tsnr.nii.gz'
print(fnout)
fnout = f"{OUTDIR}/{fnout}"
t_img.to_filename(fnout)
|
nilq/baby-python
|
python
|
import configparser
import os
from core.basesingleton import BaseSingleton
from core.settings import Settings
class ConfigurationManager(BaseSingleton):
@classmethod
def load_configuration(cls):
cls.get_instance()._load_configuration()
@classmethod
def save_configuration(cls):
cls.get_instance()._save_configuration()
def __init__(self):
self._filename = 'settings.cfg'
self._filepath = os.path.join(os.getcwd(),"..", self._filename)
self._config_exists = os.path.exists(self._filepath)
self._config = ConfigParser.ConfigParser()
def __del__(self):
self.save_configuration()
def _check_type(self, value):
# Default to string
value_type = "str"
# Check Boolean
if value == "True" or value == "False":
value_type = "bool"
# Check list
elif value[0] == "[" and value[-1] == "]":
value_type = "list"
# Check int
elif value.find('.') == -1:
try:
int(value)
value_type = "int"
except ValueError:
pass
elif value.find('.') > -1:
try:
float(value)
value_type = "float"
except ValueError:
pass
return value_type
def _load_configuration(self):
if self._config_exists:
# If read ok
read_result = self._config.read(self._filepath)
if len(read_result) > 0 and os.path.basename(read_result[0]) == self._filename:
# Populate the classes with the appropriate values
for section in self._config.sections():
#print "Reading Config Section: " + section
for option in self._config.options(section):
value_type = "str"
# If the section has been defined in the default settings
if Settings.has_section(section):
value_type = Settings.get(section, option).__class__.__name__
# Else use a manual technique to figure this out.
else:
value = self._config.get(section, option)
value_type = self._check_type( value )
if value_type == "str":
Settings.set(section, option, self._config.get(section, option) )
elif value_type == "int":
Settings.set(section, option, self._config.getint(section, option) )
elif value_type == "float":
Settings.set(section, option, self._config.getfloat(section, option) )
elif value_type == "bool":
Settings.set(section, option, self._config.getboolean(section, option) )
elif value_type == "list":
# If the item is a list get it as a string and process it as appropriate
# only lists containing homogeneous values are supported
list_value = self._config.get(section, option)
# If the section has already been defined in the default settings
if Settings.has_section(section):
#assuming that the list has more than one value...
list_type = Settings.get(section, option)[0].__class__.__name__
# Otherwise extract the type
else:
#Extract the first list element
# Default to a single element list
list_element = list_value[1:-1]
#Check for more and adjust as necessary
if list_value.find(',') > 0:
list_element = list_value[1:list_value.find(',')]
# Get the element_type
list_type = self._check_type( list_element )
# In place of python's lack of a switch statement, defaulting to str if None
cast_func = { 'int' : int, 'float' : float, 'bool' : bool, 'str' : str, 'NoneType' : str }[list_type]
# Generate a list from the string
Settings.set(section, option, [cast_func(value) for value in list_value[1:-1].split(',')] )
value = self._config.get(section, option)
# print "Reading property class: %s name: %s value: %s" % ( section, option, str(value) )
# print "Class value class: %s name: %s value: %s valuetype: %s" % ( section, option, str(Settings.get(section, option)), Settings.get(section, option).__class__ )
else:
print "Can't find configuration file: %s " % self._filename
def _save_configuration(self):
print "Shutting down ConfigurationManager"
# For each of the settings sections
for section in Settings.get_sections():
# if they don't yet have a section in the settings file, add one.
if not section in self._config.sections():
self._config.add_section(section)
# Store their values
for option in Settings.get_options(section):
value = Settings.get(section, option)
self._config.set(section, option, value )
# print "Class value class: %s name: %s value: %s valuetype: %s" % ( section, option, value, value.__class__)
# for class_def in self._settings_classes:
# class_name = class_def.__name__
# # if they don't yet have a section in the settings file, add one.
# if not class_name in self._config.sections():
# self._config.add_section(class_name)
# # Store their values
# for name, value in class_def.__dict__.iteritems():
# # Ignoring generated class properties
# if name[:2] != "__":
# self._config.set(class_name, name, value)
# print "Class value class: %s name: %s value: %s valuetype: %s" % ( class_name, name, value, value.__class__)
with open(self._filepath, 'w') as configfile:
self._config.write(configfile)
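# Illustrative usage sketch (assumes BaseSingleton provides get_instance()):
#   ConfigurationManager.load_configuration()   # read settings.cfg into Settings
#   ...
#   ConfigurationManager.save_configuration()   # write Settings back to disk
# For reference, _check_type maps raw option strings to type tags, e.g.:
#   "True"      -> "bool"
#   "[1, 2, 3]" -> "list"
#   "42"        -> "int"
#   "3.14"      -> "float"
#   "hello"     -> "str"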
###############################################################################
#
# \file ResultProcessor.py
# \author Sudnya Diamos <sudnyadiamos@gmail.com>
# \date Saturday August 12, 2017
# \brief Class that converts a probability distribution over classes to class
# label and returns result as a json object
###############################################################################
import argparse
import logging
import numpy
logger = logging.getLogger("ResultProcessor")
class ResultProcessor:
def __init__(self):
pass
def getLabels(self, pd, labelMapper):
#TODO: [probabilities] {batch, probs} -> pick max entry -> class label
batchSize = pd.shape[0]
labelCount = pd.shape[1]
labels = []
for batchElement in range(batchSize):
probs = numpy.reshape(pd[batchElement:batchElement + 1, :], (labelCount))
mostLikelyLabelIndex = numpy.argmax(probs)
logger.info("Most like label: " + str(mostLikelyLabelIndex) \
+ " with score: " + str(probs[mostLikelyLabelIndex]))
label = labelMapper.getLabelForLogit(mostLikelyLabelIndex)
top5LabelIndices = numpy.argpartition(probs, -5)[-5:]
top5LabelIndices = reversed(top5LabelIndices[numpy.argsort(probs[top5LabelIndices])])
top5Labels = [labelMapper.getLabelForLogit(index) for index in top5LabelIndices]
result = {"label" : label, "top-5-labels" : top5Labels}
logger.info(" result: " + str(result))
labels.append(result)
return labels
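# Usage sketch (labelMapper is whatever object the caller supplies; it only
# needs getLabelForLogit(index)): pd is a (batch_size, num_classes) array of
# class probabilities, and getLabels returns one dict per batch element with
# the argmax label plus the top-5 labels ordered from most to least likely.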
# Generated by Django 3.0.6 on 2020-05-16 17:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('confesion', '0004_remove_confesion_comentarios'),
]
operations = [
migrations.AddField(
model_name='comentario',
name='add_comentario',
field=models.TextField(default='', max_length=420),
),
]
import pytest
MODEL = 'ecmwf'
class VariableInfo:
def __init__(self):
self.name = 'Product'
@pytest.mark.parametrize("key", [
'cf_V', 'cf_A', 'cf_V_adv', 'cf_A_adv'])
def test_get_cf_title(key):
from model_evaluation.plotting.plotting import _get_cf_title
var = VariableInfo()
field_name = key + '_' + MODEL
value = 'Product, Volume'
if 'A' in key:
value = 'Product, Area'
x = _get_cf_title(field_name, var)
assert x == value
@pytest.mark.parametrize("key", [
'cf_V', 'cf_A', 'cf_V_adv', 'cf_A_adv'])
def test_get_cf_title_cycle(key):
from model_evaluation.plotting.plotting import _get_cf_title
var = VariableInfo()
field_name = key + '_' + MODEL + '_001'
value = 'Product, Volume'
if 'A' in key:
value = 'Product, Area'
x = _get_cf_title(field_name, var)
assert x == value
@pytest.mark.parametrize("key, value", [
('iwc', 'Product'),
('iwc_att', 'Product with good attenuation'),
('iwc_rain', 'Product with rain'),
('iwc_adv', 'Product'),
('iwc_att_adv', 'Product with good attenuation'),
('iwc_rain_adv', 'Product with rain')])
def test_get_iwc_title(key, value):
from model_evaluation.plotting.plotting import _get_iwc_title
var = VariableInfo()
field_name = key + '_' + MODEL
x = _get_iwc_title(field_name, var)
assert x == value
@pytest.mark.parametrize("key, value", [
('iwc', 'Product'),
('iwc_att', 'Product with good attenuation'),
('iwc_rain', 'Product with rain'),
('iwc_adv', 'Product'),
('iwc_att_adv', 'Product with good attenuation'),
('iwc_rain_adv', 'Product with rain')])
def test_get_iwc_title_cycle(key, value):
from model_evaluation.plotting.plotting import _get_iwc_title
var = VariableInfo()
field_name = key + '_' + MODEL + '_001'
x = _get_iwc_title(field_name, var)
assert x == value
@pytest.mark.parametrize("key", ['lwc','lwc_adv'])
def test_get_product_title(key):
from model_evaluation.plotting.plotting import _get_product_title
var = VariableInfo()
value = 'Product'
x = _get_product_title(var)
assert x == value
@pytest.mark.parametrize("key", ['lwc','lwc_adv'])
def test_get_product_title_cycle(key):
from model_evaluation.plotting.plotting import _get_product_title
var = VariableInfo()
value = 'Product'
x = _get_product_title(var)
assert x == value
@pytest.mark.parametrize("key, title", [
('lwc', "Product"),
('lwc_adv', "Product (Advection time)")])
def test_get_stat_titles(key, title):
from model_evaluation.plotting.plotting import _get_stat_titles
field_name = key + '_' + MODEL
var = VariableInfo()
x = _get_stat_titles(field_name, key, var)
assert x == title
@pytest.mark.parametrize("key", [
'cf_V', 'cf_A', 'cf_V_adv', 'cf_A_adv'])
def test_get_cf_title_stat(key):
from model_evaluation.plotting.plotting import _get_cf_title_stat
field_name = key + '_' + MODEL
var = VariableInfo()
x = _get_cf_title_stat(field_name, var)
value = 'Product volume'
if 'A' in key:
value = 'Product area'
assert x == value
@pytest.mark.parametrize("key, value", [
('iwc', 'Product'),
('iwc_att', 'Product with good attenuation'),
('iwc_rain', 'Product with rain')])
def test_get_iwc_title_stat(key, value):
from model_evaluation.plotting.plotting import _get_iwc_title_stat
field_name = key + '_' + MODEL
var = VariableInfo()
x = _get_iwc_title_stat(field_name, var)
assert x == value
@pytest.mark.parametrize("key", ['lwc'])
def test_get_product_title_stat(key):
from model_evaluation.plotting.plotting import _get_product_title_stat
var = VariableInfo()
x = _get_product_title_stat(var)
assert x == 'Product'
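# For reference, the tests above assume title helpers that parse field names of
# the form "<product>_<model>[_<cycle>]", e.g. "cf_V_ecmwf" -> "Product, Volume"
# and "iwc_att_ecmwf" -> "Product with good attenuation".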
# -*- coding: utf-8 -*-
import time
import pandas as pd
import numpy as np
import torch
from numba import njit
from pyteomics.mgf import read
from pyteomics.mgf import read_header
"""
This script is used to compare the use-time of NDP and DLEAMS
"""
@njit
def calculate_spec(bin_spec):
ndp_spec = np.math.sqrt(np.dot(bin_spec, bin_spec))
return ndp_spec
@njit
def get_bin_index(mz, min_mz, bin_size):
relative_mz = mz - min_mz
return max(0, int(np.floor(relative_mz / bin_size)))
def ndp_bin_spectrum(mz_array, intensity_array):
"""
bin spectrum and this algorithm reference from 'https://github.com/dhmay/param-medic/blob/master/parammedic/binning.pyx'
:param mz_array:
:param intensity_array:
:param max_mz:
:param min_mz:
:param bin_size:
:return:
"""
# key = mz_array.__str__()
# if key in spectrum_dict.keys(): # use cache just take 4s
# # if False: use the old one may take 7s for 50
# return spectrum_dict[key]
# else:
max_mz = int(2500)
min_mz = float(50.5)
bin_size = float(1.0005079)
# max_mz = int(1995)
# min_mz = float(84)
# bin_size = float(1)
nbins = int(float(max_mz - min_mz) / float(bin_size)) + 1
results_dict = {}
results = np.zeros(nbins)
final_results = np.zeros(nbins)
for index in range(len(mz_array)):
mz = float(mz_array[index])
intensity = intensity_array[index]
intensity = np.math.sqrt(intensity)
if mz < min_mz or mz > max_mz:
continue
bin_index = get_bin_index(mz, min_mz, bin_size)
if bin_index < 0 or bin_index > nbins - 1:
continue
if results[bin_index] == 0:
results[bin_index] = intensity
else:
results[bin_index] += intensity
intensity_sum = results.sum()
if intensity_sum > 0:
results /= intensity_sum
# spectrum_dict[key] = results
else:
print('zero intensity found')
# keep only the 100 most intense peaks
# print(results)
# print(results)
# for i in range(results.shape[0]):
# results_dict[i] = results[i]
# final_results[i] = 0
results_tensor = torch.from_numpy(results)
results_tensor = results_tensor.cuda()
test_topk = torch.topk(results_tensor, k=100)
top100_intens = np.array(test_topk[0].cpu())
top100_index = np.array(test_topk[1].cpu())
for i in range(top100_index.shape[0]):
final_results[top100_index[i]] = top100_intens[i]
return final_results
def calculate_normalization_dp(bin_spectrum01, bin_spectrum02):
tmp_01 = calculate_spec(bin_spectrum01)
tmp_02 = calculate_spec(bin_spectrum02)
dvi = np.dot(tmp_01, tmp_02)
tmp_dp_list = np.dot(bin_spectrum01, bin_spectrum02)
result = tmp_dp_list / dvi
return result
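# For reference, the value computed above is the normalized dot product
#   NDP(a, b) = (a . b) / (||a|| * ||b||)
# i.e. the cosine similarity between the two binned, sqrt-transformed spectra.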
def calculate_ndp_time(spectra_mgf_file1, spectra_mgf_file2):
score_list = []
bins_spectrum_01, bins_spectrum_02 = [], []
tmp_time_01 = time.perf_counter()
spectra01 = read(spectra_mgf_file1, convert_arrays=1)
spectra02 = read(spectra_mgf_file2, convert_arrays=1)
for data01 in spectra01:
spectrum01_mz_array = data01.get("m/z array")
spectrum01_intens_array = data01.get("intensity array")
bin_spectrum01 = ndp_bin_spectrum(spectrum01_mz_array, spectrum01_intens_array)
bins_spectrum_01.append(bin_spectrum01)
for data02 in spectra02:
spectrum02_mz_array = data02.get("m/z array")
spectrum02_intens_array = data02.get("intensity array")
bin_spectrum02 = ndp_bin_spectrum(spectrum02_mz_array, spectrum02_intens_array)
bins_spectrum_02.append(bin_spectrum02)
time01 = time.perf_counter()
print("两文件编码所用的时间为:{}".format(time01 - tmp_time_01))
for j in range(len(bins_spectrum_01)):
score = calculate_normalization_dp(bins_spectrum_01[j], bins_spectrum_02[j])
score_list.append(score)
# np.savetxt("./data/1130_test_use_time_ndp.txt", score_list)
time02 = time.perf_counter()
print("Similarity use time: {}".format(time02 - time01))
if __name__ == '__main__':
print("test")
time_01 = time.perf_counter()
# First define the inputs: the spectrum-pair data, plus the MGF files that the spectrum pairs refer to
# spectra_pairs_file = "./data/062401_test_ups_specs_BC_NFTR_NFTR_NF_None_TR_None_PPR_None_CHR_givenCharge_PRECTOL_3.0_binScores.txt"
# spectra_mgf_file1 = "./data/0622_Orbi2_study6a_W080314_6E008_yeast_S48_ft8_pc_SCAN.mgf"
# spectra_mgf_file2 = "./data/0622_Orbi2_study6a_W080314_6E008_yeast_S48_ft8_pc_SCAN.mgf"
# spectra_mgf_file1 = "./data/OEI04195.mgf"
# spectra_mgf_file2 = "./data/OEI04195.mgf"
# spectra_mgf_file1 = "./data/test50000.mgf"
# spectra_mgf_file2 = "./data/test50000.mgf"
# spectra_mgf_file1 = "./data/crap.mgf"
# spectra_mgf_file2 = "./data/crap.mgf"
# spectra_mgf_file1 = "./data/sample10000_mgf.mgf"
# spectra_mgf_file2 = "./data/sample10000_mgf.mgf"
# spectra_mgf_file1 = "./data/sample20000_mgf.mgf"
# spectra_mgf_file2 = "./data/sample20000_mgf.mgf"
#
# spectra_mgf_file1 = "./data/sample40000_mgf.mgf"
# spectra_mgf_file2 = "./data/sample40000_mgf.mgf"
#
# spectra_mgf_file1 = "./data/sample80000_mgf.mgf"
# spectra_mgf_file2 = "./data/sample80000_mgf.mgf"
spectra_mgf_file1 = "./data/crap_40000_mgf.mgf"
spectra_mgf_file2 = "./data/crap_40000_mgf.mgf"
# spectra_mgf_file1 = "../SimilarityScoring/data/before_0622/Orbi2_study6a_W080314_6E008_yeast_S48_ft8_pc.mgf"
# spectra_mgf_file2 = "../SimilarityScoring/data/before_0622/Orbi2_study6a_W080314_6E008_yeast_S48_ft8_pc.mgf"
tmp_time_00 = time.perf_counter()
calculate_ndp_time(spectra_mgf_file1, spectra_mgf_file2)
time_02 = time.perf_counter()
print("不计算文件加载,仅计算编码和计算NDP的总时间:{}".format(time_02 - tmp_time_00))
print("Total use time: {}".format(time_02 - time_01))
import pylab
import numpy
import ardustat_library_simple as ard
import time
import sys
from glob import glob
import os
def get_latest():
data_files = glob("*.dat")
high_time = 0
recent_file = "foo"
for d in data_files:
if os.path.getmtime(d) > high_time:
high_time = os.path.getmtime(d)
recent_file = d
return recent_file
try:
file_name = sys.argv[1]
except Exception, err:
file_name = get_latest()
print "defaulting to most recent file:", file_name
data = open(file_name).read()
data = data.split("\n")
times = []
potential = []
current = []
cycles = []
this_cycle = 0
for d in data:
try:
parts = d.split(",")
times.append(parts[0])
potential.append(parts[1])
current.append(parts[2])
cycle = int(parts[3])
if cycle != this_cycle:
this_cycle = cycle
cycles.append({'times':times,'potential':potential,'current':current})
times = []
potential = []
current = []
except Exception, err:
foo = err
cycles.append({'times':times,'potential':potential,'current':current})
counter = 1
for c in cycles:
pylab.plot(c['potential'],c['current'],label='Cycle '+str(counter))
pylab.legend(loc="best")
pylab.ylabel("Current (A)")
pylab.xlabel("Potential (V)")
counter += 1
pylab.savefig("out-cv.png")
import numpy as np
from ..prediction import *
def test_predict_seebeck():
try:
predict_seebeck(1234, 62, 400)
except(TypeError):
pass
else:
raise Exception("Bad input allowed",
"Error not raised when `compound` isn't a string")
try:
predict_seebeck('CaMnO3', 62.0, 400)
except(TypeError):
pass
else:
raise Exception("Bad input allowed",
"Error not raised when `spacegroup` isn't an int")
try:
predict_seebeck('CaMnO3', 62, '400')
except(TypeError):
pass
else:
raise Exception("Bad input allowed",
"Error not raised when `T` isn't an int or float")
try:
predict_seebeck('CaLaTiLiMnO3', 62, 400)
except(Exception):
pass
else:
raise Exception("Bad input allowed",
"Error not raised when too many elements are passed")
try:
predict_seebeck('CaMnO3', 240, 400)
except(Exception):
pass
else:
raise Exception("Bad input allowed",
"Error not raised when `spacegroup`" +
" is greater than 230")
return
def test_scale_features_1():
nn = TEMANN()
try:
nn._scale_features('12345')
except(Exception):
pass
else:
raise Exception("Bad input allowed",
"Error not raised when input is not numpy array")
return
def test_transform_compound_1():
nn = TEMANN()
compound = "CaMnO3"
output = nn._transform_compound(compound)
assert len(output) == 80, "Padding of np array not completed"
return
def test_transform_compound_2():
nn = TEMANN()
try:
nn._transform_compound(1234)
except(Exception):
pass
else:
raise Exception("Bad input allowed",
"Error not raised when input is not a string")
return
def test_transform_spacegroup_1():
nn = TEMANN()
output = nn._transform_spacegroup(221)
assert len(output) == 14, "Not all spacegroup features transformed."
return
def test_transform_spacegroup_2():
nn = TEMANN()
try:
nn._transform_spacegroup(221.0)
except(Exception):
pass
else:
raise Exception("Bad input allowed",
"Error not raised when input is not an integer")
return
def test_replace_with_list_values_1():
nn = TEMANN()
list1 = [1, 2, 3, 4, 5, 6, 7, 8]
list2 = ['A', 'B']
nn._replace_with_list_values(1, list1, list2)
assert len(list1) > len(list2), 'Original list not modified correctly, check that the list to be inserted is not empty'
return
def test_replace_with_list_values_2():
nn = TEMANN()
list1 = [1, 2, 3, 4, 5, 6, 7, 8]
try:
nn._replace_with_list_values(1, list1, 'A')
except(Exception):
pass
else:
raise Exception("Bad input allowed",
"Error not raised when input is not a list")
return
def test_load_encoder_1():
nn = TEMANN()
encoder_file = 1234
encoder_id = '0'
try:
nn._load_encoder(encoder_file, encoder_id)
except(Exception):
pass
else:
raise Exception("Bad input allowed",
"Error not raised when encoder_file input in not" +
" a string, or encoder_id is not an int")
return
def test_load_scaler_1():
nn = TEMANN()
scaler_file = 1234
try:
nn._load_scaler(scaler_file)
except(Exception):
pass
else:
raise Exception("Bad input allowed",
"Error not raised when scaler_file is not a string.")
return
def test_load_neural_network_1():
nn = TEMANN()
json_file = 1234
weights_file = 5678
try:
nn._load_neural_network(json_file, 'model.h5')
except(Exception):
pass
else:
raise Exception("Bad input allowed",
"Error not raised when json_file is not a string.")
try:
nn._load_neural_network('model.json', weights_file)
except(Exception):
pass
else:
raise Exception("Bad input allowed",
"Error not raised when weights_file is not a string.")
return
import numpy as np
import pandas as pd
import pytest
from scipy.sparse import coo_matrix
from collie.cross_validation import random_split, stratified_split
from collie.interactions import ExplicitInteractions, Interactions
def test_bad_random_split_HDF5Interactions(hdf5_interactions):
with pytest.raises(AssertionError):
random_split(
interactions=hdf5_interactions,
)
def test_bad_stratified_split_HDF5Interactions(hdf5_interactions):
with pytest.raises(AssertionError):
stratified_split(
interactions=hdf5_interactions,
)
@pytest.mark.parametrize('data_type', ['implicit', 'explicit'])
def test_random_split(implicit_interactions_to_split,
explicit_interactions_to_split,
data_type):
if data_type == 'implicit':
interactions_class = Interactions
interactions_kwargs = {
'check_num_negative_samples_is_valid': False,
}
interactions_to_split = implicit_interactions_to_split
else:
interactions_class = ExplicitInteractions
interactions_kwargs = {}
interactions_to_split = explicit_interactions_to_split
train_expected_df = pd.DataFrame(
data={
'user_id': [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 4, 4],
'item_id': [0, 1, 2, 3, 4, 5, 8, 1, 3, 4, 1, 3, 4, 2, 2, 4],
'rating': [1, 2, 3, 4, 5, 4, 1, 1, 3, 4, 2, 4, 5, 5, 3, 5],
}
)
train_expected = interactions_class(
mat=coo_matrix(
(
train_expected_df['rating'],
(train_expected_df['user_id'], train_expected_df['item_id']),
),
shape=(interactions_to_split.num_users, interactions_to_split.num_items),
),
allow_missing_ids=True,
**interactions_kwargs,
)
validate_expected_df = pd.DataFrame(
data={'user_id': [3, 4, 4], 'item_id': [1, 1, 5], 'rating': [1, 2, 4]}
)
validate_expected = interactions_class(
mat=coo_matrix(
(
validate_expected_df['rating'],
(validate_expected_df['user_id'], validate_expected_df['item_id']),
),
shape=(interactions_to_split.num_users, interactions_to_split.num_items),
),
allow_missing_ids=True,
**interactions_kwargs,
)
test_expected_df = pd.DataFrame(
data={
'user_id': [0, 0, 1, 2, 3],
'item_id': [6, 7, 2, 2, 4],
'rating': [3, 2, 2, 3, 4],
}
)
test_expected = interactions_class(
mat=coo_matrix(
(
test_expected_df['rating'],
(test_expected_df['user_id'], test_expected_df['item_id']),
),
shape=(interactions_to_split.num_users, interactions_to_split.num_items),
),
allow_missing_ids=True,
**interactions_kwargs,
)
(train_actual, validate_actual, test_actual) = random_split(
interactions=interactions_to_split, val_p=0.1, test_p=0.2, seed=42
)
np.testing.assert_array_equal(train_actual.toarray(), train_expected.toarray())
np.testing.assert_array_equal(
validate_actual.toarray(), validate_expected.toarray()
)
np.testing.assert_array_equal(test_actual.toarray(), test_expected.toarray())
assert (
train_actual.num_users
== train_expected.num_users
== validate_actual.num_users
== validate_expected.num_users
== test_actual.num_users
== test_expected.num_users
)
assert (
train_actual.num_items
== train_expected.num_items
== validate_actual.num_items
== validate_expected.num_items
== test_actual.num_items
== test_expected.num_items
)
assert (
type(train_actual)
== type(train_expected)
== type(validate_actual)
== type(validate_expected)
== type(test_actual)
== type(test_expected)
== interactions_class
)
def test_random_split_with_users_with_only_one_interaction(
interactions_to_split_with_users_with_only_one_interaction,
):
# unlike for ``stratified_split``, this should work without error
random_split(
interactions=interactions_to_split_with_users_with_only_one_interaction,
)
@pytest.mark.parametrize('data_type', ['implicit', 'explicit'])
def test_stratified_split(implicit_interactions_to_split,
explicit_interactions_to_split,
data_type):
if data_type == 'implicit':
interactions_class = Interactions
interactions_kwargs = {
'check_num_negative_samples_is_valid': False,
}
interactions_to_split = implicit_interactions_to_split
else:
interactions_class = ExplicitInteractions
interactions_kwargs = {}
interactions_to_split = explicit_interactions_to_split
train_expected_df = pd.DataFrame(
data={
'user_id': [0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 4, 4],
'item_id': [1, 2, 3, 4, 6, 8, 1, 2, 3, 4, 2, 4, 5],
'rating': [2, 3, 4, 5, 3, 1, 1, 2, 4, 5, 5, 5, 4],
}
)
train_expected = interactions_class(
mat=coo_matrix(
(
train_expected_df['rating'],
(train_expected_df['user_id'], train_expected_df['item_id']),
),
shape=(interactions_to_split.num_users, interactions_to_split.num_items),
),
allow_missing_ids=True,
**interactions_kwargs,
)
validate_expected_df = pd.DataFrame(
data={
'user_id': [0, 1, 2, 3, 4],
'item_id': [7, 3, 2, 1, 2],
'rating': [2, 3, 3, 1, 3],
}
)
validate_expected = interactions_class(
mat=coo_matrix(
(
validate_expected_df['rating'],
(validate_expected_df['user_id'], validate_expected_df['item_id']),
),
shape=(interactions_to_split.num_users, interactions_to_split.num_items),
),
allow_missing_ids=True,
**interactions_kwargs,
)
test_expected_df = pd.DataFrame(
data={
'user_id': [0, 0, 1, 2, 3, 4],
'item_id': [0, 5, 4, 1, 4, 1],
'rating': [1, 4, 4, 2, 4, 2],
}
)
test_expected = interactions_class(
mat=coo_matrix(
(
test_expected_df['rating'],
(test_expected_df['user_id'], test_expected_df['item_id']),
),
shape=(interactions_to_split.num_users, interactions_to_split.num_items),
),
allow_missing_ids=True,
**interactions_kwargs,
)
(train_actual, validate_actual, test_actual) = stratified_split(
interactions=interactions_to_split, val_p=0.1, test_p=0.2, seed=46
)
np.testing.assert_array_equal(train_actual.toarray(), train_expected.toarray())
np.testing.assert_array_equal(
validate_actual.toarray(), validate_expected.toarray()
)
np.testing.assert_array_equal(test_actual.toarray(), test_expected.toarray())
assert (
train_actual.num_users
== train_expected.num_users
== validate_actual.num_users
== validate_expected.num_users
== test_actual.num_users
== test_expected.num_users
)
assert (
train_actual.num_items
== train_expected.num_items
== validate_actual.num_items
== validate_expected.num_items
== test_actual.num_items
== test_expected.num_items
)
assert (
type(train_actual)
== type(train_expected)
== type(validate_actual)
== type(validate_expected)
== type(test_actual)
== type(test_expected)
== interactions_class
)
@pytest.mark.parametrize('processes', [0, -1])
def test_stratified_split_with_users_with_only_one_interaction_raises_error(
interactions_to_split_with_users_with_only_one_interaction,
processes
):
with pytest.raises(
ValueError,
match='Unable to stratify split on users - the ``interactions`` object contains users '
'with a single interaction. Either set ``force_split = True`` to put all users '
'with a single interaction in the training set or run '
'``collie.utils.remove_users_or_items_with_fewer_than_n_interactions`` first.'
):
stratified_split(
interactions=interactions_to_split_with_users_with_only_one_interaction,
test_p=0.2,
seed=42,
processes=processes,
)
@pytest.mark.parametrize('processes', [0, -1])
def test_stratified_split_with_items_with_only_one_interaction_raises_error(
interactions_to_split_with_items_with_only_one_interaction,
processes
):
with pytest.raises(
ValueError,
match='Unable to stratify split on items - the ``interactions`` object contains items '
'with a single interaction. Either set ``force_split = True`` to put all items '
'with a single interaction in the training set or run '
'``collie.utils.remove_users_or_items_with_fewer_than_n_interactions`` first.'
):
stratified_split(
interactions=interactions_to_split_with_items_with_only_one_interaction,
test_p=0.2,
seed=42,
processes=processes,
)
@pytest.mark.parametrize('processes', [0, -1])
def test_stratified_split_with_users_with_only_one_interaction_force_split(
interactions_to_split_with_users_with_only_one_interaction,
processes
):
users_with_only_one_interaction = [0, 5, 6]
(train_actual, _, _) = stratified_split(
interactions=interactions_to_split_with_users_with_only_one_interaction,
val_p=0.1,
test_p=0.2,
seed=42,
processes=processes,
force_split=True
)
assert all(user in train_actual[:][0][0].tolist() for user in users_with_only_one_interaction)
class TestSplitsWithWrongP:
def test_combined_too_large_random(self, implicit_interactions_to_split):
with pytest.raises(ValueError):
random_split(interactions=implicit_interactions_to_split, val_p=0.9, test_p=0.2)
def test_combined_too_large_stratified(self, implicit_interactions_to_split):
with pytest.raises(ValueError):
stratified_split(interactions=implicit_interactions_to_split, val_p=0.9, test_p=0.2)
def test_combined_equal_one_random(self, implicit_interactions_to_split):
with pytest.raises(ValueError):
random_split(interactions=implicit_interactions_to_split, val_p=0.7, test_p=0.3)
def test_combined_equal_one_stratified(self, implicit_interactions_to_split):
with pytest.raises(ValueError):
stratified_split(interactions=implicit_interactions_to_split, val_p=0.7, test_p=0.3)
def test_val_negative_but_combined_good_random(self, implicit_interactions_to_split):
with pytest.raises(ValueError):
random_split(interactions=implicit_interactions_to_split, val_p=-0.1, test_p=0.3)
def test_val_negative_but_combined_good_stratified(self, implicit_interactions_to_split):
with pytest.raises(ValueError):
stratified_split(interactions=implicit_interactions_to_split, val_p=-0.1, test_p=0.3)
def test_test_p_too_large_random(self, implicit_interactions_to_split):
with pytest.raises(ValueError):
random_split(interactions=implicit_interactions_to_split, test_p=1.1)
def test_test_p_too_large_stratified(self, implicit_interactions_to_split):
with pytest.raises(ValueError):
stratified_split(interactions=implicit_interactions_to_split, test_p=1.1)
def test_test_p_equal_one_random(self, implicit_interactions_to_split):
with pytest.raises(ValueError):
random_split(interactions=implicit_interactions_to_split, test_p=1)
def test_test_p_equal_one_stratified(self, implicit_interactions_to_split):
with pytest.raises(ValueError):
stratified_split(interactions=implicit_interactions_to_split, test_p=1)
def test_test_p_negative_random(self, implicit_interactions_to_split):
with pytest.raises(ValueError):
random_split(interactions=implicit_interactions_to_split, test_p=-0.7)
def test_test_p_negative_stratified(self, implicit_interactions_to_split):
with pytest.raises(ValueError):
stratified_split(interactions=implicit_interactions_to_split, test_p=-0.7)
def test_splits_vary_number_of_processes(implicit_interactions_to_split):
train_1, test_1 = stratified_split(interactions=implicit_interactions_to_split,
seed=42,
processes=-1)
train_2, test_2 = stratified_split(interactions=implicit_interactions_to_split,
seed=42,
processes=0)
train_3, test_3 = stratified_split(interactions=implicit_interactions_to_split,
seed=42,
processes=1)
train_4, test_4 = stratified_split(interactions=implicit_interactions_to_split,
seed=42,
processes=2)
# transitive property in action here
np.testing.assert_array_equal(train_1.toarray(), train_2.toarray())
np.testing.assert_array_equal(train_2.toarray(), train_3.toarray())
np.testing.assert_array_equal(train_3.toarray(), train_4.toarray())
np.testing.assert_array_equal(test_1.toarray(), test_2.toarray())
np.testing.assert_array_equal(test_2.toarray(), test_3.toarray())
np.testing.assert_array_equal(test_3.toarray(), test_4.toarray())
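# Usage sketch mirroring the calls exercised above:
#   train, val, test = random_split(interactions=my_interactions, val_p=0.1,
#                                   test_p=0.2, seed=42)
# stratified_split takes the same arguments and splits each user's interactions
# across train/val/test, which is why users (or items) with only a single
# interaction raise a ValueError unless force_split=True puts them entirely in
# the training set.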
from .loader import TableData
class MeasurementTypeData(TableData):
DATA = [
{"measurement_type_id": 0, "name": "generic measurement"},
{"measurement_type_id": 1, "name": "generic liquid sample"},
{"measurement_type_id": 2, "name": "whole blood"},
{"measurement_type_id": 3, "name": "serum or plasma"},
{"measurement_type_id": 4, "name": "buffycoat"},
{"measurement_type_id": 5, "name": "paxgene rna"},
{"measurement_type_id": 6, "name": "paxgene dna"},
]
import matplotlib.pyplot as plt
from torchvision import transforms, datasets
from torchvision.models import vgg19, densenet121, vgg16
from torchvision import datasets, models, transforms
import torchvision
from torch import nn, optim
import torch
import torch.nn.functional as F
from collections import OrderedDict
import json
import numpy as np
from PIL import Image
import argparse
parser = argparse.ArgumentParser(description='Predict the type of a flower')
parser.add_argument('--checkpoint', type=str, help='Path to checkpoint' , default='checkpoint.pth')
parser.add_argument('--image_path', type=str, help='Path to file' , default='flowers/test/28/image_05230.jpg')
parser.add_argument('--gpu', type=bool, default=True, help='Whether to use GPU during inference or not')
parser.add_argument('--topk', type=int, help='Number of k to predict' , default=0)
parser.add_argument('--cat_to_name_json', type=str, help='Json file to load for class values to name conversion' , default='cat_to_name.json')
args = parser.parse_args()
with open(args.cat_to_name_json, 'r') as f:
cat_to_name = json.load(f)
image_path = args.image_path
device = 'cuda' if args.gpu else 'cpu'
# Load a checkpoint and rebuild the model
def load_checkpoint(checkpoint):
checkpoint = torch.load(checkpoint)
model = getattr(torchvision.models, checkpoint['arch'])(pretrained=True)
model.classifier = checkpoint['classifier']
for param in model.parameters():
param.requires_grad = False
model.load_state_dict(checkpoint['state_dict'])
optimizer = checkpoint['optimizer']
optimizer.load_state_dict(checkpoint['optimizer_dict'])
return model, checkpoint
model, checkpoint = load_checkpoint(args.checkpoint)
def process_image(image):
image = image.resize((round(256*image.size[0]/image.size[1]) if image.size[0]>image.size[1] else 256,
round(256*image.size[1]/image.size[0]) if image.size[1]>image.size[0] else 256))
image = image.crop((image.size[0]/2-224/2, image.size[1]/2-224/2, image.size[0]/2+224/2, image.size[1]/2+224/2))
np_image = (np.array(image)/255-[0.485,0.456,0.406])/[0.229, 0.224, 0.225]
np_image = np_image.transpose((2,0,1))
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
return torch.from_numpy(np_image)
# Process a PIL image for use in a PyTorch model
im = Image.open(image_path)
processed_im = process_image(im)
def predict(image_path, model, topk=5):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
# Predict the class from an image file
im = Image.open(image_path)
processed_im = process_image(im).unsqueeze(0)
model.to(device)
model.eval()
with torch.no_grad():
processed_im = processed_im.to(device).float()
output = model(processed_im)
ps = torch.exp(output)
pred = ps.topk(topk)
flower_ids = pred[1][0].to('cpu')
flower_ids = torch.Tensor.numpy(flower_ids)
probs = pred[0][0].to('cpu')
idx_to_class = {k:v for v,k in checkpoint['class_to_idx'].items()}
flower_names = np.array([cat_to_name[idx_to_class[x]] for x in flower_ids])
return probs, flower_names
if args.topk:
probs, flower_names = predict(image_path, model, args.topk)
print('Probabilities of top {} flowers:'.format(args.topk))
for i in range(args.topk):
print('{} : {:.2f}'.format(flower_names[i],probs[i]))
else:
probs, flower_names = predict(image_path, model)
print('Flower is predicted to be {} with {:.2f} probability'.format(flower_names[0], probs[0]))
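# Illustrative invocation (the script name is a placeholder; the defaults above
# are used when the flags are omitted):
#   python predict.py --checkpoint checkpoint.pth \
#       --image_path flowers/test/28/image_05230.jpg --topk 5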
import subprocess
import os
import signal
import psutil
import time
class ShellProcessRunner(object):
def __init__(self):
self.cmd = None
self.started = False
def start(self):
if self.started:
return
if self.cmd is None:
raise Exception("Process cmd is None.")
self.process = subprocess.Popen(self.cmd, shell=True)
if self.process.poll() is not None:
raise Exception("Process died immediately. returncode: %s."%(
self.process.returncode,
))
self.started = True
def stop(self):
if not self.started:
return
ppid = self.process.pid
parent = psutil.Process(ppid)
for child in parent.children(recursive=True):
try:
child.send_signal(signal.SIGINT)
child.wait()
except psutil.NoSuchProcess:
pass
parent.send_signal(signal.SIGINT)
parent.wait()
self.started = False
def check_if_successfully_start(self):
# TODO: implement custom ways to check successful start
pass
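# Usage sketch (subclassing to set a command is an assumption; the base class
# leaves self.cmd as None and start() raises if it is unset):
#   class SleepRunner(ShellProcessRunner):
#       def __init__(self):
#           super(SleepRunner, self).__init__()
#           self.cmd = "sleep 60"
#   runner = SleepRunner()
#   runner.start()   # raises if the process dies immediately
#   runner.stop()    # SIGINTs the whole process tree via psutil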
import os
import fnmatch
import re
import subprocess
import sys
import readline
import shutil
import random
settings_file = '%s/.infinispan_dev_settings' % os.getenv('HOME')
upstream_url = 'git@github.com:infinispan/infinispan.git'
### Known config keys
local_mvn_repo_dir_key = "local_mvn_repo_dir"
maven_pom_xml_namespace = "http://maven.apache.org/POM/4.0.0"
default_settings = {'dry_run': False, 'multi_threaded': False, 'verbose': False, 'use_colors': True}
boolean_keys = ['dry_run', 'multi_threaded', 'verbose']
class Colors(object):
MAGENTA = '\033[95m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
CYAN = '\033[96m'
END = '\033[0m'
UNDERLINE = '\033[4m'
@staticmethod
def magenta():
if use_colors():
return Colors.MAGENTA
else:
return ""
@staticmethod
def green():
if use_colors():
return Colors.GREEN
else:
return ""
@staticmethod
def yellow():
if use_colors():
return Colors.YELLOW
else:
return ""
@staticmethod
def red():
if use_colors():
return Colors.RED
else:
return ""
@staticmethod
def cyan():
if use_colors():
return Colors.CYAN
else:
return ""
@staticmethod
def end_color():
if use_colors():
return Colors.END
else:
return ""
class Levels(Colors):
C_DEBUG = Colors.CYAN
C_INFO = Colors.GREEN
C_WARNING = Colors.YELLOW
C_FATAL = Colors.RED
C_ENDC = Colors.END
DEBUG = "DEBUG"
INFO = "INFO"
WARNING = "WARNING"
FATAL = "FATAL"
@staticmethod
def get_color(level):
if use_colors():
return getattr(Levels, "C_" + level)
else:
return ""
def use_colors():
return ('use_colors' in settings and settings['use_colors']) or ('use_colors' not in settings)
def prettyprint(message, level):
start_color = Levels.get_color(level)
end_color = Levels.end_color()
print "[%s%s%s] %s" % (start_color, level, end_color, message)
def apply_defaults(s):
for e in default_settings.items():
if e[0] not in s:
s[e[0]] = e[1]
return s
def to_bool(x):
if type(x) == bool:
return x
if type(x) == str:
return {'true': True, 'false': False}.get(x.strip().lower())
def get_settings():
"""Retrieves user-specific settings for all Infinispan tools. Returns a dict of key/value pairs, or an empty dict if the settings file doesn't exist."""
f = None
try:
settings = {}
f = open(settings_file)
for l in f:
if not l.strip().startswith("#"):
kvp = l.split("=")
if kvp and len(kvp) > 0 and kvp[0] and len(kvp) > 1:
settings[kvp[0].strip()] = kvp[1].strip()
settings = apply_defaults(settings)
for k in boolean_keys:
settings[k] = to_bool(settings[k])
return settings
except IOError as ioe:
return {}
finally:
if f:
f.close()
settings = get_settings()
def input_with_default(msg, default):
i = raw_input("%s %s[%s]%s: " % (msg, Colors.magenta(), default, Colors.end_color()))
if i.strip() == "":
i = default
return i
def handle_release_virgin():
"""This sounds dirty!"""
prettyprint("""
It appears that this is the first time you are using this script. I need to ask you a few questions before
we can proceed. Default values are in brackets, just hitting ENTER will accept the default value.
Let's get started!
""", Levels.WARNING)
s = {}
s["verbose"] = input_with_default("Be verbose?", False)
s["multi_threaded"] = input_with_default("Run multi-threaded? (Disable to debug)", True)
s["use_colors"] = input_with_default("Use colors?", True)
s = apply_defaults(s)
f = open(settings_file, "w")
try:
for e in s.keys():
f.write(" %s = %s \n" % (e, s[e]))
finally:
f.close()
def require_settings_file(recursive = False):
"""Tests whether the settings file exists, and if not prompts the user to create one."""
f = None
try:
f = open(settings_file)
except IOError as ioe:
if not recursive:
handle_release_virgin()
require_settings_file(True)
prettyprint("User-specific environment settings file %s created! Please start this script again!" % settings_file, Levels.INFO)
sys.exit(4)
else:
prettyprint("User-specific environment settings file %s is missing! Cannot proceed!" % settings_file, Levels.FATAL)
prettyprint("Please create a file called %s with the following lines:" % settings_file, Levels.FATAL)
prettyprint( '''
verbose = False
use_colors = True
multi_threaded = True
''', Levels.INFO)
sys.exit(3)
finally:
if f:
f.close()
def get_search_path(executable):
"""Retrieves a search path based on where the current executable is located. Returns a string to be prepended to add"""
in_bin_dir = re.compile('^.*/?bin/.*.py')
if in_bin_dir.search(executable):
return "./"
else:
return "../"
def strip_leading_dots(filename):
return filename.strip('/. ')
def to_set(list):
"""Crappy implementation of creating a Set from a List. To cope with older Python versions"""
temp_dict = {}
for entry in list:
temp_dict[entry] = "dummy"
return temp_dict.keys()
class GlobDirectoryWalker:
"""A forward iterator that traverses a directory tree"""
def __init__(self, directory, pattern="*"):
self.stack = [directory]
self.pattern = pattern
self.files = []
self.index = 0
def __getitem__(self, index):
while True:
try:
file = self.files[self.index]
self.index = self.index + 1
except IndexError:
# pop next directory from stack
self.directory = self.stack.pop()
self.files = os.listdir(self.directory)
self.index = 0
else:
# got a filename
fullname = os.path.join(self.directory, file)
if os.path.isdir(fullname) and not os.path.islink(fullname):
self.stack.append(fullname)
if fnmatch.fnmatch(file, self.pattern):
return fullname
class Git(object):
'''Encapsulates git functionality necessary for releasing Infinispan'''
cmd = 'git'
# Helper functions to clean up branch lists
@staticmethod
def clean(e): return e.strip().replace(' ', '').replace('*', '')
@staticmethod
def non_empty(e): return e != None and e.strip() != ''
@staticmethod
def current(e): return e != None and e.strip().replace(' ', '').startswith('*')
def __init__(self, branch, tag_name):
if not self.is_git_directory():
raise Exception('Attempting to run git outside of a repository. Current directory is %s' % os.path.abspath(os.path.curdir))
self.branch = branch
self.tag = tag_name
self.verbose = False
if settings['verbose']:
self.verbose = True
rand = '%x'.upper() % (random.random() * 100000)
self.working_branch = '__temp_%s' % rand
self.original_branch = self.current_branch()
def run_git(self, opts):
call = [self.cmd]
if type(opts) == list:
for o in opts:
call.append(o)
elif type(opts) == str:
for o in opts.split(' '):
if o != '':
call.append(o)
else:
raise Error("Cannot handle argument of type %s" % type(opts))
if settings['verbose']:
prettyprint( 'Executing %s' % call, Levels.DEBUG )
return subprocess.Popen(call, stdout=subprocess.PIPE).communicate()[0].split('\n')
def is_git_directory(self):
return self.run_git('branch')[0] != ''
def is_upstream_clone(self):
r = self.run_git('remote show -n origin')
cleaned = map(self.clean, r)
def push(e): return e.startswith('PushURL:')
def remove_noise(e): return e.replace('PushURL:', '')
push_urls = map(remove_noise, filter(push, cleaned))
return len(push_urls) == 1 and push_urls[0] == upstream_url
def clean_branches(self, raw_branch_list):
return map(self.clean, filter(self.non_empty, raw_branch_list))
def remote_branch_exists(self):
'''Tests whether the branch exists on the remote origin'''
branches = self.clean_branches(self.run_git("branch -r"))
def replace_origin(b): return b.replace('origin/', '')
return self.branch in map(replace_origin, branches)
def switch_to_branch(self):
'''Switches the local repository to the specified branch. Creates it if it doesn't already exist.'''
local_branches = self.clean_branches(self.run_git("branch"))
if self.branch not in local_branches:
self.run_git("branch %s origin/%s" % (self.branch, self.branch))
self.run_git("checkout %s" % self.branch)
def create_tag_branch(self):
'''Creates and switches to a temp tagging branch, based off the release branch.'''
self.run_git("checkout -b %s %s" % (self.working_branch, self.branch))
def commit(self, files, message):
'''Commits the set of files to the current branch with a generated commit message.'''
for f in files:
self.run_git("add %s" % f)
self.run_git(["commit", "-m", message])
def commit_modified(self, message):
'''Commits all the files that were modified in working copy to the current branch with a generated commit message.'''
self.run_git(["commit", "-a", "-m", message])
def tag_for_release(self):
'''Tags the current branch for release using the tag name.'''
self.run_git(["tag", "-a", "-m", "'Release Script: tag %s'" % self.tag, self.tag])
def push_tag_to_origin(self):
'''Pushes the updated tags to origin'''
self.run_git("push origin --tags")
def push_branch_to_origin(self):
'''Pushes the updated branch to origin'''
self.run_git("push origin %s" % (self.branch))
def current_branch(self):
'''Returns the current branch you are on'''
return map(self.clean, filter(self.current, self.run_git('branch')))[0]
def cleanup(self):
'''Cleans up any temporary branches created'''
self.run_git("checkout %s" % self.original_branch)
self.run_git("branch -D %s" % self.working_branch)
def clean_release_directory(self):
'''Makes sure that no files exist in the working directory that might affect the content of the distribution'''
self.run_git("clean -d -x -f")
self.run_git("reset --hard HEAD")
class DryRun(object):
location_root = "%s/%s" % (os.getenv("HOME"), "infinispan_release_dry_run")
def find_version(self, url):
return os.path.split(url)[1]
def copy(self, src, dst):
prettyprint( " DryRun: Executing %s" % ['rsync', '-rv', '--protocol=28', src, dst], Levels.DEBUG)
try:
os.makedirs(dst)
except:
pass
subprocess.check_call(['rsync', '-rv', '--protocol=28', src, dst])
class Uploader(object):
def __init__(self):
if settings['verbose']:
self.scp_cmd = ['scp', '-rv']
self.rsync_cmd = ['rsync', '-rv', '--protocol=28']
else:
self.scp_cmd = ['scp', '-r']
self.rsync_cmd = ['rsync', '-r', '--protocol=28']
def upload_scp(self, fr, to, flags = []):
self.upload(fr, to, flags, list(self.scp_cmd))
def upload_rsync(self, fr, to, flags = []):
self.upload(fr, to, flags, list(self.rsync_cmd))
def upload(self, fr, to, flags, cmd):
for e in flags:
cmd.append(e)
cmd.append(fr)
cmd.append(to)
subprocess.check_call(cmd)
class DryRunUploader(DryRun):
def upload_scp(self, fr, to, flags = []):
self.upload(fr, to, "scp")
def upload_rsync(self, fr, to, flags = []):
self.upload(fr, to.replace(':', '____').replace('@', "__"), "rsync")
def upload(self, fr, to, type):
self.copy(fr, "%s/%s/%s" % (self.location_root, type, to))
def maven_build_distribution(version):
"""Builds the distribution in the current working dir"""
mvn_commands = [["clean"], ["install"], ["deploy", "-Pdistribution,extras"]]
for c in mvn_commands:
c.append("-Dmaven.test.skip.exec=true")
c.append("-DskipTests")
if settings['dry_run']:
c.append("-Dmaven.deploy.skip=true")
if not settings['verbose']:
c.insert(0, '-q')
c.insert(0, 'mvn')
subprocess.check_call(c)
def get_version_pattern():
return re.compile("^([4-9]\.[0-9])\.[0-9]\.(Final|(Alpha|Beta|CR)[1-9][0-9]?)$")
def get_version_major_minor(full_version):
pattern = get_version_pattern()
matcher = pattern.match(full_version)
return matcher.group(1)
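# For reference, the pattern above accepts versions such as
#   "5.1.0.Final" -> major/minor "5.1"
#   "4.2.1.CR3"   -> major/minor "4.2"
# and rejects anything outside the <4-9>.<minor>.<micro>.(Final|Alpha|Beta|CR)N scheme.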
def assert_python_minimum_version(major, minor):
e = re.compile('([0-9])\.([0-9])\.([0-9]).*')
m = e.match(sys.version)
major_ok = int(m.group(1)) == major
minor_ok = int(m.group(2)) >= minor
if not (minor_ok and major_ok):
prettyprint( "This script requires Python >= %s.%s.0. You have %s" % (major, minor, sys.version), Levels.FATAL)
sys.exit(3)
import pandas as pd
import torch
from torch import nn
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Device Being used:", device)
torch.autograd.set_detect_anomaly(True)
from torch.autograd import Variable
import numpy as np
from torch import optim
from sklearn import metrics
import os
import copy
cos = nn.CosineSimilarity(dim=2, eps=1e-6)
import sys
from gpytorch.kernels import ScaleKernel, SpectralMixtureKernel, RBFKernel, CosineKernel, MaternKernel, PiecewisePolynomialKernel, RQKernel, PolynomialKernelGrad
from hdp_hmm import StickyHDPHMM
dir_path ="SanghaiTech/Split/"
fc7_features_path = os.path.join(dir_path, 'fc7-features')
annotation_path = os.path.join(dir_path, 'annotations')
root_dir = "SanghaiTech/Videos/"
annotated_videos = os.listdir(os.path.join(root_dir, 'testing', 'fc7-features'))
unannotated_videos = os.listdir(os.path.join(root_dir, 'training', 'preprocessed/'))
def hdm_dmm(features, output, no_posterior_steps=100, out_th = 35):
data_to_pass = features.data.cpu().numpy()
stickyhdmm = StickyHDPHMM(data_to_pass)
mean_output = torch.quantile(output, out_th, axis = 1)
topk_output = torch.zeros_like(mean_output)
for i in range(no_posterior_steps):
stickyhdmm.sampler()
cluster_numbers = np.array(stickyhdmm.state)
cluster_numbers = torch.from_numpy(cluster_numbers).to(device)
for i in range(len(features)):
video_cluster = cluster_numbers[i]
cluster_no_score = {}
for j, cluster_no in enumerate(video_cluster):
if output[i][j]<mean_output[i]:
continue
if cluster_no in cluster_no_score:
cluster_no_score[cluster_no] = torch.max(cluster_no_score[cluster_no], output[i][j])
else:
cluster_no_score[cluster_no] = output[i][j]
video_sum = torch.zeros_like(mean_output[0])
for k, v in cluster_no_score.items():
video_sum+=v
topk_output[i] = video_sum/len(cluster_no_score)
return topk_output
class CalibratedK(torch.nn.Module):
def __init__(self):
super(CalibratedK, self).__init__()
def forward(self, abnormal_outputs, normal_outputs, abnormal_features, normal_features, sim_th, out_th, no_segments = 32):
topk_output = hdm_dmm(abnormal_features,abnormal_outputs, no_posterior_steps=10, out_th = out_th)
#normal_max_value = compute_topk(normal_features, normal_outputs, sim_th, out_th)
[normal_max_value, _] = torch.max(normal_outputs, axis=1)
hinge_loss = torch.zeros_like(abnormal_outputs)[0][0]
for normal in normal_max_value:
topk_loss = 1-topk_output+normal
topk_loss[topk_loss<0]=0
topk_loss = torch.sum(topk_loss)
hinge_loss += topk_loss
return hinge_loss/(normal_outputs.shape[0])
class GCNConv(torch.nn.Module):
def __init__(self, input_channels, out_channels):
super(GCNConv, self).__init__()
self.input_channels = input_channels
self.out_channels = out_channels
self.weight = torch.nn.Linear(input_channels, out_channels)#Parameter(FloatTensor(input_channels, out_channels))
#self.reset_parameters()
def reset_parameters(self):
stdv = 1./sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
support = self.weight(input)#input.matmul(self.weight)
output = adj.matmul(support)
return output
class Net(torch.nn.Module):
def __init__(self, input_dim, hidden_size = 32, no_segments =32, no_mixtures = 5, initialize = False, X=None, y=None):
super(Net, self).__init__()
self.gc1 = GCNConv(input_dim, 128)
self.gc2= GCNConv(128, 64)
self.gc3 = GCNConv(input_dim, 128)
self.gc4= GCNConv(128, 64)
self.relu = torch.nn.ReLU()
self.dropout = torch.nn.Dropout(0.6)
self.lstm = torch.nn.LSTM(128,hidden_size,5,batch_first=True)
self.fc = torch.nn.Linear(hidden_size, 1)
self.sigmoid = torch.nn.Sigmoid()
self.hidden_size = hidden_size
self.norm = torch.nn.BatchNorm1d(hidden_size)
self.no_segments = no_segments
self.covar_module1 = RBFKernel(ard_num_dims=input_dim)#SpectralMixtureKernel(num_mixtures=6, ard_num_dims=input_dim, eps = 1e-06)
self.covar_module2 = SpectralMixtureKernel(num_mixtures=6, ard_num_dims=128, eps = 1e-06)
if initialize:
self.covar_module1.initialize_from_data(X, y)
def compute_feature_adjacency(self, x, covar):
adj = covar(x).evaluate()
I = Variable(torch.eye(adj.shape[1]), requires_grad = True).to(device)
I = I.repeat(x.shape[0], 1, 1)
adj_til = adj+I
d_inv_sqrt2 = torch.diag_embed(torch.pow(torch.sum(adj_til, dim = 2), -0.5))
adj_hat = d_inv_sqrt2.matmul(adj_til).matmul(d_inv_sqrt2)
return adj_hat
def compute_temporal_adjacency(self, x):
adj = torch.zeros(x.shape[1], x.shape[1])
for i in range(len(adj)):
for j in range(len(adj)):
adj[i][j] = torch.exp(-torch.abs(torch.tensor(abs(i-j))))
adj = Variable(adj, requires_grad = True).to(device)
adj = adj.repeat(x.shape[0], 1, 1)
I = Variable(torch.eye(adj.shape[1]), requires_grad = True).to(device)
I = I.repeat(x.shape[0], 1, 1)
adj_til = adj+I
d_inv_sqrt2 = torch.diag_embed(torch.pow(torch.sum(adj_til, dim = 2), -0.5))
adj_hat = d_inv_sqrt2.matmul(adj_til).matmul(d_inv_sqrt2)
return adj_hat
def forward(self, x):
adj_1_feat = self.compute_feature_adjacency(x, self.covar_module1)
x_feat_1 = self.gc1(x, adj_1_feat)
adj_1_temp = self.compute_temporal_adjacency(x)
x_temp_1 = self.gc3(x, adj_1_temp)
x = x_feat_1+x_temp_1
x = self.relu(x)
x = self.dropout(x)
x,_ = self.lstm(x)
feat= self.norm(x)
x = self.fc(feat)
x = self.sigmoid(x)
return [feat, x]
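# For reference, both adjacency builders in Net apply the symmetric GCN
# normalization A_hat = D^(-1/2) (A + I) D^(-1/2), where A comes either from a
# feature-space kernel (RBF / spectral mixture) or from temporal proximity
# exp(-|i - j|) between the video segments.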
def get_output(X, model):
X = torch.from_numpy(X)
X = Variable(X).to(device)
[_, output] = model(X.float())
return output
def getframeauc(model, X_test_abnormal, X_test_normal, video_names_abnormal, video_names_normal):
no_segments = X_test_abnormal.shape[1]
predictions_abnormal = get_output(X_test_abnormal, model)
predictions_normal = get_output(X_test_normal, model)
predictions_abnormal = predictions_abnormal.data.cpu().numpy().flatten()
predictions_normal = predictions_normal.data.cpu().numpy().flatten()
predictions_abnormal = predictions_abnormal.reshape(len(X_test_abnormal),no_segments)
predictions_normal = predictions_normal.reshape(len(X_test_normal), no_segments)
GT, Pred = [], []
clip_size = 16
video_names = np.concatenate([video_names_abnormal, video_names_normal])
predictions = np.concatenate([predictions_abnormal, predictions_normal])
for i, video in enumerate(video_names):
prediction = predictions[i]
no_clips = len(sorted(os.listdir(fc7_features_path+"/testing/"+video)))
thirty2_shots = np.round(np.linspace(0, no_clips-1, 33))
p_c = 0
clip_pred_score = np.zeros(no_clips)
for ishots in range(0, len(thirty2_shots)-1):
ss = int(thirty2_shots[ishots])
ee = int(thirty2_shots[ishots+1])
if ee<ss or ee==ss:
clip_pred_score[ss] = prediction[p_c]
else:
clip_pred_score[ss:ee] = prediction[p_c]
p_c+=1
if video in annotated_videos:
val = np.load(os.path.join(root_dir, 'testing', 'test_frame_mask', video+".npy"))
number_frames = len(val)
GT.extend(val.tolist())
elif video in unannotated_videos:
number_frames = len(os.listdir(os.path.join(root_dir, 'training', 'preprocessed', video)))
val = np.zeros(number_frames)
GT.extend(val.tolist())
else:
print("Unusual")
print(video)
frame_pred = np.zeros(number_frames)
for j in range(no_clips):
start_frame = j*clip_size
if (j+1)*clip_size>number_frames:
end_frame = number_frames
else:
end_frame = (j+1)*clip_size
frame_pred[start_frame: end_frame] =clip_pred_score[j]
Pred.extend(frame_pred.tolist())
fpr, tpr, thresholds = metrics.roc_curve (GT, Pred, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
return roc_auc
if __name__=="__main__":
[_, run, out_th] = sys.argv
X_train_abnormal, X_train_normal = np.load("Dataset/SanghaiTech/X_train_abnormal.npy",allow_pickle=True), np.load("Dataset/SanghaiTech/X_train_normal.npy",allow_pickle=True)
X_test_abnormal, X_test_normal = np.load("Dataset/SanghaiTech/X_test_abnormal.npy"), np.load("Dataset/SanghaiTech/X_test_normal.npy")
video_names_abnormal, video_names_normal = np.load("Dataset/SanghaiTech/videos_test_abnormal.npy"), np.load("Dataset/SanghaiTech/videos_test_normal.npy")
#Training settings
batch_size = 16
lr = 0.01
sim_th = float(35)/100
out_th = float(out_th)/100
hidden_size = 32
no_segments = X_train_abnormal.shape[1]
max_iterations = 50000
input_dim = X_train_abnormal.shape[2]
abnormal_idx = list(range(len(X_train_abnormal)))
normal_idx = list(range(len(X_train_normal)))
model = Net(input_dim=input_dim,hidden_size=hidden_size)
customobjective = CalibratedK()
model.to(device)
customobjective.to(device)
optimizer = optim.SGD(model.parameters(), lr = lr, weight_decay = 0.0001)
best_auc = 0
aucs = []
losses =[]
for i in range(max_iterations+1):
model.train()
np.random.shuffle(abnormal_idx)
np.random.shuffle(normal_idx)
# In each batch,half is positive and half is negative
train_abnormal_feat = X_train_abnormal[abnormal_idx[:int(batch_size/2)]]
train_normal_feat = X_train_normal[normal_idx[:int(batch_size/2)]]
train_feat = np.concatenate([train_abnormal_feat, train_normal_feat])
train_feat = torch.from_numpy(train_feat)
train_feat = Variable(train_feat, requires_grad = True).to(device)
optimizer.zero_grad()
[feats, outputs] = model(train_feat.float())
outputs = outputs.squeeze()
abnormal_outputs, normal_outputs = outputs[:int(batch_size/2)], outputs[int(batch_size/2):]
abnormal_features = feats[:int(batch_size/2)]
normal_features = feats[int(batch_size/2):]
loss = customobjective(abnormal_outputs,normal_outputs,abnormal_features, normal_features, sim_th, out_th, no_segments = no_segments)
loss.backward()
a = loss.data.cpu()
losses.append(a)
optimizer.step()
if i%10==0:
model.eval()
test_abnormal = torch.from_numpy(X_test_abnormal)
test_abnormal = Variable(test_abnormal).to(device)
[_, predictions_abnormal] = model(test_abnormal.float())
predictions_abnormal = predictions_abnormal.reshape(-1, no_segments)
predictions_abnormal = predictions_abnormal.data.cpu().numpy()
test_normal = torch.from_numpy(X_test_normal)
test_normal = Variable(test_normal).to(device)
[_, predictions_normal] = model(test_normal.float())
predictions_normal = predictions_normal.reshape(-1, no_segments)
predictions_normal = predictions_normal.data.cpu().numpy()
auc_score = getframeauc(model, X_test_abnormal, X_test_normal, video_names_abnormal, video_names_normal)
aucs.append(auc_score)
            if auc_score > best_auc:
best_auc = auc_score
print("Saving model")
torch.save({'state_dict': model.state_dict(),
'opt_dict': optimizer.state_dict(),}, os.path.join("models/SanghaiTech/model_hdm_hmm_"+str(run)+"_"+str(lr)+"_"+str(sim_th)+"_"+str(out_th)+".pth.tar"))
print(" For the iteration", i, "Best AUC", best_auc)
losses = np.array(losses)
aucs = np.array(aucs)
np.save("logs/SanghaiTech/auc_hdm_hmm_"+str(run)+"_"+str(lr)+"_"+str(sim_th)+"_"+str(out_th)+".npy", aucs)
np.save("logs/SanghaiTech/losses_hdm_hmm_"+str(run)+"_"+str(lr)+"_"+str(sim_th)+"_"+str(out_th)+".npy", losses)
# -*- coding: utf-8 -*-
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""keyrotator List command."""
import logging
import re
import iam_service
class ListCommand(object):
"""Implementation of the keyrotator list command."""
keyname_pattern = re.compile("keys/(.*)$")
def run(self, project_id, iam_account, return_results=False):
"""Runs the list_keys command for keyrotator.
Args:
project_id: The project_id for which to create the key.
iam_account: The IAM account for which to create the key.
return_results: Boolean to return results or exit code.
Returns:
An integer indicating status or a dictionary containing
key data given an input parameters.
"""
response = iam_service.list_keys(project_id, iam_account)
if response and "keys" in response:
logging.info("Current key listing:")
for key in response["keys"]:
key_path = self.keyname_pattern.search(key["name"])
logging.info("Key: %s\n\tCreated: %s\n\tExpires: %s",
key_path.group(1), key["validAfterTime"],
key["validBeforeTime"])
if return_results:
return response["keys"]
return 0
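# Usage sketch (hypothetical identifiers; assumes iam_service credentials are
# configured for the project):
#   exit_code = ListCommand().run(
#       "my-project", "rotator@my-project.iam.gserviceaccount.com")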
# Copyright 2021 Miljenko Šuflaj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from textwrap import dedent
benchmark_key_to_string = {
"conv1d": "Convolution 1D",
"conv2d": "Convolution 2D",
"conv3d": "Convolution 3D",
"masked_conv1d": "Masked Convolution 1D",
"masked_conv2d": "Masked Convolution 2D",
"masked_conv3d": "Masked Convolution 3D",
}
benchmark_key_pairs = (
("conv1d", "masked_conv1d"),
("conv2d", "masked_conv2d"),
("conv3d", "masked_conv3d"),
)
def generate_benchmark_markdown() -> str:
string = ""
with open("data/benchmark.json") as f:
json_dict = json.load(f)
for first, second in benchmark_key_pairs:
first_len = float(json_dict[first])
second_len = float(json_dict[second])
throughput_percentage = first_len / second_len * 100
string += (
f"- {benchmark_key_to_string[second]}: **{throughput_percentage:.02f} %** "
f"{benchmark_key_to_string[first]} throughput\n "
)
return string
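# Note: data/benchmark.json is assumed to map each benchmark key to a timing
# value, e.g. {"conv1d": "1.00", "masked_conv1d": "1.25", ...}, so the
# baseline-to-masked ratio above reads as a throughput percentage.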
def generate_readme_markdown() -> str:
return dedent(
f"""\
# Masked Convolution
[](https://opensource.org/licenses/Apache-2.0)
A PyTorch implementation of a thin wrapper for masked convolutions.
## What are masked convolutions?
Similarly to [partial convolutions](https://github.com/NVIDIA/partialconv), masked convolutions mask a part of the kernel, essentially ignoring data at specific locations. As an example, consider
```python
a = [1, 2, 3, 4, 5]
```
assuming we have a convolution kernel
```python
kernel = [1, 1, 1]
```
convolving over `a` would give us
```python
a_conv = [6, 9, 12]
```
However, if we were to mask the convolution kernel with a mask
```python
mask = [1, 0, 1]
```
**masked convolving** over `a` would return
```python
a_masked_conv = [4, 6, 8]
```
One use of masked convolutions is emulating skip-grams.
## Installation
First, make sure you have PyTorch installed. This was tested on **Python 3.8** and **PyTorch 1.7.1**. Further testing is needed to determine whether it works on a different setup, but chances are it does. The recommended way to install the package is from PyPI by running:
```bash
pip install masked-convolution
```
Other than that, you can clone this repository, and in its root directory (where `setup.py` is located) run
```bash
pip install .
```
## Benchmarks
Every build, automatic benchmarks are run in order to determine how much overhead the implementation brings. The ordinary convolutions are used as a baseline, while the performance of masked convolutions is described as a percentage of the throughput of their respective baselines.
Keep in mind that these benchmarks are in no way professional; they only serve to give users a general idea. Results vary considerably between runs, so they should be taken with a grain of salt.
{generate_benchmark_markdown()}
"""
)
def main():
with open("README.md", mode="w+", encoding="utf8", errors="replace") as f:
f.write(generate_readme_markdown())
if __name__ == "__main__":
main()
# Generated by Django 2.2.7 on 2020-04-10 05:42
import blog.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=191)),
('body', models.TextField()),
('image', models.ImageField(blank=True, default='', null=True, upload_to=blog.models.upload_to)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('updated', models.BooleanField(default=False)),
('deleted', models.BooleanField(default=False)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('likes', models.ManyToManyField(blank=True, related_name='likes', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Post',
'verbose_name_plural': 'Post',
'ordering': ['-created_at'],
},
),
]
from __future__ import absolute_import
import tangos.testing.simulation_generator
from tangos import parallel_tasks as pt
from tangos import testing
import tangos
import sys
import time
from six.moves import range
from nose.plugins.skip import SkipTest
def setup():
pt.use("multiprocessing")
testing.init_blank_db_for_testing(timeout=5.0, verbose=False)
generator = tangos.testing.simulation_generator.TestSimulationGenerator()
generator.add_timestep()
generator.add_objects_to_timestep(9)
tangos.core.get_default_session().commit()
def teardown():
tangos.core.close_db()
pt.launch(tangos.core.close_db, 6)
def _add_property():
for i in pt.distributed(list(range(1,10))):
with pt.ExclusiveLock('insert', 0.05):
tangos.get_halo(i)['my_test_property']=i
tangos.core.get_default_session().commit()
def test_add_property():
pt.launch(_add_property,3)
for i in range(1,10):
assert tangos.get_halo(i)['my_test_property']==i
def _add_two_properties_different_ranges():
for i in pt.distributed(list(range(1,10))):
with pt.ExclusiveLock('insert', 0.05):
tangos.get_halo(i)['my_test_property_2']=i
tangos.core.get_default_session().commit()
for i in pt.distributed(list(range(1,8))):
with pt.ExclusiveLock('insert', 0.05):
tangos.get_halo(i)['my_test_property_3'] = i
tangos.core.get_default_session().commit()
def test_add_two_properties_different_ranges():
pt.launch(_add_two_properties_different_ranges,3)
for i in range(1,10):
assert tangos.get_halo(i)['my_test_property_2']==i
if i<8:
assert 'my_test_property_3' in tangos.get_halo(i)
assert tangos.get_halo(i)['my_test_property_3'] == i
else:
assert 'my_test_property_3' not in tangos.get_halo(i)
def _test_not_run_twice():
import time
# For this test we want a staggered start
time.sleep(pt.backend.rank()*0.05)
for i in pt.distributed(list(range(3))):
with pt.ExclusiveLock("lock"):
tangos.get_halo(1)['test_count']+=1
tangos.get_default_session().commit()
def test_for_loop_is_not_run_twice():
"""This test checks for an issue where if the number of CPUs exceeded the number of jobs for a task, the
entire task could be run twice"""
tangos.get_halo(1)['test_count'] = 0
tangos.get_default_session().commit()
pt.launch(_test_not_run_twice, 5)
assert tangos.get_halo(1)['test_count']==3
def _test_empty_loop():
for _ in pt.distributed([]):
assert False
def test_empty_loop():
pt.launch(_test_empty_loop,3)
def _test_empty_then_non_empty_loop():
for _ in pt.distributed([]):
pass
for _ in pt.distributed([1,2,3]):
pass
def test_empty_then_non_empty_loop():
pt.launch(_test_empty_then_non_empty_loop, 3)
def _test_synchronize_db_creator():
rank = pt.backend.rank()
import tangos.parallel_tasks.database
# hack: MultiProcessing backend forks so has already "synced" the current creator.
tangos.core.creator._current_creator = None
pt.database.synchronize_creator_object(tangos.core.get_default_session())
with pt.ExclusiveLock('insert', 0.05):
tangos.get_halo(rank)['db_creator_test_property'] = 1.0
tangos.core.get_default_session().commit()
def test_synchronize_db_creator():
pt.launch(_test_synchronize_db_creator,3)
assert tangos.get_halo(1)['db_creator_test_property']==1.0
assert tangos.get_halo(2)['db_creator_test_property'] == 1.0
creator_1, creator_2 = [tangos.get_halo(i).get_objects('db_creator_test_property')[0].creator for i in (1,2)]
assert creator_1==creator_2
def _test_shared_locks():
start_time = time.time()
if pt.backend.rank()==1:
# exclusive mode
time.sleep(0.05)
with pt.lock.ExclusiveLock("lock"):
# should be running after the shared locks are done
assert time.time()-start_time>0.1
else:
# shared mode
with pt.lock.SharedLock("lock"):
# should not have waited for the other shared locks
assert time.time() - start_time < 0.1
time.sleep(0.1)
pt.backend.barrier()
def _test_shared_locks_in_queue():
start_time = time.time()
    if pt.backend.rank() <= 2:
# exclusive mode
with pt.lock.ExclusiveLock("lock", 0):
assert time.time() - start_time < 0.2
time.sleep(0.1)
else:
# shared mode
time.sleep(0.1)
with pt.lock.SharedLock("lock",0):
# should be running after the exclusive locks are done
assert time.time() - start_time > 0.1
time.sleep(0.1)
# should all have run in parallel
assert time.time()-start_time<0.5
pt.backend.barrier()
def test_shared_locks():
pt.launch(_test_shared_locks,4)
pt.launch(_test_shared_locks_in_queue, 6)
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for PL-SQL recall file parser."""
import unittest
from plaso.formatters import pls_recall as _ # pylint: disable=unused-import
from plaso.lib import timelib
from plaso.parsers import pls_recall
from tests.parsers import test_lib
class PlsRecallTest(test_lib.ParserTestCase):
"""Tests for PL-SQL recall file parser."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._parser = pls_recall.PlsRecallParser()
def testParse(self):
"""Tests the Parse function."""
test_file = self._GetTestFilePath([u'PLSRecall_Test.dat'])
event_queue_consumer = self._ParseFile(self._parser, test_file)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
# There are two events in test file.
self.assertEqual(len(event_objects), 2)
event_object = event_objects[0]
timestamp_expected = timelib.Timestamp.CopyFromString(
u'2013-06-18 19:50:00:00:00')
self.assertEqual(event_object.timestamp, timestamp_expected)
sequence_expected = 206
self.assertEqual(event_object.sequence, sequence_expected)
username_expected = u'tsltmp'
self.assertEqual(event_object.username, username_expected)
database_name_expected = u'DB11'
self.assertEqual(event_object.database_name, database_name_expected)
# The test file actually has 'test_databae' in the SQL string.
query_expected = u'SELECT * from test_databae where date > \'01/01/2012\''
self.assertEqual(event_object.query, query_expected)
expected_msg = (
u'Sequence #206 '
u'User: tsltmp '
u'Database Name: DB11 '
u'Query: SELECT * from test_databae where date > \'01/01/2012\'')
expected_msg_short = (
u'206 tsltmp DB11 '
u'SELECT * from test_databae where date > \'01/01/2012\'')
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
if __name__ == '__main__':
unittest.main()
from predicate import predicate
class Annotation(predicate):
"""
"""
"""
Tests the geomeTRIC molecule class.
"""
import pytest
import geometric
import os
import numpy as np
from . import addons
datad = addons.datad
def test_blank_molecule():
mol = geometric.molecule.Molecule()
assert len(mol) == 0
class TestAlaGRO:
@classmethod
def setup_class(cls):
        try:
            cls.molecule = geometric.molecule.Molecule(os.path.join(datad, 'alaglu.gro'))
        except Exception:
            assert 0, "Failed to load ACE-ALA-NME ACE-GLU-NME structure"
def test_topology(self):
"""Check for the correct number of bonds in a simple molecule"""
# print(len(self.molecule.bonds))
# self.logger.debug("\nTrying to read alanine dipeptide conformation... ")
assert len(self.molecule.bonds) == 47, "Incorrect number of bonds for ACE-ALA-NME ACE-GLU-NME structure"
assert len(self.molecule.molecules) == 2, "Incorrect number of molecules for ACE-ALA-NME ACE-GLU-NME structure"
def test_measure_distances(self):
measure = self.molecule.measure_distances(41,43)
ref = 1.337198
np.testing.assert_almost_equal(measure, ref, 4)
def test_measure_angles(self):
measure = self.molecule.measure_angles(40,14,39)
ref = 9.429428
np.testing.assert_almost_equal(measure, ref, 4)
def test_measure_dihedrals(self):
measure = self.molecule.measure_dihedrals(35,32,30,28)
ref = 56.5389
np.testing.assert_almost_equal(measure, ref, 4)
def test_lattice(self):
bx = self.molecule.boxes[0]
np.testing.assert_almost_equal(bx.a, 20.0)
np.testing.assert_almost_equal(bx.b, 20.0)
np.testing.assert_almost_equal(bx.c, 20.0)
np.testing.assert_almost_equal(bx.alpha, 90.0)
np.testing.assert_almost_equal(bx.beta, 90.0)
np.testing.assert_almost_equal(bx.gamma, 90.0)
def test_add(self):
# Test adding of Molecule objects and ensure that copies are created when adding
M = self.molecule + self.molecule # __add__
M += self.molecule # __iadd__
assert len(M) == 3
assert np.allclose(M.xyzs[0], M.xyzs[1])
M.xyzs[0][0,0] += 1.0
assert not np.allclose(M.xyzs[0], M.xyzs[1], atol=0.1)
assert not np.allclose(M.xyzs[0], M.xyzs[2], atol=0.1)
assert np.allclose(M.xyzs[1], M.xyzs[2])
M.xyzs[1][0,0] += 1.0
assert np.allclose(M.xyzs[0], M.xyzs[1])
assert not np.allclose(M.xyzs[0], M.xyzs[2], atol=0.1)
assert not np.allclose(M.xyzs[1], M.xyzs[2], atol=0.1)
M.xyzs[2][0,0] += 1.0
assert np.allclose(M.xyzs[0], M.xyzs[1])
assert np.allclose(M.xyzs[1], M.xyzs[2])
assert np.allclose(M.xyzs[0], M.xyzs[2])
def test_select_stack(self):
M1 = self.molecule.atom_select(range(22))
assert len(M1.bonds) == 21
assert len(M1.molecules) == 1
M2 = self.molecule.atom_select(range(22, self.molecule.na))
assert len(M2.bonds) == 26
assert len(M2.molecules) == 1
M3 = M1.atom_stack(M2)
assert np.allclose(self.molecule.xyzs[0], M3.xyzs[0])
M1.xyzs[0][0,0] += 1.0
assert np.allclose(self.molecule.xyzs[0], M3.xyzs[0])
def test_find_angles_dihedrals(self):
a = self.molecule.find_angles()
assert len(a) == 81
d = self.molecule.find_dihedrals()
assert len(d) == 97
def test_remove_tr(self):
IC = geometric.internal.DelocalizedInternalCoordinates(self.molecule, build=True, connect=False, addcart=False)
IC_TR = geometric.internal.DelocalizedInternalCoordinates(self.molecule, build=True, connect=False, addcart=False, remove_tr=True)
assert len(IC.Internals) == self.molecule.na*3
assert len(IC_TR.Internals) == (self.molecule.na*3 - 6)
from amep.commands.make_dataset.sub_command import MakeDataset # NOQA
import argparse
import numpy as np
import pandas as pd
import joblib
from src import config
TRAINING_DATA = config.TRAINING_DATA
TEST_DATA = config.TEST_DATA
FOLDS = config.FOLDS
def predict(MODEL, FOLDS):
df = pd.read_csv(TEST_DATA)
text_idx = df["id"].values
predictions = None
for FOLD in range(FOLDS):
print(FOLD)
df = pd.read_csv(TEST_DATA)
encoders = joblib.load(f"models/{MODEL}_{FOLD}_label_encoder.pkl")
cols = joblib.load(f"models/{MODEL}_{FOLD}_columns.pkl")
for c in encoders:
print(c)
lbl = encoders[c]
df.loc[:, c] = lbl.transform(df[c].values.tolist())
clf = joblib.load(f"models/{MODEL}_{FOLD}_.pkl")
df = df[cols]
preds = clf.predict_proba(df)[:, 1]
if FOLD == 0:
predictions = preds
else:
predictions += preds
    predictions /= FOLDS  # average the predictions over the folds
sub = pd.DataFrame(
np.column_stack((text_idx, predictions)), columns=["id", "target"]
)
return sub
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("model", help="Type in the model you want to run", type=str)
args = parser.parse_args()
MODEL = args.model
submission = predict(MODEL, FOLDS)
submission.id = submission.id.astype(int)
submission.to_csv(f"models/{MODEL}.csv", index=False)
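# Usage sketch (hypothetical script and model names matching the saved artifacts):
#   python predict.py xgb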
# INI handling sample
import configparser
config = configparser.ConfigParser()
config.read('python-test.ini', encoding="UTF-8")
sections = config.sections()
print(sections)
pt3 = config.get('RECT', 'pt3', fallback='855,774')  # falls back to '855,774' if the 'RECT' section or 'pt3' option does not exist
print(pt3)
if 'RECT' in config: # check 'RECT' section
secRECT = config['RECT']
pt1 = secRECT['pt1']
print(pt1)
pt2 = secRECT['pt2']
print(pt2)
#pt3 = secRECT['pt3']
else:
    print("'RECT' section does not exist; using default values")
pt1 = '468,203'
pt2 = '1011,398'
config.add_section('RECT')
config['RECT']['pt1'] = pt1
config['RECT']['pt2'] = pt2
try:
    with open('python-test.ini', 'w', encoding='UTF-8') as f:
        config.write(f)
except OSError:
    print('WARNING: config write failed!')
import os
import warnings
import sys
import pandas as pd
import numpy as np
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.naive_bayes import GaussianNB
import mlflow
import mlflow.sklearn
import logging
logging.basicConfig(level=logging.WARN)
logger = logging.getLogger(__name__)
def eval_metrics(actual, pred):
accuracy = metrics.accuracy_score(actual, pred)
recall_score = metrics.recall_score(actual, pred)
f1_score = metrics.f1_score(actual, pred)
return accuracy, recall_score, f1_score
# Read the csv file
data = pd.read_csv('pointure.data')
label_encoder = preprocessing.LabelEncoder()
input_classes = ['masculin','féminin']
label_encoder.fit(input_classes)
# encode the set of class labels as integers
encoded_labels = label_encoder.transform(data['Genre'])
data['Genre'] = encoded_labels
# Split the data into training and test sets. (0.75, 0.25) split.
train, test = train_test_split(data)
# The predicted column is "Genre"
train_x = train.drop(["Genre"], axis=1)
test_x = test.drop(["Genre"], axis=1)
train_y = train[["Genre"]]
test_y = test[["Genre"]]
gnb = GaussianNB()
gnb.fit(train_x, train_y.values.ravel())  # sklearn expects a 1-D label array
predicted = gnb.predict(test_x)
(accuracy, recall_score, f1_score) = eval_metrics(test_y, predicted)
print(" accuracy: %s" % accuracy)
print(" recall_score: %s" % recall_score)
print(" f1_score: %s" % f1_score)
with open("metrics.txt", 'w') as outfile:
outfile.write("accuracy: " + str(accuracy) + "\n")
outfile.write("recall_score: " + str(recall_score) + "\n")
outfile.write("f1_score: " + str(f1_score) + "\n")
mlflow.set_experiment(experiment_name="mlflow_demo")  # select the experiment before starting the run
with mlflow.start_run():
mlflow.log_metric("accuracy", accuracy)
mlflow.log_metric("recall_score", recall_score)
mlflow.log_metric("f1_score", f1_score)
mlflow.sklearn.log_model(gnb, "model")
#print(f"artifact_uri={mlflow.get_artifact_uri()}")
# Copyright 2019 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""The fallback skill implements a special type of skill handling
utterances not handled by the intent system.
"""
import operator
from mycroft.metrics import report_timing, Stopwatch
from mycroft.util.log import LOG
from .mycroft_skill import MycroftSkill, get_handler_name
class FallbackSkill(MycroftSkill):
"""Fallbacks come into play when no skill matches an Adapt or closely with
a Padatious intent. All Fallback skills work together to give them a
view of the user's utterance. Fallback handlers are called in an order
determined the priority provided when the the handler is registered.
======== ======== ================================================
Priority Who? Purpose
======== ======== ================================================
1-4 RESERVED Unused for now, slot for pre-Padatious if needed
5 MYCROFT Padatious near match (conf > 0.8)
6-88 USER General
89 MYCROFT Padatious loose match (conf > 0.5)
90-99 USER Uncaught intents
100+ MYCROFT Fallback Unknown or other future use
======== ======== ================================================
Handlers with the numerically lowest priority are invoked first.
Multiple fallbacks can exist at the same priority, but no order is
guaranteed.
    A Fallback can either observe or consume an utterance. A consumed
    utterance will not be seen by any other Fallback handlers.
"""
fallback_handlers = {}
wrapper_map = [] # Map containing (handler, wrapper) tuples
def __init__(self, name=None, bus=None, use_settings=True):
super().__init__(name, bus, use_settings)
# list of fallback handlers registered by this instance
self.instance_fallback_handlers = []
@classmethod
def make_intent_failure_handler(cls, bus):
"""Goes through all fallback handlers until one returns True"""
def handler(message):
start, stop = message.data.get('fallback_range', (0, 101))
# indicate fallback handling start
LOG.debug('Checking fallbacks in range '
'{} - {}'.format(start, stop))
bus.emit(message.forward("mycroft.skill.handler.start",
data={'handler': "fallback"}))
stopwatch = Stopwatch()
handler_name = None
with stopwatch:
sorted_handlers = sorted(cls.fallback_handlers.items(),
key=operator.itemgetter(0))
handlers = [f[1] for f in sorted_handlers
if start <= f[0] < stop]
for handler in handlers:
try:
if handler(message):
# indicate completion
status = True
handler_name = get_handler_name(handler)
bus.emit(message.forward(
'mycroft.skill.handler.complete',
data={'handler': "fallback",
"fallback_handler": handler_name}))
break
except Exception:
LOG.exception('Exception in fallback.')
else:
status = False
# indicate completion with exception
warning = 'No fallback could handle intent.'
bus.emit(message.forward('mycroft.skill.handler.complete',
data={'handler': "fallback",
'exception': warning}))
if 'fallback_range' not in message.data:
# Old system TODO: Remove in 20.08
# No fallback could handle the utterance
bus.emit(message.forward('complete_intent_failure'))
LOG.warning(warning)
# return if the utterance was handled to the caller
bus.emit(message.response(data={'handled': status}))
# Send timing metric
if message.context.get('ident'):
ident = message.context['ident']
report_timing(ident, 'fallback_handler', stopwatch,
{'handler': handler_name})
return handler
@classmethod
def _register_fallback(cls, handler, wrapper, priority):
"""Register a function to be called as a general info fallback
Fallback should receive message and return
a boolean (True if succeeded or False if failed)
Lower priority gets run first
0 for high priority 100 for low priority
Arguments:
handler (callable): original handler, used as a reference when
removing
wrapper (callable): wrapped version of handler
priority (int): fallback priority
"""
while priority in cls.fallback_handlers:
priority += 1
cls.fallback_handlers[priority] = wrapper
cls.wrapper_map.append((handler, wrapper))
def register_fallback(self, handler, priority):
"""Register a fallback with the list of fallback handlers and with the
list of handlers registered by this instance
"""
def wrapper(*args, **kwargs):
if handler(*args, **kwargs):
self.make_active()
return True
return False
self.instance_fallback_handlers.append(handler)
self._register_fallback(handler, wrapper, priority)
@classmethod
def _remove_registered_handler(cls, wrapper_to_del):
"""Remove a registered wrapper.
Arguments:
wrapper_to_del (callable): wrapped handler to be removed
Returns:
(bool) True if one or more handlers were removed, otherwise False.
"""
found_handler = False
for priority, handler in list(cls.fallback_handlers.items()):
if handler == wrapper_to_del:
found_handler = True
del cls.fallback_handlers[priority]
if not found_handler:
LOG.warning('No fallback matching {}'.format(wrapper_to_del))
return found_handler
@classmethod
def remove_fallback(cls, handler_to_del):
"""Remove a fallback handler.
Arguments:
handler_to_del: reference to handler
Returns:
(bool) True if at least one handler was removed, otherwise False
"""
# Find wrapper from handler or wrapper
wrapper_to_del = None
for h, w in cls.wrapper_map:
if handler_to_del in (h, w):
wrapper_to_del = w
break
if wrapper_to_del:
cls.wrapper_map.remove((h, w))
remove_ok = cls._remove_registered_handler(wrapper_to_del)
else:
LOG.warning('Could not find matching fallback handler')
remove_ok = False
return remove_ok
def remove_instance_handlers(self):
"""Remove all fallback handlers registered by the fallback skill."""
self.log.info('Removing all handlers...')
while len(self.instance_fallback_handlers):
handler = self.instance_fallback_handlers.pop()
self.remove_fallback(handler)
def default_shutdown(self):
"""Remove all registered handlers and perform skill shutdown."""
self.remove_instance_handlers()
super(FallbackSkill, self).default_shutdown()
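# Usage sketch (hypothetical skill): a subclass registers a handler that
# consumes any utterance containing "ping".
#
#     class PingFallbackSkill(FallbackSkill):
#         def initialize(self):
#             self.register_fallback(self.handle_ping, 50)
#
#         def handle_ping(self, message):
#             if 'ping' in message.data.get('utterance', ''):
#                 self.speak('pong')
#                 return True
#             return False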
"""
`neo_blue`
========================================================
Copyright 2020 Alorium Technology
Contact: info@aloriumtech.com
Description:
This is a very simple CircuitPython program that turns the
Evo M51 NeoPixel blue.
"""
from aloriumtech import board, digitalio, neopixel
neo = neopixel.NeoPixel(board.NEOPIXEL, 1)
neo.brightness = 0.1
led = digitalio.DigitalInOut(board.D13)
led.direction = digitalio.Direction.OUTPUT
led.value = False
print("NeoPixel Blue")
while True:
led.value = False
neo[0] = (0, 0, 255)
import os
import time
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.nn import DataParallel
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from torchvision.transforms import transforms
from networks.discriminator import get_discriminator
from networks.resnet import resnet18
from networks.unet import UNet
# from utils.Logger import Logger
from utils.read_data import ConcatDataset
from utils.util import add_prefix, weight_to_cpu, rgb2gray, write_list, copy, write
plt.switch_backend('agg')
class base(object):
def __init__(self, args):
"""
"""
self.debug = args.debug
self.prefix = args.prefix
self.pretrain_unet_path = args.pretrain_unet_path
self.is_pretrained_unet = args.is_pretrained_unet
self.use_gpu = torch.cuda.is_available()
self.epoch_interval = 1 if self.debug else 50
self.power = args.power
self.data = args.data
self.batch_size = args.batch_size
self.epsi = args.epsi
self.gan_type = args.gan_type
self.u_depth = args.u_depth
self.d_depth = args.d_depth
self.dowmsampling = args.dowmsampling
self.lr = args.lr
self.beta1 = args.beta1
self.eta = args.eta
self.interval = args.interval
self.epochs = args.epochs
self.local = args.local
# self.logger = Logger(add_prefix(self.prefix, 'tensorboard'))
self.mean, self.std = [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]
self.dataloader = self.get_dataloader()
self.auto_encoder = self.get_unet()
self.d = get_discriminator(self.gan_type, self.d_depth, self.dowmsampling)
self.classifier = resnet18(is_ptrtrained=False)
self.log_lst = []
if self.use_gpu:
self.auto_encoder = DataParallel(self.auto_encoder).cuda()
self.classifier = DataParallel(self.classifier).cuda()
self.d = DataParallel(self.d).cuda()
else:
raise RuntimeError('there is no gpu available.')
self.save_init_paras()
self.get_optimizer()
self.save_hyperparameters(args)
def save_hyperparameters(self, args):
write(vars(args), add_prefix(self.prefix, 'para.txt'))
print('save hyperparameters successfully.')
def train(self, epoch):
pass
def validate(self, epoch):
real_data_score = []
fake_data_score = []
for i, (lesion_data, _, lesion_names, _, real_data, _, normal_names, _) in enumerate(self.dataloader):
if i > 2:
break
if self.use_gpu:
lesion_data, real_data = lesion_data.cuda(), real_data.cuda()
phase = 'lesion_data'
prefix_path = '%s/epoch_%d/%s' % (self.prefix, epoch, phase)
lesion_output = self.d(self.auto_encoder(lesion_data))
fake_data_score += list(lesion_output.squeeze().cpu().data.numpy().flatten())
for idx in range(self.batch_size):
single_image = lesion_data[idx:(idx + 1), :, :, :]
single_name = lesion_names[idx]
self.save_single_image(prefix_path, single_name, single_image)
if self.debug:
break
phase = 'normal_data'
prefix_path = '%s/epoch_%d/%s' % (self.prefix, epoch, phase)
normal_output = self.d(real_data)
real_data_score += list(normal_output.squeeze().cpu().data.numpy().flatten())
for idx in range(self.batch_size):
single_image = real_data[idx:(idx + 1), :, :, :]
single_name = normal_names[idx]
self.save_single_image(prefix_path, single_name, single_image)
if self.debug:
break
prefix_path = '%s/epoch_%d' % (self.prefix, epoch)
self.plot_hist('%s/score_distribution.png' % prefix_path, real_data_score, fake_data_score)
torch.save(self.auto_encoder.state_dict(), add_prefix(prefix_path, 'g.pkl'))
torch.save(self.d.state_dict(), add_prefix(prefix_path, 'd.pkl'))
torch.save(self.classifier.state_dict(), add_prefix(prefix_path, 'c.pkl'))
print('save model parameters successfully when epoch=%d' % epoch)
def main(self):
print('training start!')
start_time = time.time()
for epoch in range(1, self.epochs + 1):
self.u_lr_scheduler.step()
self.c_lr_scheduler.step()
self.d_lr_scheduler.step()
self.train(epoch)
if epoch % self.epoch_interval == 0:
self.validate(epoch)
self.validate(self.epochs)
total_ptime = time.time() - start_time
print('Training complete in {:.0f}m {:.0f}s'.format(
total_ptime // 60, total_ptime % 60))
def get_optimizer(self):
self.u_optimizer = torch.optim.Adam(self.auto_encoder.parameters(), lr=self.lr, betas=(self.beta1, 0.9))
self.d_optimizer = torch.optim.Adam(self.d.parameters(), lr=self.lr, betas=(self.beta1, 0.9))
self.c_optimizer = torch.optim.Adam(self.classifier.parameters(), lr=self.lr, betas=(0.9, 0.999))
self.u_lr_scheduler = lr_scheduler.ExponentialLR(self.u_optimizer, gamma=self.epsi)
self.d_lr_scheduler = lr_scheduler.ExponentialLR(self.d_optimizer, gamma=self.epsi)
self.c_lr_scheduler = lr_scheduler.ExponentialLR(self.c_optimizer, gamma=self.epsi)
def save_init_paras(self):
if not os.path.exists(self.prefix):
os.makedirs(self.prefix)
torch.save(self.auto_encoder.state_dict(), add_prefix(self.prefix, 'init_g_para.pkl'))
torch.save(self.d.state_dict(), add_prefix(self.prefix, 'init_d_para.pkl'))
torch.save(self.classifier.state_dict(), add_prefix(self.prefix, 'init_c_para.pkl'))
print('save initial model parameters successfully')
def restore(self, x):
x = torch.squeeze(x, 0)
x = x.data.cpu()
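        # undo the Normalize transform channel-by-channel: t = t * std + mean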
for t, m, s in zip(x, self.mean, self.std):
t.mul_(s).add_(m)
# transform Tensor to numpy
x = x.numpy()
x = np.transpose(x, (1, 2, 0))
x = np.clip(x * 255, 0, 255).astype(np.uint8)
return x
def get_unet(self):
unet = UNet(3, depth=self.u_depth, in_channels=3)
print(unet)
        print('loaded unet with depth %d; downsampling will be performed %d times!' % (
            self.u_depth, self.u_depth - 1))
if self.is_pretrained_unet:
unet.load_state_dict(weight_to_cpu(self.pretrain_unet_path))
print('load pretrained unet!')
return unet
def get_dataloader(self):
if self.local:
print('load data from local.')
if self.data == '/data/zhangrong/gan':
print('load DR with size 128 successfully!!')
else:
raise ValueError("the parameter data must be in ['/data/zhangrong/gan']")
else:
print('load data from data center.')
if self.data == './data/gan':
print('load DR with size 128 successfully!!')
elif self.data == './data/contrast_dataset':
print('load contrast dataset with size 128 successfully!!')
else:
raise ValueError("the parameter data must be in ['./data/gan']")
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(self.mean, self.std)
])
dataset = ConcatDataset(data_dir=self.data,
transform=transform,
alpha=self.power
)
data_loader = DataLoader(dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=2,
drop_last=True,
pin_memory=True if self.use_gpu else False)
return data_loader
def save_single_image(self, saved_path, name, inputs):
"""
save unet output as a form of image
"""
if not os.path.exists(saved_path):
os.makedirs(saved_path)
output = self.auto_encoder(inputs)
left = self.restore(inputs)
right = self.restore(output)
diff = np.where(left > right, left - right, right - left).clip(0, 255).astype(np.uint8)
plt.figure(num='unet result', figsize=(8, 8))
plt.subplot(2, 2, 1)
plt.title('source image')
plt.imshow(left)
plt.axis('off')
plt.subplot(2, 2, 2)
plt.title('unet output')
plt.imshow(right)
plt.axis('off')
plt.subplot(2, 2, 3)
plt.imshow(rgb2gray(diff), cmap='jet')
plt.colorbar(orientation='horizontal')
plt.title('difference in heatmap')
plt.axis('off')
plt.subplot(2, 2, 4)
plt.imshow(rgb2gray(diff.clip(0, 32)), cmap='jet')
plt.colorbar(orientation='horizontal')
plt.axis('off')
plt.tight_layout()
plt.savefig(add_prefix(saved_path, name))
plt.close()
def plot_hist(self, path, real_data, fake_data):
bins = np.linspace(min(min(real_data), min(fake_data)), max(max(real_data), max(fake_data)), 60)
plt.hist(real_data, bins=bins, alpha=0.3, label='real_score', edgecolor='k')
plt.hist(fake_data, bins=bins, alpha=0.3, label='fake_score', edgecolor='k')
plt.legend(loc='upper right')
plt.savefig(path)
plt.close()
def save_log(self):
write_list(self.log_lst, add_prefix(self.prefix, 'log.txt'))
print('save running log successfully')
def save_running_script(self, script_path):
"""
save the main running script to get differences between scripts
"""
copy(script_path, add_prefix(self.prefix, script_path.split('/')[-1]))
def get_lr(self):
lr = []
for param_group in self.d_optimizer.param_groups:
lr += [param_group['lr']]
return lr[0]
def shuffle(self, lesion_data, normal_data, lesion_labels, normal_labels, lesion_gradients, normal_gradients):
inputs, labels, gradients = torch.cat((lesion_data, normal_data), 0), torch.cat(
(lesion_labels, normal_labels)), torch.cat((lesion_gradients, normal_gradients), 0)
shuffled_index = torch.randperm(inputs.size(0)).cuda()
return inputs.index_select(0, shuffled_index), labels.index_select(0, shuffled_index), gradients.index_select(0,
shuffled_index)
import os
import numpy as np
from PIL import Image
from tensorflow.python.keras.models import load_model
from scripts.util import normalise_data
_LABEL_TO_CHARACTER = {10: '+', 11: '-', 12: '*', 13: '/', 14: '(', 15: ')'}


def get_character(label):
    # Operator/bracket labels map via the table; digit labels map to themselves.
    return _LABEL_TO_CHARACTER.get(label, str(label))
def print_prediction_information(file_name, y_pred_i):
print(f'Character: {file_name}')
    print(f'Predicted value: {get_character(np.argmax(y_pred_i))}, '
          f'with accuracy of {max(y_pred_i) * 100.0:.3f}%')
if max(y_pred_i) <= 0.5:
print('WARNING: Accuracy is below 50%, classification might be incorrect.')
print()
def classify(model_path, extracted_characters_dir, width, height, verbose):
model = load_model(model_path)
file_names, chars = [], []
for subdir, dirs, files in os.walk(extracted_characters_dir):
for file in sorted(files):
file_names.append(os.path.join(subdir, file))
photo = Image.open(os.path.join(subdir, file)).convert('L') # Greyscale
X_pred_i = normalise_data(np.asarray(photo))
chars.append(X_pred_i)
X_pred = np.asarray(chars).reshape((len(chars), width, height, 1))
y_pred = model.predict(X_pred, verbose=0)
predicted_labels = []
print()
for idx, y_pred_i in enumerate(y_pred):
predicted_labels.append(np.argmax(y_pred_i))
if verbose:
print_prediction_information(file_names[idx], y_pred_i)
math_expression = ' '.join([get_character(label) for label in predicted_labels])
return math_expression
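# Usage sketch (hypothetical paths; assumes 28x28 glyph crops):
#   classify('models/cnn.h5', 'out/characters', 28, 28, verbose=True)
#   # -> e.g. '3 + 4 * ( 2 - 1 )'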
from setuptools import setup, find_packages
import os
import glob
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
VERSIONFILE="gdelttools/_version.py"
with open(VERSIONFILE, "rt") as vfile:
for line in vfile:
line = line.strip()
        lhs, equals, rhs = line.partition("=")
if lhs.strip() == "__version__":
rhs = rhs.strip()
version_string = rhs.strip('"')
pyfiles = [f for f in os.listdir(".") if f.endswith(".py")]
setup(
name="gdelttools",
version=version_string,
author="Joe Drumgoole",
author_email="joe@joedrumgoole.com",
description="A set of tools to support downloading GDELT data",
long_description=long_description,
long_description_content_type='text/markdown',
license="Apache 2.0",
keywords="MongoDB GDELT dataset",
url="https://github.com/jdrumgoole/gdelttools",
install_requires=['pymongo',
'requests',
],
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
# setup_requires=["pymongo",
# "nose",
# "dnspython",
# "dateutils",
# "configargparse",
# "toml"],
packages=find_packages(),
data_files=[("test", glob.glob("data/*.ff") +
glob.glob("data/*.csv") +
glob.glob("data/*.txt"))],
python_requires='>3.7',
scripts=[],
entry_points={
'console_scripts': [
'gdeltloader=gdelttools.gdeltloader:main',
]
},
test_suite='nose.collector',
tests_require=['nose'],
)
# Copyright (c) 2020 kamyu. All rights reserved.
#
# Google Code Jam 2008 Round 3 - Problem C. No Cheating
# https://code.google.com/codejam/contest/32002/dashboard#s=p2
#
# Time: O(E * sqrt(V)) = O(M * N * sqrt(M * N))
# Space: O(V) = O(M * N)
#
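# NOTE: written for Python 2 (uses raw_input, xrange and the print statement).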
import collections
# Time: O(E * sqrt(V))
# Space: O(V)
# Source code from http://code.activestate.com/recipes/123641-hopcroft-karp-bipartite-matching/
# Hopcroft-Karp bipartite max-cardinality matching and max independent set
# David Eppstein, UC Irvine, 27 Apr 2002
def bipartiteMatch(graph):
'''Find maximum cardinality matching of a bipartite graph (U,V,E).
The input format is a dictionary mapping members of U to a list
of their neighbors in V. The output is a triple (M,A,B) where M is a
dictionary mapping members of V to their matches in U, A is the part
of the maximum independent set in U, and B is the part of the MIS in V.
The same object may occur in both U and V, and is treated as two
distinct vertices if this happens.'''
# initialize greedy matching (redundant, but faster than full search)
matching = {}
for u in graph:
for v in graph[u]:
if v not in matching:
matching[v] = u
break
while 1:
# structure residual graph into layers
# pred[u] gives the neighbor in the previous layer for u in U
# preds[v] gives a list of neighbors in the previous layer for v in V
# unmatched gives a list of unmatched vertices in final layer of V,
# and is also used as a flag value for pred[u] when u is in the first layer
preds = {}
unmatched = []
pred = dict([(u,unmatched) for u in graph])
for v in matching:
del pred[matching[v]]
layer = list(pred)
# repeatedly extend layering structure by another pair of layers
while layer and not unmatched:
newLayer = {}
for u in layer:
for v in graph[u]:
if v not in preds:
newLayer.setdefault(v,[]).append(u)
layer = []
for v in newLayer:
preds[v] = newLayer[v]
if v in matching:
layer.append(matching[v])
pred[matching[v]] = v
else:
unmatched.append(v)
# did we finish layering without finding any alternating paths?
if not unmatched:
unlayered = {}
for u in graph:
for v in graph[u]:
if v not in preds:
unlayered[v] = None
return (matching,list(pred),list(unlayered))
# recursively search backward through layers to find alternating paths
# recursion returns true if found path, false otherwise
def recurse(v):
if v in preds:
L = preds[v]
del preds[v]
for u in L:
if u in pred:
pu = pred[u]
del pred[u]
if pu is unmatched or recurse(pu):
matching[v] = u
return 1
return 0
for v in unmatched: recurse(v)
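# Usage sketch: keys of `graph` are U-vertices mapping to lists of V-neighbours.
# For example, bipartiteMatch({'u1': ['v1', 'v2'], 'u2': ['v1']}) returns a
# maximum matching such as ({'v1': 'u2', 'v2': 'u1'}, [...], [...]).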
def no_cheating():
M, N = map(int, raw_input().strip().split())
seats, E, count = [], collections.defaultdict(set), 0
for i in xrange(M):
seats.append(raw_input().strip())
for j in xrange(N):
if seats[i][j] != '.':
continue
count += 1
for dx, dy in DIRECTIONS:
ni, nj = i+dx, j+dy
if 0 <= ni < M and 0 <= nj < N and seats[ni][nj] == '.':
if j%2 == 0:
E[i*N+j].add(ni*N+nj)
else:
E[ni*N+nj].add(i*N+j)
return count-len(bipartiteMatch(E)[0])
DIRECTIONS = [(-1, -1), (0, -1), (-1, 1), (0, 1)]
for case in xrange(input()):
print 'Case #%d: %s' % (case+1, no_cheating())
# Generated by Django 3.0.3 on 2020-03-13 11:01
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('eadmin', '0004_auto_20200313_0713'),
]
operations = [
migrations.CreateModel(
name='Products',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, unique=True, validators=[django.core.validators.MinLengthValidator(3), django.core.validators.MaxLengthValidator(20)], verbose_name='Product name')),
('unit', models.CharField(choices=[('kg', 'Kilogram'), ('ltr', 'Liter')], max_length=10, verbose_name='Unit(s)')),
('details', models.CharField(default='No details are available', max_length=250, validators=[django.core.validators.MaxLengthValidator(250)], verbose_name='Details')),
],
),
]
# BSD 3-Clause License
#
# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the psutil authors nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import time
import torch
from patrickstar.manager import PatrickStarManager
from patrickstar.profiler import profiler
from patrickstar.utils import logger, getsizeof
import patrickstar.utils.global_timer as global_timer
from .const import TensorState, ChunkState
class Chunk(object):
def __init__(
self,
capacity: int,
data_type: torch.dtype,
chunk_id: int,
local_rank: int = 0,
is_dummy: bool = False,
):
r"""
Chunk is the minimal unit of the data transfer.
It is a contiguous memory for saving tensors.
To remove a tensor, we only need to set the state of the tensor to `FREE`.
        Chunk does not know whether we are doing distributed training or not.
Every process will observe its own chunk instances.
Args:
capacity: int. The maximum number of elements in the chunk.
data_type: :class:`torch.dtype`.
chunk_id: int.
local_rank: int.
is_dummy: bool.
"""
self.chunk_id = chunk_id
# payload numel does not equal to capacity. payload can be None.
self.capacity = capacity
self.data_type = data_type
self.local_rank = local_rank
self._is_dummy = is_dummy
# the number of tensors of the chunk in each state
self._state_dict = {
TensorState.COMPUTE: 0,
TensorState.HOLD: 0,
TensorState.HOLD_AFTER_FWD: 0,
TensorState.HOLD_AFTER_BWD: 0,
TensorState.FREE: 0,
}
# the number of tensors that are not used in the forward calculation
self.unused = 0
self.payload = None
self._time_profile = True
self.gpu_access_moments = []
self.cpu_access_moments = []
self._pin_flag = False
def append_moment(self, mom, compute_device):
mgr = PatrickStarManager()
assert mgr.is_warmup_training()
access_moments = (
self.gpu_access_moments
if compute_device.type == "cuda"
else self.cpu_access_moments
)
if len(access_moments) > 0 and mom == access_moments[-1]:
return
else:
access_moments.append(mom)
def next_accessed_mom(self, compute_device):
r"""Get the next accessed moment after the warmup step.
Args:
compute_device: :class:`torch.device`.
Returns:
An int. The next access moment of the chunk. During warmup,
return 0.
"""
mgr = PatrickStarManager()
access_moments = (
self.gpu_access_moments
if compute_device.type == "cuda"
else self.cpu_access_moments
)
if mgr.is_nonwarmup_training():
cur_mom = mgr.get_cur_mom()
max_mom_small_than_cur = 0
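            # Return the first recorded access moment after the current one;
            # if there is none, wrap into the next epoch by adding the total
            # moment count to the last moment before the current one.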
for i in access_moments:
if i > cur_mom:
return i
if i < cur_mom:
max_mom_small_than_cur = i
return mgr.get_total_mom() + max_mom_small_than_cur
else:
return 0
def display_access_mom_info(self):
logger.info(f"\t {self.chunk_id} cpu_access_moments {self.cpu_access_moments}")
logger.info(f"\t {self.chunk_id} gpu_access_moments {self.gpu_access_moments}")
def is_dummy(self):
return self._is_dummy
def get_chunk_space(self):
r"""Size of the chunk (Bytes)."""
return getsizeof(self.data_type) * self.capacity
def get_payload_space(self):
r"""Size of the payload (Bytes)."""
if self.payload is None:
return 0
else:
return getsizeof(self.payload.dtype) * self.payload.numel()
def pin(self):
self._pin_flag = True
def unpin(self):
self._pin_flag = False
def is_pin(self):
return self._pin_flag
def allocate_payload(self, device):
r"""Allocate payload on device for the chunk.
NOTE() This method does not check availability. Please check if
there is enough room for the chunk.
Args:
device: :class:`torch.device`.
"""
if self._time_profile:
global_timer.my_timer.start_profile("CHUNK_allocate_payload")
payload_size = self.capacity
if device.type == "cpu":
self.payload = torch.zeros(
payload_size, dtype=self.data_type, device=device, pin_memory=True
)
else:
self.payload = torch.zeros(
payload_size, dtype=self.data_type, device=device
)
mgr = PatrickStarManager()
mgr.add(device.type, self.get_payload_space())
if profiler.started():
profiler.chunk_life_cycle[self.chunk_id]["life_cycle"].append(
(time.time(), "allocate", device)
)
if self._time_profile:
global_timer.my_timer.finish_profile("CHUNK_allocate_payload")
def release_payload(self):
r"""Release the payload.
NOTE() Please make sure all tensors are in the `FREE` state.
"""
mgr = PatrickStarManager()
mgr.delete(self.get_device().type, self.get_payload_space())
# Remove the memory of the chunk.
del self.payload
self.payload = None
if profiler.started():
profiler.chunk_life_cycle[self.chunk_id]["life_cycle"].append(
(time.time(), "release", None)
)
def update_state(self, old_state, new_state):
r"""Update the state counter of tensors of the chunk.
Args:
old_state: :class:`TensorState`.
new_state: :class:`TensorState`.
"""
self._state_dict[old_state] -= 1
self._state_dict[new_state] += 1
def get_state(self):
"""
When payload is None, the state is `RELEASED`,
otherwise, state of the chunk is decided by its tensors.
Returns:
:class:`ChunkState`.
"""
if self.payload is None:
return ChunkState.RELEASED
# Distributed training need to fix the chunk on the compute device.
if self._state_dict[TensorState.COMPUTE] > 0:
return ChunkState.COMPUTE
elif self._state_dict[TensorState.HOLD] > 0:
return ChunkState.HOLD
elif self._state_dict[TensorState.HOLD_AFTER_FWD] > 0:
return ChunkState.HOLD_AFTER_FWD
elif self._state_dict[TensorState.HOLD_AFTER_BWD] > 0:
return ChunkState.HOLD_AFTER_BWD
else:
return ChunkState.FREE
def all_tensor_state(self, state):
r"""If all tensors are in the state or `FREE`.
Args:
state: :class:`TensorState`.
Return:
bool.
"""
for k, v in self._state_dict.items():
if k != TensorState.FREE and k != state:
if v != 0:
# Ignore the unused tensors.
if k == TensorState.HOLD and v == self.unused:
continue
return False
return True
def set_unused(self):
r"""
After forward calculation, the tensors in `HOLD` state are the ones
that are not used. Remember them for the release.
NOTE() This function can only be called at the end of forward calculation.
"""
# TODO(zilinzhu) Find a better way to represent the unused tensors
self.unused = self._state_dict[TensorState.HOLD]
def move(self, target_device: torch.device):
r"""
Move the chunk to `target_device`.
        NOTE() Please check beforehand that the `target_device` has enough room.
Args:
target_device: :class:`torch.device`.
"""
if self.get_device() is None:
logger.warning(f"chunk move payload None to {target_device}")
return
if self.get_device() == target_device:
return
if self._time_profile:
if target_device.type == "cuda":
global_timer.my_timer.start_profile("chunk_cpu_gpu_move")
else:
global_timer.my_timer.start_profile("chunk_gpu_cpu_move")
src_device = self.get_device()
mgr = PatrickStarManager()
logger.debug(
f"move chunk {self.chunk_id}, which has {self.payload.numel() / 1e6} M {self.payload.dtype} elements, "
f"from {src_device} to {target_device}, "
f"used mem {mgr.used_chunk_mem(target_device.type) / 1e6} MB"
)
        # TODO(jiaruifang) async copy.
if target_device.type == "cpu":
pinned_payload_cpu = torch.empty(
self.payload.shape,
dtype=self.payload.dtype,
device="cpu:0",
pin_memory=True,
)
with torch.cuda.stream(mgr.copy_stream):
pinned_payload_cpu.copy_(self.payload)
self.payload = pinned_payload_cpu
elif target_device.type == "cuda":
self.payload = self.payload.pin_memory()
with torch.cuda.stream(mgr.copy_stream):
self.payload = self.payload.to(target_device)
mgr.delete(src_device.type, self.get_payload_space())
mgr.add(target_device.type, self.get_payload_space())
if self._time_profile:
if target_device.type == "cuda":
global_timer.my_timer.finish_profile("chunk_cpu_gpu_move")
global_timer.data_move_cnter.update(
"chunk_cpu_gpu_move", self.get_payload_space()
)
elif target_device.type == "cpu":
global_timer.my_timer.finish_profile("chunk_gpu_cpu_move")
global_timer.data_move_cnter.update(
"chunk_gpu_cpu_move", self.get_payload_space()
)
if profiler.started():
if len(profiler.chunk_life_cycle[self.chunk_id]["life_cycle"]) == 0:
raise RuntimeError(
f"Chunk {self.chunk_id} allocation time is not recorded. "
f"You may need to put profiler.start() before initialize_engine "
)
profiler.chunk_life_cycle[self.chunk_id]["life_cycle"].append(
(time.time(), "move", target_device)
)
def get_device(self):
r"""Get device of the payload of chunk, return None if not allocated."""
if self.payload is not None:
return self.payload.device
else:
return None
if __name__ == "__main__":
print('HELLO WORLD')
def aumentar(preco=0, taxa=0, sit=False):
"""
->> Calcular o aumento de um valor
:param preco: Valor a aumentar
:param taxa: Valor (porcentagem) do aumento
:param sit: Valor (opcional) informando se deve ou não realizar a formatação.
:return: O valor aumentado conforme a taxa
"""
res = preco + (preco * taxa/100)
return res if sit is False else moeda(res)
def diminuir(preco=0, taxa=0, sit=False):
res = preco - (preco * taxa/100)
return res if sit is False else moeda(res)
def dobro(preco=0, sit=False):
res = preco * 2
return res if sit is False else moeda(res)
def metade(preco=0, sit=False):
res = preco / 2
return res if sit is False else moeda(res)
def moeda(preco=0, moeda='R$'):
return f'{moeda}{preco:.2f}'.replace('.', ',')
def resumo(p=0, taxaa=10, taxar=5):
    print('-' * 30)
    print('VALUE SUMMARY'.center(30))
    print('-' * 30)
    print(f'Analysed price: \t\t{moeda(p)}')
    print(f'Double the price: \t\t{dobro(p, True)}')
    print(f'Half the price: \t\t{metade(p, True)}')
    print(f'With a {taxaa}% increase: \t{aumentar(p, taxaa, True)}')
    print(f'With a {taxar}% reduction: \t{diminuir(p, taxar, True)}')
    print('-' * 30)
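# Usage sketch:
#   aumentar(100, 10)        # -> 110.0
#   aumentar(100, 10, True)  # -> 'R$110,00'
#   resumo(45.50)            # prints the formatted summary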
from abc import abstractmethod
from typing import List, Union, Tuple
import numpy as np
from sc2 import Result, UnitTypeId
from sharpy.managers.extensions import ChatManager
from sharpy.plans import BuildOrder
from sharpy.plans.acts import ActBase
from tactics.ml.agents import BaseMLAgent
REWARD_WIN = 1
REWARD_LOSE = 0
REWARD_TIE = 0 # Any ties are going to be strange builds anyway with 100% for example
class MlBuild(BuildOrder):
agent: BaseMLAgent # Initialize after init
chatter: ChatManager
def __init__(self, state_size: int, action_size: int, orders: List[Union[ActBase, List[ActBase]]],
result_multiplier: float = 1.0):
self.state_size = state_size
self.action_size = action_size
self.reward = 0
self.game_ended = False
self.action: int = 0
self.result_multiplier: float = result_multiplier
self.last_minerals = 0
self.action_time = -1000
self.minimum_action_time = 1
self.update_action_always = False # Set this true to update bot action every step
self.update_on_mineral_loss = True
super().__init__(orders)
async def start(self, knowledge: 'Knowledge'):
await super().start(knowledge)
self.chatter: ChatManager = self.knowledge.chat_manager
@property
@abstractmethod
def state(self) -> List[Union[int, float]]:
pass
@property
def score(self) -> float:
return self.reward
async def debug_draw(self):
action_name, color = self.get_action_name_color(self.action)
self.ai.client.debug_text_screen(action_name, (0.01, 0.01), color, 16)
self.ai.client.debug_text_screen(str(self.score), (0.00, 0.05), color, 16)
await super().debug_draw()
def get_action_name_color(self, action: int) -> Tuple[str, Tuple]:
return f'ACT{action}', (255, 255, 255)
async def execute(self) -> bool:
current_minerals = self.ai.minerals
if (self.update_action_always
or (self.update_on_mineral_loss and current_minerals < self.last_minerals)
or self.action_time + self.minimum_action_time < self.ai.time):
# Update action only if significant amount of time has passed or bot used minerals
self.action_time = self.ai.time
current_state = np.array(self.state)
self.action = self.agent.choose_action(current_state, self.score)
self.last_minerals = current_minerals
await self.chat_space()
return await super().execute()
async def chat_space(self):
if self.ai.time > 10:
await self.chatter.chat_taunt_once("ml_state_space", lambda: f'State size {self.state_size}')
if self.ai.time > 30:
await self.chatter.chat_taunt_once("ml_action_space", lambda: f'Action size {self.action_size}')
if self.ai.time > 40:
await self.chatter.chat_taunt_once("ml_episodes",
lambda: f'This agent has trained for {self.agent.episode} episodes')
def on_end(self, game_result: Result):
self.game_ended = True
self.reward = REWARD_TIE*self.result_multiplier
if game_result == Result.Victory:
self.reward = REWARD_WIN*self.result_multiplier
elif game_result == Result.Defeat:
self.reward = REWARD_LOSE*self.result_multiplier
self.agent.on_end(self.state, self.reward)
    def get_ml_number(self, unit_type: UnitTypeId) -> float:
        """ Calculates a normalized building-progress score for machine learning:
        roughly 1 per finished building, with partial credit for buildings that
        are pending or under construction."""
        units = self.cache.own(unit_type)
        normal_count = len(units)
        not_ready = units.not_ready
        not_ready_count = not_ready.amount
        normal_count = self.related_count(normal_count, unit_type)
        magic = self.unit_pending_count(unit_type) + not_ready_count
        magic += normal_count * 10
        for unit in not_ready:
            magic += unit.build_progress * 9
        return magic * 0.1  # normalize back to 1 finished building being 1
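# A minimal subclass sketch (hypothetical, not part of the original file),
# showing what a concrete MlBuild must provide: a `state` vector of length
# state_size, with orders indexed by the agent's chosen action.
class ExampleMlBuild(MlBuild):
    def __init__(self):
        # state is [minerals, gas]; two possible actions, no orders wired up here
        super().__init__(state_size=2, action_size=2, orders=[])

    @property
    def state(self) -> List[Union[int, float]]:
        return [self.ai.minerals, self.ai.vespene]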
|
nilq/baby-python
|
python
|
class HalfAdder(DynamicNetwork):
#-- This creator HalfAdder(a,b) takes two nodes a,b that
# are inputs to the half-adder.
def __init__(inst, a, b):
#-- Add the two input nodes to the set of nodes associated
        #   with the current half-adder network.
inst.addNodes(a, b)
#-- First, create an XOR gate operating on the two input nodes.
# The output node of the XOR is the low-order bit of the
# 2-bit sum of the two input bits.
XOR = DynamicXORGate(a,b)
inst.s0 = XOR.out
#-- Next, create an AND gate operating on the two input nodes.
# The output node of the AND is the high-order bit of the
# 2-bit sum of the two input bits.
AND = DynamicANDGate(a,b)
inst.s1 = AND.out
#-- Add the output nodes of the XOR and AND gates to the set of
# nodes associated with the current half-adder network.
inst.addNodes(inst.s0, inst.s1)
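#-- Usage sketch (hypothetical node API, assumed from the constructor above):
#       ha = HalfAdder(a, b)   # a, b: input nodes from the surrounding network
#       ha.s0                  # sum (low-order) bit of a + b
#       ha.s1                  # carry (high-order) bit of a + b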
|
nilq/baby-python
|
python
|
import json
import argparse
import matplotlib.pyplot as plt
import math
from typing import Dict
plt.switch_backend("agg")
def plot_bleu_score_data(bleu_score_dict: Dict, language_dict: Dict, picture_path: str):
fig_num_per_picture = 6
lang_list = list(bleu_score_dict.keys())
num_picture = int(math.ceil(len(lang_list) / fig_num_per_picture))
cur = 0
plt.rcParams["font.family"] = "Times New Roman"
for i in range(num_picture):
fig, axes = plt.subplots(3, 2, figsize=(50, 30))
for row in range(3):
for col in range(2):
epoch_list = bleu_score_dict[lang_list[cur]]
axes[row, col].plot(list(range(len(epoch_list))), epoch_list, color="#DE6B58", marker="x",
linestyle="-", linewidth=2, label="BLEU point")
axes[row, col].set_xlabel("epoch")
axes[row, col].set_ylabel("BLEU")
axes[row, col].set_xticks(list(range(len(epoch_list))))
axes[row, col].grid(which="major", axis="y", linewidth=0.5)
axes[row, col].legend(loc="best")
axes[row, col].set_title(language_dict[lang_list[cur]]["language name"], fontdict={"size": 20})
for j in range(len(epoch_list)):
axes[row, col].text(j, epoch_list[j], "{:.2f}".format(epoch_list[j]))
cur += 1
if cur == len(lang_list):
break
if cur == len(lang_list):
break
plt.savefig("{}/{}.jpg".format(picture_path, i), dpi=200)
plt.close()
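# Expected input shapes (assumed from the plotting code above):
#   bleu_score_dict: {"<lang_code>": [bleu_epoch_0, bleu_epoch_1, ...], ...}
#   language_dict:   {"<lang_code>": {"language name": "..."}, ...}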
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--bleu_score_dict_path", required=True)
parser.add_argument("--language_data", required=True)
parser.add_argument("--picture_path", required=True)
args, unknown = parser.parse_known_args()
with open(args.bleu_score_dict_path) as f:
bleu_score_dict = json.load(f)
with open(args.language_data) as f:
language_dict = json.load(f)
plot_bleu_score_data(bleu_score_dict, language_dict, args.picture_path)
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
import pprint
from zolware_data import user_manager
from zolware_data import datasource_manager
from zolware_data import signal_manager
from zolware_data import datasource_reader
user_mgr = user_manager.UserManager()  # avoid shadowing the imported module
user = user_mgr.find_user_by_email('snclucas@gmail.com')
ds_manager = datasource_manager.DatasourceManager(user)
sig_manager = signal_manager.SignalManager(user)
# Loop over datasources
data_sources = ds_manager.get_all_datasources()
for datasource in data_sources:
    if datasource.status == 'OK':
        print('----------------------')
        print('Datasource: ' + datasource.name)
        datasource.populate_signals()
        # use a distinct name: rebinding `datasource_reader` would shadow the
        # imported module and crash on the second loop iteration
        reader = datasource_reader.DataSourceReader(datasource, user)
        series = reader.read()
|
nilq/baby-python
|
python
|
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""收益性的包括年化收益率、净利润、总盈利、总亏损、有效年化收益率、资金使用率。
风险性主要包括胜率、平均盈亏比、最大回撤比例、最大连续亏损次数、最大连续盈利次数、持仓时间占比、贝塔。
综合性指标主要包括风险收益比,夏普比例,波动率,VAR,偏度,峰度等"""
import math
from functools import lru_cache
import numpy as np
import pandas as pd
from QUANTAXIS.QAFetch.QAQuery_Advance import (QA_fetch_index_day_adv,
QA_fetch_stock_day_adv)
from QUANTAXIS.QASU.save_account import save_riskanalysis
from QUANTAXIS.QAUtil.QADate_trade import QA_util_get_trade_gap
from QUANTAXIS.QAUtil.QAParameter import MARKET_TYPE
class QA_Risk():
"""QARISK 是一个风险插件
需要加载一个account/portfolio类进来:
需要有
code,start_date,end_date,daily_cash,daily_hold
"""
def __init__(self, account, benchmark_code='000300', benchmark_type=MARKET_TYPE.INDEX_CN):
self.account = account
        self.benchmark_code = benchmark_code  # defaults to the CSI 300 index
self.benchmark_type = benchmark_type
self.fetch = {MARKET_TYPE.STOCK_CN: QA_fetch_stock_day_adv,
MARKET_TYPE.INDEX_CN: QA_fetch_index_day_adv}
self.market_data = QA_fetch_stock_day_adv(
self.account.code, self.account.start_date, self.account.end_date)
self.assets = ((self.market_data.to_qfq().pivot('close') * self.account.daily_hold).sum(
axis=1) + self.account.daily_cash.set_index('date').cash).fillna(method='pad')
self.time_gap = QA_util_get_trade_gap(
self.account.start_date, self.account.end_date)
def __repr__(self):
return '< QA_RISK ANALYSIS ACCOUNT/PORTFOLIO >'
def __call__(self):
return pd.DataFrame([self.message])
@property
def max_dropback(self):
"""最大回撤
"""
return max([self.assets.iloc[idx::].max() - self.assets.iloc[idx::].min() for idx in range(len(self.assets))]) / float(self.assets.iloc[0])
@property
def profit(self):
return self.calc_profit(self.assets)
@property
def profit_pct(self):
"""利润
"""
return self.calc_profitpctchange(self.assets)
@property
def annualize_return(self):
"""年化收益
Returns:
[type] -- [description]
"""
return self.calc_annualize_return(self.assets, self.time_gap)
@property
def volatility(self):
"""波动率
Returns:
[type] -- [description]
"""
return self.profit_pct.std() * math.sqrt(250)
@property
def message(self):
return {
'account_cookie': self.account.account_cookie,
'portfolio_cookie': self.account.portfolio_cookie,
'user_cookie': self.account.user_cookie,
'annualize_return': self.annualize_return,
'profit': self.profit,
'max_dropback': self.max_dropback,
'time_gap': self.time_gap,
'volatility': self.volatility,
'benchmark_code': self.benchmark_code,
'beta': self.beta,
'alpha': self.alpha,
'sharpe': self.sharpe
}
@property
def benchmark_data(self):
"""
基准组合的行情数据(一般是组合,可以调整)
"""
return self.fetch[self.benchmark_type](
self.benchmark_code, self.account.start_date, self.account.end_date)
@property
def benchmark_assets(self):
"""
基准组合的账户资产队列
"""
return (self.benchmark_data.open / float(self.benchmark_data.open.iloc[0]) * float(self.account.init_assets))
@property
def benchmark_annualize_return(self):
"""基准组合的年化收益
Returns:
[type] -- [description]
"""
return self.calc_annualize_return(self.benchmark_assets, self.time_gap)
@property
def benchmark_profitpct(self):
"""
benchmark 基准组合的收益百分比计算
"""
return self.calc_profitpctchange(self.benchmark_assets)
@property
def beta(self):
"""
beta比率 组合的系统性风险
"""
return self.calc_beta(self.profit_pct.dropna(), self.benchmark_profitpct.dropna())
@property
def alpha(self):
"""
alpha比率 与市场基准收益无关的超额收益率
"""
return self.calc_alpha(self.annualize_return, self.benchmark_annualize_return, self.beta, 0.05)
@property
def sharpe(self):
"""
夏普比率
"""
return self.calc_sharpe(self.annualize_return, self.volatility, 0.05)
@property
def sortino(self):
"""
索提诺比率 投资组合收益和下行风险比值
"""
pass
@property
def calmar(self):
"""
卡玛比率
"""
pass
def set_benchmark(self, code, market_type):
self.benchmark_code = code
self.benchmark_type = market_type
def calc_annualize_return(self, assets, days):
return math.pow(float(assets.iloc[-1]) / float(assets.iloc[0]), 250.0 / float(days)) - 1.0
# def calc_profit(self, assets):
# return (assets.iloc[-1] / assets.iloc[1]) - 1
    def calc_profitpctchange(self, assets):
        # use the passed-in series; the old version always used self.assets,
        # which made benchmark_profitpct compute the wrong returns
        return assets[::-1].pct_change()
def calc_beta(self, assest_profit, benchmark_profit):
calc_cov = np.cov(assest_profit, benchmark_profit)
beta = calc_cov[0, 1] / calc_cov[1, 1]
return beta
def calc_alpha(self, annualized_returns, benchmark_annualized_returns, beta, r=0.05):
alpha = (annualized_returns - r) - (beta) * \
(benchmark_annualized_returns - r)
return alpha
def calc_profit(self, assets):
return (float(assets.iloc[-1]) / float(assets.iloc[0])) - 1
def calc_sharpe(self, annualized_returns, volatility_year, r=0.05):
        'Compute the Sharpe ratio'
return (annualized_returns - r) / volatility_year
def save(self):
"""save to mongodb
"""
save_riskanalysis(self.message)
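# Usage sketch (assumes an account object exposing the attributes listed in
# the class docstring: code, start_date, end_date, daily_cash, daily_hold):
#   risk = QA_Risk(account)
#   print(risk.message)  # annualized return, max drawdown, beta, alpha, sharpe, ...
#   risk.save()          # persist the analysis to MongoDB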
class QA_Performance():
"""
QA_Performance是一个绩效分析插件
需要加载一个account/portfolio类进来:
需要有
code,start_date,end_date,daily_cash,daily_hold
"""
def __init__(self, account):
self.account = account
self._style_title = ['beta', 'momentum', 'size', 'earning_yield',
'volatility', 'growth', 'value', 'leverage', 'liquidity', 'reversal']
@property
def prefer(self):
pass
@property
def style(self):
"""风格分析
"""
pass
def abnormal_active(self):
"""
账户的成交发生异常成交记录的分析
"""
pass
def brinson(self):
"""Brinson Model analysis
"""
pass
def hold(self):
"""持仓分析
"""
pass
@property
def accumulate_return(self):
"""
returns a pd-Dataframe format accumulate return for different periods
"""
pass
def save(self):
"""save the performance analysis result to database
"""
pass
|
nilq/baby-python
|
python
|
"""
file: 'time_align.py'
author: David Fairbairn
date: June 2016
The need for a script that compares timestamp data (currently only relevant for
the Saskatoon SuperDARN radar) against the errlog files' erroneous timestamps
compelled me to write this script.
This script approaches the problem by identifying the times during which 7 and
8 pulse sequences occur, and using a pattern of these (e.g. 7,8,8,8,7,8,..) to
find when the pattern begins in the ERRLOG file and when it begins in the
reliable timestampdata file. The mapping of corresponding pulse sequences in
each file allows us to deduce correct times for the errlog data.
Likely due to old software (the QNX operating system) running on new
hardware at the radar site, the main SuperDARN system at a few locations
undergoes frequent and unpredictable timing corrections (e.g. every 5 minutes
on average, discrete corrections that average about 0.5 seconds).
**CURRENT STATUS**:
At the moment, this code is only useful to get a sense of how 'off' the
errlog data might be - full correction of the errlog data timestamps is
not implemented (although using the "shift_offset" in this code, it could
be done). So it's only useful if you're trying to analyze really nitpicky
issues of which beam/pulse sequence occurred when.
Possible avenues to work on still here:
- correcting and re-saving newly corrected errlog data*
- more errlog file parsing: reading which i) transmit frequency,
which ii) beam, etc.
"""
import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
import os
import subprocess
import logging
# for initialize_data(), open_tstamps(), open_errlog()
import __init__
from ..utils import data_utils
from ..utils import two_pad
# ======================= FUNCTIONS FOR TIME ALIGNMENT ========================
def get_stamp_pulses(file_stamps,start_time,end_time):
"""
A function which takes a filehandle for the Saskatoon timestamper data and
start and end time periods of interest, and returns all the times a pulse
occurred.
*** PARAMS ***
file_stamps (file object): timestamps file e.g. maxwell:/data/epop/20160418.0100.timestampdata.bz2
start_time (datetime obj): the start of the time period of interest for gathering pulse data
end_time (datetime obj): the end of the time period of interest
*** RETURNS ***
pulse_times (list of strings): the total timestamps (as a string) for each pulse
pulses (list of floats): the times (in seconds) at which a pulse goes out
"""
#TODO: GET CLOSER TO THE ACTUAL START < 10 s (rather than as with these params, up to 59 seconds away)
strt_str = two_pad(start_time.hour) + ":" + two_pad(start_time.minute)
#TODO: Find way to grab minute after the one of interest
end_str = two_pad(end_time.hour) + ":" + two_pad(end_time.minute)
startln = data_utils.get_line_in_file(file_stamps,strt_str)
endln = data_utils.get_line_in_file(file_stamps,end_str)
print "Start line for search string of " + strt_str + ": " + str(startln)
print "End line for search string of " + end_str + ": " + str(endln)
# Reading Timestamp data, acquiring timing differences
end = False
pulse_times = []
pulses = []
# Initialized hour/minute timestamp for edge cases where a pulse is read
# in before it has a corresponding hr/min
hrtime = "--:--"
file_stamps.seekline(startln)
    while not end:
        ln = file_stamps.readline()
        if ln == '' or file_stamps.line > endln:
            print("End of file or reached end of search range.")
            end = True
elif ln.find("TIME") != -1:
hrtime = (ln.split(" = ")[1]).split(" ")[0]
elif ln.find("SEC") != -1:
sectime = float((ln.split(" = ")[1]).split("\n")[0])
if sectime < 10.0:
time = hrtime + ":0" + str(round(sectime,5))
else:
time = hrtime + ":" + str(round(sectime,5))
pulse_times.append(time)
pulses.append(sectime)
return (pulse_times, pulses)
def get_errl_pulses(f_errl, start, end):
"""
Function to grab the pulses from the errlog file for the desired time
interval as well as their general timestamps.
*** PARAMS ***
file_errl (FileLineWrapper obj): errl file e.g. maxwell:/data/sas_errlog/...
start (datetime obj): the start of the time period of interest for gathering pulse data
end (datetime obj): the end of the time period of interest
*** RETURNS ***
pulse_times (list of strings): the total timestamps (as a string) for each pulse
pulses (list of floats): the times (in seconds) at which a pulse goes out
"""
start_str = two_pad(start.hour) + ":" + two_pad(start.minute) + ":"
end_str = two_pad(end.hour) + ":" + two_pad(end.minute) + ":"
#TODO: Find way to grab minute after the one of interest
#end_str = two_pad(end.hour) + ":" + two_pad(end.minute + 1) + ":"
ln_start = data_utils.get_line_in_file(f_errl, start_str)
ln_end = data_utils.get_line_in_file(f_errl, end_str)
print "Start line for search string of " + start_str + ": " + str(ln_start)
print "End line for search string of " + end_str + ": " + str(ln_end)
end = False
pulse_times = []
pulses = []
f_errl.seekline(ln_start)
    while not end:
ln = f_errl.readline()
if ln.find("Number of sequences") != -1:
#print "Found pulse sequence!"
pulse,numof = parse_pulses(ln)
ptime = parse_ptimes(ln)
for i in range(numof):
pulses.append(pulse)
pulse_times.append(ptime)
        elif ln == '' or f_errl.line > ln_end:
            print("End of file or reached end of search range.")
            end = True
return (pulse_times, pulses)
def get_diffs(pulse_times, pulses):
"""
Returns a list of time differences between the pulse times given,
corresponding 1 - for - 1
*** PARAMS ***
pulse_time (list of strings): list of strings containing the exact time of the pulse.
pulses (list of floats): list of floats of the exact time (sec) of the pulses.
*** RETURNS ***
diff_val ([] floats): time intervals between temporally adjacent pulses.
pulse_times ([] strings): timestamp of beginning of each time interval
"""
    diffs = []
    for i in range(len(pulses) - 1):
        diffs.append(pulses[i + 1] - pulses[i])
    return pulse_times, diffs
def identify_sequences(pulse_times,diffs):
"""
This function takes a list of time intervals between pulses (whose interval
begins at the corresponding time in pulse_times, and picks out which series
of intervals corresponds to a 7 or 8 pulse sequence.
*** PARAMS ***
diff_val ([] floats): time intervals between temporally adjacent pulses.
pulse_times ([] strings): timestamp of beginning of each time interval
*** RETURNS ***
total ([] strings): list of every feature in diffs (possibly deprecated)
sequence_times ([] strings): timestamp of beginning of each time interval
sequences ([] strings): 7 vs 8 for which sequence occurred
"""
total = []
sequence_times = []
sequences = []
i = 0
# minutes_count separately tracks the number of minute-to-minute transitions
# that the sequence-identifier finds (which should hopefully match what's in pulse_times)
minutes_count = 0
    while i < len(diffs):
d1 = diffs[i]
t1 = pulse_times[i]
# Implemented a hack to notice minute-to-minute transitions, note them in summary file
if d1 < 0: # This may screw up 7 or 8 pulse identification across transitions
d1 = d1 + 60.0
minutes_count = minutes_count + 1
total.append("Minute Transition: " + str(minutes_count))
        if i < len(diffs) - 6:
d2 = diffs[i+1]
d3 = diffs[i+2]
d4 = diffs[i+3]
d5 = diffs[i+4]
d6 = diffs[i+5]
d7 = diffs[i+6]
c1 = np.around(d1,decimals=4) == 0.0210
c2 = np.around(d2,decimals=4) == 0.012
c3 = np.around(d3,decimals=4) == 0.003
c4 = np.around(d4,decimals=4) == 0.0045
c5 = np.around(d5,decimals=4) == 0.006
c6 = np.around(d6,decimals=4) == 0.0165
c7 = np.around(d7,decimals=4) == 0.0015
b1 = np.around(d1,decimals=4) == 0.0216
b2 = np.around(d2,decimals=4) == 0.0072
b3 = np.around(d3,decimals=4) == 0.0192
b4 = np.around(d4,decimals=4) == 0.0048
b5 = np.around(d5,decimals=4) == 0.0096
b6 = np.around(d6,decimals=4) == 0.0024
if c1 and c2 and c3 and c4 and c5 and c6 and c7:
#print "8 Pulse Sequence"
total.append("8 Pulse Sequence")
sequence_times.append(t1)
sequences.append("8")
i = i+7
elif b1 and b2 and b3 and b4 and b5 and b6:
#print "7 pulse sequence"
total.append("7 Pulse Sequence")
sequence_times.append(t1)
sequences.append("7")
i = i+6
else:
total.append(str(d1))
i = i + 1
else:
#print d
total.append(str(d1))
i = i + 1
return total,sequence_times,sequences
def parse_pulses(ln):
"""
A little mini function for taking a line in an errlog file and grabbing the pulse number.
"""
if ln.find("Number of sequences") == -1:
print "Line of text doesn't contain pulse information!"
rem = ln.split("Number of sequences")[1]
if rem.find("[") != -1:
# Then this file *does* specify pulse sequences as it should
pseq = (rem.split("[")[1]).split("]")[0]
numof = int((rem.split(": ")[1]).split("\n")[0])
else:
# Then this is an older errlog where the pseqs are all 8 pulse sequences
pseq = str(8)
numof = int((rem.split(": ")[1]).split("\n")[0])
return pseq,numof
def parse_ptimes(ln):
"""
Mini function for getting the time (in a string) from a line of an errlog file.
"""
timestring = ln.split(" ")[3]
return timestring
def determine_offset(pulse_times_a, pulse_seqs_a, pulse_times_b, pulse_seqs_b):
"""
This function determines a discrete offset by which the first list of pulse
sequences can be shifted so as to make the pulses with the same index in
each list be most similar.
e.g.
[3,4,3,2]
"""
#TODO: Finish this function
return -1, 0
def determine_shift_offset(lst_a, lst_b):
"""
Determines the optimal shifting of one sequence with respect to the other
so that the most list entries with the same indices are equal between the
two lists.
** PARAMS **
lst_a (list): the first list, which the function hopes is the smaller.
lst_b (list): the second list.
** RETURNS **
offset (integer): the offset from lst_a with respect to lst_b yielding
optimal matching between the two lists.
quality (integer): 0 for poor confidence, 1 for strong confidence
"""
assert isinstance(lst_a, list)
assert isinstance(lst_b, list)
    lst_a_len = len(lst_a)
    # Ensure lst_a is the shorter list
    if lst_a_len > len(lst_b):
        return determine_shift_offset(lst_b, lst_a)
# Loop through a reasonable different number of integer index shifts to try
# The first one we try should be no shift whatsoever, and if there's 100%
# overlap, don't bother trying anything else (???).
#for i in range(lst_a_len):
best_overlap = evaluate_difference(lst_a,lst_b)
best_overlap_index = 0
if best_overlap == 1.0:
# Optimal overlap already
confidence = 1
return best_overlap_index, confidence
overlap_scores = []
overlap_shifts = []
    lst_b_len = len(lst_b)
lst_b_fwd = lst_b_bck = lst_b
for i in range(lst_a_len):
lst_b_fwd = lst_b_fwd[1:lst_b_len] + [lst_b_fwd[0]] # rotate forward
overlap = evaluate_difference(lst_a, lst_b_fwd)
if overlap > best_overlap:
best_overlap = overlap
# access indices start at 0, so subtract 1 to describe extent of shift
best_overlap_index = -i - 1
overlap_scores.append(overlap)
overlap_shifts.append(-i-1)
lst_b_bck = [lst_b_bck[-1]] + lst_b_bck[0:-1]
overlap = evaluate_difference(lst_a, lst_b_bck)
if overlap > best_overlap:
best_overlap = overlap
# access indices start at 0, so subtract 1 to describe extent of shift
best_overlap_index = i + 1
overlap_scores.append(overlap)
overlap_shifts.append(i+1)
#print overlap_scores # Less output
#print overlap_shifts
return best_overlap_index
#TODO: Confidence/quality of answer???
def evaluate_difference(lst_a, lst_b):
"""
Determines how much overlap there is between the two input lists.
Essentially a value function to maximize.
"""
assert isinstance(lst_a, list)
assert isinstance(lst_b, list)
    lst_a_len = len(lst_a)
    if lst_a_len > len(lst_b):
        return evaluate_difference(lst_b, lst_a)
diff = 0.0
for i in range(lst_a_len):
if lst_a[i] != lst_b[i]:
diff = diff + 1.0
# The score will be an overlap percentage
return (lst_a_len - diff)/lst_a_len
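# Example: evaluate_difference([7, 8, 8], [7, 8, 8, 8]) returns 1.0, since the
# shorter list matches the first three entries of the longer one exactly.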
def visualize_list_difference(lst_a, lst_b):
"""
Returns a list of evaluate_difference() scores corresponding to different
relative shifts of lst_a and lst_b.
"""
import collections
l = lst_a
y = []
    for i in range(len(lst_a)):
d = collections.deque(l)
d.rotate(-1)
l = (np.array(d)).tolist()
y.append(evaluate_difference(l,lst_b))
return y
# ========================= TIME ALIGNMENT SCRIPT =============================
def perform_time_alignment_demo():
"""
A demonstration script for making use of the various time alignment
functions and testing their behaviour.
"""
import matplotlib.pyplot as plt
data_path,dat_fname = data_utils.initialize_data()
#start = dt.datetime(2014,7,8,1,15,9)
#end = dt.datetime(2014,7,8,1,17,30)
start = dt.datetime(2016,4,18,0,30,0)
end = dt.datetime(2016,4,18,0,33,0)
# Open the Timestamp data
file_stamps = data_utils.open_tstamps(data_path, start)
# Open the Saskatoon Errlog
rcode = 'sas' # If we had another Timestamper, this could be an input parameter
file_errl = data_utils.open_errlog(data_path, rcode, start)
# Reading Timestamp data, acquiring timing differences
stamp_ptimes,stamp_pulses = get_stamp_pulses(file_stamps, start, end)
stamp_dtimes,stamp_diffs = get_diffs(stamp_ptimes,stamp_pulses)
stamp_allpulses,stamp_seqtimes,stamp_pseqs = identify_sequences(stamp_dtimes,stamp_diffs)
# Reading the ERRLOG data!
errl_seqtimes,errl_pseqs = get_errl_pulses(file_errl, start, end)
print("\nNow defining custom lists lista and listb...")
lista = [7,8,8,8,8,7,8,8,8,8,7,8,8,8,8,7]
listb = [8,8,8,7,8,8,8,8,7,8,8,8,8,7]
score = evaluate_difference(lista,listb)
print("'evaluate_difference' result on lista vs listb initially: {0}".format(score))
shift = determine_shift_offset(lista,listb)
print("determined shift offset: {0}".format(shift))
errl_pseqs_ab = errl_pseqs[:len(stamp_pseqs)]
errl_seqtimes_ab = errl_seqtimes[:len(stamp_seqtimes)]
print("\nNow printing timestamper and errlog sequences and times" +
" near the start and end to show their alignment...".format(len(errl_seqtimes)))
for i in np.arange(28,42):
stamp_str = str(stamp_seqtimes[i]) + "\t" + str(stamp_pseqs[i])
errl_str = "\t" + str(errl_pseqs_ab[i]) + "\t" + str(errl_seqtimes[i])
print(stamp_str + errl_str)
print("\n\n")
for i in np.arange(60, 45, -1):
stamp_str = str(stamp_seqtimes[-1-i]) + "\t" + str(stamp_pseqs[-1-i])
errl_str = "\t" + str(errl_pseqs_ab[-1-i]) + "\t" + str(errl_seqtimes[-1-i])
print(stamp_str + errl_str)
# TODO: figure out what I was going to do with these two lines:
indx_del = 70
    chunks = int(len(errl_pseqs)/70.)
# Run the stats on the equal-length versions of this data
score = evaluate_difference(stamp_pseqs, errl_pseqs_ab)
print("'evaluate_difference' result on lista vs listb initially: {0}".format(score))
shift = determine_shift_offset(stamp_pseqs, errl_pseqs_ab)
print("determined shift offset: {0}".format(shift))
print("\nNow supposedly going to perform visualization of the list differences in terms of offset similarities")
y = visualize_list_difference(errl_pseqs_ab, stamp_pseqs)
plt.plot(100.0*np.array(y))
plt.xlabel('Discrete rotations of list 1 vs list 2')
plt.ylabel('Agreement (%)')
plt.show()
# Unmount the Maxwell remote mount.
data_utils.exit_rri()
# -----------------------------------------------------------------------------
if __name__ == "__main__":
perform_time_alignment_demo()
|
nilq/baby-python
|
python
|
# String; Backtracking
# Given a string containing digits from 2-9 inclusive, return all possible letter combinations that the number could represent.
#
# A mapping of digit to letters (just like on the telephone buttons) is given below. Note that 1 does not map to any letters.
#
#
#
# Example:
#
# Input: "23"
# Output: ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"].
# Note:
#
# Although the above answer is in lexicographical order, your answer could be in any order you want.
import itertools
from typing import List  # needed for the List[str] annotations below
class Solution:
def letterCombinations(self, digits):
"""
:type digits: str
:rtype: List[str]
"""
if len(digits) == 0:
return []
digDict = {'2':['a','b','c'],'3':['d','e','f'],'4':['g','h','i'],'5':['j','k','l'],'6':['m','n','o'],'7':['p','q','r','s'],'8':['t','u','v'],'9':['w','x','y','z']}
productList = []
for x in digits:
productList.append(digDict[x])
productList = list(itertools.product(*productList))
output = [''.join(x) for x in productList]
return output
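# Example:
#   Solution().letterCombinations("23")
#   -> ['ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf']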
|
nilq/baby-python
|
python
|
from dagster import asset
# start_example
@asset(metadata={"cereal_name": "Sugar Sprinkles"})
def cereal_asset():
return 5
# end_example
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 4 18:26:36 2019
@author: Juan Sebastián Herrera Cobo
This code solves the scheduling problem using a genetic algorithm. Implementation taken from pyeasyga
As input this code receives:
1. T = number of jobs [integer]
2. ni = number of operations of the job i [list of T elements]
3. m = number of machines [integer]
3. Mj´ = feasible machines for the operaion j of the job i [matrix of sum(ni) row, each row with n' feasible machines]
4. pj'k = processing time of the operation j' in the machine k [matrix of sum(ni) row, each row with n' feasible machines]
"""
from time import time
# Inputs
#T = 2 # number of jobs
#ni =[2,2] # number of operations of the job i
#ma = 2 # number of machines
#Mij = [[1,2],[1],[2],[1,2]]
#pjk = [[3,4],[5,1000],[1000,6],[2,2]]
#T = 3 # number of jobs
#ni =[2,2,2] # number of operations of the job i
#ma = 2 # number of machines
#Mij = [[1,2],[1,2],[1],[1,2],[2],[1,2]]
#pjk = [[3,4],[5,4],[2,1000],[2,4],[1000,3],[1,2]]
#T = 4 # number of jobs
#ni =[1,3,2,2] # number of operations of the job i
#ma = 3 # number of machines
#Mij = [[1,2,3],[1,3],[3],[1,2],[1,3],[1,2],[1,2,3],[1,3]]
#pjk = [[3,4,3],[5,1000,5],[1000,1000,6],[2,4,3],[1,1000,3],[1,2,1000],[2,2,2],[1,1000,1000]]
#T = 3 # number of jobs
#ni =[2,3,4] # number of operations of the job i
#ma = 5 # number of machines
#Mij = [[1,2,3,4,5],[1,3,4],[3,2],[1,2,5],[1,3,4],[1,2],[1,2,3],[1,3,5],[1,5]]
#pjk = [[3,4,3,4,4],[5,1000,5,4,1000],[1000,4,6,1000,1000],[2,4,1000,1000,4],
# [1,1000,3,4,1000],[1,2,1000,1000,1000],[2,2,2,1000,1000],[1,1000,1,1000,2],
# [4,1000,1000,1000,3]]
T = 4 # number of jobs
ni =[2,3,4,2] # number of operations of the job i
ma = 6 # number of machines
Mij = [[1,2,3,4,5],[1,3,4,6],[1,3,2],[1,2,5],[1,2,3,4],[1,2,5],[1,2,3,6],[1,3,5],[1,5,6],
[1,6],[2,3,4]]
pjk = [[3,4,3,4,4,1000],[5,1000,5,4,1000,4],[3,4,6,1000,1000,1000],[2,4,1000,1000,4,1000],
[1,3,3,2,1000,1000],[1,3,1000,1000,2,1000],[2,2,2,1000,1000,2],[1,1000,1,1000,2,1000],
[4,1000,1000,1000,3,3],[3,1000,1000,1000,1000,4],[1000,5,3,4,1000,1000]]
"""
The individual is a list with T*ni*2 digits. For each operation in each job it has the variable S and the variable X
The S for start time to process and the X for the machine where this operation will be done. E.g:
individual = [S11,X11,S12,X12..........Sini,Xini]
But first of all a dataset to be used during the algorithm must be made
"""
from pyeasyga import pyeasyga # import the library to be used
import random
data=[]
data.append(T)
data.append(ni)
data.append(ma)
data.append(Mij)
data.append(pjk)
def is_data_ok(data):
    sum_ni = 0
    for i in range(0, len(data[1])):
        sum_ni += data[1][i]
    # raise SystemExit so validation failures actually stop the script
    # (the old bare `exit` was a no-op expression)
    if len(data[1]) != data[0]:
        raise SystemExit("Data invalid. Please check the length of ni list")
    elif len(data[3]) != sum_ni:
        raise SystemExit("Data invalid. Please check the length of Mij list")
    elif len(data[4]) != sum_ni:
        raise SystemExit("Data invalid. Please check the length of pjk list")

is_data_ok(data)
"""
To create a random individual a function called create_individual is created. In this case, random values to S from 0 to the max
of pjk*T are generated and for X values between the feasible machines are generated
"""
def max_processing_time(data):
pjk=data[4]
max_time=0
for i in range(0,len(pjk)):
for j in range(0,len(pjk[i])):
if pjk[i][j]>max_time and pjk[i][j]!=1000:
max_time=pjk[i][j]
return max_time
def create_individual(data):
individual=[]
start_times=[0]*data[2]
jobs=data[0]
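    # Weighted random choice of construction heuristic: 1 = greedy forward pass
    # (~40%), 2 = greedy backward pass (~30%), 0 = random times/machines (~30%)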
list_to=[2,1,2,0,1,2,0,1,1,0]
random_number=random.randint(0,len(list_to)-1)
reference=list_to[random_number]
if reference == 1:
a=0
for i in range(0,jobs):
for j in range(0,data[1][i]):
position_X=random.randint(0,len(data[3][a])-1)
X=data[3][a][position_X]
S=start_times[X-1]
individual.append(S)
individual.append(X)
start_times[X-1]=start_times[X-1]+data[4][a][X-1]
a+=1
elif reference == 2:
a=len(data[3])-1
for i in range(0,jobs):
for j in range(0,data[1][i]):
position_X=random.randint(0,len(data[3][a])-1)
X=data[3][a][position_X]
S=start_times[X-1]
individual.append(S)
individual.append(X)
start_times[X-1]=start_times[X-1]+data[4][a][X-1]
a-=1
else:
for i in range(0,jobs):
for j in range(0,data[1][i]):
X=random.randint(1,data[2])
max_time=max_processing_time(data)
S=random.randint(0,max_time)
individual.append(S)
individual.append(X)
return individual
def mutate(individual):
mutate_index1=random.randrange(len(individual))
mutate_index2=random.randrange(len(individual))
#max_time=max_processing_time(data)
if ((mutate_index1%2)==0 and (mutate_index2%2)==0) or ((mutate_index1%2)!=0 and \
(mutate_index2%2!=0)):
individual[mutate_index1], individual[mutate_index2] = individual[mutate_index2], individual[mutate_index1]
elif (mutate_index1%2)==0 and (mutate_index2%2)!=0:
#if individual[mutate_index1]>(max_time/2):
# individual[mutate_index1]=individual[mutate_index1]+random.randint(-(max_time/2),(max_time/2))
new_index=random.randrange(0,len(individual),2)
individual[mutate_index1], individual[new_index] = individual[new_index], individual[mutate_index1]
individual[mutate_index2]=random.randint(1,data[2])
else:
#if individual[mutate_index2]>(max_time/2):
# individual[mutate_index2]=individual[mutate_index2]+random.randint(-(max_time/2),(max_time/2))
new_index=random.randrange(0,len(individual),2)
individual[mutate_index2], individual[new_index] = individual[new_index], individual[mutate_index2]
individual[mutate_index1]=random.randint(1,data[2])
"""
The fitness function is divided in two parts: 1. the Cmax is calculated from the individual, 2. the restrictions of the
problema are validated to count how many fouls has the individual. At the end the fitness value = cmax + fouls*constant
"""
def is_feasible_machine(operation,machine,data):
Mij=data[3]
count=0
for i in range(0,len(Mij[operation])):
if machine==Mij[operation][i]:
count+=1
if count == 0:
return False
else:
return True
def operations_in_machine(machine,individual):
result=[]
i=0
while i<len(individual):
if individual[i+1]==machine:
result.append(int(i/2))
i+=2
return result
def fitness(individual,data):
fitness=0
pjk=data[4]
i=0
for op in range(0,len(pjk)):
if (individual[i]+pjk[op][individual[i+1]-1])>fitness:
fitness=individual[i]+pjk[op][individual[i+1]-1]
i+=2
# ------restrictions---------------
fouls=0
j=0
k=0
# for each job, C of current operation must be less than the next
for job in range(0,len(ni)):
for op2 in range(0,ni[job]-1):
if (individual[j]+pjk[k][individual[j+1]-1])>individual[j+2] or\
individual[j]>=individual[j+2]:
fouls+=4
j+=2
k+=1
j+=2
k+=1
# an operation must be made in a feasible machine
l=0
while l<len(individual):
if not is_feasible_machine(int(l/2),individual[l+1],data):
fouls+=2
l+=2
# for each machine an operation must start at zero
# for each mahcine, the operations cannot be mixed. Only one operation at a time
count_zeros=0
for machine2 in range(1,data[2]+1):
#count_zeros=0
operations2=operations_in_machine(machine2,individual)
for op4 in range(0,len(operations2)):
if individual[operations2[op4]*2]==0:
count_zeros+=1
start_reference=individual[operations2[op4]*2]
end_reference=individual[operations2[op4]*2]+pjk[operations2[op4]][machine2-1]
for op5 in range(0,len(operations2)):
if op5 != op4:
s=individual[operations2[op5]*2]
c=individual[operations2[op5]*2]+pjk[operations2[op5]][machine2-1]
if s<=start_reference and c>=end_reference:
fouls+=2
elif s>=start_reference and s<=end_reference and c<=end_reference:
fouls+=2
elif s<=start_reference and c>start_reference and c<=end_reference:
fouls+=2
elif s>=start_reference and s<end_reference and c>=end_reference:
fouls+=2
#if count_zeros != 1:
#fouls+=1
if count_zeros == 0:
fouls+=1
fitness=fitness+(fouls*1000)
return fitness
"""
At the end the create_individual and the fitness functions are added to the ga. Then run and print the best individual
"""
steps=[]
count_increment=0
def genetic_algorithm_scheduling(data,counter,pop_size=100,num_generations=500):
start_time=time()
ga=pyeasyga.GeneticAlgorithm(data,maximise_fitness=False,population_size=pop_size,generations=num_generations,mutation_probability=0.3) # initialization of the algorithm
ga.create_individual=create_individual
ga.mutate_function=mutate
ga.fitness_function=fitness
ga.run()
best_individual=ga.best_individual()
steps.append(best_individual)
best_fitness=best_individual[0]
if best_fitness>1000 and counter<10:
counter+=1
new_generations=num_generations+100
print("Incrementing generations to ",new_generations,"......")
genetic_algorithm_scheduling(data,counter,pop_size,new_generations)
elif best_fitness>1000 and counter==10:
print("Feasible individual wasn't found!")
print("Best infeasible individual: ",ga.best_individual())
end_time=time()
print("The execution time was: ",(end_time-start_time)," seconds")
elif best_fitness<1000:
end_time=time()
print("Best feasible individual found! ",ga.best_individual())
print("The execution time was: ",(end_time-start_time)," seconds")
print("These were the different best individuals:")
for i in range(0,len(steps)):
print(steps[i])
return steps
genetic_algorithm_scheduling(data,count_increment,pop_size=200)
|
nilq/baby-python
|
python
|
import os
from pydu.dt import timer
class TestTimer(object):
def test_context_manager(self):
timeit = timer()
with timeit:
os.getcwd()
assert timeit.elapsed is not None
def test_decorator(self):
timeit = timer()
@timeit
def foo():
os.getcwd()
foo()
assert timeit.elapsed is not None
def test_print_func(self):
import sys
timeit = timer(print_func=sys.stdout.write)
with timeit:
os.getcwd()
assert timeit.elapsed is not None
|
nilq/baby-python
|
python
|
# Imports
import sys
import torch
import os
import numpy as np
import time
from sbi.inference import SNRE_B, prepare_for_sbi
# Initial set up
lunarc = int(sys.argv[1])
seed = int(sys.argv[2])
print("Input args:")
print("seed: " + str(seed))
if lunarc == 1:
os.chdir('/home/samwiq/snpla/seq-posterior-approx-w-nf-dev/lotka_volterra')
else:
os.chdir('/home/samuel/Documents/projects/seq posterior approx w nf/seq posterior approx w nf dev/lotka_volterra')
sys.path.append('./')
print(os.getcwd())
id_job = str(seed)
import LotkaVolterra
import functions as func
# Set model and generate data
x_o, model, theta_true = func.set_up_model()
m_s_of_prior, s_s_of_prior = func.load_summary_stats_mean_and_std()
# set up simulator
def simulator(theta):
s_of_theta = model.model_sim(theta)
return func.normalize_summary_stats(s_of_theta, m_s_of_prior, s_s_of_prior)
s_x_o = LotkaVolterra.calc_summary_stats(x_o.reshape(1, x_o.shape[0], x_o.shape[1]))
s_x_o = func.normalize_summary_stats(s_x_o, m_s_of_prior, s_s_of_prior)
# check simulator and prior
simulator, prior = prepare_for_sbi(simulator, model.prior)
# function that builds the network
def build_custom_post_net(batch_theta, batch_x):
flow_lik, flow_post = func.set_up_networks()
return flow_post
inference = SNRE_B(simulator, prior)
learning_rate = 0.0005 # default value
start = time.time()
torch.manual_seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
prior_samples = prior.sample(sample_shape=(1,))
data_sets = simulator(prior_samples)
num_rounds = 5
x_o = data_sets
posteriors = []
proposal = None
print(learning_rate)
for i in range(num_rounds):
posterior = inference(num_simulations=1000, proposal=proposal, max_num_epochs=50, learning_rate=learning_rate)
posteriors.append(posterior)
proposal = posterior.set_default_x(x_o)
end = time.time()
run_time = end - start
print("")
print("Runtime:" + str(round(run_time, 2)))
L = 4
K = 4
M = L
Lprime = 50
run_mcmc = True
post_samples = posteriors[-1].sample((Lprime,), x=x_o)
print(post_samples)
ess_current = func.ess_mcmc(post_samples)
print(ess_current)
if ess_current < M:
    # continue MCMC sampling until there are at least M effective samples
L_all = int(Lprime * M / ess_current)
print(L_all)
post_samples = posteriors[-1].sample((L_all,), x=x_o)
# post_samples = torch.cat((post_samples, post_samples_new))
# else:
# run_mcmc = False
# thinning chain
ess_current = func.ess_mcmc(post_samples)
print(ess_current)
N_total = post_samples.shape[0]
post_samples = post_samples[range(0, N_total, int(N_total / M)), :] # thin samples
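# Simulation-based calibration (SBC) rank statistic: for each parameter, count
# how many thinned posterior samples fall below the prior draw that generated
# the data; a well-calibrated posterior yields uniformly distributed ranks.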
indications = torch.zeros(K)
for k in range(K):
indications[k] = (post_samples[:, k] < prior_samples[0, k]).sum()
np.savetxt('sbc/ranks_snre_b_' + id_job + '.csv', indications.numpy(), delimiter=",")
|
nilq/baby-python
|
python
|
class RMCError(Exception):
def __init__(self, message):
Exception.__init__(self, message)
self.__line_number=None
def set_line_number(self, new_line_number):
self.__line_number=new_line_number
def get_line_number(self):
return self.__line_number
#operations throws RMCError if not successful
#throws also for +1, +2 and so on (but also -1)
def asNonnegInt(literal, must_be_positive=False, lit_name="unknown"):
condition="positive" if must_be_positive else "nonnegative"
if not literal.isdigit():
raise RMCError(lit_name+" must be a "+condition+" integer, found "+literal)
res=int(literal)
if must_be_positive and res==0:
raise RMCError(lit_name+" must be a "+condition+" integer, found "+literal)
return res
|
nilq/baby-python
|
python
|
__author__ = 'Milo Utsch'
__version__ = '0.1.0'
from setuptools import setup, find_packages
from euler import __name__ as name
from euler import __author__ as author
from euler import __doc__ as doc
from euler import __email__ as author_email
from euler import __version__ as version
from euler import __license__ as license
maintainer = author
maintainer_email = author_email
keywords = ['euler']
description = doc.splitlines()[0].strip()
long_description = open('README.md').read()
long_description_content_type = 'text/markdown'
install_requires = [
element
for element in [
line.split('#', 1)[0].strip()
for line in open('requirements.txt', 'r', encoding='utf-8')
]
if element and not element.startswith('--')
]
classifiers = [
'Programming Language :: Python :: 3',
'Operating System :: OS Independent'
]
setup(
name=name,
author=author,
author_email=author_email,
version=version,
license=license,
maintainer=maintainer,
maintainer_email=maintainer_email,
keywords=keywords,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
classifiers=classifiers,
packages=find_packages(exclude=['tests'])
)
|
nilq/baby-python
|
python
|
# Masters Research Project
# Kenneth Young
# FSBF MIP Model of the SUALBSP-2
# This file contains:
# -A MIP model of the SUALBSP-2
# -This model was adapted from Esmaeilbeigi et al. (2016),
# specifically their FSBF-2 model.
# Packages
import sys
import pdb
import time
# import itertools
import csv
import argparse
import numpy as np
import networkx as nx
from gurobipy import *
# User-defined Packages
from ALB_instance_storage import AssemblyLineInstance
# initilise settings for argument parser
parser = argparse.ArgumentParser()
parser.add_argument('file', help='instance file')
parser.add_argument('-q', '--quiet', help='Some output', action='store_true')
parser.add_argument('-vq', '--very-quiet', help='Minimal output', action='store_true')
parser.add_argument('-s', '--statistics', help='Print statistics', action='store_true')
parser.add_argument('-c', '--check-solution', help='Check solution', action='store_true')
parser.add_argument('-H', '--human-readable', help='Human readable output', action='store_true')
parser.add_argument('-b', '--backwardSU-type', type=str, default='copy-forward-setups',
                    help='Type of backward setup times to use. Options include: '
                         '{copy-forward-setups, just-forward-setups}')
parser.add_argument('-t', '--time-limit', type=float, default=1800,
help='Optimisation time limit.')
parser.add_argument('-et', '--experiment-token', type=int, default=0,
help='Indicator for which experiment is being run')
parser.add_argument('-v1', '--valid-ineq-1', help='Use this valid inequality', action='store_true')
parser.add_argument('-v2', '--valid-ineq-2', help='Use this valid inequality', action='store_true')
args = parser.parse_args()
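# Example invocation (hypothetical script and instance file names):
#   python fsbf_model.py instance_n20.alb -s -t 600 -v1 -v2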
# Define global constants
if sys.platform == "win32":
INST_DIR = 'instances\\'
elif sys.platform =="cygwin":
INST_DIR = 'instances\\'
elif sys.platform == "darwin":
INST_DIR = 'instances/'
elif sys.platform == "linux" or sys.platform == "linux2":
INST_DIR = 'instances/'
# argument definitions
PRINT_STATISTICS = args.statistics
BACKWARD_SETUP_TYPE = args.backwardSU_type
VALID_INEQ_1 = args.valid_ineq_1
VALID_INEQ_2 = args.valid_ineq_2
CHECK_SOLUTION = 'tools/bin/check-solution' # create this file
TIMELIMIT = args.time_limit
EXPERIMENT_TOKEN = args.experiment_token
if args.very_quiet:
args.quiet = True
class SolverFSBF:
def __init__(self, inst):
self.inst = inst
# initialise the full MIP model
self.model = Model('assemblyline')
self.init_model_parameters()
if args.quiet:
self.model.setParam('OutputFlag', 0)
if not args.very_quiet:
print('Initialising the MIP...', end='')
self.optimisation_times = []
self.sequencing_times = []
start = time.time()
self.init_vars()
self.create_objective()
self.create_constraints()
self.init_time = time.time() - start
self.optimisation_times.append(self.init_time)
if not args.very_quiet:
print(' initialisation complete ({:.3f}s)'.format(self.init_time))
def init_model_parameters(self):
self.model.setParam('TimeLimit', TIMELIMIT)
def init_vars(self):
self.cycleTime = self.model.addVar(lb=self.inst.minCycleTime,
ub=self.inst.maxCycleTime,
obj=1.0,
vtype=GRB.CONTINUOUS,
name='c')
# initialise x variables
self.xs = self.model.addVars([ (i,k)
for i in self.inst.tasks
for k in self.inst.feasibleStations[i] ],
vtype=GRB.BINARY, name='x')
# initialise y variables
self.ys = self.model.addVars([ (i,j)
for i in self.inst.tasks
for j in self.inst.followForw[i] ],
vtype=GRB.BINARY, name='y')
# initialise w variables
self.ws = self.model.addVars([ (i,j)
for i in self.inst.tasks
for j in self.inst.followBack[i] ],
vtype=GRB.BINARY, name='w')
# initialise z variables
self.zs = self.model.addVars(self.inst.tasks, vtype=GRB.CONTINUOUS, name='z')
# initialise start time variables, s
self.ss = self.model.addVars(self.inst.tasks, vtype=GRB.CONTINUOUS, name='s')
# initialise o variables
self.os = self.model.addVars([ (i,k)
for i in self.inst.tasks
for k in self.inst.feasibleStations[i] ],
vtype=GRB.BINARY, name='o')
self.model.update()
def create_objective(self):
self.objective = self.cycleTime
self.model.setObjective(self.objective, GRB.MINIMIZE)
def create_constraints(self):
# Each task i is assigned exactly one station
self.model.addConstrs((self.xs.sum(i,'*') == 1
for i in self.inst.tasks), 'oneStationPerTask')
# Encode the index of the stations which task i is assigned
self.model.addConstrs((sum([k*self.xs[i,k] for k in self.inst.stations]) == self.zs[i]
for i in self.inst.tasks), 'encodeStationNums')
# Each task i has exactly one successor (in forward and backward station loads)
self.model.addConstrs(( self.ys.sum(i,'*')
+ self.ws.sum(i,'*') == 1
for i in self.inst.tasks), 'oneSuccessor')
# Each task j has exactly one predecessor (in forward and backward station loads)
self.model.addConstrs(( sum([self.ys.sum(i,j) for i in self.inst.followBack[j]])
+ sum([self.ws.sum(i,j) for i in self.inst.precedeBack[j]]) == 1
for j in self.inst.tasks), 'onePredecessor')
# Forward load: tasks contained in the same cycle are assigned the same station
self.model.addConstrs((self.zs[j] - self.zs[i] <= self.inst.bigM * (1 - self.ys[i,j])
for i in self.inst.tasks
for j in self.inst.followForw[i]), 'sameCycleForwA')
self.model.addConstrs((self.zs[i] - self.zs[j] <= self.inst.bigM * (1 - self.ys[i,j])
for i in self.inst.tasks
for j in self.inst.followForw[i]), 'sameCycleForwB')
# Backward load: tasks contained in the same cycle are assigned the same station
self.model.addConstrs((self.zs[j] - self.zs[i] <= self.inst.bigM * (1 - self.ws[i,j])
for i in self.inst.tasks
for j in self.inst.followBack[i]), 'sameCycleBackA')
self.model.addConstrs((self.zs[i] - self.zs[j] <= self.inst.bigM * (1 - self.ws[i,j])
for i in self.inst.tasks
for j in self.inst.followBack[i]), 'sameCycleBackB')
# Task i can only be the last task of station k if it is assigned to k
self.model.addConstrs((self.os[i,k] <= self.xs[i,k]
for i in self.inst.tasks
for k in self.inst.feasibleStations[i]), 'onlyLastIfAssigned')
# o[i,k] gets the value 1 only if task i is the last task of k
self.model.addConstrs((self.ws.sum(i,'*') <= self.os.sum(i,'*')
for i in self.inst.tasks), 'ifBackSUthenLast')
# In combination with 'oneSucc' and 'onePred' constraints, each station has only one o[i,k]==1
self.model.addConstrs((sum([ self.os[i,k] for i in self.inst.feasibleTasks[k] ]) <= 1
for k in self.inst.stations), 'onlyOneLast')
# Strengthening Knapsack constraint: Total load of each station is less than cycle time
self.model.addConstrs((sum([ self.inst.procList[i]*self.xs[i,k] for i in self.inst.feasibleTasks[k] ]) <= self.cycleTime
for k in self.inst.stations), 'loadLessThanCycleTime')
# The last task of each station finishes by the cycle time
self.model.addConstrs(( sum([ self.inst.backSU[i][j]*self.ws[i,j] for j in self.inst.followBack[i] ])
+ self.ss[i] + self.inst.procList[i] <= self.cycleTime
for i in self.inst.tasks), 'lastTaskFinishByCycleTime')
        # The number of backward setups is at most the number of stations
self.model.addConstr(sum([ self.ws.sum(i,'*') for i in self.inst.tasks ]) <= self.inst.numStations,
'numBackSUsAtLeastNumStations')
# Precedence Relations are respected in the forward direction
self.model.addConstrs(( self.ss[i] + self.inst.maxCycleTime*(self.zs[i] - self.zs[j]) + self.inst.procList[i]
+ self.inst.forwSU[i][j]*self.ys[i,j] <= self.ss[j]
for (i,j) in self.inst.precList), 'precedenceRelations')
# Constrain start times of tasks following task i in the forward direction
self.model.addConstrs(( self.ss[i] + (self.inst.procList[i] + self.inst.forwSU[i][j])
+ (self.inst.maxCycleTime + self.inst.forwSU[i][j])*(self.ys[i,j] - 1) <= self.ss[j]
for i in self.inst.tasks
for j in set(self.inst.followForw[i]) - set(self.inst.precGraph.successors(i))),
'')
# Bounds for the cycle time
self.model.addConstr(self.cycleTime <= self.inst.maxCycleTime, 'cycleTimeUB')
self.model.addConstr(self.cycleTime >= self.inst.minCycleTime, 'cycleTimeLB')
# Valid Inequality:
if VALID_INEQ_1:
self.model.addConstrs(( self.ys[i,j] + self.ys[j,i] <= 1
for i in self.inst.tasks
for j in self.inst.followForw[i].intersection(self.inst.precedeForw[i])),
'validiIneq1')
# Valid Inequality: lower bound on the total line capacity
if VALID_INEQ_2:
self.model.addConstr( sum([ sum([ self.inst.forwSU[i][j]*self.ys[i,j]
for j in self.inst.followForw[i] ])
for i in self.inst.tasks ])
+ sum([ sum([ self.inst.backSU[i][j]*self.ws[i,j]
for j in self.inst.followBack[i] ])
for i in self.inst.tasks ])
+ sum( self.inst.procList ) <= self.inst.numStations*self.cycleTime,
'lineCapacityLowerBound')
def optimise(self):
start = time.time()
self.model.optimize()
self.optimisation_times.append(time.time() - start)
# # record the results for outputting
def store_results_summary(self):
# store assignment of tasks to stations
self.taskAssignment = [None for k in self.inst.stations]
for k in self.inst.stations:
self.taskAssignment[k] = { i for i in self.inst.tasks if self.xs[i,k].x > 0.5 }
# store start times of all tasks
self.startTimes = [None for k in self.inst.stations]
for k in self.inst.stations:
self.startTimes[k] = [ round(self.ss[i].x) for i in sorted(self.taskAssignment[k])]
# store load of each station
self.stationLoad = [None for k in self.inst.stations]
for k in self.inst.stations:
try:
self.stationLoad[k] = round(max([ self.ss[i].x + self.inst.procList[i]
+ sum([ self.inst.backSU[i][j]*self.ws[i,j].x
for j in self.inst.followBack[i] ])
for i in self.taskAssignment[k] ]))
except ValueError:
self.stationLoad[k] = 0
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# OUTPUT METHODS
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
def process_solution_statistics(self):
# pdb.set_trace()
if self.model.solcount == 0:
self.solFeasible = 0
self.solOptimal = 0
else:
self.solFeasible = 1
# find out how model terminated. with feasible sol? with optimal sol?
if self.solFeasible == 1:
if self.model.status == 2:
self.solOptimal = 1
self.store_results_summary()
else:
self.solOptimal = 0
self.store_results_summary()
self.optimalCycleTime = round(self.model.objval)
else:
self.optimalCycleTime = 0
        # get gap value at time of termination (either successful or timeout)
self.gap = round(100*self.model.mipgap,2)
self.statsTotalRuntime = round(sum(self.optimisation_times),4)
# get number of nodes
self.statsTotalNodes = round(self.model.nodecount)
def print_solution(self):
if args.human_readable:
if not args.very_quiet:
print('\n! ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ')
print('! \tSOLUTION ')
else:
print('\n',end='')
print('! Cycle Time:\t{}'.format(round(self.model.objval)))
if not args.very_quiet:
for k in self.inst.stations:
print('! Station {}'.format(k))
print('! Load = \t{}'.format(self.stationLoad[k]))
print('! Tasks = \t{}'.format(sorted(self.taskAssignment[k])))
print('! Starts = \t{}'.format(self.startTimes[k]))
else:
print(self.model.objval)
def save_solution(self, results_file):
# add stuff to a text file
# if args.check_solution:
# print('checking solution:')
# os.system('%s %s %s' % (CHECK_SOLUTION, self.inst.filename, results_file))
# pdb.set_trace()
with open(results_file, 'w', newline='') as csvfile:
results = csv.writer(csvfile)
results.writerow([self.solFeasible, self.solOptimal,
self.optimalCycleTime, self.gap,
self.statsTotalRuntime, self.statsTotalNodes])
def print_statistics(self):
if args.human_readable:
print('\n! ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ')
print('! \tSOLUTION STATISTICS ')
print('! Feasible Solution:\t{}'.format(self.solFeasible))
print('! Optimal Solution:\t{}'.format(self.solOptimal))
print('! Nodes Explored:\t{}'.format(int(self.statsTotalNodes)))
if self.solFeasible:
print('! Gap:\t\t\t{:.2f}'.format(self.gap))
else:
print('! Gap:\t\t\tNA')
print('\n! ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ')
print('! \tRUNTIME STATISTICS ')
print('! Init time:\t{:.4f}'.format(self.init_time))
print('! Total:\t{:.4f}'.format(self.statsTotalRuntime))
print('! Maximum:\t{:.4f}'.format(max(self.optimisation_times)))
print('! Average:\t{:.4f}'.format(sum(self.optimisation_times)/len(self.optimisation_times)))
        else:
            print(self.solOptimal)
            print(self.statsTotalNodes)
            print(self.gap)
            print(self.init_time)
            print(sum(self.optimisation_times))
            print(max(self.optimisation_times))
            print(sum(self.optimisation_times)/len(self.optimisation_times))
# Script to create instance class, run the solver and output the solution
if __name__ == '__main__':
# start total runtime timer
start = time.time()
filename = args.file # retrieve filename of instance to solve
if args.human_readable:
print('instance:', filename)
else:
print(filename)
# store assembly line instance data
inst = AssemblyLineInstance(INST_DIR,filename)
# pdb.set_trace()
# create Solver for given instance and optimise it
s = SolverFSBF(inst)
if not args.very_quiet:
print('Solving the MIP...')
s.optimise()
# output
s.process_solution_statistics()
if s.solFeasible == 1:
s.print_solution()
s.save_solution('summary_results_{}.txt'.format(EXPERIMENT_TOKEN))
if PRINT_STATISTICS:
s.print_statistics()
# print total runtime
end = time.time()
if not args.quiet:
if args.human_readable:
print('\n! Language runtime:\t{:.4f}'.format(end-start-sum(s.optimisation_times)))
else:
print(end-start-sum(s.optimisation_times))
if args.human_readable:
if args.very_quiet:
print('')
print('! Total runtime:\t{:.4f}'.format(end-start))
else:
print(end-start)
# EOF #
|
nilq/baby-python
|
python
|
# https://leetcode.com/problems/count-number-of-pairs-with-absolute-difference-k/
from typing import List

class Solution:
def countKDifference(self, nums: List[int], k: int) -> int:
count = 0
for i in range(0, len(nums)-1):
for j in range(i+1, len(nums)):
if abs(nums[i]-nums[j]) == k:
count+= 1
return count
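# The nested loops above are O(n^2). A linear-time alternative sketch (not part
# of the original solution) using a frequency map: each new element x pairs
# with every earlier element equal to x - k or x + k.
from collections import Counter

class SolutionCounting:
    def countKDifference(self, nums: List[int], k: int) -> int:
        freq = Counter()
        count = 0
        for x in nums:
            count += freq[x - k] + freq[x + k]  # pairs ending at x
            freq[x] += 1
        return count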
|
nilq/baby-python
|
python
|
import glob
import re
import time
import os
from utils.config_utils import *
from utils.colors import *
import shutil
from datetime import datetime
import smtplib
from email.mime.text import MIMEText
from email.header import Header
import socket
import subprocess
current_path = os.path.dirname(os.path.abspath(__file__))
project_path = os.path.dirname(current_path)
package_path = os.path.join(project_path, "package")
build_path = os.path.join(package_path, "build")
config_path = os.path.join(project_path, "conf")
local_repo_path = os.path.join(project_path, "repo")
tool_path = os.path.join(project_path, "tools")
oap_tools_source_code_path = os.path.join(package_path, "source_code/oap-tools")
oapperf_source_code_path = os.path.join(package_path, "source_code/oap-perf")
# Get all the defined properties from a property file
def get_properties(filename):
properties = {}
if not os.path.isfile(filename):
return properties
with open(filename) as f:
for line in f:
if line.startswith('#') or not line.split():
continue
key, value = line.partition("=")[::2]
properties[key.strip()] = value.strip()
return properties
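# Example: a property file containing the line "SPARK_HOME=/opt/spark" (a
# hypothetical entry) yields get_properties(path)["SPARK_HOME"] == "/opt/spark".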
def copy_spark_test_script_to_remote(script_folder, dst_path, replacements):
output_folder = os.path.join(package_path, "tmp/script/" + os.path.basename(script_folder))
os.system("rm -rf " + output_folder)
os.system("mkdir -p " + output_folder)
os.system("cp -rf " + script_folder + "/* " + output_folder)
output_folder_star = output_folder + "/*"
final_config_files = glob.glob(output_folder_star)
for file in final_config_files:
if not os.path.isdir(file):
            replace_conf_value(file, replacements)
os.system("rm -rf " + dst_path)
os.system("mkdir -p " + dst_path)
output_folder = os.path.join(package_path, "tmp/script/" + os.path.basename(script_folder))
os.system("cp -r " + output_folder + "/* " + dst_path)
def update_copy_spark_conf(custom_conf, beaver_env):
spark_output_conf = update_conf("spark", custom_conf)
# for conf_file in [file for file in os.listdir(spark_output_conf) if file.endswith(('.conf', '.xml'))]:
# output_conf_file = os.path.join(spark_output_conf, conf_file)
# # dict = get_spark_replace_dict(master, slaves, beaver_env, spark_version)
# replace_conf_value(output_conf_file, dict)
copy_configurations(spark_output_conf, "spark", beaver_env.get("SPARK_HOME"))
def update_copy_hibench_conf(custom_conf, beaver_env):
hibench_output_conf = update_conf("hibench", custom_conf)
for conf_file in [file for file in os.listdir(hibench_output_conf) if file.endswith(('.conf', '.xml'))]:
output_conf_file = os.path.join(hibench_output_conf, conf_file)
        replace_dict = get_hibench_replace_dict(beaver_env)
        replace_conf_value(output_conf_file, replace_dict)
copy_configurations(hibench_output_conf, "hibench", beaver_env.get("HIBENCH_HOME"))
def get_hibench_replace_dict(beaver_env):
    replace_dict = {}
    print(colors.LIGHT_BLUE + "Update spark.conf and hadoop.conf" + colors.ENDC)
    hostname = socket.gethostname()
    hibench_hadoop_examples_jars = subprocess.check_output(
        "find " + beaver_env.get("HADOOP_HOME") + " -name hadoop-mapreduce-examples-*.jar", shell=True).decode('utf-8').strip('\r\n')
    if hibench_hadoop_examples_jars == "":
        hibench_hadoop_examples_jars = subprocess.check_output(
            "find " + os.path.join(os.path.dirname(beaver_env.get("HADOOP_HOME")), "hadoop-mapreduce") + " -name hadoop-mapreduce-examples-*.jar", shell=True).decode('utf-8').strip('\r\n')
    hibench_hadoop_examples_test_jars = subprocess.check_output(
        "find " + beaver_env.get("HADOOP_HOME") + " -name hadoop-mapreduce-client-jobclient-*tests.jar", shell=True).decode('utf-8').strip('\r\n')
    if hibench_hadoop_examples_test_jars == "":
        hibench_hadoop_examples_test_jars = subprocess.check_output(
            "find " + os.path.join(os.path.dirname(beaver_env.get("HADOOP_HOME")), "hadoop-mapreduce") + " -name hadoop-mapreduce-client-jobclient-*tests.jar", shell=True).decode('utf-8').strip('\r\n')
    hibench_version = hibench_get_build_version(beaver_env)
    s3_bucket = beaver_env.get("S3_BUCKET")
    replace_dict["{%storage%}"] = beaver_env.get("STORAGE")
    if beaver_env.get("STORAGE") == "s3":
        replace_dict["{%s3.bucket%}"] = s3_bucket
    else:
        replace_dict["{%s3.bucket%}"] = ""
    replace_dict["master_hostname"] = hostname
    replace_dict["{%hadoop.home%}"] = beaver_env.get("HADOOP_HOME")
    replace_dict["{%spark.home%}"] = beaver_env.get("SPARK_HOME")
    replace_dict["{%hibench.version%}"] = hibench_version
    replace_dict["{%hibench.hadoop.examples.jar%}"] = hibench_hadoop_examples_jars
    replace_dict["{%hibench.hadoop.examples.test.jar%}"] = hibench_hadoop_examples_test_jars
    return replace_dict
def hibench_get_build_version(beaver_env):
hibench_ET = ET
hibench_pom_tree = hibench_ET.parse(os.path.join(beaver_env.get("HIBENCH_HOME"), 'pom.xml'))
hibench_pom_root = hibench_pom_tree.getroot()
hibench_version = hibench_pom_root.find('{http://maven.apache.org/POM/4.0.0}version').text
return hibench_version
def copy_configurations(config_path, component, home_path):
print (colors.LIGHT_BLUE + "Distribute configuration files for " + component + ":" + colors.ENDC)
print (colors.LIGHT_BLUE + "\tGenerate final configuration files of " + component + colors.ENDC)
path = config_path + "/*"
final_config_files = glob.glob(path)
copy_final_configs(final_config_files, component, home_path)
def copy_final_configs(config_files, component, home_path):
print (colors.LIGHT_BLUE + "\tCopy configuration files of " + component + " to all nodes" + colors.ENDC)
if component == "spark":
conf_link = os.path.join(home_path, "conf")
conf_path = home_path + "/config/" + str(time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())) + "/"
os.system("sudo mkdir -p " + conf_path)
os.system("sudo cp -r " + conf_link + "/*" + " " + conf_path)
for file in config_files:
os.system("sudo cp -r " + file + " " + os.path.join(conf_link, os.path.basename(file)))
if component == "hibench":
conf_link = os.path.join(home_path, "conf")
conf_path = os.path.join(home_path, "config/") + str(time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())) + "/"
os.system("mkdir -p " + conf_path)
os.system("cp -r " + conf_link + "/*" + " " + conf_path)
for file in config_files:
if os.path.basename(file).strip("'\r\n'") in ["hadoop.conf", "spark.conf", "hibench.conf"]:
os.system("cp -r " + file + " " + conf_path + os.path.basename(file))
else:
cmd = "find " + conf_path + " -name " + os.path.basename(file).strip("'\r\n'")
# stdout = ssh_execute_withReturn(node, cmd)
stdout = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout
file_path = stdout.readlines()
# for i in file_path:
# print(i.decode().strip("'\r\n'"))
if len(file_path) == 1:
os.system("cp -r " + file + " " + file_path[0].decode().strip("'\r\n'"))
os.system("rm -rf " + conf_link)
os.system("ln -s " + conf_path + " " + conf_link)
def sendmail(subject, html_path, receivers, sender_name=""):
sender = "root@" + socket.gethostname()
    with open(html_path, 'r', encoding='utf-8') as f:
        mail_body = f.read()
    message = MIMEText(mail_body, 'html', "utf-8")
message['Subject'] = Header(subject, "utf-8")
if sender_name:
message['From'] = sender_name
message['To'] = ",".join(receivers)
try:
smtp_obj = smtplib.SMTP('localhost')
smtp_obj.sendmail(sender, receivers, message.as_string())
except smtplib.SMTPException as e:
print(e)
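# Example call (hypothetical values, shown only for illustration):
#   sendmail("Nightly benchmark report", "/tmp/report.html", ["dev@example.com"])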
def get_conf_list(root_path, testing_conf_list, dataGen_conf_list):
dir_or_files = os.listdir(root_path)
for dir_file in dir_or_files:
dir_file_path = os.path.join(root_path, dir_file)
if os.path.isdir(dir_file_path):
if os.path.exists(os.path.join(dir_file_path, ".base")):
if verfiry_dataGen_conf(dir_file_path):
dataGen_conf_list.append(dir_file_path)
else:
testing_conf_list.append(dir_file_path)
else:
get_conf_list(dir_file_path, testing_conf_list, dataGen_conf_list)
def verfiry_dataGen_conf(conf):
    beaver_env = get_merged_env(conf)
    generate_data = beaver_env.get("GENERATE_DATA")
    return generate_data is not None and generate_data.lower() == "true"
def verfiry_throughput_test_conf(conf):
    beaver_env = get_merged_env(conf)
    throughput_test = beaver_env.get("THROUGHPUT_TEST")
    return throughput_test is not None and throughput_test.lower() == "true"
def get_all_conf_list(root_path, testing_conf_list):
dir_or_files = os.listdir(root_path)
for dir_file in dir_or_files:
dir_file_path = os.path.join(root_path, dir_file)
if os.path.isdir(dir_file_path):
if os.path.exists(os.path.join(dir_file_path, ".base")):
testing_conf_list.append(dir_file_path)
else:
get_all_conf_list(dir_file_path, testing_conf_list)
|
nilq/baby-python
|
python
|
__author__ = 'Benjamin Knight'
__license__ = 'MIT'
__version__ = '0.1'
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import yaml
import torch
import random
import argparse
import json
import numpy as np
import datetime
from pathlib import Path
from src.marcos import *
from src.mono_interface import MonoASRInterface
from src.utils import get_usable_cpu_cnt
import src.monitor.logger as logger
# Make cudnn deterministic to reproduce result
torch.backends.cudnn.deterministic = True
# Arguments
parser = argparse.ArgumentParser(description='CommonVoice E2E ASR training/testing scripts')
# shared opts
parser.add_argument('--config', type=str,
help='Path to experiment config.', required=True)
parser.add_argument('--eval_suffix', type=str, default=None,
help='Evaluation suffix')
parser.add_argument('--runs', type=int, default=0)
parser.add_argument('--accent', choices=AVAIL_ACCENTS, required=True)
parser.add_argument('--overwrite',action='store_true')
parser.add_argument('--eval_every_epoch',action='store_true')
parser.add_argument('--seed', type=int, default=531,
                    help='Random seed for reproducible results.')
parser.add_argument('--no_cuda',action='store_true')
parser.add_argument('--no_memmap',action='store_true')
parser.add_argument('--algo', choices=['reptile','fomaml', 'multi', 'fomaml_fast','no'], required=True)
parser.add_argument('--adv', action='store_true')
parser.add_argument('--use_tensorboard',action='store_true')
parser.add_argument('--model_name', choices=['blstm','las','transformer'], default='transformer')
parser.add_argument('--njobs', type=int, default=-1,
help='Number of threads for decoding.')
parser.add_argument('--freeze_layer', type=str, default=None, choices=['VGG','VGG_BLSTM'])
parser.add_argument('--save_verbose', action='store_true')
# pretrain
parser.add_argument('--pretrain', action='store_true')
parser.add_argument('--pretrain_suffix', type=str, default=None,
help='Pretrain model suffix')
parser.add_argument('--pretrain_setting', type=str, default=None)
parser.add_argument('--pretrain_runs', type=int, default=0)
parser.add_argument('--pretrain_step', type=int, default=0)
parser.add_argument('--pretrain_tgt_accent', choices=AVAIL_ACCENTS, default='wa')
parser.add_argument('--pretrain_model_path',type=str, default=None,
help='directly set Pretrain model path')
# for meta
# training opts
parser.add_argument('--resume',action='store_true')
parser.add_argument('--no_bucket',action='store_true')
# testing opts
parser.add_argument('--test', action='store_true', help='Test the model.')
parser.add_argument('--test_model',type=str, default='model.wer.best',
help='Evaluate on this model')
parser.add_argument('--decode_batch_size', type=int, default=1)
# parser.add_argument('--resume_decode_step', default=0, type=int)
# parser.add_argument('--decode_resume')
parser.add_argument('--decode_mode', choices=['greedy', 'beam', 'lm_beam'],
default='greedy')
parser.add_argument('--decode_suffix', default=None, type=str) # will remove later
parser.add_argument('--lm_model_path', default=None, type=str)
# parser.add_argument('--nbest', default=5, type=int)
paras = parser.parse_args()
cur_time_suffix = "{:%B%d-%H%M%S}".format(datetime.datetime.now())
paras.eval_suffix = paras.eval_suffix if paras.eval_suffix else cur_time_suffix
paras.decode_suffix = f"{paras.decode_mode}_decode_{paras.decode_suffix}" if paras.decode_suffix else f"{paras.decode_mode}_decode"
setattr(paras,'cuda', not paras.no_cuda)
setattr(paras,'is_bucket', not paras.no_bucket)
setattr(paras,'is_memmap', not paras.no_memmap)
if paras.adv:
assert paras.algo != 'no'
paras.algo += '-adv'
paras.njobs = paras.njobs if paras.njobs >= 0 else get_usable_cpu_cnt()
with open(paras.config, 'r') as f:
    config = yaml.safe_load(f)
# Seed init.
random.seed(paras.seed)
np.random.seed(paras.seed)
torch.manual_seed(paras.seed)
if torch.cuda.is_available(): torch.cuda.manual_seed_all(paras.seed)
with open(Path('data','accent-code.json'),'r') as fin:
id2accent = json.load(fin)
if paras.test:
from src.tester import Tester
if paras.decode_mode != 'greedy':
assert paras.decode_batch_size == 1, f"decode_batch_size can only be 1 if decode_mode is {paras.decode_mode}"
if paras.cuda and torch.cuda.device_count() == 0:
logger.warning(f"cuda is set to True, but no gpu detected, use cpu for decoding")
paras.cuda = False
solver = Tester(config, paras, id2accent)
else:
if paras.model_name == 'blstm':
from src.blstm_trainer import get_trainer
elif paras.model_name == 'las':
from src.las_trainer import get_trainer
elif paras.model_name == 'transformer':
from src.transformer_torch_trainer import get_trainer
else:
raise NotImplementedError
solver = get_trainer(MonoASRInterface, config, paras, id2accent)
solver.load_data()
solver.set_model()
solver.exec()
|
nilq/baby-python
|
python
|
from distutils.core import setup
setup(name='tf-easy-model-saving',
version='1.0',
author='Philippe Remy',
packages=['easy_model_saving'],
zip_safe=False)
|
nilq/baby-python
|
python
|
# type: ignore
import colorsys
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Dict, List, Optional, Union, Type
from pydantic import constr
from labelbox.schema import project
from labelbox.exceptions import InconsistentOntologyException
from labelbox.orm.db_object import DbObject
from labelbox.orm.model import Field, Relationship
FeatureSchemaId: Type[str] = constr(min_length=25, max_length=25)
SchemaId: Type[str] = constr(min_length=25, max_length=25)
class FeatureSchema(DbObject):
name = Field.String("name")
color = Field.String("name")
normalized = Field.Json("normalized")
@dataclass
class Option:
"""
An option is a possible answer within a Classification object in
a Project's ontology.
To instantiate, only the "value" parameter needs to be passed in.
Example(s):
option = Option(value = "Option Example")
Attributes:
value: (str)
schema_id: (str)
feature_schema_id: (str)
options: (list)
"""
value: Union[str, int]
label: Optional[Union[str, int]] = None
schema_id: Optional[str] = None
feature_schema_id: Optional[FeatureSchemaId] = None
options: List["Classification"] = field(default_factory=list)
def __post_init__(self):
if self.label is None:
self.label = self.value
@classmethod
def from_dict(cls, dictionary: Dict[str, Any]):
return cls(value=dictionary["value"],
label=dictionary["label"],
schema_id=dictionary.get("schemaNodeId", None),
feature_schema_id=dictionary.get("featureSchemaId", None),
options=[
Classification.from_dict(o)
for o in dictionary.get("options", [])
])
def asdict(self) -> Dict[str, Any]:
return {
"schemaNodeId": self.schema_id,
"featureSchemaId": self.feature_schema_id,
"label": self.label,
"value": self.value,
"options": [o.asdict() for o in self.options]
}
def add_option(self, option: 'Classification'):
if option.instructions in (o.instructions for o in self.options):
raise InconsistentOntologyException(
f"Duplicate nested classification '{option.instructions}' "
f"for option '{self.label}'")
self.options.append(option)
@dataclass
class Classification:
"""
    A classification to be added to a Project's ontology. The
classification is dependent on the Classification Type.
To instantiate, the "class_type" and "instructions" parameters must
be passed in.
The "options" parameter holds a list of Option objects. This is not
necessary for some Classification types, such as TEXT. To see which
types require options, look at the "_REQUIRES_OPTIONS" class variable.
Example(s):
classification = Classification(
class_type = Classification.Type.TEXT,
instructions = "Classification Example")
classification_two = Classification(
class_type = Classification.Type.RADIO,
instructions = "Second Example")
classification_two.add_option(Option(
value = "Option Example"))
Attributes:
class_type: (Classification.Type)
instructions: (str)
required: (bool)
options: (list)
schema_id: (str)
feature_schema_id: (str)
"""
class Type(Enum):
TEXT = "text"
CHECKLIST = "checklist"
RADIO = "radio"
DROPDOWN = "dropdown"
_REQUIRES_OPTIONS = {Type.CHECKLIST, Type.RADIO, Type.DROPDOWN}
class_type: Type
instructions: str
required: bool = False
options: List[Option] = field(default_factory=list)
schema_id: Optional[str] = None
feature_schema_id: Optional[str] = None
@property
def name(self):
return self.instructions
@classmethod
def from_dict(cls, dictionary: Dict[str, Any]):
return cls(class_type=cls.Type(dictionary["type"]),
instructions=dictionary["instructions"],
required=dictionary.get("required", False),
options=[Option.from_dict(o) for o in dictionary["options"]],
schema_id=dictionary.get("schemaNodeId", None),
feature_schema_id=dictionary.get("featureSchemaId", None))
def asdict(self) -> Dict[str, Any]:
if self.class_type in self._REQUIRES_OPTIONS \
and len(self.options) < 1:
raise InconsistentOntologyException(
f"Classification '{self.instructions}' requires options.")
return {
"type": self.class_type.value,
"instructions": self.instructions,
"name": self.name,
"required": self.required,
"options": [o.asdict() for o in self.options],
"schemaNodeId": self.schema_id,
"featureSchemaId": self.feature_schema_id
}
def add_option(self, option: Option):
if option.value in (o.value for o in self.options):
raise InconsistentOntologyException(
f"Duplicate option '{option.value}' "
f"for classification '{self.name}'.")
self.options.append(option)
@dataclass
class Tool:
"""
A tool to be added to a Project's ontology. The tool is
dependent on the Tool Type.
To instantiate, the "tool" and "name" parameters must
be passed in.
The "classifications" parameter holds a list of Classification objects.
This can be used to add nested classifications to a tool.
Example(s):
tool = Tool(
tool = Tool.Type.LINE,
name = "Tool example")
classification = Classification(
class_type = Classification.Type.TEXT,
instructions = "Classification Example")
tool.add_classification(classification)
Attributes:
tool: (Tool.Type)
name: (str)
required: (bool)
color: (str)
classifications: (list)
schema_id: (str)
feature_schema_id: (str)
"""
class Type(Enum):
POLYGON = "polygon"
SEGMENTATION = "superpixel"
POINT = "point"
BBOX = "rectangle"
LINE = "line"
NER = "named-entity"
tool: Type
name: str
required: bool = False
color: Optional[str] = None
classifications: List[Classification] = field(default_factory=list)
schema_id: Optional[str] = None
feature_schema_id: Optional[str] = None
@classmethod
def from_dict(cls, dictionary: Dict[str, Any]):
return cls(name=dictionary['name'],
schema_id=dictionary.get("schemaNodeId", None),
feature_schema_id=dictionary.get("featureSchemaId", None),
required=dictionary.get("required", False),
tool=cls.Type(dictionary["tool"]),
classifications=[
Classification.from_dict(c)
for c in dictionary["classifications"]
],
color=dictionary["color"])
def asdict(self) -> Dict[str, Any]:
return {
"tool": self.tool.value,
"name": self.name,
"required": self.required,
"color": self.color,
"classifications": [c.asdict() for c in self.classifications],
"schemaNodeId": self.schema_id,
"featureSchemaId": self.feature_schema_id
}
def add_classification(self, classification: Classification):
if classification.instructions in (
c.instructions for c in self.classifications):
raise InconsistentOntologyException(
f"Duplicate nested classification '{classification.instructions}' "
f"for tool '{self.name}'")
self.classifications.append(classification)
class Ontology(DbObject):
"""An ontology specifies which tools and classifications are available
to a project. This is read only for now.
Attributes:
name (str)
description (str)
updated_at (datetime)
created_at (datetime)
normalized (json)
object_schema_count (int)
classification_schema_count (int)
projects (Relationship): `ToMany` relationship to Project
created_by (Relationship): `ToOne` relationship to User
"""
name = Field.String("name")
description = Field.String("description")
updated_at = Field.DateTime("updated_at")
created_at = Field.DateTime("created_at")
normalized = Field.Json("normalized")
object_schema_count = Field.Int("object_schema_count")
classification_schema_count = Field.Int("classification_schema_count")
projects = Relationship.ToMany("Project", True)
created_by = Relationship.ToOne("User", False, "created_by")
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self._tools: Optional[List[Tool]] = None
self._classifications: Optional[List[Classification]] = None
def tools(self) -> List[Tool]:
"""Get list of tools (AKA objects) in an Ontology."""
if self._tools is None:
self._tools = [
Tool.from_dict(tool) for tool in self.normalized['tools']
]
return self._tools
def classifications(self) -> List[Classification]:
"""Get list of classifications in an Ontology."""
if self._classifications is None:
self._classifications = [
Classification.from_dict(classification)
for classification in self.normalized['classifications']
]
return self._classifications
@dataclass
class OntologyBuilder:
"""
A class to help create an ontology for a Project. This should be used
for making Project ontologies from scratch. OntologyBuilder can also
pull from an already existing Project's ontology.
There are no required instantiation arguments.
To create an ontology, use the asdict() method after fully building your
ontology within this class, and inserting it into project.setup() as the
"labeling_frontend_options" parameter.
Example:
builder = OntologyBuilder()
...
frontend = list(client.get_labeling_frontends())[0]
project.setup(frontend, builder.asdict())
    Attributes:
tools: (list)
classifications: (list)
"""
tools: List[Tool] = field(default_factory=list)
classifications: List[Classification] = field(default_factory=list)
@classmethod
def from_dict(cls, dictionary: Dict[str, Any]):
return cls(tools=[Tool.from_dict(t) for t in dictionary["tools"]],
classifications=[
Classification.from_dict(c)
for c in dictionary["classifications"]
])
def asdict(self):
self._update_colors()
return {
"tools": [t.asdict() for t in self.tools],
"classifications": [c.asdict() for c in self.classifications]
}
def _update_colors(self):
num_tools = len(self.tools)
for index in range(num_tools):
hsv_color = (index * 1 / num_tools, 1, 1)
rgb_color = tuple(
int(255 * x) for x in colorsys.hsv_to_rgb(*hsv_color))
if self.tools[index].color is None:
self.tools[index].color = '#%02x%02x%02x' % rgb_color
@classmethod
def from_project(cls, project: "project.Project"):
ontology = project.ontology().normalized
return cls.from_dict(ontology)
@classmethod
def from_ontology(cls, ontology: Ontology):
return cls.from_dict(ontology.normalized)
def add_tool(self, tool: Tool):
if tool.name in (t.name for t in self.tools):
raise InconsistentOntologyException(
f"Duplicate tool name '{tool.name}'. ")
self.tools.append(tool)
def add_classification(self, classification: Classification):
if classification.instructions in (
c.instructions for c in self.classifications):
raise InconsistentOntologyException(
f"Duplicate classification instructions '{classification.instructions}'. "
)
self.classifications.append(classification)
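# Usage sketch (purely illustrative, based on the docstrings above; not part of
# the library itself):
if __name__ == "__main__":
    builder = OntologyBuilder()
    vehicle_tool = Tool(tool=Tool.Type.BBOX, name="Vehicle")
    vehicle_tool.add_classification(
        Classification(class_type=Classification.Type.RADIO,
                       instructions="Vehicle type",
                       options=[Option(value="car"), Option(value="truck")]))
    builder.add_tool(vehicle_tool)
    # builder.asdict() produces the normalized ontology dict that the
    # OntologyBuilder docstring says to pass to project.setup().
    print(builder.asdict())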
|
nilq/baby-python
|
python
|
from django.conf.urls import url, include
from . import views
from project import views
urlpatterns = [
url('signIn', views.signIn, name='signIn'),
url('signUp', views.signUp, name='signUp')
]
|
nilq/baby-python
|
python
|
__version__ = u'0.1.2'
|
nilq/baby-python
|
python
|
import os
import types
from ledger.util import STH
from ledger.ledger import Ledger
def checkLeafInclusion(verifier, leafData, leafIndex, proof, treeHead):
assert verifier.verify_leaf_inclusion(
leaf=leafData,
leaf_index=leafIndex,
proof=proof,
sth=STH(**treeHead))
def checkConsistency(tree, verifier):
vectors = [(1, 2),
(1, 3),
(4, 5),
(2, 3),
(3, 8)]
for oldsize, newsize in vectors:
proof = tree.consistency_proof(oldsize, newsize)
oldroot = tree.merkle_tree_hash(0, oldsize)
newroot = tree.merkle_tree_hash(0, newsize)
assert verifier.verify_tree_consistency(old_tree_size=oldsize,
new_tree_size=newsize,
old_root=oldroot,
new_root=newroot,
proof=proof)
def check_ledger_generator(ledger):
size = ledger.size
assert isinstance(ledger.getAllTxn(frm=1, to=size), types.GeneratorType)
assert isinstance(ledger.getAllTxn(frm=1), types.GeneratorType)
assert isinstance(ledger.getAllTxn(to=size), types.GeneratorType)
assert isinstance(ledger.getAllTxn(), types.GeneratorType)
class NoTransactionRecoveryLedger(Ledger):
def recoverTreeFromTxnLog(self):
pass
|
nilq/baby-python
|
python
|
from __future__ import absolute_import, print_function
from . import camx, cmaq
__all__ = ['camx', 'cmaq']
__name__ = 'models'
#
|
nilq/baby-python
|
python
|
from bangtal import *
import time
setGameOption(GameOption.INVENTORY_BUTTON, False)
setGameOption(GameOption.MESSAGE_BOX_BUTTON, False)
game_scene = Scene('Othello', 'Images/background.png')
transparent_screen = Object('Images/transparent_screen.png')
BLANK = -1
BLACK = 0
WHITE = 1
BLACK_POS = 3
WHITE_POS = 4
BASE = 40
LEN = 80
BLK_SCR_BASE_X = 825
BLK_SCR_BASE_Y = 220
WHT_SCR_BASE_X = 1080
WHT_SCR_BASE_Y = 220
SCR_LEN = 65
turn = BLACK
black_can_move = True
white_can_move = True
finish_status = False
blank_img = 'Images/blank.png'
black_pos_img = 'Images/black possible.png'
white_pos_img = 'Images/white possible.png'
black_img = 'Images/black.png'
white_img = 'Images/white.png'
game_data = []
game_img = []
black_score_img = []
white_score_img = []
possible_list = []
for j in range(8):
row_data = []
row_img = []
for i in range(8):
row_data.append(-1)
row_img.append(Object(blank_img))
game_data.append(row_data)
game_img.append(row_img)
def init_game():
game_data[3][3] = BLACK
game_img[3][3] = Object(black_img)
game_img[3][3].locate(game_scene, BASE + 3 * LEN, BASE + 3 * LEN)
game_img[3][3].show()
game_data[4][4] = BLACK
game_img[4][4] = Object(black_img)
game_img[4][4].locate(game_scene, BASE + 4 * LEN, BASE + 4 * LEN)
game_img[4][4].show()
game_data[4][3] = WHITE
game_img[4][3] = Object(white_img)
game_img[4][3].locate(game_scene, BASE + 3 * LEN, BASE + 4 * LEN)
game_img[4][3].show()
game_data[3][4] = WHITE
game_img[3][4] = Object(white_img)
game_img[3][4].locate(game_scene, BASE + 4 * LEN, BASE + 3 * LEN)
game_img[3][4].show()
show_score()
STATE_IMAGES = {
    BLACK: black_img,
    WHITE: white_img,
    BLANK: blank_img,
    BLACK_POS: black_pos_img,
    WHITE_POS: white_pos_img,
}
def change_state(x, y, state):
    # Swap the stone/hint image at board position (x, y) and record the new state.
    if state not in STATE_IMAGES:
        return
    game_data[y][x] = state
    game_img[y][x].hide()
    game_img[y][x] = Object(STATE_IMAGES[state])
    game_img[y][x].locate(game_scene, BASE + x * LEN, BASE + y * LEN)
    game_img[y][x].show()
def possible_move_check():
global possible_list
global turn
global black_can_move
global white_can_move
possible_list = []
for j in range(8):
for i in range(8):
if game_data[j][i] == turn:
other_color_check = False
x = i + 1
y = j
while x < 8:
if game_data[y][x] == turn:
break
elif game_data[y][x] == BLANK:
if other_color_check:
possible_list.append(y * 8 + x)
break
else:
other_color_check = True
x += 1
other_color_check = False
x = i + 1
y = j + 1
while x < 8 and y < 8:
if game_data[y][x] == turn:
break
elif game_data[y][x] == BLANK:
if other_color_check:
possible_list.append(y * 8 + x)
break
else:
other_color_check = True
x += 1
y += 1
other_color_check = False
x = i
y = j + 1
while y < 8:
if game_data[y][x] == turn:
break
elif game_data[y][x] == BLANK:
if other_color_check:
possible_list.append(y * 8 + x)
break
else:
other_color_check = True
y += 1
other_color_check = False
x = i - 1
y = j + 1
while x >= 0 and y < 8:
if game_data[y][x] == turn:
break
elif game_data[y][x] == BLANK:
if other_color_check:
possible_list.append(y * 8 + x)
break
else:
other_color_check = True
x -= 1
y += 1
other_color_check = False
x = i - 1
y = j
while x >= 0:
if game_data[y][x] == turn:
break
elif game_data[y][x] == BLANK:
if other_color_check:
possible_list.append(y * 8 + x)
break
else:
other_color_check = True
x -= 1
other_color_check = False
x = i - 1
y = j - 1
while x >= 0 and y >= 0:
if game_data[y][x] == turn:
break
elif game_data[y][x] == BLANK:
if other_color_check:
possible_list.append(y * 8 + x)
break
else:
other_color_check = True
x -= 1
y -= 1
other_color_check = False
x = i
y = j - 1
while y >= 0:
if game_data[y][x] == turn:
break
elif game_data[y][x] == BLANK:
if other_color_check:
possible_list.append(y * 8 + x)
break
else:
other_color_check = True
y -= 1
other_color_check = False
x = i + 1
y = j - 1
while x < 8 and y >= 0:
if game_data[y][x] == turn:
break
elif game_data[y][x] == BLANK:
if other_color_check:
possible_list.append(y * 8 + x)
break
else:
other_color_check = True
x += 1
y -= 1
if not possible_list:
finish_control()
else:
for idx in possible_list:
x = idx % 8
y = idx // 8
if turn == BLACK:
black_can_move = True
change_state(x, y, BLACK_POS)
else :
white_can_move = True
change_state(x, y, WHITE_POS)
def flip_stone(i, j):
global turn
global possible_list
for idx in possible_list:
x = idx % 8
y = idx // 8
change_state(x, y, BLANK)
target_list = []
temp = []
x = i + 1
y = j
while x < 8:
if game_data[y][x] == turn:
target_list += temp
break
elif game_data[y][x] == BLANK:
break
else:
temp.append(y * 8 + x)
x += 1
temp = []
x = i + 1
y = j + 1
while x < 8 and y < 8:
if game_data[y][x] == turn:
target_list += temp
break
elif game_data[y][x] == BLANK:
break
else:
temp.append(y * 8 + x)
x += 1
y += 1
temp = []
x = i
y = j + 1
while y < 8:
if game_data[y][x] == turn:
target_list += temp
break
elif game_data[y][x] == BLANK:
break
else:
temp.append(y * 8 + x)
y += 1
temp = []
x = i - 1
y = j + 1
while x >= 0 and y < 8:
if game_data[y][x] == turn:
target_list += temp
break
elif game_data[y][x] == BLANK:
break
else:
temp.append(y * 8 + x)
x -= 1
y += 1
temp = []
x = i - 1
y = j
while x >= 0:
if game_data[y][x] == turn:
target_list += temp
break
elif game_data[y][x] == BLANK:
break
else:
temp.append(y * 8 + x)
x -= 1
temp = []
x = i - 1
y = j - 1
while x >= 0 and y >= 0:
if game_data[y][x] == turn:
target_list += temp
break
elif game_data[y][x] == BLANK:
break
else:
temp.append(y * 8 + x)
x -= 1
y -= 1
temp = []
x = i
y = j - 1
while y >= 0:
if game_data[y][x] == turn:
target_list += temp
break
elif game_data[y][x] == BLANK:
break
else:
temp.append(y * 8 + x)
y -= 1
temp = []
x = i + 1
y = j - 1
while x < 8 and y >= 0:
if game_data[y][x] == turn:
target_list += temp
break
elif game_data[y][x] == BLANK:
break
else:
temp.append(y * 8 + x)
x += 1
y -= 1
target_list.append(i + j * 8)
for idx in target_list:
x = idx % 8
y = idx // 8
change_state(x, y, turn)
def score_check():
black_score = 0
white_score = 0
for j in range(8):
for i in range(8):
if game_data[j][i] == BLACK:
black_score += 1
elif game_data[j][i] == WHITE:
white_score += 1
else:
pass
return black_score, white_score
def show_score():
global black_score_img
global white_score_img
for img in black_score_img:
img.hide()
for img in white_score_img:
img.hide()
black_score_img = []
white_score_img = []
black_score, white_score = score_check()
for idx, num in enumerate(str(black_score)[::-1]):
file_name = 'Images/L' + num + '.png'
number = Object(file_name)
number.locate(game_scene, BLK_SCR_BASE_X - idx * SCR_LEN, BLK_SCR_BASE_Y)
number.show()
black_score_img.append(number)
    for idx, num in enumerate(str(white_score)):
        file_name = 'Images/L' + num + '.png'
        number = Object(file_name)
        number.locate(game_scene, WHT_SCR_BASE_X + idx * SCR_LEN, WHT_SCR_BASE_Y)
        number.show()
        white_score_img.append(number)
def finish_control():
global turn
global black_can_move
global white_can_move
global transparent_screen
global finish_status
if turn == BLACK:
black_can_move = False
if white_can_move:
turn = WHITE
possible_move_check()
else:
white_can_move = False
if black_can_move:
turn = BLACK
possible_move_check()
if not black_can_move and not white_can_move:
black_score, white_score = score_check()
        if black_score > white_score:
            showMessage('Black wins!!')
        elif black_score < white_score:
            showMessage('White wins!!')
        else:
            showMessage('It is a draw!!')
finish_status = True
def ai_location_select():
global turn
global possible_list
flip_stone_count = []
for idx in possible_list:
i = idx % 8
j = idx // 8
target_list = []
temp = []
x = i + 1
y = j
while x < 8:
if game_data[y][x] == turn:
target_list += temp
break
elif game_data[y][x] == BLANK:
break
else:
temp.append(y * 8 + x)
x += 1
temp = []
x = i + 1
y = j + 1
while x < 8 and y < 8:
if game_data[y][x] == turn:
target_list += temp
break
elif game_data[y][x] == BLANK:
break
else:
temp.append(y * 8 + x)
x += 1
y += 1
temp = []
x = i
y = j + 1
while y < 8:
if game_data[y][x] == turn:
target_list += temp
break
elif game_data[y][x] == BLANK:
break
else:
temp.append(y * 8 + x)
y += 1
temp = []
x = i - 1
y = j + 1
while x >= 0 and y < 8:
if game_data[y][x] == turn:
target_list += temp
break
elif game_data[y][x] == BLANK:
break
else:
temp.append(y * 8 + x)
x -= 1
y += 1
temp = []
x = i - 1
y = j
while x >= 0:
if game_data[y][x] == turn:
target_list += temp
break
elif game_data[y][x] == BLANK:
break
else:
temp.append(y * 8 + x)
x -= 1
temp = []
x = i - 1
y = j - 1
while x >= 0 and y >= 0:
if game_data[y][x] == turn:
target_list += temp
break
elif game_data[y][x] == BLANK:
break
else:
temp.append(y * 8 + x)
x -= 1
y -= 1
temp = []
x = i
y = j - 1
while y >= 0:
if game_data[y][x] == turn:
target_list += temp
break
elif game_data[y][x] == BLANK:
break
else:
temp.append(y * 8 + x)
y -= 1
temp = []
x = i + 1
y = j - 1
while x < 8 and y >= 0:
if game_data[y][x] == turn:
target_list += temp
break
elif game_data[y][x] == BLANK:
break
else:
temp.append(y * 8 + x)
x += 1
y -= 1
target_list.append(i + j * 8)
flip_stone_count.append(len(target_list))
return possible_list[flip_stone_count.index(max(flip_stone_count))]
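# Hedged refactor sketch (not wired into the game above): flip_stone and
# ai_location_select repeat the same eight manual direction scans; a single
# helper driven by direction vectors could replace them (possible_move_check
# uses a similar but inverted scan). Shown only as an illustration.
DIRECTIONS = [(1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1), (0, -1), (1, -1)]
def captured_indices(i, j, color):
    # Return the board indices (y * 8 + x) that would be flipped by playing
    # `color` at column i, row j.
    captured = []
    for dx, dy in DIRECTIONS:
        x, y = i + dx, j + dy
        run = []
        while 0 <= x < 8 and 0 <= y < 8:
            if game_data[y][x] == color:
                captured += run  # bounded on both ends: this run flips
                break
            elif game_data[y][x] == BLANK:
                break            # open end: nothing captured in this direction
            else:
                run.append(y * 8 + x)
                x += dx
                y += dy
    return captured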
def ai_move():
location = ai_location_select()
x = location % 8
y = location // 8
game_control(x, y)
def game_control(x, y):
global possible_list
global turn
global finish_status
global transparent_screen
index = y * 8 + x
if index in possible_list:
flip_stone(x, y)
if turn == BLACK:
turn = WHITE
else:
turn = BLACK
possible_move_check()
show_score()
if finish_status:
transparent_screen.hide()
else:
reset_transparent_screen()
        ##########
        # If you remove this block, the game can be played without the AI opponent
        if turn == WHITE:
            ai_move()
        ##########
else:
        showMessage('You cannot place a stone there')
def reset_transparent_screen():
global transparent_screen
transparent_screen.hide()
del transparent_screen
transparent_screen = Object('Images/transparent_screen.png')
transparent_screen.locate(game_scene, 40, 40)
transparent_screen.show()
transparent_screen.onMouseAction = transparent_screen_on_click
def transparent_screen_on_click(x, y, action):
idx_x = x // 80
idx_y = y // 80
game_control(idx_x, idx_y)
init_game()
possible_move_check()
reset_transparent_screen()
startGame(game_scene)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 7 00:53:29 2019
@author: yoelr
"""
from ... import Unit
__all__ = ('extend_summary', )
def extend_summary(cls):
"""Extends the Unit class with the following abstract methods:
**_end():**
Finish setting purchase prices and utility costs.
"""
if hasattr(cls, '_end'):
if cls._summary is Unit._summary:
cls._summary = _summary
elif cls._summary is not _summary:
raise RuntimeError("cannot decorate Unit subclass an implemented '_summary' method")
def _summary(self):
"""Calculate all results from unit run."""
self._design()
self._cost()
self._end()
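# Illustrative sketch (hypothetical subclass; depends on the surrounding Unit
# framework, so shown as a comment only):
#
#   class MyUnit(Unit):
#       def _end(self):
#           # finish setting purchase prices and utility costs here
#           ...
#
#   extend_summary(MyUnit)  # MyUnit._summary now runs _design, _cost, then _end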
|
nilq/baby-python