text stringlengths 0 1.05M | meta dict |
|---|---|
"""Allows the creation of a sensor that filters state property."""
from collections import Counter, deque
from copy import copy
from datetime import timedelta
from functools import partial
import logging
from numbers import Number
import statistics
from typing import Optional
import voluptuous as vol
from homeassistant.components import history
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.input_number import DOMAIN as INPUT_NUMBER_DOMAIN
from homeassistant.components.sensor import (
DEVICE_CLASSES as SENSOR_DEVICE_CLASSES,
DOMAIN as SENSOR_DOMAIN,
PLATFORM_SCHEMA,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
CONF_ENTITY_ID,
CONF_NAME,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.util.decorator import Registry
import homeassistant.util.dt as dt_util
from . import DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)
# Filter identifiers, used both in YAML configuration and as keys in the
# FILTERS registry below.
FILTER_NAME_RANGE = "range"
FILTER_NAME_LOWPASS = "lowpass"
FILTER_NAME_OUTLIER = "outlier"
FILTER_NAME_THROTTLE = "throttle"
FILTER_NAME_TIME_THROTTLE = "time_throttle"
FILTER_NAME_TIME_SMA = "time_simple_moving_average"
# Registry mapping filter names to Filter subclasses; populated by the
# @FILTERS.register decorators on the classes below.
FILTERS = Registry()
# YAML configuration keys.
CONF_FILTERS = "filters"
CONF_FILTER_NAME = "filter"
CONF_FILTER_WINDOW_SIZE = "window_size"
CONF_FILTER_PRECISION = "precision"
CONF_FILTER_RADIUS = "radius"
CONF_FILTER_TIME_CONSTANT = "time_constant"
CONF_FILTER_LOWER_BOUND = "lower_bound"
CONF_FILTER_UPPER_BOUND = "upper_bound"
CONF_TIME_SMA_TYPE = "type"
TIME_SMA_LAST = "last"
# A filter window is sized either by a number of events or by a time span.
WINDOW_SIZE_UNIT_NUMBER_EVENTS = 1
WINDOW_SIZE_UNIT_TIME = 2
# Defaults applied when the corresponding option is omitted.
DEFAULT_WINDOW_SIZE = 1
DEFAULT_PRECISION = 2
DEFAULT_FILTER_RADIUS = 2.0
DEFAULT_FILTER_TIME_CONSTANT = 10
NAME_TEMPLATE = "{} filter"
ICON = "mdi:chart-line-variant"
# Base schema shared by every filter: only the rounding precision.
FILTER_SCHEMA = vol.Schema(
    {vol.Optional(CONF_FILTER_PRECISION, default=DEFAULT_PRECISION): vol.Coerce(int)}
)
# Per-filter schemas, each extending the base schema with its own options.
FILTER_OUTLIER_SCHEMA = FILTER_SCHEMA.extend(
    {
        vol.Required(CONF_FILTER_NAME): FILTER_NAME_OUTLIER,
        vol.Optional(CONF_FILTER_WINDOW_SIZE, default=DEFAULT_WINDOW_SIZE): vol.Coerce(
            int
        ),
        vol.Optional(CONF_FILTER_RADIUS, default=DEFAULT_FILTER_RADIUS): vol.Coerce(
            float
        ),
    }
)
FILTER_LOWPASS_SCHEMA = FILTER_SCHEMA.extend(
    {
        vol.Required(CONF_FILTER_NAME): FILTER_NAME_LOWPASS,
        vol.Optional(CONF_FILTER_WINDOW_SIZE, default=DEFAULT_WINDOW_SIZE): vol.Coerce(
            int
        ),
        vol.Optional(
            CONF_FILTER_TIME_CONSTANT, default=DEFAULT_FILTER_TIME_CONSTANT
        ): vol.Coerce(int),
    }
)
FILTER_RANGE_SCHEMA = FILTER_SCHEMA.extend(
    {
        vol.Required(CONF_FILTER_NAME): FILTER_NAME_RANGE,
        vol.Optional(CONF_FILTER_LOWER_BOUND): vol.Coerce(float),
        vol.Optional(CONF_FILTER_UPPER_BOUND): vol.Coerce(float),
    }
)
# Time-based filters take a timedelta window instead of an event count.
FILTER_TIME_SMA_SCHEMA = FILTER_SCHEMA.extend(
    {
        vol.Required(CONF_FILTER_NAME): FILTER_NAME_TIME_SMA,
        vol.Optional(CONF_TIME_SMA_TYPE, default=TIME_SMA_LAST): vol.In(
            [TIME_SMA_LAST]
        ),
        vol.Required(CONF_FILTER_WINDOW_SIZE): vol.All(
            cv.time_period, cv.positive_timedelta
        ),
    }
)
FILTER_THROTTLE_SCHEMA = FILTER_SCHEMA.extend(
    {
        vol.Required(CONF_FILTER_NAME): FILTER_NAME_THROTTLE,
        vol.Optional(CONF_FILTER_WINDOW_SIZE, default=DEFAULT_WINDOW_SIZE): vol.Coerce(
            int
        ),
    }
)
FILTER_TIME_THROTTLE_SCHEMA = FILTER_SCHEMA.extend(
    {
        vol.Required(CONF_FILTER_NAME): FILTER_NAME_TIME_THROTTLE,
        vol.Required(CONF_FILTER_WINDOW_SIZE): vol.All(
            cv.time_period, cv.positive_timedelta
        ),
    }
)
# Platform schema: a source entity, an optional friendly name and the
# ordered list of filters to apply.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_ENTITY_ID): vol.Any(
            cv.entity_domain(SENSOR_DOMAIN),
            cv.entity_domain(BINARY_SENSOR_DOMAIN),
            cv.entity_domain(INPUT_NUMBER_DOMAIN),
        ),
        vol.Optional(CONF_NAME): cv.string,
        vol.Required(CONF_FILTERS): vol.All(
            cv.ensure_list,
            [
                vol.Any(
                    FILTER_OUTLIER_SCHEMA,
                    FILTER_LOWPASS_SCHEMA,
                    FILTER_TIME_SMA_SCHEMA,
                    FILTER_THROTTLE_SCHEMA,
                    FILTER_TIME_THROTTLE_SCHEMA,
                    FILTER_RANGE_SCHEMA,
                )
            ],
        ),
    }
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the template sensors."""
    await async_setup_reload_service(hass, DOMAIN, PLATFORMS)

    name = config.get(CONF_NAME)
    entity_id = config.get(CONF_ENTITY_ID)

    # Build each configured filter via the registry: the "filter" key picks
    # the class, the remaining options become its keyword arguments.
    sensor_filters = []
    for filter_conf in config[CONF_FILTERS]:
        filter_class = FILTERS[filter_conf.pop(CONF_FILTER_NAME)]
        sensor_filters.append(filter_class(entity=entity_id, **filter_conf))

    async_add_entities([SensorFilter(name, entity_id, sensor_filters)])
class SensorFilter(Entity):
    """Representation of a Filter Sensor.

    Tracks a source entity and exposes its state after running it through
    the configured chain of filters.
    """
    def __init__(self, name, entity_id, filters):
        """Initialize the sensor.

        :param name: friendly name of this filter sensor
        :param entity_id: entity id of the source entity to track
        :param filters: ordered list of Filter instances to apply
        """
        self._name = name
        self._entity = entity_id
        # Unit, icon and device class are adopted lazily from the source
        # entity on the first processed state.
        self._unit_of_measurement = None
        self._state = None
        self._filters = filters
        self._icon = None
        self._device_class = None
    @callback
    def _update_filter_sensor_state_event(self, event):
        """Handle device state changes."""
        _LOGGER.debug("Update filter on event: %s", event)
        self._update_filter_sensor_state(event.data.get("new_state"))
    @callback
    def _update_filter_sensor_state(self, new_state, update_ha=True):
        """Process device state changes.

        :param new_state: new State of the source entity (may be None)
        :param update_ha: when False (history replay) do not write HA state
        """
        if new_state is None:
            _LOGGER.warning(
                "While updating filter %s, the new_state is None", self._name
            )
            self._state = None
            self.async_write_ha_state()
            return
        # Unknown/unavailable states are propagated verbatim, not filtered.
        if new_state.state in [STATE_UNKNOWN, STATE_UNAVAILABLE]:
            self._state = new_state.state
            self.async_write_ha_state()
            return
        temp_state = new_state
        try:
            # Run the state through every filter in order; each filter gets a
            # copy so it cannot mutate the original State object.
            for filt in self._filters:
                filtered_state = filt.filter_state(copy(temp_state))
                _LOGGER.debug(
                    "%s(%s=%s) -> %s",
                    filt.name,
                    self._entity,
                    temp_state.state,
                    "skip" if filt.skip_processing else filtered_state.state,
                )
                if filt.skip_processing:
                    # Throttle-style filters may drop this sample entirely.
                    return
                temp_state = filtered_state
        except ValueError:
            _LOGGER.error(
                "Could not convert state: %s (%s) to number",
                new_state.state,
                type(new_state.state),
            )
            return
        self._state = temp_state.state
        # Adopt presentation attributes from the source entity once.
        if self._icon is None:
            self._icon = new_state.attributes.get(ATTR_ICON, ICON)
        if (
            self._device_class is None
            and new_state.attributes.get(ATTR_DEVICE_CLASS) in SENSOR_DEVICE_CLASSES
        ):
            self._device_class = new_state.attributes.get(ATTR_DEVICE_CLASS)
        if self._unit_of_measurement is None:
            self._unit_of_measurement = new_state.attributes.get(
                ATTR_UNIT_OF_MEASUREMENT
            )
        if update_ha:
            self.async_write_ha_state()
    async def async_added_to_hass(self):
        """Register callbacks.

        When the recorder is available, pre-fill the filters by replaying
        enough history to cover the largest configured window.
        """
        if "recorder" in self.hass.config.components:
            history_list = []
            largest_window_items = 0
            largest_window_time = timedelta(0)
            # Determine the largest window_size by type
            for filt in self._filters:
                if (
                    filt.window_unit == WINDOW_SIZE_UNIT_NUMBER_EVENTS
                    and largest_window_items < filt.window_size
                ):
                    largest_window_items = filt.window_size
                elif (
                    filt.window_unit == WINDOW_SIZE_UNIT_TIME
                    and largest_window_time < filt.window_size
                ):
                    largest_window_time = filt.window_size
            # Retrieve the largest window_size of each type
            if largest_window_items > 0:
                filter_history = await self.hass.async_add_executor_job(
                    partial(
                        history.get_last_state_changes,
                        self.hass,
                        largest_window_items,
                        entity_id=self._entity,
                    )
                )
                if self._entity in filter_history:
                    history_list.extend(filter_history[self._entity])
            if largest_window_time > timedelta(seconds=0):
                start = dt_util.utcnow() - largest_window_time
                filter_history = await self.hass.async_add_executor_job(
                    partial(
                        history.state_changes_during_period,
                        self.hass,
                        start,
                        entity_id=self._entity,
                    )
                )
                # Skip states already fetched by the count-based query above.
                if self._entity in filter_history:
                    history_list.extend(
                        [
                            state
                            for state in filter_history[self._entity]
                            if state not in history_list
                        ]
                    )
            # Sort the window states
            history_list = sorted(history_list, key=lambda s: s.last_updated)
            _LOGGER.debug(
                "Loading from history: %s",
                [(s.state, s.last_updated) for s in history_list],
            )
            # Replay history through the filter chain
            for state in history_list:
                if state.state not in [STATE_UNKNOWN, STATE_UNAVAILABLE, None]:
                    self._update_filter_sensor_state(state, False)
        self.async_on_remove(
            async_track_state_change_event(
                self.hass, [self._entity], self._update_filter_sensor_state_event
            )
        )
    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name
    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state
    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return self._icon
    @property
    def unit_of_measurement(self):
        """Return the unit_of_measurement of the device."""
        return self._unit_of_measurement
    @property
    def should_poll(self):
        """No polling needed."""
        return False
    @property
    def device_state_attributes(self):
        """Return the state attributes of the sensor."""
        return {ATTR_ENTITY_ID: self._entity}
    @property
    def device_class(self):
        """Return device class."""
        return self._device_class
class FilterState:
    """State abstraction for filter usage."""

    def __init__(self, state):
        """Initialize with HA State object."""
        self.timestamp = state.last_updated
        raw = state.state
        try:
            raw = float(raw)
        except ValueError:
            # Non-numeric states (e.g. "on"/"off") are kept verbatim.
            pass
        self.state = raw

    def set_precision(self, precision):
        """Set precision of Number based states."""
        if not isinstance(self.state, Number):
            return
        rounded = round(float(self.state), precision)
        # Precision 0 yields a clean int instead of a trailing ".0" float.
        self.state = int(rounded) if precision == 0 else rounded

    def __str__(self):
        """Return state as the string representation of FilterState."""
        return str(self.state)

    def __repr__(self):
        """Return timestamp and state as the representation of FilterState."""
        return f"{self.timestamp} : {self.state}"
class Filter:
    """Filter skeleton."""

    def __init__(
        self,
        name,
        window_size: int = 1,
        precision: Optional[int] = None,
        entity: Optional[str] = None,
    ):
        """Initialize common attributes.

        :param window_size: size of the sliding window that holds previous values
        :param precision: round filtered value to precision value
        :param entity: used for debugging only
        """
        counts_events = isinstance(window_size, int)
        # Only count-based windows buffer previous states here; time-based
        # windows keep their own bookkeeping in the subclass.
        self.states = deque(maxlen=window_size if counts_events else 0)
        self.window_unit = (
            WINDOW_SIZE_UNIT_NUMBER_EVENTS if counts_events else WINDOW_SIZE_UNIT_TIME
        )
        self.precision = precision
        self._name = name
        self._entity = entity
        self._skip_processing = False
        self._window_size = window_size
        self._store_raw = False
        self._only_numbers = True

    @property
    def window_size(self):
        """Return window size."""
        return self._window_size

    @property
    def name(self):
        """Return filter name."""
        return self._name

    @property
    def skip_processing(self):
        """Return whether the current filter_state should be skipped."""
        return self._skip_processing

    def _filter_state(self, new_state):
        """Implement filter."""
        raise NotImplementedError()

    def filter_state(self, new_state):
        """Implement a common interface for filters."""
        fstate = FilterState(new_state)
        if self._only_numbers and not isinstance(fstate.state, Number):
            raise ValueError(f"State <{fstate.state}> is not a Number")

        filtered = self._filter_state(fstate)
        filtered.set_precision(self.precision)

        # Either buffer the raw input or the filtered output, depending on
        # what the concrete filter needs for its window.
        kept = FilterState(new_state) if self._store_raw else filtered
        self.states.append(copy(kept))
        new_state.state = filtered.state
        return new_state
@FILTERS.register(FILTER_NAME_RANGE)
class RangeFilter(Filter):
    """Range filter.

    Determines if new state is in the range of upper_bound and lower_bound.
    If not inside, lower or upper bound is returned instead.
    """

    def __init__(
        self,
        entity,
        precision: Optional[int] = DEFAULT_PRECISION,
        lower_bound: Optional[float] = None,
        upper_bound: Optional[float] = None,
    ):
        """Initialize Filter.

        :param upper_bound: band upper bound
        :param lower_bound: band lower bound
        """
        super().__init__(FILTER_NAME_RANGE, precision=precision, entity=entity)
        self._lower_bound = lower_bound
        self._upper_bound = upper_bound
        # Counts how many samples were clamped at each bound (debug only).
        self._stats_internal = Counter()

    def _filter_state(self, new_state):
        """Implement the range filter."""
        value = new_state.state

        if self._upper_bound is not None and value > self._upper_bound:
            # Clamp to the upper bound.
            self._stats_internal["erasures_up"] += 1
            _LOGGER.debug(
                "Upper outlier nr. %s in %s: %s",
                self._stats_internal["erasures_up"],
                self._entity,
                new_state,
            )
            new_state.state = self._upper_bound
        elif self._lower_bound is not None and value < self._lower_bound:
            # Clamp to the lower bound.
            self._stats_internal["erasures_low"] += 1
            _LOGGER.debug(
                "Lower outlier nr. %s in %s: %s",
                self._stats_internal["erasures_low"],
                self._entity,
                new_state,
            )
            new_state.state = self._lower_bound

        return new_state
@FILTERS.register(FILTER_NAME_OUTLIER)
class OutlierFilter(Filter):
    """BASIC outlier filter.

    Determines if new state is in a band around the median.
    """

    def __init__(self, window_size, precision, entity, radius: float):
        """Initialize Filter.

        :param radius: band radius
        """
        super().__init__(FILTER_NAME_OUTLIER, window_size, precision, entity)
        self._radius = radius
        self._stats_internal = Counter()
        # Keep the raw samples so the median reflects real measurements.
        self._store_raw = True

    def _filter_state(self, new_state):
        """Implement the outlier filter."""
        samples = [s.state for s in self.states]
        median = statistics.median(samples) if samples else 0

        window_is_full = len(self.states) == self.states.maxlen
        if window_is_full and abs(new_state.state - median) > self._radius:
            # Replace the outlier with the window median.
            self._stats_internal["erasures"] += 1
            _LOGGER.debug(
                "Outlier nr. %s in %s: %s",
                self._stats_internal["erasures"],
                self._entity,
                new_state,
            )
            new_state.state = median
        return new_state
@FILTERS.register(FILTER_NAME_LOWPASS)
class LowPassFilter(Filter):
    """BASIC Low Pass Filter."""

    def __init__(self, window_size, precision, entity, time_constant: int):
        """Initialize Filter."""
        super().__init__(FILTER_NAME_LOWPASS, window_size, precision, entity)
        self._time_constant = time_constant

    def _filter_state(self, new_state):
        """Implement the low pass filter."""
        if not self.states:
            # Nothing to smooth against yet: pass the first sample through.
            return new_state

        # Exponential smoothing: blend previous output with the new sample.
        alpha = 1.0 / self._time_constant
        previous = self.states[-1].state
        new_state.state = (1.0 - alpha) * previous + alpha * new_state.state
        return new_state
@FILTERS.register(FILTER_NAME_TIME_SMA)
class TimeSMAFilter(Filter):
    """Simple Moving Average (SMA) Filter.

    The window_size is determined by time, and SMA is time weighted.
    """
    def __init__(
        self, window_size, precision, entity, type
    ):  # pylint: disable=redefined-builtin
        """Initialize Filter.

        :param type: type of algorithm used to connect discrete values
        """
        super().__init__(FILTER_NAME_TIME_SMA, window_size, precision, entity)
        # window_size is a timedelta here.
        self._time_window = window_size
        # Most recent state evicted from the queue; provides the value in
        # effect at the left edge of the window.
        self.last_leak = None
        # States currently inside the time window, oldest first.
        self.queue = deque()
    def _leak(self, left_boundary):
        """Remove timeouted elements."""
        # Pop states that fell entirely out of the window, remembering the
        # last one popped for use as the left-edge value.
        while self.queue:
            if self.queue[0].timestamp + self._time_window <= left_boundary:
                self.last_leak = self.queue.popleft()
            else:
                return
    def _filter_state(self, new_state):
        """Implement the Simple Moving Average filter."""
        self._leak(new_state.timestamp)
        self.queue.append(copy(new_state))
        # Time-weighted average: each state contributes its value for the
        # duration until the next state's timestamp.
        moving_sum = 0
        start = new_state.timestamp - self._time_window
        prev_state = self.last_leak or self.queue[0]
        for state in self.queue:
            moving_sum += (state.timestamp - start).total_seconds() * prev_state.state
            start = state.timestamp
            prev_state = state
        new_state.state = moving_sum / self._time_window.total_seconds()
        return new_state
@FILTERS.register(FILTER_NAME_THROTTLE)
class ThrottleFilter(Filter):
    """Throttle Filter.

    One sample per window.
    """

    def __init__(self, window_size, precision, entity):
        """Initialize Filter."""
        super().__init__(FILTER_NAME_THROTTLE, window_size, precision, entity)
        # Throttling is about timing, not values, so non-numeric states are
        # accepted as well.
        self._only_numbers = False

    def _filter_state(self, new_state):
        """Implement the throttle filter."""
        window_open = not self.states or len(self.states) == self.states.maxlen
        if window_open:
            # Emit this sample and start a new window.
            self.states.clear()
            self._skip_processing = False
        else:
            self._skip_processing = True
        return new_state
@FILTERS.register(FILTER_NAME_TIME_THROTTLE)
class TimeThrottleFilter(Filter):
    """Time Throttle Filter.

    One sample per time period.
    """

    def __init__(self, window_size, precision, entity):
        """Initialize Filter."""
        super().__init__(FILTER_NAME_TIME_THROTTLE, window_size, precision, entity)
        # window_size is a timedelta: the minimum spacing between emissions.
        self._time_window = window_size
        self._last_emitted_at = None
        self._only_numbers = False

    def _filter_state(self, new_state):
        """Implement the filter."""
        # Skip unless nothing was emitted yet or the previous emission is at
        # least one full window old.
        window_start = new_state.timestamp - self._time_window
        if self._last_emitted_at and self._last_emitted_at > window_start:
            self._skip_processing = True
        else:
            self._last_emitted_at = new_state.timestamp
            self._skip_processing = False
        return new_state
| {
"repo_name": "partofthething/home-assistant",
"path": "homeassistant/components/filter/sensor.py",
"copies": "3",
"size": "20713",
"license": "mit",
"hash": 7859111957291327000,
"line_mean": 30.1942771084,
"line_max": 88,
"alpha_frac": 0.5822430358,
"autogenerated": false,
"ratio": 4.0878231695283205,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002651546690513061,
"num_lines": 664
} |
"""Allows the creation of a sensor that filters state property."""
from __future__ import annotations
from collections import Counter, deque
from copy import copy
from datetime import timedelta
from functools import partial
import logging
from numbers import Number
import statistics
import voluptuous as vol
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.input_number import DOMAIN as INPUT_NUMBER_DOMAIN
from homeassistant.components.recorder import history
from homeassistant.components.sensor import (
DEVICE_CLASSES as SENSOR_DEVICE_CLASSES,
DOMAIN as SENSOR_DOMAIN,
PLATFORM_SCHEMA,
SensorEntity,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
CONF_ENTITY_ID,
CONF_NAME,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.util.decorator import Registry
import homeassistant.util.dt as dt_util
from . import DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)
# Filter identifiers, used both in YAML configuration and as keys in the
# FILTERS registry below.
FILTER_NAME_RANGE = "range"
FILTER_NAME_LOWPASS = "lowpass"
FILTER_NAME_OUTLIER = "outlier"
FILTER_NAME_THROTTLE = "throttle"
FILTER_NAME_TIME_THROTTLE = "time_throttle"
FILTER_NAME_TIME_SMA = "time_simple_moving_average"
# Registry mapping filter names to Filter subclasses; populated by the
# @FILTERS.register decorators on the classes below.
FILTERS = Registry()
# YAML configuration keys.
CONF_FILTERS = "filters"
CONF_FILTER_NAME = "filter"
CONF_FILTER_WINDOW_SIZE = "window_size"
CONF_FILTER_PRECISION = "precision"
CONF_FILTER_RADIUS = "radius"
CONF_FILTER_TIME_CONSTANT = "time_constant"
CONF_FILTER_LOWER_BOUND = "lower_bound"
CONF_FILTER_UPPER_BOUND = "upper_bound"
CONF_TIME_SMA_TYPE = "type"
TIME_SMA_LAST = "last"
# A filter window is sized either by a number of events or by a time span.
WINDOW_SIZE_UNIT_NUMBER_EVENTS = 1
WINDOW_SIZE_UNIT_TIME = 2
# Defaults applied when the corresponding option is omitted.
DEFAULT_WINDOW_SIZE = 1
DEFAULT_PRECISION = 2
DEFAULT_FILTER_RADIUS = 2.0
DEFAULT_FILTER_TIME_CONSTANT = 10
NAME_TEMPLATE = "{} filter"
ICON = "mdi:chart-line-variant"
# Base schema shared by every filter: only the rounding precision.
FILTER_SCHEMA = vol.Schema(
    {vol.Optional(CONF_FILTER_PRECISION, default=DEFAULT_PRECISION): vol.Coerce(int)}
)
# Per-filter schemas, each extending the base schema with its own options.
FILTER_OUTLIER_SCHEMA = FILTER_SCHEMA.extend(
    {
        vol.Required(CONF_FILTER_NAME): FILTER_NAME_OUTLIER,
        vol.Optional(CONF_FILTER_WINDOW_SIZE, default=DEFAULT_WINDOW_SIZE): vol.Coerce(
            int
        ),
        vol.Optional(CONF_FILTER_RADIUS, default=DEFAULT_FILTER_RADIUS): vol.Coerce(
            float
        ),
    }
)
FILTER_LOWPASS_SCHEMA = FILTER_SCHEMA.extend(
    {
        vol.Required(CONF_FILTER_NAME): FILTER_NAME_LOWPASS,
        vol.Optional(CONF_FILTER_WINDOW_SIZE, default=DEFAULT_WINDOW_SIZE): vol.Coerce(
            int
        ),
        vol.Optional(
            CONF_FILTER_TIME_CONSTANT, default=DEFAULT_FILTER_TIME_CONSTANT
        ): vol.Coerce(int),
    }
)
FILTER_RANGE_SCHEMA = FILTER_SCHEMA.extend(
    {
        vol.Required(CONF_FILTER_NAME): FILTER_NAME_RANGE,
        vol.Optional(CONF_FILTER_LOWER_BOUND): vol.Coerce(float),
        vol.Optional(CONF_FILTER_UPPER_BOUND): vol.Coerce(float),
    }
)
# Time-based filters take a timedelta window instead of an event count.
FILTER_TIME_SMA_SCHEMA = FILTER_SCHEMA.extend(
    {
        vol.Required(CONF_FILTER_NAME): FILTER_NAME_TIME_SMA,
        vol.Optional(CONF_TIME_SMA_TYPE, default=TIME_SMA_LAST): vol.In(
            [TIME_SMA_LAST]
        ),
        vol.Required(CONF_FILTER_WINDOW_SIZE): vol.All(
            cv.time_period, cv.positive_timedelta
        ),
    }
)
FILTER_THROTTLE_SCHEMA = FILTER_SCHEMA.extend(
    {
        vol.Required(CONF_FILTER_NAME): FILTER_NAME_THROTTLE,
        vol.Optional(CONF_FILTER_WINDOW_SIZE, default=DEFAULT_WINDOW_SIZE): vol.Coerce(
            int
        ),
    }
)
FILTER_TIME_THROTTLE_SCHEMA = FILTER_SCHEMA.extend(
    {
        vol.Required(CONF_FILTER_NAME): FILTER_NAME_TIME_THROTTLE,
        vol.Required(CONF_FILTER_WINDOW_SIZE): vol.All(
            cv.time_period, cv.positive_timedelta
        ),
    }
)
# Platform schema: a source entity, an optional friendly name and the
# ordered list of filters to apply.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_ENTITY_ID): vol.Any(
            cv.entity_domain(SENSOR_DOMAIN),
            cv.entity_domain(BINARY_SENSOR_DOMAIN),
            cv.entity_domain(INPUT_NUMBER_DOMAIN),
        ),
        vol.Optional(CONF_NAME): cv.string,
        vol.Required(CONF_FILTERS): vol.All(
            cv.ensure_list,
            [
                vol.Any(
                    FILTER_OUTLIER_SCHEMA,
                    FILTER_LOWPASS_SCHEMA,
                    FILTER_TIME_SMA_SCHEMA,
                    FILTER_THROTTLE_SCHEMA,
                    FILTER_TIME_THROTTLE_SCHEMA,
                    FILTER_RANGE_SCHEMA,
                )
            ],
        ),
    }
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the template sensors."""
    await async_setup_reload_service(hass, DOMAIN, PLATFORMS)

    name = config.get(CONF_NAME)
    entity_id = config.get(CONF_ENTITY_ID)

    # Look up each filter class in the registry by its "filter" option and
    # pass the remaining options through as keyword arguments.
    configured_filters = []
    for conf in config[CONF_FILTERS]:
        cls = FILTERS[conf.pop(CONF_FILTER_NAME)]
        configured_filters.append(cls(entity=entity_id, **conf))

    async_add_entities([SensorFilter(name, entity_id, configured_filters)])
class SensorFilter(SensorEntity):
    """Representation of a Filter Sensor.

    Tracks a source entity and exposes its state after running it through
    the configured chain of filters.
    """
    def __init__(self, name, entity_id, filters):
        """Initialize the sensor.

        :param name: friendly name of this filter sensor
        :param entity_id: entity id of the source entity to track
        :param filters: ordered list of Filter instances to apply
        """
        self._name = name
        self._entity = entity_id
        # Unit, icon and device class are adopted lazily from the source
        # entity on the first processed state.
        self._unit_of_measurement = None
        self._state = None
        self._filters = filters
        self._icon = None
        self._device_class = None
    @callback
    def _update_filter_sensor_state_event(self, event):
        """Handle device state changes."""
        _LOGGER.debug("Update filter on event: %s", event)
        self._update_filter_sensor_state(event.data.get("new_state"))
    @callback
    def _update_filter_sensor_state(self, new_state, update_ha=True):
        """Process device state changes.

        :param new_state: new State of the source entity (may be None)
        :param update_ha: when False (history replay) do not write HA state
        """
        if new_state is None:
            _LOGGER.warning(
                "While updating filter %s, the new_state is None", self._name
            )
            self._state = None
            self.async_write_ha_state()
            return
        # Unknown/unavailable states are propagated verbatim, not filtered.
        if new_state.state in [STATE_UNKNOWN, STATE_UNAVAILABLE]:
            self._state = new_state.state
            self.async_write_ha_state()
            return
        temp_state = new_state
        try:
            # Run the state through every filter in order; each filter gets a
            # copy so it cannot mutate the original State object.
            for filt in self._filters:
                filtered_state = filt.filter_state(copy(temp_state))
                _LOGGER.debug(
                    "%s(%s=%s) -> %s",
                    filt.name,
                    self._entity,
                    temp_state.state,
                    "skip" if filt.skip_processing else filtered_state.state,
                )
                if filt.skip_processing:
                    # Throttle-style filters may drop this sample entirely.
                    return
                temp_state = filtered_state
        except ValueError:
            _LOGGER.error(
                "Could not convert state: %s (%s) to number",
                new_state.state,
                type(new_state.state),
            )
            return
        self._state = temp_state.state
        # Adopt presentation attributes from the source entity once.
        if self._icon is None:
            self._icon = new_state.attributes.get(ATTR_ICON, ICON)
        if (
            self._device_class is None
            and new_state.attributes.get(ATTR_DEVICE_CLASS) in SENSOR_DEVICE_CLASSES
        ):
            self._device_class = new_state.attributes.get(ATTR_DEVICE_CLASS)
        if self._unit_of_measurement is None:
            self._unit_of_measurement = new_state.attributes.get(
                ATTR_UNIT_OF_MEASUREMENT
            )
        if update_ha:
            self.async_write_ha_state()
    async def async_added_to_hass(self):
        """Register callbacks.

        When the recorder is available, pre-fill the filters by replaying
        enough history to cover the largest configured window.
        """
        if "recorder" in self.hass.config.components:
            history_list = []
            largest_window_items = 0
            largest_window_time = timedelta(0)
            # Determine the largest window_size by type
            for filt in self._filters:
                if (
                    filt.window_unit == WINDOW_SIZE_UNIT_NUMBER_EVENTS
                    and largest_window_items < filt.window_size
                ):
                    largest_window_items = filt.window_size
                elif (
                    filt.window_unit == WINDOW_SIZE_UNIT_TIME
                    and largest_window_time < filt.window_size
                ):
                    largest_window_time = filt.window_size
            # Retrieve the largest window_size of each type
            if largest_window_items > 0:
                filter_history = await self.hass.async_add_executor_job(
                    partial(
                        history.get_last_state_changes,
                        self.hass,
                        largest_window_items,
                        entity_id=self._entity,
                    )
                )
                if self._entity in filter_history:
                    history_list.extend(filter_history[self._entity])
            if largest_window_time > timedelta(seconds=0):
                start = dt_util.utcnow() - largest_window_time
                filter_history = await self.hass.async_add_executor_job(
                    partial(
                        history.state_changes_during_period,
                        self.hass,
                        start,
                        entity_id=self._entity,
                    )
                )
                # Skip states already fetched by the count-based query above.
                if self._entity in filter_history:
                    history_list.extend(
                        [
                            state
                            for state in filter_history[self._entity]
                            if state not in history_list
                        ]
                    )
            # Sort the window states
            history_list = sorted(history_list, key=lambda s: s.last_updated)
            _LOGGER.debug(
                "Loading from history: %s",
                [(s.state, s.last_updated) for s in history_list],
            )
            # Replay history through the filter chain
            for state in history_list:
                if state.state not in [STATE_UNKNOWN, STATE_UNAVAILABLE, None]:
                    self._update_filter_sensor_state(state, False)
        self.async_on_remove(
            async_track_state_change_event(
                self.hass, [self._entity], self._update_filter_sensor_state_event
            )
        )
    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name
    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state
    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return self._icon
    @property
    def unit_of_measurement(self):
        """Return the unit_of_measurement of the device."""
        return self._unit_of_measurement
    @property
    def should_poll(self):
        """No polling needed."""
        return False
    @property
    def extra_state_attributes(self):
        """Return the state attributes of the sensor."""
        return {ATTR_ENTITY_ID: self._entity}
    @property
    def device_class(self):
        """Return device class."""
        return self._device_class
class FilterState:
    """State abstraction for filter usage."""

    def __init__(self, state):
        """Initialize with HA State object."""
        self.timestamp = state.last_updated
        value = state.state
        try:
            value = float(value)
        except ValueError:
            # Non-numeric states (e.g. "on"/"off") are kept verbatim.
            pass
        self.state = value

    def set_precision(self, precision):
        """Set precision of Number based states."""
        if not isinstance(self.state, Number):
            return
        rounded = round(float(self.state), precision)
        # Precision 0 yields a clean int instead of a trailing ".0" float.
        self.state = int(rounded) if precision == 0 else rounded

    def __str__(self):
        """Return state as the string representation of FilterState."""
        return str(self.state)

    def __repr__(self):
        """Return timestamp and state as the representation of FilterState."""
        return f"{self.timestamp} : {self.state}"
class Filter:
    """Filter skeleton."""
    def __init__(
        self,
        name,
        window_size: int = 1,
        precision: int | None = None,
        entity: str | None = None,
    ):
        """Initialize common attributes.

        :param window_size: size of the sliding window that holds previous values
        :param precision: round filtered value to precision value
        :param entity: used for debugging only
        """
        # window_size is either an int (event count) or a timedelta; only
        # count-based windows buffer states in the deque.
        if isinstance(window_size, int):
            self.states = deque(maxlen=window_size)
            self.window_unit = WINDOW_SIZE_UNIT_NUMBER_EVENTS
        else:
            self.states = deque(maxlen=0)
            self.window_unit = WINDOW_SIZE_UNIT_TIME
        self.precision = precision
        self._name = name
        self._entity = entity
        self._skip_processing = False
        self._window_size = window_size
        # When True, subclasses buffer raw input states instead of filtered
        # output (see OutlierFilter).
        self._store_raw = False
        # When False, non-numeric states are accepted (throttle filters).
        self._only_numbers = True
    @property
    def window_size(self):
        """Return window size."""
        return self._window_size
    @property
    def name(self):
        """Return filter name."""
        return self._name
    @property
    def skip_processing(self):
        """Return whether the current filter_state should be skipped."""
        return self._skip_processing
    def _filter_state(self, new_state):
        """Implement filter."""
        raise NotImplementedError()
    def filter_state(self, new_state):
        """Implement a common interface for filters.

        Raises ValueError when a numbers-only filter gets a non-numeric state.
        """
        fstate = FilterState(new_state)
        if self._only_numbers and not isinstance(fstate.state, Number):
            raise ValueError(f"State <{fstate.state}> is not a Number")
        filtered = self._filter_state(fstate)
        filtered.set_precision(self.precision)
        if self._store_raw:
            self.states.append(copy(FilterState(new_state)))
        else:
            self.states.append(copy(filtered))
        new_state.state = filtered.state
        return new_state
@FILTERS.register(FILTER_NAME_RANGE)
class RangeFilter(Filter, SensorEntity):
    """Range filter.

    Determines if new state is in the range of upper_bound and lower_bound.
    If not inside, lower or upper bound is returned instead.
    """
    # NOTE(review): the SensorEntity base looks unnecessary — filters are not
    # entities themselves and no entity API is used here; confirm intent.
    def __init__(
        self,
        entity,
        precision: int | None = DEFAULT_PRECISION,
        lower_bound: float | None = None,
        upper_bound: float | None = None,
    ):
        """Initialize Filter.

        :param upper_bound: band upper bound
        :param lower_bound: band lower bound
        """
        super().__init__(FILTER_NAME_RANGE, precision=precision, entity=entity)
        self._lower_bound = lower_bound
        self._upper_bound = upper_bound
        # Counts how many samples were clamped at each bound (debug only).
        self._stats_internal = Counter()
    def _filter_state(self, new_state):
        """Implement the range filter."""
        if self._upper_bound is not None and new_state.state > self._upper_bound:
            # Clamp to the upper bound.
            self._stats_internal["erasures_up"] += 1
            _LOGGER.debug(
                "Upper outlier nr. %s in %s: %s",
                self._stats_internal["erasures_up"],
                self._entity,
                new_state,
            )
            new_state.state = self._upper_bound
        elif self._lower_bound is not None and new_state.state < self._lower_bound:
            # Clamp to the lower bound.
            self._stats_internal["erasures_low"] += 1
            _LOGGER.debug(
                "Lower outlier nr. %s in %s: %s",
                self._stats_internal["erasures_low"],
                self._entity,
                new_state,
            )
            new_state.state = self._lower_bound
        return new_state
@FILTERS.register(FILTER_NAME_OUTLIER)
class OutlierFilter(Filter, SensorEntity):
    """BASIC outlier filter.

    Determines if new state is in a band around the median.
    """

    def __init__(self, window_size, precision, entity, radius: float):
        """Initialize Filter.

        :param radius: band radius
        """
        super().__init__(FILTER_NAME_OUTLIER, window_size, precision, entity)
        self._radius = radius
        self._stats_internal = Counter()
        # Keep the raw samples so the median reflects real measurements.
        self._store_raw = True

    def _filter_state(self, new_state):
        """Implement the outlier filter."""
        samples = [s.state for s in self.states]
        median = statistics.median(samples) if samples else 0

        window_is_full = len(self.states) == self.states.maxlen
        if window_is_full and abs(new_state.state - median) > self._radius:
            # Replace the outlier with the window median.
            self._stats_internal["erasures"] += 1
            _LOGGER.debug(
                "Outlier nr. %s in %s: %s",
                self._stats_internal["erasures"],
                self._entity,
                new_state,
            )
            new_state.state = median
        return new_state
@FILTERS.register(FILTER_NAME_LOWPASS)
class LowPassFilter(Filter, SensorEntity):
    """BASIC Low Pass Filter."""
    def __init__(self, window_size, precision, entity, time_constant: int):
        """Initialize Filter.

        :param time_constant: smoothing strength; larger values weight the
            previous output more heavily
        """
        super().__init__(FILTER_NAME_LOWPASS, window_size, precision, entity)
        self._time_constant = time_constant
    def _filter_state(self, new_state):
        """Implement the low pass filter."""
        if not self.states:
            # First sample: nothing to smooth against yet.
            return new_state
        # Exponential smoothing: out = (1 - a) * prev + a * new, a = 1/tc.
        new_weight = 1.0 / self._time_constant
        prev_weight = 1.0 - new_weight
        new_state.state = (
            prev_weight * self.states[-1].state + new_weight * new_state.state
        )
        return new_state
@FILTERS.register(FILTER_NAME_TIME_SMA)
class TimeSMAFilter(Filter, SensorEntity):
    """Simple Moving Average (SMA) Filter.

    The window_size is determined by time, and SMA is time weighted.
    """
    def __init__(
        self, window_size, precision, entity, type
    ):  # pylint: disable=redefined-builtin
        """Initialize Filter.

        :param type: type of algorithm used to connect discrete values
        """
        super().__init__(FILTER_NAME_TIME_SMA, window_size, precision, entity)
        # window_size is a timedelta here.
        self._time_window = window_size
        # Most recent state evicted from the queue; provides the value in
        # effect at the left edge of the window.
        self.last_leak = None
        # States currently inside the time window, oldest first.
        self.queue = deque()
    def _leak(self, left_boundary):
        """Remove timeouted elements."""
        # Pop states that fell entirely out of the window, remembering the
        # last one popped for use as the left-edge value.
        while self.queue:
            if self.queue[0].timestamp + self._time_window <= left_boundary:
                self.last_leak = self.queue.popleft()
            else:
                return
    def _filter_state(self, new_state):
        """Implement the Simple Moving Average filter."""
        self._leak(new_state.timestamp)
        self.queue.append(copy(new_state))
        # Time-weighted average: each state contributes its value for the
        # duration until the next state's timestamp.
        moving_sum = 0
        start = new_state.timestamp - self._time_window
        prev_state = self.last_leak or self.queue[0]
        for state in self.queue:
            moving_sum += (state.timestamp - start).total_seconds() * prev_state.state
            start = state.timestamp
            prev_state = state
        new_state.state = moving_sum / self._time_window.total_seconds()
        return new_state
@FILTERS.register(FILTER_NAME_THROTTLE)
class ThrottleFilter(Filter, SensorEntity):
    """Throttle Filter.

    One sample per window.
    """
    def __init__(self, window_size, precision, entity):
        """Initialize Filter."""
        super().__init__(FILTER_NAME_THROTTLE, window_size, precision, entity)
        # Throttling passes states through unmodified, so non-numeric states
        # are acceptable too.
        self._only_numbers = False
    def _filter_state(self, new_state):
        """Implement the throttle filter."""
        # Emit when the buffer is empty (window start) or full (window done);
        # otherwise swallow the sample.
        if not self.states or len(self.states) == self.states.maxlen:
            self.states.clear()
            self._skip_processing = False
        else:
            self._skip_processing = True
        return new_state
@FILTERS.register(FILTER_NAME_TIME_THROTTLE)
class TimeThrottleFilter(Filter, SensorEntity):
    """Time Throttle Filter.

    One sample per time period.
    """

    def __init__(self, window_size, precision, entity):
        """Initialize Filter."""
        super().__init__(FILTER_NAME_TIME_THROTTLE, window_size, precision, entity)
        self._time_window = window_size
        self._last_emitted_at = None
        # NOTE(review): presumably lets non-numeric states through the base
        # filter machinery — confirm against the Filter base class.
        self._only_numbers = False

    def _filter_state(self, new_state):
        """Implement the filter."""
        window_start = new_state.timestamp - self._time_window
        # Emit only if the last emitted sample is at least one window old.
        if not self._last_emitted_at or self._last_emitted_at <= window_start:
            self._last_emitted_at = new_state.timestamp
            self._skip_processing = False
        else:
            self._skip_processing = True
        return new_state
| {
"repo_name": "home-assistant/home-assistant",
"path": "homeassistant/components/filter/sensor.py",
"copies": "2",
"size": "20774",
"license": "apache-2.0",
"hash": 2504674727996988000,
"line_mean": 30.2390977444,
"line_max": 88,
"alpha_frac": 0.5823625686,
"autogenerated": false,
"ratio": 4.088565243062389,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000264755940225665,
"num_lines": 665
} |
"""Allows the creation of a sensor that filters state property."""
import logging
import statistics
from collections import deque, Counter
from numbers import Number
from functools import partial
from copy import copy
from datetime import timedelta
from typing import Optional
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME,
CONF_ENTITY_ID,
ATTR_UNIT_OF_MEASUREMENT,
ATTR_ENTITY_ID,
ATTR_ICON,
STATE_UNKNOWN,
STATE_UNAVAILABLE,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.util.decorator import Registry
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_state_change
from homeassistant.components import history
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
FILTER_NAME_RANGE = "range"
FILTER_NAME_LOWPASS = "lowpass"
FILTER_NAME_OUTLIER = "outlier"
FILTER_NAME_THROTTLE = "throttle"
FILTER_NAME_TIME_THROTTLE = "time_throttle"
FILTER_NAME_TIME_SMA = "time_simple_moving_average"
FILTERS = Registry()
CONF_FILTERS = "filters"
CONF_FILTER_NAME = "filter"
CONF_FILTER_WINDOW_SIZE = "window_size"
CONF_FILTER_PRECISION = "precision"
CONF_FILTER_RADIUS = "radius"
CONF_FILTER_TIME_CONSTANT = "time_constant"
CONF_FILTER_LOWER_BOUND = "lower_bound"
CONF_FILTER_UPPER_BOUND = "upper_bound"
CONF_TIME_SMA_TYPE = "type"
TIME_SMA_LAST = "last"
WINDOW_SIZE_UNIT_NUMBER_EVENTS = 1
WINDOW_SIZE_UNIT_TIME = 2
DEFAULT_WINDOW_SIZE = 1
DEFAULT_PRECISION = 2
DEFAULT_FILTER_RADIUS = 2.0
DEFAULT_FILTER_TIME_CONSTANT = 10
NAME_TEMPLATE = "{} filter"
ICON = "mdi:chart-line-variant"
FILTER_SCHEMA = vol.Schema(
{vol.Optional(CONF_FILTER_PRECISION, default=DEFAULT_PRECISION): vol.Coerce(int)}
)
FILTER_OUTLIER_SCHEMA = FILTER_SCHEMA.extend(
{
vol.Required(CONF_FILTER_NAME): FILTER_NAME_OUTLIER,
vol.Optional(CONF_FILTER_WINDOW_SIZE, default=DEFAULT_WINDOW_SIZE): vol.Coerce(
int
),
vol.Optional(CONF_FILTER_RADIUS, default=DEFAULT_FILTER_RADIUS): vol.Coerce(
float
),
}
)
FILTER_LOWPASS_SCHEMA = FILTER_SCHEMA.extend(
{
vol.Required(CONF_FILTER_NAME): FILTER_NAME_LOWPASS,
vol.Optional(CONF_FILTER_WINDOW_SIZE, default=DEFAULT_WINDOW_SIZE): vol.Coerce(
int
),
vol.Optional(
CONF_FILTER_TIME_CONSTANT, default=DEFAULT_FILTER_TIME_CONSTANT
): vol.Coerce(int),
}
)
FILTER_RANGE_SCHEMA = FILTER_SCHEMA.extend(
{
vol.Required(CONF_FILTER_NAME): FILTER_NAME_RANGE,
vol.Optional(CONF_FILTER_LOWER_BOUND): vol.Coerce(float),
vol.Optional(CONF_FILTER_UPPER_BOUND): vol.Coerce(float),
}
)
FILTER_TIME_SMA_SCHEMA = FILTER_SCHEMA.extend(
{
vol.Required(CONF_FILTER_NAME): FILTER_NAME_TIME_SMA,
vol.Optional(CONF_TIME_SMA_TYPE, default=TIME_SMA_LAST): vol.In(
[TIME_SMA_LAST]
),
vol.Required(CONF_FILTER_WINDOW_SIZE): vol.All(
cv.time_period, cv.positive_timedelta
),
}
)
FILTER_THROTTLE_SCHEMA = FILTER_SCHEMA.extend(
{
vol.Required(CONF_FILTER_NAME): FILTER_NAME_THROTTLE,
vol.Optional(CONF_FILTER_WINDOW_SIZE, default=DEFAULT_WINDOW_SIZE): vol.Coerce(
int
),
}
)
FILTER_TIME_THROTTLE_SCHEMA = FILTER_SCHEMA.extend(
{
vol.Required(CONF_FILTER_NAME): FILTER_NAME_TIME_THROTTLE,
vol.Required(CONF_FILTER_WINDOW_SIZE): vol.All(
cv.time_period, cv.positive_timedelta
),
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_FILTERS): vol.All(
cv.ensure_list,
[
vol.Any(
FILTER_OUTLIER_SCHEMA,
FILTER_LOWPASS_SCHEMA,
FILTER_TIME_SMA_SCHEMA,
FILTER_THROTTLE_SCHEMA,
FILTER_TIME_THROTTLE_SCHEMA,
FILTER_RANGE_SCHEMA,
)
],
),
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the filter sensor."""
    name = config.get(CONF_NAME)
    entity_id = config.get(CONF_ENTITY_ID)
    # Instantiate each configured filter: CONF_FILTER_NAME selects the class
    # from the FILTERS registry; the remaining schema keys become kwargs.
    filters = [
        FILTERS[_filter.pop(CONF_FILTER_NAME)](entity=entity_id, **_filter)
        for _filter in config[CONF_FILTERS]
    ]
    async_add_entities([SensorFilter(name, entity_id, filters)])
class SensorFilter(Entity):
    """Representation of a Filter Sensor.

    Wraps a source entity and republishes its state after running every
    update through the configured chain of filters.
    """

    def __init__(self, name, entity_id, filters):
        """Initialize the sensor.

        :param name: friendly name for this sensor
        :param entity_id: source entity whose state is filtered
        :param filters: ordered list of Filter instances to apply
        """
        self._name = name
        self._entity = entity_id
        self._unit_of_measurement = None
        self._state = None
        self._filters = filters
        self._icon = None

    async def async_added_to_hass(self):
        """Register callbacks."""

        @callback
        def filter_sensor_state_listener(entity, old_state, new_state, update_ha=True):
            """Handle device state changes."""
            if new_state.state in [STATE_UNKNOWN, STATE_UNAVAILABLE]:
                return
            temp_state = new_state
            try:
                # Run the state through every filter in order; a filter may
                # flag skip_processing to drop this sample entirely.
                for filt in self._filters:
                    filtered_state = filt.filter_state(copy(temp_state))
                    _LOGGER.debug(
                        "%s(%s=%s) -> %s",
                        filt.name,
                        self._entity,
                        temp_state.state,
                        "skip" if filt.skip_processing else filtered_state.state,
                    )
                    if filt.skip_processing:
                        return
                    temp_state = filtered_state
            except ValueError:
                # Bug fix: log the value that failed conversion instead of
                # the previously stored (already filtered) self._state.
                _LOGGER.error(
                    "Could not convert state: %s to number", temp_state.state
                )
                return
            self._state = temp_state.state
            # Inherit icon and unit from the source entity on first update.
            if self._icon is None:
                self._icon = new_state.attributes.get(ATTR_ICON, ICON)
            if self._unit_of_measurement is None:
                self._unit_of_measurement = new_state.attributes.get(
                    ATTR_UNIT_OF_MEASUREMENT
                )
            if update_ha:
                self.async_schedule_update_ha_state()

        if "recorder" in self.hass.config.components:
            history_list = []
            largest_window_items = 0
            largest_window_time = timedelta(0)
            # Determine the largest window_size by type
            for filt in self._filters:
                if (
                    filt.window_unit == WINDOW_SIZE_UNIT_NUMBER_EVENTS
                    and largest_window_items < filt.window_size
                ):
                    largest_window_items = filt.window_size
                elif (
                    filt.window_unit == WINDOW_SIZE_UNIT_TIME
                    and largest_window_time < filt.window_size
                ):
                    largest_window_time = filt.window_size
            # Retrieve the largest window_size of each type
            if largest_window_items > 0:
                filter_history = await self.hass.async_add_job(
                    partial(
                        history.get_last_state_changes,
                        self.hass,
                        largest_window_items,
                        entity_id=self._entity,
                    )
                )
                if self._entity in filter_history:
                    history_list.extend(filter_history[self._entity])
            if largest_window_time > timedelta(seconds=0):
                start = dt_util.utcnow() - largest_window_time
                filter_history = await self.hass.async_add_job(
                    partial(
                        history.state_changes_during_period,
                        self.hass,
                        start,
                        entity_id=self._entity,
                    )
                )
                if self._entity in filter_history:
                    # De-duplicate states already fetched by count.
                    history_list.extend(
                        [
                            state
                            for state in filter_history[self._entity]
                            if state not in history_list
                        ]
                    )
            # Sort the window states
            history_list = sorted(history_list, key=lambda s: s.last_updated)
            _LOGGER.debug(
                "Loading from history: %s",
                [(s.state, s.last_updated) for s in history_list],
            )
            # Replay history through the filter chain
            prev_state = None
            for state in history_list:
                filter_sensor_state_listener(self._entity, prev_state, state, False)
                prev_state = state
        async_track_state_change(self.hass, self._entity, filter_sensor_state_listener)

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return self._icon

    @property
    def unit_of_measurement(self):
        """Return the unit_of_measurement of the device."""
        return self._unit_of_measurement

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def device_state_attributes(self):
        """Return the state attributes of the sensor."""
        state_attr = {ATTR_ENTITY_ID: self._entity}
        return state_attr
class FilterState:
    """State abstraction for filter usage."""

    def __init__(self, state):
        """Initialize with HA State object.

        Numeric state strings are converted to float; anything else is
        stored verbatim.
        """
        self.timestamp = state.last_updated
        value = state.state
        try:
            value = float(value)
        except ValueError:
            # Non-numeric states (e.g. "on"/"unavailable") pass through.
            pass
        self.state = value

    def set_precision(self, precision):
        """Set precision of Number based states."""
        if not isinstance(self.state, Number):
            return
        self.state = round(float(self.state), precision)

    def __str__(self):
        """Return state as the string representation of FilterState."""
        return str(self.state)

    def __repr__(self):
        """Return timestamp and state as the representation of FilterState."""
        return f"{self.timestamp} : {self.state}"
class Filter:
    """Filter skeleton.

    Subclasses implement _filter_state(); filter_state() wraps it with
    FilterState conversion, precision rounding and window bookkeeping.
    """

    def __init__(
        self,
        name,
        window_size: int = 1,
        precision: Optional[int] = None,
        entity: Optional[str] = None,
    ):
        """Initialize common attributes.

        :param window_size: size of the sliding window that holds previous values
        :param precision: round filtered value to precision value
        :param entity: used for debugging only
        """
        if isinstance(window_size, int):
            # Window measured in number of events.
            self.states = deque(maxlen=window_size)
            self.window_unit = WINDOW_SIZE_UNIT_NUMBER_EVENTS
        else:
            # Window measured in time (a timedelta); the deque stays empty.
            self.states = deque(maxlen=0)
            self.window_unit = WINDOW_SIZE_UNIT_TIME
        self.precision = precision
        self._name = name
        self._entity = entity
        self._skip_processing = False
        self._window_size = window_size
        # When True, subclasses keep raw (unfiltered) values in the window.
        self._store_raw = False

    @property
    def window_size(self):
        """Return window size."""
        return self._window_size

    @property
    def name(self):
        """Return filter name."""
        return self._name

    @property
    def skip_processing(self):
        """Return whether the current filter_state should be skipped."""
        return self._skip_processing

    def _filter_state(self, new_state):
        """Implement filter."""
        raise NotImplementedError()

    def filter_state(self, new_state):
        """Implement a common interface for filters."""
        filtered = self._filter_state(FilterState(new_state))
        filtered.set_precision(self.precision)
        # Record either the raw input or the filtered output in the window,
        # depending on what the concrete filter needs for its next sample.
        if self._store_raw:
            self.states.append(copy(FilterState(new_state)))
        else:
            self.states.append(copy(filtered))
        new_state.state = filtered.state
        return new_state
@FILTERS.register(FILTER_NAME_RANGE)
class RangeFilter(Filter):
    """Range filter.

    Determines if new state is in the range of upper_bound and lower_bound.
    If not inside, lower or upper bound is returned instead.
    """

    def __init__(
        self,
        entity,
        precision: Optional[int] = DEFAULT_PRECISION,
        lower_bound: Optional[float] = None,
        upper_bound: Optional[float] = None,
    ):
        """Initialize Filter.

        :param upper_bound: band upper bound
        :param lower_bound: band lower bound
        """
        super().__init__(FILTER_NAME_RANGE, precision=precision, entity=entity)
        self._lower_bound = lower_bound
        self._upper_bound = upper_bound
        self._stats_internal = Counter()

    def _filter_state(self, new_state):
        """Implement the range filter."""
        value = new_state.state
        if self._upper_bound is not None and value > self._upper_bound:
            # Clamp down to the upper bound and count the erasure.
            self._stats_internal["erasures_up"] += 1
            _LOGGER.debug(
                "Upper outlier nr. %s in %s: %s",
                self._stats_internal["erasures_up"],
                self._entity,
                new_state,
            )
            new_state.state = self._upper_bound
            return new_state
        if self._lower_bound is not None and value < self._lower_bound:
            # Clamp up to the lower bound and count the erasure.
            self._stats_internal["erasures_low"] += 1
            _LOGGER.debug(
                "Lower outlier nr. %s in %s: %s",
                self._stats_internal["erasures_low"],
                self._entity,
                new_state,
            )
            new_state.state = self._lower_bound
        return new_state
@FILTERS.register(FILTER_NAME_OUTLIER)
class OutlierFilter(Filter):
    """BASIC outlier filter.

    Determines if new state is in a band around the median.
    """

    def __init__(self, window_size, precision, entity, radius: float):
        """Initialize Filter.

        :param radius: band radius
        """
        super().__init__(FILTER_NAME_OUTLIER, window_size, precision, entity)
        self._radius = radius
        self._stats_internal = Counter()
        # Keep raw input values (not filtered output) in the window, so the
        # median is computed over what the source actually reported.
        self._store_raw = True

    def _filter_state(self, new_state):
        """Implement the outlier filter."""
        median = statistics.median([s.state for s in self.states]) if self.states else 0
        # Only reject once the window is full; an out-of-band value is
        # replaced by the window median.
        if (
            len(self.states) == self.states.maxlen
            and abs(new_state.state - median) > self._radius
        ):
            self._stats_internal["erasures"] += 1
            _LOGGER.debug(
                "Outlier nr. %s in %s: %s",
                self._stats_internal["erasures"],
                self._entity,
                new_state,
            )
            new_state.state = median
        return new_state
@FILTERS.register(FILTER_NAME_LOWPASS)
class LowPassFilter(Filter):
    """BASIC Low Pass Filter."""

    def __init__(self, window_size, precision, entity, time_constant: int):
        """Initialize Filter."""
        super().__init__(FILTER_NAME_LOWPASS, window_size, precision, entity)
        self._time_constant = time_constant

    def _filter_state(self, new_state):
        """Implement the low pass filter."""
        # First sample passes through untouched.
        if not self.states:
            return new_state
        # Exponential smoothing: blend the previous output with the new
        # value, weighted by 1/time_constant.
        alpha = 1.0 / self._time_constant
        previous = self.states[-1].state
        new_state.state = (1.0 - alpha) * previous + alpha * new_state.state
        return new_state
@FILTERS.register(FILTER_NAME_TIME_SMA)
class TimeSMAFilter(Filter):
    """Simple Moving Average (SMA) Filter.

    The window_size is determined by time, and SMA is time weighted.
    """

    def __init__(
        self, window_size, precision, entity, type
    ):  # pylint: disable=redefined-builtin
        """Initialize Filter.

        :param window_size: timedelta defining the averaging window
        :param precision: round filtered value to this many decimals
        :param entity: source entity id (used for debugging)
        :param type: type of algorithm used to connect discrete values
        """
        super().__init__(FILTER_NAME_TIME_SMA, window_size, precision, entity)
        self._time_window = window_size
        # Most recent sample that aged out of the window; it supplies the
        # value in effect at the window's left edge.
        self.last_leak = None
        self.queue = deque()

    def _leak(self, left_boundary):
        """Remove timeouted elements."""
        while self.queue:
            if self.queue[0].timestamp + self._time_window <= left_boundary:
                self.last_leak = self.queue.popleft()
            else:
                return

    def _filter_state(self, new_state):
        """Implement the Simple Moving Average filter."""
        self._leak(new_state.timestamp)
        self.queue.append(copy(new_state))
        # Time-weighted sum: each value is weighted by how long it was the
        # current state inside the window.
        moving_sum = 0
        start = new_state.timestamp - self._time_window
        prev_state = self.last_leak or self.queue[0]
        for state in self.queue:
            moving_sum += (state.timestamp - start).total_seconds() * prev_state.state
            start = state.timestamp
            prev_state = state
        new_state.state = moving_sum / self._time_window.total_seconds()
        return new_state
@FILTERS.register(FILTER_NAME_THROTTLE)
class ThrottleFilter(Filter):
    """Throttle Filter.

    One sample per window.
    """

    def __init__(self, window_size, precision, entity):
        """Initialize Filter."""
        super().__init__(FILTER_NAME_THROTTLE, window_size, precision, entity)

    def _filter_state(self, new_state):
        """Implement the throttle filter."""
        # Emit the first sample of each window and skip the rest; once the
        # window deque fills up, clear it and start a new window.
        window_full = len(self.states) == self.states.maxlen
        if self.states and not window_full:
            self._skip_processing = True
        else:
            self.states.clear()
            self._skip_processing = False
        return new_state
@FILTERS.register(FILTER_NAME_TIME_THROTTLE)
class TimeThrottleFilter(Filter):
    """Time Throttle Filter.

    One sample per time period.
    """

    def __init__(self, window_size, precision, entity):
        """Initialize Filter."""
        super().__init__(FILTER_NAME_TIME_THROTTLE, window_size, precision, entity)
        self._time_window = window_size
        self._last_emitted_at = None

    def _filter_state(self, new_state):
        """Implement the filter."""
        # Emit only if the last emitted sample is at least one window old.
        cutoff = new_state.timestamp - self._time_window
        emit = not self._last_emitted_at or self._last_emitted_at <= cutoff
        if emit:
            self._last_emitted_at = new_state.timestamp
        self._skip_processing = not emit
        return new_state
| {
"repo_name": "qedi-r/home-assistant",
"path": "homeassistant/components/filter/sensor.py",
"copies": "3",
"size": "18820",
"license": "apache-2.0",
"hash": -2815479887214064000,
"line_mean": 30.3144758735,
"line_max": 88,
"alpha_frac": 0.5774176408,
"autogenerated": false,
"ratio": 4.109170305676856,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6186587946476856,
"avg_score": null,
"num_lines": null
} |
"""Allows the creation of node networks through equation strings.
Dependency Graph Expressions (dge) is a convenience API used to simplify the creation
of Maya DG node networks. Rather than scripting out many createNode, get/setAttr,
connectAttr commands, you can specify a string equation.
No compiled plug-ins are used. All created nodes are vanilla Maya nodes. Each created
node has notes added to it to describe its place in the equation
Example Usage
=============
::
from cmt.dge import dge
# Create a simple mathematical graph
loc = cmds.spaceLocator()[0]
result = dge("(x+3)*(2+x)", x="{}.tx".format(loc))
cmds.connectAttr(result, "{}.ty".format(loc))
# Use assignment operator to auto-connect
loc = cmds.spaceLocator()[0]
dge("y=x^2", x="{}.tx".format(loc), y="{}.ty".format(loc))
# More complex example with ternary operator and functions
soft_ik_percentage = dge(
"x > (1.0 - softIk)"
"? (1.0 - softIk) + softIk * (1.0 - exp(-(x - (1.0 - softIk)) / softIk)) "
": x",
        x="{}.outputX".format(stretch_scale_mdn),
softIk="{}.softIk".format(ik_control),
)
# Put the created nodes in a container
soft_ik_percentage = dge(
"x > (1.0 - softIk)"
"? (1.0 - softIk) + softIk * (1.0 - exp(-(x - (1.0 - softIk)) / softIk)) "
": x",
container="softik_equation",
        x="{}.outputX".format(stretch_scale_mdn),
softIk="{}.softIk".format(ik_control),
)
Supported Syntax
===================
Operators::
+ # addition
- # subtraction
* # multiplication
/ # division
^ # power,
?: # ternary
Functions::
abs(x)
exp(x)
clamp(x, min, max)
lerp(a, b, t)
min(x, y)
max(x, y)
sqrt(x)
cos(x)
sin(x)
tan(x)
acos(x)
asin(x)
atan(x)
distance(node1, node2)
Constants::
PI
E
Use Case
========
Before::
# (1.0 - softik)
one_minus = cmds.createNode(
"plusMinusAverage", name="{}_one_minus_softik".format(self.name)
)
cmds.setAttr("{}.operation".format(one_minus), 2)
cmds.setAttr("{}.input1D[0]".format(one_minus), 1)
cmds.connectAttr(softik, "{}.input1D[1]".format(one_minus))
# x - (1.0 - softik)
x_minus = cmds.createNode(
"plusMinusAverage", name="{}_x_minus_one_minus_softik".format(self.name)
)
cmds.setAttr("{}.operation".format(x_minus), 2)
cmds.connectAttr(self.percent_rest_distance, "{}.input1D[0]".format(x_minus))
cmds.connectAttr(
"{}.output1D".format(one_minus), "{}.input1D[1]".format(x_minus)
)
# -(x - (1.0 - softik))
negate = cmds.createNode(
"multDoubleLinear", name="{}_softik_negate".format(self.name)
)
cmds.setAttr("{}.input1".format(negate), -1)
cmds.connectAttr("{}.output1D".format(x_minus), "{}.input2".format(negate))
# -(x - (1.0 - softik)) / softik
divide = cmds.createNode(
"multiplyDivide", name="{}_softik_divide".format(self.name)
)
cmds.setAttr("{}.operation".format(divide), 2) # divide
cmds.connectAttr("{}.output".format(negate), "{}.input1X".format(divide))
cmds.connectAttr(softik, "{}.input2X".format(divide))
# exp(-(x - (1.0 - softIk)) / softIk)
exp = cmds.createNode("multiplyDivide", name="{}_softik_exp".format(self.name))
cmds.setAttr("{}.operation".format(exp), 3) # pow
cmds.setAttr("{}.input1X".format(exp), 2.71828)
cmds.connectAttr("{}.outputX".format(divide), "{}.input2X".format(exp))
# 1.0 - exp(-(x - (1.0 - softIk)) / softIk)
one_minus_exp = cmds.createNode(
"plusMinusAverage", name="{}_one_minus_exp".format(self.name)
)
cmds.setAttr("{}.operation".format(one_minus_exp), 2)
cmds.setAttr("{}.input1D[0]".format(one_minus_exp), 1)
cmds.connectAttr(
"{}.outputX".format(exp), "{}.input1D[1]".format(one_minus_exp)
)
# softik * (1.0 - exp(-(x - (1.0 - softIk)) / softIk))
mdl = cmds.createNode(
"multDoubleLinear", name="{}_softik_mdl".format(self.name)
)
cmds.connectAttr(softik, "{}.input1".format(mdl))
cmds.connectAttr("{}.output1D".format(one_minus_exp), "{}.input2".format(mdl))
# (1.0 - softik) + softik * (1.0 - exp(-(x - (1.0 - softIk)) / softIk))
adl = cmds.createNode("addDoubleLinear", name="{}_softik_adl".format(self.name))
cmds.connectAttr("{}.output1D".format(one_minus), "{}.input1".format(adl))
cmds.connectAttr("{}.output".format(mdl), "{}.input2".format(adl))
# Now output of adl is the % of the rest distance the ik handle should be from
# the start joint
# Only adjust the ik handle if it is less than the soft percentage threshold
cnd = cmds.createNode(
"condition",
name="{}_current_length_greater_than_soft_length".format(self.name),
)
cmds.setAttr("{}.operation".format(cnd), 2) # greater than
cmds.connectAttr(self.percent_rest_distance, "{}.firstTerm".format(cnd))
cmds.connectAttr("{}.output1D".format(one_minus), "{}.secondTerm".format(cnd))
cmds.connectAttr("{}.output".format(adl), "{}.colorIfTrueR".format(cnd))
cmds.connectAttr(self.percent_rest_distance, "{}.colorIfFalseR".format(cnd))
softik_percentage = "{}.outColorR".format(cnd)
After::
soft_ik_percentage = dge(
"x > (1.0 - softIk)"
"? (1.0 - softIk) + softIk * (1.0 - exp(-(x - (1.0 - softIk)) / softIk)) "
": x",
container="{}_softik".format(self.name),
x=self.percent_rest_distance,
softIk=softik,
)
"""
from pyparsing import (
Literal,
Word,
Group,
Forward,
alphas,
alphanums,
Regex,
ParseException,
CaselessKeyword,
Suppress,
delimitedList,
oneOf,
infixNotation,
opAssoc,
ParseResults,
Optional,
FollowedBy,
)
import maya.cmds as cmds
import math
import operator
from six import string_types
_parser = None
def dge(expression, container=None, **kwargs):
    """Create a Maya node network realizing *expression*.

    :param expression: equation string, e.g. ``"(x+3)*(2+x)"``.
    :param container: optional name of a container node to hold the
        created nodes.
    :param kwargs: mapping of expression variable names to node attributes
        (or numeric values).
    :return: the output attribute of the generated network.
    """
    global _parser
    # Lazily build a single shared parser; the grammar is expensive to set up.
    if _parser is None:
        _parser = DGParser()
    return _parser.eval(expression, container=container, **kwargs)
class DGParser(object):
    def __init__(self):
        """Build the expression grammar.

        expop :: '^'
        multop :: '*' | '/'
        addop :: '+' | '-'
        integer :: ['+' | '-'] '0'..'9'+
        atom :: PI | E | real | fn '(' expr ')' | '(' expr ')'
        factor :: atom [ expop factor ]*
        term :: factor [ multop factor ]*
        expr :: term [ addop term ]*
        """
        self.kwargs = {}
        self.expr_stack = []
        self.assignment_stack = []
        self.expression_string = None
        self.results = None
        self.container = None
        # Look up to optimize redundant nodes
        self.created_nodes = {}
        # Binary operator token -> node-builder method.
        self.opn = {
            "+": self.add,
            "-": self.subtract,
            "*": self.multiply,
            "/": self.divide,
            "^": self.pow,
        }
        # Function names available inside expressions.
        self.fn = {
            "abs": self.abs,
            "exp": self.exp,
            "clamp": self.clamp,
            "lerp": self.lerp,
            "min": self.min,
            "max": self.max,
            "sqrt": self.sqrt,
            "cos": self.cos,
            "sin": self.sin,
            "tan": self.tan,
            "acos": self.acos,
            "asin": self.asin,
            "atan": self.atan,
            "distance": self.distance,
        }
        self.conditionals = ["==", "!=", ">", ">=", "<", "<="]
        # use CaselessKeyword for e and pi, to avoid accidentally matching
        # functions that start with 'e' or 'pi' (such as 'exp'); Keyword
        # and CaselessKeyword only match whole words
        e = CaselessKeyword("E")
        pi = CaselessKeyword("PI")
        # fnumber = Combine(Word("+-"+nums, nums) +
        # Optional("." + Optional(Word(nums))) +
        # Optional(e + Word("+-"+nums, nums)))
        # or use provided pyparsing_common.number, but convert back to str:
        # fnumber = ppc.number().addParseAction(lambda t: str(t[0]))
        fnumber = Regex(r"[+-]?\d+(?:\.\d*)?(?:[eE][+-]?\d+)?")
        ident = Word(alphas, alphanums + "_$")
        plus, minus, mult, div = map(Literal, "+-*/")
        lpar, rpar = map(Suppress, "()")
        addop = plus | minus
        multop = mult | div
        expop = Literal("^")
        comparison_op = oneOf(" ".join(self.conditionals))
        qm, colon = map(Literal, "?:")
        assignment = Literal("=")
        # "x =" is an assignment only if not followed by another '=' (i.e.
        # it is not the "==" comparison operator).
        assignment_op = ident + assignment + ~FollowedBy(assignment)
        expr = Forward()
        expr_list = delimitedList(Group(expr))
        # add parse action that replaces the function identifier with a (name, number of args) tuple
        fn_call = (ident + lpar - Group(expr_list) + rpar).setParseAction(
            lambda t: t.insert(0, (t.pop(0), len(t[0])))
        )
        atom = (
            addop[...]
            + (
                (fn_call | pi | e | fnumber | ident).setParseAction(self.push_first)
                | Group(lpar + expr + rpar)
            )
        ).setParseAction(self.push_unary_minus)
        # by defining exponentiation as "atom [ ^ factor ]..." instead of "atom [ ^ atom ]...", we get right-to-left
        # exponents, instead of left-to-right that is, 2^3^2 = 2^(3^2), not (2^3)^2.
        factor = Forward()
        factor <<= atom + (expop + factor).setParseAction(self.push_first)[...]
        term = factor + (multop + factor).setParseAction(self.push_first)[...]
        expr <<= term + (addop + term).setParseAction(self.push_first)[...]
        comparison = expr + (comparison_op + expr).setParseAction(self.push_first)[...]
        ternary = (
            comparison + (qm + expr + colon + expr).setParseAction(self.push_first)[...]
        )
        # NOTE: 'assignment' is rebound here to the full grammar root.
        assignment = Optional(assignment_op).setParseAction(self.push_last) + ternary
        self.bnf = assignment
    def eval(self, expression_string, container=None, **kwargs):
        """Parse *expression_string* and build the node network.

        :param expression_string: the equation to realize as Maya nodes.
        :param container: optional container node name to hold created nodes.
        :param kwargs: mapping of expression variables to attributes/values.
        :return: the resulting output attribute (or numeric value).
        """
        long_kwargs = {}
        for var, value in kwargs.items():
            if isinstance(value, string_types):
                tokens = value.split(".")
                if len(tokens) == 1:
                    # Assume a single node name is the world matrix
                    value = "{}.worldMatrix[0]".format(tokens[0])
                else:
                    # Turn all attribute names into long names for consistency with
                    # results in listConnections
                    value = tokens[0]
                    for t in tokens[1:]:
                        attr = "{}.{}".format(value, t)
                        value += ".{}".format(cmds.attributeName(attr, long=True))
            long_kwargs[var] = value
        self.kwargs = long_kwargs
        # Reverse variable look up to write cleaner notes
        self._reverse_kwargs = {}
        for k, v in self.kwargs.items():
            self._reverse_kwargs[v] = k
        self.expression_string = expression_string
        self.expr_stack = []
        self.assignment_stack = []
        # Parsing populates expr_stack/assignment_stack via the parse actions.
        self.results = self.bnf.parseString(expression_string, True)
        self.container = (
            cmds.container(name=container, current=True) if container else None
        )
        self.created_nodes = {}
        # Assignment tokens (if any) are evaluated after the expression.
        stack = self.expr_stack[:] + self.assignment_stack[:]
        result = self.evaluate_stack(stack)
        if self.container:
            self.publish_container_attributes()
        return result
    def push_first(self, toks):
        """Parse action: push the first matched token onto the expr stack."""
        self.expr_stack.append(toks[0])
    def push_last(self, toks):
        """Parse action: queue assignment tokens so they evaluate last."""
        for t in toks:
            self.assignment_stack.append(t)
    def push_unary_minus(self, toks):
        """Parse action: record leading '-' tokens as unary negation."""
        for t in toks:
            if t == "-":
                self.expr_stack.append("unary -")
            else:
                # Stop at the first non-sign token.
                break
    def evaluate_stack(self, s):
        """Recursively evaluate the parse stack *s*.

        Pops the top token and builds the corresponding Maya nodes; returns
        either a numeric constant or an output attribute string such as
        ``"node.outputX"``.
        """
        op, num_args = s.pop(), 0
        if isinstance(op, tuple):
            # Function calls were replaced by a (name, arg_count) tuple.
            op, num_args = op
        if op == "unary -":
            op1 = self.evaluate_stack(s)
            return self.get_op_result(op, self.multiply, -1, op1)
        elif op == "?":
            # ternary
            if_false = self.evaluate_stack(s)
            if_true = self.evaluate_stack(s)
            condition = self.evaluate_stack(s)
            second_term = self.evaluate_stack(s)
            first_term = self.evaluate_stack(s)
            note = "{} {} {} ? {} : {}".format(
                first_term, self.conditionals[condition], second_term, if_true, if_false
            )
            return self.get_op_result(
                note,
                self.condition,
                first_term,
                second_term,
                condition,
                if_true,
                if_false,
                op_str=note,
            )
        elif op == ":":
            # Return the if_true statement to the ternary
            return self.evaluate_stack(s)
        elif op in "+-*/^":
            # operands are pushed onto the stack in reverse order
            op2 = self.evaluate_stack(s)
            op1 = self.evaluate_stack(s)
            return self.get_op_result(op, self.opn[op], op1, op2)
        elif op == "PI":
            return math.pi
        elif op == "E":
            return math.e
        elif op in self.fn:
            # args are pushed onto the stack in reverse order
            args = reversed([self.evaluate_stack(s) for _ in range(num_args)])
            args = list(args)
            return self.get_op_result(op, self.fn[op], *args)
        elif op[0].isalpha():
            # Bare identifier: must be one of the user-supplied variables.
            value = self.kwargs.get(op)
            if value is None:
                raise Exception("invalid identifier '%s'" % op)
            return value
        elif op in self.conditionals:
            # Comparison operators travel through the stack as their index.
            return self.conditionals.index(op)
        elif op == "=":
            # Assignment: connect the evaluated source into the destination.
            destination = self.evaluate_stack(s)
            source = self.evaluate_stack(s)
            cmds.connectAttr(source, destination, f=True)
        else:
            # try to evaluate as int first, then as float if int fails
            try:
                return int(op)
            except ValueError:
                return float(op)
def get_op_result(self, op, func, *args, **kwargs):
op_str = kwargs.get("op_str", self.op_str(op, *args))
result = self.created_nodes.get(op_str)
if result is None:
result = func(*args)
self.created_nodes[op_str] = result
self.add_notes(result, op_str)
return result
    def add(self, v1, v2):
        """Return the output of a plusMinusAverage node computing v1 + v2."""
        return self._connect_plus_minus_average(1, v1, v2)
    def subtract(self, v1, v2):
        """Return the output of a plusMinusAverage node computing v1 - v2."""
        return self._connect_plus_minus_average(2, v1, v2)
    def _connect_plus_minus_average(self, operation, v1, v2):
        """Create a plusMinusAverage node combining *v1* and *v2*.

        :param operation: plusMinusAverage operation (1=sum, 2=subtract).
        :param v1: attribute name string or numeric constant.
        :param v2: attribute name string or numeric constant.
        :return: the node's output attribute (1D or 3D).
        """
        pma = cmds.createNode("plusMinusAverage")
        cmds.setAttr("{}.operation".format(pma), operation)
        in_attr = "input1D"
        out_attr = "output1D"
        # Determine whether we should use 1D or 3D attributes
        for v in [v1, v2]:
            if isinstance(v, string_types) and attribute_is_array(v):
                in_attr = "input3D"
                out_attr = "output3D"
        for i, v in enumerate([v1, v2]):
            if isinstance(v, string_types):
                if attribute_is_array(v):
                    cmds.connectAttr(v, "{}.{}[{}]".format(pma, in_attr, i))
                else:
                    if in_attr == "input3D":
                        # Fan a scalar attribute into all three components.
                        for x in "xyz":
                            cmds.connectAttr(
                                v, "{}.{}[{}].input3D{}".format(pma, in_attr, i, x)
                            )
                    else:
                        cmds.connectAttr(v, "{}.{}[{}]".format(pma, in_attr, i))
            else:
                if in_attr == "input3D":
                    # Broadcast a numeric constant to all three components.
                    for x in "xyz":
                        cmds.setAttr(
                            "{}.{}[{}].input3D{}".format(pma, in_attr, i, x), v
                        )
                else:
                    cmds.setAttr("{}.{}[{}]".format(pma, in_attr, i), v)
        return "{}.{}".format(pma, out_attr)
    def multiply(self, v1, v2):
        """Return the output of a multiplyDivide node computing v1 * v2."""
        return self._connect_multiply_divide(1, v1, v2)
    def divide(self, v1, v2):
        """Return the output of a multiplyDivide node computing v1 / v2."""
        return self._connect_multiply_divide(2, v1, v2)
    def pow(self, v1, v2):
        """Return the output of a multiplyDivide node computing v1 ^ v2."""
        return self._connect_multiply_divide(3, v1, v2)
    def exp(self, v):
        """Return e ** v, built as a multiplyDivide power node."""
        return self._connect_multiply_divide(3, math.e, v)
    def sqrt(self, x):
        """Return x ** 0.5, built as a multiplyDivide power node."""
        return self._connect_multiply_divide(3, x, 0.5)
    def _connect_multiply_divide(self, operation, v1, v2):
        """Create a multiplyDivide node combining *v1* and *v2*.

        :param operation: multiplyDivide operation (1=multiply, 2=divide,
            3=power).
        :param v1: attribute name string or numeric constant.
        :param v2: attribute name string or numeric constant.
        :return: ``output`` (3D) or ``outputX`` (scalar) of the node.
        """
        mdn = cmds.createNode("multiplyDivide")
        cmds.setAttr("{}.operation".format(mdn), operation)
        value_count = 1
        # Determine whether we should use 1D or 3D attributes
        for v in [v1, v2]:
            if isinstance(v, string_types) and attribute_is_array(v):
                value_count = 3
        for i, v in enumerate([v1, v2]):
            # multiplyDivide inputs are named input1/input2 (1-based).
            i += 1
            if isinstance(v, string_types):
                if attribute_is_array(v):
                    cmds.connectAttr(v, "{}.input{}".format(mdn, i))
                else:
                    if value_count == 3:
                        # Fan the scalar attribute into X, Y and Z.
                        for x in "XYZ":
                            cmds.connectAttr(v, "{}.input{}{}".format(mdn, i, x))
                    else:
                        cmds.connectAttr(v, "{}.input{}X".format(mdn, i))
            else:
                if value_count == 3:
                    for x in "XYZ":
                        cmds.setAttr("{}.input{}{}".format(mdn, i, x), v)
                else:
                    cmds.setAttr("{}.input{}X".format(mdn, i), v)
        return "{}.output".format(mdn) if value_count == 3 else "{}.outputX".format(mdn)
    def clamp(self, value, min_value, max_value):
        """Clamp *value* into [min_value, max_value] via a clamp node.

        :param value: attribute name string or numeric constant to clamp.
        :param min_value: lower bound (attribute or number).
        :param max_value: upper bound (attribute or number).
        :return: ``output`` (3D) or ``outputR`` (scalar) of the clamp node.
        """
        clamp = cmds.createNode("clamp")
        for v, attr in [[min_value, "min"], [max_value, "max"]]:
            if isinstance(v, string_types):
                if attribute_is_array(v):
                    cmds.connectAttr(v, "{}.{}".format(clamp, attr))
                else:
                    # Fan the scalar attribute into R, G and B.
                    for x in "RGB":
                        cmds.connectAttr(v, "{}.{}{}".format(clamp, attr, x))
            else:
                for x in "RGB":
                    cmds.setAttr("{}.{}{}".format(clamp, attr, x), v)
        value_count = 1
        if isinstance(value, string_types):
            if attribute_is_array(value):
                value_count = 3
                cmds.connectAttr(value, "{}.input".format(clamp))
            else:
                for x in "RGB":
                    cmds.connectAttr(value, "{}.input{}".format(clamp, x))
        else:
            # Unlikely for a static value to be clamped, but it should still work
            for x in "RGB":
                cmds.setAttr("{}.input{}".format(clamp, x), value)
        return (
            "{}.output".format(clamp)
            if value_count == 3
            else "{}.outputR".format(clamp)
        )
    def condition(self, first_term, second_term, operation, if_true, if_false):
        """Create a condition node: firstTerm <op> secondTerm ? if_true : if_false.

        :param operation: condition-node operation index (matches the order
            of self.conditionals).
        :return: ``outColor`` (3D) or ``outColorR`` (scalar) of the node.
        """
        node = cmds.createNode("condition")
        cmds.setAttr("{}.operation".format(node), operation)
        for v, attr in [[first_term, "firstTerm"], [second_term, "secondTerm"]]:
            if isinstance(v, string_types):
                cmds.connectAttr(v, "{}.{}".format(node, attr))
            else:
                cmds.setAttr("{}.{}".format(node, attr), v)
        value_count = 1
        for v, attr in [[if_true, "colorIfTrue"], [if_false, "colorIfFalse"]]:
            if isinstance(v, string_types):
                if attribute_is_array(v):
                    value_count = 3
                    cmds.connectAttr(v, "{}.{}".format(node, attr))
                else:
                    # Fan the scalar attribute into R, G and B.
                    for x in "RGB":
                        cmds.connectAttr(v, "{}.{}{}".format(node, attr, x))
            else:
                cmds.setAttr("{}.{}R".format(node, attr), v)
        return (
            "{}.outColor".format(node)
            if value_count == 3
            else "{}.outColorR".format(node)
        )
    def lerp(self, a, b, t):
        """Linearly interpolate between *a* and *b* by *t* via blendTwoAttr.

        :param a: value/attribute when t == 0.
        :param b: value/attribute when t == 1.
        :param t: blend factor value or attribute.
        :return: the blendTwoAttr node's output attribute.
        """
        node = cmds.createNode("blendTwoAttr")
        if isinstance(t, string_types):
            cmds.connectAttr(t, "{}.attributesBlender".format(node))
        else:
            # Static value on attributesBlender doesn't make much sense
            # but we don't want to error out
            cmds.setAttr("{}.attributesBlender".format(node), t)
        for i, v in enumerate([a, b]):
            if isinstance(v, string_types):
                cmds.connectAttr(v, "{}.input[{}]".format(node, i))
            else:
                cmds.setAttr("{}.input[{}]".format(node, i), v)
        return "{}.output".format(node)
def abs(self, x):
    """Return a plug evaluating the absolute value of x."""
    return dge("x > 0 ? x : -x", x=x)
def min(self, x, y):
    """Return a plug evaluating the smaller of x and y."""
    op = self.conditionals.index("<=")
    return self.condition(x, y, op, x, y)
def max(self, x, y):
    """Return a plug evaluating the larger of x and y."""
    op = self.conditionals.index(">=")
    return self.condition(x, y, op, x, y)
def sin(self, x):
    """Return a plug evaluating sin(x) from the quaternion X component."""
    return self._euler_to_quat(x, "X")
def cos(self, x):
    """Return a plug evaluating cos(x) from the quaternion W component."""
    return self._euler_to_quat(x, "W")
def _euler_to_quat(self, x, attr):
    """Return a quaternion component plug for angle ``x``.

    The input is multiplied by 2 * 57.2958 (radians-to-degrees, doubled)
    before feeding eulerToQuat; since quat X = sin(theta/2) and
    quat W = cos(theta/2), the X/W outputs equal sin(x)/cos(x).

    :param x: Angle value or plug name (radians).
    :param attr: Quaternion component to return ("X" or "W").
    :return: The outputQuat component plug.
    """
    # quatNodes supplies the eulerToQuat node type.
    cmds.loadPlugin("quatNodes", qt=False)
    mdl = cmds.createNode("multDoubleLinear")
    cmds.setAttr("{}.input1".format(mdl), 2 * 57.2958)  # To degrees
    if isinstance(x, string_types):
        cmds.connectAttr(x, "{}.input2".format(mdl))
    else:
        cmds.setAttr("{}.input2".format(mdl), x)
    quat = cmds.createNode("eulerToQuat")
    cmds.connectAttr("{}.output".format(mdl), "{}.inputRotateX".format(quat))
    return "{}.outputQuat.outputQuat{}".format(quat, attr)
def tan(self, x):
    """Return a plug evaluating tan(x) as sin(x) / cos(x)."""
    # cos(x) == sin(pi/2 - x), which lets us reuse the sin machinery.
    shifted = dge("{} - x".format(math.pi * 0.5), x=x)
    return dge("sin(x) / sin(c)", x=x, c=shifted)
def acos(self, x):
    """Return a plug evaluating acos(x) via an angleBetween node.

    vector1 is built as (x, sqrt(1 - x*x), 0) -- a unit vector -- and
    vector2 lies on the +X axis, so the angle between them is acos(x).
    """
    angle = cmds.createNode("angleBetween")
    # Zero out all six vector components before wiring the ones we need.
    for attr in ["{}{}".format(i, j) for i in [1, 2] for j in "XYZ"]:
        cmds.setAttr("{}.vector{}".format(angle, attr), 0)
    if isinstance(x, string_types):
        cmds.connectAttr(x, "{}.vector1X".format(angle))
        # Guard against a zero-length vector2 when x == 0.
        dge("y = x == 0.0 ? 1.0 : abs(x)", y="{}.vector2X".format(angle), x=x)
    else:
        cmds.setAttr("{}.vector1X".format(angle), x)
        cmds.setAttr("{}.vector2X".format(angle), math.fabs(x))
    # Complete vector1 so it stays unit length: y = sqrt(1 - x^2).
    dge("y = sqrt(1.0 - x*x)", y="{}.vector1Y".format(angle), x=x)
    return "{}.axisAngle.angle".format(angle)
def asin(self, x):
    """Return a plug evaluating asin(x) via an angleBetween node.

    vector1 is (sqrt(1 - x*x), x, 0) and vector2 lies on the +X axis, so
    the measured angle is asin(|x|); the sign is restored afterwards.
    """
    angle = cmds.createNode("angleBetween")
    # Zero out all six vector components before wiring the ones we need.
    for attr in ["{}{}".format(i, j) for i in [1, 2] for j in "XYZ"]:
        cmds.setAttr("{}.vector{}".format(angle, attr), 0)
    if isinstance(x, string_types):
        cmds.connectAttr(x, "{}.vector1Y".format(angle))
    else:
        cmds.setAttr("{}.vector1Y".format(angle), x)
    result = dge("sqrt(1.0 - x*x)", x=x)
    cmds.connectAttr(result, "{}.vector1X".format(angle))
    # Guard against a zero-length vector2 when |x| == 1.
    dge("y=abs(x) == 1.0 ? 1.0 : r", y="{}.vector2X".format(angle), x=x, r=result)
    # angleBetween only reports magnitude; re-apply the sign of x.
    return dge("x < 0 ? -y : y", x=x, y="{}.axisAngle.angle".format(angle))
def atan(self, x):
    """Return a plug evaluating atan(x) via an angleBetween node.

    vector1 is (1, x, 0) and vector2 is (1, 0, 0), so the measured angle
    is atan(|x|); the sign is restored afterwards.
    """
    angle = cmds.createNode("angleBetween")
    # Zero out all six vector components before wiring the ones we need.
    for attr in ["{}{}".format(i, j) for i in [1, 2] for j in "XYZ"]:
        cmds.setAttr("{}.vector{}".format(angle, attr), 0)
    cmds.setAttr("{}.vector1X".format(angle), 1)
    cmds.setAttr("{}.vector2X".format(angle), 1)
    if isinstance(x, string_types):
        cmds.connectAttr(x, "{}.vector1Y".format(angle))
    else:
        cmds.setAttr("{}.vector1Y".format(angle), x)
    # angleBetween only reports magnitude; re-apply the sign of x.
    return dge("x < 0 ? -y : y", x=x, y="{}.axisAngle.angle".format(angle))
def distance(self, node1, node2):
    """Return a plug measuring the distance between two matrix plugs."""
    dist_node = cmds.createNode("distanceBetween")
    cmds.connectAttr(node1, "{}.inMatrix1".format(dist_node))
    cmds.connectAttr(node2, "{}.inMatrix2".format(dist_node))
    return "{}.distance".format(dist_node)
def add_notes(self, node, op_str):
    """Store a human-readable summary of the expression on the node.

    Adds a "notes" string attribute if the node does not already have
    one, then writes the expression, the operation string, and the
    keyword arguments used to build it.

    :param node: Node name or plug; only the node part is annotated.
    :param op_str: String form of the operation that produced the node.
    """
    node = node.split(".")[0]
    attrs = cmds.listAttr(node, ud=True) or []
    if "notes" not in attrs:
        cmds.addAttr(node, ln="notes", dt="string")
    # dict.keys() returns a view with no .sort() on Python 3; sorted()
    # works on both Python 2 and 3 and gives a stable ordering.
    keys = sorted(self.kwargs)
    notes = "Node generated by dge\n\nExpression:\n {}\n\nOperation:\n {}\n\nkwargs:\n {}".format(
        self.expression_string,
        op_str,
        "\n ".join(["{}: {}".format(x, self.kwargs[x]) for x in keys]),
    )
    cmds.setAttr("{}.notes".format(node), notes, type="string")
def publish_container_attributes(self):
    """Publish the expression kwargs as bound container attributes.

    For each kwarg that refers to an attribute plug, an intermediary
    attribute is added to the container, published/bound, and connected
    so external connections are rerouted through the container boundary.
    """
    self.add_notes(self.container, self.expression_string)
    external_connections = cmds.container(
        self.container, q=True, connectionList=True
    )
    external_connections = set(external_connections)
    container_nodes = set(cmds.container(self.container, q=True, nodeList=True))
    for var, value in self.kwargs.items():
        # Only plug-valued kwargs participate; static numbers are baked in.
        if not isinstance(value, string_types):
            continue
        # To connect multiple attributes to a bound container attribute, we
        # need to create an intermediary attribute that is bound and connected
        # to the internal attributes
        attr_type = attribute_type(value)
        # Matrix attributes use a data type (dt); everything else an
        # attribute type (at).
        kwargs = {"dt": attr_type} if attr_type == "matrix" else {"at": attr_type}
        cmds.addAttr(self.container, ln="_{}".format(var), **kwargs)
        published_attr = "{}._{}".format(self.container, var)
        cmds.container(self.container, e=True, publishAndBind=[published_attr, var])
        cmds.connectAttr(value, published_attr)
        # Reroute connections into the container to go through the published
        # attribute
        if value in external_connections:
            # Destination side: internal consumers now read the published attr.
            connected_nodes = set(cmds.listConnections(value, s=False, plugs=True))
            for connection in connected_nodes:
                node_name = connection.split(".")[0]
                if node_name in container_nodes:
                    cmds.connectAttr(published_attr, connection, force=True)
            # Source side: if the incoming plug lives inside the container,
            # route it out through the published attribute as well.
            source_plug = cmds.listConnections(value, d=False, plugs=True)
            if source_plug:
                source_plug = source_plug[0]
                node_name = source_plug.split(".")[0]
                if node_name in container_nodes:
                    cmds.connectAttr(source_plug, published_attr, force=True)
                    cmds.connectAttr(published_attr, value, force=True)
    # Stop adding newly created nodes to this container.
    cmds.container(self.container, e=True, current=False)
def op_str(self, op, *args):
    """Get the string form of the op and args.

    This is used for notes on the node as well as identifying which
    nodes can be reused.

    :param op: Name of the op
    :param args: Optional op arguments
    :return: The unique op string
    """
    arg_strings = [str(a) for a in args]
    if op in self.fn:
        return "{}({})".format(op, ", ".join(arg_strings))
    if arg_strings:
        resolved = [self._reverse_kwargs.get(a, a) for a in arg_strings]
        return op.join(resolved)
    return op
def attribute_is_array(value):
    """Return True if the plug is a 3-component array (double3/float3)."""
    return attribute_type(value) in ["double3", "float3"]
def attribute_type(a):
    """Return the Maya attribute type of the plug ``a`` ("node.attr")."""
    tokens = a.split(".")
    node, attribute = tokens[0], tokens[-1]
    if attribute.startswith("worldMatrix"):
        # attributeQuery doesn't seem to work with worldMatrix
        return "matrix"
    return cmds.attributeQuery(attribute, node=node, at=True)
| {
"repo_name": "chadmv/cmt",
"path": "scripts/cmt/dge.py",
"copies": "1",
"size": "27667",
"license": "mit",
"hash": -6001516206706798000,
"line_mean": 35.9385847797,
"line_max": 116,
"alpha_frac": 0.5332706835,
"autogenerated": false,
"ratio": 3.5810251100181207,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46142957935181206,
"avg_score": null,
"num_lines": null
} |
"""Allows the execution of complex queries against postgres."""
import sqlalchemy
from cis_identity_vault.models import rds
def raw_query(conn, sql_statement):
    """Execute a raw SQL statement and return every resulting row.

    Arguments:
        conn {object} -- A sqlalchemy connection object.
        sql_statement {string} -- The SQL query to execute.
    """
    return conn.execute(sql_statement).fetchall()
def sql_alchemy_select(engine, attr, comparator, stringified_operator, start=None, end=None, full_profiles=False):
    """Execute a sqlalchemy-style filter against the database.

    Arguments:
        engine {object} -- A sqlalchemy engine object.
        attr {string} -- Attribute that we are querying against.
        comparator {string} -- Thing that we must equal.
        stringified_operator {string} -- One of "not", "empty", "contains".
        start {int} -- Used in paginator slices (currently unused).
        end {int} -- Used in paginator slices (currently unused).
        full_profiles {bool} -- Return full profiles instead of user_ids.

    Returns:
        list -- Profiles or user_ids; empty when nothing matches or the
        operator/attribute combination is reserved for future use.

    Raises:
        ValueError -- For unsupported operators, attributes, or access
        providers.
    """
    result = None
    Session = sqlalchemy.orm.sessionmaker(bind=engine)
    session = Session()
    allowed_operators = ["not", "empty", "contains"]
    # max_records_to_return = 50  # for a paginated query
    if stringified_operator not in allowed_operators:
        raise ValueError(f"Operator {stringified_operator} not allowed for query.")
    if attr == "active":
        active_value = rds.People.profile[("active", "value")].astext
        if stringified_operator == "contains":
            result = session.query(rds.People).filter(
                active_value.cast(sqlalchemy.types.BOOLEAN) == bool(comparator)
            )
        elif stringified_operator == "not":
            result = session.query(rds.People).filter(
                active_value.cast(sqlalchemy.types.BOOLEAN) != bool(comparator)
            )
        elif stringified_operator == "empty":
            # Fix: the previous code used Python's `is None` on a column
            # expression (always False), so the NULL case never matched.
            # Use an SQL-level OR so NULL and empty string both match.
            result = session.query(rds.People).filter(
                sqlalchemy.or_(active_value.is_(None), active_value == "")
            )
        else:
            result = []
    elif attr.startswith("access_information."):
        access_provider = attr.split(".")[1]
        if access_provider == "ldap":
            if stringified_operator == "contains":
                result = session.query(rds.People).filter(
                    rds.People.profile[("access_information", "ldap", "values")].astext.contains(comparator)
                )
            elif stringified_operator == "not":
                # Future feature
                pass
            elif stringified_operator == "empty":
                # Future feature
                pass
            else:
                result = []
        elif access_provider == "mozilliansorg":
            if stringified_operator == "contains":
                result = session.query(rds.People).filter(
                    rds.People.profile[("access_information", "mozilliansorg", "values")].astext.contains(comparator)
                )
            elif stringified_operator == "not":
                # Future feature
                pass
            elif stringified_operator == "empty":
                # Future feature
                pass
            else:
                result = []
        elif access_provider == "access_provider":
            # Currently unused. Reserve for future use.
            pass
        elif access_provider == "hris":
            # Currently unused. Reserve for future use.
            pass
        else:
            raise ValueError("Access provider not supported.")
    # Fix: was `attr.startswith == "staff_information."`, which compares the
    # bound method itself to a string and is always False.
    elif attr.startswith("staff_information."):
        if stringified_operator == "contains":
            result = session.query(rds.People).filter(
                rds.People.profile[("staff_information", "values")].astext.contains(comparator)
            )
        elif stringified_operator == "not":
            # Future feature
            pass
        elif stringified_operator == "empty":
            # Future feature
            pass
        else:
            result = []
    else:
        raise ValueError(f"Attribute {attr} is not supported.")
    results = []
    # Future-feature paths leave result as None; return an empty list
    # instead of crashing on result.count().
    if result is None:
        return results
    if result.count() > 0:
        if full_profiles is True:
            for row in result:
                results.append(row.profile)
        elif full_profiles is False:
            for row in result:
                results.append(row.user_id)
        else:
            pass
    return results
| {
"repo_name": "mozilla-iam/cis",
"path": "python-modules/cis_postgresql/cis_postgresql/execute.py",
"copies": "1",
"size": "4590",
"license": "mpl-2.0",
"hash": -2009879438213014300,
"line_mean": 37.5714285714,
"line_max": 117,
"alpha_frac": 0.5675381264,
"autogenerated": false,
"ratio": 4.508840864440079,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.557637899084008,
"avg_score": null,
"num_lines": null
} |
# Allows the page to be rendered
from django.http import HttpResponse, HttpResponseRedirect
# Excpetion for objects that don't exist in the database
from django.core.exceptions import ObjectDoesNotExist
# Import database model
from models import PocketKey
# Check if in debug mode
from django.conf import settings
# Makes requests to pocket
import urllib, urllib2
# Utility Functions
import util
# Reads JSON
import json
import secret
# Pocket API consumer key, kept in the untracked secret module.
CONSUMER_KEY = secret.POCKET_KEY

# Pocket OAuth request/authorize endpoints and the article retrieval API.
url1 = 'https://getpocket.com/v3/oauth/request'
url2 = 'https://getpocket.com/v3/oauth/authorize'
retrieveUrl = 'https://getpocket.com/v3/get'
def getPath():
    """Return the site base URL for the current environment."""
    if settings.DEBUG == True:
        return 'http://localhost:8000'
    return 'http://www.theconnectedwire.com'
def checkAuthorized(request):
    """Report whether the logged-in user has linked a Pocket account."""
    user = util.checkLoggedIn(request)
    if not user:
        return HttpResponseRedirect('/')
    try:
        PocketKey.objects.get(User = user)
    except ObjectDoesNotExist:
        return HttpResponse('False')
    return HttpResponse('True')
def authorize(request):
    """Start the Pocket OAuth flow unless the user is already linked."""
    user = util.checkLoggedIn(request)
    if not user:
        return HttpResponseRedirect("/")
    try:
        PocketKey.objects.get(User = user)
    except ObjectDoesNotExist:
        # No stored key yet: get a request token and send the user to
        # Pocket's approval page, bouncing back to /pocket/approved.
        requestToken = getRequestToken()
        redirectURL = getPath()
        url = ('https://getpocket.com/auth/authorize?request_token='
               + requestToken + '&redirect_uri=' + redirectURL
               + '/pocket/approved?request_token=' + requestToken)
        return HttpResponseRedirect(url)
    return HttpResponse('Already Authorized')
def getRequestToken():
    """Request an OAuth request token from Pocket and return it."""
    redirect_uri = getPath() + '/dashboard'
    payload = urllib.urlencode({
        "consumer_key": CONSUMER_KEY,
        'redirect_uri': redirect_uri,
    })
    response = urllib2.urlopen(urllib2.Request(url1, payload))
    body = response.read()
    # The response body looks like "code=<token>": keep what follows '='.
    return body[body.find('=') + 1:]
def approved(request):
    """OAuth callback: exchange the request token and persist the key."""
    user = util.checkLoggedIn(request)
    if not user:
        return HttpResponseRedirect('/')
    requestToken = request.GET.get('request_token')
    access_token = getAuthorization(requestToken)
    PocketKey(User = user, key = access_token).save()
    return HttpResponseRedirect('/dashboard')
def getAuthorization(requestToken):
    """Exchange an approved request token for a Pocket access token."""
    payload = urllib.urlencode({
        'consumer_key': CONSUMER_KEY,
        'code': requestToken,
    })
    response = urllib2.urlopen(urllib2.Request(url2, payload))
    body = response.read()
    # Body looks like "access_token=<token>&username=<name>".
    start = body.find('access_token=') + 13
    return body[start:body.find('&')]
def getArticles(request):
    """Return the user's saved Pocket articles as an HTML fragment.

    Articles are rendered newest-first by Pocket's time_added field.
    Returns the plain string 'Not Logged In' when there is no session
    user (preserved from the original behavior).
    """
    user = util.checkLoggedIn(request)
    if not user:
        return 'Not Logged In'
    access_token = PocketKey.objects.get(User = user).key
    data = urllib.urlencode({
        'consumer_key': CONSUMER_KEY,
        'access_token': access_token,
    })
    req = urllib2.Request(retrieveUrl, data)
    response = urllib2.urlopen(req)
    payload = json.loads(response.read())
    items = []
    for item_id in payload["list"]:
        item = payload['list'][item_id]
        title = item['resolved_title']
        url = item['resolved_url']
        excerpt = item['excerpt']
        markup = '<a class="starredLink" target="_blank" href="' + url + '"><div class="starredItem">' + title + '<div class="excerpt"> - ' + excerpt + '</div></div></a>'
        items.append({'output': markup, 'added': item['time_added']})
    # Replace the O(n^2) manual insertion sort with a stable descending
    # sort on the same string key; tie order is preserved either way.
    items.sort(key=lambda entry: entry['added'], reverse=True)
    return HttpResponse(''.join(entry['output'] for entry in items))
| {
"repo_name": "mjcuva/ConnectedWire",
"path": "blog/pocket.py",
"copies": "1",
"size": "4541",
"license": "apache-2.0",
"hash": -6130052415354423000,
"line_mean": 21.705,
"line_max": 184,
"alpha_frac": 0.5857740586,
"autogenerated": false,
"ratio": 4.150822669104205,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00782633498974141,
"num_lines": 200
} |
# Allows the page to be rendered
from django.shortcuts import render_to_response
# Allows redirects, as well as 404 pages
from django.http import HttpResponseRedirect, Http404, HttpResponse
# Allows the request to be sent to the template
from django.template import RequestContext
import util
# Calculate stats about posts for dashboard
import stats
from models import Todo
# Displays the dashboard
def dash(request):
    """Render the dashboard with post stats, a suggestion, and todos."""
    # Gets the logged in user, or redirects
    username = util.checkLoggedIn(request)
    if not username:
        return HttpResponseRedirect('/login?next=dashboard')
    # Gets total wordcount
    wordCountInt, wordCountString = stats.getWordCount()
    # Gets total number of posts
    postCount = stats.getPostCount()
    # Fix: guard against ZeroDivisionError when no posts exist yet.
    if int(postCount):
        average = int(int(wordCountInt) / int(postCount))
    else:
        average = 0
    # Days since last post
    daysSince = stats.daysSince()
    # Makes sure the grammar is correct
    if not daysSince == 1:
        day = "days"
    else:
        day = 'day'
    # Gets the top story title and link from techmeme
    suggestionTitle, suggestionLink = stats.suggestion()
    todo = Todo.objects.all().order_by('-id')
    # Renders the dashboard html page
    return render_to_response("dashboard/main.html", {
        'username': username,
        'wordCountString': wordCountString,
        'average': average,
        'postCount': postCount,
        'daysSince': daysSince,
        'day': day,
        'suggestionTitle': suggestionTitle,
        'suggestionLink': suggestionLink,
        'todo': todo,
    }, context_instance=RequestContext(request))
def addTodo(request):
    """Create a todo from the 'item' query parameter and render it."""
    todoText = request.GET.get('item')
    todo = Todo(text = todoText)
    todo.save()
    return render_to_response('dashboard/todo.html', {'todoItem': todo},
                              context_instance=RequestContext(request))
def deleteTodo(request):
    """Delete the todo whose primary key is the 'id' query parameter."""
    itemid = request.GET.get('id')
    Todo.objects.get(pk = itemid).delete()
    return HttpResponse('deleted')
"repo_name": "mjcuva/ConnectedWire",
"path": "blog/dashboard.py",
"copies": "1",
"size": "1908",
"license": "apache-2.0",
"hash": 7932401934721582000,
"line_mean": 23.164556962,
"line_max": 111,
"alpha_frac": 0.7059748428,
"autogenerated": false,
"ratio": 3.533333333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9626409622860668,
"avg_score": 0.022579710654533253,
"num_lines": 79
} |
# Allows the page to be rendered
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
import util
import blogForms
import datetime
from django.core.files.storage import FileSystemStorage
import paths
import os
import subprocess
# Allows the request to be sent to the template
from django.template import RequestContext
from models import Podcast, Page
def addEpisode(request):
    """Create a new podcast episode from the submitted form data.

    GET renders an empty form; POST checks the required fields are
    present, optionally stores an uploaded cover image, saves the
    episode, and redirects to /podcast.
    """
    username = util.checkLoggedIn(request)
    if not username:
        return HttpResponseRedirect('/')
    error = ""
    if request.method == "POST":
        # NOTE(review): the bound form is only used to re-render on error;
        # form.is_valid() is never called -- validation below is manual.
        form = blogForms.newEpisodeForm(request.POST)
        title = request.POST['title']
        showNotes = request.POST['showNotes']
        episodeURL = '/podcasts/' + request.POST['episode']
        size = request.POST['size']
        audio_length = request.POST['duration']
        if title and showNotes and episodeURL:
            published = datetime.datetime.now()
            if 'image' in request.FILES:
                # Store the uploaded cover art under the site images dir.
                image = request.FILES['image']
                store = FileSystemStorage(paths.SITE_ROOT + '/images/')
                storedImage = store.save(image.name, image)
                imageURL = '/images/' + storedImage
            else:
                # No cover art supplied; imageURL stays NULL on the row.
                imageURL = None
            episode = Podcast(title = title,
                              link = episodeURL,
                              showNotes = showNotes,
                              length = size,
                              date = published,
                              imageURL = imageURL,
                              audio_length = audio_length)
            episode.save()
            return HttpResponseRedirect('/podcast')
        else:
            error = 'Something is amiss'
    else:
        form = blogForms.newEpisodeForm()
    # Renders the dashboard html page
    return render_to_response("dashboard/newEpisode.html", {'username':username, 'form':form, 'error':error}, context_instance =RequestContext(request))
def generateRSS(request):
    """Render the podcast RSS feed (XML), newest episode first."""
    podcasts = Podcast.objects.all().order_by('-date')
    # NOTE(review): raises IndexError when no episodes exist -- confirm the
    # feed is only exposed once at least one episode has been published.
    lastepisode = podcasts[0].date
    return render_to_response("podcast.xml", {"podcasts": podcasts, 'lastepisode': lastepisode}, mimetype="text/xml")
def showEpisodes(request):
    """Render the public list of podcast episodes with site pages."""
    username = util.checkLoggedIn(request)
    episode_list = Podcast.objects.all().order_by('-date')
    page_list = Page.objects.all().order_by('id')
    return render_to_response(
        "podcasts.html",
        {"podcasts": episode_list, "pages": page_list, "username": username})
def deleteEpisode(request, id):
    """Delete a podcast episode and its cover image (when one exists).

    :param id: Primary key of the Podcast row to remove.
    """
    username = util.checkLoggedIn(request)
    if not username:
        return HttpResponseRedirect('/')
    episode = Podcast.objects.get(pk=id)
    # Fix: imageURL is None for episodes saved without cover art (see
    # addEpisode); concatenating it raised an uncaught TypeError.
    if episode.imageURL:
        try:
            os.remove(paths.SITE_ROOT + episode.imageURL)
        except OSError:
            # The file may already be gone; deleting the row still proceeds.
            pass
    episode.delete()
    return HttpResponseRedirect('/podcast')
| {
"repo_name": "mjcuva/ConnectedWire",
"path": "blog/podcast.py",
"copies": "1",
"size": "2910",
"license": "apache-2.0",
"hash": 7544363522170949000,
"line_mean": 22.6585365854,
"line_max": 152,
"alpha_frac": 0.6127147766,
"autogenerated": false,
"ratio": 4.330357142857143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5443071919457143,
"avg_score": null,
"num_lines": null
} |
"""Allows the user to draw streamlines for given vector data. This
supports various types of seed objects (line, sphere, plane and point
seeds). It also allows the user to draw ribbons or tubes and further
supports different types of interactive modes of calculating the
streamlines.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
from math import sqrt
# Enthought library imports.
from traits.api import Instance, Bool, TraitPrefixList, Trait, \
Delegate, Button
from traitsui.api import View, Group, Item, InstanceEditor
from tvtk.api import tvtk
from tvtk.common import configure_outputs
# Local imports
from mayavi.core.module import Module
from mayavi.core.pipeline_info import PipelineInfo
from mayavi.components.actor import Actor
from mayavi.components.source_widget import SourceWidget
######################################################################
# `Streamline` class.
######################################################################
class Streamline(Module):
    """Draw streamlines for vector data.

    Supports line/ribbon/tube presentation and interactive seed widgets
    (line, sphere, plane and point seeds) via a SourceWidget.
    """

    # The version of this class. Used for persistence.
    __version__ = 0

    # The streamline generator.
    stream_tracer = Instance(tvtk.StreamTracer, allow_none=False,
                             record=True)

    # The seed for the streamlines.
    seed = Instance(SourceWidget, allow_none=False, record=True)

    # The update mode of the seed -- this is delegated to the
    # SourceWidget.
    update_mode = Delegate('seed', modify=True)

    # Determines if the streamlines are shown as lines or ribbons or
    # tubes.
    streamline_type = Trait('line', TraitPrefixList(['line', 'ribbon',
                                                     'tube']),
                            desc='draw streamlines as lines/ribbons/tubes')

    # The ribbon filter.
    ribbon_filter = Instance(tvtk.RibbonFilter, allow_none=False,
                             record=True)

    # The tube filter.
    tube_filter = Instance(tvtk.TubeFilter, allow_none=False,
                           record=True)

    # The clean poly data filter
    clean_filter = Instance(tvtk.CleanPolyData, allow_none=False,
                            record=True)

    # The actor component that represents the visualization.
    actor = Instance(Actor, allow_none=False, record=True)

    # Accepts any dataset carrying vector attributes.
    input_info = PipelineInfo(datasets=['any'],
                              attribute_types=['any'],
                              attributes=['vectors'])

    ########################################
    # Private traits.

    # True until the first upstream connection; used to size the
    # ribbon/tube geometry once from the input bounds.
    _first = Bool(True)

    ########################################
    # View related code.

    # A button to update the streamlines.
    update_streamlines = Button('Update Streamlines')

    _tube_group = Group(Item(name='capping'),
                        Item(name='sides_share_vertices'),
                        Item(name='vary_radius'),
                        Item(name='number_of_sides'),
                        Item(name='radius'),
                        Item(name='radius_factor'),
                        Item(name='offset'),
                        Item(name='on_ratio')
                        )

    _ribbon_group = Group(Item(name='vary_width'),
                          Item(name='width'),
                          Item(name='width_factor'),
                          Item(name='angle')
                          )

    view = View(Group(Group(Item(name='update_mode'),
                            ),
                      Group(Item(name='update_streamlines'),
                            show_labels=False,
                            ),
                      Group(Item(name='streamline_type'),
                            Item(name='ribbon_filter', style='custom',
                                 visible_when='object.streamline_type == "ribbon"',
                                 editor=InstanceEditor(view=View(_ribbon_group))),
                            Item(name='tube_filter', style='custom',
                                 visible_when='object.streamline_type == "tube"',
                                 editor=InstanceEditor(view=View(_tube_group))),
                            show_labels=False,
                            label='Streamline'
                            ),
                      label='Streamline'
                      ),
                Group(Item(name='seed', style='custom', resizable=True),
                      label='Seed',
                      show_labels=False),
                Group(Item(name='stream_tracer', style='custom', resizable=True),
                      label='StreamTracer',
                      show_labels=False),
                Group(Item(name='actor', style='custom'),
                      label='Actor',
                      show_labels=False),
                resizable=True
                )

    ######################################################################
    # `Module` interface
    ######################################################################
    def setup_pipeline(self):
        """Override this method so that it *creates* the tvtk
        pipeline.

        This method is invoked when the object is initialized via
        `__init__`. Note that at the time this method is called, the
        tvtk data pipeline will *not* yet be setup. So upstream data
        will not be available. The idea is that you simply create the
        basic objects and setup those parts of the pipeline not
        dependent on upstream sources and filters. You should also
        set the `actors` attribute up at this point.
        """
        # Create and setup the default objects.
        self.seed = SourceWidget()
        self.stream_tracer = tvtk.StreamTracer(maximum_propagation=50,
                                               integration_direction='forward',
                                               compute_vorticity=True,
                                               integrator_type='runge_kutta4',
                                               )
        self.ribbon_filter = tvtk.RibbonFilter()
        self.tube_filter = tvtk.TubeFilter()
        self.clean_filter = tvtk.CleanPolyData()
        self.actor = Actor()
        # Setup the actor suitably for this module.
        self.actor.property.line_width = 2.0

    def update_pipeline(self):
        """Override this method so that it *updates* the tvtk pipeline
        when data upstream is known to have changed.

        This method is invoked (automatically) when any of the inputs
        sends a `pipeline_changed` event.
        """
        mm = self.module_manager
        if mm is None:
            return
        src = mm.source
        self.configure_connection(self.stream_tracer, src)
        self.seed.inputs = [src]
        # Setup the radius/width of the tube/ribbon filters based on
        # given input.
        if self._first:
            # Size the geometry to ~0.75% of the data bounds diagonal.
            b = src.outputs[0].bounds
            l = [(b[1]-b[0]), (b[3]-b[2]), (b[5]-b[4])]
            length = sqrt(l[0]*l[0] + l[1]*l[1] + l[2]*l[2])
            self.ribbon_filter.width = length*0.0075
            self.tube_filter.radius = length*0.0075
            self._first = False
        self._streamline_type_changed(self.streamline_type)
        # Set the LUT for the mapper.
        self.actor.set_lut(mm.scalar_lut_manager.lut)
        self.pipeline_changed = True

    def update_data(self):
        """Override this method so that it flushes the vtk pipeline if
        that is necessary.

        This method is invoked (automatically) when any of the inputs
        sends a `data_changed` event.
        """
        # Just set data_changed, the components should do the rest if
        # they are connected.
        self.data_changed = True

    ######################################################################
    # Non-public methods.
    ######################################################################
    def _streamline_type_changed(self, value):
        # Re-wire the tail of the pipeline for line/ribbon/tube output.
        if self.module_manager is None:
            return
        st = self.stream_tracer
        rf = self.ribbon_filter
        tf = self.tube_filter
        if value == 'line':
            configure_outputs(self, st)
        elif value == 'ribbon':
            self.configure_connection(rf, st)
            configure_outputs(self, rf)
        elif value == 'tube':
            # Without a clean poly data filter, tube filter will throw could
            # not generate normals warning
            cf = self.clean_filter
            self.configure_connection(cf, st)
            self.configure_connection(tf, cf)
            configure_outputs(self, tf)
        self.render()

    def _update_streamlines_fired(self):
        # Button handler: refresh the seed points and re-trace.
        self.seed.update_poly_data()
        self.stream_tracer.update()
        self.render()

    def _stream_tracer_changed(self, old, new):
        # Move render listeners and source/seed wiring to the new tracer.
        if old is not None:
            old.on_trait_change(self.render, remove=True)
        seed = self.seed
        if seed is not None:
            self.configure_source_data(new, seed.poly_data)
        new.on_trait_change(self.render)
        mm = self.module_manager
        if mm is not None:
            src = mm.source
            self.configure_connection(new, src)
        # A default output so there are no pipeline errors. The
        # update_pipeline call corrects this if needed.
        self.outputs = [new.output]
        self.update_pipeline()

    def _seed_changed(self, old, new):
        # Point the tracer at the replacement seed geometry.
        st = self.stream_tracer
        if st is not None:
            self.configure_source_data(st, new.poly_data)
        self._change_components(old, new)

    def _ribbon_filter_changed(self, old, new):
        # Keep render notifications attached to the active ribbon filter.
        if old is not None:
            old.on_trait_change(self.render, remove=True)
        new.on_trait_change(self.render)
        self._streamline_type_changed(self.streamline_type)

    def _tube_filter_changed(self, old, new):
        # Keep render notifications attached to the active tube filter.
        if old is not None:
            old.on_trait_change(self.render, remove=True)
        new.on_trait_change(self.render)
        self._streamline_type_changed(self.streamline_type)

    def _actor_changed(self, old, new):
        # Hand the scene and inputs over to the replacement actor.
        new.scene = self.scene
        new.inputs = [self]
        self._change_components(old, new)
| {
"repo_name": "dmsurti/mayavi",
"path": "mayavi/modules/streamline.py",
"copies": "3",
"size": "10285",
"license": "bsd-3-clause",
"hash": 9190171937129475000,
"line_mean": 37.5205992509,
"line_max": 83,
"alpha_frac": 0.5288283909,
"autogenerated": false,
"ratio": 4.458170784568704,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00044633340991675585,
"num_lines": 267
} |
# Allows the user to view the constructed HWP bacxground images. Shows all four
# HWP rotations associated with a single IPPA angle
#
import os
import sys
import glob
import numpy as np
from astropy.io import ascii
from astropy.table import Table as Table
from astropy.table import Column as Column
from astropy.convolution import convolve, convolve_fft, Gaussian2DKernel
from astropy.stats import gaussian_fwhm_to_sigma, sigma_clipped_stats
from photutils import detect_threshold, detect_sources
from scipy.ndimage.filters import median_filter, gaussian_filter
import matplotlib.pyplot as plt
from astropy.visualization import ZScaleInterval
# Add the AstroImage class
import astroimage as ai
# Add the header handler to the BaseImage class
from Mimir_header_handler import Mimir_header_handler
ai.reduced.ReducedScience.set_header_handler(Mimir_header_handler)
ai.set_instrument('mimir')
# This is the location where all pyPol data will be saved
pyPol_data = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\pyPol_Reduced\\201611\\'
polarimetryDir = os.path.join(pyPol_data, 'Polarimetry')
stokesDir = os.path.join(polarimetryDir, 'stokesImages')

################################################################################
# Determine which parts of the fileIndex pertain to science images
# Load every Stokes-I image found in the stokes directory.
fileList = glob.glob(os.path.join(stokesDir, '*I.fits'))
imgList = [ai.reduced.ReducedScience.read(f) for f in fileList]
imgList = np.array(imgList)

# Loop through each image and construct a list of pixel positions
# (one (yy, xx) coordinate grid per image, used by the mouse/key handlers).
xxList = []
yyList = []
for img in imgList:
    ny, nx = img.shape
    yy, xx = np.mgrid[0:ny, 0:nx]
    xxList.append(xx)
    yyList.append(yy)
#******************************************************************************
# Define the event handlers for clicking and keying on the image display
#******************************************************************************
def on_click(event):
    """Paint (LMB) or erase (MMB/RMB) mask pixels around the clicked point."""
    global xxList, yyList, imgList, imgNum
    global fig, brushSize, ax, maskImg, axImg
    # Grab the position of the click
    x, y = event.xdata, event.ydata
    # Retrieve the pixel position grids for the current image
    yy, xx = yyList[imgNum], xxList[imgNum]
    # Compute distances from the click and update mask array
    dist = np.sqrt((xx - x)**2 + (yy - y)**2)
    # The brush radius is 5x the keyboard-selected brush size.
    maskInds = np.where(dist < brushSize*5)
    if event.button == 1:
        # Left button: mark pixels as masked (1).
        tmpData = maskImg.data
        tmpData[maskInds] = 1
        maskImg.data = tmpData
    if (event.button == 2) or (event.button == 3):
        # Middle/right button: clear the mask (0).
        tmpData = maskImg.data
        tmpData[maskInds] = 0
        maskImg.data = tmpData
    # Update contour plot (clear old lines redo contouring)
    ax.collections = []
    ax.contour(xx, yy, maskImg.data, levels=[0.5], colors='white', alpha = 0.2)
    # Update the display
    fig.canvas.draw()
################################################################################
# Define a function to handle what sholud be done whenever a key is pressed
################################################################################
def on_key(event):
    """Handle key presses on the mask-building display.

    Keys 1-6 set the brush radius; left/right arrows step through the
    image list (wrapping at either end), reloading or creating the mask
    for the newly displayed image; 'enter' writes the current mask to
    disk; 'backspace' clears the current mask.
    """
    global xxList, yyList, imgList, imgNum
    global fig, brushSize, maskImg
    global stokesDir
    # Handle brush sizing: number keys 1-6 select the brush radius directly
    if event.key in ('1', '2', '3', '4', '5', '6'):
        brushSize = int(event.key)
    # Increment the image number
    if event.key == 'right' or event.key == 'left':
        if event.key == 'right':
            # Advance to the next image; loop back to the start at the end
            imgNum += 1
            if imgNum > imgList.size - 1:
                imgNum = 0
        if event.key == 'left':
            # Move back to the previous image; loop to the end from index 0
            imgNum -= 1
            if imgNum < 0:
                imgNum = imgList.size - 1
        # Build the image scaling interval (stretch the max by 10x)
        img = imgList[imgNum]
        zScaleGetter = ZScaleInterval()
        thisMin, thisMax = zScaleGetter.get_limits(img.data)
        thisMax *= 10
        # Update the displayed mask: reuse an existing mask file if one
        # exists for this image, otherwise start from a blank template.
        baseFile = os.path.basename(img.filename).split('_I')[0]
        maskFile = os.path.join(stokesDir,
            baseFile + '_mask.fits')
        if os.path.isfile(maskFile):
            print('using this mask: ', os.path.basename(maskFile))
            maskImg = ai.reduced.ReducedScience.read(maskFile)
        else:
            # Blank mask template (0 = not masked, 1 = masked)
            maskImg = ai.reduced.ReducedScience(
                (img.data*0).astype(np.int16),
                header = img.header
            )
            maskImg.filename = maskFile
        # Grab the pixel positions
        yy, xx = yyList[imgNum], xxList[imgNum]
        # Update contour plot: remove the old contour artists, then
        # re-contour. (Assigning ax.collections = [] breaks on
        # matplotlib >= 3.5, where Axes.collections is read-only.)
        for coll in list(ax.collections):
            coll.remove()
        ax.contour(xx, yy, maskImg.data, levels=[0.5], colors='white', alpha = 0.2)
        # Reassign image display limits
        axImg.set_clim(vmin = thisMin, vmax = thisMax)
        # Display the new image and update its extent and title
        axImg.set_data(img.data)
        axImg.set_extent((xx.min(), xx.max(), yy.min(), yy.max()))
        ax.set_title(os.path.basename(img.filename))
        # Update the display
        fig.canvas.draw()
    # Save the generated mask
    if event.key == 'enter':
        print('Writing mask for file {}'.format(maskImg.filename))
        maskImg.write(clobber=True)
    # Clear out the mask values
    if event.key == 'backspace':
        try:
            maskImg.data = (maskImg.data*0).astype(np.int16)
            for coll in list(ax.collections):
                coll.remove()
            ax.contour(xx, yy, maskImg.data, levels=[0.5], colors='white', alpha = 0.2)
            fig.canvas.draw()
        except Exception:
            # xx/yy are only bound after an arrow-key press in this call,
            # so this can raise NameError; best-effort clear is intended.
            # (Narrowed from a bare `except:` that also swallowed
            # KeyboardInterrupt/SystemExit.)
            pass
#******************************************************************************
# This script will run the mask building step of the pyPol reduction
#******************************************************************************
# Build the interactive figure with one axis and legible tick labels.
fig = plt.figure(figsize=(10, 9))
ax = plt.subplot(111)
plt.setp(ax.get_xticklabels(), fontsize=12)
plt.setp(ax.get_yticklabels(), fontsize=12)
# Rescale the figure and set up the spacing between images.
plt.subplots_adjust(left=0.04, bottom=0.04, right=0.95, top=0.96,
                    wspace=0.02, hspace=0.02)
# Initialize the image number and brush size.
imgNum = 0
brushSize = 3
# Grab the first image and build (or read) its initial mask.
img = imgList[imgNum]
baseFile = os.path.basename(img.filename).split('_I')[0]
maskFile = os.path.join(stokesDir,
    baseFile + '_mask.fits')
if os.path.isfile(maskFile):
    # A mask file already exists for this image, so reuse it.
    print('using this mask: ', os.path.basename(maskFile))
    maskImg = ai.reduced.ReducedScience.read(maskFile)
else:
    # No mask exists yet: start from a blank template
    # (0 = not masked, 1 = masked).
    maskImg = ai.reduced.ReducedScience(
        (img.data * 0).astype(np.int16),
        header=img.header
    )
    maskImg.filename = maskFile
# Display the image and annotate it with its filename.
axImg = img.show(axes=ax, cmap='viridis', noShow=True)
ax.set_title(os.path.basename(img.filename))
# Connect the mouse and keyboard event handlers.
cid1 = fig.canvas.mpl_connect('button_press_event', on_click)
cid2 = fig.canvas.mpl_connect('key_press_event', on_key)
# Show the figure (blocks until the window is closed).
plt.show()
# Disconnect the event handlers and close the figure.
fig.canvas.mpl_disconnect(cid1)
fig.canvas.mpl_disconnect(cid2)
plt.close()
print('Done!')
| {
"repo_name": "jmontgom10/Mimir_pyPol",
"path": "06a_finalStarMasking.py",
"copies": "1",
"size": "8435",
"license": "mit",
"hash": -1945763585155443700,
"line_mean": 33.1497975709,
"line_max": 87,
"alpha_frac": 0.5950207469,
"autogenerated": false,
"ratio": 3.7224183583406885,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48174391052406884,
"avg_score": null,
"num_lines": null
} |
# Allows the user to view the constructed HWP background images. Shows all four
# HWP rotations associated with a single IPPA angle
#
import os
import sys
import numpy as np
from astropy.io import ascii
from astropy.table import Table as Table
from astropy.table import Column as Column
from astropy.convolution import convolve, convolve_fft, Gaussian2DKernel
from astropy.stats import gaussian_fwhm_to_sigma, sigma_clipped_stats
from photutils import detect_threshold, detect_sources
from scipy.ndimage.filters import median_filter, gaussian_filter
import matplotlib.pyplot as plt
# Add the AstroImage class
import astroimage as ai
# Add the header handler to the BaseImage class
from Mimir_header_handler import Mimir_header_handler
ai.reduced.ReducedScience.set_header_handler(Mimir_header_handler)
ai.set_instrument('mimir')
# Location of the PPOL reduction directory
PPOL_dir = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\PPOL_reduced\\201611'
# Path to the S3_Astrometry files
S3_dir = os.path.join(PPOL_dir, 'S3_Astrometry')
# Location where all pyPol data will be saved
pyPol_data = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\pyPol_Reduced\\201611\\'
# Path to the supersky (background image) directory
bkgImagesDir = os.path.join(pyPol_data, 'bkgImages')
# Read in the file index and keep only the science files flagged for use
indexFile = os.path.join(pyPol_data, 'reducedFileIndex.csv')
fileIndex = Table.read(indexFile, format='csv')
fileIndex = fileIndex[np.where(fileIndex['USE'] == 1)]
# Group the culled index by GROUP_ID and AB beam
fileIndexByGroup = fileIndex.group_by(['GROUP_ID', 'AB'])
# Build a dictionary translating each IPPA angle into its four HWP rotations
HWPlist = (
    4*(np.arange(16, dtype=int).reshape((4, 4)) % 4) +
    np.arange(4, dtype=int).reshape((4, 1)) + 1
)
IPPAlist = np.array([0, 45, 90, 135])
IPPA_to_HWP = dict(zip(IPPAlist, HWPlist))
# Map each '<object>_G<groupID>' key to a {IPPA: [four HWP file paths]} dict
groupDict = {}
for group in fileIndexByGroup.groups:
    # Only ABBA/BAAB dithered groups are handled here
    thisDither = str(np.unique(group['DITHER_TYPE'].data)[0])
    if thisDither != 'ABBA':
        continue
    thisGroupName = str(np.unique(group['OBJECT'].data)[0])
    thisGroupID = str(np.unique(group['GROUP_ID'].data)[0])
    ippaDict = {}
    for ippa, hwps in IPPA_to_HWP.items():
        # Background image filenames for every HWP of this IPPA
        ippaDict[ippa] = [
            os.path.join(
                bkgImagesDir,
                '{}_G{}_HWP{}.fits'.format(thisGroupName, thisGroupID, str(hwp))
            )
            for hwp in hwps
        ]
    groupDict['{}_G{}'.format(thisGroupName, thisGroupID)] = ippaDict
# Alphabetically sorted group keys give a consistent display order
groupKeyList = sorted(groupDict.keys())
################################################################################
# Define a function to handle what should be done whenever a key is pressed
################################################################################
def on_key(event):
    """Handle key presses on the HWP background display.

    Left/right arrows step backward/forward through the IPPA angles,
    rolling over into the previous/next group when the IPPAs are
    exhausted. The four HWP image panels, their labels, the figure
    title, and the background time-series panel are all refreshed.
    """
    global groupDict, groupKeyList, IPPAlist, fig, IPPA_num, group_num
    global HWP_0_img, HWP_1_img, HWP_2_img, HWP_3_img
    global HWP_0_AxImg, HWP_1_AxImg, HWP_2_AxImg, HWP_3_AxImg
    global HWP_0_Label, HWP_1_Label, HWP_2_Label, HWP_3_Label

    def _read_hwp_images(hwpFiles):
        """Read the four HWP images for the current group/IPPA.

        When a file is missing or unreadable, substitute a flat
        (all-ones) image with the shape of the previously displayed
        image so the display always has something to show.
        """
        global HWP_0_img, HWP_1_img, HWP_2_img, HWP_3_img
        try:
            HWP_0_img = ai.reduced.ReducedScience.read(hwpFiles[0])
        except Exception:
            HWP_0_img = ai.reduced.ReducedScience(np.ones(HWP_0_img.shape))
        try:
            HWP_1_img = ai.reduced.ReducedScience.read(hwpFiles[1])
        except Exception:
            HWP_1_img = ai.reduced.ReducedScience(np.ones(HWP_1_img.shape))
        try:
            HWP_2_img = ai.reduced.ReducedScience.read(hwpFiles[2])
        except Exception:
            HWP_2_img = ai.reduced.ReducedScience(np.ones(HWP_2_img.shape))
        try:
            HWP_3_img = ai.reduced.ReducedScience.read(hwpFiles[3])
        except Exception:
            HWP_3_img = ai.reduced.ReducedScience(np.ones(HWP_3_img.shape))

    # Increment the image number
    if event.key == 'right' or event.key == 'left':
        if event.key == 'right':
            # Advance to the next IPPA; when the IPPAs for this group are
            # exhausted, move on to the next group (wrapping at the end).
            IPPA_num += 1
            if IPPA_num > IPPAlist.size - 1:
                IPPA_num = 0
                group_num += 1
                if group_num > (len(groupKeyList) - 1):
                    group_num = 0
                if group_num < (1 - len(groupKeyList)):
                    group_num = 0
        if event.key == 'left':
            # Step back to the previous IPPA; when we step past the first,
            # wrap to the last IPPA and back up one group. A negative
            # group_num is valid here (Python indexes from the end).
            IPPA_num -= 1
            if IPPA_num < 0:
                IPPA_num = IPPAlist.size - 1
                group_num -= 1
        # Grab the current groupDict entry and (re)load its four HWP
        # images. (This was duplicated verbatim in both arrow branches.)
        thisGroupKey = groupKeyList[group_num]
        thisIPPA = IPPAlist[IPPA_num]
        thisHWPfiles = groupDict[thisGroupKey][thisIPPA]
        _read_hwp_images(thisHWPfiles)
        ###############################
        # Update the displayed images
        ###############################
        HWP_0_AxImg.set_data(HWP_0_img.data)
        HWP_1_AxImg.set_data(HWP_1_img.data)
        HWP_2_AxImg.set_data(HWP_2_img.data)
        HWP_3_AxImg.set_data(HWP_3_img.data)
        # Update the figure title and the per-panel HWP labels
        thisTitle = fig.suptitle('{}: IPPA {}'.format(thisGroupKey, thisIPPA))
        HWP_0_str, HWP_1_str, HWP_2_str, HWP_3_str = (
            ['HWP {}'.format(hwp) for hwp in IPPA_to_HWP[thisIPPA]]
        )
        HWP_0_Label.set_text(HWP_0_str)
        HWP_1_Label.set_text(HWP_1_str)
        HWP_2_Label.set_text(HWP_2_str)
        HWP_3_Label.set_text(HWP_3_str)
        ###############################
        # Update time series plot
        ###############################
        # Grab the group ID for the current group/IPPA
        thisGroupID = int(thisGroupKey.split('_')[-1][1:])
        # Locate the MJD and background values for the current group/IPPA
        thisGroupIPPAbool = np.logical_and(
            fileIndex['GROUP_ID'] == thisGroupID,
            fileIndex['IPPA'] == thisIPPA
        )
        thisAbool = np.logical_and(
            thisGroupIPPAbool,
            fileIndex['AB'] == 'A'
        )
        thisBbool = np.logical_and(
            thisGroupIPPAbool,
            fileIndex['AB'] == 'B'
        )
        thisAinds = np.where(thisAbool)
        thisBinds = np.where(thisBbool)
        thisAmjd = fileIndex['MJD'][thisAinds]
        thisBmjd = fileIndex['MJD'][thisBinds]
        # Estimate the first time stamp (0 when either beam has no rows,
        # since np.min raises on an empty selection)
        try:
            mjd0 = np.min([np.min(thisAmjd), np.min(thisBmjd)])
        except Exception:
            mjd0 = 0
        # Convert MJD offsets to seconds since the first exposure
        thisAmjd -= mjd0
        thisBmjd -= mjd0
        thisAmjd *= 24*60*60
        thisBmjd *= 24*60*60
        thisAbkg = fileIndex['BACKGROUND'][thisAinds]
        thisBbkg = fileIndex['BACKGROUND'][thisBinds]
        # Filter-dependent display limits for the background counts
        thisFilter = np.unique(fileIndex[thisGroupIPPAbool]['FILTER'].data)[0]
        if thisFilter == 'H':
            ylims = (600, 2100)
        if thisFilter == 'Ks':
            ylims = (400, 1000)
        # Plot the background values (B beam in blue, A beam in red)
        ax4.cla()
        ax4.plot(thisBmjd, thisBbkg, marker='o', color='b')
        ax4.plot(thisAmjd, thisAbkg, marker='o', color='r')
        plt.setp(ax4.get_xticklabels(), fontsize = 6)
        plt.setp(ax4.get_yticklabels(), fontsize = 6)
        ax4.set_ylim((ylims))
        ax4.set_ylabel('Background Counts [ADU]')
        ax4.set_xlabel('Time [sec]')
        # Update the display
        fig.canvas.draw()
#******************************************************************************
# This script will run the mask building step of the pyPol reduction
#******************************************************************************
# Build the viewer figure: four shared-axis image panels on the left
# half (2x4 grid) plus one time-series panel spanning the right half.
fig = plt.figure(figsize=(18, 9))
# First panel: hide x tick labels, keep small y tick labels
ax0 = plt.subplot(2, 4, 1)
plt.setp(ax0.get_xticklabels(), visible=False)
plt.setp(ax0.get_yticklabels(), fontsize=6)
# Second panel: hide both tick label sets
ax1 = plt.subplot(2, 4, 2, sharey=ax0, sharex=ax0)
plt.setp(ax1.get_xticklabels(), visible=False)
plt.setp(ax1.get_yticklabels(), visible=False)
# Third panel: both tick label sets visible
ax2 = plt.subplot(2, 4, 5, sharey=ax0, sharex=ax0)
plt.setp(ax2.get_xticklabels(), fontsize=6)
plt.setp(ax2.get_yticklabels(), fontsize=6)
# Fourth panel: hide y tick labels
ax3 = plt.subplot(2, 4, 6, sharey=ax0, sharex=ax0)
plt.setp(ax3.get_xticklabels(), fontsize=6)
plt.setp(ax3.get_yticklabels(), visible=False)
# Time-series panel, labeled along its right edge
ax4 = plt.subplot(1, 2, 2)
ax4.yaxis.set_label_position('right')
ax4.tick_params(axis='y',
    labelleft=False, labelright=True,
)
# Rescale the figure and set the spacing between panels
plt.subplots_adjust(left=0.04, bottom=0.04, right=0.95, top=0.96,
                    wspace=0.02, hspace=0.02)
axarr = [ax0, ax1, ax2, ax3, ax4]
# Initialize the group and IPPA indices and read the first HWP images
IPPA_num, group_num = 0, 0
thisGroupKey = groupKeyList[group_num]
thisIPPA = IPPAlist[IPPA_num]
thisHWPfiles = groupDict[thisGroupKey][thisIPPA]
HWP_0_img = ai.reduced.ReducedScience.read(thisHWPfiles[0])
HWP_1_img = ai.reduced.ReducedScience.read(thisHWPfiles[1])
HWP_2_img = ai.reduced.ReducedScience.read(thisHWPfiles[2])
HWP_3_img = ai.reduced.ReducedScience.read(thisHWPfiles[3])
# Populate each image panel
HWP_0_AxImg = HWP_0_img.show(axes=axarr[0], cmap='viridis',
                             vmin=0.95, vmax=1.05, noShow=True)
HWP_1_AxImg = HWP_1_img.show(axes=axarr[1], cmap='viridis',
                             vmin=0.95, vmax=1.05, noShow=True)
HWP_2_AxImg = HWP_2_img.show(axes=axarr[2], cmap='viridis',
                             vmin=0.95, vmax=1.05, noShow=True)
HWP_3_AxImg = HWP_3_img.show(axes=axarr[3], cmap='viridis',
                             vmin=0.95, vmax=1.05, noShow=True)
# Locate the MJD and background values for the current group/IPPA
thisGroupID = int(thisGroupKey.split('_')[-1][1:])
thisGroupIPPAbool = np.logical_and(
    fileIndex['GROUP_ID'] == thisGroupID,
    fileIndex['IPPA'] == thisIPPA
)
thisAbool = np.logical_and(
    thisGroupIPPAbool,
    fileIndex['AB'] == 'A'
)
thisBbool = np.logical_and(
    thisGroupIPPAbool,
    fileIndex['AB'] == 'B'
)
thisAinds = np.where(thisAbool)
thisBinds = np.where(thisBbool)
thisAmjd = fileIndex['MJD'][thisAinds]
thisBmjd = fileIndex['MJD'][thisBinds]
# Convert MJD offsets to seconds since the first exposure
mjd0 = np.min([np.min(thisAmjd), np.min(thisBmjd)])
thisAmjd -= mjd0
thisBmjd -= mjd0
thisAmjd *= 24*60*60
thisBmjd *= 24*60*60
thisAbkg = fileIndex['BACKGROUND'][thisAinds]
thisBbkg = fileIndex['BACKGROUND'][thisBinds]
# Filter-dependent display limits for the background counts
thisFilter = np.unique(fileIndex[thisGroupIPPAbool]['FILTER'].data)[0]
if thisFilter == 'H':
    ylims = (600, 2100)
if thisFilter == 'Ks':
    ylims = (400, 1000)
# Plot the background values (B beam in blue, A beam in red)
ax4.plot(thisBmjd, thisBbkg, marker='o', color='b')
ax4.plot(thisAmjd, thisAbkg, marker='o', color='r')
plt.setp(ax4.get_xticklabels(), fontsize=6)
plt.setp(ax4.get_yticklabels(), fontsize=6)
ax4.set_ylim((ylims))
# Add timeseries axis labels
ax4.set_ylabel('Background Counts [ADU]')
ax4.set_xlabel('Time [sec]')
# Add the figure title and the per-panel HWP label strings
thisTitle = fig.suptitle('{}: IPPA {}'.format(thisGroupKey, thisIPPA))
HWP_0_str, HWP_1_str, HWP_2_str, HWP_3_str = (
    ['HWP {}'.format(hwp) for hwp in IPPA_to_HWP[thisIPPA]]
)
HWP_0_Label = axarr[0].text(20, 875, HWP_0_str,
    color='black', backgroundcolor='white', size='medium')
HWP_1_Label = axarr[1].text(20, 875, HWP_1_str,
    color='black', backgroundcolor='white', size='medium')
HWP_2_Label = axarr[2].text(20, 875, HWP_2_str,
    color='black', backgroundcolor='white', size='medium')
HWP_3_Label = axarr[3].text(20, 875, HWP_3_str,
    color='black', backgroundcolor='white', size='medium')
# Connect the key handler and display the figure (blocks until closed)
cid1 = fig.canvas.mpl_connect('key_press_event', on_key)
plt.show()
# Disconnect the event handler and close the figure
fig.canvas.mpl_disconnect(cid1)
plt.close()
print('Done!')
| {
"repo_name": "jmontgom10/Mimir_pyPol",
"path": "03b_examineImagesAndMaskArtifacts.py",
"copies": "2",
"size": "15165",
"license": "mit",
"hash": -3801912826919651000,
"line_mean": 35.6304347826,
"line_max": 91,
"alpha_frac": 0.6098252555,
"autogenerated": false,
"ratio": 3.155430711610487,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4765255967110487,
"avg_score": null,
"num_lines": null
} |
"""Allows to configuration ecoal (esterownik.pl) pumps as switches."""
from __future__ import annotations
from homeassistant.components.switch import SwitchEntity
from . import AVAILABLE_PUMPS, DATA_ECOAL_BOILER
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up switches based on ecoal interface."""
    if discovery_info is None:
        return
    ecoal_contr = hass.data[DATA_ECOAL_BOILER]
    # One switch entity per pump id listed in the discovery payload.
    switches = [
        EcoalSwitch(ecoal_contr, AVAILABLE_PUMPS[pump_id], pump_id)
        for pump_id in discovery_info
    ]
    add_entities(switches, True)
class EcoalSwitch(SwitchEntity):
    """Representation of Ecoal switch."""

    def __init__(self, ecoal_contr, name, state_attr):
        """
        Initialize switch.

        Sets HA switch to state as read from controller.
        """
        self._ecoal_contr = ecoal_contr
        self._name = name
        self._state_attr = state_attr
        # The controller follows the convention that every status
        # attribute (status.<attr>) has a matching setter set_<attr>().
        self._contr_set_fun = getattr(self._ecoal_contr, f"set_{state_attr}")
        # Unknown until the first update() reads it from the controller.
        self._state = None

    @property
    def name(self) -> str | None:
        """Return the name of the switch."""
        return self._name

    def update(self):
        """Fetch new state data for the sensor.

        This is the only method that should fetch new data for Home Assistant.
        """
        self._state = getattr(
            self._ecoal_contr.get_cached_status(), self._state_attr
        )

    def invalidate_ecoal_cache(self):
        """Invalidate ecoal interface cache.

        Forces the next read from the ecoal interface to bypass the cache.
        """
        self._ecoal_contr.status = None

    @property
    def is_on(self) -> bool:
        """Return true if device is on."""
        return self._state

    def _switch(self, value: int) -> None:
        """Write *value* through the controller setter and drop the cache."""
        self._contr_set_fun(value)
        self.invalidate_ecoal_cache()

    def turn_on(self, **kwargs) -> None:
        """Turn the device on."""
        self._switch(1)

    def turn_off(self, **kwargs) -> None:
        """Turn the device off."""
        self._switch(0)
| {
"repo_name": "w1ll1am23/home-assistant",
"path": "homeassistant/components/ecoal_boiler/switch.py",
"copies": "5",
"size": "2333",
"license": "apache-2.0",
"hash": -8781881000518866000,
"line_mean": 30.1066666667,
"line_max": 78,
"alpha_frac": 0.6168024003,
"autogenerated": false,
"ratio": 3.674015748031496,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 75
} |
"""Allows to configuration ecoal (esterownik.pl) pumps as switches."""
from typing import Optional
from homeassistant.components.switch import SwitchEntity
from . import AVAILABLE_PUMPS, DATA_ECOAL_BOILER
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up switches based on ecoal interface."""
    if discovery_info is None:
        return
    ecoal_contr = hass.data[DATA_ECOAL_BOILER]
    # Create one switch entity for every discovered pump id.
    switches = [
        EcoalSwitch(ecoal_contr, AVAILABLE_PUMPS[pump_id], pump_id)
        for pump_id in discovery_info
    ]
    add_entities(switches, True)
class EcoalSwitch(SwitchEntity):
    """Representation of Ecoal switch."""

    def __init__(self, ecoal_contr, name, state_attr):
        """
        Initialize switch.

        Sets HA switch to state as read from controller.
        """
        self._ecoal_contr = ecoal_contr
        self._name = name
        self._state_attr = state_attr
        # The controller pairs every readable status attribute
        # (status.<attr>) with a setter named set_<attr>().
        self._contr_set_fun = getattr(self._ecoal_contr, f"set_{state_attr}")
        # Unknown until update() first reads the controller.
        self._state = None

    @property
    def name(self) -> Optional[str]:
        """Return the name of the switch."""
        return self._name

    def update(self):
        """Fetch new state data for the sensor.

        This is the only method that should fetch new data for Home Assistant.
        """
        self._state = getattr(
            self._ecoal_contr.get_cached_status(), self._state_attr
        )

    def invalidate_ecoal_cache(self):
        """Invalidate ecoal interface cache.

        Forces the next read from the ecoal interface to bypass the cache.
        """
        self._ecoal_contr.status = None

    @property
    def is_on(self) -> bool:
        """Return true if device is on."""
        return self._state

    def _switch(self, value: int) -> None:
        """Write *value* through the controller setter and drop the cache."""
        self._contr_set_fun(value)
        self.invalidate_ecoal_cache()

    def turn_on(self, **kwargs) -> None:
        """Turn the device on."""
        self._switch(1)

    def turn_off(self, **kwargs) -> None:
        """Turn the device off."""
        self._switch(0)
| {
"repo_name": "partofthething/home-assistant",
"path": "homeassistant/components/ecoal_boiler/switch.py",
"copies": "9",
"size": "2329",
"license": "mit",
"hash": 971214835752747500,
"line_mean": 30.0533333333,
"line_max": 78,
"alpha_frac": 0.6182911121,
"autogenerated": false,
"ratio": 3.679304897314376,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8797596009414377,
"avg_score": null,
"num_lines": null
} |
"""Allows to configuration ecoal (esterownik.pl) pumps as switches."""
import logging
from typing import Optional
from homeassistant.components.switch import SwitchDevice
from homeassistant.components.ecoal_boiler import (
DATA_ECOAL_BOILER, AVAILABLE_PUMPS, )
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['ecoal_boiler']
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up switches based on ecoal interface."""
    if discovery_info is None:
        return
    ecoal_contr = hass.data[DATA_ECOAL_BOILER]
    # One switch entity per pump id listed in the discovery payload.
    switches = [
        EcoalSwitch(ecoal_contr, AVAILABLE_PUMPS[pump_id], pump_id)
        for pump_id in discovery_info
    ]
    add_entities(switches, True)
class EcoalSwitch(SwitchDevice):
    """Representation of Ecoal switch."""

    def __init__(self, ecoal_contr, name, state_attr):
        """
        Initialize switch.

        Sets HA switch to state as read from controller.
        """
        self._ecoal_contr = ecoal_contr
        self._name = name
        self._state_attr = state_attr
        # The controller pairs every readable status attribute
        # (status.<attr>) with a setter named set_<attr>().
        self._contr_set_fun = getattr(self._ecoal_contr, "set_" + state_attr)
        # Unknown until update() first reads the controller.
        self._state = None

    @property
    def name(self) -> Optional[str]:
        """Return the name of the switch."""
        return self._name

    def update(self):
        """Fetch new state data for the sensor.

        This is the only method that should fetch new data for Home Assistant.
        """
        self._state = getattr(
            self._ecoal_contr.get_cached_status(), self._state_attr
        )

    def invalidate_ecoal_cache(self):
        """Invalidate ecoal interface cache.

        Forces the next read from the ecoal interface to bypass the cache.
        """
        self._ecoal_contr.status = None

    @property
    def is_on(self) -> bool:
        """Return true if device is on."""
        return self._state

    def _switch(self, value):
        """Write *value* through the controller setter and drop the cache."""
        self._contr_set_fun(value)
        self.invalidate_ecoal_cache()

    def turn_on(self, **kwargs) -> None:
        """Turn the device on."""
        self._switch(1)

    def turn_off(self, **kwargs) -> None:
        """Turn the device off."""
        self._switch(0)
| {
"repo_name": "HydrelioxGitHub/home-assistant",
"path": "homeassistant/components/ecoal_boiler/switch.py",
"copies": "2",
"size": "2460",
"license": "apache-2.0",
"hash": 518893972830391360,
"line_mean": 29.75,
"line_max": 78,
"alpha_frac": 0.6239837398,
"autogenerated": false,
"ratio": 3.6552748885586923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5279258628358693,
"avg_score": null,
"num_lines": null
} |
"""Allows to configuration ecoal (esterownik.pl) pumps as switches."""
import logging
from typing import Optional
from homeassistant.components.switch import SwitchDevice
from . import AVAILABLE_PUMPS, DATA_ECOAL_BOILER
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['ecoal_boiler']
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up switches based on ecoal interface."""
    if discovery_info is None:
        return
    ecoal_contr = hass.data[DATA_ECOAL_BOILER]
    # Create one switch entity for every discovered pump id.
    switches = [
        EcoalSwitch(ecoal_contr, AVAILABLE_PUMPS[pump_id], pump_id)
        for pump_id in discovery_info
    ]
    add_entities(switches, True)
class EcoalSwitch(SwitchDevice):
    """Representation of Ecoal switch."""

    def __init__(self, ecoal_contr, name, state_attr):
        """
        Initialize switch.

        Sets HA switch to state as read from controller.
        """
        self._ecoal_contr = ecoal_contr
        self._name = name
        self._state_attr = state_attr
        # The controller follows the convention that every status
        # attribute (status.<attr>) has a matching setter set_<attr>().
        self._contr_set_fun = getattr(self._ecoal_contr, "set_" + state_attr)
        # Unknown until update() first reads the controller.
        self._state = None

    @property
    def name(self) -> Optional[str]:
        """Return the name of the switch."""
        return self._name

    def update(self):
        """Fetch new state data for the sensor.

        This is the only method that should fetch new data for Home Assistant.
        """
        self._state = getattr(
            self._ecoal_contr.get_cached_status(), self._state_attr
        )

    def invalidate_ecoal_cache(self):
        """Invalidate ecoal interface cache.

        Forces the next read from the ecoal interface to bypass the cache.
        """
        self._ecoal_contr.status = None

    @property
    def is_on(self) -> bool:
        """Return true if device is on."""
        return self._state

    def _switch(self, value):
        """Write *value* through the controller setter and drop the cache."""
        self._contr_set_fun(value)
        self.invalidate_ecoal_cache()

    def turn_on(self, **kwargs) -> None:
        """Turn the device on."""
        self._switch(1)

    def turn_off(self, **kwargs) -> None:
        """Turn the device off."""
        self._switch(0)
| {
"repo_name": "jamespcole/home-assistant",
"path": "homeassistant/components/ecoal_boiler/switch.py",
"copies": "1",
"size": "2416",
"license": "apache-2.0",
"hash": -5015942588316477000,
"line_mean": 29.2,
"line_max": 78,
"alpha_frac": 0.6212748344,
"autogenerated": false,
"ratio": 3.655068078668684,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9776342913068684,
"avg_score": 0,
"num_lines": 80
} |
"""Allows to configuration ecoal (esterownik.pl) pumps as switches."""
import logging
from typing import Optional
from homeassistant.components.switch import SwitchEntity
from . import AVAILABLE_PUMPS, DATA_ECOAL_BOILER
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up switches based on ecoal interface."""
    if discovery_info is None:
        return
    ecoal_contr = hass.data[DATA_ECOAL_BOILER]
    # One switch entity per pump id listed in the discovery payload.
    switches = [
        EcoalSwitch(ecoal_contr, AVAILABLE_PUMPS[pump_id], pump_id)
        for pump_id in discovery_info
    ]
    add_entities(switches, True)
class EcoalSwitch(SwitchEntity):
    """Representation of Ecoal switch."""

    def __init__(self, ecoal_contr, name, state_attr):
        """
        Initialize switch.

        Sets HA switch to state as read from controller.
        """
        self._ecoal_contr = ecoal_contr
        self._name = name
        self._state_attr = state_attr
        # The controller pairs every readable status attribute
        # (status.<attr>) with a setter named set_<attr>().
        self._contr_set_fun = getattr(self._ecoal_contr, f"set_{state_attr}")
        # Unknown until update() first reads the controller.
        self._state = None

    @property
    def name(self) -> Optional[str]:
        """Return the name of the switch."""
        return self._name

    def update(self):
        """Fetch new state data for the sensor.

        This is the only method that should fetch new data for Home Assistant.
        """
        self._state = getattr(
            self._ecoal_contr.get_cached_status(), self._state_attr
        )

    def invalidate_ecoal_cache(self):
        """Invalidate ecoal interface cache.

        Forces the next read from the ecoal interface to bypass the cache.
        """
        self._ecoal_contr.status = None

    @property
    def is_on(self) -> bool:
        """Return true if device is on."""
        return self._state

    def _switch(self, value: int) -> None:
        """Write *value* through the controller setter and drop the cache."""
        self._contr_set_fun(value)
        self.invalidate_ecoal_cache()

    def turn_on(self, **kwargs) -> None:
        """Turn the device on."""
        self._switch(1)

    def turn_off(self, **kwargs) -> None:
        """Turn the device off."""
        self._switch(0)
| {
"repo_name": "pschmitt/home-assistant",
"path": "homeassistant/components/ecoal_boiler/switch.py",
"copies": "7",
"size": "2383",
"license": "apache-2.0",
"hash": 4496531698101095000,
"line_mean": 29.5512820513,
"line_max": 78,
"alpha_frac": 0.6206462442,
"autogenerated": false,
"ratio": 3.6831530139103554,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7803799258110355,
"avg_score": null,
"num_lines": null
} |
"""Allows to configure a switch using BeagleBone Black GPIO."""
import logging
import voluptuous as vol
from homeassistant.components import bbb_gpio
from homeassistant.components.switch import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, DEVICE_DEFAULT_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import ToggleEntity
_LOGGER = logging.getLogger(__name__)
CONF_PINS = "pins"
CONF_INITIAL = "initial"
CONF_INVERT_LOGIC = "invert_logic"
PIN_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_INITIAL, default=False): cv.boolean,
vol.Optional(CONF_INVERT_LOGIC, default=False): cv.boolean,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_PINS, default={}): vol.Schema({cv.string: PIN_SCHEMA})}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the BeagleBone Black GPIO devices."""
    # One switch entity per configured pin.
    pins = config[CONF_PINS]
    add_entities(
        [BBBGPIOSwitch(pin, params) for pin, params in pins.items()]
    )
class BBBGPIOSwitch(ToggleEntity):
    """Representation of a BeagleBone Black GPIO."""

    def __init__(self, pin, params):
        """Initialize the pin."""
        self._pin = pin
        self._name = params[CONF_NAME] or DEVICE_DEFAULT_NAME
        self._state = params[CONF_INITIAL]
        self._invert_logic = params[CONF_INVERT_LOGIC]
        # Pin levels for logical on/off, honoring invert_logic.
        on_level = 0 if self._invert_logic else 1
        off_level = 1 - on_level
        bbb_gpio.setup_output(self._pin)
        # Drive the pin so the hardware matches the configured initial state.
        bbb_gpio.write_output(
            self._pin, off_level if self._state is False else on_level
        )

    @property
    def name(self):
        """Return the name of the switch."""
        return self._name

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state

    def _apply(self, state):
        """Drive the pin to logical *state* and push the update to HA."""
        if state:
            level = 0 if self._invert_logic else 1
        else:
            level = 1 if self._invert_logic else 0
        bbb_gpio.write_output(self._pin, level)
        self._state = state
        self.schedule_update_ha_state()

    def turn_on(self, **kwargs):
        """Turn the device on."""
        self._apply(True)

    def turn_off(self, **kwargs):
        """Turn the device off."""
        self._apply(False)
| {
"repo_name": "pschmitt/home-assistant",
"path": "homeassistant/components/bbb_gpio/switch.py",
"copies": "7",
"size": "2444",
"license": "apache-2.0",
"hash": -6166431733254164000,
"line_mean": 28.4457831325,
"line_max": 78,
"alpha_frac": 0.647299509,
"autogenerated": false,
"ratio": 3.6751879699248122,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7822487478924812,
"avg_score": null,
"num_lines": null
} |
"""Allows to configure a switch using BeagleBone Black GPIO."""
import logging
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA
from homeassistant.components import bbb_gpio
from homeassistant.const import DEVICE_DEFAULT_NAME, CONF_NAME
from homeassistant.helpers.entity import ToggleEntity
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
# Configuration keys: "pins" maps a pin identifier to per-pin options.
CONF_PINS = "pins"
CONF_INITIAL = "initial"
CONF_INVERT_LOGIC = "invert_logic"
# Per-pin options; initial state and inversion flag default to False.
PIN_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_NAME): cv.string,
        vol.Optional(CONF_INITIAL, default=False): cv.boolean,
        vol.Optional(CONF_INVERT_LOGIC, default=False): cv.boolean,
    }
)
# Platform schema: each pin identifier string maps to a PIN_SCHEMA entry.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {vol.Required(CONF_PINS, default={}): vol.Schema({cv.string: PIN_SCHEMA})}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the BeagleBone Black GPIO devices."""
    pin_config = config.get(CONF_PINS)
    add_entities(
        [BBBGPIOSwitch(pin_id, options) for pin_id, options in pin_config.items()]
    )
class BBBGPIOSwitch(ToggleEntity):
    """Representation of a BeagleBone Black GPIO."""
    def __init__(self, pin, params):
        """Initialize the pin.

        pin: GPIO pin identifier as configured under "pins".
        params: mapping validated against PIN_SCHEMA (name, initial,
        invert_logic); schema defaults make .get() safe here.
        """
        self._pin = pin
        self._name = params.get(CONF_NAME) or DEVICE_DEFAULT_NAME
        self._state = params.get(CONF_INITIAL)
        self._invert_logic = params.get(CONF_INVERT_LOGIC)
        # Configure the pin as an output, then drive it to the configured
        # initial level, honoring active-low wiring via invert_logic.
        bbb_gpio.setup_output(self._pin)
        if self._state is False:
            bbb_gpio.write_output(self._pin, 1 if self._invert_logic else 0)
        else:
            bbb_gpio.write_output(self._pin, 0 if self._invert_logic else 1)
    @property
    def name(self):
        """Return the name of the switch."""
        return self._name
    @property
    def should_poll(self):
        """No polling needed."""
        return False
    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state
    def turn_on(self, **kwargs):
        """Turn the device on."""
        bbb_gpio.write_output(self._pin, 0 if self._invert_logic else 1)
        self._state = True
        self.schedule_update_ha_state()
    def turn_off(self, **kwargs):
        """Turn the device off."""
        bbb_gpio.write_output(self._pin, 1 if self._invert_logic else 0)
        self._state = False
        self.schedule_update_ha_state()
| {
"repo_name": "Cinntax/home-assistant",
"path": "homeassistant/components/bbb_gpio/switch.py",
"copies": "4",
"size": "2460",
"license": "apache-2.0",
"hash": -993117798658789500,
"line_mean": 28.6385542169,
"line_max": 78,
"alpha_frac": 0.6479674797,
"autogenerated": false,
"ratio": 3.6552748885586923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6303242368258692,
"avg_score": null,
"num_lines": null
} |
"""Allows to configure a switch using BeagleBone Black GPIO."""
import voluptuous as vol
from homeassistant.components import bbb_gpio
from homeassistant.components.switch import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, DEVICE_DEFAULT_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import ToggleEntity
# Configuration keys: "pins" maps a pin identifier to per-pin options.
CONF_PINS = "pins"
CONF_INITIAL = "initial"
CONF_INVERT_LOGIC = "invert_logic"
# Per-pin options; initial state and inversion flag default to False.
PIN_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_NAME): cv.string,
        vol.Optional(CONF_INITIAL, default=False): cv.boolean,
        vol.Optional(CONF_INVERT_LOGIC, default=False): cv.boolean,
    }
)
# Platform schema: each pin identifier string maps to a PIN_SCHEMA entry.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {vol.Required(CONF_PINS, default={}): vol.Schema({cv.string: PIN_SCHEMA})}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the BeagleBone Black GPIO devices."""
    add_entities(
        [BBBGPIOSwitch(pin, params) for pin, params in config[CONF_PINS].items()]
    )
class BBBGPIOSwitch(ToggleEntity):
    """Representation of a BeagleBone Black GPIO."""
    def __init__(self, pin, params):
        """Initialize the pin.

        pin: GPIO pin identifier as configured under "pins".
        params: mapping validated against PIN_SCHEMA (name, initial,
        invert_logic).
        """
        self._pin = pin
        self._name = params[CONF_NAME] or DEVICE_DEFAULT_NAME
        self._state = params[CONF_INITIAL]
        self._invert_logic = params[CONF_INVERT_LOGIC]
        # Configure the pin as an output, then drive it to the configured
        # initial level, honoring active-low wiring via invert_logic.
        bbb_gpio.setup_output(self._pin)
        if self._state is False:
            bbb_gpio.write_output(self._pin, 1 if self._invert_logic else 0)
        else:
            bbb_gpio.write_output(self._pin, 0 if self._invert_logic else 1)
    @property
    def name(self):
        """Return the name of the switch."""
        return self._name
    @property
    def should_poll(self):
        """No polling needed."""
        return False
    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state
    def turn_on(self, **kwargs):
        """Turn the device on."""
        bbb_gpio.write_output(self._pin, 0 if self._invert_logic else 1)
        self._state = True
        self.schedule_update_ha_state()
    def turn_off(self, **kwargs):
        """Turn the device off."""
        bbb_gpio.write_output(self._pin, 1 if self._invert_logic else 0)
        self._state = False
        self.schedule_update_ha_state()
| {
"repo_name": "mezz64/home-assistant",
"path": "homeassistant/components/bbb_gpio/switch.py",
"copies": "14",
"size": "2389",
"license": "apache-2.0",
"hash": 2679275547850851300,
"line_mean": 29.2405063291,
"line_max": 78,
"alpha_frac": 0.645876936,
"autogenerated": false,
"ratio": 3.6753846153846155,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""Allows to configure a switch using RPi GPIO."""
import logging
import voluptuous as vol
from homeassistant.components import rpi_gpio
from homeassistant.components.switch import PLATFORM_SCHEMA
from homeassistant.const import DEVICE_DEFAULT_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.reload import setup_reload_service
from . import DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)
# Configuration keys for this platform.
CONF_PULL_MODE = "pull_mode"
CONF_PORTS = "ports"
CONF_INVERT_LOGIC = "invert_logic"
DEFAULT_INVERT_LOGIC = False
# "ports" maps a GPIO port number to a friendly name.
_SWITCHES_SCHEMA = vol.Schema({cv.positive_int: cv.string})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_PORTS): _SWITCHES_SCHEMA,
        vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Raspberry PI GPIO devices."""
    setup_reload_service(hass, DOMAIN, PLATFORMS)
    invert_logic = config.get(CONF_INVERT_LOGIC)
    add_entities(
        [
            RPiGPIOSwitch(name, port, invert_logic)
            for port, name in config.get(CONF_PORTS).items()
        ]
    )
class RPiGPIOSwitch(ToggleEntity):
    """Representation of a Raspberry Pi GPIO."""

    def __init__(self, name, port, invert_logic):
        """Initialize the pin and drive it to the OFF level."""
        self._name = name or DEVICE_DEFAULT_NAME
        self._port = port
        self._invert_logic = invert_logic
        self._state = False
        rpi_gpio.setup_output(self._port)
        rpi_gpio.write_output(self._port, 1 if self._invert_logic else 0)

    @property
    def name(self):
        """Return the name of the switch."""
        return self._name

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state

    def _set(self, state):
        """Write the pin for logical *state* and push the update to HA."""
        # Raw level is the XOR of the logical state with the inversion flag.
        rpi_gpio.write_output(self._port, int(state != self._invert_logic))
        self._state = state
        self.schedule_update_ha_state()

    def turn_on(self, **kwargs):
        """Turn the device on."""
        self._set(True)

    def turn_off(self, **kwargs):
        """Turn the device off."""
        self._set(False)
| {
"repo_name": "tchellomello/home-assistant",
"path": "homeassistant/components/rpi_gpio/switch.py",
"copies": "1",
"size": "2426",
"license": "apache-2.0",
"hash": -3333757859707854300,
"line_mean": 27.880952381,
"line_max": 82,
"alpha_frac": 0.6644682605,
"autogenerated": false,
"ratio": 3.670196671709531,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4834664932209531,
"avg_score": null,
"num_lines": null
} |
"""Allows to configure a switch using RPi GPIO."""
import logging
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA
from homeassistant.components import rpi_gpio
from homeassistant.const import DEVICE_DEFAULT_NAME
from homeassistant.helpers.entity import ToggleEntity
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
# Configuration keys for this platform.
CONF_PULL_MODE = "pull_mode"
CONF_PORTS = "ports"
CONF_INVERT_LOGIC = "invert_logic"
DEFAULT_INVERT_LOGIC = False
# "ports" maps a GPIO port number to a friendly name.
_SWITCHES_SCHEMA = vol.Schema({cv.positive_int: cv.string})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_PORTS): _SWITCHES_SCHEMA,
        vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Raspberry PI GPIO devices."""
    invert_logic = config.get(CONF_INVERT_LOGIC)
    add_entities(
        [
            RPiGPIOSwitch(name, port, invert_logic)
            for port, name in config.get(CONF_PORTS).items()
        ]
    )
class RPiGPIOSwitch(ToggleEntity):
    """Representation of a Raspberry Pi GPIO."""
    def __init__(self, name, port, invert_logic):
        """Initialize the pin.

        name: friendly name (falls back to DEVICE_DEFAULT_NAME).
        port: GPIO port number.
        invert_logic: True for active-low wiring (raw levels swapped).
        """
        self._name = name or DEVICE_DEFAULT_NAME
        self._port = port
        self._invert_logic = invert_logic
        self._state = False
        # Configure as output and start in the OFF position.
        rpi_gpio.setup_output(self._port)
        rpi_gpio.write_output(self._port, 1 if self._invert_logic else 0)
    @property
    def name(self):
        """Return the name of the switch."""
        return self._name
    @property
    def should_poll(self):
        """No polling needed."""
        return False
    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state
    def turn_on(self, **kwargs):
        """Turn the device on."""
        rpi_gpio.write_output(self._port, 0 if self._invert_logic else 1)
        self._state = True
        self.schedule_update_ha_state()
    def turn_off(self, **kwargs):
        """Turn the device off."""
        rpi_gpio.write_output(self._port, 1 if self._invert_logic else 0)
        self._state = False
        self.schedule_update_ha_state()
| {
"repo_name": "Cinntax/home-assistant",
"path": "homeassistant/components/rpi_gpio/switch.py",
"copies": "4",
"size": "2279",
"license": "apache-2.0",
"hash": -2715570998050265600,
"line_mean": 28.2179487179,
"line_max": 82,
"alpha_frac": 0.656428258,
"autogenerated": false,
"ratio": 3.6581059390048156,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6314534197004816,
"avg_score": null,
"num_lines": null
} |
"""Allows to configure a switch using RPi GPIO."""
import logging
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchDevice
from homeassistant.const import CONF_HOST, DEVICE_DEFAULT_NAME
import homeassistant.helpers.config_validation as cv
from . import CONF_INVERT_LOGIC, DEFAULT_INVERT_LOGIC
from .. import remote_rpi_gpio
_LOGGER = logging.getLogger(__name__)
# "ports" maps a GPIO port number to a friendly name.
CONF_PORTS = "ports"
_SENSORS_SCHEMA = vol.Schema({cv.positive_int: cv.string})
# Platform schema: remote host address plus the port mapping.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Required(CONF_PORTS): _SENSORS_SCHEMA,
        vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Remote Raspberry PI GPIO devices."""
    address = config[CONF_HOST]
    invert_logic = config[CONF_INVERT_LOGIC]
    switches = []
    for port, name in config[CONF_PORTS].items():
        try:
            led = remote_rpi_gpio.setup_output(address, port, invert_logic)
        except (ValueError, IndexError, KeyError, OSError):
            # A single failing port aborts the whole platform setup and no
            # entities are added.
            return
        switches.append(RemoteRPiGPIOSwitch(name, led, invert_logic))
    add_entities(switches)
class RemoteRPiGPIOSwitch(SwitchDevice):
    """Representation of a Remote Raspberry Pi GPIO."""
    def __init__(self, name, led, invert_logic):
        """Initialize the pin.

        led: handle returned by remote_rpi_gpio.setup_output().
        invert_logic: True for active-low wiring (raw levels swapped).
        """
        self._name = name or DEVICE_DEFAULT_NAME
        self._state = False
        self._invert_logic = invert_logic
        self._switch = led
    @property
    def name(self):
        """Return the name of the switch."""
        return self._name
    @property
    def should_poll(self):
        """No polling needed."""
        return False
    @property
    def assumed_state(self):
        """If unable to access real state of the entity."""
        return True
    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state
    def turn_on(self, **kwargs):
        """Turn the device on."""
        remote_rpi_gpio.write_output(self._switch, 0 if self._invert_logic else 1)
        self._state = True
        self.schedule_update_ha_state()
    def turn_off(self, **kwargs):
        """Turn the device off."""
        remote_rpi_gpio.write_output(self._switch, 1 if self._invert_logic else 0)
        self._state = False
        self.schedule_update_ha_state()
| {
"repo_name": "postlund/home-assistant",
"path": "homeassistant/components/remote_rpi_gpio/switch.py",
"copies": "4",
"size": "2510",
"license": "apache-2.0",
"hash": -2112487089107797000,
"line_mean": 28.1860465116,
"line_max": 82,
"alpha_frac": 0.6438247012,
"autogenerated": false,
"ratio": 3.7687687687687688,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6412593469968768,
"avg_score": null,
"num_lines": null
} |
"""Allows to configure a switch using RPi GPIO."""
import logging
import voluptuous as vol
from homeassistant.components.switch import SwitchDevice, PLATFORM_SCHEMA
from homeassistant.const import DEVICE_DEFAULT_NAME, CONF_HOST
import homeassistant.helpers.config_validation as cv
from . import CONF_INVERT_LOGIC, DEFAULT_INVERT_LOGIC
from .. import remote_rpi_gpio
_LOGGER = logging.getLogger(__name__)
# "ports" maps a GPIO port number to a friendly name.
CONF_PORTS = 'ports'
_SENSORS_SCHEMA = vol.Schema({
    cv.positive_int: cv.string,
})
# Platform schema: remote host address plus the port mapping.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Required(CONF_PORTS): _SENSORS_SCHEMA,
    vol.Optional(CONF_INVERT_LOGIC,
                 default=DEFAULT_INVERT_LOGIC): cv.boolean
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Remote Raspberry PI GPIO devices."""
    address = config[CONF_HOST]
    invert_logic = config[CONF_INVERT_LOGIC]
    switches = []
    for port, name in config[CONF_PORTS].items():
        try:
            led = remote_rpi_gpio.setup_output(address, port, invert_logic)
        except (ValueError, IndexError, KeyError, IOError):
            # A single failing port aborts the whole platform setup.
            return
        switches.append(RemoteRPiGPIOSwitch(name, led, invert_logic))
    add_entities(switches)
class RemoteRPiGPIOSwitch(SwitchDevice):
    """Representation of a Remote Raspberry Pi GPIO."""

    def __init__(self, name, led, invert_logic):
        """Initialize the pin."""
        self._name = name or DEVICE_DEFAULT_NAME
        self._state = False
        self._invert_logic = invert_logic
        self._switch = led

    @property
    def name(self):
        """Return the name of the switch."""
        return self._name

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def assumed_state(self):
        """If unable to access real state of the entity."""
        return True

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state

    def turn_on(self, **kwargs):
        """Turn the device on."""
        remote_rpi_gpio.write_output(self._switch, 0 if self._invert_logic else 1)
        self._state = True
        self.schedule_update_ha_state()

    def turn_off(self, **kwargs):
        """Turn the device off."""
        remote_rpi_gpio.write_output(self._switch, 1 if self._invert_logic else 0)
        self._state = False
        self.schedule_update_ha_state()
| {
"repo_name": "jabesq/home-assistant",
"path": "homeassistant/components/remote_rpi_gpio/switch.py",
"copies": "2",
"size": "2603",
"license": "apache-2.0",
"hash": 3783124849169205000,
"line_mean": 27.6043956044,
"line_max": 73,
"alpha_frac": 0.6208221283,
"autogenerated": false,
"ratio": 3.890881913303438,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5511704041603438,
"avg_score": null,
"num_lines": null
} |
"""Allows to configure a switch using RPi GPIO."""
import voluptuous as vol
from homeassistant.components import rpi_gpio
from homeassistant.components.switch import PLATFORM_SCHEMA
from homeassistant.const import DEVICE_DEFAULT_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.reload import setup_reload_service
from . import DOMAIN, PLATFORMS
# Configuration keys for this platform.
CONF_PULL_MODE = "pull_mode"
CONF_PORTS = "ports"
CONF_INVERT_LOGIC = "invert_logic"
DEFAULT_INVERT_LOGIC = False
# "ports" maps a GPIO port number to a friendly name.
_SWITCHES_SCHEMA = vol.Schema({cv.positive_int: cv.string})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_PORTS): _SWITCHES_SCHEMA,
        vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Raspberry PI GPIO devices."""
    setup_reload_service(hass, DOMAIN, PLATFORMS)
    invert_logic = config.get(CONF_INVERT_LOGIC)
    port_names = config.get(CONF_PORTS)
    add_entities(
        [RPiGPIOSwitch(name, port, invert_logic) for port, name in port_names.items()]
    )
class RPiGPIOSwitch(ToggleEntity):
    """Representation of a Raspberry Pi GPIO."""

    def __init__(self, name, port, invert_logic):
        """Initialize the pin."""
        self._name = name or DEVICE_DEFAULT_NAME
        self._port = port
        self._invert_logic = invert_logic
        self._state = False
        rpi_gpio.setup_output(self._port)
        # Start in the OFF position; raw levels swap under invert_logic.
        off_level = 1 if self._invert_logic else 0
        rpi_gpio.write_output(self._port, off_level)

    @property
    def name(self):
        """Return the name of the switch."""
        return self._name

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state

    def turn_on(self, **kwargs):
        """Turn the device on."""
        on_level = 0 if self._invert_logic else 1
        rpi_gpio.write_output(self._port, on_level)
        self._state = True
        self.schedule_update_ha_state()

    def turn_off(self, **kwargs):
        """Turn the device off."""
        off_level = 1 if self._invert_logic else 0
        rpi_gpio.write_output(self._port, off_level)
        self._state = False
        self.schedule_update_ha_state()
| {
"repo_name": "adrienbrault/home-assistant",
"path": "homeassistant/components/rpi_gpio/switch.py",
"copies": "7",
"size": "2370",
"license": "mit",
"hash": 8812718546879752000,
"line_mean": 29,
"line_max": 82,
"alpha_frac": 0.6637130802,
"autogenerated": false,
"ratio": 3.668730650154799,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7832443730354799,
"avg_score": null,
"num_lines": null
} |
"""Allows to configure a switch using RPi GPIO."""
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import CONF_HOST, DEVICE_DEFAULT_NAME
import homeassistant.helpers.config_validation as cv
from . import CONF_INVERT_LOGIC, DEFAULT_INVERT_LOGIC
from .. import remote_rpi_gpio
# "ports" maps a GPIO port number to a friendly name.
CONF_PORTS = "ports"
_SENSORS_SCHEMA = vol.Schema({cv.positive_int: cv.string})
# Platform schema: remote host address plus the port mapping.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Required(CONF_PORTS): _SENSORS_SCHEMA,
        vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Remote Raspberry PI GPIO devices."""
    address = config[CONF_HOST]
    invert_logic = config[CONF_INVERT_LOGIC]
    switches = []
    for port, name in config[CONF_PORTS].items():
        try:
            led = remote_rpi_gpio.setup_output(address, port, invert_logic)
        except (ValueError, IndexError, KeyError, OSError):
            # A single failing port aborts the whole platform setup.
            return
        switches.append(RemoteRPiGPIOSwitch(name, led))
    add_entities(switches)
class RemoteRPiGPIOSwitch(SwitchEntity):
    """Representation of a Remote Raspberry Pi GPIO."""

    def __init__(self, name, led):
        """Initialize the pin."""
        self._name = name or DEVICE_DEFAULT_NAME
        self._state = False
        self._switch = led

    @property
    def name(self):
        """Return the name of the switch."""
        return self._name

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def assumed_state(self):
        """If unable to access real state of the entity."""
        return True

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state

    def _write(self, level, state):
        """Drive the remote output and record the new logical state."""
        remote_rpi_gpio.write_output(self._switch, level)
        self._state = state
        self.schedule_update_ha_state()

    def turn_on(self, **kwargs):
        """Turn the device on."""
        self._write(1, True)

    def turn_off(self, **kwargs):
        """Turn the device off."""
        self._write(0, False)
| {
"repo_name": "tboyce021/home-assistant",
"path": "homeassistant/components/remote_rpi_gpio/switch.py",
"copies": "14",
"size": "2327",
"license": "apache-2.0",
"hash": -5825141947692338000,
"line_mean": 27.7283950617,
"line_max": 82,
"alpha_frac": 0.638160722,
"autogenerated": false,
"ratio": 3.7899022801302933,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""Allows to configure custom shell commands to turn a value for a sensor."""
from collections.abc import Mapping
from datetime import timedelta
import json
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_COMMAND,
CONF_NAME,
CONF_UNIT_OF_MEASUREMENT,
CONF_VALUE_TEMPLATE,
STATE_UNKNOWN,
)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import template
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.reload import setup_reload_service
from . import check_output_or_log
from .const import CONF_COMMAND_TIMEOUT, DEFAULT_TIMEOUT, DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)
# Optional list of keys to copy from JSON command output into attributes.
CONF_JSON_ATTRIBUTES = "json_attributes"
DEFAULT_NAME = "Command Sensor"
# Re-run the command every 60 seconds.
SCAN_INTERVAL = timedelta(seconds=60)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_COMMAND): cv.string,
        vol.Optional(CONF_COMMAND_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
        vol.Optional(CONF_JSON_ATTRIBUTES): cv.ensure_list_csv,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
        vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Command Sensor."""
    setup_reload_service(hass, DOMAIN, PLATFORMS)
    value_template = config.get(CONF_VALUE_TEMPLATE)
    if value_template is not None:
        value_template.hass = hass
    data = CommandSensorData(
        hass, config.get(CONF_COMMAND), config.get(CONF_COMMAND_TIMEOUT)
    )
    sensor = CommandSensor(
        hass,
        data,
        config.get(CONF_NAME),
        config.get(CONF_UNIT_OF_MEASUREMENT),
        value_template,
        config.get(CONF_JSON_ATTRIBUTES),
    )
    add_entities([sensor], True)
class CommandSensor(Entity):
    """Representation of a sensor that is using shell commands."""

    def __init__(
        self, hass, data, name, unit_of_measurement, value_template, json_attributes
    ):
        """Initialize the sensor.

        data: CommandSensorData instance that runs the shell command.
        json_attributes: optional list of keys to copy from JSON command
            output into the entity's state attributes.
        """
        self._hass = hass
        self.data = data
        self._attributes = None
        self._json_attributes = json_attributes
        self._name = name
        self._state = None
        self._unit_of_measurement = unit_of_measurement
        self._value_template = value_template

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._unit_of_measurement

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return self._attributes

    def update(self):
        """Get the latest data and update the state."""
        self.data.update()
        value = self.data.value
        if self._json_attributes:
            self._attributes = {}
            if value:
                try:
                    json_dict = json.loads(value)
                    if isinstance(json_dict, Mapping):
                        # Keep only the configured attribute keys.
                        self._attributes = {
                            k: json_dict[k]
                            for k in self._json_attributes
                            if k in json_dict
                        }
                    else:
                        _LOGGER.warning("JSON result was not a dictionary")
                except ValueError:
                    _LOGGER.warning("Unable to parse output as JSON: %s", value)
            else:
                _LOGGER.warning("Empty reply found when expecting JSON data")
        if value is None:
            # BUGFIX: previously STATE_UNKNOWN was assigned to the local
            # variable ``value`` and then discarded, so the entity kept its
            # stale state when the command produced no output.
            self._state = STATE_UNKNOWN
        elif self._value_template is not None:
            self._state = self._value_template.render_with_possible_json_value(
                value, STATE_UNKNOWN
            )
        else:
            self._state = value
class CommandSensorData:
    """The class for handling the data retrieval."""

    def __init__(self, hass, command, command_timeout):
        """Initialize the data object."""
        self.value = None
        self.hass = hass
        self.command = command
        self.timeout = command_timeout

    def update(self):
        """Get the latest data with a shell command."""
        command = self.command
        if " " in command:
            prog, args = command.split(" ", 1)
            args_compiled = template.Template(args, self.hass)
        else:
            prog, args, args_compiled = command, None, None
        rendered_args = None
        if args_compiled:
            try:
                rendered_args = args_compiled.render({"arguments": args})
            except TemplateError as ex:
                _LOGGER.exception("Error rendering command template: %s", ex)
                return
        if rendered_args != args:
            # A template was rendered: rebuild the command line from it.
            command = f"{prog} {rendered_args}"
        _LOGGER.debug("Running command: %s", command)
        self.value = check_output_or_log(command, self.timeout)
| {
"repo_name": "partofthething/home-assistant",
"path": "homeassistant/components/command_line/sensor.py",
"copies": "1",
"size": "5597",
"license": "mit",
"hash": -3998006410111912000,
"line_mean": 30.9828571429,
"line_max": 86,
"alpha_frac": 0.6074682866,
"autogenerated": false,
"ratio": 4.403619197482297,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5511087484082297,
"avg_score": null,
"num_lines": null
} |
"""Allows to configure custom shell commands to turn a value for a sensor."""
import collections
from datetime import timedelta
import json
import logging
import shlex
import subprocess
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_COMMAND, CONF_NAME, CONF_UNIT_OF_MEASUREMENT, CONF_VALUE_TEMPLATE,
STATE_UNKNOWN)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import template
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
# Configuration keys and defaults.
CONF_COMMAND_TIMEOUT = 'command_timeout'
CONF_JSON_ATTRIBUTES = 'json_attributes'
DEFAULT_NAME = 'Command Sensor'
DEFAULT_TIMEOUT = 15
# Re-run the command every 60 seconds.
SCAN_INTERVAL = timedelta(seconds=60)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_COMMAND): cv.string,
    vol.Optional(CONF_COMMAND_TIMEOUT, default=DEFAULT_TIMEOUT):
        cv.positive_int,
    vol.Optional(CONF_JSON_ATTRIBUTES): cv.ensure_list_csv,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
    vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Command Sensor."""
    value_template = config.get(CONF_VALUE_TEMPLATE)
    if value_template is not None:
        value_template.hass = hass
    data = CommandSensorData(
        hass, config.get(CONF_COMMAND), config.get(CONF_COMMAND_TIMEOUT))
    sensor = CommandSensor(
        hass,
        data,
        config.get(CONF_NAME),
        config.get(CONF_UNIT_OF_MEASUREMENT),
        value_template,
        config.get(CONF_JSON_ATTRIBUTES),
    )
    add_entities([sensor], True)
class CommandSensor(Entity):
    """Representation of a sensor that is using shell commands."""

    def __init__(self, hass, data, name, unit_of_measurement, value_template,
                 json_attributes):
        """Initialize the sensor.

        data: CommandSensorData instance that runs the shell command.
        json_attributes: optional list of keys to copy from JSON command
            output into the entity's state attributes.
        """
        self._hass = hass
        self.data = data
        self._attributes = None
        self._json_attributes = json_attributes
        self._name = name
        self._state = None
        self._unit_of_measurement = unit_of_measurement
        self._value_template = value_template

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._unit_of_measurement

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return self._attributes

    def update(self):
        """Get the latest data and update the state."""
        # BUGFIX: ``collections.Mapping`` was a deprecated alias removed in
        # Python 3.10; use the canonical collections.abc location instead.
        from collections.abc import Mapping

        self.data.update()
        value = self.data.value
        if self._json_attributes:
            self._attributes = {}
            if value:
                try:
                    json_dict = json.loads(value)
                    if isinstance(json_dict, Mapping):
                        # Keep only the configured attribute keys.
                        self._attributes = {k: json_dict[k] for k in
                                            self._json_attributes
                                            if k in json_dict}
                    else:
                        _LOGGER.warning("JSON result was not a dictionary")
                except ValueError:
                    _LOGGER.warning(
                        "Unable to parse output as JSON: %s", value)
            else:
                _LOGGER.warning("Empty reply found when expecting JSON data")
        if value is None:
            # BUGFIX: previously STATE_UNKNOWN was assigned to the local
            # variable ``value`` and discarded, leaving a stale state.
            self._state = STATE_UNKNOWN
        elif self._value_template is not None:
            self._state = self._value_template.render_with_possible_json_value(
                value, STATE_UNKNOWN)
        else:
            self._state = value
class CommandSensorData:
    """The class for handling the data retrieval."""

    def __init__(self, hass, command, command_timeout):
        """Initialize the data object."""
        self.value = None
        self.hass = hass
        self.command = command
        self.timeout = command_timeout

    def update(self):
        """Get the latest data with a shell command."""
        command = self.command
        # NOTE: the previous implementation created a fresh local ``cache``
        # dict on every call, so its "command in cache" branch could never
        # be taken; that dead code has been removed.
        if ' ' not in command:
            prog = command
            args = None
            args_compiled = None
        else:
            prog, args = command.split(' ', 1)
            args_compiled = template.Template(args, self.hass)
        if args_compiled:
            try:
                args_to_render = {"arguments": args}
                rendered_args = args_compiled.render(args_to_render)
            except TemplateError as ex:
                _LOGGER.exception("Error rendering command template: %s", ex)
                return
        else:
            rendered_args = None
        if rendered_args == args:
            # No template used. default behavior
            shell = True
        else:
            # Template used. Construct the string used in the shell
            command = str(' '.join([prog] + shlex.split(rendered_args)))
            shell = True
        try:
            _LOGGER.debug("Running command: %s", command)
            # shell=True is intentional here: the component's purpose is to
            # run the user-configured shell command line verbatim.
            return_value = subprocess.check_output(
                command, shell=shell, timeout=self.timeout)
            self.value = return_value.strip().decode('utf-8')
        except subprocess.CalledProcessError:
            _LOGGER.error("Command failed: %s", command)
        except subprocess.TimeoutExpired:
            _LOGGER.error("Timeout for command: %s", command)
| {
"repo_name": "molobrakos/home-assistant",
"path": "homeassistant/components/command_line/sensor.py",
"copies": "7",
"size": "6018",
"license": "apache-2.0",
"hash": -761827776272045700,
"line_mean": 33.3885714286,
"line_max": 79,
"alpha_frac": 0.6041874377,
"autogenerated": false,
"ratio": 4.444608567208272,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 175
} |
"""Allows to control the bot via REST calls."""
import json
import logging
import os
import requests
import threading
import urllib.parse
from bottle import ServerAdapter, abort, request, route, run
from jwcrypto import jwk, jws, jwt
from bot.paths import CONFIG_PATH
from bot.paths import OIDC_API, USER_ID_API
# Regarding decoding:
# https://bottlepy.org/docs/dev/tutorial.html#introducing-formsdict
# >>> request.forms['city'] [or request.forms.get('city')]
# 'Göttingen' # An utf8 string provisionally decoded as ISO-8859-1 by the server
# >>> request.forms.city
# 'Göttingen' # The same string correctly re-encoded as utf8 by bottle
class StoppableWSGIRefServer(ServerAdapter):
    """Allows to programmatically shut down bottle server."""

    # True only after make_server() succeeded and serve_forever() is about
    # to run; stop() uses this to avoid shutting down a server that never
    # started (e.g. because the port was taken).
    isRunning = False

    def run(self, handler):
        """Start the server.

        Called by bottle with the WSGI handler; blocks in serve_forever()
        until stop() is invoked from another thread.
        """
        from wsgiref.simple_server import make_server, WSGIRequestHandler
        if self.quiet:
            # Suppress wsgiref's per-request logging when bottle runs quietly.
            class QuietHandler(WSGIRequestHandler):
                def log_request(*args, **kw): pass
            self.options['handler_class'] = QuietHandler
        try:
            self.server = make_server(self.host, self.port, handler, **self.options)
            self.isRunning = True
            # Blocks until self.server.shutdown() is called (see stop()).
            self.server.serve_forever()
        except OSError as err:
            if err.errno == 98:  # Address already in use (EADDRINUSE on Linux)
                self.isRunning = False
                logging.critical("Port {} already in use. Shutting down web server.".format(self.port))
            else:
                raise

    def stop(self):
        """Stop the server."""
        # self.server.server_close() <--- alternative but causes bad fd exception
        if self.isRunning:
            self.server.shutdown()
        else:
            logging.warning("Server is already shut down.")
class Singleton(type):
    """Metaclass that caches one instance per class.

    The first instantiation creates and stores the instance; every later
    call returns the cached object, ignoring new constructor arguments.
    """

    _instances = {}

    def __call__(cls, *args, **kwargs):
        """Return the cached instance for *cls*, creating it on first use."""
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
class WebAPI(object):
    """Create a REST API that allows control over a list of bots.

    Route handlers are registered with bottle at class-definition time
    (via the @route decorators) and are invoked by bottle without a
    ``self`` argument. Shared state (bot list, server, password, client
    ID) lives in module-level globals initialized by ``__init__``.
    """

    # NOTE(review): looks like this was meant to be ``__metaclass__ = Singleton``
    # (Python 2 metaclass hook). As written the name-mangled attribute
    # ``_WebAPI__metaclass`` has no effect -- confirm intent.
    __metaclass = Singleton

    def __init__(self, bots, lport, password=None):
        """Initiate Web API.

        Stores bots/server/port/password in module-level globals, reads the
        Twitch client ID from the first bot's config file, and starts the
        bottle server in a daemonless background thread.
        """
        global api_bots
        api_bots = bots
        global api_server
        api_server = StoppableWSGIRefServer(host='localhost', port=lport)
        global api_port
        api_port = lport
        global api_password
        api_password = password
        global clientID
        with open(CONFIG_PATH.format(bots[0].root), 'r', encoding="utf-8") as file:
            CONFIG = json.load(file)
            clientID = str(CONFIG['clientID'])
        threading.Thread(target=run, kwargs=dict(server=api_server,)).start()

    def stop(self):
        """Stop the server."""
        api_server.stop()

    @route('/hello')
    def hello():
        """Health-check endpoint; returns a fixed greeting."""
        return "Hello World!\n"

    @route('/bots', method='POST')
    def getBots():
        """Return list of all bots for given user."""
        WebAPI.checkIfFormExists(['user', 'auth'])
        username = urllib.parse.unquote(request.forms.user)
        auth = urllib.parse.unquote(request.forms.auth)
        if not WebAPI.hasUserPermission(username, auth):
            abort(403, "Bad authentication")
        logging.info("[API] [User: {}] /bots ".format(username))
        # Filter the global bot list down to those the user may access.
        bots = []
        for bot in api_bots:
            if WebAPI.hasBotPermission(username, bot):
                bots.append(bot)
        out = []
        for bot in bots:
            out.append(WebAPI.getBotName(bot))
        return json.dumps(out)

    @route('/files', method='POST')
    def getFiles():
        """Return list of all modifiable files for a given bot."""
        WebAPI.checkIfFormExists(['user', 'bot', 'auth'])
        username = urllib.parse.unquote(request.forms.user)
        botname = urllib.parse.unquote(request.forms.bot)
        auth = urllib.parse.unquote(request.forms.auth)
        bot = WebAPI.getBot(botname)
        if not WebAPI.hasUserPermission(username, auth):
            abort(403, "Bad authentication")
        if not WebAPI.hasBotPermission(username, bot):
            abort(403, "User doesn't have access to this bot.")
        logging.info("[API] [#{}] [User: {}] /files ".format(botname, username))
        # Only JSON files under the bot's data/ and configs/ dirs are exposed.
        data = os.listdir(bot.root + 'data')
        configs = os.listdir(bot.root + 'configs')
        out = []
        for d in data + configs:
            if '.json' in d:
                out.append(d)
        return json.dumps(out)

    @route('/file', method='POST')
    def getFile():
        """Return the content of a specific file."""
        WebAPI.checkIfFormExists(['user', 'bot', 'file', 'auth'])
        username = urllib.parse.unquote(request.forms.user)
        botname = urllib.parse.unquote(request.forms.bot)
        filename = urllib.parse.unquote(request.forms.file)
        auth = urllib.parse.unquote(request.forms.auth)
        bot = WebAPI.getBot(botname)
        if not WebAPI.hasUserPermission(username, auth):
            abort(403, "Bad authentication")
        if not WebAPI.hasBotPermission(username, bot):
            abort(403, "User doesn't have access to this bot.")
        # For security: strip any directory components to prevent path traversal.
        filename = os.path.split(filename)[1]
        logging.info("[API] [#{}] [User: {}] /file {} ".format(botname, username, filename))
        # Look in configs/ first, then data/; data/ wins if both exist.
        path = None
        if os.path.isfile(bot.root + 'configs/' + filename):
            path = bot.root + 'configs/' + filename
        if os.path.isfile(bot.root + 'data/' + filename):
            path = bot.root + 'data/' + filename
        if path is None:
            abort(404, "File \"" + filename + "\" not found.")
        with open(path) as fp:
            data = json.load(fp)
            return {'content': data}

    @route('/setfile', method='POST')
    def setFile():
        """Set the json of a specific file."""
        WebAPI.checkIfFormExists(['user', 'bot', 'file', 'content', 'auth'])
        username = urllib.parse.unquote(request.forms.user)
        botname = urllib.parse.unquote(request.forms.bot)
        filename = urllib.parse.unquote(request.forms.file)
        content = urllib.parse.unquote(request.forms.content)
        auth = urllib.parse.unquote(request.forms.auth)
        bot = WebAPI.getBot(botname)
        if not WebAPI.hasUserPermission(username, auth):
            abort(403, "Bad authentication")
        if not WebAPI.hasBotPermission(username, bot):
            abort(403, "User doesn't have access to this bot.")
        # Validate payload is JSON before touching the filesystem.
        try:
            json_data = json.loads(content)
        except ValueError:
            abort(400, "A json dictionary is required.\n Given:\n" + str(content))
        # For security: strip any directory components to prevent path traversal.
        filename = os.path.split(filename)[1]
        logging.info("[API] [#{}] [User: {}] /setfile {} ".format(botname, username, filename))
        path = None
        if os.path.isfile(bot.root + 'configs/' + filename):
            path = bot.root + 'configs/' + filename
        if os.path.isfile(bot.root + 'data/' + filename):
            path = bot.root + 'data/' + filename
        if path is None:
            abort(404, "File \"" + filename + "\" not found.")
        with open(path, mode='w') as file:
            json.dump(json_data, file, indent=4)
        # Pick up the changed file immediately.
        bot.reloadConfig()

    @route('/getTwitchUsername', method='POST')
    def getUserNameI():
        """Get the username based on an id_token. Also verifies token."""
        WebAPI.checkIfFormExists(['auth'])
        auth = urllib.parse.unquote(request.forms.auth)
        username = WebAPI.getUserNameAndVerifyToken(auth)
        logging.info("[API] /getTwitchUsername => {} ".format(username))
        return {"username": username}

    @route('/pause', method='POST')
    def pause():
        """Pause or unpause a bot."""
        WebAPI.checkIfFormExists(['user', 'bot', 'auth', 'pause'])
        username = urllib.parse.unquote(request.forms.user)
        botname = urllib.parse.unquote(request.forms.bot)
        auth = urllib.parse.unquote(request.forms.auth)
        pause = urllib.parse.unquote(request.forms.pause)
        bot = WebAPI.getBot(botname)
        if not WebAPI.hasUserPermission(username, auth):
            abort(403, "Bad authentication")
        if not WebAPI.hasBotPermission(username, bot):
            abort(403, "User doesn't have access to this bot.")
        logging.info("[API] [#{}] [User: {}] /pause {}".format(botname, username, pause))
        # Only the exact lowercase strings 'true'/'false' are accepted.
        if pause == 'true':
            bot.pause = True
        elif pause == 'false':
            bot.pause = False
        else:
            abort(400, "pause must be either 'True' or 'False'")

    def checkIfFormExists(keys):
        """Abort with 400 unless every key in *keys* is present in the form data."""
        for k in keys:
            if k not in request.forms:
                abort(400, "Bad Request, expecting the following data:\n" + str(keys))

    def getBot(botname):
        """Return the correct bot, based on its directory name.

        Aborts with 404 when no bot matches.
        """
        bot = None
        for b in api_bots:
            if WebAPI.getBotName(b) == botname:
                bot = b
        if bot is None:
            abort(404, "Bot \"" + botname + "\"not found.\n")
        return bot

    def getBotName(bot):
        """Return the correct bot name, based on its bot.

        The name is the last directory component of the bot's root path
        (root is expected to end with a slash).
        """
        return bot.root.split('/')[len(bot.root.split('/')) - 2]

    def hasUserPermission(username, auth):
        """Check if the user is authenticated.

        Accepts either the shared API password or a valid Twitch OIDC
        id_token whose subject matches *username*.
        """
        # Check for password
        if api_password is not None:
            if auth == api_password:
                return True
        # If auth doesn't match password, assume id_token is submitted. Verify it.
        id_token_username = WebAPI.getUserNameAndVerifyToken(auth)
        # Compare username from id_token with given username
        return username == id_token_username

    def hasBotPermission(username, bot):
        """Check if the user is allowed to access the bot.

        Reads the bot's config on every call; membership in 'owner_list'
        grants access.
        """
        with open(CONFIG_PATH.format(bot.root), 'r', encoding="utf-8") as file:
            CONFIG = json.load(file)
            admins = CONFIG['owner_list']
            return username in admins

    def getUserNameAndVerifyToken(auth):
        """Verify id_token and returns the username.

        Fetches Twitch's OIDC signing keys, validates the JWT signature,
        expiry and audience, then resolves the token subject to a Twitch
        username via the user-ID API. Aborts on any failure.
        """
        r = requests.get(OIDC_API)
        if r.status_code != 200:
            abort(503, "Cannot reach twitch api.")
        # Verify id_token
        # NOTE(review): only the first published key is tried -- assumes
        # Twitch serves a single signing key; confirm.
        k = r.json()['keys'][0]
        key = jwk.JWK(**k)
        try:
            ET = jwt.JWT(key=key, jwt=auth)
        except (jws.InvalidJWSObject, ValueError):
            abort(403, "Token format unrecognized or bad password.")
        except (jwt.JWTExpired):
            abort(403, "Token expired.")
        user_id = json.loads(ET.claims)['sub']
        # Check that audience in token is same as clientid
        if json.loads(ET.claims)['aud'] != clientID:
            abort(403, "Token not issued to this client.")
        # Get username for id
        headers = {'Client-id': clientID, 'Accept': 'application/vnd.twitchtv.v5+json'}
        r = requests.get(USER_ID_API.format(user_id), headers=headers)
        if r.status_code != 200:
            abort(503, "Cannot reach twitch api.")
        return r.json()['name']
| {
"repo_name": "ghostduck/monkalot",
"path": "bot/web.py",
"copies": "1",
"size": "11374",
"license": "mit",
"hash": -8165793029498674000,
"line_mean": 33.5623100304,
"line_max": 103,
"alpha_frac": 0.5901855598,
"autogenerated": false,
"ratio": 3.945523941707148,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5035709501507148,
"avg_score": null,
"num_lines": null
} |
"""Allows to get all data about a given GitHub City.
This module allow to developers to get all users of GitHub that have a
given city in their profile. For example, if I want getting all users
from London,. I will get all users that have London in their
profiles (they could live in London or not)
Author: Israel Blancas @iblancasa
Original idea: https://github.com/JJ/github-city-rankings
License:
The MIT License (MIT)
Copyright (c) 2015-2017 Israel Blancas @iblancasa (http://iblancasa.com/)
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the Software), to deal in the Software
without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from setuptools import setup

# Read the long description from the repository README.
# Fix: the original used open('../README.md').read(), leaking the file handle;
# a context manager closes it deterministically.
# NOTE(review): the path is relative to the working directory and assumes
# setup.py is run from the src/ directory -- confirm.
with open('../README.md') as readme_file:
    long_description = readme_file.read()

setup(
    name='githubcity',
    version='1.0.4',
    description='GitHub city ranking creator',
    author='Israel Blancas @iblancasa',
    author_email='iblancasa@gmail.com',
    url='https://github.com/iblancasa/GitHubCity',
    download_url='https://github.com/iblancasa/GitHubCity/tarball/0.01',
    keywords=['github', 'ranking', 'data', 'api'],
    classifiers=[],
    install_requires=[
        'python-dateutil==2.4.2',
        'beautifulsoup4==4.6.0',
        'lxml==4.1.1',
        'coloredlogs==5.0',
        'pystache==0.5.4',
        'httpretty==0.8.14'
    ],
    packages=['githubcity'],
    py_modules=["githubcity"],
    long_description=long_description,
    license='MIT'
)
| {
"repo_name": "iblancasa/GitHubCity",
"path": "src/setup.py",
"copies": "1",
"size": "2334",
"license": "mit",
"hash": 6188048988957184000,
"line_mean": 38.5593220339,
"line_max": 77,
"alpha_frac": 0.7095115681,
"autogenerated": false,
"ratio": 3.935919055649241,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 59
} |
"""Allows treating tuples/lists as vectors"""
import math
def add(*items):
    """Adds one or more vectors.

    Returns 0 when called with no arguments; raises RuntimeError when the
    vectors differ in length.
    """
    if not items:
        return 0
    dim = len(items[0])
    for vec in items:
        if len(vec) != dim:
            raise RuntimeError('Vector dimensions not equal')
    return [sum(vec[i] for vec in items) for i in range(dim)]
def madd(a, b, c):
    """Return a+c*b where a and b are vectors."""
    if len(a) != len(b):
        raise RuntimeError('Vector dimensions not equal')
    result = []
    for ai, bi in zip(a, b):
        result.append(ai + c * bi)
    return result
def sub(a, b):
    """Subtract a vector b from a, or subtract a scalar"""
    if not hasattr(b, '__iter__'):
        # Scalar case: subtract b from every component.
        return [ai - b for ai in a]
    if len(a) != len(b):
        raise RuntimeError('Vector dimensions not equal')
    return [ai - bi for ai, bi in zip(a, b)]
def mul(a, b):
    """Multiply a vector either elementwise with another vector, or with a
    scalar."""
    if not hasattr(b, '__iter__'):
        # Scalar case: scale every component.
        return [ai * b for ai in a]
    if len(a) != len(b):
        raise RuntimeError('Vector dimensions not equal')
    return [ai * bi for ai, bi in zip(a, b)]
def div(a, b):
    """Elementwise division with another vector, or with a scalar."""
    if not hasattr(b, '__iter__'):
        # Scalar case: divide every component by b.
        return [ai / b for ai in a]
    if len(a) != len(b):
        raise RuntimeError('Vector dimensions not equal')
    return [ai / bi for ai, bi in zip(a, b)]
def maximum(a, b):
    """Elementwise max"""
    # NOTE(review): unlike mul/div, no dimension check is performed here;
    # mismatched vectors are silently truncated by zip -- confirm intended.
    if not hasattr(b, '__iter__'):
        return [max(ai, b) for ai in a]
    return [max(ai, bi) for ai, bi in zip(a, b)]
def minimum(a, b):
    """Elementwise min"""
    # NOTE(review): unlike mul/div, no dimension check is performed here;
    # mismatched vectors are silently truncated by zip -- confirm intended.
    if not hasattr(b, '__iter__'):
        return [min(ai, b) for ai in a]
    return [min(ai, bi) for ai, bi in zip(a, b)]
def dot(a, b):
    """Dot product."""
    if len(a) != len(b):
        raise RuntimeError('Vector dimensions not equal')
    return sum(ai * bi for ai, bi in zip(a, b))
def normSquared(a):
    """Returns the norm of a, squared."""
    total = 0
    for ai in a:
        total += ai * ai
    return total
def norm(a):
    """L2 norm"""
    return math.sqrt(sum(ai * ai for ai in a))
def unit(a, epsilon=1e-5):
    """Returns the unit vector in the direction a. If the norm of
    a is less than epsilon, a is left unchanged."""
    length = norm(a)
    if length <= epsilon:
        # Near-zero vector: return a copy unchanged rather than dividing.
        return a[:]
    return mul(a, 1.0 / length)
norm_L2 = norm  # Alias: the L2 (Euclidean) norm is the default norm above.
def norm_L1(a):
    """L1 norm"""
    return sum(map(abs, a))
def norm_Linf(a):
    """L-infinity norm"""
    return max(map(abs, a))
def distanceSquared(a, b):
    """Squared L2 distance between vectors a and b.

    Raises:
        RuntimeError: if the vectors differ in length.
    """
    if len(a) != len(b):
        raise RuntimeError('Vector dimensions not equal')
    # Fix: the original shadowed the builtin `sum` with a local accumulator
    # and used a manual index loop; a generator expression is equivalent.
    return sum((ai - bi) * (ai - bi) for ai, bi in zip(a, b))
def distance(a, b):
    """L2 distance"""
    squared = distanceSquared(a, b)
    return math.sqrt(squared)
def cross(a, b):
    """Cross product between a 3-vector or a 2-vector

    Returns a 3-tuple for 3D inputs and a scalar for 2D inputs.
    """
    if len(a) != len(b):
        raise RuntimeError('Vector dimensions not equal')
    dim = len(a)
    if dim == 2:
        # 2D cross product is the scalar z-component.
        return a[0] * b[1] - a[1] * b[0]
    if dim == 3:
        return (a[1] * b[2] - a[2] * b[1],
                a[2] * b[0] - a[0] * b[2],
                a[0] * b[1] - a[1] * b[0])
    raise RuntimeError('Vectors must be 2D or 3D')
def interpolate(a, b, u):
    """Linear interpolation between a and b"""
    delta = sub(b, a)
    return madd(a, delta, u)
| {
"repo_name": "krishauser/Klampt",
"path": "Python/klampt/math/vectorops.py",
"copies": "1",
"size": "3273",
"license": "bsd-3-clause",
"hash": -3314224845835614700,
"line_mean": 26.5042016807,
"line_max": 76,
"alpha_frac": 0.571035747,
"autogenerated": false,
"ratio": 3.047486033519553,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4118521780519553,
"avg_score": null,
"num_lines": null
} |
# allows use of Python3.X print functionality
from __future__ import print_function
import re # for regex functions
import sys
import os
# need regex for N-glycosylation motif
# N{any but P}[S or T]{any but P}
# parentheses and '?=' allow for overlapping
# The lookahead '(?=...)' matches at a position without consuming input,
# so consecutive/overlapping motif occurrences are all reported.
motif = re.compile('(?=N[^P][ST][^P])')
# loop through protein id's to import from uniprot website
import urllib2
from Bio import SeqIO
def main(*args, **kwargs):
    """Find the N-glycosylation motif in a list of UniProt proteins.

    Reads protein IDs (one per line) from the input file named by
    ``args[-2]``, fetches each FASTA record from UniProt, and writes the ID
    and the 1-based motif positions of every matching protein to the output
    file named by ``args[-1]``. ``kwargs`` is unused (kept for interface
    compatibility).
    """
    fpath = os.path.join(os.getcwd(), args[-2])
    # Fix: use context managers so file handles are closed even on error
    # (the original opened/closed manually and leaked on exceptions).
    with open(fpath, 'r') as dataset:
        protein_ids = dataset.readlines()
    out_path = os.path.join(os.getcwd(), args[-1])
    uniprot_url = "http://www.uniprot.org/uniprot/"
    with open(out_path, 'w') as outhandle:
        for protein in protein_ids:
            request = urllib2.Request("".join([uniprot_url + protein.rstrip() + ".fasta"]))
            opener = urllib2.build_opener()
            f = opener.open(request)
            try:
                raw_data = SeqIO.read(f, 'fasta')
            finally:
                f.close()
            locations = []
            # Use search instead of match to search entire string
            if re.search(motif, str(raw_data.seq)):
                print(protein.strip(), file=outhandle)
                for m in re.finditer(motif, str(raw_data.seq)):
                    # Convert 0-based match offset to 1-based position.
                    locations.append(m.start() + 1)
                print(" ".join(map(str, locations)), file=outhandle)


if __name__ == '__main__':
    main(*sys.argv)
"repo_name": "crf1111/Bio-Informatics-Learning",
"path": "Bio-StrongHold/src/Finding_a_Protein_Motif.py",
"copies": "1",
"size": "1375",
"license": "mit",
"hash": 7214292008074038000,
"line_mean": 29.5777777778,
"line_max": 83,
"alpha_frac": 0.6225454545,
"autogenerated": false,
"ratio": 3.3454987834549876,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4468044237954988,
"avg_score": null,
"num_lines": null
} |
"""Allows use of some high-level git operations at GitHub."""
import base64
from collections import namedtuple
from threading import Lock
from decorator import decorator
import github
FileDescription = namedtuple('FileDescription', 'path contents executable')


def file_description(path, contents, executable=False):
    """Build a FileDescription record for a file to commit."""
    return FileDescription(path=path, contents=contents, executable=executable)
class Githubx:
    """High-level git operations (commit, read file) on a GitHub repo.

    All GitHub-mutating calls are serialized through a per-instance lock.
    """

    def __init__(self, *args, **kwargs):
        """All parameters are passed to PyGithub."""
        self._gh = github.Github(*args, **kwargs)
        self._mutex = Lock()

    @decorator
    def _atomic(f, self, *args, **kwargs):
        """Ensure the decorated method is the only one in this instance
        modifying GitHub resources."""
        # The `decorator` library passes the wrapped function as the first
        # argument, so `self` is the second parameter here.
        with self._mutex:
            return f(self, *args, **kwargs)

    @_atomic
    def commit(self, repo,
               file_descriptions,
               commit_message,
               branch='master',
               force=False):
        """Make a commit on GitHub.

        If a path exists, it will be replaced; if not, it will be created.
        See http://developer.github.com/v3/git/"""
        gh_repo = self._gh.get_user().get_repo(repo)
        head_ref = gh_repo.get_git_ref("heads/%s" % branch)
        latest_commit = gh_repo.get_git_commit(head_ref.object.sha)
        base_tree = latest_commit.tree
        # Build one tree element per file; mode 100755 marks executables.
        tree_els = [github.InputGitTreeElement(
            path=desc.path,
            mode='100755' if desc.executable else '100644',
            type='blob',
            content=desc.contents
        ) for desc in file_descriptions]
        new_tree = gh_repo.create_git_tree(tree_els, base_tree)
        new_commit = gh_repo.create_git_commit(
            message=commit_message,
            parents=[latest_commit],
            tree=new_tree)
        # Move the branch head to the new commit.
        head_ref.edit(sha=new_commit.sha, force=force)

    @_atomic
    def get_file(self, repo, filepath, branch='master'):
        """Return a unicode string of the file contents.

        Raise a github.GithubException is the file is not found."""
        #Raising an exception isn't the best general api, but for my use
        #it makes the most sense; I always expect the file to be there.
        gh_repo = self._gh.get_user().get_repo(repo)
        latest_commit = gh_repo.get_git_ref("heads/%s" % branch)
        base_tree = gh_repo.get_git_commit(latest_commit.object.sha).tree
        # Only top-level blobs are searched (the tree is not walked recursively).
        matching_blobs = [el for el in base_tree.tree
                          if el.type == 'blob' and
                          el.path == filepath]
        if not matching_blobs:
            # NOTE(review): GithubException normally takes (status, data);
            # a single string arg may not match its constructor -- confirm.
            raise github.GithubException('File not found in repo.')
        blob = gh_repo.get_git_blob(matching_blobs[0].sha)
        if blob.encoding == 'base64':
            return base64.b64decode(blob.content)
        else:
            return blob.content
| {
"repo_name": "simon-weber/the-listserve-archive",
"path": "githubx.py",
"copies": "1",
"size": "2852",
"license": "mit",
"hash": 4364341682852877300,
"line_mean": 30,
"line_max": 75,
"alpha_frac": 0.6037868163,
"autogenerated": false,
"ratio": 3.9943977591036415,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5098184575403641,
"avg_score": null,
"num_lines": null
} |
"""Allows user to locate points or route segments along WA routes.
"""
from __future__ import (unicode_literals, print_function, division,
absolute_import)
import re
from os.path import split as split_path, join as join_path
import arcpy
from .route_ids import RouteIdSuffixType, standardize_route_id
try:
from arcpy.da import Describe
except ImportError:
from arcpy import Describe
def _get_row_count(view):
    """Return the number of rows in *view* as an int."""
    count_result = arcpy.management.GetCount(view)
    return int(count_result[0])
def add_standardized_route_id_field(in_table, route_id_field, direction_field, out_field_name, out_error_field_name, route_id_suffix_type, wsdot_validation=True):
    """Adds route ID + direction field to event table that has both unsuffixed route ID and direction fields.

    Args:
        in_table: Table to modify in place.
        route_id_field: Name of the existing (unsuffixed) route ID field.
        direction_field: Name of the field holding the direction value.
        out_field_name: New field that receives the suffixed route ID.
        out_error_field_name: New field that receives per-row error text.
        route_id_suffix_type: A RouteIdSuffixType flag; must not be has_no_suffix.
        wsdot_validation: When True, route IDs are validated/standardized
            via standardize_route_id before suffixing.
    """
    # Make sure an output route ID suffix type other than "unsuffixed" has been specified.
    if route_id_suffix_type == RouteIdSuffixType.has_no_suffix:
        raise ValueError("Invalid route ID suffix type: %s" %
                         route_id_suffix_type)
    if wsdot_validation:
        # Determine the length of the output route ID field based on route suffix type
        out_field_length = 11
        if route_id_suffix_type == RouteIdSuffixType.has_i_suffix or route_id_suffix_type == RouteIdSuffixType.has_d_suffix:
            out_field_length += 1
        elif route_id_suffix_type == RouteIdSuffixType.has_both_i_and_d:
            out_field_length += 2
    else:
        # TODO: Get field length from arcpy.ListFields(in_table) using route_id_field's length + 2.
        out_field_length = None
    # Add new fields to the output table.
    if "AddFields" in dir(arcpy.management):
        arcpy.management.AddFields(in_table, [
            [out_field_name, "TEXT", None, out_field_length, None],
            # Use default length (255)
            [out_error_field_name, "TEXT", None, None]
        ])
    else:
        # ArcGIS Desktop 10.5.1 doesn't have AddFields, so use multiple AddField calls.
        arcpy.management.AddField(
            in_table, out_field_name, "TEXT", field_length=out_field_length)
        arcpy.management.AddField(in_table, out_error_field_name, "TEXT")
    # A direction value starting with "d"/"D" marks the decreasing direction.
    decrease_re = re.compile(r"^d", re.IGNORECASE)
    with arcpy.da.UpdateCursor(in_table, (route_id_field, direction_field, out_field_name, out_error_field_name)) as cursor:
        for row in cursor:
            rid = row[0]
            direction = row[1]
            if rid:
                # Get unsuffixed, standardized route ID.
                try:
                    if wsdot_validation:
                        rid = standardize_route_id(
                            rid, RouteIdSuffixType.has_no_suffix)
                except ValueError as error:
                    row[3] = "%s" % error
                else:
                    # If direction is None, skip regex and set match result to None.
                    if direction:
                        match = decrease_re.match(direction)
                    else:
                        match = None
                    # If direction is "d" and specified suffix type has "d" suffixes, add "d" suffix.
                    if match and route_id_suffix_type & RouteIdSuffixType.has_d_suffix == RouteIdSuffixType.has_d_suffix:
                        rid = "%s%s" % (rid, "d")
                    # Add the "i" suffix for non-"d" if specified suffix type includes "i" suffixes.
                    elif route_id_suffix_type & RouteIdSuffixType.has_i_suffix:
                        rid = "%s%s" % (rid, "i")
                    row[2] = rid
            else:
                # If no route ID value, add error message to error field.
                row[3] = "Input Route ID is null"
            cursor.updateRow(row)
def create_event_feature_class(event_table,
                               route_layer,
                               event_table_route_id_field,
                               route_layer_route_id_field,
                               begin_measure_field,
                               end_measure_field=None,
                               out_fc=None):
    """Creates a feature class by locating events along a route layer.

    Args:
        event_table: str, path to a table containing route events
        route_layer: str, path to layer or feature class containing route polylines
        event_table_route_id_field: name of the field in event_table that specifies route ID.
        route_layer_route_id_field: name of the field in the route_layer that specifies route ID.
        begin_measure_field: name of numeric field in event_table that contains begin measure value
        end_measure_field: Optional. Name of numeric field in event_table that contains end measure
            value. If omitted, begin_measure_field will be interpereted as a point along a line
            instead of a line segment.
        out_fc: str, path to output feature class. If omitted, an in_memory FC will be created

    Returns:
        Returns the path to the output feature class.
    """
    # # Ensure given event table and route layer exist.
    # if not arcpy.Exists(event_table):
    #     raise FileNotFoundError(event_table)
    # elif not arcpy.Exists(route_layer):
    #     raise FileNotFoundError(route_layer)

    # End measure is optional. If omitted, out geometry will be points.
    # Otherwise, output will be polyline.
    if end_measure_field is not None:
        fields = ("OID@", event_table_route_id_field,
                  begin_measure_field, end_measure_field)
        out_geo_type = "POLYLINE"
    else:
        fields = ("OID@", event_table_route_id_field, begin_measure_field)
        out_geo_type = "POINT"
    if out_fc is None:
        wkspc = "in_memory"
        out_fc = arcpy.CreateUniqueName("LocatedEvents", wkspc)
    # Create the output feature class.
    workspace, fc_name = split_path(out_fc)
    routes_desc = Describe(route_layer)
    # Get the spatial reference from the route layer description.
    # The method for accessing it will differ in ArcGIS Desktop
    # and ArcGIS Pro.
    spatial_reference = None
    if isinstance(routes_desc, dict):
        spatial_reference = routes_desc["spatialReference"]
    else:
        spatial_reference = routes_desc.spatialReference
    arcpy.management.CreateFeatureclass(workspace, fc_name, out_geo_type,
                                        spatial_reference=spatial_reference)
    event_oid_field_name = "EventOid"
    error_field_name = "Error"
    arcpy.management.AddField(out_fc, event_oid_field_name, "LONG", field_alias="Event OID",
                              field_is_nullable=False, field_is_required=True)
    arcpy.management.AddField(out_fc, error_field_name, "TEXT", field_is_nullable=True,
                              field_alias="Locating Error")
    with arcpy.da.SearchCursor(event_table, fields) as table_cursor:
        with arcpy.da.InsertCursor(out_fc, (event_oid_field_name, "SHAPE@",
                                            error_field_name)) as insert_cursor:
            for row in table_cursor:
                event_oid = row[0]
                event_route_id = row[1]
                begin_m = row[2]
                if len(fields) >= 4:
                    end_m = row[3]
                else:
                    end_m = None
                # Select only the route feature(s) with this event's route ID.
                where = "%s = '%s'" % (
                    route_layer_route_id_field, event_route_id)
                out_geom = None
                error = None
                with arcpy.da.SearchCursor(route_layer, "SHAPE@", where) as route_cursor:
                    # Initialize output route event geometry.
                    for (geom,) in route_cursor:
                        # find position or segment.
                        try:
                            if end_m is None:
                                out_geom = geom.positionAlongLine(begin_m)
                            else:
                                out_geom = geom.segmentAlongLine(
                                    begin_m, end_m)
                        except arcpy.ExecuteError as ex:
                            error = ex
                            arcpy.AddWarning("Error finding event on route: %s @ %s.\n%s" % (
                                event_route_id, (begin_m, end_m), ex))
                        # If out geometry has been found, no need to try with other
                        # route features. (There should be only one route feature,
                        # anyway.)
                        if out_geom:
                            error = None
                            break
                # One output row per event: geometry on success, or an error message.
                if error:
                    insert_cursor.insertRow((event_oid, None, str(error)))
                elif out_geom is None:
                    msg = "Could not locate %s on %s (%s)." % (
                        (begin_m, end_m), event_route_id, event_route_id)
                    insert_cursor.insertRow((event_oid, None, msg))
                    arcpy.AddWarning(msg)
                else:
                    insert_cursor.insertRow((event_oid, out_geom, None))
    return out_fc
def field_list_contains(fields, name, field_type="TEXT"):
    """Determines if a list of fields contains a field with the given name and type.

    Args:
        fields: A Python list of fields. This can be acquired from arcpy.ListFields or arcpy(.da).Describe.
        name: The name of the field to search for. Match is case-insensitive.
        field_type: An AddField-style type keyword (e.g. "TEXT", "DOUBLE");
            matched case-insensitively against both the keyword and the
            corresponding Describe/ListFields type name. Defaults to "TEXT".

    Returns:
        Returns a tuple with two boolean values. The first tells if the field with the given name is
        contained in the list; the second tells if the type of the field matches the desired type.
        (This second value will always be False if the field is not found.)

    Example:
        fields = arcpy.ListFields(r"c:/temp/example.gdb/my_features")
        field_exists, correct_type = field_list_contains(fields, "LOC_ERROR")
    """
    # Maps AddField type keywords to the type names reported by
    # Describe/ListFields field objects.
    field_mapping = {
        "BLOB": "Blob",
        "DATE": "Date",
        "DOUBLE": "Double",
        # "Geometry": "Geometry",
        # "GlobalID": "GlobalID",
        "GUID": "Guid",
        "LONG": "Integer",
        # "OID": "OID",
        # "Raster": "Raster",
        "FLOAT": "Single",
        "SHORT": "SmallInteger",
        "TEXT": "String"
    }
    type_re_parts = "|".join(
        map(lambda s: r"(?:%s)" % s, (field_type, field_mapping[field_type]))
    )
    type_re = re.compile(r"^(?:%s)$" % type_re_parts, re.IGNORECASE)
    field_exists = False
    correct_type = False
    # Fix: the docstring promises a case-insensitive name match, but the
    # pattern was previously compiled without re.IGNORECASE.
    name_re = re.compile("^%s$" % name, re.IGNORECASE)
    for field in fields:
        if name_re.match(field.baseName) or name_re.match(field.name):
            field_exists = True
            if type_re.match(field.type):
                correct_type = True
            break
    return field_exists, correct_type
def get_measures(in_geometry, route_geometry):
    """Finds the nearest point or route segement along a route polyline.

    Args:
        in_geometry: Either a PointGeometry or a Polyline.
        route_geometry: A route Polyline.

    Returns:
        A tuple with the following values
        * located geometry
        * Begin, End point info tuples: (nearest_point, measure, distance, right_side)
          Will contain one item if input is point, two if input is polyline.

    Raises:
        TypeError: if route_geometry is not a Polyline or in_geometry is
            neither a PointGeometry nor a Polyline.
    """
    if not isinstance(route_geometry, arcpy.Polyline):
        raise TypeError("route_geometry must be a Polyline.")
    p1_info, p2_info = (None,) * 2
    if isinstance(in_geometry, (arcpy.PointGeometry, arcpy.Polyline)):
        # queryPointAndDistance returns (point, measure, distance, right_side).
        p1_info = route_geometry.queryPointAndDistance(in_geometry.firstPoint)
        if isinstance(in_geometry, arcpy.Polyline):
            # Polyline input: locate both endpoints and return the route
            # segment between their measures.
            p2_info = route_geometry.queryPointAndDistance(
                in_geometry.lastPoint)
            out_geometry = route_geometry.segmentAlongLine(
                p1_info[1], p2_info[1])
        else:
            # Point input: the located geometry is the nearest route point.
            out_geometry = p1_info[0]
    else:
        raise TypeError("Invalid geometry type")
    return out_geometry, p1_info, p2_info
def update_route_location(
        in_features,
        route_layer,
        in_features_route_id_field,
        route_layer_route_id_field,
        measure_field,
        end_measure_field=None,
        rounding_digits=None,
        use_m_from_route_point=True):
    """Given input features, finds location nearest route.

    Args:
        in_features: Input features to be located. (Feature Layer)
        route_layer: Route layer containing Linear Referencing System (LRS)
        in_features_route_id_field: The field in "in_features" that identifies which route the event is on.
        route_layer_route_id_field: The field in the "route_layer" that contains the unique route identifier.
        measure_field: The field in the input features with begin measures (for lines) or a point's measure (for points).
        end_measure_field: The field in the input features with end measures (for lines). Not used if in_features are points.
        rounding_digits: The number of digits to round to.
        use_m_from_route_point: If you want to use the M value from the nearest point (rather than the distance returned by
            queryPointAndDistance) set to True. Otherwise, set to False
    """
    # Convert rounding digits to integer
    if not isinstance(rounding_digits, (int, type(None))):
        rounding_digits = int(rounding_digits)
    # out_rid_field: Name of the route ID field in the output feature class.
    # out_error_field: The name of the new field for error information that will be created in "out_fc".
    # source_oid_field: The name of the source Object ID field in the output feature class.
    out_error_field = "LOC_ERROR"
    distance_1_field = "Distance"
    distance_2_field = "EndDistance"
    route_desc = Describe(route_layer)
    in_features_desc = Describe(in_features)
    if not re.match(r"^(?:(?:Point)|(?:Polyline))$", in_features_desc.shapeType, re.IGNORECASE):
        raise TypeError(
            "Input feature class must be either Point or Polyline.")
    spatial_ref = route_desc.spatialReference

    def add_output_fields(field_name, field_type, **other_add_field_params):
        # Helper: add an output field unless it already exists; warn when it
        # exists with the right type, error when the type is wrong.
        field_exists, correct_type = field_list_contains(
            in_features_desc.fields, field_name, field_type)
        if not field_exists:
            arcpy.management.AddField(
                in_features, field_name, field_type, **other_add_field_params)
        elif not correct_type:
            arcpy.AddError(
                "Field '%s' already exists in table but is not the correct type." % field_name)
        else:
            arcpy.AddWarning(
                "Field '%s' already exists and its data will be overwritten." % field_name)

    # Locating Error field
    add_output_fields(out_error_field, "TEXT", field_alias="Locating Error")
    add_output_fields(distance_1_field, "DOUBLE", field_alias="Distance")
    update_fields = [in_features_route_id_field,
                     "SHAPE@", out_error_field, measure_field, distance_1_field]
    if end_measure_field:
        add_output_fields(distance_2_field, "DOUBLE",
                          field_alias="End Distance")
        update_fields += [end_measure_field, distance_2_field]
    # Get search cursor feature count
    result = arcpy.management.GetCount(in_features)
    feature_count = int(result.getOutput(0))
    error_count = 0
    with arcpy.da.UpdateCursor(in_features, update_fields, "%s IS NOT NULL" % in_features_route_id_field, spatial_ref) as update_cursor:
        for row in update_cursor:
            in_route_id, event_geometry = row[:2]
            if not event_geometry:
                # NOTE(review): this `continue` skips updateRow, so the
                # "Event geometry is NULL." message is never persisted --
                # confirm whether that is intended.
                row[2] = "Event geometry is NULL."
                continue
            with arcpy.da.SearchCursor(route_layer, ["SHAPE@"], "%s = '%s'" % (route_layer_route_id_field, in_route_id)) as route_cursor:
                route_geometry = None
                for route_row in route_cursor:
                    route_geometry = route_row[0]
                    updated_geometry, begin_info, end_info = get_measures(
                        event_geometry, route_geometry)
                    # the updated_geometry is not needed for this.
                    del updated_geometry
                    # Geometry should not change, so no need to update it.
                    # row[1] = updated_geometry
                    nearest_point, measure, distance, right_side = begin_info
                    if use_m_from_route_point:
                        measure = nearest_point.firstPoint.M
                    if rounding_digits is not None:
                        measure = round(measure, rounding_digits)
                    del right_side
                    row[2] = None
                    row[3], row[4] = measure, distance
                    # row[3] = m1
                    # row[5], row[4] = angle_dist1
                    if end_measure_field:
                        nearest_point, measure, distance, right_side = end_info
                        del right_side
                        if use_m_from_route_point:
                            measure = nearest_point.firstPoint.M
                        if rounding_digits is not None:
                            measure = round(measure, rounding_digits)
                        row[-2] = measure
                        row[-1] = distance
                    # Only the first matching route feature is used.
                    break
                if not route_geometry:
                    row[2] = "Route not found"
                    error_count += 1
            update_cursor.updateRow(row)
    if error_count:
        arcpy.AddWarning("Unable to locate %d out of %d events." %
                         (error_count, feature_count))
def copy_with_segment_ids(input_point_features, out_feature_class):
    """Copies point feature classes and adds SegmentID and IsEndPoint fields to the copy.

    Consecutive rows are paired: rows 0 and 1 form segment 0, rows 2 and 3
    form segment 1, and so on.  The first row of each pair is treated as the
    begin point and the second as the end point.

    Parameters:
        input_point_features: Path to a point feature class.
        out_feature_class: Path where output feature class will be written. This feature class
            will contain the following extra fields:
            SegmentId: Indicates which point features of input_point_features go together to define the
                begin and end points of a line segement.
            IsEndPoint: Will have value of 1 if the the row represents an end point, 0 for a
                begin point.
    Returns:
        Returns a tuple: total number of rows (r), number of segments (s).
        s = r / 2
    Raises:
        ValueError: If the input has an odd number of features.
    """
    total_rows = int(arcpy.GetCount_management(input_point_features)[0])
    # Points must pair up exactly into (begin, end) couples.
    if total_rows % 2 != 0:
        raise ValueError(
            "Input feature class should have an even number of features.")
    arcpy.AddMessage("Copying %s to %s..." %
                     (input_point_features, out_feature_class))
    arcpy.management.CopyFeatures(input_point_features, out_feature_class)
    arcpy.AddMessage("Adding fields %s to %s" %
                     (("SegmentId", "IsEndPoint"), out_feature_class))
    arcpy.management.AddField(
        out_feature_class, "SegmentId", "LONG", field_alias="Segement ID")
    arcpy.management.AddField(
        out_feature_class, "IsEndPoint", "SHORT", field_alias="Is end point")
    arcpy.AddMessage(
        "Calculating SegmentIDs and determining start and end points.")
    processed = 0
    with arcpy.da.UpdateCursor(out_feature_class, ("SegmentId", "IsEndPoint")) as cursor:
        for index, _row in enumerate(cursor):
            # Even rows begin a new segment; odd rows end the current one.
            cursor.updateRow([index // 2, index % 2])
            processed = index + 1
    return processed, (processed + 1) // 2
def _list_oids_of_non_matches(table, match_field1, match_field2):
    """Returns a list of the OIDs of rows where the values in match_field1
    and match_field2 do not match.
    """
    # Scan every row once; keep only the OIDs whose two fields differ.
    with arcpy.da.SearchCursor(table, ["OID@", match_field1, match_field2]) as cursor:
        return [oid for oid, value1, value2 in cursor if value1 != value2]
def _select_by_oids(table, oid_list):
    """Creates a new table view on *table* and selects all rows whose object
    IDs appear in *oid_list*.  Returns the view's name, or None (implicitly)
    when *oid_list* is empty or None.
    """
    # Nothing to select: bail out without creating a view.
    if not oid_list:
        return
    # Generate a unique in-memory name, keeping only the layer part.
    view_name = split_path(arcpy.CreateScratchName(
        "OutputEvents", workspace="in_memory"))[1]
    arcpy.AddMessage(
        "Rows with the following OIDs should be deleted: %s" % oid_list)
    arcpy.AddMessage("Creating view %s on %s" % (view_name, table))
    arcpy.management.MakeTableView(table, view_name)
    # The table's single OID-type field identifies each row.
    oid_field_name = arcpy.ListFields(table, field_type="OID")[0].name
    oid_csv = ",".join(map(str, oid_list))
    arcpy.management.SelectLayerByAttribute(
        view_name, "NEW_SELECTION", "%s in (%s)" % (oid_field_name, oid_csv))
    return view_name
def points_to_line_events(in_features, in_routes, route_id_field, radius, out_table):
    """Using a point feature layer to represent begin and end points, finds nearest
    route event points.

    The point features are paired into segments (see copy_with_segment_ids),
    located along the routes, and the begin/end halves are joined back into a
    single line-event row per segment.  Rows whose begin and end points
    matched different routes are deleted.

    For parameter explanations, see http://pro.arcgis.com/en/pro-app/tool-reference/linear-referencing/locate-features-along-routes.htm
    Returns
        Returns a tuple: (out_table, output event properties string for use with
        arcpy.lr.MakeRouteEventLayer)
    """
    # Copy input features to new temporary feature class.
    in_features_copy = arcpy.CreateScratchName(workspace="in_memory")
    # Pair the points into segments (adds SegmentId / IsEndPoint fields).
    copy_with_segment_ids(in_features, in_features_copy)
    temp_events_table = arcpy.CreateScratchName(
        "AllEvents", workspace="in_memory")
    try:
        arcpy.AddMessage("Locating fields along routes...")
        arcpy.lr.LocateFeaturesAlongRoutes(
            in_features_copy, in_routes, route_id_field, radius, temp_events_table,
            "RID POINT Measure", "ALL", "DISTANCE", in_fields="FIELDS")
    finally:
        # Always drop the temporary copy, even if locating failed.
        arcpy.AddMessage("Deleting %s" % in_features_copy)
        arcpy.management.Delete(in_features_copy)
    # Create layer name, removing the workspace part from the generated output.
    events_layer = split_path(
        arcpy.CreateUniqueName("point_events", "in_memory"))[1]
    end_events_table = arcpy.CreateScratchName(
        "End", "PointEvents", workspace="in_memory")
    try:
        # Select start point events, copy to new table.
        # Then switch the selection and copy the end point events to a new table.
        arcpy.management.MakeTableView(
            temp_events_table, events_layer, None, "in_memory")
        arcpy.management.SelectLayerByAttribute(
            events_layer, "NEW_SELECTION", "IsEndPoint = 0")
        # copy selection (begin points) to output table
        arcpy.management.CopyRows(events_layer, out_table)
        arcpy.management.SelectLayerByAttribute(
            events_layer, "SWITCH_SELECTION")
        # copy selection (end points) to new temp table
        arcpy.management.CopyRows(events_layer, end_events_table)
        # Alter the field names in the end point events table so they do not
        # collide with the begin-point fields after the join below.
        for field_name in ("RID", "Measure", "Distance"):
            new_name = "End%s" % field_name
            arcpy.management.AlterField(end_events_table, field_name, new_name)
        # Join the temp table end point data to the output table containing the begin point events.
        arcpy.management.JoinField(
            out_table, "SegmentId", end_events_table, "SegmentId", [
                "EndRID", "EndMeasure", "EndDistance"])
    finally:
        # Clean up every intermediate layer/table that actually exists.
        for table in (events_layer, temp_events_table, end_events_table):
            if table and arcpy.Exists(table):
                arcpy.AddMessage("Deleting %s..." % table)
                arcpy.management.Delete(table)
    # Get a list of OIDs that need to be deleted.
    # These are the rows where the begin and end route IDs do not match.
    oids_to_be_deleted = _list_oids_of_non_matches(
        out_table, "RID", "EndRID")
    drop_end_rid_field = True
    # Delete rows from the output table where the start and end route IDs do not match
    if oids_to_be_deleted:
        try:
            # # This didn't work for some reason, so now select rows for deletion using alternative method.
            # # arcpy.management.SelectLayerByAttribute(events_layer, "NEW_SELECTION", "RID <> EndRID")
            events_layer = _select_by_oids(out_table, oids_to_be_deleted)
            # NOTE(review): _get_row_count is defined elsewhere in this module.
            selected_row_count = _get_row_count(events_layer)
            # NOTE(review): redundant re-assignment; drop_end_rid_field is
            # already True at this point.
            drop_end_rid_field = True
            if selected_row_count:
                total_rows_before_delete = _get_row_count(out_table)
                arcpy.AddMessage(
                    "There are %d rows where the start and end RIDs do not match. Deleting these rows..." % selected_row_count)
                arcpy.management.DeleteRows(events_layer)
                rows_after_delete = _get_row_count(out_table)
                # If the row count did not shrink, the delete silently failed;
                # keep the EndRID field around for debugging.
                if rows_after_delete >= total_rows_before_delete:
                    arcpy.AddWarning(
                        "%d rows were selected for deletion, but no rows were deleted." % selected_row_count)
                    drop_end_rid_field = False
            else:
                arcpy.AddMessage("Zero rows were selected for deletion")
        finally:
            arcpy.management.Delete(events_layer)
    if drop_end_rid_field:
        # Remove the bookkeeping fields from the final output.
        for field in ("EndRID", "SegmentID", "IsEndPoint"):
            try:
                arcpy.DeleteField_management(out_table, field)
            except arcpy.ExecuteError as ex:
                arcpy.AddWarning(
                    "Could not delete field %s from %s.\n%s" % (field, out_table, ex))
    return out_table, "RID LINE Measure EndMeasure"
def points_to_line_event_features(in_features, in_routes, route_id_field, radius, out_feature_class):
    """Given an input feature layer of points representing route segment start and end points,
    locates those points along the given route layer's routes and returns the output route line segment
    event feature class.
    """
    # Temporary in-memory table that will receive the line events.
    scratch_table = arcpy.CreateScratchName(
        "event_table", workspace="in_memory")
    # Build the line events; also returns the event-properties string
    # required by MakeRouteEventLayer.
    scratch_table, event_properties = points_to_line_events(
        in_features, in_routes, route_id_field, radius, scratch_table)
    # Unique layer name with the workspace portion stripped off.
    located_layer = split_path(arcpy.CreateScratchName(
        "located_events", workspace="in_memory"))[1]
    # Turn the event table into a route event layer.
    arcpy.lr.MakeRouteEventLayer(
        in_routes, route_id_field, scratch_table, event_properties, located_layer, None, "ERROR_FIELD")
    # Persist the in-memory event layer as the output feature class.
    arcpy.management.CopyFeatures(located_layer, out_feature_class)
    # Delete the temporary tables and layers.
    for scratch_item in (located_layer, scratch_table):
        if scratch_item and arcpy.Exists(scratch_item):
            arcpy.management.Delete(scratch_item)
| {
"repo_name": "WSDOT-GIS/wsdot-route-gp",
"path": "wsdotroute/__init__.py",
"copies": "1",
"size": "27667",
"license": "unlicense",
"hash": -8275669347216432000,
"line_mean": 42.7768987342,
"line_max": 162,
"alpha_frac": 0.5975711136,
"autogenerated": false,
"ratio": 4.063298575414892,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5160869689014892,
"avg_score": null,
"num_lines": null
} |
"""Allows using emdp as a gym environment."""
import numpy as np
import gym
from gym import spaces
import emdp.utils as utils
def gymify(mdp, **kwargs):
    """Wrap *mdp* in a GymToMDP adapter; extra kwargs go to the wrapper."""
    wrapper = GymToMDP(mdp, **kwargs)
    return wrapper
class GymToMDP(gym.Env):
    """gym.Env adapter around an emdp MDP object."""

    def __init__(self, mdp, observation_one_hot=True):
        """
        :param mdp: The emdp.MDP object to wrap.
        :param observation_one_hot: Boolean indicating if the observation space
                                    should be one hot or an integer.
        """
        self.mdp = mdp
        self._obs_one_hot = observation_one_hot
        # One-hot observations live in a {0,1}-valued Box; integer
        # observations use a Discrete space over the MDP's states.
        if observation_one_hot:
            obs_space = spaces.Box(
                low=0, high=1, shape=(mdp.state_space, ), dtype=np.int32)
        else:
            obs_space = spaces.Discrete(mdp.state_space)
        self.observation_space = obs_space
        self.action_space = spaces.Discrete(mdp.action_space)

    def reset(self):
        initial_state = self.mdp.reset()
        return self.maybe_convert_state(initial_state)

    def step(self, action):
        next_state, reward, done, info = self.mdp.step(action)
        return self.maybe_convert_state(next_state), reward, done, info

    def seed(self, seed):
        self.mdp.set_seed(seed)

    def render(self):
        # TODO: rendering is not implemented.
        pass

    def maybe_convert_state(self, state):
        """Pass one-hot states through unchanged; otherwise convert the
        one-hot vector from the MDP into an integer index."""
        if not self._obs_one_hot:
            return utils.convert_onehot_to_int(state)
        return state
| {
"repo_name": "zafarali/emdp",
"path": "emdp/emdp_gym/gym_wrap.py",
"copies": "1",
"size": "1395",
"license": "mit",
"hash": -8074689601729049000,
"line_mean": 25.8269230769,
"line_max": 79,
"alpha_frac": 0.6,
"autogenerated": false,
"ratio": 3.4875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45875,
"avg_score": null,
"num_lines": null
} |
# Scrolling console banner: renders a message in 7-line "banner" letters
# and animates it moving right-to-left across the console forever.
# os is used to clear the console screen; time paces the animation.
import os
import time
# The width of the display
# (the Windows console is 79 characters wide).
WIDTH = 79
# The message we wish to print (banner glyphs are defined for upper case only).
message = "hello!".upper()
# The printed banner version of the message.
# This is a 7-line display, stored as 7 strings; initially, these are empty.
printedMessage = [ "","","","","","","" ]
# A dictionary mapping letters to their 7-line banner display equivalents.
# Each letter maps to 7 strings of equal width, one per display row.
characters = { " " : [ "     ",
                       "     ",
                       "     ",
                       "     ",
                       "     ",
                       "     ",
                       "     " ],
               "E" : [ "*****",
                       "*    ",
                       "*    ",
                       "*****",
                       "*    ",
                       "*    ",
                       "*****" ],
               "H" : [ "*   *",
                       "*   *",
                       "*   *",
                       "*****",
                       "*   *",
                       "*   *",
                       "*   *" ],
               "O" : [ "*****",
                       "*   *",
                       "*   *",
                       "*   *",
                       "*   *",
                       "*   *",
                       "*****" ],
               "L" : [ "*    ",
                       "*    ",
                       "*    ",
                       "*    ",
                       "*    ",
                       "*    ",
                       "*****" ],
               "!" : [ "  *  ",
                       "  *  ",
                       "  *  ",
                       "  *  ",
                       "  *  ",
                       "     ",
                       "  *  " ]
               }
# Build up the printed banner row by row: the 1st row of the display is
# created for each character in the message, followed by the second line, etc.
# A single space is appended after each glyph as inter-letter spacing.
for row in range(7):
    for char in message:
        printedMessage[row] += (str(characters[char][row]) + " ")
# The offset is how far to the right we want to print the message.
# Initially, we want to print the message just off the display.
offset = WIDTH
while True:
    # NOTE(review): "cls" is Windows-only; on other OSes this is a no-op.
    os.system("cls")
    # Print each line of the message, including the offset.
    # When offset is negative, the slice clips the part of the banner that
    # has already scrolled off the left edge of the display.
    for row in range(7):
        print(" " * offset + printedMessage[row][max(0,offset*-1):WIDTH - offset])
    # Move the message a little to the left.
    offset -=1
    # If the entire message has moved 'through' the display then
    # start again from the right hand side (each glyph column is 6 wide:
    # 5 characters plus 1 space of padding).
    if offset <= ((len(message)+2)*6) * -1:
        offset = WIDTH
    # Take out or change this line to speed up / slow down the display.
    time.sleep(0.05)
| {
"repo_name": "dracaether/python",
"path": "random stuff/banner.py",
"copies": "1",
"size": "2842",
"license": "mit",
"hash": 9223170121400854000,
"line_mean": 29.8913043478,
"line_max": 82,
"alpha_frac": 0.3603096411,
"autogenerated": false,
"ratio": 4.525477707006369,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.016157669062241992,
"num_lines": 92
} |
"""Allow tabs to be closed in different ways."""
from __future__ import annotations
import tkinter
from functools import partial
from porcupine import get_tab_manager, images, tabs
def close_clicked_tab(event: tkinter.Event[tabs.TabManager], *, what2close: str = 'this') -> None:
    """Close tab(s) relative to the tab that was clicked.

    *what2close* selects what to close: 'this' (the clicked tab only),
    'left' or 'right' (every tab on that side of it), or 'others'
    (every tab except it).  Tabs whose can_be_closed() returns False are
    left open.  Raises RuntimeError for any other *what2close* value.
    """
    before = event.widget.index(f'@{event.x},{event.y}')
    after = before + 1
    # Fix: the original local was named `tabs`, shadowing the imported
    # `porcupine.tabs` module used in the signature annotation.
    if what2close == 'this':
        tabs_to_close = event.widget.tabs()[before:after]
    elif what2close == 'left':
        tabs_to_close = event.widget.tabs()[:before]
    elif what2close == 'right':
        tabs_to_close = event.widget.tabs()[after:]
    elif what2close == 'others':
        tabs_to_close = event.widget.tabs()[:before] + event.widget.tabs()[after:]
    else:
        raise RuntimeError(f"bad what2close value: {what2close}")
    for tab in tabs_to_close:
        if tab.can_be_closed():
            event.widget.close_tab(tab)
def on_x_clicked(event: tkinter.Event[tabs.TabManager]) -> None:
    """Close the clicked tab if the click landed on its close button."""
    if event.widget.identify(event.x, event.y) != 'label':
        return
    # Find the right edge of the top label (including the close button).
    label_right_edge = event.x
    while event.widget.identify(label_right_edge, event.y) == 'label':
        label_right_edge += 1
    # Hopefully the image is on the right edge of the label and there's no padding :O
    if event.x >= label_right_edge - images.get('closebutton').width():
        close_clicked_tab(event)
def show_menu(event: tkinter.Event[tabs.TabManager]) -> None:
    """Pop up a context menu offering the different ways to close tabs."""
    entries = [
        ("Close this tab", 'this'),
        ("Close tabs to left", 'left'),
        ("Close tabs to right", 'right'),
        ("Close other tabs", 'others'),
    ]
    menu = tkinter.Menu(tearoff=False)
    for label_text, target in entries:
        menu.add_command(
            label=label_text,
            command=partial(close_clicked_tab, event, what2close=target),
        )
    menu.tk_popup(event.x_root, event.y_root)
    # Destroy the menu once it is unmapped (closed or focus lost).
    menu.bind('<Unmap>', (lambda event: menu.after_idle(menu.destroy)), add=True)
# Close tab on middle-click (press down the wheel of the mouse)
def on_header_clicked(event: tkinter.Event[tabs.TabManager]) -> None:
    """Close the clicked tab when its header label is middle-clicked."""
    if event.widget.identify(event.x, event.y) != 'label':
        return
    close_clicked_tab(event)
def setup() -> None:
    """Add close buttons to tabs and wire up the tab-closing event handlers."""
    manager = get_tab_manager()
    manager.add_tab_callback(
        lambda tab: get_tab_manager().tab(
            tab, image=images.get('closebutton'), compound='right'
        )
    )
    manager.bind('<<TabClosing:XButtonClickClose>>', on_x_clicked, add=True)
    manager.bind('<<TabClosing:ShowMenu>>', show_menu, add=True)
    manager.bind('<<TabClosing:HeaderClickClose>>', on_header_clicked, add=True)
| {
"repo_name": "Akuli/editor",
"path": "porcupine/plugins/tab_closing.py",
"copies": "1",
"size": "2682",
"license": "mit",
"hash": 3970756498124793300,
"line_mean": 39.6363636364,
"line_max": 112,
"alpha_frac": 0.668158091,
"autogenerated": false,
"ratio": 3.4785992217898833,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4646757312789883,
"avg_score": null,
"num_lines": null
} |
"""Allow the ability to connect and publish to a queue.
"""
import json
import logging
import time
import kombu
import requests
class Producer(object):
    """Publishes items to a RabbitMQ queue via kombu, optionally throttling
    so the queue never grows much beyond one batch.

    NOTE(review): this module uses Python 2 constructs (``input_iter.next()``);
    use ``next(input_iter)`` to port it to Python 3.
    """
    def __init__(self, dest_queue_name, rabbitmq_host, serializer=None, compression=None):
        """Connect to RabbitMQ on *rabbitmq_host* and open *dest_queue_name*.

        *serializer* and *compression* are passed straight through to
        kombu's SimpleQueue.
        """
        self.serializer = serializer
        self.compression = compression
        # NOTE(review): queue_cache is not used by this class; presumably
        # kept for external callers — verify before removing.
        self.queue_cache = {}
        self.rabbitmq_host = rabbitmq_host
        self.dest_queue_name = dest_queue_name
        # Connect to the queue.
        #
        broker = kombu.BrokerConnection(rabbitmq_host)
        self.dest_queue = broker.SimpleQueue(dest_queue_name, serializer=serializer, compression=compression)
    def put(self, item):
        """Put one item onto the queue.
        """
        self.dest_queue.put(item)
    def buffered_put(self, input_iter, batch_size, resume_threshold=0.1, delay_in_seconds=5.0):
        """Given an input iterator, keep adding batches of items to the
        destination queue.
        After each batch, wait for the queue size to drop to a certain level
        until putting in the next batch.
        (Wait until the queue size is batch_size * resume_threshold.)
        Note that it isn't exact, but it will attempt to ensure that the queue
        size never goes (much) beyond batch_size.
        """
        num_enqueued = 0
        while True:
            try:
                logging.debug("Starting batch (batch_size={0})".format(batch_size))
                # Enqueue up to batch_size items; StopIteration breaks out
                # when the iterator is exhausted mid-batch.
                for i in range(batch_size):
                    self.put(input_iter.next())
                    num_enqueued += 1
                logging.debug("Batch done. {0} items enqueued so far".format(num_enqueued))
            except StopIteration:
                # We're done!
                #
                logging.debug("Input exhausted. {0} items enqueued in total".format(num_enqueued))
                break
            # After each batch, we need to pause briefly.
            # Otherwise get_num_messages won't include the messages that we
            # just enqueued.
            #
            time.sleep(delay_in_seconds)
            # Now that we have completed one batch, we need to wait
            # (poll until the queue drains below the resume threshold).
            #
            max_size = resume_threshold * batch_size
            num_messages = get_num_messages(self.rabbitmq_host, self.dest_queue_name)
            while num_messages >= max_size:
                logging.debug("Current queue size = {0}, waiting until size <= {1}".format(num_messages, max_size))
                time.sleep(delay_in_seconds)
                num_messages = get_num_messages(self.rabbitmq_host, self.dest_queue_name)
def get_num_messages(rabbitmq_host, queue_name, port=15672, vhost="%2F", auth=None):
    """A (very!) approximate attempt to get the number of messages in a queue.
    It uses the rabbitmq http API (so make sure that is installed).
    """
    # Default to RabbitMQ's out-of-the-box guest credentials.
    credentials = auth if auth else ("guest", "guest")
    url = "http://{0}:{1}/api/queues/{2}/{3}".format(
        rabbitmq_host, port, vhost, queue_name)
    response = requests.get(url, auth=credentials)
    return json.loads(response.content)["messages"]
| {
"repo_name": "sujaymansingh/queue_util",
"path": "queue_util/producer.py",
"copies": "1",
"size": "3114",
"license": "mit",
"hash": -8247455627727672000,
"line_mean": 36.9756097561,
"line_max": 115,
"alpha_frac": 0.6056518947,
"autogenerated": false,
"ratio": 4.044155844155844,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5149807738855844,
"avg_score": null,
"num_lines": null
} |
# Allow the use of / operator for division to yield floats instead of integers:
# http://docs.python.org/whatsnew/2.2.html#pep-238-changing-the-division-operator
from __future__ import division
# Imports from standard libraries
from datetime import date, timedelta
import hashlib
import os
import urllib2
# Imports from Django
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.contrib.markup.templatetags.markup import markdown
from django.contrib.sites.models import Site
from django.core.files import File
from django.contrib.gis.db import models
from django.utils import simplejson
# Imports from other sources
from pyPdf import PdfFileReader, PdfFileWriter
# Imports from Brubeck
from brubeck.core.models import ContentChannel
class Publication(models.Model):
    """
    Provides support for multiple publications.
    A publication may be identified either by its name (guaranteed to be unique
    at the database level) or its associated Site (part of
    django.contrib.sites). Views will probably use the latter more often.
    """
    # Unique, indexed display name of the publication.
    name = models.CharField(max_length=50, db_index=True, unique=True)
    # Short human-readable blurb about the publication.
    description = models.CharField(max_length=200)
    # The django.contrib.sites Site this publication is served from.
    site = models.ForeignKey(Site)
    def __unicode__(self):
        return self.name
    def get_absolute_url(self):
        # The publication's home page is the root of its site's domain.
        return u'http://%s/' % self.site.domain
    class Meta:
        ordering = ['name']
class Section(ContentChannel):
    """
    Provides support for organizing content by section.

    Inherits from ContentChannel; the name and slug fields used below are
    presumably defined there (not visible in this file) — verify on the base.
    """
    # The publication this section belongs to.
    publication = models.ForeignKey(Publication, db_index=True)
    def __unicode__(self):
        return u'%s %s' % (self.publication, self.name)
    def get_absolute_url(self):
        return '/section/%s/' % self.slug
    class Meta:
        ordering = ['publication', 'name']
        # Section names and slugs only need to be unique per publication.
        unique_together = (('name', 'publication'), ('slug', 'publication'))
class Volume(models.Model):
    """
    Provides support for individual volumes.
    """
    # Volume number within its publication (unique per publication, see Meta).
    volume_id = models.PositiveIntegerField('volume number', db_index=True)
    first_issue = models.DateField(help_text="When was the first issue of this volume published?")
    description = models.CharField("time period", max_length=100, help_text="i.e., <em>Spring 2008</em> or <em>Fall 2007 - Spring 2008</em>")
    publication = models.ForeignKey(Publication, db_index=True)
    def __unicode__(self):
        return u'%s v. %s' % (self.publication, self.volume_id)
    class Meta:
        get_latest_by = 'first_issue'
        # Newest volumes first.
        ordering = ['-first_issue', 'publication', 'volume_id']
        unique_together = (('volume_id', 'publication'), ('first_issue', 'publication'))
class Issue(models.Model):
    """
    Provides support for grouping content by issue.
    Some fields in this model might be marked as deprecated. These will be
    hidden in the admin site.

    Saving an Issue can have side effects (see save()): it may render a
    combined PDF of the issue's layouts, upload that PDF to the Issuu API,
    and poll Issuu for the conversion status.
    """
    volume = models.ForeignKey(Volume, db_index=True)
    issue_id = models.PositiveIntegerField('issue number', db_index=True, blank=True, null=True, help_text="Fill this in unless this is an online update set.")
    pub_date = models.DateTimeField('date published', db_column='date', help_text="When was this issue published?")
    online_update = models.BooleanField(default=False, help_text="Is this actually a set of online updates instead of an issue?")
    old_archive = models.BooleanField(default=False, help_text="Is this an imported set of articles from the old (PHP) site? (Should never be checked for new issues.)")
    # FIXME: This had null=True in the 2008 site. Change this in the database.
    name = models.CharField(max_length=50, blank=True, help_text="Deprecated. Was used to collect articles for special events before we had blogs.")
    poll = models.ForeignKey('voxpopuli.Poll', db_index=True, blank=True, null=True, help_text="Deprecated. Was once used to publish polls on the front page.")
    streaming = models.BooleanField("show streaming video?", default=False, help_text="Deprecated. Was used to show live video streams on the front page.")
    color = models.CharField("primary website color", max_length=6, blank=True, null=True, help_text="Enter the six-characted hex code of the color you want featured on the site (note: this is a MOVE-specific feature).")
    masthead_image = models.ImageField('MOVE masthead image', upload_to='style/%Y-%m/images/masthead/', blank=True, null=True, help_text='If an image is uploaded here, it will replace the standard masthead on the MOVE site.')
    maneater_masthead_image = models.ImageField(upload_to='style/%Y-%m/images/masthead/', blank=True, null=True, help_text='If an image is uploaded here, it will replace the standard masthead on themaneater.com.')
    render_issue_pdf = models.BooleanField('Create issue PDF', default=False, help_text='If you check this box, the site will automatically generate a PDF from all uploaded layout PDFs. Use sparingly. (To keep from accidentally rendering PDFs more often than we need, this box will automatically uncheck itself once the PDF has been created.)')
    issue_pdf_link = models.CharField(max_length=250, blank=True, null=True)
    submit_to_issuu = models.BooleanField('Submit PDF to Issuu', default=False, help_text='If you check this box, the site will automatically submit the PDF of this issue to Issuu (so that it can be displayed as a flip-book). Use sparingly. (To keep from accidentally submitting PDFs more often than we need, this box will automatically uncheck itself once the PDF has been sent to Issuu.) <strong>Note: You must create a PDF of this issue <em>before</em> you select this option.</strong>')
    issuu_id = models.CharField(max_length=250, blank=True, null=True)
    pdf_converted = models.BooleanField('Has the PDF converted?', default=False, help_text='Has the submitted PDF been successfully converted on Issuu?')
    class Meta:
        get_latest_by = 'pub_date'
        ordering = ['-pub_date']
    def __unicode__(self):
        if self.name:
            return self.name
        elif self.old_archive:
            return u'Archive for %s' % self.pub_date
        elif self.online_update:
            return u'%s v. %s, Update %s/%s' % (self.volume.publication, self.volume.volume_id, self.pub_date.month, self.pub_date.day)
        else:
            return u'%s v. %s, Issue %s' % (self.volume.publication, self.volume.volume_id, self.issue_id)
    def save(self, *args, **kwargs):
        # Phase 1: concatenate every layout page's PDF into one issue PDF,
        # then clear the flag so this is not re-run on the next save.
        if self.render_issue_pdf:
            concat = PdfFileWriter()
            layout_count = self.layout_set.count()
            layouts = self.layout_set.all()[:layout_count]
            for layout in layouts:
                pagePDF = PdfFileReader(layout.PDF)
                concat.addPage(pagePDF.getPage(0))
            filename = 'vol%sissue%s.pdf' % (self.volume.volume_id, self.issue_id)
            filepath = '%s%s' % (settings.MEDIA_ROOT, filename)
            # NOTE(review): Python 2 file() builtin; the stream is never
            # explicitly closed here.
            outputStream = file(filepath, 'wb')
            concat.write(outputStream)
            self.issue_pdf_link = '%s%s' % (settings.MEDIA_URL, filename)
            self.render_issue_pdf = False
        # Phase 2: upload the rendered PDF to Issuu via its signed HTTP API.
        # The signature is the MD5 of the secret key plus all key/value pairs
        # with '=' removed, per Issuu's 1.0 API signing scheme.
        if self.submit_to_issuu and self.issue_pdf_link:
            issuu_data = [
                'action=issuu.document.url_upload',
                'apiKey=%s' % settings.ISSUU_API_KEY,
                'category=009000',
                'commentsAllowed=false',
                'description=%s' % self.pub_date.strftime("%A, %B %e, %Y"),
                'downloadable=true',
                'explicit=false',
                'format=json',
                'infoLink=%s' % self.volume.publication.site.domain,
                'language=en',
                'name=vol%sissue%s' % (self.volume.volume_id, self.issue_id),
                'publishDate=%s' % self.pub_date.strftime("%Y-%m-%d"),
                'slurpUrl=%s' % self.issue_pdf_link,
                'tags=college newspapers,missouri,mizzou,news,the man-eater,university of missouri',
                'title=The Maneater -- Volume %s, Issue %s' % (self.volume.volume_id, self.issue_id),
                'type=007000',
            ]
            signature = ''
            for datum in issuu_data:
                datum_formatted = datum.replace('=', '')
                signature = signature + datum_formatted
            signature= '%s%s' % (settings.ISSUU_SECRET_KEY, signature)
            signature = hashlib.md5(signature).hexdigest()
            url = 'http://api.issuu.com/1_0?'
            for datum in issuu_data:
                url = url + datum.replace(' ', '%20') + '&'
            url = url + 'signature=' + signature
            # NOTE(review): the local name `json` shadows nothing imported
            # here, but is easy to confuse with the json module.
            json = urllib2.urlopen(url).read()
            document_id = simplejson.loads(json)['rsp']['_content']['document']['documentId']
            self.issuu_id = document_id
        # Phase 3: poll Issuu for the conversion state of a previously
        # submitted document: 'A' = done, 'P' = pending, 'F' = failed
        # (failure clears the Issuu bookkeeping fields).
        if self.issuu_id and not self.pdf_converted and not self.submit_to_issuu:
            issuu_data = [
                'action=issuu.document.update',
                'apiKey=%s' % settings.ISSUU_API_KEY,
                'format=json',
                'name=vol%sissue%s' % (self.volume.volume_id, self.issue_id),
            ]
            signature = ''
            for datum in issuu_data:
                datum_formatted = datum.replace('=', '')
                signature = signature + datum_formatted
            signature = '%s%s' % (settings.ISSUU_SECRET_KEY, signature)
            signature = hashlib.md5(signature).hexdigest()
            url = 'http://api.issuu.com/1_0?'
            for datum in issuu_data:
                url = url + datum.replace(' ', '%20') + '&'
            url = url + 'signature=' + signature
            json = urllib2.urlopen(url).read()
            status = simplejson.loads(json)['rsp']['_content']['document']['state']
            if status == 'A':
                self.pdf_converted = True
            elif status == 'P':
                self.pdf_converted = False
            elif status == 'F':
                self.pdf_converted = False
                self.issuu_id = ''
                self.submit_to_issuu = False
        super(Issue, self).save(*args, **kwargs)
    def get_absolute_url(self):
        return '/issues/%s/' % self.id
| {
"repo_name": "albatrossandco/brubeck_cms",
"path": "brubeck/publishing/models.py",
"copies": "1",
"size": "10276",
"license": "bsd-3-clause",
"hash": 6072164684175818000,
"line_mean": 50.1243781095,
"line_max": 490,
"alpha_frac": 0.6390618918,
"autogenerated": false,
"ratio": 3.8515742128935533,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4990636104693553,
"avg_score": null,
"num_lines": null
} |
# Allow the use of / operator for division to yield floats instead of integers:
# http://docs.python.org/whatsnew/2.2.html#pep-238-changing-the-division-operator
from __future__ import division
# Imports from standard libraries
from datetime import date, timedelta
import hashlib
import os
import urllib2
# Imports from Django
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.contrib.markup.templatetags.markup import markdown
from django.contrib.sites.models import Site
from django.core.files import File
from django.contrib.gis.db import models
from django.utils import simplejson
# Imports from Brubeck
from brubeck.core.models import Content
# Imports from other sources
from pyPdf import PdfFileReader, PdfFileWriter
# This is a hippopotamus.
# .-''''-. _
# (' ' '0)-/)
# '..____..: \._
# \u u ( '-..------._
# | / : '. '--.
# .nn_nn/ ( : ' '\
# ( '' '' / ; . \
# ''----' "\ : : '.
# .'/ '.
# / / '.
# /_| ) .\|
# | /\ . '
# '--.__| '--._ , /
# /'-, .'
# / | _.'
# (____\ /
# \ \
# '-'-'-'
class PublishedManager(models.Manager):
    """
    Only returns articles that have been marked as published. This is handy for
    such content as articles and blog posts that might need editing before being
    published.
    """
    def get_query_set(self):
        base_queryset = super(PublishedManager, self).get_query_set()
        return base_queryset.filter(is_published=True)
class Article(Content):
    """
    Provides support for news stories. The workhorse of the site.
    Some fields in this model might be marked as deprecated. These will be
    hidden in the admin site.
    """
    # NOTE: evaluated once at import time, so this cutoff goes stale in a
    # long-running process; every limit_choices_to below inherits that.
    TWO_WEEKS_AGO = date.today() - timedelta(14)
    TYPE_CHOICES = (
        ('story', "Story/Brief"),
        # ('online', "Online Exclusive"),
        ('online', "Web Update"),
        ('column', "Column"),
        ('editorial', "Editorial"),
        ('letter', "Letter to the Editor"),
        ('guest', "Guest Column"),
    )
    title = models.CharField(max_length=150, db_index=True)
    issue = models.ForeignKey('publishing.Issue', db_index=True, help_text="The issue in which this article was published.")
    section = models.ForeignKey('publishing.Section', db_index=True)
    layout = models.ForeignKey('design.Layout', db_index=True, blank=True, null=True, db_column='page_id', help_text="Deprecated. In the 2008 site, this held the relation between Layout and Article objects. This relation is now in Layout.articles as a ManyToManyField.")
    type = models.CharField(max_length=30, db_index=True, choices=TYPE_CHOICES, default='story')
    priority = models.PositiveIntegerField('priority/page number', db_index=True, default=10, help_text="The lower this number, the higher the article is displayed (compared to other articles published the same day). <strong>You should use the page number for this</strong>, but it isn't strictly required that you do so.<br />If you set this to 0 (the number zero), it will become the top story on the site and be automatically sent out over Twitter.")
    updated = models.BooleanField(db_index=True, default=False, help_text="Whether or not this article has been updated since it was last posted. If this is checked, the article will show the date and time when it was most recently saved on the front page, the archives and the article page itself.")
    cdeck = models.CharField('c-deck', max_length=255, blank=True, help_text="The optional text that appears before the article. If provided, this becomes the article's teaser in various places on the site.")
    body = models.TextField(help_text="The content of the article. Accepts HTML (for embeds and such), but primarily uses <a href=\"http://daringfireball.net/projects/markdown/basics\">Markdown</a> formatting. The basics:<ul><li>Separate paragraphs by blank lines.</li><li>Italicize words _like this_.</li><li>Make words (including subheads) bold **like this**.</li><li>Link things like so: Go to [themaneater.com](http://www.themaneater.com/).</li></ul>")
    photos = models.ManyToManyField('photography.Photo', limit_choices_to={'pub_date__gte': TWO_WEEKS_AGO}, blank=True, null=True, help_text="Photos attached to this article. (Can select photos published within the last two weeks.)")
    editorial_cartoons = models.ManyToManyField('comics.EditorialCartoon', limit_choices_to={'pub_date__gte': TWO_WEEKS_AGO}, blank=True, null=True, help_text="Editorial cartoons attached to this article. (You can select cartoons published within the last two weeks.)")
    graphics = models.ManyToManyField('design.Graphic', limit_choices_to={'pub_date__gte': TWO_WEEKS_AGO}, blank=True, null=True, help_text="Graphics attached to this article. (Can select graphics published within the last two weeks.)")
    attached_files = models.ManyToManyField('multimedia.AttachedFile', limit_choices_to={'pub_date__gte': TWO_WEEKS_AGO}, blank=True, null=True, help_text="Other files attached to this article. (Can select files uploaded within the last two weeks.)")
    videos = models.ManyToManyField('multimedia.Video', limit_choices_to={'pub_date__gte': TWO_WEEKS_AGO}, blank=True, null=True, help_text="Videos attached to this article. (Can select videos published within the last two weeks.)")
    slideshows = models.ManyToManyField('multimedia.Slideshow', limit_choices_to={'pub_date__gte': TWO_WEEKS_AGO}, blank=True, null=True, help_text="Slideshows attached to this article. (Can select slideshows published within the last two weeks.)")
    audio_clips = models.ManyToManyField('multimedia.AudioClip', limit_choices_to={'pub_date__gte': TWO_WEEKS_AGO}, blank=True, null=True, help_text="Audio clips attached to this article. (Can select audio clips published within the last two weeks.)")
    podcast_episodes = models.ManyToManyField('podcasts.Episode', limit_choices_to={'pub_date__gte': TWO_WEEKS_AGO}, blank=True, null=True, help_text="Podcast episodes related to this article. (Can select podcast episodes published in the last two weeks.)")
    tags = models.ManyToManyField('tagging.Tag', blank=True, null=True, help_text="Tags that describe this article.")
    calendar = models.ForeignKey('events.Calendar', blank=True, null=True, help_text="If we've created a calendar that has to do with the content of this article, select it here.")
    map = models.ForeignKey('mapping.Map', verbose_name='attached map', blank=True, null=True, help_text="Choose a map to display with this article.")
    polls = models.ManyToManyField('voxpopuli.Poll', limit_choices_to={'pub_date__gte': TWO_WEEKS_AGO}, verbose_name='attached polls', blank=True, null=True, help_text="Choose a poll to display with this article.")
    slug = models.SlugField(db_index=True, unique_for_date='pub_date', help_text="Used for URLs. <strong>DO NOT ENTER THE RUNSHEET SLUG.</strong>. Autogenerated from title.")
    blurb = models.TextField(blank=True, help_text="Use this if you would like the top story to show something other than its first 40 words on the front page, or if you would like a story other than the top one to show something other than its c-deck in the archives.")
    sidebar = models.TextField(blank=True, help_text="Use this if you'd like for specific sidebar content (movie review information, related links, etc.) to show up with the article.")
    runsheet_slug = models.CharField(max_length=50, blank=True, null=True, help_text="The brief phrase used to describe this article on the runsheet. Helps to ensure articles are given the same priority online and in print.")
    teaser_photo = models.ImageField(upload_to='%Y/%m%d/articles/teaser-photos', blank=True, null=True, help_text="If this article has a magazine-style illustration (for use with top stories on MOVE and Columbia Prowl), upload it here.")
    mediatype = 'article'
    # Managers--both the default (normal behavior) and one that just returns
    # published articles.
    objects = models.GeoManager()
    get_published = PublishedManager()
    def __unicode__(self):
        return self.title
    def save(self, *args, **kwargs):
        """Normalize paragraph breaks in the body, then save as usual."""
        # FIXME: Look into ping_google and how it was used before.
        try:
            if self.body.count('\r\n\r\n') or self.body.count('\n\n'):
                # Body already uses blank-line paragraph breaks; leave it alone.
                pass
            else:
                # Convert single newlines into Markdown-style paragraph breaks.
                self.body = self.body.replace('\r', '')
                self.body = self.body.replace('\n', '\r\n\r\n')
        except (AttributeError, TypeError):
            # Body missing or not string-like; save it untouched rather than
            # fail. (Was a bare ``except``, which also swallowed
            # KeyboardInterrupt/SystemExit and hid genuine errors.)
            pass
        super(Article, self).save(*args, **kwargs)
    def get_absolute_url(self):
        """Return the canonical story URL, publication-dependent."""
        if self.section.publication.name == 'Columbia Prowl':
            return 'http://%s/renters-guide/stories/%s/%s/%s/%s/' % (self.section.publication.site.domain, self.pub_date.year, self.pub_date.month, self.pub_date.day, self.slug)
        else:
            return 'http://%s/stories/%s/%s/%s/%s/' % (self.section.publication.site.domain, self.pub_date.year, self.pub_date.month, self.pub_date.day, self.slug)
    class Meta:
        get_latest_by = 'pub_date'
        ordering = ['issue', '-pub_date', 'priority', 'section', 'title']
from brubeck.core.moderation import AkismetModerator
from brubeck.core.emailing.views import render_email_and_send
from django.conf import settings
from django.contrib.comments.moderation import moderator
class ArticleModerator(AkismetModerator):
    """Akismet-backed comment moderation for Article objects."""
    enable_field = 'enable_comments'
    def email(self, comment, content_object, request):
        """E-mail the editors that a comment is awaiting moderation."""
        # Same three recipients, in the same order, as before.
        moderators = [
            settings.EDITORS['chief'],
            settings.EDITORS['managing'],
            settings.EDITORS['online_dev'],
        ]
        context = {'comment': comment, 'content_object': content_object}
        subject = 'New comment awaiting moderation on "%s"' % content_object
        render_email_and_send(context=context, message_template='core/comment_notification_email.txt', subject=subject, recipients=moderators)
    def moderate(self, comment, content_object, request):
        """Hold every comment for moderation."""
        return True
# Route Article comments through ArticleModerator (Akismet + editor e-mail).
moderator.register(Article, ArticleModerator)
class Correction(models.Model):
    """
    Provides support for publishing corrections to articles.
    This model is edited inline as part of the Article change page.
    """
    # The article this correction applies to.
    article = models.ForeignKey(Article)
    # When the correction was issued.
    date_corrected = models.DateTimeField()
    # The correction text shown to readers.
    correction = models.CharField(max_length=500)
    def __unicode__(self):
        return "Correction to %s" % self.article
    class Meta:
        get_latest_by = 'date_corrected'
        ordering = ['-date_corrected', 'article']
| {
"repo_name": "albatrossandco/brubeck_cms",
"path": "brubeck/articles/models.py",
"copies": "1",
"size": "11291",
"license": "bsd-3-clause",
"hash": -349507177612159800,
"line_mean": 64.0292397661,
"line_max": 456,
"alpha_frac": 0.6620317067,
"autogenerated": false,
"ratio": 3.8029639609296058,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4964995667629606,
"avg_score": null,
"num_lines": null
} |
# allow to disable validations
# Require a minimal slice count (see disable_validate_slicecount).
validate_slicecount = True
# Require all slices to share the same orientation (be parallel).
validate_orientation = True
# Require the volume to be orthogonal (no gantry tilt or alike).
validate_orthogonal = True
# Require a consistent increment between consecutive slices.
validate_slice_increment = True
# Validate instance numbers; off by default.
validate_instance_number = False
# Reject multiframe data with an implicit transfer syntax.
validate_multiframe_implicit = True
# Ask pydicom to force-read non-conformant files.
pydicom_read_force = False
# Path to the gdcmconv executable when it is not on PATH.
gdcmconv_path = None
# Resample gantry-tilted data instead of encoding the tilt in the affine.
resample = False
# Padding value used during resampling.
resample_padding = 0
resample_spline_interpolation_order = 0 # spline interpolation order (0 nn , 1 bilinear, 3 cubic)
def disable_validate_slice_increment():
    """
    Disable the validation of the slice increment.
    This allows for converting data where the slice increment is not consistent.
    USE WITH CAUTION!
    Re-enable with enable_validate_slice_increment().
    """
    global validate_slice_increment
    validate_slice_increment = False
def disable_validate_instance_number():
    """
    Disable the validation of the instance number.
    This allows for converting data where instance numbers are missing or inconsistent.
    (Docstring previously duplicated the slice-increment text by mistake.)
    USE WITH CAUTION!
    Re-enable with enable_validate_instance_number().
    """
    global validate_instance_number
    validate_instance_number = False
def disable_validate_orientation():
    """
    Disable the validation of the slice orientation.
    This validation checks that all slices have the same orientation (are parallel).
    USE WITH CAUTION!
    Re-enable with enable_validate_orientation().
    """
    global validate_orientation
    validate_orientation = False
def disable_validate_orthogonal():
    """
    Disable the validation whether the volume is orthogonal (so without gantry tilting or alike).
    This allows for converting gantry tilted data.
    The gantry tilting will be reflected in the affine matrix and not in the data
    USE WITH CAUTION!
    Re-enable with enable_validate_orthogonal().
    """
    global validate_orthogonal
    validate_orthogonal = False
def disable_validate_slicecount():
    """
    Disable the validation of the minimal slice count of 4 slices.
    This allows for converting data with less slices.
    Usually less than 4 could be considered localizer or similar thus ignoring these scans by default
    USE WITH CAUTION!
    Re-enable with enable_validate_slicecount().
    """
    global validate_slicecount
    validate_slicecount = False
def disable_validate_multiframe_implicit():
    """
    Disable the validation that checks that data is not multiframe implicit
    This allows to sometimes convert Philips Multiframe with implicit transfer syntax
    Re-enable with enable_validate_multiframe_implicit().
    """
    global validate_multiframe_implicit
    validate_multiframe_implicit = False
def enable_validate_slice_increment():
    """
    Enable the slice increment validation again (DEFAULT ENABLED)
    Counterpart of disable_validate_slice_increment().
    """
    global validate_slice_increment
    validate_slice_increment = True
def enable_validate_instance_number():
    """
    Enable the instance number validation (DEFAULT DISABLED).
    (Docstring previously said "slice increment ... DEFAULT ENABLED" by
    mistake; the module default for validate_instance_number is False.)
    """
    global validate_instance_number
    validate_instance_number = True
def enable_validate_orientation():
    """
    Enable the slice orientation validation again (DEFAULT ENABLED)
    Counterpart of disable_validate_orientation().
    """
    global validate_orientation
    validate_orientation = True
def enable_validate_orthogonal():
    """
    Enable the validation whether the volume is orthogonal again (DEFAULT ENABLED)
    Counterpart of disable_validate_orthogonal().
    """
    global validate_orthogonal
    validate_orthogonal = True
def enable_validate_slicecount():
    """
    Enable the validation of the minimal slice count of 4 slices again (DEFAULT ENABLED)
    Counterpart of disable_validate_slicecount().
    """
    global validate_slicecount
    validate_slicecount = True
def enable_validate_multiframe_implicit():
    """
    Enable the validation that checks that data is not multiframe implicit again (DEFAULT ENABLED)
    Counterpart of disable_validate_multiframe_implicit().
    """
    global validate_multiframe_implicit
    validate_multiframe_implicit = True
def enable_pydicom_read_force():
    """
    Enable the pydicom read force to try to read non conform dicom data
    Counterpart of disable_pydicom_read_force().
    """
    global pydicom_read_force
    pydicom_read_force = True
def disable_pydicom_read_force():
    """
    Disable the pydicom read force again (DEFAULT DISABLED).
    (Docstring previously said "Enable" — this is the disabling counterpart
    of enable_pydicom_read_force().)
    """
    global pydicom_read_force
    pydicom_read_force = False
def enable_resampling():
    """
    Enable resampling in case of gantry tilted data (disabled by default)
    See set_resample_padding() and set_resample_spline_interpolation_order().
    """
    global resample
    resample = True
def disable_resampling():
    """
    Disable resampling in case of gantry tilted data (disabled by default)
    Counterpart of enable_resampling().
    """
    global resample
    resample = False
def set_resample_padding(padding):
    """
    Set the spline interpolation padding
    :param padding: padding value used when resampling gantry tilted data
    """
    global resample_padding
    resample_padding = padding
def set_resample_spline_interpolation_order(order):
    """
    Set the spline interpolation order used during resampling of gantry tilted data
    :param order: spline order (0 nearest neighbour, 1 bilinear, 3 cubic)
    """
    global resample_spline_interpolation_order
    resample_spline_interpolation_order = order
def set_gdcmconv_path(path):
    """
    Set the filepath to the gdcmconv executable (needed if it is not found in your PATH)
    :param path: the file path to the gdcmconv executable
    """
    global gdcmconv_path
    gdcmconv_path = path
| {
"repo_name": "icometrix/dicom2nifti",
"path": "dicom2nifti/settings.py",
"copies": "1",
"size": "4835",
"license": "mit",
"hash": -6410171382783045000,
"line_mean": 26.1629213483,
"line_max": 101,
"alpha_frac": 0.7154084798,
"autogenerated": false,
"ratio": 4.363718411552346,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5579126891352346,
"avg_score": null,
"num_lines": null
} |
"""Allow to set up simple automation rules via the config file."""
import asyncio
from functools import partial
import importlib
import logging
from typing import Any, Awaitable, Callable
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_NAME,
CONF_ID,
CONF_PLATFORM,
EVENT_AUTOMATION_TRIGGERED,
EVENT_HOMEASSISTANT_START,
SERVICE_RELOAD,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
)
from homeassistant.core import Context, CoreState, HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import condition, extract_domain_configs, script
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ENTITY_SERVICE_SCHEMA
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.typing import TemplateVarsType
from homeassistant.loader import bind_hass
from homeassistant.util.dt import parse_datetime, utcnow
# mypy: allow-untyped-calls, allow-untyped-defs
# mypy: no-check-untyped-defs, no-warn-return-any
DOMAIN = "automation"
ENTITY_ID_FORMAT = DOMAIN + ".{}"

GROUP_NAME_ALL_AUTOMATIONS = "all automations"

# Configuration keys accepted in an automation definition.
CONF_ALIAS = "alias"
CONF_DESCRIPTION = "description"
CONF_HIDE_ENTITY = "hide_entity"

CONF_CONDITION = "condition"
CONF_ACTION = "action"
CONF_TRIGGER = "trigger"
CONF_CONDITION_TYPE = "condition_type"
CONF_INITIAL_STATE = "initial_state"

CONDITION_USE_TRIGGER_VALUES = "use_trigger_values"
CONDITION_TYPE_AND = "and"
CONDITION_TYPE_OR = "or"

DEFAULT_CONDITION_TYPE = CONDITION_TYPE_AND
DEFAULT_HIDE_ENTITY = False
DEFAULT_INITIAL_STATE = True

# State attribute exposing when the automation last fired.
ATTR_LAST_TRIGGERED = "last_triggered"
ATTR_VARIABLES = "variables"
SERVICE_TRIGGER = "trigger"

_LOGGER = logging.getLogger(__name__)

# Signature of an automation action callback: (hass, template vars) -> awaitable.
AutomationActionType = Callable[[HomeAssistant, TemplateVarsType], Awaitable[None]]
def _platform_validator(config):
    """Validate it is a valid platform."""
    module_path = ".{}".format(config[CONF_PLATFORM])
    try:
        platform = importlib.import_module(module_path, __name__)
    except ImportError:
        # Unknown trigger platform: surface as a config validation error.
        raise vol.Invalid("Invalid platform specified") from None

    return platform.TRIGGER_SCHEMA(config)
# One or more trigger entries; platform-specific validation is delegated
# to each trigger platform's TRIGGER_SCHEMA via _platform_validator.
_TRIGGER_SCHEMA = vol.All(
    cv.ensure_list,
    [
        vol.All(
            vol.Schema({vol.Required(CONF_PLATFORM): str}, extra=vol.ALLOW_EXTRA),
            _platform_validator,
        )
    ],
)

_CONDITION_SCHEMA = vol.All(cv.ensure_list, [cv.CONDITION_SCHEMA])

# Schema for a single automation block in the YAML configuration.
PLATFORM_SCHEMA = vol.Schema(
    {
        # str on purpose
        CONF_ID: str,
        CONF_ALIAS: cv.string,
        vol.Optional(CONF_DESCRIPTION): cv.string,
        vol.Optional(CONF_INITIAL_STATE): cv.boolean,
        vol.Optional(CONF_HIDE_ENTITY, default=DEFAULT_HIDE_ENTITY): cv.boolean,
        vol.Required(CONF_TRIGGER): _TRIGGER_SCHEMA,
        vol.Optional(CONF_CONDITION): _CONDITION_SCHEMA,
        vol.Required(CONF_ACTION): cv.SCRIPT_SCHEMA,
    }
)

# automation.trigger accepts optional template variables on top of the
# standard entity service payload.
TRIGGER_SERVICE_SCHEMA = ENTITY_SERVICE_SCHEMA.extend(
    {vol.Optional(ATTR_VARIABLES, default={}): dict}
)

RELOAD_SERVICE_SCHEMA = vol.Schema({})
@bind_hass
def is_on(hass, entity_id):
    """
    Return true if specified automation entity_id is on.

    Async friendly.
    """
    # Pure state-machine lookup; no I/O, safe from the event loop.
    return hass.states.is_state(entity_id, STATE_ON)
async def async_setup(hass, config):
    """Set up the automation."""
    component = EntityComponent(
        _LOGGER, DOMAIN, hass, group_name=GROUP_NAME_ALL_AUTOMATIONS
    )

    await _async_process_config(hass, config, component)

    async def trigger_service_handler(service_call):
        """Handle automation triggers."""
        targets = await component.async_extract_from_service(service_call)
        pending = [
            automation.async_trigger(
                service_call.data.get(ATTR_VARIABLES),
                skip_condition=True,
                context=service_call.context,
            )
            for automation in targets
        ]
        if pending:
            await asyncio.wait(pending)

    async def turn_onoff_service_handler(service_call):
        """Handle automation turn on/off service calls."""
        # Dispatch to async_turn_on / async_turn_off by service name.
        method = f"async_{service_call.service}"
        targets = await component.async_extract_from_service(service_call)
        pending = [getattr(automation, method)() for automation in targets]
        if pending:
            await asyncio.wait(pending)

    async def toggle_service_handler(service_call):
        """Handle automation toggle service calls."""
        pending = []
        for automation in await component.async_extract_from_service(service_call):
            if automation.is_on:
                pending.append(automation.async_turn_off())
            else:
                pending.append(automation.async_turn_on())
        if pending:
            await asyncio.wait(pending)

    async def reload_service_handler(service_call):
        """Remove all automations and load new ones from config."""
        conf = await component.async_prepare_reload()
        if conf is None:
            return
        await _async_process_config(hass, conf, component)

    hass.services.async_register(
        DOMAIN, SERVICE_TRIGGER, trigger_service_handler, schema=TRIGGER_SERVICE_SCHEMA
    )
    hass.services.async_register(
        DOMAIN, SERVICE_RELOAD, reload_service_handler, schema=RELOAD_SERVICE_SCHEMA
    )
    hass.services.async_register(
        DOMAIN, SERVICE_TOGGLE, toggle_service_handler, schema=ENTITY_SERVICE_SCHEMA
    )
    for service in (SERVICE_TURN_ON, SERVICE_TURN_OFF):
        hass.services.async_register(
            DOMAIN, service, turn_onoff_service_handler, schema=ENTITY_SERVICE_SCHEMA
        )

    return True
class AutomationEntity(ToggleEntity, RestoreEntity):
    """Entity to show status of entity."""

    def __init__(
        self,
        automation_id,
        name,
        async_attach_triggers,
        cond_func,
        async_action,
        hidden,
        initial_state,
    ):
        """Initialize an automation entity.

        async_attach_triggers is a coroutine factory that wires up trigger
        listeners and returns a detach callback (or None on failure).
        """
        self._id = automation_id
        self._name = name
        self._async_attach_triggers = async_attach_triggers
        self._async_detach_triggers = None
        self._cond_func = cond_func
        self._async_action = async_action
        self._last_triggered = None
        self._hidden = hidden
        self._initial_state = initial_state
        # True once enabled, even while trigger attachment is deferred
        # until Home Assistant has finished starting.
        self._is_enabled = False

    @property
    def name(self):
        """Name of the automation."""
        return self._name

    @property
    def should_poll(self):
        """No polling needed for automation entities."""
        return False

    @property
    def state_attributes(self):
        """Return the entity state attributes."""
        return {ATTR_LAST_TRIGGERED: self._last_triggered}

    @property
    def hidden(self) -> bool:
        """Return True if the automation entity should be hidden from UIs."""
        return self._hidden

    @property
    def is_on(self) -> bool:
        """Return True if entity is on."""
        # Either triggers are attached, or we are enabled but still waiting
        # for startup to attach them.
        return self._async_detach_triggers is not None or self._is_enabled

    async def async_added_to_hass(self) -> None:
        """Startup with initial state or previous state."""
        await super().async_added_to_hass()

        # Prefer the restored state; config initial_state overrides below.
        state = await self.async_get_last_state()
        if state:
            enable_automation = state.state == STATE_ON
            last_triggered = state.attributes.get("last_triggered")
            if last_triggered is not None:
                self._last_triggered = parse_datetime(last_triggered)
            _LOGGER.debug(
                "Loaded automation %s with state %s from state "
                " storage last state %s",
                self.entity_id,
                enable_automation,
                state,
            )
        else:
            enable_automation = DEFAULT_INITIAL_STATE
            _LOGGER.debug(
                "Automation %s not in state storage, state %s from " "default is used.",
                self.entity_id,
                enable_automation,
            )

        if self._initial_state is not None:
            enable_automation = self._initial_state
            _LOGGER.debug(
                "Automation %s initial state %s overridden from "
                "config initial_state",
                self.entity_id,
                enable_automation,
            )

        if enable_automation:
            await self.async_enable()

    async def async_turn_on(self, **kwargs: Any) -> None:
        """Turn the entity on and update the state."""
        await self.async_enable()

    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn the entity off."""
        await self.async_disable()

    async def async_trigger(self, variables, skip_condition=False, context=None):
        """Trigger automation.

        This method is a coroutine.
        """
        if not skip_condition and not self._cond_func(variables):
            return

        # Create a new context referring to the old context.
        parent_id = None if context is None else context.id
        trigger_context = Context(parent_id=parent_id)

        self.async_set_context(trigger_context)
        self.hass.bus.async_fire(
            EVENT_AUTOMATION_TRIGGERED,
            {ATTR_NAME: self._name, ATTR_ENTITY_ID: self.entity_id},
            context=trigger_context,
        )
        await self._async_action(self.entity_id, variables, trigger_context)
        self._last_triggered = utcnow()
        await self.async_update_ha_state()

    async def async_will_remove_from_hass(self):
        """Remove listeners when removing automation from HASS."""
        await super().async_will_remove_from_hass()
        await self.async_disable()

    async def async_enable(self):
        """Enable this automation entity.

        This method is a coroutine.
        """
        if self._is_enabled:
            return

        self._is_enabled = True

        # If HA is already running (or stopping), attach triggers right away;
        # otherwise defer attachment to the EVENT_HOMEASSISTANT_START below.
        if self.hass.state != CoreState.not_running:
            self._async_detach_triggers = await self._async_attach_triggers(
                self.async_trigger
            )
            self.async_write_ha_state()
            return

        async def async_enable_automation(event):
            """Start automation on startup."""
            # Don't do anything if no longer enabled or already attached
            if not self._is_enabled or self._async_detach_triggers is not None:
                return

            self._async_detach_triggers = await self._async_attach_triggers(
                self.async_trigger
            )

        self.hass.bus.async_listen_once(
            EVENT_HOMEASSISTANT_START, async_enable_automation
        )

        self.async_write_ha_state()

    async def async_disable(self):
        """Disable the automation entity."""
        if not self._is_enabled:
            return

        self._is_enabled = False

        if self._async_detach_triggers is not None:
            self._async_detach_triggers()
            self._async_detach_triggers = None

        self.async_write_ha_state()

    @property
    def device_state_attributes(self):
        """Return automation attributes."""
        if self._id is None:
            return None

        return {CONF_ID: self._id}
async def _async_process_config(hass, config, component):
    """Process config and add automations.

    This method is a coroutine.
    """
    entities = []

    for config_key in extract_domain_configs(config, DOMAIN):
        conf = config[config_key]

        for list_no, config_block in enumerate(conf):
            automation_id = config_block.get(CONF_ID)
            # Fall back to a positional name when no alias is configured.
            name = config_block.get(CONF_ALIAS) or f"{config_key} {list_no}"

            hidden = config_block[CONF_HIDE_ENTITY]
            initial_state = config_block.get(CONF_INITIAL_STATE)

            action = _async_get_action(hass, config_block.get(CONF_ACTION, {}), name)

            if CONF_CONDITION in config_block:
                cond_func = await _async_process_if(hass, config, config_block)

                if cond_func is None:
                    # Invalid condition config; skip this automation entirely.
                    continue
            else:

                def cond_func(variables):
                    """Condition will always pass."""
                    return True

            # Defer trigger attachment; the entity calls this when enabled.
            async_attach_triggers = partial(
                _async_process_trigger,
                hass,
                config,
                config_block.get(CONF_TRIGGER, []),
                name,
            )
            entity = AutomationEntity(
                automation_id,
                name,
                async_attach_triggers,
                cond_func,
                action,
                hidden,
                initial_state,
            )

            entities.append(entity)

    if entities:
        await component.async_add_entities(entities)
def _async_get_action(hass, config, name):
    """Return an action based on a configuration."""
    automation_script = script.Script(hass, config, name)

    async def action(entity_id, variables, context):
        """Execute an action."""
        _LOGGER.info("Executing %s", name)

        try:
            await automation_script.async_run(variables, context)
        except Exception as err:  # pylint: disable=broad-except
            # Log through the script helper so the failing step is reported.
            automation_script.async_log_exception(
                _LOGGER, f"Error while executing automation {entity_id}", err
            )

    return action
async def _async_process_if(hass, config, p_config):
    """Process if checks."""
    if_configs = p_config.get(CONF_CONDITION)

    checks = []
    for if_config in if_configs:
        try:
            check = await condition.async_from_config(hass, if_config, False)
        except HomeAssistantError as ex:
            # One bad condition invalidates the whole automation block.
            _LOGGER.warning("Invalid condition: %s", ex)
            return None
        checks.append(check)

    def if_action(variables=None):
        """AND all conditions."""
        return all(check(hass, variables) for check in checks)

    return if_action
async def _async_process_trigger(hass, config, trigger_configs, name, action):
    """Set up the triggers.

    This method is a coroutine.
    """
    detach_callbacks = []
    info = {"name": name}

    for trigger_conf in trigger_configs:
        platform = importlib.import_module(
            ".{}".format(trigger_conf[CONF_PLATFORM]), __name__
        )

        detach = await platform.async_attach_trigger(hass, trigger_conf, action, info)
        if not detach:
            _LOGGER.error("Error setting up trigger %s", name)
            continue

        _LOGGER.info("Initialized trigger %s", name)
        detach_callbacks.append(detach)

    if not detach_callbacks:
        return None

    def remove_triggers():
        """Remove attached triggers."""
        for detach in detach_callbacks:
            detach()

    return remove_triggers
| {
"repo_name": "qedi-r/home-assistant",
"path": "homeassistant/components/automation/__init__.py",
"copies": "2",
"size": "14929",
"license": "apache-2.0",
"hash": 6455046691037192000,
"line_mean": 29.2819472617,
"line_max": 88,
"alpha_frac": 0.6107575859,
"autogenerated": false,
"ratio": 4.1958965711073635,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00028601808321178726,
"num_lines": 493
} |
"""Allow to set up simple automation rules via the config file."""
import asyncio
from functools import partial
import importlib
import logging
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_NAME, CONF_ID, CONF_PLATFORM,
EVENT_AUTOMATION_TRIGGERED, EVENT_HOMEASSISTANT_START, SERVICE_RELOAD,
SERVICE_TOGGLE, SERVICE_TURN_OFF, SERVICE_TURN_ON, STATE_ON)
from homeassistant.core import Context, CoreState
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import condition, extract_domain_configs, script
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.loader import bind_hass
from homeassistant.util.dt import utcnow
DOMAIN = 'automation'
ENTITY_ID_FORMAT = DOMAIN + '.{}'

GROUP_NAME_ALL_AUTOMATIONS = 'all automations'

# Configuration keys accepted in an automation definition.
CONF_ALIAS = 'alias'
CONF_HIDE_ENTITY = 'hide_entity'

CONF_CONDITION = 'condition'
CONF_ACTION = 'action'
CONF_TRIGGER = 'trigger'
CONF_CONDITION_TYPE = 'condition_type'
CONF_INITIAL_STATE = 'initial_state'

CONDITION_USE_TRIGGER_VALUES = 'use_trigger_values'
CONDITION_TYPE_AND = 'and'
CONDITION_TYPE_OR = 'or'

DEFAULT_CONDITION_TYPE = CONDITION_TYPE_AND
DEFAULT_HIDE_ENTITY = False
DEFAULT_INITIAL_STATE = True

# State attribute exposing when the automation last fired.
ATTR_LAST_TRIGGERED = 'last_triggered'
ATTR_VARIABLES = 'variables'
SERVICE_TRIGGER = 'trigger'

_LOGGER = logging.getLogger(__name__)
def _platform_validator(config):
    """Validate it is a valid platform."""
    module_path = '.{}'.format(config[CONF_PLATFORM])
    try:
        platform = importlib.import_module(module_path, __name__)
    except ImportError:
        # Unknown trigger platform: surface as a config validation error.
        raise vol.Invalid('Invalid platform specified') from None

    return platform.TRIGGER_SCHEMA(config)
# One or more trigger entries; platform-specific validation is delegated
# to each trigger platform's TRIGGER_SCHEMA via _platform_validator.
_TRIGGER_SCHEMA = vol.All(
    cv.ensure_list,
    [
        vol.All(
            vol.Schema({
                vol.Required(CONF_PLATFORM): str
            }, extra=vol.ALLOW_EXTRA),
            _platform_validator
        ),
    ]
)

_CONDITION_SCHEMA = vol.All(cv.ensure_list, [cv.CONDITION_SCHEMA])

# Schema for a single automation block in the YAML configuration.
PLATFORM_SCHEMA = vol.Schema({
    # str on purpose
    CONF_ID: str,
    CONF_ALIAS: cv.string,
    vol.Optional(CONF_INITIAL_STATE): cv.boolean,
    vol.Optional(CONF_HIDE_ENTITY, default=DEFAULT_HIDE_ENTITY): cv.boolean,
    vol.Required(CONF_TRIGGER): _TRIGGER_SCHEMA,
    vol.Optional(CONF_CONDITION): _CONDITION_SCHEMA,
    vol.Required(CONF_ACTION): cv.SCRIPT_SCHEMA,
})

SERVICE_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.comp_entity_ids,
})

# automation.trigger additionally accepts template variables.
TRIGGER_SERVICE_SCHEMA = vol.Schema({
    vol.Required(ATTR_ENTITY_ID): cv.comp_entity_ids,
    vol.Optional(ATTR_VARIABLES, default={}): dict,
})

RELOAD_SERVICE_SCHEMA = vol.Schema({})
@bind_hass
def is_on(hass, entity_id):
    """
    Return true if specified automation entity_id is on.

    Async friendly.
    """
    # Pure state-machine lookup; no I/O, safe from the event loop.
    return hass.states.is_state(entity_id, STATE_ON)
async def async_setup(hass, config):
    """Set up the automation."""
    component = EntityComponent(_LOGGER, DOMAIN, hass,
                                group_name=GROUP_NAME_ALL_AUTOMATIONS)

    await _async_process_config(hass, config, component)

    async def trigger_service_handler(service_call):
        """Handle automation triggers."""
        tasks = []
        for entity in await component.async_extract_from_service(service_call):
            tasks.append(entity.async_trigger(
                service_call.data.get(ATTR_VARIABLES),
                skip_condition=True,
                context=service_call.context))

        if tasks:
            await asyncio.wait(tasks)

    async def turn_onoff_service_handler(service_call):
        """Handle automation turn on/off service calls."""
        tasks = []
        # Dispatch to async_turn_on / async_turn_off by service name.
        method = 'async_{}'.format(service_call.service)
        for entity in await component.async_extract_from_service(service_call):
            tasks.append(getattr(entity, method)())

        if tasks:
            await asyncio.wait(tasks)

    async def toggle_service_handler(service_call):
        """Handle automation toggle service calls."""
        tasks = []
        for entity in await component.async_extract_from_service(service_call):
            if entity.is_on:
                tasks.append(entity.async_turn_off())
            else:
                tasks.append(entity.async_turn_on())

        if tasks:
            await asyncio.wait(tasks)

    async def reload_service_handler(service_call):
        """Remove all automations and load new ones from config."""
        conf = await component.async_prepare_reload()
        if conf is None:
            return
        await _async_process_config(hass, conf, component)

    hass.services.async_register(
        DOMAIN, SERVICE_TRIGGER, trigger_service_handler,
        schema=TRIGGER_SERVICE_SCHEMA)

    hass.services.async_register(
        DOMAIN, SERVICE_RELOAD, reload_service_handler,
        schema=RELOAD_SERVICE_SCHEMA)

    hass.services.async_register(
        DOMAIN, SERVICE_TOGGLE, toggle_service_handler,
        schema=SERVICE_SCHEMA)

    for service in (SERVICE_TURN_ON, SERVICE_TURN_OFF):
        hass.services.async_register(
            DOMAIN, service, turn_onoff_service_handler,
            schema=SERVICE_SCHEMA)

    return True
class AutomationEntity(ToggleEntity, RestoreEntity):
"""Entity to show status of entity."""
def __init__(self, automation_id, name, async_attach_triggers, cond_func,
async_action, hidden, initial_state):
"""Initialize an automation entity."""
self._id = automation_id
self._name = name
self._async_attach_triggers = async_attach_triggers
self._async_detach_triggers = None
self._cond_func = cond_func
self._async_action = async_action
self._last_triggered = None
self._hidden = hidden
self._initial_state = initial_state
@property
def name(self):
"""Name of the automation."""
return self._name
@property
def should_poll(self):
"""No polling needed for automation entities."""
return False
@property
def state_attributes(self):
"""Return the entity state attributes."""
return {
ATTR_LAST_TRIGGERED: self._last_triggered
}
@property
def hidden(self) -> bool:
"""Return True if the automation entity should be hidden from UIs."""
return self._hidden
@property
def is_on(self) -> bool:
"""Return True if entity is on."""
return self._async_detach_triggers is not None
async def async_added_to_hass(self) -> None:
"""Startup with initial state or previous state."""
await super().async_added_to_hass()
if self._initial_state is not None:
enable_automation = self._initial_state
_LOGGER.debug("Automation %s initial state %s from config "
"initial_state", self.entity_id, enable_automation)
else:
state = await self.async_get_last_state()
if state:
enable_automation = state.state == STATE_ON
self._last_triggered = state.attributes.get('last_triggered')
_LOGGER.debug("Automation %s initial state %s from recorder "
"last state %s", self.entity_id,
enable_automation, state)
else:
enable_automation = DEFAULT_INITIAL_STATE
_LOGGER.debug("Automation %s initial state %s from default "
"initial state", self.entity_id,
enable_automation)
if not enable_automation:
return
# HomeAssistant is starting up
if self.hass.state == CoreState.not_running:
async def async_enable_automation(event):
"""Start automation on startup."""
await self.async_enable()
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, async_enable_automation)
# HomeAssistant is running
else:
await self.async_enable()
    async def async_turn_on(self, **kwargs) -> None:
        """Turn the entity on and update the state."""
        if self.is_on:
            return
        # async_enable attaches the triggers and pushes the state update.
        await self.async_enable()
    async def async_turn_off(self, **kwargs) -> None:
        """Turn the entity off."""
        if not self.is_on:
            return
        # Detach trigger listeners; clearing the callback flips is_on to False.
        self._async_detach_triggers()
        self._async_detach_triggers = None
        await self.async_update_ha_state()
    async def async_trigger(self, variables, skip_condition=False,
                            context=None):
        """Trigger automation.

        This method is a coroutine. ``skip_condition`` is used by the
        automation.trigger service to bypass the condition checks.
        """
        if not skip_condition and not self._cond_func(variables):
            return
        # Create a new context referring to the old context.
        parent_id = None if context is None else context.id
        trigger_context = Context(parent_id=parent_id)
        self.async_set_context(trigger_context)
        # Announce the trigger before running the action script.
        self.hass.bus.async_fire(EVENT_AUTOMATION_TRIGGERED, {
            ATTR_NAME: self._name,
            ATTR_ENTITY_ID: self.entity_id,
        }, context=trigger_context)
        await self._async_action(self.entity_id, variables, trigger_context)
        self._last_triggered = utcnow()
        await self.async_update_ha_state()
    async def async_will_remove_from_hass(self):
        """Remove listeners when removing automation from HASS."""
        await super().async_will_remove_from_hass()
        # Detach any attached triggers before the entity goes away.
        await self.async_turn_off()
    async def async_enable(self):
        """Enable this automation entity.

        This method is a coroutine.
        """
        if self.is_on:
            return
        # Attaching returns the detach callback; storing it marks us "on".
        self._async_detach_triggers = await self._async_attach_triggers(
            self.async_trigger)
        await self.async_update_ha_state()
    @property
    def device_state_attributes(self):
        """Return automation attributes."""
        # Only automations configured with an explicit id: expose it.
        if self._id is None:
            return None
        return {
            CONF_ID: self._id
        }
async def _async_process_config(hass, config, component):
    """Process config and add automations.

    This method is a coroutine.
    """
    entities = []
    for config_key in extract_domain_configs(config, DOMAIN):
        for list_no, config_block in enumerate(config[config_key]):
            automation_id = config_block.get(CONF_ID)
            name = (config_block.get(CONF_ALIAS) or
                    "{} {}".format(config_key, list_no))
            hidden = config_block[CONF_HIDE_ENTITY]
            initial_state = config_block.get(CONF_INITIAL_STATE)
            action = _async_get_action(
                hass, config_block.get(CONF_ACTION, {}), name)
            if CONF_CONDITION in config_block:
                cond_func = _async_process_if(hass, config, config_block)
                if cond_func is None:
                    # Invalid condition config; skip this automation.
                    continue
            else:
                def cond_func(variables):
                    """Condition will always pass."""
                    return True
            async_attach_triggers = partial(
                _async_process_trigger, hass, config,
                config_block.get(CONF_TRIGGER, []), name
            )
            entities.append(AutomationEntity(
                automation_id, name, async_attach_triggers, cond_func,
                action, hidden, initial_state))
    if entities:
        await component.async_add_entities(entities)
def _async_get_action(hass, config, name):
    """Return an action based on a configuration."""
    action_script = script.Script(hass, config, name)

    async def action(entity_id, variables, context):
        """Execute the automation's action script."""
        _LOGGER.info('Executing %s', name)
        try:
            await action_script.async_run(variables, context)
        except Exception as err:  # pylint: disable=broad-except
            # Log through the script helper so the error points at the
            # failing script step.
            action_script.async_log_exception(
                _LOGGER,
                'Error while executing automation {}'.format(entity_id), err)

    return action
def _async_process_if(hass, config, p_config):
    """Process if checks.

    Returns None when any configured condition is invalid.
    """
    checks = []
    for if_config in p_config.get(CONF_CONDITION):
        try:
            checks.append(condition.async_from_config(if_config, False))
        except HomeAssistantError as ex:
            _LOGGER.warning('Invalid condition: %s', ex)
            return None

    def if_action(variables=None):
        """AND all conditions."""
        return all(check(hass, variables) for check in checks)

    return if_action
async def _async_process_trigger(hass, config, trigger_configs, name, action):
    """Set up the triggers.

    This method is a coroutine. Returns a callable detaching every
    successfully attached trigger, or None when nothing attached.
    """
    removes = []
    info = {'name': name}
    for conf in trigger_configs:
        # Trigger platforms live as submodules of this package.
        platform = importlib.import_module(
            '.{}'.format(conf[CONF_PLATFORM]), __name__)
        remove = await platform.async_trigger(hass, conf, action, info)
        if not remove:
            _LOGGER.error("Error setting up trigger %s", name)
            continue
        _LOGGER.info("Initialized trigger %s", name)
        removes.append(remove)
    if not removes:
        return None

    def remove_triggers():
        """Remove attached triggers."""
        for remove in removes:
            remove()

    return remove_triggers
| {
"repo_name": "aequitas/home-assistant",
"path": "homeassistant/components/automation/__init__.py",
"copies": "1",
"size": "13825",
"license": "apache-2.0",
"hash": -3032927383393560600,
"line_mean": 30.6361556064,
"line_max": 79,
"alpha_frac": 0.6082459313,
"autogenerated": false,
"ratio": 4.212370505789153,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5320616437089153,
"avg_score": null,
"num_lines": null
} |
"""Allow to set up simple automation rules via the config file."""
import asyncio
from functools import partial
import importlib
import logging
import voluptuous as vol
from homeassistant.setup import async_prepare_setup_platform
from homeassistant.core import CoreState, Context
from homeassistant.loader import bind_hass
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_PLATFORM, STATE_ON, SERVICE_TURN_ON, SERVICE_TURN_OFF,
SERVICE_TOGGLE, SERVICE_RELOAD, EVENT_HOMEASSISTANT_START, CONF_ID,
EVENT_AUTOMATION_TRIGGERED, ATTR_NAME)
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import extract_domain_configs, script, condition
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.util.dt import utcnow
import homeassistant.helpers.config_validation as cv
DOMAIN = 'automation'
DEPENDENCIES = ['group']
ENTITY_ID_FORMAT = DOMAIN + '.{}'
GROUP_NAME_ALL_AUTOMATIONS = 'all automations'
# Keys accepted inside an automation config block.
CONF_ALIAS = 'alias'
CONF_HIDE_ENTITY = 'hide_entity'
CONF_CONDITION = 'condition'
CONF_ACTION = 'action'
CONF_TRIGGER = 'trigger'
CONF_CONDITION_TYPE = 'condition_type'
CONF_INITIAL_STATE = 'initial_state'
CONDITION_USE_TRIGGER_VALUES = 'use_trigger_values'
CONDITION_TYPE_AND = 'and'
CONDITION_TYPE_OR = 'or'
DEFAULT_CONDITION_TYPE = CONDITION_TYPE_AND
DEFAULT_HIDE_ENTITY = False
# Automations are enabled by default when no initial_state/restore data.
DEFAULT_INITIAL_STATE = True
ATTR_LAST_TRIGGERED = 'last_triggered'
ATTR_VARIABLES = 'variables'
SERVICE_TRIGGER = 'trigger'
_LOGGER = logging.getLogger(__name__)
def _platform_validator(config):
    """Validate it is a valid platform."""
    # Formatting the module path cannot raise ImportError, so it is safe
    # to compute it outside the try block.
    module_name = 'homeassistant.components.automation.{}'.format(
        config[CONF_PLATFORM])
    try:
        platform = importlib.import_module(module_name)
    except ImportError:
        raise vol.Invalid('Invalid platform specified') from None
    # Delegate the rest of the validation to the platform's own schema.
    return platform.TRIGGER_SCHEMA(config)
# Each trigger entry must name a platform; extra keys are validated by
# the platform's own TRIGGER_SCHEMA via _platform_validator.
_TRIGGER_SCHEMA = vol.All(
    cv.ensure_list,
    [
        vol.All(
            vol.Schema({
                vol.Required(CONF_PLATFORM): str
            }, extra=vol.ALLOW_EXTRA),
            _platform_validator
        ),
    ]
)
_CONDITION_SCHEMA = vol.All(cv.ensure_list, [cv.CONDITION_SCHEMA])
PLATFORM_SCHEMA = vol.Schema({
    # str on purpose
    CONF_ID: str,
    CONF_ALIAS: cv.string,
    vol.Optional(CONF_INITIAL_STATE): cv.boolean,
    vol.Optional(CONF_HIDE_ENTITY, default=DEFAULT_HIDE_ENTITY): cv.boolean,
    vol.Required(CONF_TRIGGER): _TRIGGER_SCHEMA,
    vol.Optional(CONF_CONDITION): _CONDITION_SCHEMA,
    vol.Required(CONF_ACTION): cv.SCRIPT_SCHEMA,
})
# turn_on/turn_off/toggle accept an optional entity filter.
SERVICE_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.comp_entity_ids,
})
# automation.trigger requires a target and accepts action variables.
TRIGGER_SERVICE_SCHEMA = vol.Schema({
    vol.Required(ATTR_ENTITY_ID): cv.comp_entity_ids,
    vol.Optional(ATTR_VARIABLES, default={}): dict,
})
RELOAD_SERVICE_SCHEMA = vol.Schema({})
@bind_hass
def is_on(hass, entity_id):
    """
    Return true if specified automation entity_id is on.

    Async friendly.
    """
    return hass.states.is_state(entity_id, STATE_ON)
async def async_setup(hass, config):
    """Set up the automation."""
    component = EntityComponent(_LOGGER, DOMAIN, hass,
                                group_name=GROUP_NAME_ALL_AUTOMATIONS)
    await _async_process_config(hass, config, component)

    async def trigger_service_handler(service_call):
        """Handle automation triggers."""
        tasks = []
        for entity in await component.async_extract_from_service(service_call):
            # Service-initiated triggers bypass the condition checks.
            tasks.append(entity.async_trigger(
                service_call.data.get(ATTR_VARIABLES),
                skip_condition=True,
                context=service_call.context))
        if tasks:
            await asyncio.wait(tasks, loop=hass.loop)

    async def turn_onoff_service_handler(service_call):
        """Handle automation turn on/off service calls."""
        tasks = []
        # Dispatch to async_turn_on / async_turn_off by service name.
        method = 'async_{}'.format(service_call.service)
        for entity in await component.async_extract_from_service(service_call):
            tasks.append(getattr(entity, method)())
        if tasks:
            await asyncio.wait(tasks, loop=hass.loop)

    async def toggle_service_handler(service_call):
        """Handle automation toggle service calls."""
        tasks = []
        for entity in await component.async_extract_from_service(service_call):
            if entity.is_on:
                tasks.append(entity.async_turn_off())
            else:
                tasks.append(entity.async_turn_on())
        if tasks:
            await asyncio.wait(tasks, loop=hass.loop)

    async def reload_service_handler(service_call):
        """Remove all automations and load new ones from config."""
        conf = await component.async_prepare_reload()
        if conf is None:
            return
        await _async_process_config(hass, conf, component)

    hass.services.async_register(
        DOMAIN, SERVICE_TRIGGER, trigger_service_handler,
        schema=TRIGGER_SERVICE_SCHEMA)
    hass.services.async_register(
        DOMAIN, SERVICE_RELOAD, reload_service_handler,
        schema=RELOAD_SERVICE_SCHEMA)
    hass.services.async_register(
        DOMAIN, SERVICE_TOGGLE, toggle_service_handler,
        schema=SERVICE_SCHEMA)
    # turn_on and turn_off share one handler; it dispatches by name.
    for service in (SERVICE_TURN_ON, SERVICE_TURN_OFF):
        hass.services.async_register(
            DOMAIN, service, turn_onoff_service_handler,
            schema=SERVICE_SCHEMA)
    return True
class AutomationEntity(ToggleEntity, RestoreEntity):
    """Entity to show status of entity."""

    def __init__(self, automation_id, name, async_attach_triggers, cond_func,
                 async_action, hidden, initial_state):
        """Initialize an automation entity."""
        self._id = automation_id
        self._name = name
        # Coroutine attaching all triggers; returns a detach callback.
        self._async_attach_triggers = async_attach_triggers
        # Detach callback while enabled; None means the automation is off.
        self._async_detach_triggers = None
        self._cond_func = cond_func
        self._async_action = async_action
        self._last_triggered = None
        self._hidden = hidden
        self._initial_state = initial_state

    @property
    def name(self):
        """Name of the automation."""
        return self._name

    @property
    def should_poll(self):
        """No polling needed for automation entities."""
        return False

    @property
    def state_attributes(self):
        """Return the entity state attributes."""
        return {
            ATTR_LAST_TRIGGERED: self._last_triggered
        }

    @property
    def hidden(self) -> bool:
        """Return True if the automation entity should be hidden from UIs."""
        return self._hidden

    @property
    def is_on(self) -> bool:
        """Return True if entity is on."""
        return self._async_detach_triggers is not None

    async def async_added_to_hass(self) -> None:
        """Startup with initial state or previous state.

        Precedence: explicit 'initial_state' from config, then the last
        state restored by the recorder, then the component default.
        """
        await super().async_added_to_hass()
        if self._initial_state is not None:
            enable_automation = self._initial_state
            _LOGGER.debug("Automation %s initial state %s from config "
                          "initial_state", self.entity_id, enable_automation)
        else:
            state = await self.async_get_last_state()
            if state:
                enable_automation = state.state == STATE_ON
                self._last_triggered = state.attributes.get('last_triggered')
                _LOGGER.debug("Automation %s initial state %s from recorder "
                              "last state %s", self.entity_id,
                              enable_automation, state)
            else:
                enable_automation = DEFAULT_INITIAL_STATE
                _LOGGER.debug("Automation %s initial state %s from default "
                              "initial state", self.entity_id,
                              enable_automation)
        if not enable_automation:
            return
        # HomeAssistant is starting up: defer attaching triggers until
        # the start event fires.
        if self.hass.state == CoreState.not_running:
            async def async_enable_automation(event):
                """Start automation on startup."""
                await self.async_enable()
            self.hass.bus.async_listen_once(
                EVENT_HOMEASSISTANT_START, async_enable_automation)
        # HomeAssistant is running
        else:
            await self.async_enable()

    async def async_turn_on(self, **kwargs) -> None:
        """Turn the entity on and update the state."""
        if self.is_on:
            return
        await self.async_enable()

    async def async_turn_off(self, **kwargs) -> None:
        """Turn the entity off."""
        if not self.is_on:
            return
        # Detach trigger listeners; clearing the callback flips is_on off.
        self._async_detach_triggers()
        self._async_detach_triggers = None
        await self.async_update_ha_state()

    async def async_trigger(self, variables, skip_condition=False,
                            context=None):
        """Trigger automation.

        This method is a coroutine. ``skip_condition`` is used by the
        automation.trigger service to bypass condition checks.
        """
        if not skip_condition and not self._cond_func(variables):
            return
        # Create a new context referring to the old context.
        parent_id = None if context is None else context.id
        trigger_context = Context(parent_id=parent_id)
        self.async_set_context(trigger_context)
        self.hass.bus.async_fire(EVENT_AUTOMATION_TRIGGERED, {
            ATTR_NAME: self._name,
            ATTR_ENTITY_ID: self.entity_id,
        }, context=trigger_context)
        await self._async_action(self.entity_id, variables, trigger_context)
        self._last_triggered = utcnow()
        await self.async_update_ha_state()

    async def async_will_remove_from_hass(self):
        """Remove listeners when removing automation from HASS."""
        await super().async_will_remove_from_hass()
        await self.async_turn_off()

    async def async_enable(self):
        """Enable this automation entity.

        This method is a coroutine.
        """
        if self.is_on:
            return
        # Attaching returns the detach callback; storing it marks us "on".
        self._async_detach_triggers = await self._async_attach_triggers(
            self.async_trigger)
        await self.async_update_ha_state()

    @property
    def device_state_attributes(self):
        """Return automation attributes."""
        # Only automations configured with an explicit id: expose it.
        if self._id is None:
            return None
        return {
            CONF_ID: self._id
        }
async def _async_process_config(hass, config, component):
    """Process config and add automations.

    This method is a coroutine.
    """
    entities = []
    for config_key in extract_domain_configs(config, DOMAIN):
        for list_no, config_block in enumerate(config[config_key]):
            automation_id = config_block.get(CONF_ID)
            name = (config_block.get(CONF_ALIAS) or
                    "{} {}".format(config_key, list_no))
            hidden = config_block[CONF_HIDE_ENTITY]
            initial_state = config_block.get(CONF_INITIAL_STATE)
            action = _async_get_action(
                hass, config_block.get(CONF_ACTION, {}), name)
            if CONF_CONDITION in config_block:
                cond_func = _async_process_if(hass, config, config_block)
                if cond_func is None:
                    # Invalid condition config; skip this automation.
                    continue
            else:
                def cond_func(variables):
                    """Condition will always pass."""
                    return True
            async_attach_triggers = partial(
                _async_process_trigger, hass, config,
                config_block.get(CONF_TRIGGER, []), name
            )
            entities.append(AutomationEntity(
                automation_id, name, async_attach_triggers, cond_func,
                action, hidden, initial_state))
    if entities:
        await component.async_add_entities(entities)
def _async_get_action(hass, config, name):
    """Return an action based on a configuration."""
    action_script = script.Script(hass, config, name)

    async def action(entity_id, variables, context):
        """Execute the automation's action script."""
        _LOGGER.info('Executing %s', name)
        try:
            await action_script.async_run(variables, context)
        except Exception as err:  # pylint: disable=broad-except
            # Log through the script helper so the error points at the
            # failing script step.
            action_script.async_log_exception(
                _LOGGER,
                'Error while executing automation {}'.format(entity_id), err)

    return action
def _async_process_if(hass, config, p_config):
    """Process if checks.

    Returns None when any configured condition is invalid.
    """
    checks = []
    for if_config in p_config.get(CONF_CONDITION):
        try:
            checks.append(condition.async_from_config(if_config, False))
        except HomeAssistantError as ex:
            _LOGGER.warning('Invalid condition: %s', ex)
            return None

    def if_action(variables=None):
        """AND all conditions."""
        return all(check(hass, variables) for check in checks)

    return if_action
async def _async_process_trigger(hass, config, trigger_configs, name, action):
    """Set up the triggers.

    This method is a coroutine. Returns a callable that detaches every
    successfully attached trigger, or None when nothing attached.
    """
    removes = []
    info = {
        'name': name
    }
    for conf in trigger_configs:
        platform = await async_prepare_setup_platform(
            hass, config, DOMAIN, conf.get(CONF_PLATFORM))
        if platform is None:
            # Bug fix: this used to `return None`, discarding the detach
            # callbacks already collected in `removes` and leaving those
            # triggers attached forever. Skip the bad platform instead,
            # consistent with the failed-trigger branch below.
            _LOGGER.error("Error setting up trigger %s", name)
            continue
        remove = await platform.async_trigger(hass, conf, action, info)
        if not remove:
            _LOGGER.error("Error setting up trigger %s", name)
            continue
        _LOGGER.info("Initialized trigger %s", name)
        removes.append(remove)
    if not removes:
        return None

    def remove_triggers():
        """Remove attached triggers."""
        for remove in removes:
            remove()

    return remove_triggers
| {
"repo_name": "nugget/home-assistant",
"path": "homeassistant/components/automation/__init__.py",
"copies": "1",
"size": "14008",
"license": "apache-2.0",
"hash": 397769296175093950,
"line_mean": 30.6207674944,
"line_max": 79,
"alpha_frac": 0.613078241,
"autogenerated": false,
"ratio": 4.194011976047904,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5307090217047904,
"avg_score": null,
"num_lines": null
} |
# Allow transitive imports, e.g.
# `from ratelimitbackend import admin; admin.ModelAdmin`
from django.contrib.admin import * # noqa
from django.contrib.admin import site as django_site
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.utils.translation import ugettext as _
from .forms import AdminAuthenticationForm
from .views import login
class RateLimitAdminSite(AdminSite):  # noqa
    """Admin site whose login view goes through the rate-limited backend."""

    def login(self, request, extra_context=None):
        """
        Displays the login form for the given HttpRequest.
        """
        context = {
            'title': _('Log in'),
            'app_path': request.get_full_path(),
        }
        # Preserve the originally requested URL so a successful login can
        # redirect back, unless the request already carries a redirect field.
        if (REDIRECT_FIELD_NAME not in request.GET and
                REDIRECT_FIELD_NAME not in request.POST):
            context[REDIRECT_FIELD_NAME] = request.get_full_path()
        context.update(extra_context or {})
        defaults = {
            'extra_context': context,
            'current_app': self.name,
            # Fall back to the rate-limit-aware form / stock template when
            # the site has no custom ones configured.
            'authentication_form': self.login_form or AdminAuthenticationForm,
            'template_name': self.login_template or 'admin/login.html',
        }
        # Delegate to the rate-limit-aware login view.
        return login(request, **defaults)
site = RateLimitAdminSite()
# Mirror every registration from Django's default admin site onto the
# rate-limited site, re-instantiating each ModelAdmin class.
for model, admin in django_site._registry.items():
    site.register(model, admin.__class__)
| {
"repo_name": "brutasse/django-ratelimit-backend",
"path": "ratelimitbackend/admin.py",
"copies": "1",
"size": "1299",
"license": "bsd-3-clause",
"hash": 1597461607040618000,
"line_mean": 34.1081081081,
"line_max": 78,
"alpha_frac": 0.648960739,
"autogenerated": false,
"ratio": 4.059375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5208335739,
"avg_score": null,
"num_lines": null
} |
"""Allow users to set and activate scenes."""
from collections import namedtuple
import logging
from typing import Any, List
import voluptuous as vol
from homeassistant import config as conf_util
from homeassistant.components.light import ATTR_TRANSITION
from homeassistant.components.scene import DOMAIN as SCENE_DOMAIN, STATES, Scene
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_STATE,
CONF_ENTITIES,
CONF_ICON,
CONF_ID,
CONF_NAME,
CONF_PLATFORM,
SERVICE_RELOAD,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import DOMAIN as HA_DOMAIN, HomeAssistant, State, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import (
config_per_platform,
config_validation as cv,
entity_platform,
)
from homeassistant.helpers.state import async_reproduce_state
from homeassistant.loader import async_get_integration
def _convert_states(states):
    """Convert state definitions to State objects."""
    result = {}
    for entity_id, info in states.items():
        entity_id = cv.entity_id(entity_id)
        if isinstance(info, dict):
            # Mapping form: every key except 'state' becomes an attribute.
            attributes = info.copy()
            state = attributes.pop(ATTR_STATE, None)
        else:
            # Scalar form: the value is the state itself.
            state = info
            attributes = {}
        # YAML translates 'on' to a boolean
        # http://yaml.org/type/bool.html
        if isinstance(state, bool):
            state = STATE_ON if state else STATE_OFF
        elif not isinstance(state, str):
            raise vol.Invalid(f"State for {entity_id} should be a string")
        result[entity_id] = State(entity_id, state, attributes)
    return result
def _ensure_no_intersection(value):
    """Validate that entities and snapshot_entities do not overlap."""
    # Same condition as before, inverted via De Morgan: reject only when
    # both keys are present AND at least one entity appears in both.
    if CONF_SNAPSHOT in value and CONF_ENTITIES in value and any(
        entity_id in value[CONF_SNAPSHOT] for entity_id in value[CONF_ENTITIES]
    ):
        raise vol.Invalid("entities and snapshot_entities must not overlap")
    return value
CONF_SCENE_ID = "scene_id"
CONF_SNAPSHOT = "snapshot_entities"
# Key under hass.data where the scene entity platform is stashed.
DATA_PLATFORM = "homeassistant_scene"
EVENT_SCENE_RELOADED = "scene_reloaded"
STATES_SCHEMA = vol.All(dict, _convert_states)
PLATFORM_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_PLATFORM): HA_DOMAIN,
        vol.Required(STATES): vol.All(
            cv.ensure_list,
            [
                vol.Schema(
                    {
                        vol.Optional(CONF_ID): cv.string,
                        vol.Required(CONF_NAME): cv.string,
                        vol.Optional(CONF_ICON): cv.icon,
                        vol.Required(CONF_ENTITIES): STATES_SCHEMA,
                    }
                )
            ],
        ),
    },
    extra=vol.ALLOW_EXTRA,
)
# scene.create requires at least one of entities / snapshot_entities,
# and the two sets must not overlap.
CREATE_SCENE_SCHEMA = vol.All(
    cv.has_at_least_one_key(CONF_ENTITIES, CONF_SNAPSHOT),
    _ensure_no_intersection,
    vol.Schema(
        {
            vol.Required(CONF_SCENE_ID): cv.slug,
            vol.Optional(CONF_ENTITIES, default={}): STATES_SCHEMA,
            vol.Optional(CONF_SNAPSHOT, default=[]): cv.entity_ids,
        }
    ),
)
SERVICE_APPLY = "apply"
SERVICE_CREATE = "create"
SCENECONFIG = namedtuple("SceneConfig", [CONF_ID, CONF_NAME, CONF_ICON, STATES])
_LOGGER = logging.getLogger(__name__)
@callback
def scenes_with_entity(hass: HomeAssistant, entity_id: str) -> List[str]:
    """Return all scenes that reference the entity."""
    if DATA_PLATFORM not in hass.data:
        return []
    platform = hass.data[DATA_PLATFORM]
    results = []
    for scene_entity in platform.entities.values():
        if entity_id in scene_entity.scene_config.states:
            results.append(scene_entity.entity_id)
    return results
@callback
def entities_in_scene(hass: HomeAssistant, entity_id: str) -> List[str]:
    """Return all entities in a scene."""
    platform = hass.data.get(DATA_PLATFORM)
    if platform is None:
        return []
    entity = platform.entities.get(entity_id)
    return [] if entity is None else list(entity.scene_config.states)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up Home Assistant scene entries."""
    _process_scenes_config(hass, async_add_entities, config)
    # This platform can be loaded multiple times. Only first time register the service.
    if hass.services.has_service(SCENE_DOMAIN, SERVICE_RELOAD):
        return
    # Store platform for later.
    platform = hass.data[DATA_PLATFORM] = entity_platform.current_platform.get()

    async def reload_config(call):
        """Reload the scene config."""
        try:
            conf = await conf_util.async_hass_config_yaml(hass)
        except HomeAssistantError as err:
            _LOGGER.error(err)
            return
        integration = await async_get_integration(hass, SCENE_DOMAIN)
        conf = await conf_util.async_process_component_config(hass, conf, integration)
        if not (conf and platform):
            return
        # Drop all current scene entities before re-adding from the new config.
        await platform.async_reset()
        # Extract only the config for the Home Assistant platform, ignore the rest.
        for p_type, p_config in config_per_platform(conf, SCENE_DOMAIN):
            if p_type != HA_DOMAIN:
                continue
            _process_scenes_config(hass, async_add_entities, p_config)
        hass.bus.async_fire(EVENT_SCENE_RELOADED, context=call.context)

    hass.helpers.service.async_register_admin_service(
        SCENE_DOMAIN, SERVICE_RELOAD, reload_config
    )

    async def apply_service(call):
        """Apply a scene."""
        # Forward the optional light transition to the reproduce helper.
        reproduce_options = {}
        if ATTR_TRANSITION in call.data:
            reproduce_options[ATTR_TRANSITION] = call.data.get(ATTR_TRANSITION)
        await async_reproduce_state(
            hass,
            call.data[CONF_ENTITIES].values(),
            context=call.context,
            reproduce_options=reproduce_options,
        )

    hass.services.async_register(
        SCENE_DOMAIN,
        SERVICE_APPLY,
        apply_service,
        vol.Schema(
            {
                vol.Optional(ATTR_TRANSITION): vol.All(
                    vol.Coerce(float), vol.Clamp(min=0, max=6553)
                ),
                vol.Required(CONF_ENTITIES): STATES_SCHEMA,
            }
        ),
    )

    async def create_service(call):
        """Create a scene."""
        snapshot = call.data[CONF_SNAPSHOT]
        entities = call.data[CONF_ENTITIES]
        # Merge snapshotted current states into the explicit entity states.
        for entity_id in snapshot:
            state = hass.states.get(entity_id)
            if state is None:
                _LOGGER.warning(
                    "Entity %s does not exist and therefore cannot be snapshotted",
                    entity_id,
                )
                continue
            entities[entity_id] = State(entity_id, state.state, state.attributes)
        if not entities:
            _LOGGER.warning("Empty scenes are not allowed")
            return
        scene_config = SCENECONFIG(None, call.data[CONF_SCENE_ID], None, entities)
        entity_id = f"{SCENE_DOMAIN}.{scene_config.name}"
        old = platform.entities.get(entity_id)
        if old is not None:
            # Only scenes created by this service may be replaced.
            if not old.from_service:
                _LOGGER.warning("The scene %s already exists", entity_id)
                return
            await platform.async_remove_entity(entity_id)
        async_add_entities([HomeAssistantScene(hass, scene_config, from_service=True)])

    hass.services.async_register(
        SCENE_DOMAIN, SERVICE_CREATE, create_service, CREATE_SCENE_SCHEMA
    )
def _process_scenes_config(hass, async_add_entities, config):
    """Process multiple scenes and add them."""
    scene_config = config[STATES]
    # Nothing to do for an empty scene list.
    if not scene_config:
        return
    entities = []
    for scene in scene_config:
        conf = SCENECONFIG(
            scene.get(CONF_ID),
            scene[CONF_NAME],
            scene.get(CONF_ICON),
            scene[CONF_ENTITIES],
        )
        entities.append(HomeAssistantScene(hass, conf))
    async_add_entities(entities)
class HomeAssistantScene(Scene):
    """A scene is a group of entities and the states we want them to be."""

    def __init__(self, hass, scene_config, from_service=False):
        """Initialize the scene."""
        self.hass = hass
        # SceneConfig namedtuple: (id, name, icon, states).
        self.scene_config = scene_config
        # True when created via the scene.create service (replaceable).
        self.from_service = from_service

    @property
    def name(self):
        """Return the name of the scene."""
        return self.scene_config.name

    @property
    def icon(self):
        """Return the icon of the scene."""
        return self.scene_config.icon

    @property
    def unique_id(self):
        """Return unique ID."""
        return self.scene_config.id

    @property
    def device_state_attributes(self):
        """Return the scene state attributes."""
        attributes = {ATTR_ENTITY_ID: list(self.scene_config.states)}
        unique_id = self.unique_id
        if unique_id is not None:
            attributes[CONF_ID] = unique_id
        return attributes

    async def async_activate(self, **kwargs: Any) -> None:
        """Activate scene. Try to get entities into requested state."""
        # kwargs (e.g. transition) are forwarded as reproduce options.
        await async_reproduce_state(
            self.hass,
            self.scene_config.states.values(),
            context=self._context,
            reproduce_options=kwargs,
        )
| {
"repo_name": "sdague/home-assistant",
"path": "homeassistant/components/homeassistant/scene.py",
"copies": "10",
"size": "9457",
"license": "apache-2.0",
"hash": 3625583014847985000,
"line_mean": 28.9272151899,
"line_max": 87,
"alpha_frac": 0.6070635508,
"autogenerated": false,
"ratio": 4.036278275714896,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9643341826514895,
"avg_score": null,
"num_lines": null
} |
"""Allow users to set and activate scenes."""
from collections import namedtuple
import logging
from typing import List
import voluptuous as vol
from homeassistant import config as conf_util
from homeassistant.components.scene import DOMAIN as SCENE_DOMAIN, STATES, Scene
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_STATE,
CONF_ENTITIES,
CONF_ID,
CONF_NAME,
CONF_PLATFORM,
SERVICE_RELOAD,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import DOMAIN as HA_DOMAIN, HomeAssistant, State, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import (
config_per_platform,
config_validation as cv,
entity_platform,
)
from homeassistant.helpers.state import async_reproduce_state
from homeassistant.loader import async_get_integration
def _convert_states(states):
    """Convert state definitions to State objects.

    Accepts a mapping of entity_id to either a bare state value or a dict
    of attributes containing an optional 'state' key.
    """
    result = {}
    # Iterate items so the value is captured BEFORE the key is validated:
    # cv.entity_id may normalize the id (e.g. lower-case it), and indexing
    # `states` with the normalized id would raise KeyError for keys that
    # were changed by normalization.
    for entity_id, info in states.items():
        entity_id = cv.entity_id(entity_id)
        if isinstance(info, dict):
            entity_attrs = info.copy()
            state = entity_attrs.pop(ATTR_STATE, None)
            attributes = entity_attrs
        else:
            state = info
            attributes = {}
        # YAML translates 'on' to a boolean
        # http://yaml.org/type/bool.html
        if isinstance(state, bool):
            state = STATE_ON if state else STATE_OFF
        elif not isinstance(state, str):
            raise vol.Invalid(f"State for {entity_id} should be a string")
        result[entity_id] = State(entity_id, state, attributes)
    return result
def _ensure_no_intersection(value):
    """Validate that entities and snapshot_entities do not overlap."""
    # Same condition as before, inverted via De Morgan: reject only when
    # both keys are present AND at least one entity appears in both.
    if CONF_SNAPSHOT in value and CONF_ENTITIES in value and any(
        entity_id in value[CONF_SNAPSHOT] for entity_id in value[CONF_ENTITIES]
    ):
        raise vol.Invalid("entities and snapshot_entities must not overlap")
    return value
CONF_SCENE_ID = "scene_id"
CONF_SNAPSHOT = "snapshot_entities"
# Key under hass.data where the scene entity platform is stashed.
# Plain string literal: the former f-string had no placeholders (F541).
DATA_PLATFORM = "homeassistant_scene"
STATES_SCHEMA = vol.All(dict, _convert_states)
PLATFORM_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_PLATFORM): HA_DOMAIN,
        vol.Required(STATES): vol.All(
            cv.ensure_list,
            [
                {
                    vol.Required(CONF_NAME): cv.string,
                    vol.Required(CONF_ENTITIES): STATES_SCHEMA,
                }
            ],
        ),
    },
    extra=vol.ALLOW_EXTRA,
)
# scene.create requires at least one of entities / snapshot_entities,
# and the two sets must not overlap.
CREATE_SCENE_SCHEMA = vol.All(
    cv.has_at_least_one_key(CONF_ENTITIES, CONF_SNAPSHOT),
    _ensure_no_intersection,
    vol.Schema(
        {
            vol.Required(CONF_SCENE_ID): cv.slug,
            vol.Optional(CONF_ENTITIES, default={}): STATES_SCHEMA,
            vol.Optional(CONF_SNAPSHOT, default=[]): cv.entity_ids,
        }
    ),
)
SERVICE_APPLY = "apply"
SERVICE_CREATE = "create"
SCENECONFIG = namedtuple("SceneConfig", [CONF_NAME, STATES])
_LOGGER = logging.getLogger(__name__)
@callback
def scenes_with_entity(hass: HomeAssistant, entity_id: str) -> List[str]:
    """Return all scenes that reference the entity."""
    if DATA_PLATFORM not in hass.data:
        return []
    platform = hass.data[DATA_PLATFORM]
    return [
        scene_entity.entity_id
        for scene_entity in platform.entities.values()
        if entity_id in scene_entity.scene_config.states
    ]
@callback
def entities_in_scene(hass: HomeAssistant, entity_id: str) -> List[str]:
    """Return all entities in a scene."""
    platform = hass.data.get(DATA_PLATFORM)
    if platform is None:
        return []
    entity = platform.entities.get(entity_id)
    return [] if entity is None else list(entity.scene_config.states)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up Home Assistant scene entries."""
    _process_scenes_config(hass, async_add_entities, config)
    # This platform can be loaded multiple times. Only first time register the service.
    if hass.services.has_service(SCENE_DOMAIN, SERVICE_RELOAD):
        return
    # Store platform for later.
    platform = hass.data[DATA_PLATFORM] = entity_platform.current_platform.get()

    async def reload_config(call):
        """Reload the scene config."""
        try:
            conf = await conf_util.async_hass_config_yaml(hass)
        except HomeAssistantError as err:
            _LOGGER.error(err)
            return
        integration = await async_get_integration(hass, SCENE_DOMAIN)
        conf = await conf_util.async_process_component_config(hass, conf, integration)
        if not conf or not platform:
            return
        # Drop all current scene entities before re-adding from the new config.
        await platform.async_reset()
        # Extract only the config for the Home Assistant platform, ignore the rest.
        for p_type, p_config in config_per_platform(conf, SCENE_DOMAIN):
            if p_type != HA_DOMAIN:
                continue
            _process_scenes_config(hass, async_add_entities, p_config)

    hass.helpers.service.async_register_admin_service(
        SCENE_DOMAIN, SERVICE_RELOAD, reload_config
    )

    async def apply_service(call):
        """Apply a scene."""
        await async_reproduce_state(
            hass, call.data[CONF_ENTITIES].values(), blocking=True, context=call.context
        )

    hass.services.async_register(
        SCENE_DOMAIN,
        SERVICE_APPLY,
        apply_service,
        vol.Schema({vol.Required(CONF_ENTITIES): STATES_SCHEMA}),
    )

    async def create_service(call):
        """Create a scene."""
        snapshot = call.data[CONF_SNAPSHOT]
        entities = call.data[CONF_ENTITIES]
        # Merge snapshotted current states into the explicit entity states.
        for entity_id in snapshot:
            state = hass.states.get(entity_id)
            if state is None:
                _LOGGER.warning(
                    "Entity %s does not exist and therefore cannot be snapshotted",
                    entity_id,
                )
                continue
            entities[entity_id] = State(entity_id, state.state, state.attributes)
        if not entities:
            _LOGGER.warning("Empty scenes are not allowed")
            return
        scene_config = SCENECONFIG(call.data[CONF_SCENE_ID], entities)
        entity_id = f"{SCENE_DOMAIN}.{scene_config.name}"
        old = platform.entities.get(entity_id)
        if old is not None:
            # Only scenes created by this service may be replaced.
            if not old.from_service:
                _LOGGER.warning("The scene %s already exists", entity_id)
                return
            await platform.async_remove_entity(entity_id)
        async_add_entities([HomeAssistantScene(hass, scene_config, from_service=True)])

    hass.services.async_register(
        SCENE_DOMAIN, SERVICE_CREATE, create_service, CREATE_SCENE_SCHEMA
    )
def _process_scenes_config(hass, async_add_entities, config):
    """Process multiple scenes and add them."""
    scene_config = config[STATES]
    # Nothing to do for an empty scene list.
    if not scene_config:
        return
    entities = []
    for scene in scene_config:
        entities.append(
            HomeAssistantScene(
                hass,
                SCENECONFIG(scene[CONF_NAME], scene[CONF_ENTITIES]),
                scene.get(CONF_ID),
            )
        )
    async_add_entities(entities)
class HomeAssistantScene(Scene):
    """A scene is a group of entities and the states we want them to be."""

    def __init__(self, hass, scene_config, scene_id=None, from_service=False):
        """Initialize the scene."""
        # Optional stable id from the config block (used as unique_id).
        self._id = scene_id
        self.hass = hass
        # SceneConfig namedtuple: (name, states).
        self.scene_config = scene_config
        # True when created via the scene.create service (replaceable).
        self.from_service = from_service

    @property
    def name(self):
        """Return the name of the scene."""
        return self.scene_config.name

    @property
    def unique_id(self):
        """Return unique ID."""
        return self._id

    @property
    def device_state_attributes(self):
        """Return the scene state attributes."""
        attributes = {ATTR_ENTITY_ID: list(self.scene_config.states)}
        if self._id is not None:
            attributes[CONF_ID] = self._id
        return attributes

    async def async_activate(self):
        """Activate scene. Try to get entities into requested state."""
        await async_reproduce_state(
            self.hass,
            self.scene_config.states.values(),
            blocking=True,
            context=self._context,
        )
| {
"repo_name": "Teagan42/home-assistant",
"path": "homeassistant/components/homeassistant/scene.py",
"copies": "1",
"size": "8465",
"license": "apache-2.0",
"hash": -3086675184480700000,
"line_mean": 28.8063380282,
"line_max": 88,
"alpha_frac": 0.6222090963,
"autogenerated": false,
"ratio": 4.011848341232228,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5134057437532228,
"avg_score": null,
"num_lines": null
} |
"""Allow users to set and activate scenes."""
from __future__ import annotations
import functools as ft
import importlib
import logging
from typing import Any
import voluptuous as vol
from homeassistant.components.light import ATTR_TRANSITION
from homeassistant.const import CONF_PLATFORM, SERVICE_TURN_ON
from homeassistant.core import DOMAIN as HA_DOMAIN
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
# mypy: allow-untyped-defs, no-check-untyped-defs
DOMAIN = "scene"
STATE = "scening"
STATES = "states"
def _hass_domain_validator(config):
    """Validate platform in config for homeassistant domain."""
    # A bare state mapping (no platform key) is wrapped into the
    # homeassistant platform's expected shape.
    if CONF_PLATFORM in config:
        return config
    return {CONF_PLATFORM: HA_DOMAIN, STATES: config}
def _platform_validator(config):
    """Validate it is a valid platform."""
    platform_name = config[CONF_PLATFORM]
    try:
        platform = importlib.import_module(f".{platform_name}", __name__)
    except ImportError:
        try:
            platform = importlib.import_module(
                f"homeassistant.components.{platform_name}.scene"
            )
        except ImportError:
            raise vol.Invalid("Invalid platform specified") from None

    schema = getattr(platform, "PLATFORM_SCHEMA", None)
    if schema is None:
        return config
    return schema(config)
# Accept either a bare state mapping (wrapped into the homeassistant
# platform by _hass_domain_validator) or an explicit platform config,
# which is then validated against that platform's own PLATFORM_SCHEMA.
PLATFORM_SCHEMA = vol.Schema(
    vol.All(
        _hass_domain_validator,
        vol.Schema({vol.Required(CONF_PLATFORM): str}, extra=vol.ALLOW_EXTRA),
        _platform_validator,
    ),
    extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
    """Set up the scenes."""
    logger = logging.getLogger(__name__)
    component = EntityComponent(logger, DOMAIN, hass)
    hass.data[DOMAIN] = component
    await component.async_setup(config)

    # Ensure Home Assistant platform always loaded.
    await component.async_setup_platform(
        HA_DOMAIN, {"platform": HA_DOMAIN, STATES: []}
    )

    component.async_register_entity_service(
        SERVICE_TURN_ON,
        {ATTR_TRANSITION: vol.All(vol.Coerce(float), vol.Clamp(min=0, max=6553))},
        "async_activate",
    )

    return True
async def async_setup_entry(hass, entry):
    """Set up a config entry."""
    component = hass.data[DOMAIN]
    return await component.async_setup_entry(entry)
async def async_unload_entry(hass, entry):
    """Unload a config entry."""
    component = hass.data[DOMAIN]
    return await component.async_unload_entry(entry)
class Scene(Entity):
    """A scene is a group of entities and the states we want them to be."""

    @property
    def should_poll(self) -> bool:
        """No polling needed."""
        return False

    @property
    def state(self) -> str | None:
        """Return the state of the scene."""
        return STATE

    def activate(self, **kwargs: Any) -> None:
        """Activate scene. Try to get entities into requested state."""
        raise NotImplementedError()

    async def async_activate(self, **kwargs: Any) -> None:
        """Activate scene. Try to get entities into requested state."""
        # Hand the synchronous activate() off to hass's job scheduler.
        job = ft.partial(self.activate, **kwargs)
        task = self.hass.async_add_job(job)
        if task:
            await task
| {
"repo_name": "adrienbrault/home-assistant",
"path": "homeassistant/components/scene/__init__.py",
"copies": "2",
"size": "3174",
"license": "mit",
"hash": 8033773374964815000,
"line_mean": 27.5945945946,
"line_max": 88,
"alpha_frac": 0.661310649,
"autogenerated": false,
"ratio": 4.064020486555698,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00031963306106742005,
"num_lines": 111
} |
"""Allow users to set and activate scenes."""
import asyncio
import importlib
import logging
import voluptuous as vol
from homeassistant.core import DOMAIN as HA_DOMAIN
from homeassistant.const import CONF_PLATFORM, SERVICE_TURN_ON
from homeassistant.helpers.config_validation import ENTITY_SERVICE_SCHEMA
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.state import HASS_DOMAIN
DOMAIN = "scene"
STATE = "scening"
STATES = "states"
def _hass_domain_validator(config):
    """Validate platform in config for homeassistant domain."""
    # A bare state mapping is wrapped into the homeassistant platform's
    # expected shape; anything with a platform key passes through.
    if CONF_PLATFORM in config:
        return config
    return {CONF_PLATFORM: HASS_DOMAIN, STATES: config}
def _platform_validator(config):
    """Validate it is a valid platform."""
    name = config[CONF_PLATFORM]
    try:
        platform = importlib.import_module(".{}".format(name), __name__)
    except ImportError:
        try:
            platform = importlib.import_module(
                "homeassistant.components.{}.scene".format(name)
            )
        except ImportError:
            raise vol.Invalid("Invalid platform specified") from None

    # Platforms without their own schema are accepted unvalidated.
    if hasattr(platform, "PLATFORM_SCHEMA"):
        return platform.PLATFORM_SCHEMA(config)
    return config
# Accept either a bare state mapping (wrapped into the homeassistant
# platform by _hass_domain_validator) or an explicit platform config,
# which is then validated against that platform's own PLATFORM_SCHEMA.
PLATFORM_SCHEMA = vol.Schema(
    vol.All(
        _hass_domain_validator,
        vol.Schema({vol.Required(CONF_PLATFORM): str}, extra=vol.ALLOW_EXTRA),
        _platform_validator,
    ),
    extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
    """Set up the scenes.

    Creates the scene EntityComponent, loads the built-in Home Assistant
    scene platform, and registers the ``scene.turn_on`` service.
    """
    logger = logging.getLogger(__name__)
    component = hass.data[DOMAIN] = EntityComponent(logger, DOMAIN, hass)
    await component.async_setup(config)

    # Ensure Home Assistant platform always loaded.
    # Fix: the platform name was previously the misspelled literal
    # "homeasistant", so the config did not reference the real
    # homeassistant platform; use the HA_DOMAIN constant instead.
    await component.async_setup_platform(
        HA_DOMAIN, {"platform": HA_DOMAIN, STATES: []}
    )

    async def async_handle_scene_service(service):
        """Handle calls to the switch services."""
        target_scenes = await component.async_extract_from_service(service)

        tasks = [scene.async_activate() for scene in target_scenes]
        if tasks:
            await asyncio.wait(tasks)

    hass.services.async_register(
        DOMAIN,
        SERVICE_TURN_ON,
        async_handle_scene_service,
        schema=ENTITY_SERVICE_SCHEMA,
    )

    return True
async def async_setup_entry(hass, entry):
    """Set up a config entry."""
    scenes = hass.data[DOMAIN]
    return await scenes.async_setup_entry(entry)
async def async_unload_entry(hass, entry):
    """Unload a config entry."""
    scenes = hass.data[DOMAIN]
    return await scenes.async_unload_entry(entry)
class Scene(Entity):
    """A scene is a group of entities and the states we want them to be."""

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def state(self):
        """Return the state of the scene."""
        return STATE

    def activate(self):
        """Activate scene. Try to get entities into requested state."""
        raise NotImplementedError()

    def async_activate(self):
        """Activate scene. Try to get entities into requested state.

        This method must be run in the event loop and returns a coroutine.
        """
        # Hands the synchronous activate() to hass's job scheduler; the
        # returned job is awaited by the caller.
        return self.hass.async_add_job(self.activate)
| {
"repo_name": "fbradyirl/home-assistant",
"path": "homeassistant/components/scene/__init__.py",
"copies": "1",
"size": "3374",
"license": "apache-2.0",
"hash": 5120263540438639000,
"line_mean": 27.3529411765,
"line_max": 81,
"alpha_frac": 0.6647895673,
"autogenerated": false,
"ratio": 4.228070175438597,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5392859742738597,
"avg_score": null,
"num_lines": null
} |
#Allow us to import from parent directory, even if python isn't being run with the -m option
import os
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0,parentdir)
from common import *
mt = morse_parse.morse_table
import itertools
### Constants
DOT = 1
DASH = 2
# The pauses
INTER_ELEMENT = 3
INTER_CHARACTER = 4
INTER_WORD = 5
DEAD_AIR = 6
def dict_invert(d):
    """Return a new dict mapping each value of `d` back to its key.

    If several keys share a value, the key encountered last wins.
    Uses items() instead of the Python-2-only iteritems() so this
    helper works under both Python 2.7 and Python 3.
    """
    return {v: k for k, v in d.items()}
possible_on = {1:DOT, 3:DASH}
possible_off = {1:INTER_ELEMENT, 3:INTER_CHARACTER, 7:INTER_WORD}
possible_on_inv = dict_invert(possible_on)
possible_off_inv = dict_invert(possible_off)
def list_extend(l, number, item):
    """Return `l` extended so it ends with at least `number` trailing `item`s.

    Does not modify `l`; when `l` already has enough trailing `item`s the
    original object is returned unchanged.
    """
    if not hasattr(l, '__getitem__'):
        # l is some sort of sequence funnybusiness (e.g. an iterator);
        # materialize it so we can index from the end.
        l = list(l)

    # Count how many trailing elements already equal `item`.
    # (range works on both Python 2 and 3; the original used xrange.)
    n = 0
    for i in range(number):
        try:
            if l[-(i + 1)] == item:  # index the list in reverse
                n += 1
            else:
                break
        except IndexError:
            # Ran off the front of the list; stop counting.
            break

    # n is the number of trailing matching elements; add the remainder.
    to_add = number - n
    if to_add <= 0:
        # Already satisfied; return the original.
        return l
    return l + [item] * to_add
def letters_to_sequence(text):
    """Convert `text` to a morse on/off sequence (list of booleans).

    True entries are "key down" time slices, False entries "key up".
    Characters missing from the morse table are silently skipped.
    """
    seq = []
    text = text.upper()
    for c in text:
        try:
            elements = mt[c]
        except KeyError:
            # Silently skip characters we don't know how to encode.
            # (The original caught the KeyError but then re-indexed
            # mt[c] anyway, so unknown characters raised an uncaught
            # KeyError instead of being skipped.)
            continue
        for elem in elements:
            if elem == ".":
                seq = list_extend(seq, possible_on_inv[DOT], True)
            elif elem == "-":
                seq = list_extend(seq, possible_on_inv[DASH], True)
            elif elem == "_":
                seq = list_extend(seq, possible_off_inv[INTER_WORD], False)
            else:
                raise Exception("Should never happen, unless something happened with the config file")
            seq = list_extend(seq, possible_off_inv[INTER_ELEMENT], False)
        seq = list_extend(seq, possible_off_inv[INTER_CHARACTER], False)
    return seq
def print_morse(seq):
    """Print `seq` as ASCII art: '#' for key-down, '_' for key-up.

    Mostly for debugging.  Uses the function form of print so the
    module works under both Python 2 and Python 3 (with a single
    argument the parenthesized form behaves identically on both).
    """
    print(''.join(["#" if s else "_" for s in seq]))
if __name__ == "__main__":
    # Quick manual smoke test of the encode-and-print pipeline.
    print_morse(letters_to_sequence("I AM (batman)?"))
| {
"repo_name": "nickodell/morse-code",
"path": "modulate/chars_to_elements.py",
"copies": "1",
"size": "2148",
"license": "mit",
"hash": 4935556237407366000,
"line_mean": 22.6043956044,
"line_max": 92,
"alpha_frac": 0.6568901304,
"autogenerated": false,
"ratio": 2.9384404924760603,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8546580238678547,
"avg_score": 0.10975007683950251,
"num_lines": 91
} |
# Allow us to load `open_cp` without installing
import sys, os.path
sys.path.insert(0, os.path.abspath(os.path.join("..", "..")))
import matplotlib.pyplot as plt
import matplotlib
import descartes
import os
import numpy as np
import descartes
import pandas as pd
# Best-effort discovery of GDAL's data files when the environment does not
# already say where they are (typical of Windows Anaconda installs; on
# Linux GDAL is usually configured system-wide).
if "GDAL_DATA" not in os.environ:
    try:
        home = os.path.join(os.path.expanduser("~"), "Anaconda3", "Library", "share", "gdal")
        if "gcs.csv" in os.listdir(home):
            os.environ["GDAL_DATA"] = home
        else:
            print("GDAL_DATA not set and failed to find suitable location...")
    except Exception:
        # Narrowed from a bare `except:`, which would also have swallowed
        # KeyboardInterrupt/SystemExit; this remains deliberately
        # best-effort for missing directories etc.
        print("GDAL_DATA not set and failed to find suitable location... This is probably not a problem on linux.")
import open_cp.sources.chicago as chicago
import open_cp.predictors
import open_cp.naive
import open_cp.geometry
import open_cp.plot
import open_cp.pool
import open_cp.evaluation
import open_cp.retrohotspot as retro
side_choices = {"Far North", "Northwest", "North", "West", "Central",
"South", "Southwest", "Far Southwest", "Far Southeast"}
def get_side(side="South"):
    """Return the geometry for the named Chicago side."""
    geometry = chicago.get_side(side)
    return geometry
def load_data(datadir, side="South"):
    """Load the data: Burglary, in the South side only, limit to events happening
    on the days 2011-03-01 to 2012-01-06 inclusive.

    :return: Pair of `(geometry, points)`
    """
    chicago.set_data_directory(datadir)
    filename = os.path.join(datadir, "chicago_two.csv")
    points = chicago.load(filename, {"BURGLARY"}, type="all_other")

    # Limit time range
    start = np.datetime64("2011-03-01")
    end = np.datetime64("2012-01-07")
    in_range = (points.timestamps >= start) & (points.timestamps <= end)
    points = points[in_range]

    geo = get_side(side)
    points = open_cp.geometry.intersect_timed_points(points, geo)
    return geo, points
# Segment data for a colormap running from yellow (low) to red (high):
# red stays at 1.0 throughout, green fades 1.0 -> 0.0, blue is fixed at 0.2.
_cdict = {'red': [(0.0, 1.0, 1.0),
                  (1.0, 1.0, 1.0)],
          'green': [(0.0, 1.0, 1.0),
                    (1.0, 0.0, 0.0)],
          'blue': [(0.0, 0.2, 0.2),
                   (1.0, 0.2, 0.2)]}

yellow_to_red = matplotlib.colors.LinearSegmentedColormap("yellow_to_red", _cdict)
def grid_for_side(xoffset=0, yoffset=0, xsize=250, ysize=250, side="South"):
    """Generated a masked grid for the passed side value.

    :param xoffset: How much to move the left side by
    :param yoffset: How much to move the bottom side by
    """
    geometry = get_side(side)
    grid = open_cp.data.Grid(xsize=xsize, ysize=ysize,
                             xoffset=xoffset, yoffset=yoffset)
    return open_cp.geometry.mask_grid_by_intersection(geometry, grid)
def grid_for_south_side(xoffset=0, yoffset=0, xsize=250, ysize=250):
    """Generated a masked grid for the South side geometry.

    :param xoffset: How much to move the left side by
    :param yoffset: How much to move the bottom side by
    """
    return grid_for_side(xoffset=xoffset, yoffset=yoffset,
                         xsize=xsize, ysize=ysize, side="South")
def time_range():
    """28th September 2011 – 6th January 2012"""
    start = np.datetime64("2011-09-28")
    end = np.datetime64("2012-01-06")
    step = np.timedelta64(1, "D")
    return open_cp.evaluation.HitRateEvaluator.time_range(start, end, step)
def to_dataframe(rates):
    """Turn a {prediction_date: {coverage: rate}} mapping into a DataFrame
    with dates as rows and coverage levels as columns."""
    frame = pd.DataFrame(rates).transpose()
    frame = frame.rename_axis(index="Prediction Date", columns="% Coverage")
    return frame
# ---------------------------------------------------------------------------
# Retro hotspot stuff
# Defining here allows us to use multi-process code in a notebook
class RetroHotSpotEval(open_cp.evaluation.PredictionProvider):
    """Produces retrospective hot-spot grid predictions for the evaluator.

    Defined at module level (rather than in a notebook cell) so it can be
    used with the multi-process pool code.
    """
    def __init__(self, masked_grid, points, time_window_length = np.timedelta64(56, "D")):
        # Length of history used for each prediction (default 56 days).
        self.time_window_length = time_window_length
        self.masked_grid = masked_grid
        self.points = points

    def predict(self, time):
        """Return a masked grid risk prediction for `time`, trained on the
        events in the preceding `time_window_length` window."""
        grid_pred = retro.RetroHotSpotGrid(grid=self.masked_grid)
        grid_pred.data = self.points
        # Quartic weighting with bandwidth 1000 -- presumably metres, as
        # for projected Chicago data; TODO confirm units.
        grid_pred.weight = retro.Quartic(bandwidth = 1000)
        grid_risk = grid_pred.predict(start_time = time - self.time_window_length, end_time = time)
        grid_risk.mask_with(self.masked_grid)
        return grid_risk
class RHS_Eval_Task(open_cp.pool.Task):
    """A pool task running a full hit-rate evaluation of the retro
    hot-spot predictor; `key` identifies the task's result in the pool."""
    def __init__(self, masked_grid, points, key):
        super().__init__(key)
        self.evaluator = open_cp.evaluation.HitRateEvaluator(RetroHotSpotEval(masked_grid, points))
        self.evaluator.data = points

    def __call__(self):
        # Evaluate over the module's time_range() at coverages 0..50.
        return self.evaluator.run(time_range(), range(0,51))
| {
"repo_name": "QuantCrimAtLeeds/PredictCode",
"path": "examples/Case Study Chicago South Side/common.py",
"copies": "1",
"size": "4492",
"license": "artistic-2.0",
"hash": -1483693975360792600,
"line_mean": 37.0508474576,
"line_max": 116,
"alpha_frac": 0.640311804,
"autogenerated": false,
"ratio": 3.2702112163146393,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.92833518747179,
"avg_score": 0.025434229119347864,
"num_lines": 118
} |
"""Allow your decorations to be un-decorated.
In some cases, such as when testing, it can be useful to access the
decorated class or function directly, so as to not to use the behavior
or interface that the decorator might introduce.
Example:
>>> from functools import wraps
>>> from undecorate import unwrap, unwrappable
>>>
>>> @unwrappable
... def pack(func):
... @wraps(func)
... def wrapper(args, kwargs):
... return func(*args, **kwargs)
... return wrapper
...
>>> @pack
... def myfunc(a, b, c=None, d=None):
... return (a, b, c, d)
...
>>> myfunc('a', 'b', c='c')
Traceback (most recent call last):
...
TypeError: wrapper() got an unexpected keyword argument 'c'
>>>
>>> unwrap(myfunc)('a', 'b', c='c')
('a', 'b', 'c', None)
"""
import functools
WRAPPER_ASSIGNMENTS = functools.WRAPPER_ASSIGNMENTS
WRAPPER_UPDATES = functools.WRAPPER_UPDATES
def update_wrapper(wrapper,
                   wrapped,
                   assigned=WRAPPER_ASSIGNMENTS,
                   updated=WRAPPER_UPDATES):
    """Backport setting __wrapped__ from functools.update_wrapper().

    Python 3.2 introduced, and Python 3.3 fixed, a new feature to
    functools.update_wrapper() that adds a __wrapped__ attribute to the
    wrapper function.  This is a backport of that fixed behaviour, built
    on top of functools.update_wrapper().
    """
    result = functools.update_wrapper(wrapper, wrapped,
                                      assigned=assigned, updated=updated)
    result.__wrapped__ = wrapped
    return result
def wraps(wrapped,
          assigned=WRAPPER_ASSIGNMENTS,
          updated=WRAPPER_UPDATES):
    """Decorator factory to apply backported update_wrapper()."""
    return functools.partial(
        update_wrapper,
        wrapped=wrapped,
        assigned=assigned,
        updated=updated,
    )
def unwrappable(decorator):
    """Make a decorator able to be un-decorated.

    This meta-decorator takes a decorator and returns a new decorator
    whose wrappers can be stripped off again with unwrap().

    Unneeded on Python 3.3+, where functools.wraps/update_wrapper set a
    correct __wrapped__ themselves.  (On Python 3.2 the stdlib's
    __wrapped__ always points at the innermost wrapped function.)
    """
    @wraps(decorator)
    def meta_wrapper(wrapped):
        inner = decorator(wrapped)
        inner.__wrapped__ = wrapped
        return inner

    return meta_wrapper
def unwrap(wrapper):
    """Remove the wrapper, recursively all the way down."""
    wrapped = getattr(wrapper, '__wrapped__', None)
    while wrapped is not None:
        wrapper = wrapped
        wrapped = getattr(wrapper, '__wrapped__', None)
    return wrapper
CLASS_WRAPPER_ASSIGNMENTS = ('__module__', '__doc__')
def create_class_wrapper(wrapper,
                         wrapped,
                         assigned=CLASS_WRAPPER_ASSIGNMENTS):
    """Create a wrapper class that looks like the wrapped class.

    wrapper is the class used to override the wrapped class.

    wrapped is the class whose values are overridden by the wrapper.

    assigned is a tuple naming the __dict__ items to be copied directly
    from the wrapped class (defaults to CLASS_WRAPPER_ASSIGNMENTS).

    A notable difference from update_wrapper is that it creates a new class
    that does not appear to be exactly the same as the wrapped class, but
    rather mimics the name and the module, and inherits from the original
    class, relying on class inheritance to mimic the behavior.
    """
    __dict__ = dict(wrapper.__dict__)
    for attr in assigned:
        __dict__[attr] = getattr(wrapped, attr)
    __dict__['__wrapped__'] = wrapped

    # Use the metaclass of the wrapped class
    return wrapped.__class__(wrapped.__name__, (wrapped,), __dict__)
def class_wraps(wrapped, assigned=CLASS_WRAPPER_ASSIGNMENTS):
    """Decorator factory to apply create_class_wrapper() to a wrapper class.

    Returns a decorator that invokes create_class_wrapper() with the
    decorated class as the wrapper argument and the arguments given here
    as the remaining arguments; defaults are as for create_class_wrapper().
    A convenience shorthand for partial(create_class_wrapper, ...).
    """
    return functools.partial(
        create_class_wrapper, wrapped=wrapped, assigned=assigned
    )
if __name__ == '__main__':
    import doctest
    # Exception messages differ between Python versions, so only the
    # exception type is checked in the doctests.
    doctest.testmod(optionflags=doctest.IGNORE_EXCEPTION_DETAIL)
    doctest.testfile('README.rst', optionflags=doctest.IGNORE_EXCEPTION_DETAIL)
| {
"repo_name": "ryanhiebert/undecorate",
"path": "undecorate.py",
"copies": "1",
"size": "4722",
"license": "mit",
"hash": 552177485216485500,
"line_mean": 32.4893617021,
"line_max": 79,
"alpha_frac": 0.6692079627,
"autogenerated": false,
"ratio": 4.254054054054054,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 141
} |
"""Allow you to run tests in an isolated container for an app or a lib.
If args are passed, default arguments are dropped
Usage:
test [options] <app_or_lib_name> [<suite_name>] [<args>...]
Options:
<suite_name> Name of the test suite you would like to run
If `all` is specified, all suites in the spec will be run
<args> A list of arguments to be passed to the test script
--recreate Ensures that the testing image will be recreated
--no-pull Do not pull dusty managed repos from remotes.
Examples:
To call test suite frontend with default arguments:
dusty test web frontend
To call test suite frontend with arguments in place of the defaults:
dusty test web frontend /web/javascript
"""
from docopt import docopt
from ..payload import Payload
from ..commands.test import (run_one_suite, run_all_suites, test_info_for_app_or_lib, setup_for_test,
ensure_valid_suite_name, ensure_vm_initialized,
log_in_to_required_registries)
def _setup_payloads(args):
    """Build the payloads common to every suite run: VM initialization,
    registry login, and test-image setup (shared by the 'all' and
    single-suite branches of main)."""
    vm_payload = Payload(ensure_vm_initialized)
    login_payload = Payload(log_in_to_required_registries, args['<app_or_lib_name>'])
    login_payload.run_on_daemon = False
    setup_payload = Payload(setup_for_test,
                            args['<app_or_lib_name>'],
                            pull_repos=not args['--no-pull'],
                            force_recreate=args['--recreate'])
    return [vm_payload, login_payload, setup_payload]

def main(argv):
    """Parse the `dusty test` CLI args and return the payload(s) to run.

    With suite name 'all' every suite runs; with a specific suite name the
    name is validated first and only that suite runs; with no suite name
    the available test info is printed instead.
    """
    args = docopt(__doc__, argv, options_first=True)
    if args['<suite_name>'] == 'all':
        run_payload = Payload(run_all_suites, args['<app_or_lib_name>'])
        run_payload.run_on_daemon = False
        return _setup_payloads(args) + [run_payload]
    elif args['<suite_name>']:
        validate_payload = Payload(ensure_valid_suite_name,
                                   args['<app_or_lib_name>'],
                                   args['<suite_name>'])
        run_payload = Payload(run_one_suite,
                              args['<app_or_lib_name>'],
                              args['<suite_name>'],
                              args['<args>'])
        run_payload.run_on_daemon = False
        return [validate_payload] + _setup_payloads(args) + [run_payload]
    else:
        return Payload(test_info_for_app_or_lib, args['<app_or_lib_name>'])
| {
"repo_name": "gamechanger/dusty",
"path": "dusty/cli/test.py",
"copies": "1",
"size": "2642",
"license": "mit",
"hash": 7794216381812545000,
"line_mean": 43.0333333333,
"line_max": 101,
"alpha_frac": 0.5878122634,
"autogenerated": false,
"ratio": 3.7475177304964538,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48353299938964533,
"avg_score": null,
"num_lines": null
} |
__all__ = ["PackageInfo"]
from panda3d.core import Filename, DocumentSpec, Multifile, Decompressor, EUOk, EUSuccess, VirtualFileSystem, Thread, getModelPath, ExecutionEnvironment, PStatCollector, TiXmlDocument, TiXmlDeclaration, TiXmlElement
import panda3d.core as core
from direct.p3d.FileSpec import FileSpec
from direct.p3d.ScanDirectoryNode import ScanDirectoryNode
from direct.showbase import VFSImporter
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.task.TaskManagerGlobal import taskMgr
import os
import sys
import random
import time
import copy
class PackageInfo:
""" This class represents a downloadable Panda3D package file that
can be (or has been) installed into the current runtime. It is
the Python equivalent of the P3DPackage class in the core API. """
notify = directNotify.newCategory("PackageInfo")
# Weight factors for computing download progress. This
# attempts to reflect the relative time-per-byte of each of
# these operations.
downloadFactor = 1
uncompressFactor = 0.01
unpackFactor = 0.01
patchFactor = 0.01
# These tokens are yielded (not returned) by __downloadFile() and
# other InstallStep functions.
stepComplete = 1
stepFailed = 2
restartDownload = 3
stepContinue = 4
UsageBasename = 'usage.xml'
class InstallStep:
""" This class is one step of the installPlan list; it
represents a single atomic piece of the installation step, and
the relative effort of that piece. When the plan is executed,
it will call the saved function pointer here. """
def __init__(self, func, bytes, factor, stepType):
self.__funcPtr = func
self.bytesNeeded = bytes
self.bytesDone = 0
self.bytesFactor = factor
self.stepType = stepType
self.pStatCol = PStatCollector(':App:PackageInstaller:%s' % (stepType))
def func(self):
""" self.__funcPtr(self) will return a generator of
tokens. This function defines a new generator that yields
each of those tokens, but wraps each call into the nested
generator within a pair of start/stop collector calls. """
self.pStatCol.start()
for token in self.__funcPtr(self):
self.pStatCol.stop()
yield token
self.pStatCol.start()
# Shouldn't ever get here.
self.pStatCol.stop()
raise StopIteration
def getEffort(self):
""" Returns the relative amount of effort of this step. """
return self.bytesNeeded * self.bytesFactor
def getProgress(self):
""" Returns the progress of this step, in the range
0..1. """
if self.bytesNeeded == 0:
return 1
return min(float(self.bytesDone) / float(self.bytesNeeded), 1)
    def __init__(self, host, packageName, packageVersion, platform = None,
                 solo = False, asMirror = False, perPlatform = False):
        """ Records the package's identity and initializes the
        bookkeeping state; no downloading happens here.

        host is the HostInfo that serves this package; packageName,
        packageVersion and platform identify it.  solo marks a package
        whose desc file is the entire package contents.  asMirror and
        perPlatform are stored for later use by the download and
        directory-layout logic. """

        self.host = host
        self.packageName = packageName
        self.packageVersion = packageVersion
        self.platform = platform
        self.solo = solo
        self.asMirror = asMirror
        self.perPlatform = perPlatform

        # This will be active while we are in the middle of a download
        # cycle.
        self.http = None

        # This will be filled in when the host's contents.xml file is
        # read.
        self.packageDir = None

        # These will be filled in by HostInfo when the package is read
        # from contents.xml.
        self.descFile = None
        self.importDescFile = None

        # These are filled in when the desc file is successfully read.
        self.hasDescFile = False
        self.patchVersion = None
        self.displayName = None
        self.guiApp = False
        self.uncompressedArchive = None
        self.compressedArchive = None
        self.extracts = []
        self.requires = []
        self.installPlans = None

        # This is updated during downloadPackage().  It is in the
        # range 0..1.
        self.downloadProgress = 0

        # This is set true when the package file has been fully
        # downloaded and unpacked.
        self.hasPackage = False

        # This is set true when the package has been "installed",
        # meaning it's been added to the paths and all.
        self.installed = False

        # This is set true when the package has been updated in this
        # session, but not yet written to usage.xml.
        self.updated = False

        self.diskSpace = None
    def getPackageDir(self):
        """ Returns the directory in which this package is installed.
        This may not be known until the host's contents.xml file has
        been downloaded, which informs us of the host's own install
        directory. """

        if not self.packageDir:
            # The host's contents.xml determines hostDir; read it (or
            # download it) first if we haven't yet.
            if not self.host.hasContentsFile:
                if not self.host.readContentsFile():
                    self.host.downloadContentsFile(self.http)

            # Derive the packageDir from the hostDir.
            self.packageDir = Filename(self.host.hostDir, self.packageName)
            if self.packageVersion:
                self.packageDir = Filename(self.packageDir, self.packageVersion)

            if self.host.perPlatform:
                # If we're running on a special host that wants us to
                # include the platform, we include it.
                includePlatform = True
            elif self.perPlatform and self.host.appRunner.respectPerPlatform:
                # Otherwise, if our package spec wants us to include
                # the platform (and our plugin knows about this), then
                # we also include it.
                includePlatform = True
            else:
                # Otherwise, we must be running legacy code
                # somewhere--either an old package or an old
                # plugin--and we therefore shouldn't include the
                # platform in the directory hierarchy.
                includePlatform = False

            if includePlatform and self.platform:
                self.packageDir = Filename(self.packageDir, self.platform)

        return self.packageDir
def getDownloadEffort(self):
""" Returns the relative amount of effort it will take to
download this package. The units are meaningless, except
relative to other packges."""
if not self.installPlans:
return 0
# Return the size of plan A, assuming it will work.
plan = self.installPlans[0]
size = sum([step.getEffort() for step in plan])
return size
def getPrevDownloadedEffort(self):
""" Returns a rough estimate of this package's total download
effort, even if it is already downloaded. """
effort = 0
if self.compressedArchive:
effort += self.compressedArchive.size * self.downloadFactor
if self.uncompressedArchive:
effort += self.uncompressedArchive.size * self.uncompressFactor
# Don't bother counting unpacking.
return effort
def getFormattedName(self):
""" Returns the name of this package, for output to the user.
This will be the "public" name of the package, as formatted
for user consumption; it will include capital letters and
spaces where appropriate. """
if self.displayName:
name = self.displayName
else:
name = self.packageName
if self.packageVersion:
name += ' %s' % (self.packageVersion)
if self.patchVersion:
name += ' rev %s' % (self.patchVersion)
return name
def setupFilenames(self):
""" This is called by the HostInfo when the package is read
from contents.xml, to set up the internal filenames and such
that rely on some of the information from contents.xml. """
dirname, basename = self.descFile.filename.rsplit('/', 1)
self.descFileDirname = dirname
self.descFileBasename = basename
    def checkStatus(self):
        """ Checks the current status of the desc file and the package
        contents on disk.  Returns true if the package is fully
        downloaded and unpacked, false otherwise. """

        if self.hasPackage:
            return True

        if not self.hasDescFile:
            filename = Filename(self.getPackageDir(), self.descFileBasename)
            # Only trust a desc file already on disk if it passes a
            # quick hash verification.
            if self.descFile.quickVerify(self.getPackageDir(), pathname = filename, notify = self.notify):
                if self.__readDescFile():
                    # Successfully read.  We don't need to call
                    # checkArchiveStatus again, since readDescFile()
                    # has just done it.
                    return self.hasPackage

        if self.hasDescFile:
            if self.__checkArchiveStatus():
                # It's all good.
                self.hasPackage = True

        return self.hasPackage
def hasCurrentDescFile(self):
""" Returns true if a desc file file has been successfully
read for this package and is still current, false
otherwise. """
if not self.host.hasCurrentContentsFile():
return False
return self.hasDescFile
def downloadDescFile(self, http):
""" Downloads the desc file for this particular package,
synchronously, and then reads it. Returns true on success,
false on failure. """
for token in self.downloadDescFileGenerator(http):
if token != self.stepContinue:
break
Thread.considerYield()
return (token == self.stepComplete)
def downloadDescFileGenerator(self, http):
""" A generator function that implements downloadDescFile()
one piece at a time. It yields one of stepComplete,
stepFailed, or stepContinue. """
assert self.descFile
if self.hasDescFile:
# We've already got one.
yield self.stepComplete; return
if not self.host.appRunner or self.host.appRunner.verifyContents != self.host.appRunner.P3DVCNever:
# We're allowed to download it.
self.http = http
func = lambda step, self = self: self.__downloadFile(
None, self.descFile,
urlbase = self.descFile.filename,
filename = self.descFileBasename)
step = self.InstallStep(func, self.descFile.size, self.downloadFactor, 'downloadDesc')
for token in step.func():
if token == self.stepContinue:
yield token
else:
break
while token == self.restartDownload:
# Try again.
func = lambda step, self = self: self.__downloadFile(
None, self.descFile,
urlbase = self.descFile.filename,
filename = self.descFileBasename)
step = self.InstallStep(func, self.descFile.size, self.downloadFactor, 'downloadDesc')
for token in step.func():
if token == self.stepContinue:
yield token
else:
break
if token == self.stepFailed:
# Couldn't download the desc file.
yield self.stepFailed; return
assert token == self.stepComplete
filename = Filename(self.getPackageDir(), self.descFileBasename)
# Now that we've written the desc file, make it read-only.
os.chmod(filename.toOsSpecific(), 0o444)
if not self.__readDescFile():
# Weird, it passed the hash check, but we still can't read
# it.
filename = Filename(self.getPackageDir(), self.descFileBasename)
self.notify.warning("Failure reading %s" % (filename))
yield self.stepFailed; return
yield self.stepComplete; return
def __readDescFile(self):
    """ Reads the desc xml file for this particular package,
    assuming it's been already downloaded and verified.  Returns
    true on success, false on failure.

    Side effects: fills in patchVersion, displayName, guiApp, the
    archive FileSpecs, extracts, and requires; sets hasDescFile
    and either hasPackage or installPlans. """
    if self.hasDescFile:
        # No need to read it again.
        return True

    if self.solo:
        # If this is a "solo" package, we don't actually "read"
        # the desc file; that's the entire contents of the
        # package.
        self.hasDescFile = True
        self.hasPackage = True
        return True

    filename = Filename(self.getPackageDir(), self.descFileBasename)

    # Without TinyXml support compiled into core, we can't parse
    # the desc file at all.
    if not hasattr(core, 'TiXmlDocument'):
        return False
    doc = core.TiXmlDocument(filename.toOsSpecific())
    if not doc.LoadFile():
        return False

    xpackage = doc.FirstChildElement('package')
    if not xpackage:
        return False

    try:
        self.patchVersion = int(xpackage.Attribute('patch_version') or '')
    except ValueError:
        # Attribute absent or non-numeric: no patch version known.
        self.patchVersion = None

    try:
        perPlatform = int(xpackage.Attribute('per_platform') or '')
    except ValueError:
        perPlatform = False
    if perPlatform != self.perPlatform:
        # The desc file disagrees with what the contents file told
        # us; warn but continue.
        self.notify.warning("per_platform disagreement on package %s" % (self.packageName))

    self.displayName = None
    xconfig = xpackage.FirstChildElement('config')
    if xconfig:
        # The name for display to an English-speaking user.
        self.displayName = xconfig.Attribute('display_name')

        # True if any apps that use this package must be GUI apps.
        guiApp = xconfig.Attribute('gui_app')
        if guiApp:
            self.guiApp = int(guiApp)

    # The uncompressed archive, which will be mounted directly,
    # and also used for patching.
    xuncompressedArchive = xpackage.FirstChildElement('uncompressed_archive')
    if xuncompressedArchive:
        self.uncompressedArchive = FileSpec()
        self.uncompressedArchive.loadXml(xuncompressedArchive)

    # The compressed archive, which is what is downloaded.
    xcompressedArchive = xpackage.FirstChildElement('compressed_archive')
    if xcompressedArchive:
        self.compressedArchive = FileSpec()
        self.compressedArchive.loadXml(xcompressedArchive)

    # The list of files that should be extracted to disk.
    self.extracts = []
    xextract = xpackage.FirstChildElement('extract')
    while xextract:
        file = FileSpec()
        file.loadXml(xextract)
        self.extracts.append(file)
        xextract = xextract.NextSiblingElement('extract')

    # The list of additional packages that must be installed for
    # this package to function properly.
    self.requires = []
    xrequires = xpackage.FirstChildElement('requires')
    while xrequires:
        packageName = xrequires.Attribute('name')
        version = xrequires.Attribute('version')
        hostUrl = xrequires.Attribute('host')
        if packageName and hostUrl:
            # Entries without both a name and a host are ignored.
            host = self.host.appRunner.getHostWithAlt(hostUrl)
            self.requires.append((packageName, version, host))
        xrequires = xrequires.NextSiblingElement('requires')

    self.hasDescFile = True

    # Now that we've read the desc file, go ahead and use it to
    # verify the download status.
    if self.__checkArchiveStatus():
        # It's all fully downloaded, unpacked, and ready.
        self.hasPackage = True
        return True

    # Still have to download it.
    self.__buildInstallPlans()
    return True
def __buildInstallPlans(self):
    """ Sets up self.installPlans, a list of one or more "plans"
    to download and install the package.  Each plan is a list of
    InstallStep objects; the plans are tried in order until one
    succeeds. """
    pc = PStatCollector(':App:PackageInstaller:buildInstallPlans')
    pc.start()

    self.hasPackage = False

    if self.host.appRunner and self.host.appRunner.verifyContents == self.host.appRunner.P3DVCNever:
        # We're not allowed to download anything.
        self.installPlans = []
        pc.stop()
        return

    if self.asMirror:
        # If we're just downloading a mirror archive, we only need
        # to get the compressed archive file.

        # Build a one-item install plan to download the compressed
        # archive.
        downloadSize = self.compressedArchive.size
        # fileSpec is bound as a lambda default to capture its
        # current value.
        func = lambda step, fileSpec = self.compressedArchive: self.__downloadFile(step, fileSpec, allowPartial = True)

        step = self.InstallStep(func, downloadSize, self.downloadFactor, 'download')
        installPlan = [step]
        self.installPlans = [installPlan]
        pc.stop()
        return

    # The normal download process.  Determine what we will need to
    # download, and build a plan (or two) to download it all.
    self.installPlans = None

    # We know we will at least need to unpack the archive contents
    # at the end.
    unpackSize = 0
    for file in self.extracts:
        unpackSize += file.size
    step = self.InstallStep(self.__unpackArchive, unpackSize, self.unpackFactor, 'unpack')
    planA = [step]

    # If the uncompressed archive file is good, that's all we'll
    # need to do.
    self.uncompressedArchive.actualFile = None
    if self.uncompressedArchive.quickVerify(self.getPackageDir(), notify = self.notify):
        self.installPlans = [planA]
        pc.stop()
        return

    # Maybe the compressed archive file is good.
    if self.compressedArchive.quickVerify(self.getPackageDir(), notify = self.notify):
        uncompressSize = self.uncompressedArchive.size
        step = self.InstallStep(self.__uncompressArchive, uncompressSize, self.uncompressFactor, 'uncompress')
        planA = [step] + planA
        self.installPlans = [planA]
        pc.stop()
        return

    # Maybe we can download one or more patches.  We'll come back
    # to that in a minute as plan A.  For now, construct plan B,
    # which will be to download the whole archive.
    planB = planA[:]
    uncompressSize = self.uncompressedArchive.size
    step = self.InstallStep(self.__uncompressArchive, uncompressSize, self.uncompressFactor, 'uncompress')
    planB = [step] + planB

    downloadSize = self.compressedArchive.size
    func = lambda step, fileSpec = self.compressedArchive: self.__downloadFile(step, fileSpec, allowPartial = True)
    step = self.InstallStep(func, downloadSize, self.downloadFactor, 'download')
    planB = [step] + planB

    # Now look for patches.  Start with the md5 hash from the
    # uncompressedArchive file we have on disk, and see if we can
    # find a patch chain from this file to our target.
    pathname = Filename(self.getPackageDir(), self.uncompressedArchive.filename)
    fileSpec = self.uncompressedArchive.actualFile
    if fileSpec is None and pathname.exists():
        fileSpec = FileSpec()
        fileSpec.fromFile(self.getPackageDir(), self.uncompressedArchive.filename)
    plan = None
    if fileSpec:
        plan = self.__findPatchChain(fileSpec)
    if plan:
        # We can download patches.  Great!  That means this is
        # plan A, and the full download is plan B (in case
        # something goes wrong with the patching).
        planA = plan + planA
        self.installPlans = [planA, planB]
    else:
        # There are no patches to download, oh well.  Stick with
        # plan B as the only plan.
        self.installPlans = [planB]

    # In case of unexpected failures on the internet, we will retry
    # the full download instead of just giving up.
    retries = core.ConfigVariableInt('package-full-dl-retries', 1).getValue()
    for retry in range(retries):
        self.installPlans.append(planB[:])

    pc.stop()
def __scanDirectoryRecursively(self, dirname):
    """ Generates a list of Filename objects: all of the files
    (not directories) within and below the indicated dirname,
    expressed relative to dirname. """
    found = []
    for osDirpath, osDirnames, osFilenames in os.walk(dirname.toOsSpecific()):
        relDir = Filename.fromOsSpecific(osDirpath)
        if relDir == dirname:
            # The top directory itself maps to the empty relative path.
            relDir = Filename('')
        else:
            relDir.makeRelativeTo(dirname)
        found.extend(Filename(relDir, name) for name in osFilenames)
    return found
def __removeFileFromList(self, contents, filename):
    """ Removes the indicated filename from the given list, if it is
    present.  (Only the first occurrence is removed, matching
    list.remove semantics.) """
    target = Filename(filename)
    if target in contents:
        contents.remove(target)
def __checkArchiveStatus(self):
    """ Returns true if the archive and all extractable files are
    already correct on disk, false otherwise.

    Side effects: deletes any stray files found in the package
    directory, and (when the uncompressed archive verifies) deletes
    the now-redundant compressed archive. """
    if self.host.appRunner and self.host.appRunner.verifyContents == self.host.appRunner.P3DVCNever:
        # Assume that everything is just fine.
        return True

    # Get a list of all of the files in the directory, so we can
    # remove files that don't belong.
    contents = self.__scanDirectoryRecursively(self.getPackageDir())

    # Strip out of the list every file we expect to be here.
    self.__removeFileFromList(contents, self.descFileBasename)
    self.__removeFileFromList(contents, self.compressedArchive.filename)
    self.__removeFileFromList(contents, self.UsageBasename)
    if not self.asMirror:
        self.__removeFileFromList(contents, self.uncompressedArchive.filename)
        for file in self.extracts:
            self.__removeFileFromList(contents, file.filename)

    # Now, any files that are still in the contents list don't
    # belong.  It's important to remove these files before we
    # start verifying the files that we expect to find here, in
    # case there is a problem with ambiguous filenames or
    # something (e.g. case insensitivity).
    for filename in contents:
        self.notify.info("Removing %s" % (filename))
        pathname = Filename(self.getPackageDir(), filename)
        pathname.unlink()
        self.updated = True

    if self.asMirror:
        # A mirror cares only about the compressed archive itself.
        return self.compressedArchive.quickVerify(self.getPackageDir(), notify = self.notify)

    allExtractsOk = True
    if not self.uncompressedArchive.quickVerify(self.getPackageDir(), notify = self.notify):
        self.notify.debug("File is incorrect: %s" % (self.uncompressedArchive.filename))
        allExtractsOk = False

    if allExtractsOk:
        # OK, the uncompressed archive is good; that means there
        # shouldn't be a compressed archive file here.
        pathname = Filename(self.getPackageDir(), self.compressedArchive.filename)
        pathname.unlink()

        for file in self.extracts:
            if not file.quickVerify(self.getPackageDir(), notify = self.notify):
                self.notify.debug("File is incorrect: %s" % (file.filename))
                allExtractsOk = False
                break

    if allExtractsOk:
        self.notify.debug("All %s extracts of %s seem good." % (
            len(self.extracts), self.packageName))

    return allExtractsOk
def __updateStepProgress(self, step):
    """ This callback is made from within the several step
    functions as the download step proceeds.  It updates
    self.downloadProgress with the current progress, so the caller
    can asynchronously query this value. """
    # Effort completed so far: the finished steps plus the fraction
    # of the current step that is done.
    effortDone = self.totalPlanCompleted + self.currentStepEffort * step.getProgress()
    fraction = float(effortDone) / float(self.totalPlanSize)
    # Clamp at 1 in case the step over-reports its progress.
    self.downloadProgress = min(fraction, 1)
def downloadPackage(self, http):
    """ Downloads the package file, synchronously, then
    uncompresses and unpacks it.  Returns true on success, false
    on failure.

    This assumes that self.installPlans has already been filled
    in, which will have been done by self.__readDescFile().
    """
    # Drive the asynchronous generator to completion, yielding the
    # thread between continue tokens.
    token = None
    for token in self.downloadPackageGenerator(http):
        if token == self.stepContinue:
            Thread.considerYield()
        else:
            break
    return token == self.stepComplete
def downloadPackageGenerator(self, http):
    """ A generator function that implements downloadPackage() one
    piece at a time.  It yields one of stepComplete, stepFailed,
    or stepContinue. """
    assert self.hasDescFile

    if self.hasPackage:
        # We've already got one.
        yield self.stepComplete; return

    if self.host.appRunner and self.host.appRunner.verifyContents == self.host.appRunner.P3DVCNever:
        # We're not allowed to download anything.  Assume it's already downloaded.
        yield self.stepComplete; return

    # We should have an install plan by the time we get here.
    assert self.installPlans

    self.http = http
    # Drive the install-plan sub-generator, forwarding stepContinue
    # tokens; the first non-continue token ends the attempt.
    for token in self.__followInstallPlans():
        if token == self.stepContinue:
            yield token
        else:
            break

    while token == self.restartDownload:
        # Try again.  A restart means the contents.xml file was
        # re-downloaded, so the desc file must be re-fetched before
        # the install plans are retried.
        for token in self.downloadDescFileGenerator(http):
            if token == self.stepContinue:
                yield token
            else:
                break
        if token == self.stepComplete:
            for token in self.__followInstallPlans():
                if token == self.stepContinue:
                    yield token
                else:
                    break

    if token == self.stepFailed:
        yield self.stepFailed; return

    assert token == self.stepComplete
    yield self.stepComplete; return
def __followInstallPlans(self):
    """ Performs all of the steps in self.installPlans.  Yields
    one of stepComplete, stepFailed, restartDownload, or
    stepContinue.

    Consumes self.installPlans (sets it to None), trying each plan
    in order until one succeeds. """
    if not self.installPlans:
        self.__buildInstallPlans()

    installPlans = self.installPlans
    self.installPlans = None
    for plan in installPlans:
        self.totalPlanSize = sum([step.getEffort() for step in plan])
        self.totalPlanCompleted = 0
        self.downloadProgress = 0

        planFailed = False
        for step in plan:
            self.currentStepEffort = step.getEffort()

            # Drive this step's generator, forwarding stepContinue
            # tokens to our caller.
            for token in step.func():
                if token == self.stepContinue:
                    yield token
                else:
                    break
            if token == self.restartDownload:
                # Pass the restart request up to the caller.
                yield token
            if token == self.stepFailed:
                planFailed = True
                break
            assert token == self.stepComplete

            self.totalPlanCompleted += self.currentStepEffort

        if not planFailed:
            # Successfully downloaded!
            yield self.stepComplete; return

        if taskMgr.destroyed:
            # Shutting down; don't bother with the fallback plans.
            yield self.stepFailed; return

    # All plans failed.
    yield self.stepFailed; return
def __findPatchChain(self, fileSpec):
    """ Finds the chain of patches that leads from the indicated
    patch version to the current patch version.  If found,
    constructs an installPlan (a list of InstallSteps) that
    represents the steps of the patch installation; otherwise,
    returns None. """
    # Local import; PatchMaker is only needed here.
    from direct.p3d.PatchMaker import PatchMaker

    patchMaker = PatchMaker(self.getPackageDir())
    patchChain = patchMaker.getPatchChainToCurrent(self.descFileBasename, fileSpec)
    if patchChain is None:
        # No path.
        patchMaker.cleanup()
        return None

    plan = []
    for patchfile in patchChain:
        # Each patch contributes two steps: download the patch file,
        # then apply it.  The loop variables are bound as lambda
        # defaults so each step captures the current iteration's
        # value, not the last one.
        downloadSize = patchfile.file.size
        func = lambda step, fileSpec = patchfile.file: self.__downloadFile(step, fileSpec, allowPartial = True)
        step = self.InstallStep(func, downloadSize, self.downloadFactor, 'download')
        plan.append(step)

        patchSize = patchfile.targetFile.size
        func = lambda step, patchfile = patchfile: self.__applyPatch(step, patchfile)
        step = self.InstallStep(func, patchSize, self.patchFactor, 'patch')
        plan.append(step)

    patchMaker.cleanup()
    return plan
def __downloadFile(self, step, fileSpec, urlbase = None, filename = None,
                   allowPartial = False):
    """ Downloads the indicated file from the host into
    packageDir.  Yields one of stepComplete, stepFailed,
    restartDownload, or stepContinue.

    step: the InstallStep to report progress to, or None.
    fileSpec: the FileSpec describing the file to fetch/verify.
    urlbase: the URL path relative to the host's prefix; defaults
        to descFileDirname + '/' + fileSpec.filename.
    filename: the local filename to write; defaults to
        fileSpec.filename.
    allowPartial: if true, attempt to resume a previous partial
        download of the same file. """
    if self.host.appRunner and self.host.appRunner.verifyContents == self.host.appRunner.P3DVCNever:
        # We're not allowed to download anything.
        yield self.stepFailed; return

    self.updated = True

    if not urlbase:
        urlbase = self.descFileDirname + '/' + fileSpec.filename

    # Build up a list of URL's to try downloading from.  Unlike
    # the C++ implementation in P3DPackage.cxx, here we build the
    # URL's in forward order.  Each entry is (url, cacheBust).
    tryUrls = []

    if self.host.appRunner and self.host.appRunner.superMirrorUrl:
        # We start with the "super mirror", if it's defined.
        url = self.host.appRunner.superMirrorUrl + urlbase
        tryUrls.append((url, False))

    if self.host.mirrors:
        # Choose two mirrors at random.
        mirrors = self.host.mirrors[:]
        for i in range(2):
            mirror = random.choice(mirrors)
            mirrors.remove(mirror)
            url = mirror + urlbase
            tryUrls.append((url, False))
            if not mirrors:
                break

    # After trying two mirrors and failing (or if there are no
    # mirrors), go get it from the original host.
    url = self.host.downloadUrlPrefix + urlbase
    tryUrls.append((url, False))

    # And finally, if the original host also fails, try again with
    # a cache-buster.
    tryUrls.append((url, True))

    for url, cacheBust in tryUrls:
        request = DocumentSpec(url)
        if cacheBust:
            # On the last attempt to download a particular file,
            # we bust through the cache: append a query string to
            # do this.
            url += '?' + str(int(time.time()))
            request = DocumentSpec(url)
            request.setCacheControl(DocumentSpec.CCNoCache)

        self.notify.info("%s downloading %s" % (self.packageName, url))
        if not filename:
            # Resolve the local filename on the first attempt; it
            # then sticks for subsequent attempts.
            filename = fileSpec.filename
        targetPathname = Filename(self.getPackageDir(), filename)
        targetPathname.setBinary()

        channel = self.http.makeChannel(False)

        # If there's a previous partial download, attempt to resume it.
        bytesStarted = 0
        if allowPartial and not cacheBust and targetPathname.exists():
            bytesStarted = targetPathname.getFileSize()

        if bytesStarted < 1024*1024:
            # Not enough bytes downloaded to be worth the risk of
            # a partial download.
            bytesStarted = 0
        elif bytesStarted >= fileSpec.size:
            # Couldn't possibly be our file.
            bytesStarted = 0

        if bytesStarted:
            self.notify.info("Resuming %s after %s bytes already downloaded" % (url, bytesStarted))
            # Make sure the file is writable.
            os.chmod(targetPathname.toOsSpecific(), 0o644)
            channel.beginGetSubdocument(request, bytesStarted, 0)
        else:
            # No partial download possible; get the whole file.
            targetPathname.makeDir()
            targetPathname.unlink()
            channel.beginGetDocument(request)

        channel.downloadToFile(targetPathname)
        while channel.run():
            if step:
                step.bytesDone = channel.getBytesDownloaded() + channel.getFirstByteDelivered()
                if step.bytesDone > step.bytesNeeded:
                    # Oops, too much data.  Might as well abort;
                    # it's the wrong file.
                    self.notify.warning("Got more data than expected for download %s" % (url))
                    break

                self.__updateStepProgress(step)

            if taskMgr.destroyed:
                # If the task manager has been destroyed, we must
                # be shutting down.  Get out of here.
                self.notify.warning("Task Manager destroyed, aborting %s" % (url))
                yield self.stepFailed; return

            yield self.stepContinue

        if step:
            step.bytesDone = channel.getBytesDownloaded() + channel.getFirstByteDelivered()
            self.__updateStepProgress(step)

        if not channel.isValid():
            self.notify.warning("Failed to download %s" % (url))

        elif not fileSpec.fullVerify(self.getPackageDir(), pathname = targetPathname, notify = self.notify):
            self.notify.warning("After downloading, %s incorrect" % (Filename(fileSpec.filename).getBasename()))

            # This attempt failed.  Maybe the original contents.xml
            # file is stale.  Try re-downloading it now, just to be
            # sure.
            if self.host.redownloadContentsFile(self.http):
                # Yes!  Go back and start over from the beginning.
                yield self.restartDownload; return

        else:
            # Success!
            yield self.stepComplete; return

        # Maybe the mirror is bad.  Go back and try the next
        # mirror.

    # All attempts failed.  Maybe the original contents.xml file
    # is stale.  Try re-downloading it now, just to be sure.
    if self.host.redownloadContentsFile(self.http):
        # Yes!  Go back and start over from the beginning.
        yield self.restartDownload; return

    # All mirrors failed; the server (or the internet connection)
    # must be just fubar.
    yield self.stepFailed; return
def __applyPatch(self, step, patchfile):
    """ Applies the indicated patching in-place to the current
    uncompressed archive.  The patchfile is removed after the
    operation.  Yields one of stepComplete, stepFailed,
    restartDownload, or stepContinue. """
    self.updated = True

    origPathname = Filename(self.getPackageDir(), self.uncompressedArchive.filename)
    patchPathname = Filename(self.getPackageDir(), patchfile.file.filename)
    # The patched output is written to a temporary file, then
    # renamed over the original on success.
    result = Filename.temporary('', 'patch_')
    self.notify.info("Patching %s with %s" % (origPathname, patchPathname))

    p = core.Patchfile()  # The C++ class

    ret = p.initiate(patchPathname, origPathname, result)
    if ret == EUSuccess:
        ret = p.run()
    while ret == EUOk:
        step.bytesDone = step.bytesNeeded * p.getProgress()
        self.__updateStepProgress(step)
        if taskMgr.destroyed:
            # If the task manager has been destroyed, we must
            # be shutting down.  Get out of here.
            self.notify.warning("Task Manager destroyed, aborting patch %s" % (origPathname))
            yield self.stepFailed; return

        yield self.stepContinue
        ret = p.run()
    del p

    # The patch file has served its purpose; remove it regardless
    # of the outcome.
    patchPathname.unlink()

    if ret < 0:
        self.notify.warning("Patching of %s failed." % (origPathname))
        result.unlink()
        yield self.stepFailed; return

    if not result.renameTo(origPathname):
        self.notify.warning("Couldn't rename %s to %s" % (result, origPathname))
        yield self.stepFailed; return

    yield self.stepComplete; return
def __uncompressArchive(self, step):
    """ Turns the compressed archive into the uncompressed
    archive.  Yields one of stepComplete, stepFailed,
    restartDownload, or stepContinue.

    On success, the uncompressed archive is made read-only and the
    compressed archive is deleted. """
    if self.host.appRunner and self.host.appRunner.verifyContents == self.host.appRunner.P3DVCNever:
        # We're not allowed to!
        yield self.stepFailed; return

    self.updated = True

    sourcePathname = Filename(self.getPackageDir(), self.compressedArchive.filename)
    targetPathname = Filename(self.getPackageDir(), self.uncompressedArchive.filename)
    targetPathname.unlink()
    self.notify.info("Uncompressing %s to %s" % (sourcePathname, targetPathname))
    decompressor = Decompressor()
    decompressor.initiate(sourcePathname, targetPathname)
    totalBytes = self.uncompressedArchive.size
    result = decompressor.run()
    while result == EUOk:
        step.bytesDone = int(totalBytes * decompressor.getProgress())
        self.__updateStepProgress(step)
        result = decompressor.run()
        if taskMgr.destroyed:
            # If the task manager has been destroyed, we must
            # be shutting down.  Get out of here.
            self.notify.warning("Task Manager destroyed, aborting decompresss %s" % (sourcePathname))
            yield self.stepFailed; return

        yield self.stepContinue

    if result != EUSuccess:
        yield self.stepFailed; return

    step.bytesDone = totalBytes
    self.__updateStepProgress(step)

    if not self.uncompressedArchive.quickVerify(self.getPackageDir(), notify= self.notify):
        self.notify.warning("after uncompressing, %s still incorrect" % (
            self.uncompressedArchive.filename))
        yield self.stepFailed; return

    # Now that we've verified the archive, make it read-only.
    os.chmod(targetPathname.toOsSpecific(), 0o444)

    # Now we can safely remove the compressed archive.
    sourcePathname.unlink()
    yield self.stepComplete; return
def __unpackArchive(self, step):
    """ Unpacks any files in the archive that want to be unpacked
    to disk.  Yields one of stepComplete, stepFailed,
    restartDownload, or stepContinue. """
    if not self.extracts:
        # Nothing to extract.
        self.hasPackage = True
        yield self.stepComplete; return

    if self.host.appRunner and self.host.appRunner.verifyContents == self.host.appRunner.P3DVCNever:
        # We're not allowed to!
        yield self.stepFailed; return

    self.updated = True

    mfPathname = Filename(self.getPackageDir(), self.uncompressedArchive.filename)
    self.notify.info("Unpacking %s" % (mfPathname))
    mf = Multifile()
    if not mf.openRead(mfPathname):
        self.notify.warning("Couldn't open %s" % (mfPathname))
        yield self.stepFailed; return

    allExtractsOk = True
    step.bytesDone = 0
    for file in self.extracts:
        i = mf.findSubfile(file.filename)
        if i == -1:
            self.notify.warning("Not in Multifile: %s" % (file.filename))
            allExtractsOk = False
            continue

        targetPathname = Filename(self.getPackageDir(), file.filename)
        targetPathname.setBinary()
        targetPathname.unlink()
        if not mf.extractSubfile(i, targetPathname):
            self.notify.warning("Couldn't extract: %s" % (file.filename))
            allExtractsOk = False
            continue

        if not file.quickVerify(self.getPackageDir(), notify = self.notify):
            self.notify.warning("After extracting, still incorrect: %s" % (file.filename))
            allExtractsOk = False
            continue

        # Make sure it's executable, and not writable.
        os.chmod(targetPathname.toOsSpecific(), 0o555)

        step.bytesDone += file.size
        self.__updateStepProgress(step)
        if taskMgr.destroyed:
            # If the task manager has been destroyed, we must
            # be shutting down.  Get out of here.
            self.notify.warning("Task Manager destroyed, aborting unpacking %s" % (mfPathname))
            yield self.stepFailed; return

        yield self.stepContinue

    if not allExtractsOk:
        yield self.stepFailed; return

    self.hasPackage = True
    yield self.stepComplete; return
def installPackage(self, appRunner):
    """ Mounts the package and sets up system paths so it becomes
    available for use.  Returns true on success, false on failure.

    Side effects: mounts the archive into the VFS, extends
    sys.path, the model-path, PATH and the dynamic-library search
    paths, sets the <PACKAGENAME>_ROOT environment variable, loads
    the package's prc files, and registers shared Python packages
    with VFSImporter. """
    assert self.hasPackage
    if self.installed:
        # Already installed.
        return True
    assert self not in appRunner.installedPackages

    mfPathname = Filename(self.getPackageDir(), self.uncompressedArchive.filename)
    mf = Multifile()
    if not mf.openRead(mfPathname):
        self.notify.warning("Couldn't open %s" % (mfPathname))
        return False

    # We mount it under its actual location on disk.
    root = self.getPackageDir()

    vfs = VirtualFileSystem.getGlobalPtr()
    vfs.mount(mf, root, vfs.MFReadOnly)

    # Add this to the Python search path, if it's not already
    # there.  We have to take a bit of care to check if it's
    # already there, since there can be some ambiguity in
    # os-specific path strings.
    osRoot = self.getPackageDir().toOsSpecific()
    foundOnPath = False
    for p in sys.path:
        if osRoot == p:
            # Already here, exactly.
            foundOnPath = True
            break
        elif osRoot == Filename.fromOsSpecific(p).toOsSpecific():
            # Already here, with some futzing.
            foundOnPath = True
            break

    if not foundOnPath:
        # Not already here; add it.
        sys.path.append(osRoot)

    # Put it on the model-path, too.  We do this indiscriminantly,
    # because the Panda3D runtime won't be adding things to the
    # model-path, so it shouldn't be already there.
    getModelPath().appendDirectory(self.getPackageDir())

    # Set the environment variable to reference the package root.
    envvar = '%s_ROOT' % (self.packageName.upper())
    ExecutionEnvironment.setEnvironmentVariable(envvar, osRoot)

    # Add the package root to the system paths.
    if sys.platform.startswith('win'):
        path = os.environ.get('PATH', '')
        os.environ['PATH'] = "%s;%s" % (osRoot, path)
    else:
        path = os.environ.get('PATH', '')
        os.environ['PATH'] = "%s:%s" % (osRoot, path)
        path = os.environ.get('LD_LIBRARY_PATH', '')
        os.environ['LD_LIBRARY_PATH'] = "%s:%s" % (osRoot, path)

    if sys.platform == "darwin":
        path = os.environ.get('DYLD_LIBRARY_PATH', '')
        os.environ['DYLD_LIBRARY_PATH'] = "%s:%s" % (osRoot, path)

    # Now that the environment variable is set, read all of the
    # prc files in the package.
    appRunner.loadMultifilePrcFiles(mf, self.getPackageDir())

    # Also, find any toplevel Python packages, and add these as
    # shared packages.  This will allow different packages
    # installed in different directories to share Python files as
    # if they were all in the same directory.
    for filename in mf.getSubfileNames():
        if filename.endswith('/__init__.pyc') or \
           filename.endswith('/__init__.pyo') or \
           filename.endswith('/__init__.py'):
            components = filename.split('/')[:-1]
            moduleName = '.'.join(components)
            VFSImporter.sharedPackages[moduleName] = True

    # Fix up any shared directories so we can load packages from
    # disparate locations.
    VFSImporter.reloadSharedPackages()

    self.installed = True
    appRunner.installedPackages.append(self)

    self.markUsed()
    return True
def __measureDiskSpace(self):
    """ Returns the amount of space used by this package, in
    bytes, as determined by examining the actual contents of the
    package directory and its subdirectories. """
    scan = ScanDirectoryNode(self.getPackageDir(), ignoreUsageXml = True)
    usedBytes = scan.getTotalSize()
    # Round to the nearest megabyte for the log message.
    megabytes = (usedBytes + 524288) // 1048576
    self.notify.info("Package %s uses %s MB" % (self.packageName, megabytes))
    return usedBytes
def markUsed(self):
    """ Marks the package as having been used.  This is normally
    called automatically by installPackage().  Maintains the
    usage.xml file in the package directory, tracking use counts,
    timestamps, and measured disk space. """
    if not hasattr(core, 'TiXmlDocument'):
        # Without TinyXml support compiled in, we can't maintain
        # the usage file.
        return

    if self.host.appRunner and self.host.appRunner.verifyContents == self.host.appRunner.P3DVCNever:
        # Not allowed to write any files to the package directory.
        return

    if self.updated:
        # If we've just installed a new version of the package,
        # re-measure the actual disk space used.
        self.diskSpace = self.__measureDiskSpace()

    filename = Filename(self.getPackageDir(), self.UsageBasename)

    doc = TiXmlDocument(filename.toOsSpecific())
    if not doc.LoadFile():
        # No existing usage file; start a fresh XML document.
        decl = TiXmlDeclaration("1.0", "utf-8", "")
        doc.InsertEndChild(decl)

    xusage = doc.FirstChildElement('usage')
    if not xusage:
        doc.InsertEndChild(TiXmlElement('usage'))
        xusage = doc.FirstChildElement('usage')

    now = int(time.time())

    count = xusage.Attribute('count_app')
    try:
        count = int(count or '')
    except ValueError:
        # No valid prior count: this is the first recorded use.
        count = 0
        xusage.SetAttribute('first_use', str(now))
    count += 1
    xusage.SetAttribute('count_app', str(count))

    xusage.SetAttribute('last_use', str(now))
    if self.updated:
        xusage.SetAttribute('last_update', str(now))
        self.updated = False
    else:
        # Since we haven't changed the disk space, we can just
        # read it from the previous xml file.
        diskSpace = xusage.Attribute('disk_space')
        try:
            diskSpace = int(diskSpace or '')
        except ValueError:
            # Unless it wasn't set already.
            self.diskSpace = self.__measureDiskSpace()

    xusage.SetAttribute('disk_space', str(self.diskSpace))

    # Write the file to a temporary filename, then atomically move
    # it to its actual filename, to avoid race conditions when
    # updating this file.
    tfile = Filename.temporary(str(self.getPackageDir()), '.xml')
    if doc.SaveFile(tfile.toOsSpecific()):
        tfile.renameTo(filename)
def getUsage(self):
    """ Returns the xusage element that is read from the usage.xml
    file, or None if there is no usage.xml file. """
    if not hasattr(core, 'TiXmlDocument'):
        # No TinyXml support compiled in; nothing to read.
        return None

    usagePathname = Filename(self.getPackageDir(), self.UsageBasename)
    doc = TiXmlDocument(usagePathname.toOsSpecific())
    if doc.LoadFile():
        xusage = doc.FirstChildElement('usage')
        if xusage:
            # Return a copy so the caller can't mutate our document.
            return copy.copy(xusage)
    return None
| {
"repo_name": "tobspr/panda3d",
"path": "direct/src/p3d/PackageInfo.py",
"copies": "12",
"size": "48231",
"license": "bsd-3-clause",
"hash": 3712609591782133000,
"line_mean": 37.9902991108,
"line_max": 215,
"alpha_frac": 0.6011486388,
"autogenerated": false,
"ratio": 4.549665125931516,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0026034122966061336,
"num_lines": 1237
} |
__all__ = ["PackageInstaller"]
from direct.showbase.DirectObject import DirectObject
from direct.stdpy.threading import Lock, RLock
from direct.showbase.MessengerGlobal import messenger
from direct.task.TaskManagerGlobal import taskMgr
from direct.p3d.PackageInfo import PackageInfo
from panda3d.core import TPLow, PStatCollector
from direct.directnotify.DirectNotifyGlobal import directNotify
class PackageInstaller(DirectObject):
""" This class is used in a p3d runtime environment to manage the
asynchronous download and installation of packages. If you just
want to install a package synchronously, see
appRunner.installPackage() for a simpler interface.
To use this class, you should subclass from it and override any of
the six callback methods: downloadStarted(), packageStarted(),
packageProgress(), downloadProgress(), packageFinished(),
downloadFinished().
Also see DWBPackageInstaller, which does exactly this, to add a
DirectWaitBar GUI.
"""
notify = directNotify.newCategory("PackageInstaller")
globalLock = Lock()
nextUniqueId = 1
# This is a chain of state values progressing forward in time.
S_initial = 0 # addPackage() calls are being made
S_ready = 1 # donePackages() has been called
S_started = 2 # download has started
S_done = 3 # download is over
class PendingPackage:
""" This class describes a package added to the installer for
download. """
notify = directNotify.newCategory("PendingPackage")
def __init__(self, packageName, version, host):
self.packageName = packageName
self.version = version
self.host = host
# This will be filled in properly by checkDescFile() or
# getDescFile(); in the meantime, set a placeholder.
self.package = PackageInfo(host, packageName, version)
# Set true when the package has finished downloading,
# either successfully or unsuccessfully.
self.done = False
# Set true or false when self.done has been set.
self.success = False
# Set true when the packageFinished() callback has been
# delivered.
self.notified = False
# These are used to ensure the callbacks only get
# delivered once for a particular package.
self.calledPackageStarted = False
self.calledPackageFinished = False
# This is the amount of stuff we have to process to
# install this package, and the amount of stuff we have
# processed so far. "Stuff" includes bytes downloaded,
# bytes uncompressed, and bytes extracted; and each of
# which is weighted differently into one grand total. So,
# the total doesn't really represent bytes; it's a
# unitless number, which means something only as a ratio
# to other packages. Filled in by checkDescFile() or
# getDescFile().
self.downloadEffort = 0
# Similar, but this is the theoretical effort if the
# package were already downloaded.
self.prevDownloadedEffort = 0
def __cmp__(self, pp):
""" Python comparision function. This makes all
PendingPackages withe same (packageName, version, host)
combination be deemed equivalent. """
return cmp((self.packageName, self.version, self.host),
(pp.packageName, pp.version, pp.host))
def getProgress(self):
""" Returns the download progress of this package in the
range 0..1. """
return self.package.downloadProgress
def checkDescFile(self):
    """ Returns true if the desc file is already downloaded
    and good, or false if it needs to be downloaded.  As a side
    effect, fills in self.package, self.downloadEffort and
    self.prevDownloadedEffort. """

    if not self.host.hasCurrentContentsFile():
        # If the contents file isn't ready yet, we can't check
        # the desc file yet.
        return False

    # All right, get the package info now.
    package = self.host.getPackage(self.packageName, self.version)
    if not package:
        self.notify.warning("Package %s %s not known on %s" % (
            self.packageName, self.version, self.host.hostUrl))
        return False

    self.package = package
    self.package.checkStatus()
    if not self.package.hasDescFile:
        return False

    self.downloadEffort = self.package.getDownloadEffort()
    # Bug fix: this previously assigned to the misspelled attribute
    # "prevDownloadEffort", leaving the real prevDownloadedEffort
    # attribute (set in __init__ and read by the progress task)
    # untouched.
    self.prevDownloadedEffort = 0
    if self.downloadEffort == 0:
        self.prevDownloadedEffort = self.package.getPrevDownloadedEffort()

    return True
def getDescFile(self, http):
    """ Synchronously downloads the desc files required for
    the package.  Returns true on success, false on failure. """

    if not self.host.downloadContentsFile(http):
        return False

    # All right, get the package info now.
    package = self.host.getPackage(self.packageName, self.version)
    if not package:
        self.notify.warning("Package %s %s not known on %s" % (
            self.packageName, self.version, self.host.hostUrl))
        return False

    self.package = package
    if not self.package.downloadDescFile(http):
        return False

    self.package.checkStatus()
    self.downloadEffort = self.package.getDownloadEffort()
    # Bug fix: this previously assigned to the misspelled attribute
    # "prevDownloadEffort", leaving the real prevDownloadedEffort
    # attribute (set in __init__ and read by the progress task)
    # untouched.
    self.prevDownloadedEffort = 0
    if self.downloadEffort == 0:
        self.prevDownloadedEffort = self.package.getPrevDownloadedEffort()

    return True
def __init__(self, appRunner, taskChain = 'default'):
    """ Creates the installer.  appRunner provides HTTP access and
    host lookup; taskChain names the task chain on which the
    download work will run. """

    # Assign a unique id under the class-wide lock so concurrent
    # installers never share event names.
    self.globalLock.acquire()
    try:
        self.uniqueId = PackageInstaller.nextUniqueId
        PackageInstaller.nextUniqueId += 1
    finally:
        self.globalLock.release()

    self.appRunner = appRunner
    self.taskChain = taskChain

    # If we're to be running on an asynchronous task chain, and
    # the task chain hasn't yet been set up already, create the
    # default parameters now.
    if taskChain != 'default' and not taskMgr.hasTaskChain(self.taskChain):
        taskMgr.setupTaskChain(self.taskChain, numThreads = 1,
                               threadPriority = TPLow)

    # Guards the calledDownloadStarted/Finished and per-package
    # callback flags.
    self.callbackLock = Lock()
    self.calledDownloadStarted = False
    self.calledDownloadFinished = False

    # A list of all packages that have been added to the
    # installer.
    self.packageLock = RLock()
    self.packages = []
    self.state = self.S_initial

    # A list of packages that are waiting for their desc files.
    self.needsDescFile = []
    self.descFileTask = None

    # A list of packages that are waiting to be downloaded and
    # installed.
    self.needsDownload = []
    self.downloadTask = None

    # A list of packages that were already done at the time they
    # were passed to addPackage().
    self.earlyDone = []

    # A list of packages that have been successfully installed, or
    # packages that have failed.
    self.done = []
    self.failed = []

    # This task is spawned on the default task chain, to update
    # the status during the download.
    self.progressTask = None

    # Route this installer's events (sent from the download task
    # chain) back to the private handlers below.
    self.accept('PackageInstaller-%s-allHaveDesc' % self.uniqueId,
                self.__allHaveDesc)
    self.accept('PackageInstaller-%s-packageStarted' % self.uniqueId,
                self.__packageStarted)
    self.accept('PackageInstaller-%s-packageDone' % self.uniqueId,
                self.__packageDone)
def destroy(self):
    """ Interrupts all pending downloads. No further callbacks
    will be made. """
    # destroy() and cleanup() are synonymous.
    self.cleanup()
def cleanup(self):
    """ Interrupts all pending downloads. No further callbacks
    will be made. """

    self.packageLock.acquire()
    try:
        # Stop the desc-file and download tasks under the lock.
        if self.descFileTask:
            taskMgr.remove(self.descFileTask)
            self.descFileTask = None
        if self.downloadTask:
            taskMgr.remove(self.downloadTask)
            self.downloadTask = None
    finally:
        self.packageLock.release()

    # The progress task runs on the default chain and is not guarded
    # by packageLock.
    if self.progressTask:
        taskMgr.remove(self.progressTask)
        self.progressTask = None

    # Stop listening for this installer's events.
    self.ignoreAll()
def addPackage(self, packageName, version = None, hostUrl = None):
    """ Adds the named package to the list of packages to be
    downloaded.  Call donePackages() to finish the list.

    Raises ValueError if called after donePackages(). """

    if self.state != self.S_initial:
        # Modernized from the Python-2-only "raise ValueError, msg"
        # statement form; the call form works identically in both
        # Python 2 and 3.
        raise ValueError('addPackage called after donePackages')

    host = self.appRunner.getHostWithAlt(hostUrl)

    pp = self.PendingPackage(packageName, version, host)
    self.packageLock.acquire()
    try:
        self.__internalAddPackage(pp)
    finally:
        self.packageLock.release()
def __internalAddPackage(self, pp):
    """ Adds the indicated "pending package" to the appropriate
    list(s) for downloading and installing. Assumes packageLock
    is already held."""

    if pp in self.packages:
        # Already added.
        return

    self.packages.append(pp)

    # We always add the package to needsDescFile, even if we
    # already have its desc file; this guarantees that packages
    # are downloaded in the order they are added.
    self.needsDescFile.append(pp)
    if not self.descFileTask:
        # Spawn the desc-file fetcher task if it isn't already
        # running.
        self.descFileTask = taskMgr.add(
            self.__getDescFileTask, 'getDescFile',
            taskChain = self.taskChain)
def donePackages(self):
    """ After calling addPackage() for each package to be
    installed, call donePackages() to mark the end of the list.
    This is necessary to determine what the complete set of
    packages is (and therefore how large the total download size
    is). None of the low-level callbacks will be made before this
    call. """

    if self.state != self.S_initial:
        # We've already been here.
        return

    # Throw the messages for packages that were already done
    # before we started.
    for pp in self.earlyDone:
        self.__donePackage(pp, True)
    self.earlyDone = []

    self.packageLock.acquire()
    try:
        # Re-check the state now that the lock is held, in case
        # another thread got here first.
        if self.state != self.S_initial:
            return
        self.state = self.S_ready
        if not self.needsDescFile:
            # All package desc files are already available; so begin.
            self.__prepareToStart()
    finally:
        self.packageLock.release()

    if not self.packages:
        # Trivial no-op.
        self.__callDownloadFinished(True)
def downloadStarted(self):
    """ This callback is made at some point after donePackages()
    is called; at the time of this callback, the total download
    size is known, and we can sensibly report progress through the
    whole. """
    # Default behavior: just log.
    self.notify.info("downloadStarted")
def packageStarted(self, package):
    """ This callback is made for each package between
    downloadStarted() and downloadFinished() to indicate the start
    of a new package. """
    # Default behavior: just log.
    self.notify.debug("packageStarted: %s" % (package.packageName))
def packageProgress(self, package, progress):
    """ This callback is made repeatedly between packageStarted()
    and packageFinished() to update the current progress on the
    indicated package only. The progress value ranges from 0
    (beginning) to 1 (complete). """
    # Default behavior: just log.
    self.notify.debug("packageProgress: %s %s" % (package.packageName, progress))
def downloadProgress(self, overallProgress):
    """ This callback is made repeatedly between downloadStarted()
    and downloadFinished() to update the current progress through
    all packages. The progress value ranges from 0 (beginning) to
    1 (complete). """
    # Default behavior: just log.
    self.notify.debug("downloadProgress: %s" % (overallProgress))
def packageFinished(self, package, success):
    """ This callback is made for each package between
    downloadStarted() and downloadFinished() to indicate that a
    package has finished downloading. If success is true, there
    were no problems and the package is now installed.

    If this package did not require downloading (because it was
    already downloaded), this callback will be made immediately,
    *without* a corresponding call to packageStarted(), and may
    even be made before downloadStarted(). """
    # Default behavior: just log.
    self.notify.info("packageFinished: %s %s" % (package.packageName, success))
def downloadFinished(self, success):
    """ This callback is made when all of the packages have been
    downloaded and installed (or there has been some failure). If
    all packages where successfully installed, success is True.

    If there were no packages that required downloading, this
    callback will be made immediately, *without* a corresponding
    call to downloadStarted(). """
    # Default behavior: just log.
    self.notify.info("downloadFinished: %s" % (success))
def __prepareToStart(self):
    """ This is called internally when transitioning from S_ready
    to S_started. It sets up whatever initial values are
    needed. Assumes self.packageLock is held. Returns False if
    there were no packages to download, and the state was
    therefore transitioned immediately to S_done. """

    if not self.needsDownload:
        self.state = self.S_done
        return False

    self.state = self.S_started

    assert not self.downloadTask
    # The downloader runs on the (possibly threaded) install chain.
    self.downloadTask = taskMgr.add(
        self.__downloadPackageTask, 'downloadPackage',
        taskChain = self.taskChain)

    assert not self.progressTask
    # The progress reporter runs on the default chain.
    self.progressTask = taskMgr.add(
        self.__progressTask, 'packageProgress')

    return True
def __allHaveDesc(self):
    """ This method is called internally when all of the pending
    packages have their desc info. """
    working = True

    self.packageLock.acquire()
    try:
        if self.state == self.S_ready:
            # We've already called donePackages(), so move on now.
            working = self.__prepareToStart()
    finally:
        self.packageLock.release()

    if not working:
        # Nothing needed downloading; report completion right away.
        self.__callDownloadFinished(True)
def __packageStarted(self, pp):
    """ This method is called when a single package is beginning
    to download. """
    # The first package to start also triggers the overall
    # downloadStarted() callback.
    self.__callDownloadStarted()
    self.__callPackageStarted(pp)
def __packageDone(self, pp):
    """ This method is called when a single package has been
    downloaded and installed, or has failed. """

    self.__callPackageFinished(pp, pp.success)
    pp.notified = True

    # Determine, under the lock, whether every package has now
    # reported in, and whether all of them succeeded.
    allSucceeded = True
    allNotified = True
    self.packageLock.acquire()
    try:
        for other in self.packages:
            if not other.notified:
                allNotified = False
            elif not other.success:
                allSucceeded = False
    finally:
        self.packageLock.release()

    if allNotified:
        self.__callDownloadFinished(allSucceeded)
def __callPackageStarted(self, pp):
    """ Calls the packageStarted() callback for a particular
    package if it has not already been called, being careful to
    avoid race conditions. """

    self.callbackLock.acquire()
    try:
        if not pp.calledPackageStarted:
            self.packageStarted(pp.package)
            # Report an initial per-package progress of zero.
            self.packageProgress(pp.package, 0)
            pp.calledPackageStarted = True
    finally:
        self.callbackLock.release()
def __callPackageFinished(self, pp, success):
    """ Calls the packageFinished() callback for a particular
    package if it has not already been called, being careful to
    avoid race conditions. """

    self.callbackLock.acquire()
    try:
        if not pp.calledPackageFinished:
            if success:
                # Make sure the package reports 100% before it
                # finishes.
                self.packageProgress(pp.package, 1)
            self.packageFinished(pp.package, success)
            pp.calledPackageFinished = True
    finally:
        self.callbackLock.release()
def __callDownloadStarted(self):
    """ Calls the downloadStarted() callback if it has not already
    been called, being careful to avoid race conditions. """

    self.callbackLock.acquire()
    try:
        if not self.calledDownloadStarted:
            self.downloadStarted()
            # Report an initial overall progress of zero.
            self.downloadProgress(0)
            self.calledDownloadStarted = True
    finally:
        self.callbackLock.release()
def __callDownloadFinished(self, success):
    """ Calls the downloadFinished() callback if it has not
    already been called, being careful to avoid race
    conditions. """

    self.callbackLock.acquire()
    try:
        if not self.calledDownloadFinished:
            if success:
                # Make sure the overall progress reports 100% first.
                self.downloadProgress(1)
            self.downloadFinished(success)
            self.calledDownloadFinished = True
    finally:
        self.callbackLock.release()
def __getDescFileTask(self, task):
    """ This task runs on the asynchronous task chain; each pass,
    it extracts one package from self.needsDescFile and downloads
    its desc file. On success, it adds the package to
    self.needsDownload. """

    self.packageLock.acquire()
    try:
        # If we've finished all of the packages that need desc
        # files, stop the task.
        if not self.needsDescFile:
            self.descFileTask = None

            # Tell the main thread; the event is delivered on the
            # default task chain.
            eventName = 'PackageInstaller-%s-allHaveDesc' % self.uniqueId
            messenger.send(eventName, taskChain = 'default')

            return task.done

        pp = self.needsDescFile[0]
        del self.needsDescFile[0]
    finally:
        self.packageLock.release()

    # Now serve this one package.
    if not pp.checkDescFile():
        if not pp.getDescFile(self.appRunner.http):
            self.__donePackage(pp, False)
            return task.cont

    # This package is now ready to be downloaded. We always add
    # it to needsDownload, even if it's already downloaded, to
    # guarantee ordering of packages.
    self.packageLock.acquire()
    try:
        # Also add any packages required by this one.
        for packageName, version, host in pp.package.requires:
            pp2 = self.PendingPackage(packageName, version, host)
            self.__internalAddPackage(pp2)
        self.needsDownload.append(pp)
    finally:
        self.packageLock.release()

    return task.cont
def __downloadPackageTask(self, task):
    """ This generator task runs on the asynchronous task chain;
    each pass, it extracts one package from self.needsDownload and
    downloads it. """

    while True:
        self.packageLock.acquire()
        try:
            # If we're done downloading, stop the task.
            if self.state == self.S_done or not self.needsDownload:
                self.downloadTask = None
                self.packageLock.release()
                yield task.done; return

            assert self.state == self.S_started
            pp = self.needsDownload[0]
            del self.needsDownload[0]
        except:
            # Not a finally-block: on the normal path the lock is
            # released below, after leaving the try.
            self.packageLock.release()
            raise
        self.packageLock.release()

        # Now serve this one package.
        eventName = 'PackageInstaller-%s-packageStarted' % self.uniqueId
        messenger.send(eventName, [pp], taskChain = 'default')

        if not pp.package.hasPackage:
            # Drive the package's own download generator, yielding
            # back to the task manager whenever it asks to continue.
            for token in pp.package.downloadPackageGenerator(self.appRunner.http):
                if token == pp.package.stepContinue:
                    yield task.cont
                else:
                    break

            if token != pp.package.stepComplete:
                # Download failed.
                pc = PStatCollector(':App:PackageInstaller:donePackage:%s' % (pp.package.packageName))
                pc.start()
                self.__donePackage(pp, False)
                pc.stop()
                yield task.cont
                continue

        # Successfully downloaded and installed.
        pc = PStatCollector(':App:PackageInstaller:donePackage:%s' % (pp.package.packageName))
        pc.start()
        self.__donePackage(pp, True)
        pc.stop()

        # Continue the loop without yielding, so we pick up the
        # next package within this same frame.
def __donePackage(self, pp, success):
    """ Marks the indicated package as done, either successfully
    or otherwise. """
    assert not pp.done

    if success:
        pc = PStatCollector(':App:PackageInstaller:install:%s' % (pp.package.packageName))
        pc.start()
        pp.package.installPackage(self.appRunner)
        pc.stop()

    self.packageLock.acquire()
    try:
        pp.done = True
        pp.success = success
        if success:
            self.done.append(pp)
        else:
            self.failed.append(pp)
    finally:
        self.packageLock.release()

    # Tell the main thread; the event is delivered on the default
    # task chain.
    eventName = 'PackageInstaller-%s-packageDone' % self.uniqueId
    messenger.send(eventName, [pp], taskChain = 'default')
def __progressTask(self, task):
    """ Runs on the default task chain to report per-package and
    overall progress while the download is underway. """
    self.callbackLock.acquire()
    try:
        if not self.calledDownloadStarted:
            # We haven't yet officially started the download.
            return task.cont

        if self.calledDownloadFinished:
            # We've officially ended the download.
            self.progressTask = None
            return task.done

        # Sum the weighted "effort" across all packages; a package's
        # contribution scales with its own downloadProgress.
        downloadEffort = 0
        currentDownloadSize = 0
        for pp in self.packages:
            downloadEffort += pp.downloadEffort + pp.prevDownloadedEffort
            packageProgress = pp.getProgress()
            currentDownloadSize += pp.downloadEffort * packageProgress + pp.prevDownloadedEffort
            if pp.calledPackageStarted and not pp.calledPackageFinished:
                self.packageProgress(pp.package, packageProgress)

        if not downloadEffort:
            # Nothing to download at all counts as fully complete.
            progress = 1
        else:
            progress = float(currentDownloadSize) / float(downloadEffort)
        self.downloadProgress(progress)
    finally:
        self.callbackLock.release()

    return task.cont
| {
"repo_name": "Wilee999/panda3d",
"path": "direct/src/p3d/PackageInstaller.py",
"copies": "9",
"size": "23408",
"license": "bsd-3-clause",
"hash": 7132608142070511000,
"line_mean": 35.575,
"line_max": 106,
"alpha_frac": 0.598726931,
"autogenerated": false,
"ratio": 4.780069430263427,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9878796361263427,
"avg_score": null,
"num_lines": null
} |
__all__ = ["PackageMerger", "PackageMergerError"]
from direct.p3d.FileSpec import FileSpec
from direct.p3d.SeqValue import SeqValue
from direct.directnotify.DirectNotifyGlobal import *
from panda3d.core import *
import shutil
import os
class PackageMergerError(Exception):
    """ Raised when a source directory cannot be merged, e.g. when
    its contents.xml file cannot be read. """
    pass
class PackageMerger:
    """ This class will combine two or more separately-built stage
    directories, the output of Packager.py or the ppackage tool, into
    a single output directory. It assumes that the clocks on all
    hosts are in sync, so that the file across all builds with the
    most recent timestamp (indicated in the contents.xml file) is
    always the most current version of the file. """

    notify = directNotify.newCategory("PackageMerger")

    class PackageEntry:
        """ This corresponds to a <package> entry in the contents.xml
        file. """
        def __init__(self, xpackage, sourceDir):
            self.sourceDir = sourceDir
            self.loadXml(xpackage)

        def getKey(self):
            """ Returns a tuple used for sorting the PackageEntry
            objects uniquely per package. """
            return (self.packageName, self.platform, self.version)

        def isNewer(self, other):
            # Recency is judged solely by the desc-file timestamp.
            return self.descFile.timestamp > other.descFile.timestamp

        def loadXml(self, xpackage):
            """ Populates this entry from a <package> XML element,
            validating (and correcting) the on-disk contents as a
            side effect. """
            self.packageName = xpackage.Attribute('name')
            self.platform = xpackage.Attribute('platform')
            self.version = xpackage.Attribute('version')
            solo = xpackage.Attribute('solo')
            self.solo = int(solo or '0')
            perPlatform = xpackage.Attribute('per_platform')
            self.perPlatform = int(perPlatform or '0')

            self.descFile = FileSpec()
            self.descFile.loadXml(xpackage)
            self.validatePackageContents()
            # correctSelf = True lets quickVerify rewrite the hash and
            # timestamp in our FileSpec from the file on disk.
            self.descFile.quickVerify(packageDir = self.sourceDir, notify = PackageMerger.notify, correctSelf = True)

            self.packageSeq = SeqValue()
            self.packageSeq.loadXml(xpackage, 'seq')
            self.packageSetVer = SeqValue()
            self.packageSetVer.loadXml(xpackage, 'set_ver')

            self.importDescFile = None
            ximport = xpackage.FirstChildElement('import')
            if ximport:
                self.importDescFile = FileSpec()
                self.importDescFile.loadXml(ximport)
                self.importDescFile.quickVerify(packageDir = self.sourceDir, notify = PackageMerger.notify, correctSelf = True)

        def makeXml(self):
            """ Returns a new TiXmlElement. """
            xpackage = TiXmlElement('package')
            xpackage.SetAttribute('name', self.packageName)
            if self.platform:
                xpackage.SetAttribute('platform', self.platform)
            if self.version:
                xpackage.SetAttribute('version', self.version)
            if self.solo:
                xpackage.SetAttribute('solo', '1')
            if self.perPlatform:
                xpackage.SetAttribute('per_platform', '1')

            self.descFile.storeXml(xpackage)

            self.packageSeq.storeXml(xpackage, 'seq')
            self.packageSetVer.storeXml(xpackage, 'set_ver')

            if self.importDescFile:
                ximport = TiXmlElement('import')
                self.importDescFile.storeXml(ximport)
                xpackage.InsertEndChild(ximport)
            return xpackage

        def validatePackageContents(self):
            """ Validates the contents of the package directory itself
            against the expected hashes and timestamps. Updates
            hashes and timestamps where needed. """

            if self.solo:
                # Solo packages carry no desc file of this form.
                return

            needsChange = False
            packageDescFullpath = Filename(self.sourceDir, self.descFile.filename)
            packageDir = Filename(packageDescFullpath.getDirname())
            doc = TiXmlDocument(packageDescFullpath.toOsSpecific())
            if not doc.LoadFile():
                message = "Could not read XML file: %s" % (self.descFile.filename)
                raise OSError(message)

            xpackage = doc.FirstChildElement('package')
            if not xpackage:
                message = "No package definition: %s" % (self.descFile.filename)
                raise OSError(message)

            # Re-verify the compressed archive entry, correcting the
            # stored hash/timestamp if the file on disk differs.
            xcompressed = xpackage.FirstChildElement('compressed_archive')
            if xcompressed:
                spec = FileSpec()
                spec.loadXml(xcompressed)
                if not spec.quickVerify(packageDir = packageDir, notify = PackageMerger.notify, correctSelf = True):
                    spec.storeXml(xcompressed)
                    needsChange = True

            # Likewise for each patch entry.
            xpatch = xpackage.FirstChildElement('patch')
            while xpatch:
                spec = FileSpec()
                spec.loadXml(xpatch)
                if not spec.quickVerify(packageDir = packageDir, notify = PackageMerger.notify, correctSelf = True):
                    spec.storeXml(xpatch)
                    needsChange = True
                xpatch = xpatch.NextSiblingElement('patch')

            if needsChange:
                # Something was out of date; rewrite the desc file and
                # refresh our own record of it.
                PackageMerger.notify.info("Rewriting %s" % (self.descFile.filename))
                doc.SaveFile()
                self.descFile.quickVerify(packageDir = self.sourceDir, notify = PackageMerger.notify, correctSelf = True)

    # PackageMerger constructor
    def __init__(self, installDir):
        self.installDir = installDir
        self.xhost = None
        # Maps PackageEntry.getKey() -> PackageEntry.
        self.contents = {}
        self.maxAge = None
        self.contentsSeq = SeqValue()

        # We allow the first one to fail quietly.
        self.__readContentsFile(self.installDir, None)

    def __readContentsFile(self, sourceDir, packageNames):
        """ Reads the contents.xml file from the indicated sourceDir,
        and updates the internal set of packages appropriately.
        Returns False if the file could not be read. """

        assert sourceDir != None, "No source directory was specified!"
        contentsFilename = Filename(sourceDir, 'contents.xml')
        doc = TiXmlDocument(contentsFilename.toOsSpecific())
        if not doc.LoadFile():
            # Couldn't read file.
            return False

        xcontents = doc.FirstChildElement('contents')
        if xcontents:
            # Keep the smallest max_age seen across all inputs.
            maxAge = xcontents.Attribute('max_age')
            if maxAge:
                maxAge = int(maxAge)
                if self.maxAge is None:
                    self.maxAge = maxAge
                else:
                    self.maxAge = min(self.maxAge, maxAge)

            # Keep the largest contents sequence number seen.
            contentsSeq = SeqValue()
            if contentsSeq.loadXml(xcontents):
                self.contentsSeq = max(self.contentsSeq, contentsSeq)

            xhost = xcontents.FirstChildElement('host')
            if xhost:
                self.xhost = xhost.Clone()

            xpackage = xcontents.FirstChildElement('package')
            while xpackage:
                pe = self.PackageEntry(xpackage, sourceDir)

                # Filter out any packages not listed in
                # packageNames (unless packageNames is None,
                # in which case don't filter anything).
                if packageNames is None or pe.packageName in packageNames:
                    other = self.contents.get(pe.getKey(), None)
                    if not other or pe.isNewer(other):
                        # Store this package in the resulting output.
                        self.contents[pe.getKey()] = pe

                xpackage = xpackage.NextSiblingElement('package')

        self.contentsDoc = doc

        return True

    def __writeContentsFile(self):
        """ Writes the contents.xml file at the end of processing. """

        filename = Filename(self.installDir, 'contents.xml')
        doc = TiXmlDocument(filename.toOsSpecific())
        decl = TiXmlDeclaration("1.0", "utf-8", "")
        doc.InsertEndChild(decl)

        xcontents = TiXmlElement('contents')
        if self.xhost:
            xcontents.InsertEndChild(self.xhost)

        if self.maxAge is not None:
            xcontents.SetAttribute('max_age', str(self.maxAge))

        self.contentsSeq.storeXml(xcontents)

        # Write the packages sorted by key for a stable output order.
        contents = list(self.contents.items())
        contents.sort()
        for key, pe in contents:
            xpackage = pe.makeXml()
            xcontents.InsertEndChild(xpackage)

        doc.InsertEndChild(xcontents)
        doc.SaveFile()

    def __copySubdirectory(self, pe):
        """ Copies the subdirectory referenced in the indicated
        PackageEntry object into the installDir, replacing the
        contents of any similarly-named subdirectory already
        there. """

        dirname = Filename(pe.descFile.filename).getDirname()
        self.notify.info("copying %s" % (dirname))

        sourceDirname = Filename(pe.sourceDir, dirname)
        targetDirname = Filename(self.installDir, dirname)

        self.__rCopyTree(sourceDirname, targetDirname)

    def __rCopyTree(self, sourceFilename, targetFilename):
        """ Recursively copies the contents of sourceDirname onto
        targetDirname. This behaves like shutil.copytree, but it does
        not remove pre-existing subdirectories. """

        if targetFilename.exists():
            if not targetFilename.isDirectory():
                # Delete any regular files in the way.
                targetFilename.unlink()
            elif not sourceFilename.isDirectory():
                # If the source file is a regular file, but the target
                # file is a directory, completely remove the target
                # file.
                shutil.rmtree(targetFilename.toOsSpecific())
            else:
                # Both the source file and target file are
                # directories.

                # We have to clean out the target directory first.
                # Instead of using shutil.rmtree(), remove the files in
                # this directory one at a time, so we don't inadvertently
                # clean out subdirectories too.
                files = os.listdir(targetFilename.toOsSpecific())
                for file in files:
                    f = Filename(targetFilename, file)
                    if f.isRegularFile():
                        f.unlink()

        if sourceFilename.isDirectory():
            # Recursively copying a directory.
            Filename(targetFilename, '').makeDir()
            files = os.listdir(sourceFilename.toOsSpecific())
            for file in files:
                self.__rCopyTree(Filename(sourceFilename, file),
                                 Filename(targetFilename, file))
        else:
            # Copying a regular file.
            sourceFilename.copyTo(targetFilename)

            # Also try to copy the timestamp, but don't fuss too much
            # if it doesn't work.
            try:
                st = os.stat(sourceFilename.toOsSpecific())
                os.utime(targetFilename.toOsSpecific(), (st.st_atime, st.st_mtime))
            except OSError:
                pass

    def merge(self, sourceDir, packageNames = None):
        """ Adds the contents of the indicated source directory into
        the current pool. If packageNames is not None, it is a list
        of package names that we wish to include from the source;
        packages not named in this list will be unchanged. """

        if not self.__readContentsFile(sourceDir, packageNames):
            message = "Couldn't read %s" % (sourceDir)
            raise PackageMergerError(message)

    def close(self):
        """ Finalizes the results of all of the previous calls to
        merge(), writes the new contents.xml file, and copies in all
        of the new contents. """

        dirname = Filename(self.installDir, '')
        dirname.makeDir()

        for pe in self.contents.values():
            if pe.sourceDir != self.installDir:
                # Here's a new subdirectory we have to copy in.
                self.__copySubdirectory(pe)

        self.contentsSeq += 1
        self.__writeContentsFile()
| {
"repo_name": "grimfang/panda3d",
"path": "direct/src/p3d/PackageMerger.py",
"copies": "10",
"size": "12061",
"license": "bsd-3-clause",
"hash": -7735742091881892000,
"line_mean": 38.4150326797,
"line_max": 127,
"alpha_frac": 0.5966337783,
"autogenerated": false,
"ratio": 4.556479032867397,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
__all__ = ["Package", "Packages"]
from .utils import pypi_api
from .utils.version import wanted_version, sort_versions
from .utils.exception import OperationException
def version_xor(v1, v2):
    """ Returns whichever of v1/v2 is non-empty (or "" if both are).

    Raises ValueError if both are non-empty, i.e. two sources carry
    conflicting package info. """
    if v1 != "" and v2 != "":
        # Bug fix: the original did `raise("package info conflict")`,
        # which raises a TypeError (a plain string is not an
        # exception) instead of a meaningful error.
        raise ValueError("package info conflict")
    return v1 or v2
class Package():
    """ In-memory record of a single Python package: its installed
    state, the version the dependency rule wants, and the versions
    available on PyPI. """

    # NOTE(review): these are class-level (shared) defaults.  __init__
    # rebinds most of them per instance, but `latest_version`,
    # `dependencies` and `dist_info` are only rebound later (by
    # get_wanted_version() / merge()); until then they are shared
    # across instances.  They are not mutated in place in the visible
    # code, so this is benign here.
    name = ""
    version = ""
    wanted_version = ""
    wanted_rule = ""
    latest_version = ""
    # sic: the "avaliable" misspelling is part of the public API.
    avaliable_versions = []
    installed = False
    dependencies = []
    top_level_packs = []
    dist_info = ""
    def __init__(self, **kwargs):
        self.name = kwargs.get("name", "")
        self.version = kwargs.get("version", "")
        self.wanted_version = kwargs.get("wanted_version", "")
        self.wanted_rule = kwargs.get("wanted_rule", "")
        self.installed = kwargs.get("installed", False)
        self.top_level_packs = kwargs.get("top_level_packs", [])
        self.avaliable_versions = kwargs.get("avaliable_versions", [])
    def __repr__(self):
        return '\n'.join([
            "---------------------------",
            "name: {}".format(self.name),
            "version: {}".format(self.version),
            "wanted_version: {}".format(self.wanted_version),
            "installed: {}".format(self.installed)
        ])
    def merge(self, other):
        """ Folds another record for the same package into this one.
        version_xor raises if both sides carry conflicting info. """
        self.version = version_xor(self.version, other.version)
        self.wanted_version = version_xor(self.wanted_version, other.wanted_version)
        self.wanted_rule = version_xor(self.wanted_rule, other.wanted_rule)
        self.installed = self.installed or other.installed
        self.dependencies = self.dependencies or other.dependencies
        self.top_level_packs = self.top_level_packs or other.top_level_packs
        self.dist_info = self.dist_info or other.dist_info
    def get_wanted_version(self):
        """ Queries PyPI for published versions and resolves
        wanted_rule against them. """
        versions_metadata = pypi_api.get_avaliable_versions(self.name)
        self.avaliable_versions = list(map(lambda version: version["version"], versions_metadata))
        self.latest_version = sort_versions(self.avaliable_versions)[-1:][0]
        if len(self.avaliable_versions):
            self.wanted_version = wanted_version(self.wanted_rule, self.avaliable_versions)
    def install(self):
        # Imported here rather than at module top — presumably to
        # avoid an import cycle; TODO confirm.
        from .utils import installer
        if not self.name or not self.wanted_version:
            raise OperationException
        return installer.install(self.name, self.wanted_version)
    def remove(self):
        # Lazy import, same as in install().
        from .utils import installer
        return installer.remove(self.name)
class Packages(list):
    """ A list of Package objects with lookup by package name. """

    def get_by_name(self, package_name):
        """ Returns the first package whose name matches, or None. """
        return next(
            (candidate for candidate in self if candidate.name == package_name),
            None)
| {
"repo_name": "piton-package-manager/piton",
"path": "piton/package.py",
"copies": "1",
"size": "2389",
"license": "mit",
"hash": 8028725391031615000,
"line_mean": 35.196969697,
"line_max": 92,
"alpha_frac": 0.6977814985,
"autogenerated": false,
"ratio": 3.118798955613577,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4316580454113577,
"avg_score": null,
"num_lines": null
} |
# All Packages required
import requests
import ujson as json
import urllib
import collections
import hmac
import hashlib
from base64 import b64encode
from constants import API_ENDPOINT, API_MERCHANT_ENDPOINT, API_SERVICEABILITY_ENDPOINT, STAGING_URL_HOST, PRODUCTION_URL_HOST
from .errors import HTTPError
class OpinioDelivery:
    """ Thin client for the Opinio delivery REST API.

    Every request is signed with an HMAC-SHA1 signature built from
    the request method, host, path, access key and sorted query
    string.

    NOTE(review): this module is Python 2 only (print statements,
    urllib.urlencode). """

    def __init__(self, access_key, secret_key, sandbox=False, debug=False):
        # Credentials issued by Opinio; used to sign every request.
        self.ACCESS_KEY = access_key
        self.SECRET_KEY = secret_key
        # sandbox=True points the client at the staging host.
        if sandbox:
            self.SERVER_HOST = STAGING_URL_HOST
        else:
            self.SERVER_HOST = PRODUCTION_URL_HOST
        self.API_ENDPOINT = 'http://'+self.SERVER_HOST+API_ENDPOINT
        self.API_MERCHANT_ENDPOINT = 'http://'+self.SERVER_HOST+API_MERCHANT_ENDPOINT
        self.API_SERVICEABILITY_ENDPOINT = 'http://'+self.SERVER_HOST+API_SERVICEABILITY_ENDPOINT
        # When true, request/response details are printed to stdout.
        self.DEBUG = debug

    def get_req_header(self, params, method, path):
        """ Builds the signed Authorization header for a request. """
        if params:
            # The signature is computed over the parameters in sorted
            # order, URL-encoded with a few required substitutions.
            sorted_params = collections.OrderedDict(sorted(params.items()))
            qstring = '&'+urllib.urlencode(sorted_params)
            qstring = qstring.replace('+', '%20')
            qstring = qstring.replace('*', '%2A')
            qstring = qstring.replace('%7E', '~')
        else:
            qstring = ''
        encode_request = '\n'.join([
            method,
            self.SERVER_HOST,
            path,
            self.ACCESS_KEY,
            qstring,
            '&SignatureVersion=1',
            '&SignatureMethod=HmacSHA1'])
        sig = hmac.new(self.SECRET_KEY, encode_request, hashlib.sha1)
        auth_key = "Opinio "+self.ACCESS_KEY+":"+b64encode(sig.digest())
        headers = {"Authorization": auth_key}
        return headers

    def _get_repsonse_dict(self, response):
        """ Decodes a JSON response, raising HTTPError for any status
        other than 200/201.

        NOTE(review): "repsonse" is a typo, but renaming would touch
        every call site; kept as-is. """
        if response.status_code not in [200, 201]:
            raise HTTPError(response.content)
        return json.loads(response.content)

    def create_order(self, params):
        """ POSTs a new delivery order; returns the decoded response. """
        if self.DEBUG:
            print '-- Create New Order --'
        headers = self.get_req_header(params, 'POST', API_ENDPOINT)
        response = requests.post(self.API_ENDPOINT, data=params, headers=headers)
        if self.DEBUG:
            print response.content
        return self._get_repsonse_dict(response)

    def get_order(self, order_id):
        """ Fetches a single order by id. """
        if self.DEBUG:
            print '-- Getting Order '+order_id+' --'
        headers = self.get_req_header({}, 'GET', API_ENDPOINT+'/'+order_id)
        response = requests.get(self.API_ENDPOINT+'/'+order_id, headers=headers)
        if self.DEBUG:
            print response.content
        return self._get_repsonse_dict(response)

    def cancel_order(self, order_id):
        """ Cancels an order by PUTting is_cancelled=1. """
        if self.DEBUG:
            print '-- Cancelling Order '+order_id+' --'
        params = {'is_cancelled': 1}
        headers = self.get_req_header(params, 'PUT', API_ENDPOINT+'/'+order_id)
        response = requests.put(self.API_ENDPOINT+'/'+order_id, data=params, headers=headers)
        if self.DEBUG:
            print response.content
        return self._get_repsonse_dict(response)

    def get_orders(self):
        """ Lists all orders. """
        if self.DEBUG:
            print '-- Getting All Orders --'
        headers = self.get_req_header({}, 'GET', API_ENDPOINT)
        if self.DEBUG:
            print headers
        response = requests.get(self.API_ENDPOINT, headers=headers)
        if self.DEBUG:
            print response.content
        return self._get_repsonse_dict(response)

    def create_merchant(self, params):
        """ Registers a new merchant. """
        if self.DEBUG:
            print '-- Create New Merchant --'
        headers = self.get_req_header(params, 'POST', API_MERCHANT_ENDPOINT)
        response = requests.post(self.API_MERCHANT_ENDPOINT, data=params, headers=headers)
        if self.DEBUG:
            print response.content
        return self._get_repsonse_dict(response)

    def merchant_status(self, merchant_id):
        """ Fetches the status of a merchant by id. """
        if self.DEBUG:
            print '-- Getting Merchant Status '+merchant_id+' --'
        headers = self.get_req_header({}, 'GET', API_MERCHANT_ENDPOINT+'/'+merchant_id)
        response = requests.get(self.API_MERCHANT_ENDPOINT+'/'+merchant_id, headers=headers)
        if self.DEBUG:
            print response.content
        return self._get_repsonse_dict(response)

    def serviceability(self, params):
        """ Checks whether a location is serviceable. """
        if self.DEBUG:
            print '-- Serviceability --'
            print API_SERVICEABILITY_ENDPOINT
            print '++++++++++++++++++'
        headers = self.get_req_header(params, 'GET', API_SERVICEABILITY_ENDPOINT)
        response = requests.get(self.API_SERVICEABILITY_ENDPOINT, params=params, headers=headers)
        if self.DEBUG:
            print response.content
        return self._get_repsonse_dict(response)
| {
"repo_name": "anistark/opiniohll",
"path": "OpinioDelivery/OpinioDelivery.py",
"copies": "1",
"size": "4752",
"license": "mit",
"hash": -4076761259631256000,
"line_mean": 38.9327731092,
"line_max": 125,
"alpha_frac": 0.611952862,
"autogenerated": false,
"ratio": 3.9272727272727272,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5039225589272727,
"avg_score": null,
"num_lines": null
} |
__all__ = ["PacketEvaluator"]
class PacketEvaluator:
    r"""
    An object that can be used to lazily evaluate a function.

    PacketEvaluators are created by calling ``TemplateFunction.make_packet``.
    Their purpose is to bind a function to some specific parameters. When
    called, a packet will return the result of calling the function with
    its given parameters.

    A PacketEvaluator can also be set to memoize the result of calling
    itself using the ``set_memoize`` method (memoization is on by
    default).
    """

    # Sentinel marking "no result cached yet".  The original code used
    # None for this, which broke memoization for functions that
    # legitimately return None: they were re-evaluated on every call.
    _MISSING = object()

    def __init__(self, func, params):
        # params is a pair: (positional-args tuple, keyword-args dict).
        self._func = func
        self._params = params
        self._result = self._MISSING
        self._memoize = True

    def __repr__(self):
        r"""
        X.__repr__() <==> repr(X)
        """
        return "PacketEvaluator for function <%s>" % self._func.__name__

    def __call__(self):
        r"""
        X.__call__() <==> X()

        Evaluates the bound function.  With memoization enabled, the
        first result (even None) is cached and returned thereafter.
        """
        if self._memoize:
            if self._result is self._MISSING:
                self._result = self._func(*self._params[0], **self._params[1])
            return self._result
        return self._func(*self._params[0], **self._params[1])

    def set_memoize(self, bool):
        r"""
        Set the memoization state.

        True >> Memoize the result of calling the packet.
        False >> Discard the result of calling the packet.
        """
        # Parameter name shadows the builtin, but is kept for
        # compatibility with any existing keyword callers.
        self._memoize = bool

    @property
    def function(self):
        # The bound function.
        return self._func
@property
def parameters(self):
return self._params | {
"repo_name": "Michaelfonzolo/py-template-function",
"path": "source/main/python/template-function/packet.py",
"copies": "1",
"size": "1297",
"license": "mit",
"hash": 5254582311954866000,
"line_mean": 23.9615384615,
"line_max": 73,
"alpha_frac": 0.6638396299,
"autogenerated": false,
"ratio": 3.275252525252525,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4439092155152525,
"avg_score": null,
"num_lines": null
} |
###############################################################################
# Import needed library
import sys
###############################################################################
# Define the getPath() function
# Implements the "linear time" extra credit option with ideas from
# https://en.wikipedia.org/wiki/Floyd-Warshall_algorithm#Path_reconstruction
def getPath(n, u, v):
    """Rebuild the shortest u->v path from the 'next' matrix *n*.

    Follows the Wikipedia Floyd-Warshall path-reconstruction scheme:
    repeatedly hop to n[current][v] until the target is reached.
    """
    vertices = [u]
    current = u
    while current != v:
        current = n[current][v]
        vertices.append(current)
    return vertices
# Define the floyd_warshall() function
# m is the adjacency matrix, n is the next matrix
def floyd_warshall(m, n):
    """Run Floyd-Warshall in place.

    *m* is the adjacency/distance matrix and *n* the path-reconstruction
    'next' matrix; both are modified destructively.  The diagonal of *m*
    is zeroed first, matching the original.
    """
    size = len(m[0])
    for node in range(size):
        m[node][node] = 0
    for k in range(size):
        for i in range(size):
            for j in range(size):
                candidate = m[i][k] + m[k][j]
                if candidate < m[i][j]:
                    m[i][j] = candidate
                    n[i][j] = n[i][k]
###############################################################################
# Create lists to store input
words = []
queries = []
# Read every stdin line until EOF; lines are kept verbatim for now.
while True:
    try:
        # Store everything in words list for now
        line = input()
        words.append(line)
    except EOFError:
        break
# Input layout: n, then n words, then m, then m queries.
n = int(words[0])
m = int(words[n+1])
# Clean up & separate into two lists
queries = words[n+2:]
words = words[1:n+1]
# Create adjacency matrix; sys.maxsize stands in for "no edge"/infinity.
adj = [[sys.maxsize for x in range(n)] for x in range(n)]
# Create next matrix for path reconstruction.
# NOTE(review): the name shadows the builtin next(); kept as-is.
next = [[None for x in range(n)] for x in range(n)]
# Find edges
# For each word in the list
for word in words:
    # Go through the rest of the words
    for in_word in [x for x in words if x != word]:
        # Check if the length is the same
        if(len(word) == len(in_word)):
            # Get differences
            differences = [i for i in range(len(word)) if word[i] != in_word[i]]
            # If different at only 1 character, we have an edge
            if(len(differences) == 1):
                # Edge weight is the absolute character-code difference.
                weight = ord(word[differences[0]]) - ord(in_word[differences[0]])
                # If negative, make positive
                if(weight < 0):
                    weight *= -1
                # Insert edge into adjacency matrix (undirected)
                i = words.index(word)
                j = words.index(in_word)
                adj[i][j] = weight
                adj[j][i] = weight
                # Insert into next matrix
                next[i][j] = j
                next[j][i] = i
# Run Floyd-Warshall
floyd_warshall(adj, next)
# Compute "average number of reachable words".
# NOTE: counts every finite entry, including the zeroed diagonal (the
# word itself) -- this matches the original behavior.
count = 0
for row in adj:
    count += len([x for x in row if x != sys.maxsize])
# Print out result
print(str(round(count/n, 2)))
# Run queries: each query is "word1 word2".
for query in queries:
    split = query.split(" ")
    i = words.index(split[0])
    j = words.index(split[1])
    if(adj[i][j] == sys.maxsize):
        # No path
        print(query + " not reachable")
    else:
        # There is a path; print total weight followed by the path words.
        strToPrint = str(adj[i][j])
        for v in getPath(next, i, j):
            strToPrint += " " + words[v]
        print(strToPrint)
print(strToPrint) | {
"repo_name": "dixoncrews/ncsu-fall16-csc505",
"path": "all_pairs_shortest_paths/allpairs.py",
"copies": "1",
"size": "3274",
"license": "mit",
"hash": 5470844551273960000,
"line_mean": 26.0661157025,
"line_max": 81,
"alpha_frac": 0.4899205864,
"autogenerated": false,
"ratio": 3.605726872246696,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4595647458646696,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Parachute']
from pymavlink import mavutil
from ..connection import CommandLong
from ...connection import Message
from ...configuration import Configuration
from ...state import State
from ...specification import Specification
from ...environment import Environment
from ...command import Parameter, Command
from ...specification import Specification, Idle
from ...valueRange import DiscreteValueRange
def timeout_parachute_normally(a, s, e, c) -> float:
    """Timeout budget for a normal parachute release.

    Scales with the current altitude (time-per-metre from the
    configuration) plus the constant timeout offset.
    """
    return s.altitude * c.time_per_metre_travelled + c.constant_timeout_offset
ParachuteNormally = Specification(
'normal',
"""
(and
(= $parachute_action 2)
(= _armed true)
(= _mode "GUIDED")
(> _altitude 10.0))
""",
"""
(and
(= __armed false)
(< __altitude 0.3)
(= __vz 0.0))
""",
timeout_parachute_normally)
class Parachute(Command):
    """Parachute command for ArduCopter (MAV_CMD_DO_PARACHUTE, id 208)."""
    uid = 'ardu:copter:parachute'
    name = 'parachute'
    # 0=disable, 1=enable, 2=release
    parameters = [
        Parameter('parachute_action', DiscreteValueRange([0, 1, 2]))
    ]
    specifications = [ParachuteNormally, Idle]

    def to_message(self) -> Message:
        """Build the COMMAND_LONG message carrying this parachute action."""
        return CommandLong(cmd_id=208, param1=self.parachute_action)

    def dispatch(self,
                 sandbox: 'Sandbox',
                 state: State,
                 environment: Environment,
                 config: Configuration
                 ) -> None:
        """Encode and send the parachute command over the MAVLink link."""
        connection = sandbox.connection
        command = connection.message_factory.command_long_encode(
            0, 0,
            mavutil.mavlink.MAV_CMD_DO_PARACHUTE,
            0, self.parachute_action, 0, 0, 0, 0, 0, 0)
        connection.send_mavlink(command)
| {
"repo_name": "squaresLab/Houston",
"path": "houston/ardu/copter/parachute.py",
"copies": "1",
"size": "1784",
"license": "mit",
"hash": 8423892688206211000,
"line_mean": 25.6268656716,
"line_max": 68,
"alpha_frac": 0.5908071749,
"autogenerated": false,
"ratio": 3.8614718614718613,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4952279036371861,
"avg_score": null,
"num_lines": null
} |
__all__ = ['parse_arlequin']
import re
def parse_arlequin(entry):
    """Parse a block of Arlequin formatted text.

    Raises TypeError unless the text starts with '[Profile]'.  Returns a
    dict with a 'Profile' entry and, when present, a 'Data' entry.
    """
    if not entry.upper().startswith('[PROFILE]'):
        raise TypeError("entry does not start with '[Profile]'")
    parsed = {}
    sections = [piece.strip() for piece in RE_SECTIONS.split(entry)
                if piece.strip()]
    for section in sections:
        header = section.upper()
        if header.startswith('[PROFILE]'):
            parsed['Profile'] = parse_profile(section)
        elif header.startswith('[DATA]'):
            # Data parsing depends on whether the profile declared
            # genotypic data, so [Profile] must come first.
            parsed['Data'] = parse_data(section, parsed['Profile']['GenotypicData'])
    return parsed
def parse_arlequin_results(results_text):
    """Parse a block of Arlequin results text (not yet implemented)."""
    raise NotImplementedError
#
# PRIVATE UTILITY FUNCTIONS USED IN PARSER
#
# These functions are implementation details and should not be used outside of
# this parser. There is no guarantee that any of these will be maintained or
# necessarily function the same as the parser evolves. The call signature and
# return values of the 'parse_arlequin' function are the only supported public
# interface.
#
def parse_profile(profile_text):
    """Parse the [Profile] section into a dict of typed settings.

    Keys absent from the text keep the Arlequin defaults below; keys not
    in the default table are ignored.
    """
    parsed = {'Title': None,
              'NbSamples': None,
              'DataType': None,
              'GenotypicData': None,
              'LocusSeparator': 'WHITESPACE',
              'GameticPhase': True,
              'RecessiveData': False,
              'RecessiveAllele': 'null',
              'MissingData': '?',
              'Frequency': 'ABS',
              'CompDistMatrix': False,
              'FrequencyThreshold': 1e-5,
              'EpsilonValue': 1e-7}
    for line in RE_NEWLINE.split(profile_text):
        if '=' in line:
            key, val = line.strip().split('=', 1)
            # dict.has_key() was removed in Python 3; the 'in' operator
            # behaves identically on Python 2 and 3.
            if key in parsed:
                parsed[key] = convert(key, val)
    return parsed
def parse_data(data_text, genotypic):
    """Parse the entire Data section."""
    parsed = {}
    # The first chunk of the split is just the '[Data]' line, so drop it.
    for section in RE_DATA_SUBDIV.split(data_text)[1:]:
        if RE_SAMPLES.match(section):
            parsed['Samples'] = parse_samples(section, genotypic)
    return parsed
def parse_samples(samples_text, genotypic):
    """Parse the entire Samples sub-section into a list of sample dicts."""
    return [parse_sample(block, genotypic)
            for block in RE_SAMPLE.findall(samples_text)]
def parse_sample(sample_text, genotypic):
    """Parse a single Sample into its name, size and data records."""
    name = clean_up(RE_SAMPLENAME.search(sample_text).group(1))
    size = int(RE_SAMPLESIZE.search(sample_text).group(1))
    data = parse_sampledata(RE_SAMPLEDATA.search(sample_text).group(1),
                            genotypic)
    return {'SampleName': name, 'SampleSize': size, 'SampleData': data}
def parse_sampledata(sd_text, genotypic):
    """Parse the body of a SampleData block into a list of individuals.

    Genotypic data uses two lines per individual (the second line holds
    the second allele set); haplotypic data uses one line each.
    """
    lines = RE_NEWLINE.split(sd_text.strip())
    individuals = []
    if genotypic:
        # Individuals take two lines each; an odd line count raises
        # IndexError, exactly as the original did.
        for i in range(0, len(lines), 2):
            name, freq, first_alleles = lines[i].split(None, 2)
            second_alleles = lines[i + 1].strip()
            individuals.append({'name': name,
                                'frequency': int(freq),
                                'data': [first_alleles, second_alleles]})
    else:
        # Each individual is on its own line.
        for line in lines:
            name, freq, data = line.split(None, 2)
            individuals.append({'name': name,
                                'frequency': int(freq),
                                'data': data})
    return individuals
def clean_up(strng):
    """Strip excess whitespace and de-quote a string."""
    strng = strng.strip()
    # Remove a matching pair of surrounding double or single quotes,
    # double quotes taking precedence (as in the original).
    for pattern in (RE_DOUBLE_QUOTED, RE_SINGLE_QUOTED):
        quote_mo = pattern.match(strng)
        if quote_mo:
            return quote_mo.group(1)
    return strng
def convert(key, val):
    """Convert a raw string value to the declared type for *key*.

    Returns None (implicitly) for keys whose declared type is not one of
    str/bool/int/float, matching the original fall-through.
    """
    target_type = PROFILE_TYPES[key]
    if target_type == str:
        return clean_up(val)
    if target_type == bool:
        # Arlequin encodes booleans as '1'/'0'.
        return val == '1'
    if target_type == int:
        return int(val)
    if target_type == float:
        return float(val)
#
# REGULAR EXPRESSIONS USED IN PARSER
#
#: Match newlines
RE_NEWLINE = re.compile(r'\r\n|\r|\n')
#: Match "quoted" or 'quoted' text, capturing the inner text
RE_DOUBLE_QUOTED = re.compile(r'^"(.*?)"$')
RE_SINGLE_QUOTED = re.compile(r"^'(.*?)'$")
#: Match sections
RE_SECTIONS = re.compile(r'(^|[\r\n]+)(?=\[\w)')
#: Match subsections in Data section
RE_DATA_SUBDIV = re.compile(r'[\r\n]\s*(?=\[\[)')
#: Match Data Samples subsection
RE_SAMPLES = re.compile(r'\s*\[\[Samples\]\]')
#: Match individual Sample subsection
RE_SAMPLE = re.compile(r'Sample.*?}', re.S)
#: Extract SampleName from Sample sub-section
RE_SAMPLENAME = re.compile(r'SampleName\s*=\s*(.*)')
#: Extract SampleSize from Sample sub-section
RE_SAMPLESIZE = re.compile(r'SampleSize\s*=\s*(\d+)')
#: Extract SampleData from Sample sub-section
RE_SAMPLEDATA = re.compile(r'SampleData\s*=\s*{([^}]+)', re.S)
#: Extract individual from SampleData
#: NOTE(review): the pattern is empty -- this regex appears unused/unfinished.
RE_SAMPLE_INDIVIDUAL = re.compile(r'', re.S)
#
# CONSTANTS USED IN PARSING
#
#: Target Python type for each recognised [Profile] key; consumed by
#: convert() to coerce the raw string values.
PROFILE_TYPES = {'Title': str,
                 'NbSamples': int,
                 'DataType': str,
                 'GenotypicData': bool,
                 'LocusSeparator': str,
                 'GameticPhase': bool,
                 'RecessiveData': bool,
                 'RecessiveAllele': str,
                 'MissingData': str,
                 'Frequency': str,
                 'CompDistMatrix': bool,
                 'FrequencyThreshold': float,
                 'EpsilonValue': float}
| {
"repo_name": "ryanraaum/oldowan.arlequin",
"path": "oldowan/arlequin/parse.py",
"copies": "1",
"size": "5753",
"license": "mit",
"hash": -8430177679296048000,
"line_mean": 30.2663043478,
"line_max": 93,
"alpha_frac": 0.5991656527,
"autogenerated": false,
"ratio": 3.457331730769231,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9504590846543305,
"avg_score": 0.010381307385185185,
"num_lines": 184
} |
__all__ = ['parse', 'AST', 'ParserError', 'Parser']
from spark import GenericASTBuilder, GenericASTMatcher
import string, exceptions, sys
from UserList import UserList
from Scanner import Token
class AST(UserList):
    # Abstract-syntax-tree node: a typed list of child nodes.
    # NOTE: Python 2 code throughout (intern(), string.replace(),
    # __getslice__); kept verbatim.
    def __init__(self, type, kids=[]):
        # WARNING(review): the mutable default 'kids' is shared across
        # calls; preserved as-is to avoid behavior changes.
        self.type = intern(type)
        UserList.__init__(self, kids)
    def __getslice__(self, low, high): return self.data[low:high]
    def __eq__(self, o):
        # Equal to another AST when type and children both match, or to a
        # bare type tag (string) when only the type matches.
        if isinstance(o, AST):
            return self.type == o.type \
                   and UserList.__eq__(self, o)
        else:
            return self.type == o
    def __hash__(self): return hash(self.type)
    def __repr__(self, indent=''):
        # Render the subtree, indenting each child's lines.
        rv = str(self.type)
        for k in self:
            rv = rv + '\n' + string.replace(str(k), '\n', '\n  ')
        return rv
class ParserError(Exception):
    """Raised when parsing fails at (or near) a particular token."""
    def __init__(self, token, offset):
        self.token = token
        self.offset = offset

    def __str__(self):
        return "Syntax error at or near `%r' token at offset %s" % \
               (self.token, self.offset)
class Parser(GenericASTBuilder):
    # Grammar-driven AST builder for decompiled CPython bytecode.
    # NOTE: spark's GenericASTBuilder harvests the docstrings of the p_*
    # methods below as grammar productions -- those docstrings are
    # executable data, not documentation, and must not be edited as prose.
    # NOTE: Python 2 code (print statement in __ambiguity, dict.keys()
    # mutation pattern in cleanup); kept verbatim.
    def __init__(self):
        GenericASTBuilder.__init__(self, AST, 'stmts')
        # Rule keys already added for variable-arity opcodes; see parse().
        self.customized = {}
    def cleanup(self):
        """
        Remove recursive references to allow garbage
        collector to collect this object.
        """
        for dict in (self.rule2func, self.rules, self.rule2name, self.first):
            for i in dict.keys():
                dict[i] = None
        for i in dir(self):
            setattr(self, i, None)
    def error(self, token):
        # spark hook: called on a parse failure.
        raise ParserError(token, token.offset)
    def typestring(self, token):
        # spark hook: the grammar symbol for a scanner token.
        return token.type
    def p_funcdef(self, args):
        '''
        stmt ::= funcdef
        funcdef ::= mkfunc designator
        load_closure ::= load_closure LOAD_CLOSURE
        load_closure ::= LOAD_CLOSURE
        '''
    def p_list_comprehension(self, args):
        '''
        expr ::= list_compr
        list_compr ::= BUILD_LIST_0 DUP_TOP LOAD_ATTR
                designator list_iter del_stmt
        list_iter ::= list_for
        list_iter ::= list_if
        list_iter ::= lc_body
        list_for ::= expr _for designator list_iter
                JUMP_ABSOLUTE COME_FROM
        list_if ::= expr condjmp list_iter
                _jump COME_FROM POP_TOP
                COME_FROM
        lc_body ::= LOAD_NAME expr CALL_FUNCTION_1 POP_TOP
        lc_body ::= LOAD_FAST expr CALL_FUNCTION_1 POP_TOP
        '''
    def p_augmented_assign(self, args):
        '''
        stmt ::= augassign1
        stmt ::= augassign2
        augassign1 ::= expr expr inplace_op designator
        augassign1 ::= expr expr inplace_op ROT_THREE STORE_SUBSCR
        augassign1 ::= expr expr inplace_op ROT_TWO   STORE_SLICE+0
        augassign1 ::= expr expr inplace_op ROT_THREE STORE_SLICE+1
        augassign1 ::= expr expr inplace_op ROT_THREE STORE_SLICE+2
        augassign1 ::= expr expr inplace_op ROT_FOUR  STORE_SLICE+3
        augassign2 ::= expr DUP_TOP LOAD_ATTR expr
                inplace_op ROT_TWO   STORE_ATTR
        inplace_op ::= INPLACE_ADD
        inplace_op ::= INPLACE_SUBTRACT
        inplace_op ::= INPLACE_MULTIPLY
        inplace_op ::= INPLACE_DIVIDE
        inplace_op ::= INPLACE_TRUE_DIVIDE
        inplace_op ::= INPLACE_FLOOR_DIVIDE
        inplace_op ::= INPLACE_MODULO
        inplace_op ::= INPLACE_POWER
        inplace_op ::= INPLACE_LSHIFT
        inplace_op ::= INPLACE_RSHIFT
        inplace_op ::= INPLACE_AND
        inplace_op ::= INPLACE_XOR
        inplace_op ::= INPLACE_OR
        '''
    def p_assign(self, args):
        '''
        stmt ::= assign
        assign ::= expr DUP_TOP designList
        assign ::= expr designator
        '''
    def p_print(self, args):
        '''
        stmt ::= print_stmt
        stmt ::= print_stmt_nl
        stmt ::= print_nl_stmt
        print_stmt ::= expr PRINT_ITEM
        print_nl_stmt ::= PRINT_NEWLINE
        print_stmt_nl ::= print_stmt print_nl_stmt
        '''
    def p_print_to(self, args):
        '''
        stmt ::= print_to
        stmt ::= print_to_nl
        stmt ::= print_nl_to
        print_to ::= expr print_to_items POP_TOP
        print_to_nl ::= expr print_to_items PRINT_NEWLINE_TO
        print_nl_to ::= expr PRINT_NEWLINE_TO
        print_to_items ::= print_to_items print_to_item
        print_to_items ::= print_to_item
        print_to_item ::= DUP_TOP expr ROT_TWO PRINT_ITEM_TO
        '''
        # expr   print_to*   POP_TOP
        # expr { print_to* } PRINT_NEWLINE_TO
    def p_import15(self, args):
        '''
        stmt ::= importstmt
        stmt ::= importfrom
        importstmt ::= IMPORT_NAME STORE_FAST
        importstmt ::= IMPORT_NAME STORE_NAME
        importfrom ::= IMPORT_NAME importlist POP_TOP
        importlist ::= importlist IMPORT_FROM
        importlist ::= IMPORT_FROM
        '''
    # NOTE(review): the three *25 rules below each contain the production
    # twice, the second copy with a mangled ':: =' separator -- this looks
    # like data corruption rather than intent, but is preserved verbatim.
    def p_import20(self, args):
        '''
        stmt ::= importstmt2
        stmt ::= importfrom2
        stmt ::= importstar2
        importstmt2 ::= LOAD_CONST import_as
        importstar2 ::= LOAD_CONST IMPORT_NAME IMPORT_STAR
        importfrom2 ::= LOAD_CONST IMPORT_NAME importlist2 POP_TOP
        importlist2 ::= importlist2 import_as
        importlist2 ::= import_as
        import_as ::= IMPORT_NAME designator
        import_as ::= IMPORT_NAME LOAD_ATTR designator
        import_as ::= IMPORT_FROM designator
        stmt ::= importstmt25
        stmt ::= importfrom25
        stmt ::= importstar25
        importstmt25 ::= LOAD_CONST LOAD_CONST import_as importstmt25:: = LOAD_CONST LOAD_CONST import_as
        importstar25 ::= LOAD_CONST LOAD_CONST IMPORT_NAME IMPORT_STAR importstar25:: = LOAD_CONST LOAD_CONST IMPORT_NAME IMPORT_STAR
        importfrom25 ::= LOAD_CONST LOAD_CONST IMPORT_NAME importlist2 POP_TOP importfrom25:: = LOAD_CONST LOAD_CONST IMPORT_NAME importlist2 POP_TOP
        '''
    def p_grammar(self, args):
        '''
        stmts ::= stmts stmt
        stmts ::= stmt
        stmts_opt ::= stmts
        stmts_opt ::= passstmt
        passstmt ::=
        designList ::= designator designator
        designList ::= designator DUP_TOP designList
        designator ::= STORE_FAST
        designator ::= STORE_NAME
        designator ::= STORE_GLOBAL
        designator ::= STORE_DEREF
        designator ::= expr STORE_ATTR
        designator ::= expr STORE_SLICE+0
        designator ::= expr expr STORE_SLICE+1
        designator ::= expr expr STORE_SLICE+2
        designator ::= expr expr expr STORE_SLICE+3
        designator ::= store_subscr
        store_subscr ::= expr expr STORE_SUBSCR
        designator ::= unpack
        designator ::= unpack_list
        stmt ::= classdef
        stmt ::= call_stmt
        call_stmt ::= expr POP_TOP
        stmt ::= return_stmt
        return_stmt ::= expr RETURN_VALUE
        stmt ::= yield_stmt
        yield_stmt ::= expr YIELD_STMT
        yield_stmt ::= expr YIELD_VALUE
        stmt ::= break_stmt
        break_stmt ::= BREAK_LOOP
        stmt ::= continue_stmt
        continue_stmt ::= JUMP_ABSOLUTE
        continue_stmt ::= CONTINUE_LOOP
        stmt ::= raise_stmt
        raise_stmt ::= exprlist RAISE_VARARGS
        raise_stmt ::= nullexprlist RAISE_VARARGS
        stmt ::= exec_stmt
        exec_stmt ::= expr exprlist DUP_TOP EXEC_STMT
        exec_stmt ::= expr exprlist EXEC_STMT
        stmt ::= assert
        stmt ::= assert2
        stmt ::= assert3
        stmt ::= assert4
        stmt ::= ifstmt
        stmt ::= ifelsestmt
        stmt ::= whilestmt
        stmt ::= while1stmt
        stmt ::= whileelsestmt
        stmt ::= while1elsestmt
        stmt ::= forstmt
        stmt ::= forelsestmt
        stmt ::= trystmt
        stmt ::= tryfinallystmt
        stmt ::= del_stmt
        del_stmt ::= DELETE_FAST
        del_stmt ::= DELETE_NAME
        del_stmt ::= DELETE_GLOBAL
        del_stmt ::= expr DELETE_SLICE+0
        del_stmt ::= expr expr DELETE_SLICE+1
        del_stmt ::= expr expr DELETE_SLICE+2
        del_stmt ::= expr expr expr DELETE_SLICE+3
        del_stmt ::= delete_subscr
        delete_subscr ::= expr expr DELETE_SUBSCR
        del_stmt ::= expr DELETE_ATTR
        kwarg ::= LOAD_CONST expr
        classdef ::= LOAD_CONST expr mkfunc
                CALL_FUNCTION_0 BUILD_CLASS designator
        condjmp ::= JUMP_IF_FALSE POP_TOP
        condjmp ::= JUMP_IF_TRUE POP_TOP
        assert ::= expr JUMP_IF_FALSE POP_TOP
                expr JUMP_IF_TRUE POP_TOP
                LOAD_GLOBAL RAISE_VARARGS
                COME_FROM COME_FROM POP_TOP
        assert2 ::= expr JUMP_IF_FALSE POP_TOP
                expr JUMP_IF_TRUE POP_TOP
                LOAD_GLOBAL expr RAISE_VARARGS
                COME_FROM COME_FROM POP_TOP
        assert3 ::= expr JUMP_IF_TRUE POP_TOP
                LOAD_GLOBAL RAISE_VARARGS
                COME_FROM POP_TOP
        assert4 ::= expr JUMP_IF_TRUE POP_TOP
                LOAD_GLOBAL expr RAISE_VARARGS
                COME_FROM POP_TOP
        _jump ::= JUMP_ABSOLUTE
        _jump ::= JUMP_FORWARD
        ifstmt ::= expr condjmp stmts_opt
                _jump COME_FROM POP_TOP COME_FROM
        ifelsestmt ::= expr condjmp stmts_opt
                _jump COME_FROM
                POP_TOP stmts COME_FROM
        trystmt ::= SETUP_EXCEPT stmts_opt
                POP_BLOCK _jump
                COME_FROM except_stmt
        try_end ::= END_FINALLY COME_FROM
        try_end ::= except_else
        except_else ::= END_FINALLY COME_FROM stmts
        except_stmt ::= except_cond except_stmt COME_FROM
        except_stmt ::= except_conds try_end COME_FROM
        except_stmt ::= except try_end COME_FROM
        except_stmt ::= try_end
        except_conds ::= except_cond except_conds COME_FROM
        except_conds ::=
        except_cond ::= except_cond1
        except_cond ::= except_cond2
        except_cond1 ::= DUP_TOP expr COMPARE_OP
                JUMP_IF_FALSE
                POP_TOP POP_TOP POP_TOP POP_TOP
                stmts_opt _jump COME_FROM
                POP_TOP
        except_cond2 ::= DUP_TOP expr COMPARE_OP
                JUMP_IF_FALSE
                POP_TOP POP_TOP designator POP_TOP
                stmts_opt _jump COME_FROM
                POP_TOP
        except ::= POP_TOP POP_TOP POP_TOP
                stmts_opt _jump
        tryfinallystmt ::= SETUP_FINALLY stmts_opt
                POP_BLOCK LOAD_CONST
                COME_FROM stmts_opt END_FINALLY
        whilestmt ::= SETUP_LOOP
                expr JUMP_IF_FALSE POP_TOP
                stmts_opt JUMP_ABSOLUTE
                COME_FROM POP_TOP POP_BLOCK COME_FROM
        while1stmt ::= SETUP_LOOP
                JUMP_FORWARD JUMP_IF_FALSE POP_TOP COME_FROM
                stmts_opt JUMP_ABSOLUTE
                COME_FROM POP_TOP POP_BLOCK COME_FROM
        whileelsestmt ::= SETUP_LOOP
                expr JUMP_IF_FALSE POP_TOP
                stmts_opt JUMP_ABSOLUTE
                COME_FROM POP_TOP POP_BLOCK
                stmts COME_FROM
        while1elsestmt ::= SETUP_LOOP
                JUMP_FORWARD JUMP_IF_FALSE POP_TOP COME_FROM
                stmts_opt JUMP_ABSOLUTE
                COME_FROM POP_TOP POP_BLOCK
                stmts COME_FROM
        _for ::= GET_ITER FOR_ITER
        _for ::= LOAD_CONST FOR_LOOP
        forstmt ::= SETUP_LOOP expr _for designator
                stmts_opt JUMP_ABSOLUTE
                COME_FROM POP_BLOCK COME_FROM
        forelsestmt ::= SETUP_LOOP expr _for designator
                stmts_opt JUMP_ABSOLUTE
                COME_FROM POP_BLOCK stmts COME_FROM
        '''
    def p_expr(self, args):
        '''
        expr ::= load_closure mklambda
        expr ::= mklambda
        expr ::= SET_LINENO
        expr ::= LOAD_FAST
        expr ::= LOAD_NAME
        expr ::= LOAD_CONST
        expr ::= LOAD_GLOBAL
        expr ::= LOAD_DEREF
        expr ::= LOAD_LOCALS
        expr ::= expr LOAD_ATTR
        expr ::= binary_expr
        expr ::= build_list
        binary_expr ::= expr expr binary_op
        binary_op ::= BINARY_ADD
        binary_op ::= BINARY_SUBTRACT
        binary_op ::= BINARY_MULTIPLY
        binary_op ::= BINARY_DIVIDE
        binary_op ::= BINARY_TRUE_DIVIDE
        binary_op ::= BINARY_FLOOR_DIVIDE
        binary_op ::= BINARY_MODULO
        binary_op ::= BINARY_LSHIFT
        binary_op ::= BINARY_RSHIFT
        binary_op ::= BINARY_AND
        binary_op ::= BINARY_OR
        binary_op ::= BINARY_XOR
        binary_op ::= BINARY_POWER
        expr ::= binary_subscr
        binary_subscr ::= expr expr BINARY_SUBSCR
        expr ::= expr expr DUP_TOPX_2 BINARY_SUBSCR
        expr ::= cmp
        expr ::= expr UNARY_POSITIVE
        expr ::= expr UNARY_NEGATIVE
        expr ::= expr UNARY_CONVERT
        expr ::= expr UNARY_INVERT
        expr ::= expr UNARY_NOT
        expr ::= mapexpr
        expr ::= expr SLICE+0
        expr ::= expr expr SLICE+1
        expr ::= expr expr SLICE+2
        expr ::= expr expr expr SLICE+3
        expr ::= expr DUP_TOP SLICE+0
        expr ::= expr expr DUP_TOPX_2 SLICE+1
        expr ::= expr expr DUP_TOPX_2 SLICE+2
        expr ::= expr expr expr DUP_TOPX_3 SLICE+3
        expr ::= and
        expr ::= and2
        expr ::= or
        or ::= expr JUMP_IF_TRUE POP_TOP expr COME_FROM
        and ::= expr JUMP_IF_FALSE POP_TOP expr COME_FROM
        and2 ::= _jump JUMP_IF_FALSE POP_TOP COME_FROM expr COME_FROM
        cmp ::= cmp_list
        cmp ::= compare
        compare ::= expr expr COMPARE_OP
        cmp_list ::= expr cmp_list1 ROT_TWO POP_TOP
                COME_FROM
        cmp_list1 ::= expr DUP_TOP ROT_THREE
                COMPARE_OP JUMP_IF_FALSE POP_TOP
                cmp_list1 COME_FROM
        cmp_list1 ::= expr DUP_TOP ROT_THREE
                COMPARE_OP JUMP_IF_FALSE POP_TOP
                cmp_list2 COME_FROM
        cmp_list2 ::= expr COMPARE_OP JUMP_FORWARD
        mapexpr ::= BUILD_MAP kvlist
        kvlist ::= kvlist kv
        kvlist ::= kvlist kv2
        kvlist ::=
        kv ::= DUP_TOP expr ROT_TWO expr STORE_SUBSCR
        kv2 ::= DUP_TOP expr expr ROT_THREE STORE_SUBSCR
        exprlist ::= exprlist expr
        exprlist ::= expr
        nullexprlist ::=
        '''
    def nonterminal(self, nt, args):
        # Flatten iterated productions so e.g. 'stmts' becomes one list
        # instead of a deep left-recursive chain.
        collect = ('stmts', 'exprlist', 'kvlist')
        if nt in collect and len(args) > 1:
            #
            # Collect iterated thingies together.
            #
            rv = args[0]
            rv.append(args[1])
        else:
            rv = GenericASTBuilder.nonterminal(self, nt, args)
        return rv
    def __ambiguity(self, children):
        # only for debugging! to be removed hG/2000-10-15
        print children
        return GenericASTBuilder.ambiguity(self, children)
    def resolve(self, list):
        # Prefer 'funcdef' when a reduction is ambiguous with 'assign'.
        if len(list) == 2 and 'funcdef' in list and 'assign' in list:
            return 'funcdef'
        #print >> sys.stderr, 'resolve', str(list)
        return GenericASTBuilder.resolve(self, list)
    # Default no-op reduction action for dynamically added rules.
    nop = lambda self, args: None
# Module-level singleton parser, reused (and incrementally customized)
# by parse() below.
p = Parser()
def parse(tokens, customize):
    """Parse a token stream into an AST using the module-level parser.

    *customize* maps variable-arity opcode names (e.g. 'CALL_FUNCTION_2')
    to their argument counts; a grammar rule is added on the fly for each
    one not seen before.
    """
    # Special handling for opcodes that take a variable number
    # of arguments -- we add a new rule for each:
    #
    #    expr ::= {expr}^n BUILD_LIST_n
    #    expr ::= {expr}^n BUILD_TUPLE_n
    #    expr ::= {expr}^n BUILD_SLICE_n
    #    unpack_list ::= UNPACK_LIST {expr}^n
    #    unpack ::= UNPACK_TUPLE {expr}^n
    #    unpack ::= UNPACK_SEQUENCE {expr}^n
    #    mkfunc ::= {expr}^n LOAD_CONST MAKE_FUNCTION_n
    #    mkfunc ::= {expr}^n load_closure LOAD_CONST MAKE_FUNCTION_n
    #    expr ::= expr {expr}^n CALL_FUNCTION_n
    #    expr ::= expr {expr}^n CALL_FUNCTION_VAR_n POP_TOP
    #    expr ::= expr {expr}^n CALL_FUNCTION_VAR_KW_n POP_TOP
    #    expr ::= expr {expr}^n CALL_FUNCTION_KW_n POP_TOP
    #
    global p
    for k, v in customize.items():
        # avoid adding the same rule twice to this parser
        # (dict.has_key() was removed in Python 3; 'in' works everywhere)
        if k in p.customized:
            continue
        p.customized[k] = None
        nop = lambda self, args: None
        # Strip the trailing '_n' suffix to recover the base opcode name
        # (str.rfind replaces the long-gone string.rfind helper).
        op = k[:k.rfind('_')]
        if op in ('BUILD_LIST', 'BUILD_TUPLE'):
            rule = 'build_list ::= ' + 'expr '*v + k
        elif op == 'BUILD_SLICE':
            rule = 'expr ::= ' + 'expr '*v + k
        elif op in ('UNPACK_TUPLE', 'UNPACK_SEQUENCE'):
            rule = 'unpack ::= ' + k + ' designator'*v
        elif op == 'UNPACK_LIST':
            rule = 'unpack_list ::= ' + k + ' designator'*v
        elif op == 'DUP_TOPX':
            # no need to add a rule
            continue
            #rule = 'dup_topx ::= ' + 'expr '*v + k
        elif op == 'MAKE_FUNCTION':
            p.addRule('mklambda ::= %s LOAD_LAMBDA %s' %
                      ('expr '*v, k), nop)
            rule = 'mkfunc ::= %s LOAD_CONST %s' % ('expr '*v, k)
        elif op == 'MAKE_CLOSURE':
            p.addRule('mklambda ::= %s load_closure LOAD_LAMBDA %s' %
                      ('expr '*v, k), nop)
            rule = 'mkfunc ::= %s load_closure LOAD_CONST %s' % ('expr '*v, k)
        elif op in ('CALL_FUNCTION', 'CALL_FUNCTION_VAR',
                    'CALL_FUNCTION_VAR_KW', 'CALL_FUNCTION_KW'):
            na = (v & 0xff)        # positional parameters
            nk = (v >> 8) & 0xff   # keyword parameters
            # number of apply-equivalent arguments; '//' keeps the result
            # an int under Python 3 true division
            nak = ( len(op)-len('CALL_FUNCTION') ) // 3
            rule = 'expr ::= expr ' + 'expr '*na + 'kwarg '*nk \
                   + 'expr ' * nak + k
        else:
            # Raising a bare string is illegal in modern Python; raise a
            # proper exception type instead.
            raise ValueError('unknown customize token %s' % k)
        p.addRule(rule, nop)
    ast = p.parse(tokens)
    p.cleanup()
    return ast
| {
"repo_name": "evandrix/Splat",
"path": "doc/Parser.py",
"copies": "1",
"size": "14855",
"license": "mit",
"hash": -766380107425525600,
"line_mean": 27.2414448669,
"line_max": 149,
"alpha_frac": 0.6473914507,
"autogenerated": false,
"ratio": 2.8193205541848547,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8690732231310169,
"avg_score": 0.055195954714937254,
"num_lines": 526
} |
__all__ = ['parse_author']
import datetime
import collections
import re
# Parsed author line: display name, email address, ISO-8601 date string.
Author = collections.namedtuple('Author', 'name email date')

#: Trailing UTC offset of a git author line, e.g. '-0700' -> ('-', '07', '00').
PARSE_OFFSET = re.compile(r'([-+])(\d\d)(\d\d)$')
def parse_timestamp(timestamp):
    """Convert a POSIX-seconds string to an aware UTC datetime.

    Uses fromtimestamp(..., tz=...) rather than the deprecated
    datetime.utcfromtimestamp(); the result is identical.
    """
    return datetime.datetime.fromtimestamp(int(timestamp),
                                           tz=datetime.timezone.utc)
def parse_offset(offset):
    """Convert a '+HHMM'/'-HHMM' offset string to a datetime.timezone.

    Raises ValueError when the string does not match.
    """
    m = PARSE_OFFSET.match(offset)
    if m is None:
        # BUG FIX: the original raised the misspelled, undefined name
        # 'Failurue' and referenced 'line'/'i' from another scope, which
        # would itself crash with a NameError; report the actual input.
        raise ValueError('Cannot parse offset: {!r}'.format(offset))
    delta = datetime.timedelta(
        hours=int(m.group(2)),
        minutes=int(m.group(3)))
    if m.group(1) == '-':
        delta = -delta
    return datetime.timezone(delta)
#: 'Name <email>' -> (name possibly with trailing space, email).
PARSE_USER = re.compile('([^<]*)(?:<([^>]*)>)$')
def parse_user(user):
    """Split 'Name <email>' into a (name, email) pair.

    Raises ValueError when the string does not match.
    """
    m = PARSE_USER.match(user)
    if m is None:
        # BUG FIX: 'Failure' was never defined or imported in this module,
        # so the original raise would die with a NameError; use a
        # standard exception instead.
        raise ValueError('Cannot parse user: {!r}'.format(user))
    return m.group(1).rstrip(), m.group(2)
def parse_author(line):
    """Parse a git author/committer line into an Author tuple.

    *line* has the form 'Name <email> <unix-seconds> <±HHMM>'; the date
    field is rendered as an ISO-8601 string in the author's own offset.
    """
    offset_start = line.rindex(' ')
    stamp_start = line.rindex(' ', 0, offset_start)
    name, email = parse_user(line[:stamp_start])
    moment = parse_timestamp(line[stamp_start + 1:offset_start])
    zone = parse_offset(line[offset_start + 1:])
    return Author(name, email, moment.astimezone(zone).isoformat())
# Self-test: run this module directly to check parse_author against a
# known git author line.
if __name__ == '__main__':
    test_in = 'Dietrich Epp <depp@zdome.net> 1368039878 -0700'
    expected_out = Author(
        'Dietrich Epp',
        'depp@zdome.net',
        '2013-05-08T12:04:38-07:00')
    test_out = parse_author(test_in)
    if test_out != expected_out:
        raise Exception('Test failed: expected {!r}, got {!r}'
                        .format(expected_out, test_out))
| {
"repo_name": "depp/doublegit",
"path": "doublegit/author.py",
"copies": "1",
"size": "1604",
"license": "bsd-2-clause",
"hash": 3925079603906457000,
"line_mean": 30.4509803922,
"line_max": 70,
"alpha_frac": 0.6128428928,
"autogenerated": false,
"ratio": 3.2338709677419355,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43467138605419353,
"avg_score": null,
"num_lines": null
} |
__all__ = ["ParseError", "parse_nested_list"]
class ParseError(Exception):
    """Raised when a PDDL (Lisp) input is malformed."""
# Basic functions for parsing PDDL (Lisp) files.
def parse_nested_list(input_file):
    """Tokenize *input_file* and parse it as one parenthesized list.

    Raises ParseError when the input does not start with '(' or has
    trailing tokens after the closing ')'.
    """
    tokens = tokenize(input_file)
    # generator.next() was removed in Python 3; the next() builtin is
    # equivalent and also works on Python 2.6+.
    next_token = next(tokens)
    if next_token != "(":
        raise ParseError("Expected '(', got %s." % next_token)
    result = list(parse_list_aux(tokens))
    for tok in tokens:  # Check that generator is exhausted.
        raise ParseError("Unexpected token: %s." % tok)
    return result
def tokenize(input):
    """Yield PDDL tokens from an iterable of source lines.

    Tokens are lower-cased, except inside :moduleoptions /
    :moduleexitoptions sections and for tokens containing '@' (module
    library calls), whose case must be preserved.
    """
    in_module_options = False
    open_parens = 0
    for raw_line in input:
        # Strip ';' comments, then pad delimiters so split() isolates them.
        code = raw_line.split(";", 1)[0]
        code = code.replace("(", " ( ").replace(")", " ) ").replace("?", " ?")
        # added to tokenize module calls
        code = code.replace("[", " [ ").replace("]", " ] ")
        for token in code.split():
            if token in (":moduleoptions", ":moduleexitoptions"):
                in_module_options = True
                open_parens = 1  # the '(' right before :moduleoptions
            if in_module_options:
                if token == "(":
                    open_parens += 1
                elif token == ")":
                    open_parens -= 1
                    # Section ends when its opening paren is balanced.
                    if open_parens <= 0:
                        in_module_options = False
            if "@" in token or in_module_options:
                yield token
            else:
                yield token.lower()
def parse_list_aux(tokenstream):
    """Yield the elements of a parenthesized list; nested lists recurse.

    The leading "(" has already been swallowed by the caller.  Raises
    ParseError when the stream ends before the closing ')'.
    """
    while True:
        try:
            # generator.next() was removed in Python 3; use the next()
            # builtin, which also works on Python 2.6+.
            token = next(tokenstream)
        except StopIteration:
            raise ParseError()
        if token == ")":
            return
        elif token == "(":
            yield list(parse_list_aux(tokenstream))
        else:
            yield token
| {
"repo_name": "GKIFreiburg/gki_symbolic_planning",
"path": "tfd_modules/downward/translate/pddl/parser.py",
"copies": "1",
"size": "1784",
"license": "bsd-3-clause",
"hash": -6581535469639992000,
"line_mean": 30.8571428571,
"line_max": 79,
"alpha_frac": 0.6165919283,
"autogenerated": false,
"ratio": 3.7322175732217575,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48488095015217575,
"avg_score": null,
"num_lines": null
} |
# Public API of this element-utility module.
__all__ = ["ParseFromValue",
           "CreateValueFrom",
           "CreateElement",
           "CallAtomsJs",
           "VerifyElementClickable",
           "FindElement",
           "IsElementEnabled",
           "IsOptionElementSelected",
           "GetElementSize",
           "IsElementDisplayed",
           "IsOptionElementTogglable",
           "SetOptionElementSelected",
           "GetActiveElement",
           "GetElementAttribute",
           "GetElementTagName",
           "IsElementFocused",
           "ToggleOptionElement",
           "GetElementRegion",
           "GetElementEffectiveStyle",
           "GetElementBorder",
           "ScrollElementRegionIntoViewHelper",
           "IsElementAttributeEqualToIgnoreCase",
           "ScrollElementRegionIntoView",
           "ScrollElementIntoView",
           "GetElementClickableLocation"]
import time
import copy
from misc.basic_types import WebPoint
from misc.basic_types import WebSize
from misc.basic_types import WebRect
from third_party.atoms import *
from browser.status import *
from browser.js import *
# WebDriver wire-protocol key identifying an element id in a JSON payload.
kElementKey = "ELEMENT"
def ParseFromValue(value, target):
    """Populate *target* (WebPoint, WebSize or WebRect) from a dict.

    Returns True on success, False when *value* is not a dict or lacks
    the required numeric fields for the target's type; returns None
    (implicitly) for unsupported target types, as the original did.
    """
    if type(value) != dict:
        return False

    def numeric(item):
        # Mirrors the original check exactly: int/float only (not bool).
        return type(item) in [float, int]

    if isinstance(target, WebPoint):
        x, y = value.get("x"), value.get("y")
        if numeric(x) and numeric(y):
            target.x, target.y = int(x), int(y)
            return True
        return False
    if isinstance(target, WebSize):
        width, height = value.get("width"), value.get("height")
        if numeric(width) and numeric(height):
            target.width, target.height = int(width), int(height)
            return True
        return False
    if isinstance(target, WebRect):
        fields = [value.get(key) for key in ("left", "top", "width", "height")]
        if all(numeric(field) for field in fields):
            x, y, width, height = fields
            target.origin.x, target.origin.y = int(x), int(y)
            target.size.width, target.size.height = int(width), int(height)
            return True
        return False
def CreateValueFrom(target):
    """Serialize a WebPoint/WebSize/WebRect into a wire-protocol dict.

    Returns None (implicitly) for unsupported types, as the original did.
    """
    dict_value = {}
    # create value from WebPoint
    if isinstance(target, WebPoint):
        dict_value["x"] = target.x
        dict_value["y"] = target.y
        return dict_value
    # create value from WebSize
    if isinstance(target, WebSize):
        dict_value["width"] = target.width
        dict_value["height"] = target.height
        return dict_value
    # create value from WebRect
    if isinstance(target, WebRect):
        dict_value["left"] = target.X()
        # BUG FIX: this key was "right", but target.Y() is the rect's top
        # coordinate and the inverse ParseFromValue reads value.get("top").
        dict_value["top"] = target.Y()
        dict_value["width"] = target.Width()
        dict_value["height"] = target.Height()
        return dict_value
def CreateElement(element_id):
    """Wrap an element id in the wire-protocol {ELEMENT: id} dict."""
    return {kElementKey: element_id}
def CallAtomsJs(frame, web_view, atom_function, args, result):
    """Execute an atoms JS function in *frame* through the web view.

    *result* is filled in by the call; the view's status is returned.
    """
    return web_view.CallFunction(frame, atom_function, args, result)
def VerifyElementClickable(frame, web_view, element_id, location):
    """Verify via the IS_ELEMENT_CLICKABLE atom that a click at |location|
    would land on |element_id|.

    Returns Status(kOk) when clickable; otherwise an error status carrying
    the atom's explanation (or a generic message).
    """
    args = []
    args.append(CreateElement(element_id))
    args.append(CreateValueFrom(location))
    result = {}
    status = CallAtomsJs(frame, web_view, IS_ELEMENT_CLICKABLE, args, result)
    if status.IsError():
        return status
    is_clickable = result["value"].get("clickable")
    if type(is_clickable) != bool:
        return Status(kUnknownError, "failed to parse value of IS_ELEMENT_CLICKABLE")
    if not is_clickable:
        # BUG fix: the atom packs its payload under "value" (see the
        # "clickable" read above); the original read result.get("message"),
        # which is always None, so the atom's explanation was dropped.
        message = result["value"].get("message")
        if type(message) != str:
            message = "element is not clickable"
        return Status(kUnknownError, message)
    return Status(kOk)
def FindElement(interval_ms, only_one, root_element_id, session, web_view, params, value):
    """Poll the FIND_ELEMENT/FIND_ELEMENTS atom until a match appears.

    Retries every |interval_ms| milliseconds until session.implicit_wait
    (seconds) elapses.  |params| must contain string entries "using"
    (locator strategy) and "value" (locator target).  On success the raw
    result dict is copied into |value|; when |only_one| is False and the
    wait expires, |value| gets an empty list instead of an error.
    """
    strategy = params.get("using")
    if type(strategy) != str:
        return Status(kUnknownError, "'using' must be a string")
    target = params.get("value")
    if type(target) != str:
        return Status(kUnknownError, "'value' must be a string")
    script = FIND_ELEMENT if only_one else FIND_ELEMENTS
    locator = {}
    locator[strategy] = target
    arguments = []
    arguments.append(locator)
    if root_element_id:
        # Restrict the search to the subtree rooted at this element.
        arguments.append(CreateElement(root_element_id))
    start_time = time.time()
    while True:
        temp = {}
        status = web_view.CallFunction(session.GetCurrentFrameId(), script, arguments, temp)
        if status.IsError():
            return status
        # no matter what kind of result, it will packed in {"value": RemoteObject} format
        # RemoteObject can be JSON type
        if temp != {}:
            if only_one:
                value.clear()
                value.update(temp)
                return Status(kOk)
            else:
                if type(temp["value"]) != list:
                    return Status(kUnknownError, "script returns unexpected result")
                if len(temp["value"]) > 0:
                    value.clear()
                    value.update(temp)
                    return Status(kOk)
        # Implicit wait exhausted: report "not found" per the mode.
        if ((time.time() - start_time) >= session.implicit_wait):
            if only_one:
                return Status(kNoSuchElement)
            else:
                value.update({"value": []})
                return Status(kOk)
        time.sleep(float(interval_ms)/1000)
    # Unreachable: the while-True loop above always returns.
    return Status(kUnknownError)
def IsElementEnabled(session, web_view, element_id):
    """Return (status, is_enabled<bool>) using the IS_ENABLED atom."""
    result = {}
    status = CallAtomsJs(session.GetCurrentFrameId(), web_view, IS_ENABLED,
                         [CreateElement(element_id)], result)
    if status.IsError():
        return (status, False)
    # The atom packs its return value under the "value" key.
    enabled = result["value"]
    if type(enabled) != bool:
        return (Status(kUnknownError, "IS_ENABLED should return a boolean value"), False)
    return (Status(kOk), enabled)
def IsOptionElementSelected(session, web_view, element_id):
    """Return (status, is_selected<bool>) for an <option> via the IS_SELECTED atom."""
    result = {}
    status = CallAtomsJs(session.GetCurrentFrameId(), web_view, IS_SELECTED,
                         [CreateElement(element_id)], result)
    if status.IsError():
        return (status, False)
    # The atom packs its return value under the "value" key.
    selected = result["value"]
    if type(selected) != bool:
        return (Status(kUnknownError, "IS_SELECTED should return a boolean value"), False)
    return (Status(kOk), selected)
def GetElementSize(session, web_view, element_id, size):
    """Fill |size| (a WebSize) with the element's size via the GET_SIZE atom."""
    result = {}
    status = CallAtomsJs(session.GetCurrentFrameId(), web_view, GET_SIZE,
                         [CreateElement(element_id)], result)
    if status.IsError():
        return status
    # The atom packs its return value under the "value" key.
    if ParseFromValue(result["value"], size):
        return Status(kOk)
    return Status(kUnknownError, "failed to parse value of GET_SIZE")
def IsElementDisplayed(session, web_view, element_id, ignore_opacity):
    """Return (status, is_displayed<bool>) using the IS_DISPLAYED atom.

    |ignore_opacity| is forwarded to the atom so fully transparent elements
    can still count as displayed.
    """
    args = [CreateElement(element_id), ignore_opacity]
    result = {}
    status = CallAtomsJs(session.GetCurrentFrameId(), web_view, IS_DISPLAYED, args, result)
    if status.IsError():
        return (status, False)
    # The atom packs its return value under the "value" key.
    displayed = result["value"]
    if type(displayed) != bool:
        return (Status(kUnknownError, "IS_DISPLAYED should return a boolean value"), False)
    return (Status(kOk), displayed)
def IsOptionElementTogglable(session, web_view, element_id):
    """Return (status, is_togglable<bool>) for an <option> element."""
    result = {}
    status = web_view.CallFunction(session.GetCurrentFrameId(),
                                   kIsOptionElementToggleableScript,
                                   [CreateElement(element_id)], result)
    if status.IsError():
        return (status, False)
    # The script packs its return value under the "value" key.
    togglable = result["value"]
    if type(togglable) != bool:
        return (Status(kUnknownError, "failed check if option togglable or not"), False)
    return (Status(kOk), togglable)
def SetOptionElementSelected(session, web_view, element_id, selected):
    """Select or deselect an <option> element via the CLICK atom.

    TODO(wyh): need to fix throwing error if an alert is triggered.
    """
    args = [CreateElement(element_id), selected]
    return CallAtomsJs(session.GetCurrentFrameId(), web_view, CLICK, args, {})
def GetActiveElement(session, web_view, value):
    """Fetch document.activeElement (falling back to document.body) into |value|."""
    script = "function() { return document.activeElement || document.body }"
    return web_view.CallFunction(session.GetCurrentFrameId(), script, [], value)
def GetElementAttribute(session, web_view, element_id, attribute_name, value):
    """Read attribute |attribute_name| of |element_id| into |value| via GET_ATTRIBUTE."""
    args = [CreateElement(element_id), attribute_name]
    return CallAtomsJs(session.GetCurrentFrameId(), web_view, GET_ATTRIBUTE, args, value)
def GetElementTagName(session, web_view, element_id):
    """Return (status, name<string>) — the element's lower-cased tag name."""
    result = {}
    status = web_view.CallFunction(
        session.GetCurrentFrameId(),
        "function(elem) { return elem.tagName.toLowerCase(); }",
        [CreateElement(element_id)], result)
    if status.IsError():
        return (status, "")
    # The script packs its return value under the "value" key.
    name = result["value"]
    if type(name) != str:
        return (Status(kUnknownError, "failed to get element tag name"), "")
    return (Status(kOk), name)
def IsElementFocused(session, web_view, element_id):
    """Return (status, is_focused<bool>) — whether |element_id| is the active element."""
    result = {}
    status = GetActiveElement(session, web_view, result)
    if status.IsError():
        return (status, False)
    # Element references compare equal iff their {"ELEMENT": id} dicts match.
    return (Status(kOk), result["value"] == CreateElement(element_id))
def ToggleOptionElement(session, web_view, element_id):
    """Flip the selected state of an <option> element."""
    (status, selected) = IsOptionElementSelected(session, web_view, element_id)
    if status.IsError():
        return status
    return SetOptionElementSelected(session, web_view, element_id, not selected)
def GetElementRegion(session, web_view, element_id, rect):
    """Fill |rect| (a WebRect) with the element's region via kGetElementRegionScript."""
    result = {}
    status = web_view.CallFunction(session.GetCurrentFrameId(), kGetElementRegionScript,
                                   [CreateElement(element_id)], result)
    if status.IsError():
        return status
    if ParseFromValue(result["value"], rect):
        return Status(kOk)
    return Status(kUnknownError, "failed to parse value of getElementRegion")
def GetElementEffectiveStyle(frame, web_view, element_id, sproperty):
    """Return (status, value<string>) — the computed CSS value of |sproperty|."""
    args = [CreateElement(element_id), sproperty]
    result = {}
    status = web_view.CallFunction(frame, GET_EFFECTIVE_STYLE, args, result)
    if status.IsError():
        return (status, "")
    css_value = result["value"]
    if type(css_value) != str:
        return (Status(kUnknownError, "failed to parse value of GET_EFFECTIVE_STYLE"), "")
    return (Status(kOk), css_value)
def _CssLengthToInt(css_value):
    """Parse the leading numeric part of a CSS length like '2px' into an int.

    Raises ValueError when no leading number is present.
    """
    digits = ""
    for ch in css_value.strip():
        if ch.isdigit() or (ch == "-" and not digits) or (ch == "." and "." not in digits):
            digits += ch
        else:
            break
    return int(float(digits))

# return status and border_left<int> and border_top<int>
def GetElementBorder(frame, web_view, element_id):
    """Return (status, border_left<int>, border_top<int>) in pixels.

    Border widths are -1 when they could not be determined.
    """
    (status, border_left_str) = GetElementEffectiveStyle(frame, web_view, element_id, "border-left-width")
    if status.IsError():
        return (status, -1, -1)
    (status, border_top_str) = GetElementEffectiveStyle(frame, web_view, element_id, "border-top-width")
    if status.IsError():
        return (status, -1, -1)
    try:
        # BUG fix: computed styles come back with units (e.g. "2px");
        # int("2px") always raised, so every caller received an error.
        # Parse the numeric prefix instead, and catch only ValueError
        # rather than the original bare except.
        border_left = _CssLengthToInt(border_left_str)
        border_top = _CssLengthToInt(border_top_str)
    except ValueError:
        return (Status(kUnknownError, "failed to get border width of element"), -1, -1)
    return (Status(kOk), border_left, border_top)
def ScrollElementRegionIntoViewHelper(frame, web_view, element_id, region, center, clickable_element_id, location):
    """Scroll |region| of |element_id| into view within |frame|.

    On success the region's in-view location is written into |location|.
    When |clickable_element_id| is non-empty, additionally verifies that a
    click at the region's centre would hit that element.
    """
    # Work on a copy so |location| is only mutated once every check passes.
    tmp_location = copy.deepcopy(location)
    args = []
    args.append(CreateElement(element_id))
    args.append(CreateValueFrom(region))
    args.append(center)
    # TODO(wyh): why append the following param between above two cause the null value of y?
    result = {}
    status = web_view.CallFunction(frame, GET_LOCATION_IN_VIEW, args, result)
    if status.IsError():
        return status
    if not ParseFromValue(result["value"], tmp_location):
        return Status(kUnknownError, "failed to parse value of GET_LOCATION_IN_VIEW")
    if clickable_element_id:
        # Check clickability at the centre of the scrolled region.
        middle = copy.deepcopy(tmp_location)
        middle.Offset(region.Width() / 2, region.Height() / 2)
        status = VerifyElementClickable(frame, web_view, clickable_element_id, middle)
        if status.IsError():
            return status
    location.Update(tmp_location)
    return Status(kOk)
def IsElementAttributeEqualToIgnoreCase(session, web_view, element_id, attribute_name, attribute_value):
    """Return (status, is_equal<bool>) comparing an attribute case-insensitively."""
    result = {}
    status = GetElementAttribute(session, web_view, element_id, attribute_name, result)
    if status.IsError():
        return (status, False)
    actual = result["value"]
    # Non-string attribute values never compare equal.
    equal = type(actual) == str and actual.lower() == attribute_value.lower()
    return (status, equal)
def ScrollElementRegionIntoView(session, web_view, element_id, region, center, clickable_element_id, location):
    """Scroll |region| of |element_id| into view, walking up the frame chain.

    After scrolling within the current frame, each enclosing frame element is
    located and scrolled in turn (innermost first) so the region becomes
    visible in the top-level page.  The final coordinates are written into
    |location|.
    """
    region_offset = region.origin;
    region_size = region.size;
    status = ScrollElementRegionIntoViewHelper(session.GetCurrentFrameId(), web_view, element_id, \
        region, center, clickable_element_id, region_offset)
    if status.IsError():
        return status
    # XPath lookup for the <frame>/<iframe> carrying the driver's frame id.
    kFindSubFrameScript = \
        "function(xpath) {"\
        " return document.evaluate(xpath, document, null,"\
        " XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue;"\
        "}"
    # Walk from the innermost frame outwards (session.frames is outer-first).
    session_frames_tmp = copy.deepcopy(session.frames)
    session_frames_tmp.reverse()
    for rit in session_frames_tmp:
        args = []
        args.append("//*[@cd_frame_id_ = '%s']" % rit.xwalkdriver_frame_id)
        result = {}
        status = web_view.CallFunction(rit.parent_frame_id, kFindSubFrameScript, args, result)
        if status.IsError():
            return status
        element_dict = result["value"]
        if type(element_dict) != dict:
            return Status(kUnknownError, "no element reference returned by script")
        frame_element_id = element_dict.get(kElementKey)
        if type(frame_element_id) != str:
            return Status(kUnknownError, "failed to locate a sub frame")
        # Modify |region_offset| by the frame's border.
        (status, border_left, border_top) = GetElementBorder(rit.parent_frame_id, web_view, frame_element_id)
        if status.IsError():
            return status
        region_offset.Offset(border_left, border_top)
        # Scroll the frame element itself into view in its parent frame.
        status = ScrollElementRegionIntoViewHelper(rit.parent_frame_id, web_view, frame_element_id, \
            WebRect(region_offset, region_size), center, frame_element_id, region_offset)
        if status.IsError():
            return status
    location.Update(region_offset)
    return Status(kOk)
def ScrollElementIntoView(session, web_view, sid, location):
    """Scroll the whole element |sid| into view and write its location into |location|."""
    size = WebSize()
    status = GetElementSize(session, web_view, sid, size)
    if status.IsError():
        return status
    # Use the element's full rect (origin 0,0) as the region to reveal.
    return ScrollElementRegionIntoView(session, web_view, sid,
                                       WebRect(WebPoint(0, 0), size), False, "", location)
def GetElementClickableLocation(session, web_view, element_id, location):
    """Compute the in-view point at which |element_id| can be clicked.

    Scrolls the element (or, for an <area>, its owning image) into view and
    writes the centre of its region into |location|.
    """
    (status, tag_name) = GetElementTagName(session, web_view, element_id)
    if status.IsError():
        return status
    target_element_id = element_id
    if (tag_name == "area"):
        # Scroll the image into view instead of the area.
        kGetImageElementForArea = \
            "function (element) {"\
            " var map = element.parentElement;"\
            " if (map.tagName.toLowerCase() != 'map')"\
            " throw new Error('the area is not within a map');"\
            " var mapName = map.getAttribute('name');"\
            " if (mapName == null)"\
            " throw new Error ('area\\'s parent map must have a name');"\
            " mapName = '#' + mapName.toLowerCase();"\
            " var images = document.getElementsByTagName('img');"\
            " for (var i = 0; i < images.length; i++) {"\
            " if (images[i].useMap.toLowerCase() == mapName)"\
            " return images[i];"\
            " }"\
            " throw new Error('no img is found for the area');"\
            "}"
        args = []
        args.append(CreateElement(element_id))
        result = {}
        status = web_view.CallFunction(session.GetCurrentFrameId(), kGetImageElementForArea, args, result)
        if status.IsError():
            return status
        element_dict = result["value"]
        if type(element_dict) != dict:
            return Status(kUnknownError, "no element reference returned by script")
        target_element_id = element_dict.get(kElementKey)
        if type(target_element_id) != str:
            return Status(kUnknownError, "no element reference returned by script")
    (status, is_displayed) = IsElementDisplayed(session, web_view, target_element_id, True)
    if status.IsError():
        return status
    if not is_displayed:
        return Status(kElementNotVisible)
    # Region is taken from the original element even when the scroll target
    # is the owning image (for <area> elements).
    rect = WebRect()
    status = GetElementRegion(session, web_view, element_id, rect)
    if status.IsError():
        return status
    # TODO(wyh): manually change center to false make element.click() ok
    status = ScrollElementRegionIntoView(session, web_view, target_element_id, rect, False, element_id, location)
    if status.IsError():
        return status
    # Click target is the centre of the element's region.
    location.Offset(rect.Width() / 2, rect.Height() / 2)
    return Status(kOk)
| {
"repo_name": "PeterWangIntel/crosswalk-webdriver-python",
"path": "command/element_util.py",
"copies": "1",
"size": "17371",
"license": "bsd-3-clause",
"hash": -3778442259220018700,
"line_mean": 36.1970021413,
"line_max": 135,
"alpha_frac": 0.6787749698,
"autogenerated": false,
"ratio": 3.545825678709941,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47246006485099407,
"avg_score": null,
"num_lines": null
} |
__all__ = ['parser']
import platform
import getpass
from collections import namedtuple
from urllib.parse import urlparse,ParseResult,parse_qsl
from .error import *
from typing import Dict,Optional,Tuple,Union,Any
# Per-scheme fallback connection settings.
Default = namedtuple('Default', ['user', 'password', 'host', 'port'])

# Pick OS-appropriate default credentials for PostgreSQL.
_system = platform.system().lower()
if _system == 'darwin':
    user = getpass.getuser()
    password = ''
elif _system == 'windows':
    # BUG fix: platform.system() returns 'Windows' (capitalised), so the
    # original comparison against 'windows' never matched and Windows hosts
    # silently fell into the generic branch (empty password).
    user = 'postgres'
    password = 'postgres'
else:
    user = 'postgres'
    password = ''

DEFAULT_INFO = {
    'mysql': Default(user='root', password='', host='localhost', port='3306'),
    'postgresql': Default(user=user, password=password, host='localhost', port='5432')
}
def check_scheme(scheme: Optional[str]) -> str:
    """Validate the URI scheme and normalise it to lower case.

    Raises
    ------
    InvalidURI
        When the scheme is missing or not a supported database.
    """
    SCHEMES = ('mysql', 'postgresql')
    if not scheme:
        raise InvalidURI("must have a database uri")
    normalized = scheme.lower()
    if normalized in SCHEMES:
        return normalized
    # Message typo fix: the original read "unknow database".
    raise InvalidURI("unknown database")
def check_netloc(parse_result: ParseResult, scheme: str) -> Tuple[Optional[str], Optional[str], Optional[str], Optional[str]]:
    """Resolve user, password, host and port, falling back to the scheme's defaults."""
    fallback = DEFAULT_INFO.get(scheme)
    port = str(parse_result.port) if parse_result.port else fallback.port
    return (parse_result.username or fallback.user,
            parse_result.password or fallback.password,
            parse_result.hostname or fallback.host,
            port)
def check_path(path: str) -> str:
    """Strip all slashes from the URI path to obtain the database name."""
    if not path:
        raise InvalidURI("need to point out the db's name")
    return path.replace("/", "")
def check_query(query: str) -> Any:
    """Decode the query string into a dict of extra options, or None when absent."""
    return dict(parse_qsl(query)) if query else None
def parser(uri: str) -> Dict[str, str]:
    """Split a database URI into connection parameters.

    Any query-string options are merged into the returned dict.
    """
    parts = urlparse(uri)
    scheme = check_scheme(parts.scheme)
    usr, password, host, port = check_netloc(parts, scheme)
    db = check_path(parts.path)
    extras = check_query(parts.query)
    result = {
        'scheme': scheme,
        'username': usr,
        'password': password,
        'host': host,
        'port': int(port),
        'database': db,
    }
    if extras:
        result.update(extras)
    return result
| {
"repo_name": "Python-Tools/aioorm",
"path": "aioorm/uri_parser.py",
"copies": "1",
"size": "2165",
"license": "mpl-2.0",
"hash": 2910644597830366700,
"line_mean": 24.7738095238,
"line_max": 118,
"alpha_frac": 0.6526558891,
"autogenerated": false,
"ratio": 3.700854700854701,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4853510589954701,
"avg_score": null,
"num_lines": null
} |
# All parsing functions
from collections import defaultdict
import numpy as np
import pdb
# Number of genes in each expression matrix.
NUM_GENES = 8499
# Location of the raw expression data and the per-cancer files to parse.
directory = 'data/'
files = ['BRCA.txt', 'COAD.txt', 'KIRC.txt', 'LUSC.txt', 'OV.txt', 'UCEC.txt']
# File listing transcription-factor (regulator) names, one per line.
reg_file = 'tfs.txt'
# Cancer type whose SBM results are analysed by parse_sbm_results().
cancer = 'KIRC'
def parse_data():
    """Read every cancer expression file plus the regulator list.

    Returns (gene_data, regulators): gene_data maps cancer name to a
    genes-by-samples numpy matrix; regulators is a list of TF names.
    """
    gene_data = {}
    for fname in files:
        matrix = []
        with open(directory + fname) as handle:
            for idx, line in enumerate(handle):
                if idx == 0:
                    continue  # skip the header row
                # Tab-separated values; drop the leading gene name column.
                fields = line.strip().split('\t')[1:]
                matrix.append([float(v) for v in fields])
        gene_data[fname.strip()[:-4]] = np.asarray(matrix)
    with open(directory + reg_file) as handle:
        regulators = [x.strip() for x in handle]
    return gene_data, regulators
### For example converts BRCA.txt to BRCA_reformatted.txt
def reformat_data():
    """Transpose each expression file into <name>_reformatted.txt.

    Input format (genes as rows)::

        Name  exp1   exp2  ...
        G1    valG1  valG1 ...
        G2    valG2  valG2 ...

    Output format (samples as rows, values un-logged as 2**val)::

        G1     G2     G3 ...
        valG1  valG2  valG3 ...
    """
    gene_data = {}  # Dictionary of cancer to matrix
    for fname in files:
        X = []
        gene_names = []
        with open(directory + fname) as f:
            for i, line in enumerate(f):
                if i == 0:
                    continue  # skip the header row
                vals = line.strip().split('\t')  # Parse tsv
                gene_names.append(vals[0])
                X.append([float(x) for x in vals[1:]])
        label = fname.strip()[:-4]
        gene_data[label] = np.asarray(X)
        # BUG fix: the original opened the output in 'wb' yet wrote str
        # objects (a TypeError on Python 3) and never closed the handles.
        # Use text mode with a context manager.
        with open(directory + fname[:-4] + '_reformatted.txt', 'w') as f_out:
            f_out.write('\t'.join(gene_names) + '\n')
            height, width = gene_data[label].shape
            for i in range(width):
                # Expression values are stored as log2; emit 2**val.
                row = [str(2.0 ** gene_data[label][j, i]) for j in range(height)]
                f_out.write('\t'.join(row) + '\n')
# This method parses modules.txt and outputs in new data structure described below.
def parse_module(fname):
    """Parse a gene-to-module assignment file.

    Each line is ``gene<TAB>module_id``.  Parsing stops at the first empty
    line (preserving the original behaviour).

    Returns (module_id_to_genes, gene_to_module_id): a dict of module id to
    a set of genes, and a dict of gene to its module id.
    """
    module_id_to_genes = {}
    gene_to_module_id = {}
    # BUG fix: the original opened in 'rb' (bytes break str.split on
    # Python 3) and leaked the handle on exceptions; read in text mode
    # with a context manager.
    with open(fname) as f:
        content = f.read()
    for line in content.split('\n'):
        if not line:
            break  # stop at the first blank line
        line_split = line.split('\t')
        gene, module_id = line_split[0], line_split[1]
        # setdefault replaces the original modules_seen bookkeeping set.
        module_id_to_genes.setdefault(module_id, set()).add(gene)
        gene_to_module_id[gene] = module_id
    return module_id_to_genes, gene_to_module_id
# This method parses the binary matrix and the cluster assignments from sbm
def parse_sbm_results():
    """Parse the SBM outputs for the configured cancer type.

    Returns (tfs, genes, genes_regulated, module_to_gene, gene_to_module)
    where genes_regulated maps each TF to the list of gene names it
    regulates (matrix entries are 1-based gene indices).
    """
    # Transcription factors, one symbol per line.
    with open('data/tfs.txt') as f:
        tfs = [x.strip() for x in f]
    # Gene symbols, one per line; row order matches the weight matrix.
    with open('data/genes.txt') as f:
        genes = [x.strip() for x in f]
    # Cluster assignments
    module_to_gene, gene_to_module = parse_module('../results/sbm/' + cancer + '_cluster.txt')
    # Binary matrix of regulated gene indices, comma separated per row.
    binary_matrix = []
    with open('../results/sbm/' + cancer + '_weights.txt') as f:
        for line in f:
            line = line.strip()
            # BUG fix: the original filtered with "x is not ''" — an identity
            # test that only works by CPython interning accident (and is a
            # SyntaxWarning on modern Python).  Use a truthiness test.
            vals = [int(x) for x in line.split(',') if x]
            binary_matrix.append(vals)
    genes_regulated = {}
    tf_set = set(tfs)  # O(1) membership instead of a list scan per gene
    for gene, regulated in zip(genes, binary_matrix):
        if gene in tf_set:
            genes_regulated[gene] = [genes[x - 1] for x in regulated]
    return tfs, genes, genes_regulated, module_to_gene, gene_to_module
def parse_chip_seq():
    """Load the ground-truth ChIP-seq TF-to-gene mapping (MSig)."""
    tf_targets = defaultdict(list)
    with open('../results/chipseq/MSig.txt') as handle:
        for row in handle:
            fields = row.strip().split('\t')
            # Column 0 is the TF, column 1 a gene it regulates.
            tf_targets[fields[0]].append(fields[1])
    return tf_targets
def parse_merlin():
    """Load MERLIN's predicted TF-to-gene regulation mapping."""
    tf_targets = defaultdict(list)
    with open('../results/fold14/prediction_k300.txt') as handle:
        for row in handle:
            fields = row.strip().split('\t')
            # Column 0 is the TF, column 1 a predicted target gene.
            tf_targets[fields[0]].append(fields[1])
    return tf_targets
| {
"repo_name": "kevintee/Predicting-Gene-Networks",
"path": "src/parse.py",
"copies": "1",
"size": "5051",
"license": "mit",
"hash": 1076532608977266700,
"line_mean": 29.7987804878,
"line_max": 83,
"alpha_frac": 0.5545436547,
"autogenerated": false,
"ratio": 3.4197698036560595,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44743134583560595,
"avg_score": null,
"num_lines": null
} |
__all__=["ParticipaTriplification"]
import psycopg2, rdflib as r, sys, urllib, re
class ParticipaTriplification:
    """Triplifies a Participabr/Noosfero PostgreSQL database into RDF.

    Reads the relational tables, builds an rdflib graph with the OPA/OPS
    vocabularies and (optionally) serializes it to disk.
    """
    def __init__(self,dbname="newdb",username="r",separator="/",filename="participaTriplestore",render=True,write_files=True,compute_networks=True,compute_bows=False):
        """Reads PostgreSQL Participabr/Noosfero database and writes them as RDF.
        """
        self.connectDatabase(dbname,username)
        self.readTables() #creates D and N
        self.initNamespaces() # creates P
        self.startGraph()
        self.auxFunctions()
        self.separator=separator
        # NOTE(review): filename is stored but writeGraph() uses hard-coded
        # output names — confirm whether this attribute should be used there.
        self.filename=filename
        self.write_files=write_files
        self.render=render
        self.compute_networks=compute_networks
        self.compute_bows=compute_bows
        if render:
            self.triplifyAll()
        if write_files:
            self.writeGraph()
    def triplifyAll(self):
        # Run every triplification step in order.
        self.triplifyPortalInfo()
        self.triplifyProfiles()
        self.triplifyComments()
        self.triplifyFriendships()
        self.triplifyVotes()
        self.triplifyTags()
        self.triplifyOverallStructures()
    def connectDatabase(self,dbname="newdb",username="r"):
        # Opens the psycopg2 connection and cursor used by readTables().
        self.con = psycopg2.connect(database=dbname, user=username)
        self.cur = self.con.cursor()
    def readTables(self):
        """Retrieves data (D) and names (N) from relational tables.
        """
        # D holds the row tuples of each table; N the column names.
        class D: pass
        cur=self.cur
        cur.execute('SELECT * FROM users')
        D.users = cur.fetchall()
        cur.execute('SELECT * FROM profiles')
        D.profiles = cur.fetchall()
        cur.execute('SELECT * FROM articles')
        D.articles = cur.fetchall()
        cur.execute('SELECT * FROM comments')
        D.comments = cur.fetchall()
        cur.execute('SELECT * FROM friendships')
        D.friendships= cur.fetchall()
        cur.execute('SELECT * FROM votes')
        D.votes= cur.fetchall()
        cur.execute('SELECT * FROM tags')
        D.tags= cur.fetchall()
        cur.execute('SELECT * FROM taggings')
        D.taggings= cur.fetchall()
        self.D=D
        # Column names per table, pulled from information_schema.
        # NOTE(review): the [::-1] reversal assumes information_schema returns
        # columns in reverse ordinal order — confirm against the database.
        class N: pass
        cur.execute("select column_name from information_schema.columns where table_name='users';")
        UN=cur.fetchall()
        N.UN=[i[0] for i in UN[::-1]]
        cur.execute("select column_name from information_schema.columns where table_name='profiles';")
        PN=cur.fetchall()
        N.PN=[i[0] for i in PN[::-1]]
        cur.execute("select column_name from information_schema.columns where table_name='articles';")
        AN=cur.fetchall()
        N.AN=[i[0] for i in AN[::-1]]
        cur.execute("select column_name from information_schema.columns where table_name='comments';")
        CN=cur.fetchall()
        N.CN=[i[0] for i in CN[::-1]]
        cur.execute("select column_name from information_schema.columns where table_name='friendships';")
        FRN=cur.fetchall()
        N.FRN=[i[0] for i in FRN[::-1]]
        cur.execute("select column_name from information_schema.columns where table_name='votes';")
        VN=cur.fetchall()
        N.VN=[i[0] for i in VN[::-1]]
        cur.execute("select column_name from information_schema.columns where table_name='tags';")
        TN=cur.fetchall()
        N.TN=[i[0] for i in TN[::-1]]
        cur.execute("select column_name from information_schema.columns where table_name='taggings';")
        TTN=cur.fetchall()
        N.TTN=[i[0] for i in TTN[::-1]]
        self.N=N
    def initNamespaces(self):
        # P collects every RDF namespace used during triplification.
        class P: pass
        P.rdf = r.namespace.RDF
        P.foaf = r.namespace.FOAF
        P.xsd = r.namespace.XSD
        P.opa = r.Namespace("http://purl.org/socialparticipation/opa/")
        P.ops = r.Namespace("http://purl.org/socialparticipation/ops/")
        P.wsg = r.Namespace("http://www.w3.org/2003/01/geo/wgs84_pos#")
        P.dc2 = r.Namespace("http://purl.org/dc/elements/1.1/")
        P.dc = r.Namespace("http://purl.org/dc/terms/")
        P.sioc = r.Namespace("http://rdfs.org/sioc/ns#")
        P.tsioc = r.Namespace("http://rdfs.org/sioc/types#")
        P.skos = r.Namespace("http://www.w3.org/2004/02/skos/core#")
        P.schema = r.Namespace("http://schema.org/")
        P.part = r.Namespace("http://participa.br/")
        self.P=P
    def startGraph(self):
        """Starts RDF graph and bing namespaces"""
        g = r.Graph()
        g.namespace_manager.bind("rdf", r.namespace.RDF)
        g.namespace_manager.bind("foaf", r.namespace.FOAF)
        g.namespace_manager.bind("xsd", r.namespace.XSD)
        g.namespace_manager.bind("opa", "http://purl.org/socialparticipation/opa/")
        g.namespace_manager.bind("ops", "http://purl.org/socialparticipation/ops/")
        g.namespace_manager.bind("wsg", "http://www.w3.org/2003/01/geo/wgs84_pos#")
        g.namespace_manager.bind("dc2", "http://purl.org/dc/elements/1.1/")
        g.namespace_manager.bind("dc", "http://purl.org/dc/terms/")
        g.namespace_manager.bind("sioc", "http://rdfs.org/sioc/ns#")
        g.namespace_manager.bind("tsioc", "http://rdfs.org/sioc/types#")
        g.namespace_manager.bind("schema", "http://schema.org/")
        g.namespace_manager.bind("part", "http://participa.br/")
        self.g=g
    def auxFunctions(self):
        # X is used as a plain namespace of helper functions (never
        # instantiated), so the defs below deliberately take no self.
        TAG_RE = re.compile(r'<[^>]+>')
        class X:
            def remove_tags(text):
                # Strip HTML tags from a string.
                return TAG_RE.sub('', text)
            def Qu(termo):
                # NOTE(review): pp, PN, users and UN are free variables that
                # are not defined in this scope — calling Qu as written would
                # raise NameError; confirm intended data flow.
                user_id=pp[PN.index("user_id")]
                val=[i for i in users if i[0]==user_id][0][UN.index(termo)]
                return val
            def G(S,P,O):
                # Add one triple to the instance graph.
                self.g.add((S,P,O))
            def L(data, datatype_=None):
                # Build an rdflib Literal, optionally typed.
                if datatype_:
                    return r.Literal(data, datatype=datatype_)
                else:
                    return r.Literal(data)
            def fparse(mstring):
                # Parse a colon-separated multiline record into a dict.
                foo=[i for i in mstring.split("\n")[1:-1] if i]
                return dict([[j.strip().replace('"',"") for j in i.split(":")[1:]] for i in foo if len(i.split(":"))==3])
            U=r.URIRef
            # NOTE(review): urllib.parse is only reachable via `import urllib`
            # because another import (rdflib) loads the submodule; importing
            # urllib.parse explicitly would be safer.
            QQ=urllib.parse.quote
            def Q_(mstr):
                # NOTE(review): QQ/pp/PN are class-scope or undefined names not
                # visible inside this function at call time (Python class
                # scopes do not nest) — confirm before relying on Q_.
                return QQ(pp[PN.index(mstr)])
            def Q(mstr):
                # NOTE(review): pp/PN are undefined here as well (see Q_).
                return pp[PN.index(mstr)]
            def QA(mstr):
                # NOTE(review): aa/AN undefined here (see Q_).
                return aa[AN.index(mstr)]
            def QC(mstr):
                # NOTE(review): cc/CN undefined here (see Q_).
                return cc[CN.index(mstr)]
            def QF(mstr):
                # NOTE(review): fr/FRN undefined here (see Q_).
                return fr[FRN.index(mstr)]
        self.X=X
    def triplifyPortalInfo(self):
        """Make triples with information about the portal.
        """
        uri=self.P.opa.ParticipationPortal+self.separator+"participabr"
        self.X.G(uri,self.P.rdf.type,self.P.opa.ParticipationPortal)
        # DATA.portal_description is the module-level static text below.
        self.X.G(uri,self.P.opa.description,self.X.L(DATA.portal_description,self.P.xsd.string))
        self.X.G(uri,self.P.opa.url,self.X.L("http://participa.br/",self.P.xsd.string))
    def triplifyOverallStructures(self):
        """Insert into RDF graph the textual and network structures.
        Ideally, one should be able to make bag of words related to
        each item (communities, users, posts, comments, tags, etc).
        Interaction and friendship networks should be made.
        Human networks mediated by co-ocurrance (time os posts,
        geographical locations, vocabulary, etc) should be addressed
        as well.
        """
        if self.compute_networks:
            self.computeNetworks()
        if self.compute_bows:
            self.computeBows()
    def computeBows(self):
        # Placeholder: bag-of-words extraction not implemented yet.
        pass
    def computeNetworks(self):
        # Placeholder: interaction/friendship networks not implemented yet.
        pass
    def triplifyProfiles(self):
        # Placeholder: profile triplification not implemented yet.
        pass
    def triplifyComments(self):
        # Placeholder: comment triplification not implemented yet.
        pass
    def triplifyFriendships(self):
        # Placeholder: friendship triplification not implemented yet.
        pass
    def triplifyVotes(self):
        # Placeholder: vote triplification not implemented yet.
        pass
    def triplifyTags(self):
        # Placeholder: tag triplification not implemented yet.
        pass
    def writeGraph(self):
        # Serialize the graph as RDF/XML and Turtle.
        # NOTE(review): rdflib >= 6 returns str from serialize(), which these
        # binary-mode writes would reject — confirm the pinned rdflib version.
        f=open("participaTriplestore.rdf","wb")
        f.write(self.g.serialize())
        f.close()
        f=open("participaTriplestore.ttl","wb")
        f.write(self.g.serialize(format="turtle"))
        f.close()
class DATA:
    """Static content embedded in the triplestore.

    NOTE(review): portal_description is Portuguese prose describing
    dados.gov.br; it is emitted verbatim as an RDF literal by
    triplifyPortalInfo, so it must not be altered or translated.
    """
    # Runtime data, reproduced byte-for-byte.
    portal_description="""
O que é o dados.gov.br?
O Portal Brasileiro de Dados Abertos é a ferramenta disponibilizada pelo governo para que todos possam encontrar e utilizar os dados e as informações públicas. O portal preza pela simplicidade e organização para que você possa encontrar facilmente os dados e informações que precisa. O portal também tem o objetivo de promover a interlocução entre atores da sociedade e com o governo para pensar a melhor utilização dos dados em prol de uma sociedade melhor.
Quais dados estão disponíveis aqui?
O portal tem o objetivo de disponibilizar todo e qualquer tipo de dado. Por exemplo, dados da saúde suplementar, do sistema de transporte, de segurança pública, indicadores de educação, gastos governamentais, processo eleitoral, etc.
O portal funciona como um grande catálogo que facilita a busca e uso de dados publicados pelos órgãos do governo. Neste momento o portal disponibiliza o acesso à uma parcela dos dados publicados pelo governo. O plano estratégico prevê que nos próximos 3 anos o portal disponibilize acesso aos dados publicados por todos os órgãos do governo federal, além de dados das esferas estaduais e municipais.
O que dados abertos tem a ver com você?
O acesso a informação está previsto na Constituição Federal e na Declaração Universal dos Direitos Humanos. Dados Abertos é a publicação e disseminação dos dados e informações públicas na Internet, organizados de tal maneira que permita sua reutilização em aplicativos digitais desenvolvidos pela sociedade.
Isso proporciona ao cidadão um melhor entendimento do governo, no acesso aos serviços públicos, no controle das contas públicas e na participação no planejamento e desenvolvimento das políticas públicas. Se interessou pelo tema? Saiba mais sobre Dados Abertos aqui !
Por que estamos fazendo isso?
Em 18 de novembro de 2011 foi sancionada a Lei de Acesso a Informação Pública (Lei 12.527/2011) que regula o acesso a dados e informações detidas pelo governo. Essa lei constitui um marco para a democratização da informação pública, e preconiza, dentre outros requisitos técnicos, que a informação solicitada pelo cidadão deve seguir critérios tecnológicos alinhados com as “3 leis de dados abertos”. Dentro desse contexto o Portal Brasileiro de Dados Abertos é a ferramenta construída pelo governo para centralizar a busca e o acesso dos dados e informações públicas.
O Brasil como membro co-líder da Parceria de Governo Aberto, ou Open Government Partnership (OGP), tem este Portal como um de seus compromissos que foram formalizados no Plano de ação de governo aberto, lançado na OGP e referenciado pelo Decreto sem número de 15 de setembro de 2011.
"""
| {
"repo_name": "ttm/participationLegacy",
"path": "participation/triplification/participaTriplification.py",
"copies": "1",
"size": "10937",
"license": "mit",
"hash": -4268547982895012000,
"line_mean": 49.6962616822,
"line_max": 572,
"alpha_frac": 0.640612038,
"autogenerated": false,
"ratio": 3.250149790293589,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43907618282935895,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Particle']
from sympy import sympify
from sympy.physics.mechanics.point import Point
class Particle(object):
    """A particle.

    Particles have a non-zero mass and lack spatial extension; they take up no
    space.

    Values need to be supplied on initialization, but can be changed later.

    Parameters
    ==========

    name : str
        Name of particle
    mass : sympifyable
        A SymPy expression representing the Particle's mass
    point : Point
        A physics/mechanics Point which represents the position, velocity, and
        acceleration of this Particle

    Examples
    ========

    >>> from sympy.physics.mechanics import Particle, Point
    >>> from sympy import Symbol
    >>> po = Point('po')
    >>> m = Symbol('m')
    >>> pa = Particle('pa', po, m)
    >>> # Or you could change these later
    >>> pa.mass = m
    >>> pa.point = po

    """

    def __init__(self, name, point, mass):
        # Only plain strings are accepted as names.
        if not isinstance(name, str):
            raise TypeError('Supply a valid name.')
        self._name = name
        # Route through the setters so validation/sympify always runs.
        self.set_mass(mass)
        self.set_point(point)

    def __str__(self):
        # The particle prints as its name.
        return self._name

    __repr__ = __str__  # repr is identical to str

    def get_mass(self):
        """Mass of the particle."""
        return self._mass

    def set_mass(self, mass):
        # sympify coerces numbers/strings into SymPy expressions.
        self._mass = sympify(mass)

    mass = property(get_mass, set_mass)  # attribute-style access to the mass

    def get_point(self):
        """Point of the particle."""
        return self._point

    def set_point(self, p):
        # Reject anything that is not a mechanics Point.
        if not isinstance(p, Point):
            raise TypeError("Particle point attribute must be a Point object.")
        self._point = p

    point = property(get_point, set_point)  # attribute-style access to the point
| {
"repo_name": "ichuang/sympy",
"path": "sympy/physics/mechanics/particle.py",
"copies": "1",
"size": "1687",
"license": "bsd-3-clause",
"hash": 270748484759059140,
"line_mean": 23.8088235294,
"line_max": 79,
"alpha_frac": 0.5892116183,
"autogenerated": false,
"ratio": 4.104622871046229,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5193834489346228,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Particle']
from sympy import sympify
from sympy.physics.mechanics.point import Point
class Particle(object):
    """A particle.
    Particles have a non-zero mass and lack spatial extension; they take up no
    space.
    Values need to be supplied on initialization, but can be changed later.
    Parameters
    ==========
    name : str
        Name of particle
    point : Point
        A physics/mechanics Point which represents the position, velocity, and
        acceleration of this Particle
    mass : sympifyable
        A SymPy expression representing the Particle's mass
    Examples
    ========
    >>> from sympy.physics.mechanics import Particle, Point
    >>> from sympy import Symbol
    >>> po = Point('po')
    >>> m = Symbol('m')
    >>> pa = Particle('pa', po, m)
    >>> # Or you could change these later
    >>> pa.mass = m
    >>> pa.point = po
    """
    def __init__(self, name, point, mass):
        if not isinstance(name, str):
            raise TypeError('Supply a valid name.')
        self._name = name
        self.set_mass(mass)
        self.set_point(point)
        # Potential energy defaults to zero; set via set_potential_energy().
        self._pe = sympify(0)
    def __str__(self):
        return self._name
    __repr__ = __str__
    def get_mass(self):
        """Mass of the particle."""
        return self._mass
    def set_mass(self, mass):
        # Coerce to a SymPy expression so symbolic masses are supported.
        self._mass = sympify(mass)
    mass = property(get_mass, set_mass)
    def get_point(self):
        """Point of the particle."""
        return self._point
    def set_point(self, p):
        if not isinstance(p, Point):
            raise TypeError("Particle point attribute must be a Point object.")
        self._point = p
    point = property(get_point, set_point)
    def linear_momentum(self, frame):
        """Linear momentum of the particle.
        The linear momentum L, of a particle P, with respect to frame N is
        given by
        L = m * v
        where m is the mass of the particle, and v is the velocity of the
        particle in the frame N.
        Parameters
        ==========
        frame : ReferenceFrame
            The frame in which linear momentum is desired.
        Examples
        ========
        >>> from sympy.physics.mechanics import Particle, Point, ReferenceFrame
        >>> from sympy.physics.mechanics import dynamicsymbols
        >>> m, v = dynamicsymbols('m v')
        >>> N = ReferenceFrame('N')
        >>> P = Point('P')
        >>> A = Particle('A', P, m)
        >>> P.set_vel(N, v * N.x)
        >>> A.linear_momentum(N)
        m*v*N.x
        """
        return self.mass * self.point.vel(frame)
    def angular_momentum(self, point, frame):
        """Angular momentum of the particle about the point.
        The angular momentum H, about some point O of a particle, P, is given
        by:
        H = r x m * v
        where r is the position vector from point O to the particle P, m is
        the mass of the particle, and v is the velocity of the particle in
        the inertial frame, N.
        Parameters
        ==========
        point : Point
            The point about which angular momentum of the particle is desired.
        frame : ReferenceFrame
            The frame in which angular momentum is desired.
        Examples
        ========
        >>> from sympy.physics.mechanics import Particle, Point, ReferenceFrame
        >>> from sympy.physics.mechanics import dynamicsymbols
        >>> m, v, r = dynamicsymbols('m v r')
        >>> N = ReferenceFrame('N')
        >>> O = Point('O')
        >>> A = O.locatenew('A', r * N.x)
        >>> P = Particle('P', A, m)
        >>> P.point.set_vel(N, v * N.y)
        >>> P.angular_momentum(O, N)
        m*r*v*N.z
        """
        # '^' is the mechanics vector cross-product operator: r x (m * v).
        return self.point.pos_from(point) ^ (self.mass * self.point.vel(frame))
    def kinetic_energy(self, frame):
        """Kinetic energy of the particle
        The kinetic energy, T, of a particle, P, is given by
        'T = 1/2 m v^2'
        where m is the mass of particle P, and v is the velocity of the
        particle in the supplied ReferenceFrame.
        Parameters
        ==========
        frame : ReferenceFrame
            The Particle's velocity is typically defined with respect to
            an inertial frame but any relevant frame in which the velocity is
            known can be supplied.
        Examples
        ========
        >>> from sympy.physics.mechanics import Particle, Point, ReferenceFrame
        >>> from sympy import symbols
        >>> m, v, r = symbols('m v r')
        >>> N = ReferenceFrame('N')
        >>> O = Point('O')
        >>> P = Particle('P', O, m)
        >>> P.point.set_vel(N, v * N.y)
        >>> P.kinetic_energy(N)
        m*v**2/2
        """
        # '&' is the mechanics vector dot-product operator: (m/2 * v) . v.
        return (self.mass / sympify(2) * self.point.vel(frame) &
                self.point.vel(frame))
    def set_potential_energy(self, scalar):
        """Used to set the potential energy of the Particle.
        Parameters
        ==========
        scalar : Sympifyable
            The potential energy (a scalar) of the Particle.
        Examples
        ========
        >>> from sympy.physics.mechanics import Particle, Point
        >>> from sympy import symbols
        >>> m, g, h = symbols('m g h')
        >>> O = Point('O')
        >>> P = Particle('P', O, m)
        >>> P.set_potential_energy(m * g * h)
        """
        self._pe = sympify(scalar)
    @property
    def potential_energy(self):
        """The potential energy of the Particle.
        Examples
        ========
        >>> from sympy.physics.mechanics import Particle, Point
        >>> from sympy import symbols
        >>> m, g, h = symbols('m g h')
        >>> O = Point('O')
        >>> P = Particle('P', O, m)
        >>> P.set_potential_energy(m * g * h)
        >>> P.potential_energy
        g*h*m
        """
        return self._pe
| {
"repo_name": "amitjamadagni/sympy",
"path": "sympy/physics/mechanics/particle.py",
"copies": "3",
"size": "5890",
"license": "bsd-3-clause",
"hash": 8568144810573467000,
"line_mean": 25.5315315315,
"line_max": 79,
"alpha_frac": 0.543803056,
"autogenerated": false,
"ratio": 4.081774081774082,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 222
} |
__all__ = ["PatchMaker"]
from direct.p3d.FileSpec import FileSpec
from direct.p3d.SeqValue import SeqValue
from panda3d.core import *
import copy
class PatchMaker:
    """ This class will operate on an existing package install
    directory, as generated by the Packager, and create patchfiles
    between versions as needed. It is also used at runtime, to apply
    the downloaded patches. """
    # NOTE: this is Python 2 code (print statements and StandardError are
    # used below); it will not run unmodified under Python 3.
    class PackageVersion:
        """ A specific patch version of a package. This is not just
        the package's "version" string; it also corresponds to the
        particular patch version, which increments independently of
        the "version". """
        def __init__(self, packageName, platform, version, hostUrl, file):
            self.packageName = packageName
            self.platform = platform
            self.version = version
            self.hostUrl = hostUrl
            self.file = file
            self.printName = None
            # The Package object that produces this version, if this
            # is the current, base, or top form, respectively.
            self.packageCurrent = None
            self.packageBase = None
            self.packageTop = None
            # A list of patchfiles that can produce this version.
            self.fromPatches = []
            # A list of patchfiles that can start from this version.
            self.toPatches = []
            # A temporary file for re-creating the archive file for
            # this version.
            self.tempFile = None
        def cleanup(self):
            """ Removes the temporary archive file, if one was created. """
            if self.tempFile:
                self.tempFile.unlink()
        def getPatchChain(self, startPv, alreadyVisited = []):
            """ Returns a list of patches that, when applied in
            sequence to the indicated PackageVersion object, will
            produce this PackageVersion object. Returns None if no
            chain can be found. """
            # NOTE: the mutable default argument is safe here because the
            # list is copied (alreadyVisited[:]) before being modified.
            if self is startPv:
                # We're already here. A zero-length patch chain is
                # therefore the answer.
                return []
            if self in alreadyVisited:
                # We've already been here; this is a loop. Avoid
                # infinite recursion.
                return None
            alreadyVisited = alreadyVisited[:]
            alreadyVisited.append(self)
            bestPatchChain = None
            for patchfile in self.fromPatches:
                fromPv = patchfile.fromPv
                patchChain = fromPv.getPatchChain(startPv, alreadyVisited)
                if patchChain is not None:
                    # There's a path through this patchfile.
                    patchChain = patchChain + [patchfile]
                    if bestPatchChain is None or len(patchChain) < len(bestPatchChain):
                        bestPatchChain = patchChain
            # Return the shortest path found, or None if there were no
            # paths found.
            return bestPatchChain
        def getRecreateFilePlan(self, alreadyVisited = []):
            """ Returns the tuple (startFile, startPv, plan),
            describing how to recreate the archive file for this
            version. startFile and startPv is the Filename and
            packageVersion of the file to start with, and plan is a
            list of tuples (patchfile, pv), listing the patches to
            apply in sequence, and the packageVersion object
            associated with each patch. Returns (None, None, None) if
            there is no way to recreate this archive file. """
            # NOTE: as in getPatchChain, the mutable default is safe because
            # the list is copied before mutation.
            if self.tempFile:
                return (self.tempFile, self, [])
            if self in alreadyVisited:
                # We've already been here; this is a loop. Avoid
                # infinite recursion.
                return (None, None, None)
            alreadyVisited = alreadyVisited[:]
            alreadyVisited.append(self)
            if self.packageCurrent:
                # This PackageVersion instance represents the current
                # version of some package.
                package = self.packageCurrent
                return (Filename(package.packageDir, package.compressedFilename), self, [])
            if self.packageBase:
                # This PackageVersion instance represents the base
                # (oldest) version of some package.
                package = self.packageBase
                return (Filename(package.packageDir, package.baseFile.filename + '.pz'), self, [])
            # We'll need to re-create the file.
            bestPlan = None
            bestStartFile = None
            bestStartPv = None
            for patchfile in self.fromPatches:
                fromPv = patchfile.fromPv
                startFile, startPv, plan = fromPv.getRecreateFilePlan(alreadyVisited)
                if plan is not None:
                    # There's a path through this patchfile.
                    plan = plan + [(patchfile, self)]
                    if bestPlan is None or len(plan) < len(bestPlan):
                        bestPlan = plan
                        bestStartFile = startFile
                        bestStartPv = startPv
            # Return the shortest path found, or None if there were no
            # paths found.
            return (bestStartFile, bestStartPv, bestPlan)
        def getFile(self):
            """ Returns the Filename of the archive file associated
            with this version. If the file doesn't actually exist on
            disk, a temporary file will be created. Returns None if
            the file can't be recreated. """
            # NOTE(review): if no recreate plan exists, startFile comes back
            # None and the getExtension() call below would raise
            # AttributeError rather than returning None -- confirm callers
            # only invoke this on recreatable versions.
            startFile, startPv, plan = self.getRecreateFilePlan()
            if startFile.getExtension() == 'pz':
                # If the starting file is compressed, we have to
                # decompress it first.
                assert startPv.tempFile is None
                startPv.tempFile = Filename.temporary('', 'patch_')
                if not decompressFile(startFile, startPv.tempFile):
                    # Failure trying to decompress the file.
                    return None
                startFile = startPv.tempFile
            if not plan:
                # If plan is a zero-length list, we're already
                # here--return startFile. If plan is None, there's no
                # solution, and startFile is None. In either case, we
                # can return startFile.
                return startFile
            # If plan is a non-empty list, we have to walk the list to
            # apply the patch plan.
            prevFile = startFile
            for patchfile, pv in plan:
                fromPv = patchfile.fromPv
                patchFilename = Filename(patchfile.package.packageDir, patchfile.file.filename)
                result = self.applyPatch(prevFile, patchFilename)
                if not result:
                    # Failure trying to re-create the file.
                    return None
                pv.tempFile = result
                prevFile = result
            # Successfully patched.
            assert pv is self and prevFile is self.tempFile
            return prevFile
        def applyPatch(self, origFile, patchFilename):
            """ Applies the named patch to the indicated original
            file, storing the results in a temporary file, and returns
            that temporary Filename. Returns None on failure. """
            result = Filename.temporary('', 'patch_')
            p = Patchfile()
            if not p.apply(patchFilename, origFile, result):
                print "Internal patching failed: %s" % (patchFilename)
                return None
            return result
        def getNext(self, package):
            """ Gets the next patch in the chain towards this
            package. """
            for patch in self.toPatches:
                if patch.packageName == package.packageName and \
                   patch.platform == package.platform and \
                   patch.version == package.version and \
                   patch.hostUrl == package.hostUrl:
                    return patch.toPv
            return None
    class Patchfile:
        """ A single patchfile for a package. """
        def __init__(self, package):
            self.package = package
            self.packageName = package.packageName
            self.platform = package.platform
            self.version = package.version
            self.hostUrl = None
            # FileSpec for the patchfile itself
            self.file = None
            # FileSpec for the package file that the patch is applied to
            self.sourceFile = None
            # FileSpec for the package file that the patch generates
            self.targetFile = None
            # The PackageVersion corresponding to our sourceFile
            self.fromPv = None
            # The PackageVersion corresponding to our targetFile
            self.toPv = None
        def getSourceKey(self):
            """ Returns the key for locating the package that this
            patchfile can be applied to. """
            return (self.packageName, self.platform, self.version, self.hostUrl, self.sourceFile)
        def getTargetKey(self):
            """ Returns the key for locating the package that this
            patchfile will generate. """
            return (self.packageName, self.platform, self.version, self.hostUrl, self.targetFile)
        def fromFile(self, packageDir, patchFilename, sourceFile, targetFile):
            """ Creates the data structures from an existing patchfile
            on disk. """
            self.file = FileSpec()
            self.file.fromFile(packageDir, patchFilename)
            self.sourceFile = sourceFile
            self.targetFile = targetFile
        def loadXml(self, xpatch):
            """ Reads the data structures from an xml file. """
            self.packageName = xpatch.Attribute('name') or self.packageName
            self.platform = xpatch.Attribute('platform') or self.platform
            self.version = xpatch.Attribute('version') or self.version
            self.hostUrl = xpatch.Attribute('host') or self.hostUrl
            self.file = FileSpec()
            self.file.loadXml(xpatch)
            xsource = xpatch.FirstChildElement('source')
            if xsource:
                self.sourceFile = FileSpec()
                self.sourceFile.loadXml(xsource)
            xtarget = xpatch.FirstChildElement('target')
            if xtarget:
                self.targetFile = FileSpec()
                self.targetFile.loadXml(xtarget)
        def makeXml(self, package):
            """ Encodes this patchfile as a TiXmlElement, suitable for
            writing into the package's desc file. Attributes equal to
            the owning package's values are omitted. """
            xpatch = TiXmlElement('patch')
            if self.packageName != package.packageName:
                xpatch.SetAttribute('name', self.packageName)
            if self.platform != package.platform:
                xpatch.SetAttribute('platform', self.platform)
            if self.version != package.version:
                xpatch.SetAttribute('version', self.version)
            if self.hostUrl != package.hostUrl:
                xpatch.SetAttribute('host', self.hostUrl)
            self.file.storeXml(xpatch)
            xsource = TiXmlElement('source')
            self.sourceFile.storeMiniXml(xsource)
            xpatch.InsertEndChild(xsource)
            xtarget = TiXmlElement('target')
            self.targetFile.storeMiniXml(xtarget)
            xpatch.InsertEndChild(xtarget)
            return xpatch
    class Package:
        """ This is a particular package. This contains all of the
        information needed to reconstruct the package's desc file. """
        def __init__(self, packageDesc, patchMaker, xpackage = None):
            self.packageDir = Filename(patchMaker.installDir, packageDesc.getDirname())
            self.packageDesc = packageDesc
            self.patchMaker = patchMaker
            self.contentsDocPackage = xpackage
            self.patchVersion = 1
            self.currentPv = None
            self.basePv = None
            self.topPv = None
            self.packageName = None
            self.platform = None
            self.version = None
            self.hostUrl = None
            self.currentFile = None
            self.baseFile = None
            self.doc = None
            self.anyChanges = False
            self.patches = []
        def getCurrentKey(self):
            """ Returns the key to locate the current version of this
            package. """
            return (self.packageName, self.platform, self.version, self.hostUrl, self.currentFile)
        def getBaseKey(self):
            """ Returns the key to locate the "base" or oldest version
            of this package. """
            return (self.packageName, self.platform, self.version, self.hostUrl, self.baseFile)
        def getTopKey(self):
            """ Returns the key to locate the "top" or newest version
            of this package. """
            return (self.packageName, self.platform, self.version, self.hostUrl, self.topFile)
        def getGenericKey(self, fileSpec):
            """ Returns the key that has the indicated hash. """
            return (self.packageName, self.platform, self.version, self.hostUrl, fileSpec)
        def readDescFile(self, doProcessing = False):
            """ Reads the existing package.xml file and stores it in
            this class for later rewriting. if doProcessing is true,
            it may massage the file and the directory contents in
            preparation for building patches. Returns true on
            success, false on failure. """
            self.anyChanges = False
            packageDescFullpath = Filename(self.patchMaker.installDir, self.packageDesc)
            self.doc = TiXmlDocument(packageDescFullpath.toOsSpecific())
            if not self.doc.LoadFile():
                print "Couldn't read %s" % (packageDescFullpath)
                return False
            xpackage = self.doc.FirstChildElement('package')
            if not xpackage:
                return False
            self.packageName = xpackage.Attribute('name')
            self.platform = xpackage.Attribute('platform')
            self.version = xpackage.Attribute('version')
            # All packages we defined in-line are assigned to the
            # "none" host. TODO: support patching from packages on
            # other hosts, which means we'll need to fill in a value
            # here for those hosts.
            self.hostUrl = None
            self.currentFile = None
            self.baseFile = None
            self.topFile = None
            self.compressedFilename = None
            compressedFile = None
            # Assume there are changes for this version, until we
            # discover that there aren't.
            isNewVersion = True
            # Get the actual current version.
            xarchive = xpackage.FirstChildElement('uncompressed_archive')
            if xarchive:
                self.currentFile = FileSpec()
                self.currentFile.loadXml(xarchive)
            # Get the top_version--the top (newest) of the patch
            # chain.
            xarchive = xpackage.FirstChildElement('top_version')
            if xarchive:
                self.topFile = FileSpec()
                self.topFile.loadXml(xarchive)
                if self.topFile.hash == self.currentFile.hash:
                    # No new version this pass.
                    isNewVersion = False
                else:
                    # There's a new version this pass. Update it.
                    self.anyChanges = True
            else:
                # If there isn't a top_version yet, we have to make
                # one, by duplicating the currentFile.
                self.topFile = copy.copy(self.currentFile)
                self.anyChanges = True
            # Get the current patch version. If we have a
            # patch_version attribute, it refers to this particular
            # instance of the file, and that is the current patch
            # version number. If we only have a last_patch_version
            # attribute, it means a patch has not yet been built for
            # this particular instance, and that number is the
            # previous version's patch version number.
            patchVersion = xpackage.Attribute('patch_version')
            if patchVersion:
                self.patchVersion = int(patchVersion)
            else:
                patchVersion = xpackage.Attribute('last_patch_version')
                if patchVersion:
                    self.patchVersion = int(patchVersion)
                    if isNewVersion:
                        self.patchVersion += 1
                self.anyChanges = True
            # Put the patchVersion in the compressed filename, for
            # cache-busting. This means when the version changes, its
            # URL will also change, guaranteeing that users will
            # download the latest version, and not some stale cache
            # file.
            xcompressed = xpackage.FirstChildElement('compressed_archive')
            if xcompressed:
                compressedFile = FileSpec()
                compressedFile.loadXml(xcompressed)
                oldCompressedFilename = compressedFile.filename
                self.compressedFilename = oldCompressedFilename
                if doProcessing:
                    newCompressedFilename = '%s.%s.pz' % (self.currentFile.filename, self.patchVersion)
                    if newCompressedFilename != oldCompressedFilename:
                        oldCompressedPathname = Filename(self.packageDir, oldCompressedFilename)
                        newCompressedPathname = Filename(self.packageDir, newCompressedFilename)
                        if oldCompressedPathname.renameTo(newCompressedPathname):
                            compressedFile.fromFile(self.packageDir, newCompressedFilename)
                            compressedFile.storeXml(xcompressed)
                        self.compressedFilename = newCompressedFilename
                        self.anyChanges = True
            # Get the base_version--the bottom (oldest) of the patch
            # chain.
            xarchive = xpackage.FirstChildElement('base_version')
            if xarchive:
                self.baseFile = FileSpec()
                self.baseFile.loadXml(xarchive)
            else:
                # If there isn't a base_version yet, we have to make
                # one, by duplicating the currentFile.
                self.baseFile = copy.copy(self.currentFile)
                # Note that the we only store the compressed version
                # of base_filename on disk, but we store the md5 of
                # the uncompressed version in the xml file. To
                # emphasize this, we name it without the .pz extension
                # in the xml file, even though the compressed file on
                # disk actually has a .pz extension.
                self.baseFile.filename += '.base'
                # Also duplicate the (compressed) file itself.
                if doProcessing and self.compressedFilename:
                    fromPathname = Filename(self.packageDir, self.compressedFilename)
                    toPathname = Filename(self.packageDir, self.baseFile.filename + '.pz')
                    fromPathname.copyTo(toPathname)
                self.anyChanges = True
            self.patches = []
            xpatch = xpackage.FirstChildElement('patch')
            while xpatch:
                patchfile = PatchMaker.Patchfile(self)
                patchfile.loadXml(xpatch)
                self.patches.append(patchfile)
                xpatch = xpatch.NextSiblingElement('patch')
            return True
        def writeDescFile(self):
            """ Rewrites the desc file with the new patch
            information. """
            if not self.anyChanges:
                # No need to rewrite.
                return
            xpackage = self.doc.FirstChildElement('package')
            if not xpackage:
                return
            packageSeq = SeqValue()
            packageSeq.loadXml(xpackage, 'seq')
            packageSeq += 1
            packageSeq.storeXml(xpackage, 'seq')
            # Remove all of the old patch entries from the desc file
            # we read earlier.
            xremove = []
            for value in ['base_version', 'top_version', 'patch']:
                xpatch = xpackage.FirstChildElement(value)
                while xpatch:
                    xremove.append(xpatch)
                    xpatch = xpatch.NextSiblingElement(value)
            for xelement in xremove:
                xpackage.RemoveChild(xelement)
            xpackage.RemoveAttribute('last_patch_version')
            # Now replace them with the current patch information.
            xpackage.SetAttribute('patch_version', str(self.patchVersion))
            xarchive = TiXmlElement('base_version')
            self.baseFile.storeXml(xarchive)
            xpackage.InsertEndChild(xarchive)
            # The current version is now the top version.
            xarchive = TiXmlElement('top_version')
            self.currentFile.storeXml(xarchive)
            xpackage.InsertEndChild(xarchive)
            for patchfile in self.patches:
                xpatch = patchfile.makeXml(self)
                xpackage.InsertEndChild(xpatch)
            self.doc.SaveFile()
            # Also copy the seq to the import desc file, for
            # documentation purposes.
            importDescFilename = str(self.packageDesc)[:-3] + 'import.xml'
            importDescFullpath = Filename(self.patchMaker.installDir, importDescFilename)
            doc = TiXmlDocument(importDescFullpath.toOsSpecific())
            if doc.LoadFile():
                xpackage = doc.FirstChildElement('package')
                if xpackage:
                    packageSeq.storeXml(xpackage, 'seq')
                doc.SaveFile()
            else:
                print "Couldn't read %s" % (importDescFullpath)
            if self.contentsDocPackage:
                # Now that we've rewritten the xml file, we have to
                # change the contents.xml file that references it to
                # indicate the new file hash.
                fileSpec = FileSpec()
                fileSpec.fromFile(self.patchMaker.installDir, self.packageDesc)
                fileSpec.storeXml(self.contentsDocPackage)
                # Also important to update the import.xml hash.
                ximport = self.contentsDocPackage.FirstChildElement('import')
                if ximport:
                    fileSpec = FileSpec()
                    fileSpec.fromFile(self.patchMaker.installDir, importDescFilename)
                    fileSpec.storeXml(ximport)
                # Also copy the package seq value into the
                # contents.xml file, mainly for documentation purposes
                # (the authoritative seq value is within the desc
                # file).
                packageSeq.storeXml(self.contentsDocPackage, 'seq')
    # PatchMaker constructor.
    def __init__(self, installDir):
        self.installDir = installDir
        self.packageVersions = {}
        self.packages = []
    def buildPatches(self, packageNames = None):
        """ Makes the patches required in a particular directory
        structure on disk. If packageNames is None, this makes
        patches for all packages; otherwise, it should be a list of
        package name strings, limiting the set of packages that are
        processed. """
        if not self.readContentsFile():
            return False
        self.buildPatchChains()
        if packageNames is None:
            self.processAllPackages()
        else:
            self.processSomePackages(packageNames)
        self.writeContentsFile()
        self.cleanup()
        return True
    def cleanup(self):
        """ Should be called on exit to remove temporary files and
        such created during processing. """
        for pv in self.packageVersions.values():
            pv.cleanup()
    def getPatchChainToCurrent(self, descFilename, fileSpec):
        """ Reads the package defined in the indicated desc file, and
        constructs a patch chain from the version represented by
        fileSpec to the current version of this package, if possible.
        Returns the patch chain if successful, or None otherwise. """
        package = self.readPackageDescFile(descFilename)
        if not package:
            return None
        self.buildPatchChains()
        fromPv = self.getPackageVersion(package.getGenericKey(fileSpec))
        toPv = package.currentPv
        patchChain = None
        if toPv and fromPv:
            patchChain = toPv.getPatchChain(fromPv)
        return patchChain
    def readPackageDescFile(self, descFilename):
        """ Reads a desc file associated with a particular package,
        and adds the package to self.packages. Returns the Package
        object, or None on failure. """
        package = self.Package(Filename(descFilename), self)
        if not package.readDescFile(doProcessing = False):
            return None
        self.packages.append(package)
        return package
    def readContentsFile(self):
        """ Reads the contents.xml file at the beginning of
        processing. """
        contentsFilename = Filename(self.installDir, 'contents.xml')
        doc = TiXmlDocument(contentsFilename.toOsSpecific())
        if not doc.LoadFile():
            # Couldn't read file.
            print "couldn't read %s" % (contentsFilename)
            return False
        xcontents = doc.FirstChildElement('contents')
        if xcontents:
            contentsSeq = SeqValue()
            contentsSeq.loadXml(xcontents)
            contentsSeq += 1
            contentsSeq.storeXml(xcontents)
            xpackage = xcontents.FirstChildElement('package')
            while xpackage:
                solo = xpackage.Attribute('solo')
                solo = int(solo or '0')
                filename = xpackage.Attribute('filename')
                if filename and not solo:
                    filename = Filename(filename)
                    package = self.Package(filename, self, xpackage)
                    package.readDescFile(doProcessing = True)
                    self.packages.append(package)
                xpackage = xpackage.NextSiblingElement('package')
        self.contentsDoc = doc
        return True
    def writeContentsFile(self):
        """ Writes the contents.xml file at the end of processing. """
        # We also have to write the desc file for all packages that
        # might need it, because we might have changed some of them on
        # read.
        for package in self.packages:
            package.writeDescFile()
        # The above writeDescFile() call should also update each
        # package's element within the contents.xml document, so all
        # we have to do now is write out the document.
        self.contentsDoc.SaveFile()
    def getPackageVersion(self, key):
        """ Returns a shared PackageVersion object for the indicated
        key. """
        packageName, platform, version, hostUrl, file = key
        # We actually key on the hash, not the FileSpec itself.
        k = (packageName, platform, version, hostUrl, file.hash)
        pv = self.packageVersions.get(k, None)
        if not pv:
            pv = self.PackageVersion(*key)
            self.packageVersions[k] = pv
        return pv
    def buildPatchChains(self):
        """ Builds up the chains of PackageVersions and the patchfiles
        that connect them. """
        self.patchFilenames = {}
        for package in self.packages:
            if not package.baseFile:
                # This package doesn't have any versions yet.
                continue
            currentPv = self.getPackageVersion(package.getCurrentKey())
            package.currentPv = currentPv
            currentPv.packageCurrent = package
            currentPv.printName = package.currentFile.filename
            basePv = self.getPackageVersion(package.getBaseKey())
            package.basePv = basePv
            basePv.packageBase = package
            basePv.printName = package.baseFile.filename
            topPv = self.getPackageVersion(package.getTopKey())
            package.topPv = topPv
            topPv.packageTop = package
            for patchfile in package.patches:
                self.recordPatchfile(patchfile)
    def recordPatchfile(self, patchfile):
        """ Adds the indicated patchfile to the patch chains. """
        self.patchFilenames[patchfile.file.filename] = patchfile
        fromPv = self.getPackageVersion(patchfile.getSourceKey())
        patchfile.fromPv = fromPv
        fromPv.toPatches.append(patchfile)
        toPv = self.getPackageVersion(patchfile.getTargetKey())
        patchfile.toPv = toPv
        toPv.fromPatches.append(patchfile)
        toPv.printName = patchfile.file.filename
    def processSomePackages(self, packageNames):
        """ Builds missing patches only for the named packages. """
        remainingNames = packageNames[:]
        for package in self.packages:
            if package.packageName in packageNames:
                self.processPackage(package)
                if package.packageName in remainingNames:
                    remainingNames.remove(package.packageName)
        if remainingNames:
            print "Unknown packages: %s" % (remainingNames,)
    def processAllPackages(self):
        """ Walks through the list of packages, and builds missing
        patches for each one. """
        for package in self.packages:
            self.processPackage(package)
    def processPackage(self, package):
        """ Builds missing patches for the indicated package. """
        if not package.baseFile:
            # No versions.
            return
        # What's the current version on the top of the tree?
        topPv = package.topPv
        currentPv = package.currentPv
        if topPv != currentPv:
            # They're different, so build a new patch.
            filename = Filename(package.currentFile.filename + '.%s.patch' % (package.patchVersion))
            assert filename not in self.patchFilenames
            if not self.buildPatch(topPv, currentPv, package, filename):
                raise StandardError, "Couldn't build patch."
    def buildPatch(self, v1, v2, package, patchFilename):
        """ Builds a patch from PackageVersion v1 to PackageVersion
        v2, and stores it in patchFilename.pz. Returns true on
        success, false on failure."""
        pathname = Filename(package.packageDir, patchFilename)
        if not self.buildPatchFile(v1.getFile(), v2.getFile(), pathname,
                                   v1.printName, v2.printName):
            return False
        compressedPathname = Filename(pathname + '.pz')
        compressedPathname.unlink()
        if not compressFile(pathname, compressedPathname, 9):
            raise StandardError, "Couldn't compress patch."
        pathname.unlink()
        patchfile = self.Patchfile(package)
        patchfile.fromFile(package.packageDir, patchFilename + '.pz',
                           v1.file, v2.file)
        package.patches.append(patchfile)
        package.anyChanges = True
        self.recordPatchfile(patchfile)
        return True
    def buildPatchFile(self, origFilename, newFilename, patchFilename,
                       printOrigName, printNewName):
        """ Creates a patch file from origFilename to newFilename,
        storing the result in patchFilename. Returns true on success,
        false on failure. """
        if not origFilename.exists():
            # No original version to patch from.
            return False
        print "Building patch from %s to %s" % (printOrigName, printNewName)
        patchFilename.unlink()
        p = Patchfile()  # The C++ class
        if p.build(origFilename, newFilename, patchFilename):
            return True
        # Unable to build a patch for some reason.
        patchFilename.unlink()
        return False
| {
"repo_name": "silly-wacky-3-town-toon/SOURCE-COD",
"path": "Panda3D-1.10.0/direct/p3d/PatchMaker.py",
"copies": "9",
"size": "32276",
"license": "apache-2.0",
"hash": -1788616039141211100,
"line_mean": 38.6511056511,
"line_max": 103,
"alpha_frac": 0.5851716446,
"autogenerated": false,
"ratio": 4.763282172373081,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9848453816973082,
"avg_score": null,
"num_lines": null
} |
__all__ = ['path_sign_to_signed_nodes', 'signed_nodes_to_signed_edge',
           'get_sorted_neighbors', 'get_subgraph', 'Node', 'Edge',
           'EdgeFilter', 'SendType']
import logging
import networkx as nx
import functools
from typing import List, Tuple, Union, Optional, Callable, Set
logger = logging.getLogger(__name__)
# Derived type hints
# A node is either a plain name or a (name, sign) pair for signed graphs.
Node = Union[str, Tuple[str, int]]
Edge = Tuple[Node, Node]
# Predicate deciding whether the edge (u, v) of graph g is allowed.
EdgeFilter = Callable[[nx.DiGraph, Node, Node], bool]
SendType = Tuple[Optional[Set[Node]], Optional[Set[Edge]]]
def path_sign_to_signed_nodes(source, target, edge_sign):
    """Translates a signed edge or path to valid signed nodes

    Pairs with a negative source node are filtered out.

    Parameters
    ----------
    source : str|int
        The source node
    target : str|int
        The target node
    edge_sign : int
        The sign of the edge

    Returns
    -------
    sign_tuple : (a, sign), (b, sign)
        Tuple of tuples of the valid combination of signed nodes
    """
    # Sign convention: 0 == positive, 1 == negative.
    try:
        sign = int(edge_sign)
    except ValueError:
        # Unparseable sign: warn and signal failure with a pair of
        # (None, None) tuples.
        logger.warning('Invalid sign %s when translating edge sign to int'
                       % edge_sign)
        return (None, None), (None, None)
    # A positive path keeps both endpoints positive; any other sign flips
    # the source node's sign while the target stays positive.
    source_node = (source, 0) if sign == 0 else (source, 1)
    return source_node, (target, 0)
def signed_nodes_to_signed_edge(source, target):
    """Create the triple (node, node, sign) from a pair of signed nodes

    Assuming source, target forms an edge of signed nodes:
    edge = (a, sign), (b, sign), return the corresponding signed edge triple

    Parameters
    ----------
    source : tuple(str|int, sign)
        A valid signed node
    target : tuple(str|int, sign)
        A valid signed node

    Returns
    -------
    tuple
        A tuple, (source, target, sign), representing the corresponding
        signed edge.
    """
    # Sign convention: 0 == positive, 1 == negative. Matching node signs
    # yield a positive edge; mismatched signs yield a negative edge.
    src_name, src_sign = source
    tgt_name, tgt_sign = target
    try:
        same_sign = int(src_sign) == int(tgt_sign)
    except ValueError:
        # Unparseable node signs: warn and return a triple of Nones.
        logger.warning('Error translating signed nodes to signed edge using '
                       '(%s, %s)' % (source, target))
        return None, None, None
    return (src_name, tgt_name, 0) if same_sign else (src_name, tgt_name, 1)
def get_sorted_neighbors(
        G: nx.DiGraph,
        node: Node,
        reverse: bool,
        force_edges: Optional[List[Edge]] = None,
        edge_filter: Optional[EdgeFilter] = None
) -> List[Node]:
    """Filter and sort neighbors of a node in descending order by belief

    Parameters
    ----------
    G
        A networkx DiGraph
    node
        A valid node name or signed node name
    reverse
        Indicates direction of search. Neighbors are either successors
        (downstream search) or predecessors (reverse search).
    force_edges
        A list of allowed edges. If provided, only allow neighbors that
        can be reached by the allowed edges.
    edge_filter
        If provided, must be a function taking a graph g and the nodes u, v
        of an edge, returning True if the edge is allowed, otherwise False.

    Returns
    -------
    List[Node]
        A list of nodes representing the filtered and sorted neighbors
    """
    if force_edges or edge_filter:
        # Work on explicit edge tuples so that both restrictions can be
        # applied before the neighbor endpoint is extracted
        get_edges = G.in_edges if reverse else G.out_edges
        neighbor_ix = 0 if reverse else 1
        candidate_edges = set(get_edges(node))
        if force_edges:
            candidate_edges = candidate_edges.intersection(set(force_edges))
        if edge_filter:
            neighbors = (edge[neighbor_ix] for edge in candidate_edges
                         if edge_filter(G, *edge))
        else:
            neighbors = (edge[neighbor_ix] for edge in candidate_edges)
    else:
        # No edge filtering applied
        neighbors = G.predecessors(node) if reverse else G.successors(node)
    # The belief of the connecting edge determines the ordering
    if reverse:
        belief = lambda n: G.edges[n, node].get('belief', 0)
    else:
        belief = lambda n: G.edges[node, n].get('belief', 0)
    return sorted(neighbors, key=belief, reverse=True)
def get_subgraph(g, edge_filter_func):
    """Get a subgraph of original graph filtered by a provided function."""
    logger.info('Getting subgraph with %s function' % edge_filter_func)
    # Bind the graph as the first argument expected by the filter function
    bound_filter = functools.partial(edge_filter_func, g)
    view = nx.subgraph_view(g, filter_edge=bound_filter)
    # Copy the view to obtain an independent graph object instead of a view
    return view.copy()
| {
"repo_name": "bgyori/indra",
"path": "indra/explanation/pathfinding/util.py",
"copies": "3",
"size": "4961",
"license": "bsd-2-clause",
"hash": -4038458236941311500,
"line_mean": 30.8012820513,
"line_max": 78,
"alpha_frac": 0.5966539004,
"autogenerated": false,
"ratio": 3.9341792228390164,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6030833123239017,
"avg_score": null,
"num_lines": null
} |
__all__ = ['path_sign_to_signed_nodes', 'signed_nodes_to_signed_edge',
'get_sorted_neighbors']
import logging
logger = logging.getLogger(__name__)
def path_sign_to_signed_nodes(source, target, edge_sign):
    """Translates a signed edge or path to valid signed nodes

    Pairs with a negative source node are filtered out.

    Parameters
    ----------
    source : str|int
        The source node
    target : str|int
        The target node
    edge_sign : int
        The sign of the edge

    Returns
    -------
    sign_tuple : (a, sign), (b, sign)
        Tuple of tuples of the valid combination of signed nodes
    """
    # Sign convention: + == 0, - == 1. Other sign combinations are valid
    # in principle but not used in this context.
    try:
        if int(edge_sign) != 0:
            return (source, 1), (target, 0)
        return (source, 0), (target, 0)
    except ValueError:
        logger.warning('Invalid sign %s when translating edge sign to int'
                       % edge_sign)
        return (None, None), (None, None)
def signed_nodes_to_signed_edge(source, target):
    """Create the triple (node, node, sign) from a pair of signed nodes

    Assuming source, target forms an edge of signed nodes:
    edge = (a, sign), (b, sign), return the corresponding signed edge triple

    Parameters
    ----------
    source : tuple(str|int, sign)
        A valid signed node
    target : tuple(str|int, sign)
        A valid signed node

    Returns
    -------
    tuple
        A tuple, (source, target, sign), representing the corresponding
        signed edge.
    """
    # Sign convention: + == 0, - == 1. A positive edge joins equally signed
    # nodes, a negative edge joins oppositely signed nodes.
    source_name, source_sign = source
    target_name, target_sign = target
    try:
        same_sign = int(source_sign) == int(target_sign)
    except ValueError:
        logger.warning('Error translating signed nodes to signed edge using '
                       '(%s, %s)' % (source, target))
        return None, None, None
    return (source_name, target_name, 0) if same_sign \
        else (source_name, target_name, 1)
def get_sorted_neighbors(G, node, reverse, force_edges=None):
    """Sort the returned neighbors in descending order by belief

    Parameters
    ----------
    G : nx.DiGraph
        A networkx DiGraph
    node : str|int
        A valid networkx node name
    reverse : bool
        Indicates direction of search. Neighbors are either successors
        (downstream search) or predecessors (reverse search).
    force_edges : list
        A list of allowed edges. If provided, only allow neighbors that
        can be reached by the allowed edges.

    Returns
    -------
    list
        Neighbors of `node`, sorted in descending order by the belief of
        the connecting edge (edges without a 'belief' attribute count as 0).
    """
    # Select edge accessor and the tuple index of the neighbor endpoint
    # once, which removes the duplicated sort logic of the two original
    # direction-specific branches.
    if force_edges:
        neigh_edges = G.in_edges if reverse else G.out_edges
        ix = 0 if reverse else 1
        neighbors = [e[ix] for e in
                     set(neigh_edges(node)).intersection(set(force_edges))]
    else:
        neighbors = G.predecessors(node) if reverse else G.successors(node)
    if reverse:
        belief = lambda n: G.edges[(n, node)].get('belief', 0)
    else:
        belief = lambda n: G.edges[(node, n)].get('belief', 0)
    return sorted(neighbors, key=belief, reverse=True)
| {
"repo_name": "johnbachman/belpy",
"path": "indra/explanation/pathfinding/util.py",
"copies": "1",
"size": "3526",
"license": "mit",
"hash": -552622888989789700,
"line_mean": 29.6608695652,
"line_max": 78,
"alpha_frac": 0.5601247873,
"autogenerated": false,
"ratio": 3.979683972911964,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 115
} |
__all__ = ["Pattern"]
from ..utils import isfunction
import lasagne.objectives
from lasagne.layers import get_all_layers
from lasagne.layers import InputLayer
from lasagne.layers import Layer
import theano.tensor as T
import numpy as np
import itertools
from collections import OrderedDict
import inspect
import copy
import os
import tarfile
from warnings import warn
class Pattern(object):
"""
The :class:`Pattern` class represents a side information pattern and
should be subclassed when implementing a new pattern.
It is similar to :class:`lasagne.layers.Layer` and mimics some of
its functionality, but does not inherit from it.
*How to implement your own pattern?*
A minimal example should implement the following functions:
- get_side_objective
- default_target_objective
- default_side_objective
- default_beta_input
- default_beta_output_shape
See the docstrings for each function to understand what it should do.
Depending on the pattern, you also might need to override (usually if you
need additional side variables):
- training_input_vars
- side_vars
If your beta has multiple inputs, you will need to implement:
- get_beta_output_for
Optionally, if your side variable is a supervised learning target, then you
should return the theano variable representing this target in the method:
- side_target_var(self)
Parameters
----------
phi : lasagne layer
a lasagne layer for computing the intermediate representation
:math:`\phi(s)=y` from the input x
psi : lasagne layer
a lasagne layer for computing the prediction of the target
from the intermediate representation s, :math:`\psi(s)=y`
target_var : theano tensor variable
Theano variable representing the target. Required for formulating the target loss.
side_var: theano tensor variable
Theano variable representing the side information.
The semantics of this variable depend on the pattern.
Note that additional side variables might be required by a pattern.
input_shape : int or tuple
Shape of the input variable
target_shape : int or tuple
Shape of the target variable
side_shape : int or tuple
Shape of the side information variable
representation_shape : int or tuple
Shape of the intermediate representation to be learned
(for some patterns that may coincide with the side_shape)
target_loss: theano tensor variable, optional
Function (e.g. lasagne objective) for the optimizing the target.
All patterns have standard objectives applicable here
side_loss: theano tensor variable, optional
Theano expression or lasagne objective for the side loss.
Most patterns have standard objectives applicable here.
name : string, optional
An optional name to attach to this layer.
"""
PHI_OUTPUT_SHAPE='PHI_OUTPUT_SHAPE'
PSI_OUTPUT_SHAPE='PSI_OUTPUT_SHAPE'
BETA_OUTPUT_SHAPE='BETA_OUTPUT_SHAPE'
    def __init__(self,
                 phi, psi, beta=None,
                 input_var=None, target_var=None, side_var=None,
                 input_shape=None, target_shape=None, side_shape=None,
                 representation_shape=None,
                 target_loss=None, side_loss=None,
                 name=None):
        """Build the pattern from its functions phi, psi and (optional) beta.

        See the class docstring for a description of the parameters.
        phi/psi/beta may be given either as lasagne layers or as
        nolearn-style lists of (LayerClass, kwargs) tuples, in which case
        they are instantiated here via _initialize_function.
        """
        self.phi = phi
        self.psi = psi
        self.beta = beta
        self.input_var = input_var
        self.target_var = target_var
        self.side_var = side_var
        self.input_shape = input_shape
        self.target_shape = target_shape
        self.side_shape = side_shape
        self.representation_shape = representation_shape
        self.target_loss = target_loss
        self.target_loss_fn = None
        self.side_loss = side_loss
        self.side_loss_fn = None
        self.name = name
        self.input_layer = None
        self.get_output_kwargs = []
        # Losses may be passed either as plain python functions (preferred,
        # stored in *_loss_fn) or as pre-built theano expressions (deprecated).
        if isfunction(self.target_loss):
            self.target_loss_fn = self.target_loss
            self.target_loss = None
        elif self.target_loss is not None:
            warn("target_loss: passing something different than a python function object "
                 "to the constructor of a Pattern is deprecated. "
                 "Recommended way is to use a function from lasagne.objectives "
                 "or equivalent." )
        if isfunction(self.side_loss):
            self.side_loss_fn = self.side_loss
            self.side_loss = None
        elif self.side_loss is not None:
            warn("side_loss: passing something different than a python function object "
                 "to the constructor of a Pattern is deprecated. "
                 "Recommended way is to use a function from lasagne.objectives "
                 "or equivalent." )
        # convert phi, psi and beta to real lasagne layers if they
        # are passed as a nolearn-style list/tuple description
        if isinstance(phi, list) or isinstance(phi, tuple):
            # if no input layer in list -> build it
            assert (input_var is not None)
            # deepcopy so _initialize_function can insert the input layer
            # without mutating the caller's list
            phi = copy.deepcopy(phi)
            self.phi = \
                self._initialize_function('phi', phi, self.default_phi_input,
                                          self.PHI_OUTPUT_SHAPE,
                                          self.representation_shape)
            self.input_layer = lasagne.layers.get_all_layers(self.phi)[0]
        else:
            # extract input layer and variable from the given phi
            self.input_layer = lasagne.layers.get_all_layers(self.phi)[0]
            self.input_var = self.input_layer.input_var
        self.phi._fun_name = "phi"
        if isinstance(psi, list) or isinstance(psi, tuple):
            # if no input layer in list -> build it
            psi = copy.deepcopy(psi)
            self.psi = \
                self._initialize_function('psi', psi, self.default_psi_input,
                                          self.PSI_OUTPUT_SHAPE,
                                          self.target_shape)
        self.psi._fun_name = "psi"
        # NOTE(review): this condition parses as
        # "(beta is not None and isinstance(beta, list)) or isinstance(beta, tuple)";
        # it only behaves as intended because isinstance(None, tuple) is False.
        if beta is not None and isinstance(beta, list) or isinstance(beta, tuple):
            # if no input layer in list -> build it
            beta = copy.deepcopy(beta)
            try:
                self.beta = \
                    self._initialize_function('beta', beta, self.default_beta_input,
                                              self.BETA_OUTPUT_SHAPE,
                                              self.default_beta_output_shape
                                              )
                self.beta._fun_name = "beta"
            except ValueError as e:
                raise Exception("Could not replace BETA_OUTPUT_SHAPE marker --"
                                " is the value returned by self.default_beta_output_shape"
                                " valid? (not None)\n"
                                " Futher hints: " + str(e))
        # tag the parameters of each function with the name of the function
        # NOTE(review): beta may be None here -- presumably
        # _tag_function_parameters tolerates that; verify.
        for fun, fun_name in zip([self.phi, self.psi, self.beta], ['phi', 'psi', 'beta']):
            self._tag_function_parameters(fun, fun_name)
        self._create_target_objective()
        self._create_side_objective()
def get_side_objective(self, input, target):
"""
Pattern-specific function to get the theano expression of the side objective.
Must be implemented by each pattern.
"""
raise NotImplementedError()
    @property
    def training_input_vars(self):
        """Return the theano variables that are required for training.

        Usually this will correspond to
        (input_var, target_var, side_var)
        which is also the default.
        Order matters!

        Returns
        -------
        tuple of theano tensor variables
        """
        return (self.input_var, self.target_var, self.side_var)
    @property
    def side_input_vars(self):
        """Return the theano input variables for validating the side loss.

        Per default we assume that it is all training variables except for
        the target variable (see :method:`Pattern.training_input_vars`) and
        the optional side target variable
        (see :method:`Pattern.side_target_var`).
        You can override this method in your pattern.
        Order matters!

        Returns
        -------
        tuple of theano tensor variables
        """
        # Membership is tested with list 'in' (element __eq__); keep the
        # list form rather than a set.
        excluded_vars = [self.target_var]
        if self.side_target_var is not None:
            excluded_vars.append(self.side_target_var)
        return tuple([i for i in self.training_input_vars if i not in excluded_vars])
    @property
    def side_target_var(self):
        """Return the theano target variable required for validating
        the side information (optional).

        This returns None per default.
        Override it if the side loss of the pattern is a supervised loss
        and one of the side variables is the supervised (side) target -
        and then return this variable.
        Also see :method:`Pattern.side_input_vars`

        Returns
        -------
        theano tensor variable
        """
        return None
    @property
    def side_vars(self):
        """Return the theano side-information variables of this pattern.

        Per default this is the 1-tuple (side_var, ); patterns that need
        additional side variables override this property.
        Order matters!

        Returns
        -------
        tuple of theano tensor variables
        """
        return (self.side_var, )
    @property
    def default_target_objective(self):
        """Return the default target objective used by this pattern.
        (implementation required)

        The target objective can be overridden by passing the
        target_loss argument to the constructor of a pattern.

        Returns
        -------
        theano expression
        """
        raise NotImplementedError()
    @property
    def default_side_objective(self):
        """Return the default side objective used by this pattern.
        (implementation required)

        The side objective can be overridden by passing the
        side_loss argument to the constructor of a pattern.

        Returns
        -------
        theano expression
        """
        raise NotImplementedError()
    @property
    def default_phi_input(self):
        """Specify the default input to the function phi in this pattern.

        May return either a tuple of a lasagne layer class and a dict of
        instantiation parameters, or a lasagne layer instance.

        Per default, this creates (once, lazily) and returns an InputLayer
        wrapping self.input_var with dimensionality self.input_shape.

        Returns
        -------
        tuple of lasagne layer class and dictionary, or lasagne layer instance
        """
        if self.input_layer is None:
            # create input layer; an int shape is promoted to a
            # (batch, features) shape with unspecified batch size
            input_dim = self.input_shape
            if isinstance(self.input_shape, int):
                input_dim = (None, self.input_shape)
            self.input_layer = lasagne.layers.InputLayer(shape=input_dim,
                                                         input_var=self.input_var, name="input")
        return self.input_layer
    @property
    def default_psi_input(self):
        """Specify the default input to the function psi in this pattern.

        May return either a tuple of a lasagne layer class and a dict of
        instantiation parameters, or a lasagne layer instance.

        Per default, this returns the output of phi.

        Returns
        -------
        tuple of lasagne layer class and dictionary, or lasagne layer instance
        """
        return self.phi
    @property
    def default_beta_input(self):
        """Specify the default input to the function beta in this pattern.
        (implementation required)

        May return either a tuple of a lasagne layer class and a dict of
        instantiation parameters, or a lasagne layer instance.

        Returns
        -------
        tuple of lasagne layer class and dictionary, or lasagne layer instance
        """
        raise NotImplementedError()
    @property
    def default_beta_output_shape(self):
        """Compute the output shape of the auxiliary function beta.
        (implementation required for every pattern using beta)

        This enables automatically building beta from a nolearn style
        function parameterization.

        Returns
        -------
        int or tuple of ints
        """
        raise NotImplementedError()
def _get_all_function_layers(self, fun):
"""
Get only the layers that belong to a certain function
"""
layers = []
for l in lasagne.layers.get_all_layers(fun):
if l._pattern_function == fun._fun_name:
layers.append(l)
return layers
    def _tag_function_parameters(self, fun, fun_name):
        """
        Helper function to add the tag `fun_name` (encoding the function name,
        e.g. phi or psi) to all parameters of the function `fun`.
        """
        for l in lasagne.layers.get_all_layers(fun):
            params = l.get_params()
            for p in params:
                # Layers shared with phi keep their 'phi' tag only; do not
                # additionally tag them as belonging to psi/beta.
                if fun_name != 'phi' and 'phi' in l.params[p]:
                    continue
                l.params[p].add(fun_name)
def _get_params_for(self, name):
"""This method has been adapted from the NeuralFit class in nolearn.
https://github.com/dnouri/nolearn/blob/master/nolearn/lasagne/base.py
Copyright (c) 2012-2015 Daniel Nouri"""
collected = {}
prefix = '{}_'.format(name)
params = vars(self)
more_params = self.more_params
for key, value in itertools.chain(params.items(), more_params.items()):
if key.startswith(prefix):
collected[key[len(prefix):]] = value
return collected
def _layer_name(self, layer_class, index):
"""This method has been adapted from the NeuralFit class in nolearn.
https://github.com/dnouri/nolearn/blob/master/nolearn/lasagne/base.py
Copyright (c) 2012-2015 Daniel Nouri"""
return "{}{}".format(
layer_class.__name__.lower().replace("layer", ""), index)
    def _initialize_function(self, fun_name, layers, input_layer_tuple,
                             output_shape_marker, output_shape):
        """Build phi, psi or beta from a nolearn style network-as-list
        description and return its output layer.

        Parameters: `fun_name` is 'phi'/'psi'/'beta'; `layers` is a list of
        ('name', LayerClass) or (LayerClass, kwargs) entries;
        `input_layer_tuple` is the pattern-defined input (layer or
        (layer, params)); occurrences of `output_shape_marker` in layer
        kwargs are replaced by `output_shape`.

        This method has been adapted from the NeuralFit class in nolearn.
        https://github.com/dnouri/nolearn/blob/master/nolearn/lasagne/base.py
        Copyright (c) 2012-2015 Daniel Nouri"""
        # OrderedDict of layers that also supports integer/slice indexing,
        # so earlier layers can be referenced by position or by name.
        class Layers(OrderedDict):
            def __getitem__(self, key):
                if isinstance(key, int):
                    return list(self.values()).__getitem__(key)
                elif isinstance(key, slice):
                    items = list(self.items()).__getitem__(key)
                    return Layers(items)
                else:
                    return super(Layers, self).__getitem__(key)
            def keys(self):
                return list(super(Layers, self).keys())
            def values(self):
                return list(super(Layers, self).values())
        fun_ = Layers()
        # check if layers contains an input layer; if not, create one below
        # NOTE(review): this scan assumes each entry provides a class plus a
        # kwargs dict; an instance entry (layer_kw None) would break the
        # .copy()/issubclass calls here -- verify against callers.
        user_input_layer = None
        for i, layer_def in enumerate(layers):
            if isinstance(layer_def[0], str):
                # The legacy format: ('name', Layer)
                layer_name, layer_factory = layer_def
                layer_kw = {'name': layer_name}
            else:
                # New format: (Layer, {'layer': 'kwargs'})
                layer_factory, layer_kw = layer_def
                layer_kw = layer_kw.copy()
            if issubclass(layer_factory, InputLayer):
                user_input_layer = layer_factory
                break
        if isinstance(input_layer_tuple, list) or isinstance(input_layer_tuple, tuple):
            input_layer, input_layer_params = input_layer_tuple
        else:
            input_layer, input_layer_params = input_layer_tuple, None
        if (inspect.isclass(input_layer) and issubclass(input_layer, InputLayer))\
                or isinstance(input_layer, InputLayer):
            if user_input_layer is not None:
                # TODO check that the user provided input layer is compatible
                # with the one that the pattern expects
                # ok - we stick to the user's input layer
                pass
            else:
                # push the input layer into the dictionary
                layers.insert(0, (input_layer, input_layer_params))
        else:  # input_layer is the output of another function
            if user_input_layer is not None:
                # the user has provided an input layer; this conflicts with
                # the functional input layer required by the pattern
                raise Exception("You have provided an input layer for %s,"
                                " but the pattern requires the input %s" % (fun_name, str(input_layer)))
            else:
                # push the input layer into the dictionary
                layers.insert(0, (input_layer, input_layer_params))
        # iterate through layers
        if isinstance(layers[0], Layer):
            # 'layers[0]' is already the output layer with type
            # 'lasagne.layers.Layer', so we only have to fill
            # 'fun_' and we're done:
            for i, layer in enumerate(get_all_layers(layers[0])):
                name = layer.name or self._layer_name(layer.__class__, i)
                fun_[name] = layer
                if self._get_params_for(name) != {}:
                    raise ValueError(
                        "You can't use keyword params when passing a Lasagne "
                        "instance object as the 'layers' parameter of "
                        "'Pattern'."
                    )
            return layers[0]
        # 'layers' are a list of '(Layer class, kwargs)', so
        # we'll have to actually instantiate the layers given the
        # arguments:
        layer = None
        for i, layer_def in enumerate(layers):
            if isinstance(layer_def[0], str):
                # The legacy format: ('name', Layer)
                layer_name, layer_factory = layer_def
                layer_kw = {'name': layer_name}
            else:
                # New format: (Layer, {'layer': 'kwargs'})
                layer_factory, layer_kw = layer_def
                if layer_kw is not None:
                    layer_kw = layer_kw.copy()
            layer_is_instance = False
            if layer_kw is None:
                # the passed object is an expression or an object instance,
                # hence we don't have to build it later
                layer_is_instance = True
                layer_kw = {'name': layer_factory.name}
            if 'name' not in layer_kw:
                layer_kw['name'] = fun_name + "_" + self._layer_name(layer_factory, i)
            if layer_kw['name'] in fun_:
                raise ValueError(
                    "Two layers with name {}.".format(layer_kw['name']))
            # Any layers that aren't subclasses of InputLayer are
            # assumed to require an 'incoming' parameter. By default,
            # we'll use the previous layer as input:
            if not layer_is_instance and not issubclass(layer_factory, InputLayer):
                if 'incoming' in layer_kw:
                    layer_kw['incoming'] = fun_[
                        layer_kw['incoming']]
                elif 'incomings' in layer_kw:
                    layer_kw['incomings'] = [
                        fun_[nm] for nm in layer_kw['incomings']]
                else:
                    layer_kw['incoming'] = layer
            # 'W'/'b' given as a string refer to the weights of a previously
            # built layer (weight sharing)
            for attr in ('W', 'b'):
                if isinstance(layer_kw.get(attr), str):
                    name = layer_kw[attr]
                    layer_kw[attr] = getattr(fun_[name], attr, None)
            # replace the output-shape marker with the actual shape
            for k,v in layer_kw.items():
                if v == output_shape_marker:
                    if output_shape is None:
                        raise ValueError("Cannot automatically set output shape (is None)"
                                         " for %s - did you set all required shape variables"
                                         " in the constructor of the pattern?"
                                         " (marker was: %s)" % (fun_name, output_shape_marker))
                    layer_kw[k] = output_shape
            if layer_is_instance:
                layer = layer_factory
                layer_wrapper = None
            else:
                try:
                    layer_wrapper = layer_kw.pop('layer_wrapper', None)
                    layer = layer_factory(**layer_kw)
                except TypeError as e:
                    msg = ("Failed to instantiate {} with args {}.\n"
                           "Maybe parameter names have changed?".format(
                               layer_factory, layer_kw))
                    raise Exception(TypeError(msg), e)
            fun_[layer_kw['name']] = layer
            if layer_wrapper is not None:
                layer = layer_wrapper(layer)
                fun_["LW_%s" % layer_kw['name']] = layer
            layer._pattern_function = fun_name
        # we return the last layer as the representative of the function
        # as it's common in lasagne
        return layer
def _create_target_objective(self, output=None, target=None):
"""
Helper function to build the member variable target_loss.
"""
if output is None:
output = self.get_psi_output_for(self.input_var)
if target is None:
target = self.target_var
if self.target_loss is None:
assert (self.input_var is not None)
assert (self.target_var is not None)
if self.target_loss_fn is None:
fn = self.default_target_objective
else:
#print ("Target loss is function object: %s" % str(self.target_loss_fn))
fn = self.target_loss_fn
# special case: if we use the squared_error loss, but target_var is a vector
# (1 dim target) we flatten the prediction -- otherwise we get a theano error
if fn == lasagne.objectives.squared_error and \
target.type == T.dvector or target.type == T.dvector:
output = output.flatten()
# define target loss
self.target_loss = fn(output, target).mean()
# store the function, too (required by PatternTrainer)
self.target_loss_fn = fn
def _create_side_objective(self):
"""
Helper function to build the member variable side_loss.
"""
if self.side_loss is None:
assert (self.input_var is not None)
assert (self.side_var is not None)
if self.side_loss_fn is None:
# store the function, too (required by PatternTrainer)
self.side_loss_fn = self.default_side_objective
self.side_loss = self.get_side_objective(self.input_var, self.side_var)
    @property
    def output_shape(self):
        # NOTE(review): this forwards self.input_var (a theano variable),
        # while get_output_shape_for expects an input *shape*; presumably
        # self.input_shape was intended -- verify against callers.
        return self.get_output_shape_for(self.input_var)
    def get_params(self, **tags):
        """
        Returns a list of all the Theano variables that parameterize the
        pattern.

        By default, all parameters that participate in the forward pass will
        be returned. The list can optionally be filtered by specifying tags
        as keyword arguments. For example, ``trainable=True`` will only
        return trainable parameters, and ``regularizable=True`` will only
        return parameters that can be regularized (e.g., by L2 decay).

        Parameters
        ----------
        **tags (optional)
            tags can be specified to filter the list. Specifying ``tag1=True``
            will limit the list to parameters that are tagged with ``tag1``.
            Specifying ``tag1=False`` will limit the list to parameters that
            are not tagged with ``tag1``. Commonly used tags are
            ``regularizable`` and ``trainable``.

        Returns
        -------
        list of Theano shared variables
            A list of variables that parameterize the layer

        Notes
        -----
        For patterns without any parameters, this will return an empty list.
        """
        # collect from psi, optional beta, then phi (tags filter each call)
        params = lasagne.layers.get_all_params(self.psi, **tags)
        if self.beta is not None:
            params += lasagne.layers.get_all_params(self.beta, **tags)
        params += lasagne.layers.get_all_params(self.phi, **tags)
        return params
    def get_all_params(self, **tags):
        """Alias for get_params"""
        return self.get_params(**tags)
def get_output_shape_for(self, input_shape):
"""
Computes the output shape of this layer, given an input shape.
Parameters
----------
input_shape : tuple
A tuple representing the shape of the input. The tuple should have
as many elements as there are input dimensions, and the elements
should be integers or `None`.
Returns
-------
tuple
A tuple representing the shape of the output of this layer. The
tuple has as many elements as there are output dimensions, and the
elements are all either integers or `None`.
Notes
-----
This method will typically be overridden when implementing a new
:class:`Layer` class. By default it simply returns the input
shape. This means that a layer that does not modify the shape
(e.g. because it applies an elementwise operation) does not need
to override this method.
"""
phi_output_shape = self.phi.get_output_shape_for(input_shape)
return self.psi.get_output_shape_for(phi_output_shape)
def get_output_for(self, input=None, **kwargs):
if input is None:
input = self.input_var
return self.get_psi_output_for(input, **kwargs)
def get_psi_output_for(self, input=None, **kwargs):
if input is None:
input = self.input_var
return lasagne.layers.get_output(self.psi, inputs=input, **kwargs)
def get_beta_output_for(self, input=None, **kwargs):
if input is None:
input = self.input_var
return lasagne.layers.get_output(self.beta, inputs=input, **kwargs)
def get_phi_output_for(self, input=None, **kwargs):
if input is None:
input = self.input_var
return lasagne.layers.get_output(self.phi, inputs=input, **kwargs)
def get_output_for_function(self, fun_or_fun_name, input, **kwargs):
"""
Get the output for a pattern subfunction (i.e. phi, psi, beta) by
setting the input to that subfunction manually.
The problem is that if you apply lasagne.layers.get_output to, e.g.,
psi, which gets phi as input, lasagne.layers.get_output will except
the 'input' to be the input of phi, not the input of psi.
Sometimes, it is desirable to set the input of psi manually.
Note, that this differs from the behavior of get_phi_output_for,
get_psi_output_for and get_beta_output_for, which expect the initial
input to the network (depending on the pattern, often phi's input).
"""
if type(fun_or_fun_name) == str:
fun = self.__dict__[fun_or_fun_name]
else:
fun = fun_or_fun_name
last_input = input
for l in self._get_all_function_layers(fun):
last_input = l.get_output_for(last_input, **kwargs)
return last_input
def training_loss(self, target_weight=0.5, side_weight=0.5, all_losses=False):
"""
Compute the sum of the target and side info loss. Returns a theano expression.
If all_losses is true, additionally to the summed loss the individual (weighted)
losses are returned, too.
Parameters
----------
target_weight : float
target weight
side_weight : float
side weight
target_weight : float
Default false, returns (loss) only.
If true, returns tuple (loss, target_loss, side_loss)
"""
# we need to gate because if we set one weight to 0., we might
# also want to omit the involved theano variables; w/o the if-else
# we get an "unconnected inputs" error in theano
loss = 0.
tls, sls = np.nan, np.nan
if target_weight > 0.:
tls = target_weight * self.target_loss
loss += tls
if side_weight > 0.:
sls = side_weight * self.side_loss
loss += sls
if all_losses:
return loss, tls, sls
return loss
def save(self, fn):
"""
Save your pattern's weights in a tar file containing npz files.
You can then use `load` to recreate the pattern from this file.
Parameters
----------
fn : str
file name
"""
# tar to one file
tmp_files = []
with tarfile.open(fn + ".tar", mode='w') as out:
# use lasagne style parameter storage to avoid CUDA vs. non-CUDA
# theano issue
phi_pval = lasagne.layers.get_all_param_values(self.phi)
phi_fn = fn+"_phi.npz"
np.savez(phi_fn, *phi_pval)
out.add (phi_fn)
tmp_files.append(phi_fn)
psi_pval = lasagne.layers.get_all_param_values(self.psi)
psi_fn = fn+"_psi.npz"
np.savez(psi_fn, *psi_pval)
out.add (psi_fn)
tmp_files.append(psi_fn)
if self.beta is not None:
beta_pval = lasagne.layers.get_all_param_values(self.beta)
beta_fn = fn+"_beta.npz"
np.savez(beta_fn, *beta_pval)
out.add (beta_fn)
tmp_files.append(beta_fn)
for d in tmp_files:
try:
os.unlink(d)
except:
pass
def load(self, fn):
"""
Assuming you have initialized the pattern exactly as it
was pickled in the file 'fn', you can restore all function
parameters using this function.
Parameters
----------
fn : str
file name
"""
fn_split = os.path.splitext(fn)
assert (fn_split[-1] == ".tar")
fun_names = ['phi', 'psi', 'beta']
npz_files_loaded = {}
with tarfile.open(fn, mode='r') as t:
for m in t.getmembers():
for fun_name in fun_names:
if fun_name in m.name:
#print ("extracting" + str(m))
t.extract(m)
npz_files_loaded[fun_name] = m.name
for fun_name, fun_npz in npz_files_loaded.items():
with np.load(fun_npz) as f:
param_values = [f['arr_%d' % i] for i in range(len(f.files))]
lasagne.layers.set_all_param_values(self.__dict__[fun_name], param_values)
for _,d in npz_files_loaded.items():
try:
os.unlink(d)
except:
pass
| {
"repo_name": "tu-rbo/concarne",
"path": "concarne/patterns/base.py",
"copies": "1",
"size": "33555",
"license": "mit",
"hash": -6265479839047217000,
"line_mean": 37.1740614334,
"line_max": 92,
"alpha_frac": 0.5511846223,
"autogenerated": false,
"ratio": 4.549213665943601,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01440581471221296,
"num_lines": 879
} |
__all__ = ['PeriodicKernel']
import numpy as np
import sympy as sym
from functools import wraps
from gp.ext import periodic_c
from . import Kernel
DTYPE = np.float64
EPS = np.finfo(DTYPE).eps
class PeriodicKernel(Kernel):
r"""
Periodic kernel function.
Parameters
----------
h : float
Output scale kernel parameter
w : float
Input scale kernel parameter
p : float
Period kernel parameter
Notes
-----
The periodic kernel is defined by Equation 4.31 of [RW06]_:
.. math:: K(x_1, x_2) = h^2\exp\left(\frac{-2\sin^2\left(\frac{x_1-x_2}{2p}\right)}{w^2}\right)
where :math:`w` is the input scale parameter (equivalent to the
standard deviation of the Gaussian), :math:`h` is the output
scale parameter, and :math:`p` is the period kernel parameter.
"""
def __init__(self, h, w, p):
self.h = None #: Output scale kernel parameter
self.w = None #: Input scale kernel parameter
self.p = None #: Period kernel parameter
self.set_param('h', h)
self.set_param('w', w)
self.set_param('p', p)
    @property
    def params(self):
        r"""
        Kernel parameters.

        Returns
        -------
        params : numpy.ndarray ``(h, w, p)``
        """
        return np.array([self.h, self.w, self.p], dtype=DTYPE)
    @params.setter
    def params(self, val):
        # val is a length-3 sequence ordered (h, w, p); each entry is
        # validated and converted by set_param.
        self.set_param('h', val[0])
        self.set_param('w', val[1])
        self.set_param('p', val[2])
def set_param(self, name, val):
if name == 'h':
if val < EPS:
raise ValueError("invalid value for h: %s" % val)
self.h = DTYPE(val)
elif name == 'w':
if val < EPS:
raise ValueError("invalid value for w: %s" % val)
self.w = DTYPE(val)
elif name == 'p':
if val < EPS:
raise ValueError("invalid value for p: %s" % val)
self.p = DTYPE(val)
else:
raise ValueError("unknown parameter: %s" % name)
@property
@wraps(Kernel.sym_K)
def sym_K(self):
h = sym.Symbol('h')
w = sym.Symbol('w')
p = sym.Symbol('p')
d = sym.Symbol('d')
h2 = h ** 2
w2 = w ** 2
f = h2 * sym.exp(-2. * (sym.sin(d / (2. * p)) ** 2) / w2)
return f
@wraps(Kernel.K)
def K(self, x1, x2, out=None):
if out is None:
out = np.empty((x1.size, x2.size), dtype=DTYPE)
periodic_c.K(out, x1, x2, self.h, self.w, self.p)
return out
@wraps(Kernel.jacobian)
def jacobian(self, x1, x2, out=None):
if out is None:
out = np.empty((3, x1.size, x2.size), dtype=DTYPE)
periodic_c.jacobian(out, x1, x2, self.h, self.w, self.p)
return out
@wraps(Kernel.hessian)
def hessian(self, x1, x2, out=None):
if out is None:
out = np.empty((3, 3, x1.size, x2.size), dtype=DTYPE)
periodic_c.hessian(out, x1, x2, self.h, self.w, self.p)
return out
def dK_dh(self, x1, x2, out=None):
if out is None:
out = np.empty((x1.size, x2.size), dtype=DTYPE)
periodic_c.dK_dh(out, x1, x2, self.h, self.w, self.p)
return out
def dK_dw(self, x1, x2, out=None):
if out is None:
out = np.empty((x1.size, x2.size), dtype=DTYPE)
periodic_c.dK_dw(out, x1, x2, self.h, self.w, self.p)
return out
def dK_dp(self, x1, x2, out=None):
if out is None:
out = np.empty((x1.size, x2.size), dtype=DTYPE)
periodic_c.dK_dp(out, x1, x2, self.h, self.w, self.p)
return out
def d2K_dhdh(self, x1, x2, out=None):
if out is None:
out = np.empty((x1.size, x2.size), dtype=DTYPE)
periodic_c.d2K_dhdh(out, x1, x2, self.h, self.w, self.p)
return out
def d2K_dhdw(self, x1, x2, out=None):
if out is None:
out = np.empty((x1.size, x2.size), dtype=DTYPE)
periodic_c.d2K_dhdw(out, x1, x2, self.h, self.w, self.p)
return out
def d2K_dhdp(self, x1, x2, out=None):
if out is None:
out = np.empty((x1.size, x2.size), dtype=DTYPE)
periodic_c.d2K_dhdp(out, x1, x2, self.h, self.w, self.p)
return out
def d2K_dwdh(self, x1, x2, out=None):
if out is None:
out = np.empty((x1.size, x2.size), dtype=DTYPE)
periodic_c.d2K_dwdh(out, x1, x2, self.h, self.w, self.p)
return out
def d2K_dwdw(self, x1, x2, out=None):
if out is None:
out = np.empty((x1.size, x2.size), dtype=DTYPE)
periodic_c.d2K_dwdw(out, x1, x2, self.h, self.w, self.p)
return out
def d2K_dwdp(self, x1, x2, out=None):
if out is None:
out = np.empty((x1.size, x2.size), dtype=DTYPE)
periodic_c.d2K_dwdp(out, x1, x2, self.h, self.w, self.p)
return out
def d2K_dpdh(self, x1, x2, out=None):
if out is None:
out = np.empty((x1.size, x2.size), dtype=DTYPE)
periodic_c.d2K_dpdh(out, x1, x2, self.h, self.w, self.p)
return out
def d2K_dpdw(self, x1, x2, out=None):
if out is None:
out = np.empty((x1.size, x2.size), dtype=DTYPE)
periodic_c.d2K_dpdw(out, x1, x2, self.h, self.w, self.p)
return out
def d2K_dpdp(self, x1, x2, out=None):
if out is None:
out = np.empty((x1.size, x2.size), dtype=DTYPE)
periodic_c.d2K_dpdp(out, x1, x2, self.h, self.w, self.p)
return out
| {
"repo_name": "jhamrick/gaussian_processes",
"path": "gp/kernels/periodic.py",
"copies": "1",
"size": "5591",
"license": "mit",
"hash": -3328763917622509600,
"line_mean": 28.4263157895,
"line_max": 99,
"alpha_frac": 0.5356823466,
"autogenerated": false,
"ratio": 2.889405684754522,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39250880313545217,
"avg_score": null,
"num_lines": null
} |
__all__ = ['period', 'Period']
def period(pstr=''):
    '''Create a period object from a period string.

    *pstr* may be a string such as '3M', '1Y2M' or '-2W' (see
    Period.add_tenure for the accepted component letters D/W/M/Y),
    or an existing Period, which Period.make returns unchanged.
    '''
    return Period.make(pstr)
def find_first_of(st, possible):
    """Return the lowest index in *st* of any character from *possible*.

    Returns -1 when none of the characters occurs in *st*.
    """
    best = -1
    for ch in tuple(possible):
        pos = st.find(ch)
        if pos == -1:
            continue
        if best == -1 or pos < best:
            best = pos
    return best
def safediv(x, d):
    """Integer division of *x* by *d*, truncating toward zero.

    Unlike Python's floor division, -7 // 2 here yields -3, not -4.
    """
    quotient = abs(x) // d
    if x >= 0:
        return quotient
    return -quotient
def safemod(x, d):
    """Remainder matching safediv: the result carries the sign of *x*.

    Together with safediv this satisfies x == safediv(x, d)*d + safemod(x, d).
    """
    remainder = abs(x) % d
    if x >= 0:
        return remainder
    return -remainder
class Period:
    """A calendar period stored as a months count plus a days count.

    Years are folded into ``_months`` (12 per year) and weeks into
    ``_days`` (7 per week); months and days are never converted into each
    other exactly.  Ordering and equality use ``totaldays``, which applies
    a 30-day-month approximation.
    """

    def __init__(self, months=0, days=0):
        # Internal state: months (includes years*12) and days (includes weeks*7).
        self._months = months
        self._days = days

    @classmethod
    def make(cls, pstr=''):
        """Coerce *pstr* (a Period, or a period string such as '3M2D') to a Period."""
        if isinstance(pstr, cls):
            return pstr
        else:
            return cls().add_tenure(pstr)

    def isempty(self):
        """True when the period spans no time at all."""
        return self._months == 0 and self._days == 0

    def add_days(self, days):
        self._days += days

    def add_weeks(self, weeks):
        # Fractional weeks are truncated to whole days.
        self._days += int(7*weeks)

    def add_months(self, months):
        self._months += months

    def add_years(self, years):
        # Fractional years are truncated to whole months.
        self._months += int(12*years)

    @property
    def years(self):
        # Whole years, truncated toward zero (safediv handles negatives).
        return safediv(self._months, 12)

    @property
    def months(self):
        # Months left after removing whole years; sign follows _months.
        return safemod(self._months, 12)

    @property
    def weeks(self):
        # Whole weeks, truncated toward zero.
        return safediv(self._days, 7)

    @property
    def days(self):
        # Days left after removing whole weeks; sign follows _days.
        return safemod(self._days, 7)

    @property
    def totaldays(self):
        # Approximate total length in days (30-day months); used for comparisons.
        return 30*self._months + self._days

    def __repr__(self):
        '''The period string'''
        return self.components()

    def __str__(self):
        return self.__repr__()

    def components(self):
        '''The period string, e.g. '1Y2M3D', with one leading '-' if negative.

        The overall sign comes from ``totaldays``; each component is
        rendered with ``abs()``.
        NOTE(review): for mixed-sign periods (e.g. positive months but
        negative days) this rendering may be ambiguous — confirm whether
        mixed signs are expected to occur.
        '''
        p = ''
        neg = self.totaldays < 0
        y = self.years
        m = self.months
        w = self.weeks
        d = self.days
        if y:
            p = '%sY' % abs(y)
        if m:
            p = '%s%sM' % (p, abs(m))
        if w:
            p = '%s%sW' % (p, abs(w))
        if d:
            p = '%s%sD' % (p, abs(d))
        return '-'+p if neg else p

    def simple(self):
        '''A string representation with only one period delimiter.

        Prefers days (exact via the 30-day approximation) over months
        over years; an empty period renders as ''.
        '''
        if self._days:
            return '%sD' % self.totaldays
        elif self.months:
            return '%sM' % self._months
        elif self.years:
            return '%sY' % self.years
        else:
            return ''

    def add_tenure(self, pstr):
        """Add a period (object or string like '3M2D') to this one, in place.

        Returns self so calls can be chained (used by ``make``).
        Raises ValueError for a component letter outside D/W/M/Y.
        """
        if isinstance(pstr, self.__class__):
            self._months += pstr._months
            self._days += pstr._days
            return self
        st = str(pstr).upper()
        done = False
        sign = 1
        while not done:
            if not st:
                done = True
            else:
                ip = find_first_of(st, 'DWMY')
                if ip == -1:
                    raise ValueError("Unknown period %s" % pstr)
                p = st[ip]
                v = int(st[:ip])
                # A negative component flips the running sign, which then
                # carries over to all subsequent components: '-1Y3M' adds
                # -1 year AND -3 months.
                sign = sign if v > 0 else -sign
                v = sign*abs(v)
                if p == 'D':
                    self.add_days(v)
                elif p == 'W':
                    self.add_weeks(v)
                elif p == 'M':
                    self.add_months(v)
                elif p == 'Y':
                    self.add_years(v)
                st = st[ip+1:]
        return self

    def __add__(self, other):
        # Component-wise addition; other may be a string or Period.
        other = self.make(other)
        return self.__class__(self._months+other._months,
                              self._days+other._days)

    def __radd__(self, other):
        return self + other

    def __sub__(self, other):
        other = self.make(other)
        return self.__class__(self._months-other._months,
                              self._days-other._days)

    def __rsub__(self, other):
        return self.make(other) - self

    # Comparisons use the approximate totaldays (30-day months).
    def __gt__(self, other):
        return self.totaldays > self.make(other).totaldays

    def __lt__(self, other):
        return self.totaldays < self.make(other).totaldays

    def __ge__(self, other):
        return self.totaldays >= self.make(other).totaldays

    def __le__(self, other):
        return self.totaldays <= self.make(other).totaldays

    # NOTE(review): __eq__ without __hash__ makes instances unhashable in
    # Python 3 — confirm Periods are never used as dict keys / set members.
    def __eq__(self, other):
        return self.totaldays == self.make(other).totaldays
| {
"repo_name": "artisavotins/ccy",
"path": "ccy/dates/period.py",
"copies": "1",
"size": "4260",
"license": "bsd-3-clause",
"hash": -2350562769324093400,
"line_mean": 24.0588235294,
"line_max": 69,
"alpha_frac": 0.4805164319,
"autogenerated": false,
"ratio": 3.7665782493368702,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.974709468123687,
"avg_score": 0,
"num_lines": 170
} |
__all__ = ['perm_cache']
import json
import pickle
from functools import update_wrapper
from os.path import exists
def perm_cache(cache_type='pkl', cache_file=None):
    """Decorator factory for a persistent (on-disk) memoization cache.

    Parameters
    ----------
    cache_type : str
        Serialization format for the cache file: 'pkl' (pickle) or 'json'.
    cache_file : str, optional
        Base path for the cache file (the format extension is appended).
        If None, a path is derived from the decorated function's source
        file and name.

    Returns
    -------
    class
        A decorator class; applying it wraps the function in a caching
        callable exposing ``cache_info()`` and ``stash_cache()``.
    """
    class PermCache(object):
        _cache_type = cache_type
        _cache_file = cache_file

        def __init__(self, func):
            if self._cache_type not in ('pkl', 'json'):
                raise ValueError("Invalid cache type: %s" % self._cache_type)
            self.func = func
            if self._cache_file is None:
                # Derive e.g. /path/module.py -> /path/module.<func>.cache
                self._cache_file = (func.__code__.co_filename
                                    .replace('.py', '.' + self.func.__name__))
                self._cache_file += '.cache'
            # Append the format extension; a user-supplied path ending in
            # '.py' has that suffix replaced so the cache never shadows a
            # Python module.
            if self._cache_file.endswith('.py'):
                self._cache_file = self._cache_file.replace(
                    '.py', '.' + self._cache_type)
            else:
                self._cache_file += '.' + self._cache_type
            # Warm-start from an existing cache file, if any.
            if exists(self._cache_file):
                if self._cache_type == 'pkl':
                    with open(self._cache_file, 'rb') as f:
                        self.cache = pickle.load(f)
                elif self._cache_type == 'json':
                    with open(self._cache_file, 'r') as f:
                        self.cache = json.load(f)
            else:
                self.cache = {}
            self.__cache_info = dict.fromkeys(['added', 'read', 'total'], 0)
            update_wrapper(self, func)

        def __call__(self, *args, **kwargs):
            # Build a deterministic string key. All parts are space-joined
            # (the old code concatenated the args and kwargs parts with no
            # separator, allowing key collisions) and kwargs are sorted so
            # f(a=1, b=2) and f(b=2, a=1) hit the same entry.
            key = ' '.join([str(a) for a in args]
                           + ['%s=%s' % (k, v)
                              for k, v in sorted(kwargs.items())])
            self.__cache_info['total'] += 1
            try:
                res = self.cache[key]
                self.__cache_info['read'] += 1
            except KeyError:
                res = self.func(*args, **kwargs)
                self.cache[key] = res
                self.__cache_info['added'] += 1
            return res

        def cache_info(self):
            """Return a copy of the added/read/total counters."""
            return self.__cache_info.copy()

        def stash_cache(self):
            """Write the in-memory cache to disk in the configured format."""
            if self._cache_type == 'pkl':
                with open(self._cache_file, 'wb') as f:
                    pickle.dump(self.cache, f)
            elif self._cache_type == 'json':
                with open(self._cache_file, 'w') as f:
                    json.dump(self.cache, f, indent=2)

    return PermCache
| {
"repo_name": "sorgerlab/indra",
"path": "indra/util/perm_cache.py",
"copies": "4",
"size": "2493",
"license": "bsd-2-clause",
"hash": 6183060594598028000,
"line_mean": 36.2089552239,
"line_max": 81,
"alpha_frac": 0.4540713999,
"autogenerated": false,
"ratio": 4.141196013289036,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6595267413189037,
"avg_score": null,
"num_lines": null
} |
""" All permissions are defined here.
They are also defined in permissions.zcml.
The two files must be kept in sync.
bika.lims.__init__ imports * from this file, so
bika.lims.PermName or bika.lims.permissions.PermName are
both valid.
"""
from Products.CMFCore.permissions import AddPortalContent
# Add Permissions:
# ----------------
AddAnalysis = 'BIKA: Add Analysis'
AddAnalysisProfile = 'BIKA: Add AnalysisProfile'
AddAnalysisRequest = 'BIKA: Add Analysis Request'
AddAnalysisSpec = 'BIKA: Add AnalysisSpec'
AddAttachment = 'BIKA: Add Attachment'
AddARTemplate = 'BIKA: Add ARTemplate'
AddBatch = 'BIKA: Add Batch'
AddClient = 'BIKA: Add Client'
AddClientFolder = 'BIKA: Add ClientFolder'
AddInvoice = 'BIKA: Add Invoice'
AddMethod = 'BIKA: Add Method'
AddMultifile = 'BIKA: Add Multifile'
AddPricelist = 'BIKA: Add Pricelist'
AddProduct = 'BIKA: Add Product'
AddProductCategory = 'BIKA: Add ProductCategory'
AddStockItem = 'BIKA: Add StockItem'
AddSupplyOrder = 'BIKA: Add SupplyOrder'
AddInventoryOrder = 'BIKA: Add Inventory Order'
AddSample = 'BIKA: Add Sample'
AddSampleMatrix = 'BIKA: Add SampleMatrix'
AddSamplePartition = 'BIKA: Add SamplePartition'
AddSamplePoint = 'BIKA: Add SamplePoint'
AddStorageLocation = 'BIKA: Add StorageLocation'
AddSamplingDeviation = 'BIKA: Add SamplingDeviation'
AddSamplingRound = 'BIKA: Add SamplingRound'
AddSRTemplate = 'BIKA: Add SRTemplate'
AddStorageLevel = 'BIKA: Add StorageLevel'
AddStorageUnit = 'BIKA: Add StorageUnit'
AddSubGroup = 'BIKA: Add Sub-group'
# Default Archetypes Add Permission
ADD_CONTENT_PERMISSION = AddPortalContent
# Add Permissions for specific types, if required
ADD_CONTENT_PERMISSIONS = {
'ARAnalysisSpec': AddAnalysisSpec,
'AnalysisProfile': AddAnalysisProfile,
'Analysis': AddAnalysis,
'AnalysisRequest': AddAnalysisRequest,
'Attachment': AddAttachment,
'Batch': AddBatch,
'Client': AddClient,
'Invoice': AddInvoice,
'Method': AddMethod,
'Multifile': AddMultifile,
'SupplyOrder': AddSupplyOrder,
'Order': AddInventoryOrder,
'Sample': AddSample,
'SampleMatrix': AddSampleMatrix,
'SamplePartition': AddSamplePartition,
'SamplingDeviation': AddSamplingDeviation,
'SamplingRound': AddSamplingRound,
'SubGroup': AddSubGroup,
'StorageLevel': AddStorageLevel,
'StorageUnit': AddStorageUnit,
}
# Very Old permissions:
# ---------------------
ManageBika = 'BIKA: Manage Bika'
DispatchOrder = 'BIKA: Dispatch Order'
ManageAnalysisRequests = 'BIKA: Manage Analysis Requests'
ManageSamples = 'BIKA: Manage Samples'
ManageSuppliers = 'BIKA: Manage Reference Suppliers'
ManageReference = 'BIKA: Manage Reference'
PostInvoiceBatch = 'BIKA: Post Invoice batch'
ManagePricelists = 'BIKA: Manage Pricelists'
# This allows to edit all client fields, and perform admin tasks on Clients.
ManageClients = 'BIKA: Manage Clients'
# this is for creating and transitioning worksheets
ManageWorksheets = 'BIKA: Manage Worksheets'
# this is for adding/editing/exporting analyses on worksheets
EditWorksheet = 'BIKA: Edit Worksheet'
RejectWorksheet = 'BIKA: Reject Worksheet'
ImportInstrumentResults = "BIKA: Import Instrument Results"
AccessJSONAPI = 'BIKA: Access JSON API'
# New or changed permissions:
# ---------------------------
DispatchInventoryOrder = 'BIKA: Dispatch Inventory Order'
ReceiveInventoryOrder = 'BIKA: Receive Inventory Order'
StoreInventoryOrder = 'BIKA: Store Inventory Order'
SampleSample = 'BIKA: Sample Sample'
PreserveSample = 'BIKA: Preserve Sample'
ReceiveSample = 'BIKA: Receive Sample'
ExpireSample = 'BIKA: Expire Sample'
DisposeSample = 'BIKA: Dispose Sample'
ImportAnalysis = 'BIKA: Import Analysis'
Retract = "BIKA: Retract"
Verify = 'BIKA: Verify'
VerifyOwnResults = 'BIKA: Verify own results'
Publish = 'BIKA: Publish'
EditSample = 'BIKA: Edit Sample'
EditAR = 'BIKA: Edit AR'
ResultsNotRequested = 'BIKA: Results not requested'
ManageInvoices = 'BIKA: Manage Invoices'
ViewResults = 'BIKA: View Results'
EditResults = 'BIKA: Edit Results'
EditFieldResults = 'BIKA: Edit Field Results'
ViewRetractedAnalyses = 'BIKA: View Retracted Analyses'
CancelAndReinstate = 'BIKA: Cancel and reinstate'
# For adding login credentials to Contacts.
ManageLoginDetails = 'BIKA: Manage Login Details'
Assign = 'BIKA: Assign analyses'
Unassign = 'BIKA: Unassign analyses'
# Field permissions
EditARContact = "BIKA: Edit AR Contact"
ViewLogTab = 'BIKA: View Log Tab'
# Edit AR
# -----------------------------------------------------------------------------
# Allows to set values for AR fields in AR view
#
# Only takes effect if:
# - The AR's 'cancellation_state' is 'active'
# - The AR's 'review_state' is in:
# 'sample_registered', 'to_be_sampled', 'sampled', 'to_be_preserved',
# 'sample_due', 'sample_received', 'to_be_verified', 'attachment_due'
EditAR = 'BIKA: Edit AR'
# Edit Sample Partition
# -----------------------------------------------------------------------------
# Allows to set a Container and/or Preserver for a Sample Partition.
# See AR view: Sample Partitions table and Sample Partitions tab
#
# Only takes effect if:
# - The Sample's 'cancellation_state' is 'active'
# - The Sample's 'review_state' is in:
# 'sample_registered', 'to_be_sampled', 'sampled', 'to_be_preserved',
# 'sample_due', 'sample_received', 'to_be_verified', 'attachment_due'
EditSamplePartition = 'BIKA: Edit Sample Partition'
# Edit Client
# ----------------------------------------------
# Allows access to 'Edit' and 'Contacts' tabs from Client View
EditClient = 'BIKA: Edit Client'
# Manage Supply Orders
# ----------------------------------------------
# Allows access to 'Supply Orders' tab in Client context
ManageSupplyOrders = 'BIKA: Manage Supply Orders'
# Batch-specific permissions
# ----------------------------------------------
EditBatch = 'BIKA: Edit Batch'
CloseBatch = 'BIKA: Close Batch'
ReopenBatch = 'BIKA: Reopen Batch'
# Sampling Round permissions
# --------------------------
CloseSamplingRound = 'BIKA: Close SamplingRound'
ReopenSamplingRound = 'BIKA: Reopen SamplingRound'
# Manage AR Imports
# ----------------------------------------------
ManageARImport = 'BIKA: Manage ARImport'
# Manage AR Priorities
# ----------------------------------------------
ManageARPriority = 'BIKA: Manage ARPriority'
| {
"repo_name": "hocinebendou/bika.gsoc",
"path": "bika/lims/permissions.py",
"copies": "1",
"size": "6349",
"license": "mit",
"hash": -2278182654544300300,
"line_mean": 33.693989071,
"line_max": 79,
"alpha_frac": 0.6901874311,
"autogenerated": false,
"ratio": 3.5829571106094806,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47731445417094803,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Person']
class Person(object):
    """A single address-book entry: names, contact details and group links.

    The original implementation used Python-2-only constructs
    (``dict.iteritems`` and ``unicode``) that crash on Python 3; this
    version runs on both interpreters while keeping the same behavior.
    """

    # Field names that `match` accepts as search criteria.
    searchable_fields = ['first_name', 'last_name', 'email', 'emails']

    def __init__(self, first_name, last_name, addresses, phone_numbers, emails):
        """
        Constructor of Person class

        :param first_name: first name of the person
        :type first_name: str or unicode
        :param last_name: last name of the person
        :type last_name: str or unicode
        :param addresses: list of person's addresses (list of strings)
        :type addresses: list
        :param phone_numbers: list of person's phone numbers (list of strings)
        :type phone_numbers: list
        :param emails: list of person's emails (list of strings)
        :type emails: list
        """
        self.first_name = first_name
        self.last_name = last_name
        self.addresses = addresses
        self.phone_numbers = phone_numbers
        self.emails = emails
        self.groups = []

    def add_address(self, address):
        """
        Add the address string to the list of addresses of current person

        :param address: address string to be added
        :type address: str or unicode
        """
        self.addresses.append(address)

    def add_phone_number(self, phone_number):
        """
        Add the phone number string to the list of phone numbers of current person

        :param phone_number: phone number string to be added
        :type phone_number: str or unicode
        """
        self.phone_numbers.append(phone_number)

    def add_email(self, email):
        """
        Add email string to the list of emails of current person

        :param email: email to be added
        :type email: str or unicode
        """
        self.emails.append(email)

    def add_to_group(self, group, update_group=True):
        """
        Connects current person and given group

        :param group: group to be extended with current person instance
        :param update_group: indicates if we also must update given group with current person
        :type group: address_book.Group
        :type update_group: bool
        """
        self.groups.append(group)
        if update_group:
            # update_person=False avoids infinite mutual registration.
            group.add_person(self, update_person=False)

    def match(self, **match_fields):
        """
        Match current person object with a set of fields

        :param match_fields: set of fields to be matched with current instance
        :return: does current person match the given set of fields or not
        :rtype: bool
        """
        matches = {}
        # iteritems() was Python-2-only; items() works on both 2 and 3.
        for field, value in match_fields.items():
            # TODO: 'email' is treated as an alias for a single-entry
            # 'emails' search — somewhat hacky, consider a cleaner mapping.
            if field == 'email':
                field = 'emails'
                value = [value]
            self_value = getattr(self, field)
            if isinstance(value, list):
                if field == 'emails':
                    # Every searched email must be a prefix of some stored one.
                    matched = True
                    for search_email in value:
                        for actual_email in self_value:
                            if actual_email.startswith(search_email):
                                break
                        else:
                            matched = False
                            break
                else:
                    # Other list fields require exact containment.
                    matched = set(self_value).issuperset(set(value))
            else:
                matched = self_value == value
            matches[field] = matched
        # An empty criteria set matches everything (all() of nothing is True).
        return all(matches.values())

    def __unicode__(self):
        # Kept for Python 2 callers; __str__/__repr__ delegate here so the
        # textual form is defined in exactly one place.
        return u'Person<{first_name} {last_name}>'.format(
            first_name=self.first_name,
            last_name=self.last_name
        )

    def __str__(self):
        # The original `return unicode(self)` raised NameError on Python 3.
        return self.__unicode__()

    def __repr__(self):
        return self.__unicode__()
"repo_name": "dizpers/python-address-book-assignment",
"path": "address_book/person.py",
"copies": "1",
"size": "3766",
"license": "mit",
"hash": 595361485026406100,
"line_mean": 32.0438596491,
"line_max": 92,
"alpha_frac": 0.5541688794,
"autogenerated": false,
"ratio": 4.615196078431373,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008255847335372553,
"num_lines": 114
} |
__all__ = ['physics', 'shapes', 'ui', 'constants', 'util']
def go():
    """Build and render the demo scene: one coil and its magnetic field."""
    # Imports (DO NOT EDIT)
    from visual import vector, display
    from physics import BField
    from shapes import Wire, Coil, Bar, Particle
    from ui import Elviz
    # Window and field setup; color tuples are in (r, g, b) format.
    app = Elviz(1600, 900)  # width, height
    scene = app.scene
    field = BField(scene, color = (1, 1, 0))
    # Shape constructor signatures (see `shapes.py` for precise definitions):
    #
    #   Wire(start, end, current, scene)
    #   Coil(center, radius, normal vector, current, scene, loops(default=1), pitch(default=1))
    #   Bar(start, direction, magnetic moment, length, scene, height(default=1), width(default=0.5))
    #   Particle(center, magnetic moment, scene)
    #
    # Other example shapes that can be enabled:
    #   wire = Wire(vector(0, -15, 15), vector(0, 15, -15), 1, scene)
    #   bar = Bar(vector(10, 0, 0), vector(0, 0, 1), 1, 10, scene)
    #   particle = Particle(vector(0, 10, 0), 0.5*vector(0, -1, 0), scene)
    coil = Coil(vector(0, 0, 0), 10, vector(0, 1, 1), 10, scene, 10, 0.5)
    # Register the field sources (add field.add_inducer(...) per extra shape).
    field.add_inducer(coil)
    # Render the field over a cubic region: (origin, size, step, radius).
    field.draw(vector(-30, -30, -30), vector(60, 60, 60), 6, 30)
| {
"repo_name": "Octaplex/elviz",
"path": "elviz/__init__.py",
"copies": "1",
"size": "1267",
"license": "mit",
"hash": 7377079448483172000,
"line_mean": 32.3421052632,
"line_max": 98,
"alpha_frac": 0.5935280189,
"autogenerated": false,
"ratio": 2.8155555555555556,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3909083574455555,
"avg_score": null,
"num_lines": null
} |
__all__ = ['PingProtocolCommand']
import time
import random
from print_colors import PrintColors
from contact import Contact
from protocol_command import ProtocolCommand
class PingProtocolCommand(ProtocolCommand):
    """Liveness/keep-alive command for the routing table.

    Periodically pings either a candidate contact (waiting in
    ``rt.add_contacts``) or a random known contact; any peer that pings us
    or answers a ping gets promoted into the active contacts table.

    The contact bookkeeping that was previously duplicated verbatim in
    ``on_req`` and ``on_res`` lives in ``_register_contact``.
    """

    def start(self):
        # Kick off the periodic ping loop; req() reschedules itself.
        self.req()

    def stop(self):
        raise NotImplementedError

    def req(self):
        """Ping one peer, then reschedule this method."""
        node_id = self.node.id
        local_host = self.node.listen_host
        local_port = self.node.listen_port
        if random.random() < 0.5:
            # ping a contact waiting to be added
            c = self.node.rt.add_contacts.get(0)
        else:
            # ping a random known contact (never ourselves)
            c = self.node.rt.contacts.random(without_id=self.node.id)
        if c:
            args = ()
            kwargs = {
                'id': node_id,
                'local_host': local_host,
                'local_port': local_port,
            }
            # build request message
            message_data = self.node.build_message(
                self.protocol_major_version,
                self.protocol_minor_version,
                self.PROTOCOL_REQ,
                self.protocol_command_code,
                (args, kwargs),
            )
            # send message
            self.node.send_message(message_data, c.remote_host, c.remote_port)
        # schedule the next ping after a random delay in [0.0, 0.5) s
        self.node.loop.call_later(0.0 + random.random() * 0.5, self.req)

    def _register_contact(self, tag, remote_host, remote_port, node_id,
                          local_host, local_port, bootstrap):
        """Refresh `last_seen` for a known contact, or promote/create one.

        Lookup order (identical to the previous inline logic): active
        contacts by id, then by address; then the pending-add and
        pending-remove pools, again by id then by address; finally a brand
        new Contact is created.  Only promotions and creations are logged,
        with *tag* identifying the calling handler.
        """
        rt = self.node.rt
        # Already-active contact: just refresh identity and timestamp.
        c = rt.contacts.get(node_id) or rt.contacts.get((remote_host, remote_port))
        if c:
            c.id = node_id
            c.last_seen = time.time()
            return
        # Pending contact: move it into the active table.
        for pool in (rt.add_contacts, rt.remove_contacts):
            c = pool.get(node_id) or pool.get((remote_host, remote_port))
            if c:
                pool.remove(c)
                rt.contacts.add(c)
                c.id = node_id
                c.last_seen = time.time()
                print(PrintColors.GREEN + 'new contact [%s]:' % tag, self.node, c, PrintColors.END)
                return
        # Unknown peer: since it is actively talking to us, register it
        # directly as a known active contact.
        c = Contact(
            id = node_id,
            local_host = local_host,
            local_port = local_port,
            remote_host = remote_host,
            remote_port = remote_port,
            bootstrap = bootstrap,
        )
        c.last_seen = time.time()
        rt.contacts.add(c)
        print(PrintColors.GREEN + 'new contact [%s]:' % tag, self.node, c, PrintColors.END)

    def on_req(self, remote_host, remote_port, *args, **kwargs):
        """Handle an incoming ping request: register the peer, then reply."""
        self._register_contact(
            'PING ON REQ',
            remote_host,
            remote_port,
            kwargs['id'],
            kwargs['local_host'],
            kwargs['local_port'],
            kwargs.get('bootstrap', False),
        )
        # forward to res
        self.res(remote_host, remote_port, *args, **kwargs)

    def res(self, remote_host, remote_port, *args, **kwargs):
        """Send a ping response carrying our own identity."""
        res = {
            'id': self.node.id,
            'local_host': self.node.listen_host,
            'local_port': self.node.listen_port,
        }
        # build response message
        message_data = self.node.build_message(
            self.protocol_major_version,
            self.protocol_minor_version,
            self.PROTOCOL_RES,
            self.protocol_command_code,
            res,
        )
        # send message
        self.node.send_message(message_data, remote_host, remote_port)

    def on_res(self, remote_host, remote_port, res):
        """Handle a ping response: just register/refresh the peer."""
        self._register_contact(
            'PING ON RES',
            remote_host,
            remote_port,
            res['id'],
            res['local_host'],
            res['local_port'],
            res.get('bootstrap', False),
        )
| {
"repo_name": "mtasic85/routingtable",
"path": "ping_protocol_command.py",
"copies": "1",
"size": "9122",
"license": "mit",
"hash": -6973916327591847000,
"line_mean": 38.1502145923,
"line_max": 118,
"alpha_frac": 0.4304976979,
"autogenerated": false,
"ratio": 4.545092177379173,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5475589875279173,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Planet']
import os
import numpy as np
from pylightcurve.errors import *
from pylightcurve.models.exoplanet_lc import planet_orbit, planet_star_projected_distance, planet_phase, \
transit, transit_integrated, \
transit_duration, transit_depth, eclipse, eclipse_integrated, eclipse_mid_time, eclipse_duration, eclipse_depth,\
exotethys, fp_over_fs, _get_filter
from pylightcurve.analysis.optimisation import EmceeFitting
from pylightcurve.processes.files import open_dict, save_dict
from pylightcurve.plots.plots_fitting import plot_transit_fitting_models
from pylightcurve.spacetime.angles import Degrees, _request_angle
from pylightcurve.spacetime.targets import FixedTarget
class Filter:
    """Per-bandpass planet/star quantities used by Planet.

    Holds the planet-to-star radius ratio, the occultation depth and the
    four-coefficient limb-darkening law for a single photometric filter.
    """

    def __init__(self, rp_over_rs, ldc1, ldc2, ldc3, ldc4, fp_over_fs):
        # Limb-darkening coefficients are kept together as a 4-element list.
        self.limb_darkening_coefficients = [ldc1, ldc2, ldc3, ldc4]
        self.rp_over_rs = rp_over_rs
        self.fp_over_fs = fp_over_fs
class Planet:
def __init__(self, name, ra, dec, stellar_logg, stellar_temperature, stellar_metallicity,
rp_over_rs, period, sma_over_rs, eccentricity, inclination,
periastron, mid_time, mid_time_format, ww=0,
albedo=0.15, emissivity=1.0,
ldc_method='claret', ldc_stellar_model='phoenix'):
if isinstance(ra, float):
ra = Degrees(ra)
else:
_request_angle(ra)
if isinstance(dec, float):
dec = Degrees(dec)
else:
_request_angle(dec)
if isinstance(inclination, float):
pass
else:
_request_angle(inclination)
inclination = inclination.deg()
if isinstance(periastron, float):
pass
else:
_request_angle(periastron)
periastron = periastron.deg()
self.name = name
self.target = FixedTarget(ra, dec)
self.stellar_logg = stellar_logg
self.stellar_temperature = stellar_temperature
self.stellar_metallicity = stellar_metallicity
self.ldc_method = ldc_method
self.ldc_stellar_model = ldc_stellar_model
self.rp_over_rs = rp_over_rs
self.period = period
self.sma_over_rs = sma_over_rs
self.eccentricity = eccentricity
self.inclination = inclination
self.periastron = periastron
self.mid_time = self.target.convert_to_bjd_tdb(mid_time, mid_time_format)
self.eclipse_mid_time = eclipse_mid_time(self.period, self.sma_over_rs, self.eccentricity, self.inclination,
self.periastron, self.mid_time)
self.ww = ww
self.albedo = albedo
self.emissivity = emissivity
self.filters = {}
self.observations = {}
# filter-dependent values
def filter(self, filter_name):
if filter_name in self.filters:
return self.filters[filter_name]
else:
try:
_get_filter(filter_name)
ldc1, ldc2, ldc3, ldc4 = exotethys(self.stellar_logg, self.stellar_temperature, self.stellar_metallicity, filter_name,
method=self.ldc_method, stellar_model=self.ldc_stellar_model)
fp = fp_over_fs(self.rp_over_rs, self.sma_over_rs, self.albedo, self.emissivity, self.stellar_temperature,
filter_name)
print('Fp/Fs estimated using A={0}, e={1} for filter {2}.'.format(self.albedo, self.emissivity,
filter_name))
self.filters[filter_name] = Filter(self.rp_over_rs, ldc1, ldc2, ldc3, ldc4, fp)
return self.filters[filter_name]
except PyLCInputError:
raise PyLCInputError('Filter not available, you need to add {0} to the existing filters first.'.format
(filter_name))
def add_filter(self, filter_name, rp_over_rs, ldc1, ldc2, ldc3, ldc4, fp):
self.filters[filter_name] = Filter(rp_over_rs, ldc1, ldc2, ldc3, ldc4, fp)
# planet calculations
def planet_orbit(self, time, time_format):
time = self.target.convert_to_bjd_tdb(time, time_format)
return planet_orbit(self.period, self.sma_over_rs, self.eccentricity, self.inclination, self.periastron,
self.mid_time, time, ww=self.ww)
def planet_star_projected_distance(self, time, time_format):
time = self.target.convert_to_bjd_tdb(time, time_format)
return planet_star_projected_distance(self.period, self.sma_over_rs, self.eccentricity, self.inclination,
self.periastron, self.mid_time, time)
def planet_phase(self, time, time_format):
time = np.array([self.target.convert_to_bjd_tdb(ff, time_format) for ff in time])
return planet_phase(self.period, self.mid_time, time)
def transit(self, time, time_format, filter_name, precision=3):
filter_data = self.filter(filter_name)
time = self.target.convert_to_bjd_tdb(time, time_format)
return transit(filter_data.limb_darkening_coefficients,
filter_data.rp_over_rs, self.period, self.sma_over_rs, self.eccentricity,
self.inclination, self.periastron, self.mid_time, time,
method=self.ldc_method, precision=precision)
def transit_integrated(self, time, time_format, exp_time, time_stamp, filter_name, max_sub_exp_time=10, precision=3):
if time_stamp == 'start':
time = np.array(time) + 0.5 * exp_time / (60.0 * 60.0 * 24.0)
elif time_stamp == 'mid':
time = np.array(time)
elif time_stamp == 'end':
time = np.array(time) - 0.5 * exp_time / (60.0 * 60.0 * 24.0)
else:
raise PyLCInputError(
'Not acceptable time stamp {0}. Please choose between "mid", "start", "end".'.format(time_stamp))
filter_data = self.filter(filter_name)
time = self.target.convert_to_bjd_tdb(time, time_format)
return transit_integrated(filter_data.limb_darkening_coefficients,
filter_data.rp_over_rs, self.period, self.sma_over_rs, self.eccentricity,
self.inclination, self.periastron, self.mid_time, time, exp_time=exp_time,
max_sub_exp_time=max_sub_exp_time,
method=self.ldc_method, precision=precision)
def transit_duration(self, filter_name):
filter_data = self.filter(filter_name)
return transit_duration(filter_data.rp_over_rs,
self.period, self.sma_over_rs, self.eccentricity, self.inclination, self.periastron)
def transit_depth(self, filter_name, precision=6):
    """Return the transit depth for the given filter."""
    band = self.filter(filter_name)
    return transit_depth(band.limb_darkening_coefficients, band.rp_over_rs,
                         self.period, self.sma_over_rs, self.eccentricity,
                         self.inclination, self.periastron,
                         method=self.ldc_method, precision=precision)
def eclipse(self, time, time_format, filter_name, precision=3):
    """Return the (secondary) eclipse light-curve model at the given times.

    Uses the filter's fp_over_fs and rp_over_rs; times are converted to
    BJD_TDB before evaluation.
    """
    band = self.filter(filter_name)
    bjd_tdb = self.target.convert_to_bjd_tdb(time, time_format)
    return eclipse(band.fp_over_fs, band.rp_over_rs,
                   self.period, self.sma_over_rs, self.eccentricity,
                   self.inclination, self.periastron, self.eclipse_mid_time, bjd_tdb,
                   precision=precision)
def eclipse_integrated(self, time, time_format, exp_time, time_stamp, filter_name, max_sub_exp_time=10, precision=3):
    """Return the exposure-integrated eclipse model at the given times.

    Same time-stamp handling as transit_integrated: 'start'/'end' stamps are
    shifted by half the exposure time (exp_time divided by 86400 to match the
    day-based time scale) before conversion to BJD_TDB.
    """
    if time_stamp not in ('start', 'mid', 'end'):
        raise PyLCInputError(
            'Not acceptable time stamp {0}. Please choose between "mid", "start", "end".'.format(time_stamp))
    shift_direction = {'start': 1.0, 'mid': 0.0, 'end': -1.0}[time_stamp]
    time = np.array(time)
    if shift_direction:
        time = time + shift_direction * 0.5 * exp_time / (60.0 * 60.0 * 24.0)
    band = self.filter(filter_name)
    bjd_tdb = self.target.convert_to_bjd_tdb(time, time_format)
    return eclipse_integrated(band.fp_over_fs, band.rp_over_rs,
                              self.period, self.sma_over_rs, self.eccentricity,
                              self.inclination, self.periastron, self.eclipse_mid_time, bjd_tdb,
                              exp_time=exp_time, max_sub_exp_time=max_sub_exp_time,
                              precision=precision)
def eclipse_duration(self, filter_name):
    """Return the eclipse duration, using the rp_over_rs of the given filter."""
    band = self.filter(filter_name)
    return eclipse_duration(band.rp_over_rs, self.period, self.sma_over_rs,
                            self.eccentricity, self.inclination, self.periastron)
def eclipse_depth(self, filter_name, precision=6):
    """Return the eclipse depth for the given filter."""
    band = self.filter(filter_name)
    return eclipse_depth(band.fp_over_fs, band.rp_over_rs, self.period,
                         self.sma_over_rs, self.eccentricity, self.inclination,
                         self.periastron, precision=precision)
# data fitting

def add_observation(self, time, time_format, exp_time, time_stamp, flux, flux_unc, flux_format, filter_name):
    """Store a light curve as a new observation for later fitting.

    Time stamps are shifted to mid-exposure (per time_stamp: 'start', 'mid'
    or 'end') and converted to BJD_TDB; magnitudes (flux_format='mag') are
    converted to flux relative to the first data point. The observation is
    classified as 'transit' or 'eclipse' depending on whether its mean time
    lies closer to an integer number of periods from the transit or the
    eclipse mid-time, and is stored in self.observations under the first
    free integer id.

    Raises PyLCInputError for an unknown time_stamp or flux_format.
    """
    # Validates the filter name (self.filter is expected to raise for an
    # unknown filter); the returned object is not otherwise used here.
    filter_data = self.filter(filter_name)

    # Keep the raw user input so it can be recovered from the stored observation.
    original_data = {
        'time': time,
        'time_stamp': time_stamp,
        'time_format': time_format,
        'flux': flux,
        'flux_format': flux_format,
        'flux_unc': flux_unc,
        'exp_time': exp_time,
        'filter': filter_name
    }

    # Shift stamps to mid-exposure; exp_time is divided by 86400 to match the
    # day-based time scale.
    if time_stamp == 'start':
        time = np.array(time) + 0.5 * exp_time / (60.0 * 60.0 * 24.0)
    elif time_stamp == 'mid':
        time = np.array(time)
    elif time_stamp == 'end':
        time = np.array(time) - 0.5 * exp_time / (60.0 * 60.0 * 24.0)
    else:
        raise PyLCInputError(
            'Not acceptable time stamp {0}. Please choose between "mid", "start", "end".'.format(time_stamp))

    # Reference date from the first (mid-exposure) time, then element-wise
    # conversion of the whole series to BJD_TDB.
    date = self.target.convert_to_jd(time[0], time_format)
    time = np.array([self.target.convert_to_bjd_tdb(ff, time_format) for ff in time])

    if flux_format == 'mag':
        # mag -> flux relative to the first point: flux = 10**((m0 - m)/2.5).
        # Error propagation uses |d(flux)/d(mag)| with 0.921034 ~= ln(10)/2.5.
        flux_unc = np.abs(np.array(flux_unc) * (-0.921034 * np.exp(0.921034 * flux[0] - 0.921034 * np.array(flux))))
        flux = 10 ** ((flux[0] - np.array(flux)) / 2.5)
    elif flux_format == 'flux':
        flux = np.array(flux)
        flux_unc = np.array(flux_unc)
    else:
        raise PyLCInputError('Not acceptable flux format {0}. Please choose between "flux" and '
                             '"mag".'.format(flux_format))

    # First unused integer observation id.
    obs_id = 0
    while obs_id in self.observations:
        obs_id += 1

    # Fractional phase distance of the mean time from the nearest transit /
    # eclipse epoch.
    # NOTE(review): int() truncates, so a fractional phase of e.g. 0.9 counts
    # as "far" even though it is close to the next epoch — confirm intended.
    check_transit = (np.mean(time) - self.mid_time) / self.period
    check_transit = abs(check_transit - int(check_transit))
    check_eclipse = (np.mean(time) - self.eclipse_mid_time) / self.period
    check_eclipse = abs(check_eclipse - int(check_eclipse))
    if check_transit < check_eclipse:
        observation_type = 'transit'
        epoch = int(round((np.mean(time) - self.mid_time) / self.period, 0))
    else:
        observation_type = 'eclipse'
        epoch = int(round((np.mean(time) - self.eclipse_mid_time) / self.period, 0))

    self.observations[obs_id] = {
        'target': self.name,
        'time': time,
        'dtime': time - time[0],
        'flux': flux,
        'flux_unc': flux_unc,
        'exp_time': exp_time,
        'filter': filter_name,
        'epoch': epoch,
        'date': date,
        'observation_type': observation_type,
        'original_data': original_data
    }
def transit_fitting(self, output_folder,
                    max_sub_exp_time=10, precision=3,
                    detrending_order=2,
                    iterations=130000, walkers=200, burn_in=30000,
                    fit_ldc1=False, fit_ldc2=False, fit_ldc3=False, fit_ldc4=False,
                    fit_rp_over_rs=True, fit_individual_rp_over_rs=True,
                    fit_sma_over_rs=False, fit_inclination=False,
                    fit_mid_time=True, fit_period=False,
                    fit_individual_times=True,
                    fit_ldc_limits=[0.0, 1.0],
                    fit_rp_over_rs_limits=[0.5, 2.0],
                    fit_sma_over_rs_limits=[0.5, 2.0],
                    fit_inclination_limits=[70.0, 90.0],
                    fit_mid_time_limits=[-0.2, 0.2],
                    fit_period_limits=[0.8, 1.2],
                    counter='MCMC'):
    """Run an MCMC fit of a transit model to all added observations.

    Each observation gets its own normalisation/linear/quadratic detrending
    parameters (N, L, Q); limb-darkening coefficients and (optionally)
    rp_over_rs are shared per filter; orbital parameters are shared globally.
    Results, diagnostics files and plots are written into output_folder.
    A parameter whose limits are set to nan is presumably treated as fixed by
    EmceeFitting — its initial value is still supplied.

    Raises PyLCInputError if fit_period and fit_individual_times are both set.
    """
    if not os.path.isdir(output_folder):
        os.mkdir(output_folder)

    # parameters_map[i] collects, for observation i, the indices into the
    # global parameter vector that full_model/detrend_model will use.
    parameters_map = [[] for observation in self.observations]
    names = []
    print_names = []
    limits1 = []
    limits2 = []
    initial = []

    # de-trending parameters
    for observation_num, observation in enumerate(self.observations):

        # NOTE(review): numerator and denominator are the identical flux range,
        # so this simplifies to 10 / mean(flux); the denominator was possibly
        # meant to be the time span ('dtime' range) — confirm against upstream.
        max_limit = (10 * (max(self.observations[observation]['flux']) - min(self.observations[observation]['flux'])) /
                     (max(self.observations[observation]['flux']) - min(self.observations[observation]['flux'])) / np.mean(self.observations[observation]['flux']))

        # N: per-observation normalisation factor.
        names.append('N_{0}'.format(observation_num + 1))
        print_names.append('N_{0}'.format(observation_num + 1))
        initial.append(np.mean(self.observations[observation]['flux']))
        limits1.append(0.5 * np.mean(self.observations[observation]['flux']))
        limits2.append(1.5 * np.mean(self.observations[observation]['flux']))
        parameters_map[observation_num].append(len(names) - 1)

        # L: linear detrending coefficient (free only for detrending_order >= 1).
        names.append('L_{0}'.format(observation_num + 1))
        print_names.append('L_{0}'.format(observation_num + 1))
        initial.append(0)
        if detrending_order >= 1:
            limits1.append(-max_limit)
            limits2.append(max_limit)
        else:
            limits1.append(np.nan)
            limits2.append(np.nan)
        parameters_map[observation_num].append(len(names) - 1)

        # Q: quadratic detrending coefficient (free only for detrending_order == 2).
        names.append('Q_{0}'.format(observation_num + 1))
        print_names.append('Q_{0}'.format(observation_num + 1))
        initial.append(0)
        if detrending_order == 2:
            limits1.append(-5)
            limits2.append(5)
        else:
            limits1.append(np.nan)
            limits2.append(np.nan)
        parameters_map[observation_num].append(len(names) - 1)

    # limb-darkening and rp_over_rs parameters
    unique_filters = []
    for observation in self.observations:
        if self.observations[observation]['filter'] not in unique_filters:
            unique_filters.append(self.observations[observation]['filter'])

    # One set of LDCs (and optionally rp_over_rs) per filter; each is mapped
    # onto every observation taken in that filter.
    for phot_filter in unique_filters:

        filter_data = self.filter(phot_filter)
        ldc1, ldc2, ldc3, ldc4 = filter_data.limb_darkening_coefficients
        rp_over_rs = filter_data.rp_over_rs

        names.append('LDC1_{0}'.format(phot_filter))
        print_names.append('LDC1_{0}'.format(phot_filter))
        initial.append(ldc1)
        if fit_ldc1:
            limits1.append(fit_ldc_limits[0])
            limits2.append(fit_ldc_limits[1])
        else:
            limits1.append(np.nan)
            limits2.append(np.nan)
        for observation_num, observation in enumerate(self.observations):
            if self.observations[observation]['filter'] == phot_filter:
                parameters_map[observation_num].append(len(names) - 1)

        names.append('LDC2_{0}'.format(phot_filter))
        print_names.append('LDC2_{0}'.format(phot_filter))
        initial.append(ldc2)
        if fit_ldc2:
            limits1.append(fit_ldc_limits[0])
            limits2.append(fit_ldc_limits[1])
        else:
            limits1.append(np.nan)
            limits2.append(np.nan)
        for observation_num, observation in enumerate(self.observations):
            if self.observations[observation]['filter'] == phot_filter:
                parameters_map[observation_num].append(len(names) - 1)

        names.append('LDC3_{0}'.format(phot_filter))
        print_names.append('LDC3_{0}'.format(phot_filter))
        initial.append(ldc3)
        if fit_ldc3:
            limits1.append(fit_ldc_limits[0])
            limits2.append(fit_ldc_limits[1])
        else:
            limits1.append(np.nan)
            limits2.append(np.nan)
        for observation_num, observation in enumerate(self.observations):
            if self.observations[observation]['filter'] == phot_filter:
                parameters_map[observation_num].append(len(names) - 1)

        names.append('LDC4_{0}'.format(phot_filter))
        print_names.append('LDC4_{0}'.format(phot_filter))
        initial.append(ldc4)
        if fit_ldc4:
            limits1.append(fit_ldc_limits[0])
            limits2.append(fit_ldc_limits[1])
        else:
            limits1.append(np.nan)
            limits2.append(np.nan)
        for observation_num, observation in enumerate(self.observations):
            if self.observations[observation]['filter'] == phot_filter:
                parameters_map[observation_num].append(len(names) - 1)

        # Per-filter rp_over_rs.
        if fit_individual_rp_over_rs:
            names.append('rp_{0}'.format(phot_filter))
            print_names.append('(R_\mathrm{p}/R_*)_{' + phot_filter + '}')
            initial.append(rp_over_rs)
            if fit_rp_over_rs:
                limits1.append(rp_over_rs * fit_rp_over_rs_limits[0])
                limits2.append(rp_over_rs * fit_rp_over_rs_limits[1])
            else:
                limits1.append(np.nan)
                limits2.append(np.nan)
            for observation_num, observation in enumerate(self.observations):
                if self.observations[observation]['filter'] == phot_filter:
                    parameters_map[observation_num].append(len(names) - 1)

    # Single, shared rp_over_rs across all filters.
    if not fit_individual_rp_over_rs:
        names.append('rp')
        print_names.append('(R_\mathrm{p}/R_*)')
        initial.append(self.rp_over_rs)
        if fit_rp_over_rs:
            limits1.append(self.rp_over_rs * fit_rp_over_rs_limits[0])
            limits2.append(self.rp_over_rs * fit_rp_over_rs_limits[1])
        else:
            limits1.append(np.nan)
            limits2.append(np.nan)
        for observation_num, observation in enumerate(self.observations):
            parameters_map[observation_num].append(len(names) - 1)

    # orbital parameters (shared by all observations)
    names.append('P')
    print_names.append('P')
    initial.append(self.period)
    if fit_period:
        limits1.append(self.period * fit_period_limits[0])
        limits2.append(self.period * fit_period_limits[1])
    else:
        limits1.append(np.nan)
        limits2.append(np.nan)
    for observation_num, observation in enumerate(self.observations):
        parameters_map[observation_num].append(len(names) - 1)

    names.append('a')
    print_names.append('a/R_*')
    initial.append(self.sma_over_rs)
    if fit_sma_over_rs:
        limits1.append(self.sma_over_rs * fit_sma_over_rs_limits[0])
        limits2.append(self.sma_over_rs * fit_sma_over_rs_limits[1])
    else:
        limits1.append(np.nan)
        limits2.append(np.nan)
    for observation_num, observation in enumerate(self.observations):
        parameters_map[observation_num].append(len(names) - 1)

    # Eccentricity is always kept fixed (nan limits).
    names.append('e')
    print_names.append('e')
    initial.append(self.eccentricity)
    limits1.append(np.nan)
    limits2.append(np.nan)
    for observation_num, observation in enumerate(self.observations):
        parameters_map[observation_num].append(len(names) - 1)

    names.append('i')
    print_names.append('i')
    initial.append(self.inclination)
    if fit_inclination:
        limits1.append(fit_inclination_limits[0])
        limits2.append(fit_inclination_limits[1])
    else:
        limits1.append(np.nan)
        limits2.append(np.nan)
    for observation_num, observation in enumerate(self.observations):
        parameters_map[observation_num].append(len(names) - 1)

    # Argument of periastron is always kept fixed (nan limits).
    names.append('w')
    print_names.append('\omega')
    initial.append(self.periastron)
    limits1.append(np.nan)
    limits2.append(np.nan)
    for observation_num, observation in enumerate(self.observations):
        parameters_map[observation_num].append(len(names) - 1)

    # time parameters
    # Re-reference the mid-time to the epoch closest to the error-weighted
    # mean epoch of all data points (weights = 1 / relative-error^2).
    test_epochs = []
    test_epochs_weights = []
    for observation in self.observations:
        test_epochs.append((self.observations[observation]['time'] - self.mid_time) / self.period)
        norm_errors = self.observations[observation]['flux_unc'] / self.observations[observation]['flux']
        test_epochs_weights.append(1 / (norm_errors * norm_errors))
    test_epochs = np.concatenate(test_epochs)
    test_epochs_weights = np.concatenate(test_epochs_weights)
    new_epoch = np.round(np.sum(test_epochs * test_epochs_weights) / np.sum(test_epochs_weights), 0)
    new_mid_time = self.mid_time + new_epoch * self.period

    # Re-compute each observation's epoch relative to the new reference mid-time.
    for observation in self.observations:
        self.observations[observation]['epoch'] = int(round((np.mean(self.observations[observation]['time']) - new_mid_time) / self.period, 0))
    unique_epochs = []
    for observation in self.observations:
        if self.observations[observation]['epoch'] not in unique_epochs:
            unique_epochs.append(self.observations[observation]['epoch'])

    if not fit_individual_times:
        # Single global mid-time T_0.
        names.append('T_0')
        print_names.append('T_0')
        initial.append(new_mid_time)
        if fit_mid_time:
            limits1.append(new_mid_time + fit_mid_time_limits[0])
            limits2.append(new_mid_time + fit_mid_time_limits[1])
        else:
            limits1.append(np.nan)
            limits2.append(np.nan)
        for observation_num, observation in enumerate(self.observations):
            parameters_map[observation_num].append(len(names) - 1)
    else:
        if fit_period:
            raise PyLCInputError('Period and individual mid times cannot be fitted simultaneously.')
        # One independent mid-time per observed epoch.
        for epoch in unique_epochs:
            names.append('T_mid_{0}'.format(epoch))
            print_names.append('T_\mathrm{mid_' + str(epoch) + '}')
            initial.append(new_mid_time + epoch * self.period)
            if fit_mid_time:
                limits1.append(new_mid_time + epoch * self.period + fit_mid_time_limits[0])
                limits2.append(new_mid_time + epoch * self.period + fit_mid_time_limits[1])
            else:
                limits1.append(np.nan)
                limits2.append(np.nan)
            for observation_num, observation in enumerate(self.observations):
                if self.observations[observation]['epoch'] == epoch:
                    parameters_map[observation_num].append(len(names) - 1)

    # Concatenate all observations into single series for the fit.
    model_time = np.array([])
    model_flux = np.array([])
    model_flux_unc = np.array([])
    for observation in self.observations:
        model_time = np.append(model_time, self.observations[observation]['time'])
        model_flux = np.append(model_flux, self.observations[observation]['flux'])
        model_flux_unc = np.append(model_flux_unc, self.observations[observation]['flux_unc'])

    parameters_map = np.array(parameters_map)

    def detrend_model(model_time, *model_variables):
        # Detrending-only model: N * (1 + L*dt + Q*dt^2) per observation.
        model = []
        for observation_num, observation in enumerate(self.observations):
            detrend_zero, detrend_one, detrend_two, ldc1, ldc2, ldc3, ldc4, r, p, a, e, i, w, mt = \
                np.array(model_variables)[parameters_map[observation_num]]
            deltatime = self.observations[observation]['dtime']
            model.append(detrend_zero * (1 + detrend_one * deltatime + detrend_two * deltatime * deltatime))
        return np.concatenate(model)

    def full_model(model_time, *model_variables):
        # Detrending multiplied by the exposure-integrated transit model.
        model_variables = np.array(model_variables)
        model = []
        for observation_num, observation in enumerate(self.observations):
            detrend_zero, detrend_one, detrend_two, ldc1, ldc2, ldc3, ldc4, r, p, a, e, i, w, mt = \
                model_variables[parameters_map[observation_num]]
            deltatime = self.observations[observation]['dtime']
            model1 = detrend_zero * (1 + detrend_one * deltatime + detrend_two * deltatime * deltatime)
            model.append(model1 * transit_integrated(
                [ldc1, ldc2, ldc3, ldc4], r, p, a, e, i, w, mt,
                time_array=self.observations[observation]['time'],
                exp_time=self.observations[observation]['exp_time'],
                max_sub_exp_time=max_sub_exp_time,
                method=self.ldc_method,
                precision=precision
            ))
        return np.concatenate(model)

    fitting = EmceeFitting(model_time, model_flux, model_flux_unc,
                           full_model, initial, limits1, limits2, walkers,
                           iterations, burn_in,
                           data_x_name='time', data_y_name='flux', data_x_print_name='Time',
                           data_y_print_name='Relative Flux',
                           parameters_names=names, parameters_print_names=print_names,
                           counter=counter)

    fitting.run_mcmc()
    results = fitting.results

    # TODO
    # save observations separately and calculate some statistics individually

    # Record the fit settings alongside the results.
    results['settings'] = {}
    results['settings']['max_sub_exp_time'] = max_sub_exp_time
    results['settings']['precision'] = precision
    results['settings']['detrending_order'] = detrending_order
    results['settings']['iterations'] = iterations
    results['settings']['walkers'] = walkers
    results['settings']['burn_in'] = burn_in
    results['settings']['fit_ldc1'] = fit_ldc1
    results['settings']['fit_ldc2'] = fit_ldc2
    results['settings']['fit_ldc3'] = fit_ldc3
    results['settings']['fit_ldc4'] = fit_ldc4
    results['settings']['fit_rp_over_rs'] = fit_rp_over_rs
    # NOTE(review): key says 'force_same' but stores fit_individual_rp_over_rs —
    # the meaning looks inverted; confirm against the consumers of this key.
    results['settings']['force_same_rp_over_rs'] = fit_individual_rp_over_rs
    results['settings']['fit_sma_over_rs'] = fit_sma_over_rs
    results['settings']['fit_inclination'] = fit_inclination
    results['settings']['fit_mid_time'] = fit_mid_time
    results['settings']['fit_period'] = fit_period
    results['settings']['fit_individual_times'] = fit_individual_times
    results['settings']['fit_ldc_limits'] = fit_ldc_limits
    results['settings']['fit_rp_over_rs_limits'] = fit_rp_over_rs_limits
    results['settings']['fit_sma_over_rs_limits'] = fit_sma_over_rs_limits
    results['settings']['fit_inclination_limits'] = fit_inclination_limits
    results['settings']['fit_mid_time_limits'] = fit_mid_time_limits
    results['settings']['fit_period_limits'] = fit_period_limits
    results['settings']['filter_map'] = self.filters

    # Detrended versions of the series: divide by the best-fit trend.
    results['detrended_input_series'] = {
        'time': results['input_series']['time'],
        'flux': results['input_series']['flux'] / detrend_model(model_time, *results['parameters_final']),
        'flux_unc': results['input_series']['flux_unc'] / detrend_model(model_time, *results['parameters_final'])}

    results['detrended_output_series'] = {
        'model': results['output_series']['model'] / detrend_model(model_time, *results['parameters_final']),
        'residuals': (results['output_series']['residuals']
                      / detrend_model(model_time, *results['parameters_final']))}

    results['detrended_statistics'] = {
        'res_mean': np.mean(results['detrended_output_series']['residuals']),
        'res_std': np.std(results['detrended_output_series']['residuals']),
        'res_rms': np.sqrt(np.mean(results['detrended_output_series']['residuals'] ** 2))
    }

    # Per-point observation-id series, used to split the concatenated results
    # back into the individual observations.
    observations_id_series = np.array([])
    for observation in self.observations:
        observations_id_series = np.append(observations_id_series,
                                           np.ones(len(self.observations[observation]['time'])) * observation)

    for observation in self.observations:
        id_series = np.where(observations_id_series == observation)

        # Raw (trended) per-observation model, residuals and statistics.
        self.observations[observation]['model'] = results['output_series']['model'][id_series]
        self.observations[observation]['residuals'] = results['output_series']['residuals'][id_series]
        # One-sided autocorrelation of the residuals, normalised to lag 0.
        res_autocorr = np.correlate(self.observations[observation]['residuals'],
                                    self.observations[observation]['residuals'], mode='full')
        res_autocorr = res_autocorr[res_autocorr.size // 2:] / res_autocorr[res_autocorr.size // 2:][0]
        self.observations[observation]['res_autocorr'] = res_autocorr
        self.observations[observation]['res_max_autocorr'] = np.max(res_autocorr[1:])
        self.observations[observation]['res_mean'] = np.mean(self.observations[observation]['residuals'])
        self.observations[observation]['res_std'] = np.std(self.observations[observation]['residuals'])
        self.observations[observation]['res_rms'] = np.sqrt(np.mean(self.observations[observation]['residuals'] ** 2))
        self.observations[observation]['res_chi_sqr'] = np.sum(
            (self.observations[observation]['residuals'] ** 2) / (self.observations[observation]['flux_unc'] ** 2))
        self.observations[observation]['res_red_chi_sqr'] = (
            self.observations[observation]['res_chi_sqr'] / (
                len(self.observations[observation]['flux_unc']) - len(results['statistics']['corr_variables'])))

        # Detrended per-observation series and the same statistics.
        self.observations[observation]['detrended_flux'] = results['detrended_input_series']['flux'][id_series]
        self.observations[observation]['detrended_flux_unc'] = results['detrended_input_series']['flux_unc'][id_series]
        self.observations[observation]['detrended_model'] = results['detrended_output_series']['model'][id_series]
        self.observations[observation]['detrended_residuals'] = results['detrended_output_series']['residuals'][id_series]
        res_autocorr = np.correlate(self.observations[observation]['detrended_residuals'],
                                    self.observations[observation]['detrended_residuals'], mode='full')
        res_autocorr = res_autocorr[res_autocorr.size // 2:] / res_autocorr[res_autocorr.size // 2:][0]
        self.observations[observation]['detrended_res_autocorr'] = res_autocorr
        self.observations[observation]['detrended_res_max_autocorr'] = np.max(res_autocorr[1:])
        self.observations[observation]['detrended_res_mean'] = np.mean(self.observations[observation]['detrended_residuals'])
        self.observations[observation]['detrended_res_std'] = np.std(self.observations[observation]['detrended_residuals'])
        self.observations[observation]['detrended_res_rms'] = np.sqrt(
            np.mean(self.observations[observation]['detrended_residuals'] ** 2))
        self.observations[observation]['detrended_res_chi_sqr'] = np.sum(
            (self.observations[observation]['detrended_residuals'] ** 2) / (self.observations[observation]['detrended_flux_unc'] ** 2))
        self.observations[observation]['detrended_res_red_chi_sqr'] = (
            self.observations[observation]['detrended_res_chi_sqr'] / (
                len(self.observations[observation]['detrended_flux_unc']) - len(results['statistics']['corr_variables'])))

        # Per-observation diagnostics file (ids start at 0, file names at 1).
        w = open(os.path.join(output_folder, 'diagnostics_dataset_{0}.txt'.format(observation + 1)), 'w')
        w.write('\n#Residuals:\n')
        w.write('#Mean: {0}\n'.format(self.observations[observation]['res_mean']))
        w.write('#STD: {0}\n'.format(self.observations[observation]['res_std']))
        w.write('#RMS: {0}\n'.format(self.observations[observation]['res_rms']))
        w.write('#Max auto-correlation: {0}\n'.format(self.observations[observation]['res_max_autocorr']))
        w.write('#Chi squared: {0}\n'.format(self.observations[observation]['res_chi_sqr']))
        w.write('#Reduced chi squared: {0}\n'.format(self.observations[observation]['res_red_chi_sqr']))
        w.write('\n\n#Detrended Residuals:\n')
        w.write('#Mean: {0}\n'.format(self.observations[observation]['detrended_res_mean']))
        w.write('#STD: {0}\n'.format(self.observations[observation]['detrended_res_std']))
        w.write('#RMS: {0}\n'.format(self.observations[observation]['detrended_res_rms']))
        w.write('#Max auto-correlation: {0}\n'.format(self.observations[observation]['detrended_res_max_autocorr']))
        w.write('#Chi squared: {0}\n'.format(self.observations[observation]['detrended_res_chi_sqr']))
        w.write('#Reduced chi squared: {0}\n'.format(self.observations[observation]['detrended_res_red_chi_sqr']))
        w.close()

    results['observations'] = self.observations

    # Persist results and produce the standard output files/plots.
    save_dict(results, os.path.join(output_folder, 'results.pickle'))
    fitting.save_results(os.path.join(output_folder, 'results.txt'))
    fitting.plot_corner(os.path.join(output_folder, 'correlations.pdf'))
    fitting.plot_traces(os.path.join(output_folder, 'traces.pdf'))
    plot_transit_fitting_models(results, os.path.join(output_folder, 'lightcurves.pdf'))
def eclipse_fitting(self, output_folder,
max_sub_exp_time=10, precision=3,
detrending_order=2,
iterations=130000, walkers=200, burn_in=30000,
fit_fp_over_fs=True, fit_individual_fp_over_fs=False,
fit_rp_over_rs=False, fit_individual_rp_over_rs=False,
fit_sma_over_rs=False, fit_inclination=False,
fit_mid_time=False, fit_period=False,
fit_individual_times=False,
fit_rp_over_rs_limits=[0.5, 2.0],
fit_fp_over_fs_limits=[0.001, 1000.0],
fit_sma_over_rs_limits=[0.5, 2.0],
fit_inclination_limits=[70.0, 90.0],
fit_mid_time_limits=[-0.2, 0.2],
fit_period_limits=[0.8, 1.2],
counter='MCMC'):
if not os.path.isdir(output_folder):
os.mkdir(output_folder)
parameters_map = [[] for observation in self.observations]
names = []
print_names = []
limits1 = []
limits2 = []
initial = []
# de-trending parameters
for observation_num, observation in enumerate(self.observations):
max_limit = (10 * (max(self.observations[observation]['flux']) - min(self.observations[observation]['flux'])) /
(max(self.observations[observation]['flux']) - min(self.observations[observation]['flux'])) / np.mean(self.observations[observation]['flux']))
names.append('N_{0}'.format(observation_num + 1))
print_names.append('N_{0}'.format(observation_num + 1))
initial.append(np.mean(self.observations[observation]['flux']))
limits1.append(0.5 * np.mean(self.observations[observation]['flux']))
limits2.append(1.5 * np.mean(self.observations[observation]['flux']))
parameters_map[observation_num].append(len(names) - 1)
names.append('L_{0}'.format(observation_num + 1))
print_names.append('L_{0}'.format(observation_num + 1))
initial.append(0)
if detrending_order >= 1:
limits1.append(-max_limit)
limits2.append(max_limit)
else:
limits1.append(np.nan)
limits2.append(np.nan)
parameters_map[observation_num].append(len(names) - 1)
names.append('Q_{0}'.format(observation_num + 1))
print_names.append('Q_{0}'.format(observation_num + 1))
initial.append(0)
if detrending_order == 2:
limits1.append(-5)
limits2.append(5)
else:
limits1.append(np.nan)
limits2.append(np.nan)
parameters_map[observation_num].append(len(names) - 1)
# limb-darkening and rp_over_rs parameters
unique_filters = []
for observation in self.observations:
if self.observations[observation]['filter'] not in unique_filters:
unique_filters.append(self.observations[observation]['filter'])
fp_over_fs = 0.00001
for phot_filter in unique_filters:
filter_data = self.filter(phot_filter)
fp_over_fs = filter_data.fp_over_fs
if fit_individual_fp_over_fs:
names.append('fp_{0}'.format(phot_filter))
print_names.append('(F_\mathrm{p}/F_*)_{' + phot_filter + '}')
initial.append(fp_over_fs)
if fit_fp_over_fs:
limits1.append(fp_over_fs * fit_fp_over_fs_limits[0])
limits2.append(fp_over_fs * fit_fp_over_fs_limits[1])
else:
limits1.append(np.nan)
limits2.append(np.nan)
for observation_num, observation in enumerate(self.observations):
if self.observations[observation]['filter'] == phot_filter:
parameters_map[observation_num].append(len(names) - 1)
if not fit_individual_fp_over_fs:
names.append('fp')
print_names.append('(F_\mathrm{p}/F_*)')
initial.append(fp_over_fs)
if fit_fp_over_fs:
limits1.append(fp_over_fs * fit_fp_over_fs_limits[0])
limits2.append(fp_over_fs * fit_fp_over_fs_limits[1])
else:
limits1.append(np.nan)
limits2.append(np.nan)
for observation_num, observation in enumerate(self.observations):
parameters_map[observation_num].append(len(names) - 1)
rp_over_rs = self.rp_over_rs
for phot_filter in unique_filters:
filter_data = self.filter(phot_filter)
rp_over_rs = filter_data.rp_over_rs
if fit_individual_rp_over_rs:
names.append('rp_{0}'.format(phot_filter))
print_names.append('(R_\mathrm{p}/R_*)_{' + phot_filter + '}')
initial.append(rp_over_rs)
if fit_rp_over_rs:
limits1.append(rp_over_rs * fit_rp_over_rs_limits[0])
limits2.append(rp_over_rs * fit_rp_over_rs_limits[1])
else:
limits1.append(np.nan)
limits2.append(np.nan)
for observation_num, observation in enumerate(self.observations):
if self.observations[observation]['filter'] == phot_filter:
parameters_map[observation_num].append(len(names) - 1)
if not fit_individual_rp_over_rs:
names.append('rp')
print_names.append('(R_\mathrm{p}/R_*)')
initial.append(rp_over_rs)
if fit_rp_over_rs:
limits1.append(rp_over_rs * fit_rp_over_rs_limits[0])
limits2.append(rp_over_rs * fit_rp_over_rs_limits[1])
else:
limits1.append(np.nan)
limits2.append(np.nan)
for observation_num, observation in enumerate(self.observations):
parameters_map[observation_num].append(len(names) - 1)
# orbital parameters
names.append('P')
print_names.append('P')
initial.append(self.period)
if fit_period:
limits1.append(self.period * fit_period_limits[0])
limits2.append(self.period * fit_period_limits[1])
else:
limits1.append(np.nan)
limits2.append(np.nan)
for observation_num, observation in enumerate(self.observations):
parameters_map[observation_num].append(len(names) - 1)
names.append('a')
print_names.append('a/R_*')
initial.append(self.sma_over_rs)
if fit_sma_over_rs:
limits1.append(self.sma_over_rs * fit_sma_over_rs_limits[0])
limits2.append(self.sma_over_rs * fit_sma_over_rs_limits[1])
else:
limits1.append(np.nan)
limits2.append(np.nan)
for observation_num, observation in enumerate(self.observations):
parameters_map[observation_num].append(len(names) - 1)
names.append('e')
print_names.append('e')
initial.append(self.eccentricity)
limits1.append(np.nan)
limits2.append(np.nan)
for observation_num, observation in enumerate(self.observations):
parameters_map[observation_num].append(len(names) - 1)
names.append('i')
print_names.append('i')
initial.append(self.inclination)
if fit_inclination:
limits1.append(fit_inclination_limits[0])
limits2.append(fit_inclination_limits[1])
else:
limits1.append(np.nan)
limits2.append(np.nan)
for observation_num, observation in enumerate(self.observations):
parameters_map[observation_num].append(len(names) - 1)
names.append('w')
print_names.append('\omega')
initial.append(self.periastron)
limits1.append(np.nan)
limits2.append(np.nan)
for observation_num, observation in enumerate(self.observations):
parameters_map[observation_num].append(len(names) - 1)
# time parameters
unique_epochs = []
for observation in self.observations:
if self.observations[observation]['epoch'] not in unique_epochs:
unique_epochs.append(self.observations[observation]['epoch'])
self.eclipse_mid_time += min(unique_epochs) * self.period
if not fit_individual_times:
names.append('T_0')
print_names.append('T_0')
initial.append(self.eclipse_mid_time)
if fit_mid_time:
limits1.append(self.eclipse_mid_time + fit_mid_time_limits[0])
limits2.append(self.eclipse_mid_time + fit_mid_time_limits[1])
else:
limits1.append(np.nan)
limits2.append(np.nan)
for observation_num, observation in enumerate(self.observations):
parameters_map[observation_num].append(len(names) - 1)
else:
if fit_period:
raise PyLCInputError('Period and individual mid times cannot be fitted simultaneously.')
for epoch in unique_epochs:
epoch -= min(unique_epochs)
names.append('T_{0}'.format(epoch))
print_names.append('T_{' + str(epoch) + '}')
initial.append(self.eclipse_mid_time + epoch * self.period)
if fit_mid_time:
limits1.append(self.eclipse_mid_time + epoch * self.period + fit_mid_time_limits[0])
limits2.append(self.eclipse_mid_time + epoch * self.period + fit_mid_time_limits[1])
else:
limits1.append(np.nan)
limits2.append(np.nan)
for observation_num, observation in enumerate(self.observations):
if self.observations[observation]['epoch'] - min(unique_epochs) == epoch:
parameters_map[observation_num].append(len(names) - 1)
model_time = np.array([])
model_flux = np.array([])
model_flux_unc = np.array([])
for observation in self.observations:
model_time = np.append(model_time, self.observations[observation]['time'])
model_flux = np.append(model_flux, self.observations[observation]['flux'])
model_flux_unc = np.append(model_flux_unc, self.observations[observation]['flux_unc'])
parameters_map = np.array(parameters_map)
def detrend_model(model_time, *model_variables):
model = []
for observation_num, observation in enumerate(self.observations):
detrend_zero, detrend_one, detrend_two, f, r, p, a, e, i, w, mt = \
np.array(model_variables)[parameters_map[observation_num]]
deltatime = self.observations[observation]['dtime']
model.append(detrend_zero * (1 + detrend_one * deltatime + detrend_two * deltatime * deltatime))
return np.concatenate(model)
        def full_model(model_time, *model_variables):
            # Full model: quadratic-in-time detrending baseline multiplied by the
            # integrated eclipse model, per observation, concatenated in
            # observation order (matching model_time / model_flux construction).
            model = []
            for observation_num, observation in enumerate(self.observations):
                # f, r, p, a, e, i, w, mt are forwarded positionally to
                # eclipse_integrated -- assumed to match its signature (defined
                # elsewhere in this package); TODO confirm parameter order.
                detrend_zero, detrend_one, detrend_two, f, r, p, a, e, i, w, mt = \
                    np.array(model_variables)[parameters_map[observation_num]]
                deltatime = self.observations[observation]['dtime']
                model1 = detrend_zero * (1 + detrend_one * deltatime + detrend_two * deltatime * deltatime)
                model.append(model1 * eclipse_integrated(f, r, p, a, e, i, w, mt,
                                                         time_array=self.observations[observation]['time'],
                                                         exp_time=self.observations[observation]['exp_time'],
                                                         max_sub_exp_time=max_sub_exp_time,
                                                         precision=precision
                                                         ))
            return np.concatenate(model)
fitting = EmceeFitting(model_time, model_flux, model_flux_unc,
full_model, initial, limits1, limits2, walkers,
iterations, burn_in,
data_x_name='time', data_y_name='flux', data_x_print_name='Time',
data_y_print_name='Relative Flux',
parameters_names=names, parameters_print_names=print_names,
counter=counter)
fitting.run_mcmc()
results = fitting.results
# TODO
# save observations separately and calculate some statistics individually
results['settings'] = {}
results['settings']['max_sub_exp_time'] = max_sub_exp_time
results['settings']['precision'] = precision
results['settings']['detrending_order'] = detrending_order
results['settings']['iterations'] = iterations
results['settings']['walkers'] = walkers
results['settings']['burn_in'] = burn_in
results['settings']['fit_fp_over_fs'] = fit_fp_over_fs
results['settings']['force_same_fp_over_fs'] = fit_individual_fp_over_fs
results['settings']['fit_rp_over_rs'] = fit_rp_over_rs
results['settings']['force_same_rp_over_rs'] = fit_individual_rp_over_rs
results['settings']['fit_sma_over_rs'] = fit_sma_over_rs
results['settings']['fit_inclination'] = fit_inclination
results['settings']['fit_mid_time'] = fit_mid_time
results['settings']['fit_period'] = fit_period
results['settings']['fit_individual_times'] = fit_individual_times
results['settings']['fit_rp_over_rs_limits'] = fit_rp_over_rs_limits
results['settings']['fit_rp_over_rs_limits'] = fit_fp_over_fs_limits
results['settings']['fit_sma_over_rs_limits'] = fit_sma_over_rs_limits
results['settings']['fit_inclination_limits'] = fit_inclination_limits
results['settings']['fit_mid_time_limits'] = fit_mid_time_limits
results['settings']['fit_period_limits'] = fit_period_limits
results['settings']['filter_map'] = self.filters
results['detrended_input_series'] = {
'time': results['input_series']['time'],
'flux': results['input_series']['flux'] / detrend_model(model_time, *results['parameters_final']),
'flux_unc': results['input_series']['flux_unc'] / detrend_model(model_time, *results['parameters_final'])}
results['detrended_output_series'] = {
'model': results['output_series']['model'] / detrend_model(model_time, *results['parameters_final']),
'residuals': (results['output_series']['residuals']
/ detrend_model(model_time, *results['parameters_final']))}
results['detrended_statistics'] = {
'res_mean': np.mean(results['detrended_output_series']['residuals']),
'res_std': np.std(results['detrended_output_series']['residuals']),
'res_rms': np.sqrt(np.mean(results['detrended_output_series']['residuals'] ** 2))
}
observations_id_series = np.array([])
for observation in self.observations:
observations_id_series = np.append(observations_id_series,
np.ones(len(self.observations[observation]['time'])) * observation)
for observation in self.observations:
id_series = np.where(observations_id_series == observation)
self.observations[observation]['model'] = results['output_series']['model'][id_series]
self.observations[observation]['residuals'] = results['output_series']['residuals'][id_series]
res_autocorr = np.correlate(self.observations[observation]['residuals'],
self.observations[observation]['residuals'], mode='full')
res_autocorr = res_autocorr[res_autocorr.size // 2:] / res_autocorr[res_autocorr.size // 2:][0]
self.observations[observation]['res_autocorr'] = res_autocorr
self.observations[observation]['res_max_autocorr'] = np.max(res_autocorr[1:])
self.observations[observation]['res_mean'] = np.mean(self.observations[observation]['residuals'])
self.observations[observation]['res_std'] = np.std(self.observations[observation]['residuals'])
self.observations[observation]['res_rms'] = np.sqrt(np.mean(self.observations[observation]['residuals'] ** 2))
self.observations[observation]['res_chi_sqr'] = np.sum(
(self.observations[observation]['residuals'] ** 2) / (self.observations[observation]['flux_unc'] ** 2))
self.observations[observation]['res_red_chi_sqr'] = (
self.observations[observation]['res_chi_sqr'] / (
len(self.observations[observation]['flux_unc']) - len(results['statistics']['corr_variables'])))
self.observations[observation]['detrended_flux'] = results['detrended_input_series']['flux'][id_series]
self.observations[observation]['detrended_flux_unc'] = results['detrended_input_series']['flux_unc'][id_series]
self.observations[observation]['detrended_model'] = results['detrended_output_series']['model'][id_series]
self.observations[observation]['detrended_residuals'] = results['detrended_output_series']['residuals'][id_series]
res_autocorr = np.correlate(self.observations[observation]['detrended_residuals'],
self.observations[observation]['detrended_residuals'], mode='full')
res_autocorr = res_autocorr[res_autocorr.size // 2:] / res_autocorr[res_autocorr.size // 2:][0]
self.observations[observation]['detrended_res_autocorr'] = res_autocorr
self.observations[observation]['detrended_res_max_autocorr'] = np.max(res_autocorr[1:])
self.observations[observation]['detrended_res_mean'] = np.mean(self.observations[observation]['detrended_residuals'])
self.observations[observation]['detrended_res_std'] = np.std(self.observations[observation]['detrended_residuals'])
self.observations[observation]['detrended_res_rms'] = np.sqrt(
np.mean(self.observations[observation]['detrended_residuals'] ** 2))
self.observations[observation]['detrended_res_chi_sqr'] = np.sum(
(self.observations[observation]['detrended_residuals'] ** 2) / (self.observations[observation]['detrended_flux_unc'] ** 2))
self.observations[observation]['detrended_res_red_chi_sqr'] = (
self.observations[observation]['detrended_res_chi_sqr'] / (
len(self.observations[observation]['detrended_flux_unc']) - len(results['statistics']['corr_variables'])))
w = open(os.path.join(output_folder, 'diagnostics_dataset_{0}.txt'.format(observation + 1)), 'w')
w.write('\n#Residuals:\n')
w.write('#Mean: {0}\n'.format(self.observations[observation]['res_mean']))
w.write('#STD: {0}\n'.format(self.observations[observation]['res_std']))
w.write('#RMS: {0}\n'.format(self.observations[observation]['res_rms']))
w.write('#Max auto-correlation: {0}\n'.format(self.observations[observation]['res_max_autocorr']))
w.write('#Chi squared: {0}\n'.format(self.observations[observation]['res_chi_sqr']))
w.write('#Reduced chi squared: {0}\n'.format(self.observations[observation]['res_red_chi_sqr']))
w.write('\n\n#Detrended Residuals:\n')
w.write('#Mean: {0}\n'.format(self.observations[observation]['detrended_res_mean']))
w.write('#STD: {0}\n'.format(self.observations[observation]['detrended_res_std']))
w.write('#RMS: {0}\n'.format(self.observations[observation]['detrended_res_rms']))
w.write('#Max auto-correlation: {0}\n'.format(self.observations[observation]['detrended_res_max_autocorr']))
w.write('#Chi squared: {0}\n'.format(self.observations[observation]['detrended_res_chi_sqr']))
w.write('#Reduced chi squared: {0}\n'.format(self.observations[observation]['detrended_res_red_chi_sqr']))
w.close()
results['observations'] = self.observations
save_dict(results, os.path.join(output_folder, 'results.pickle'))
fitting.save_results(os.path.join(output_folder, 'results.txt'))
fitting.plot_corner(os.path.join(output_folder, 'correlations.pdf'))
fitting.plot_traces(os.path.join(output_folder, 'traces.pdf'))
plot_transit_fitting_models(results, os.path.join(output_folder, 'lightcurves.pdf'))
| {
"repo_name": "ucl-exoplanets/pylightcurve",
"path": "pylightcurve/models/exoplanet.py",
"copies": "1",
"size": "56037",
"license": "mit",
"hash": -4768177089704062000,
"line_mean": 45.6975,
"line_max": 167,
"alpha_frac": 0.5784570909,
"autogenerated": false,
"ratio": 3.704925619834711,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9774952521324735,
"avg_score": 0.001686037881995202,
"num_lines": 1200
} |
__all__ = ['Players', 'Games', 'Votes']
from mongoengine import (BooleanField, Document, IntField, DictField, DateTimeField,
ListField, ReferenceField, StringField, URLField, FloatField)
class Players(Document):
    """A game player: identifiers, live state and appearance info."""
    guid = StringField()
    guids = ListField(StringField())  # presumably all guids seen for this player -- confirm
    name = StringField()
    names = ListField(StringField())  # presumably all names seen for this player -- confirm
    online = BooleanField()
    score = IntField()
    team = StringField()
    team_id = IntField()
    skill = StringField()
    address = StringField()
    bot = BooleanField()
    headmodel = StringField()
    last_seen = DateTimeField()
    # fields allowed to be updated by context info
    update_fields = ['name', 'online', 'score', 'team', 'team_id', 'skill', 'address', 'bot', 'headmodel']
class Gamemaps(Document):
    """A playable map: artwork, supported gametypes, player limits and play stats."""
    name = StringField()
    levelshot = URLField()
    images = ListField(URLField())
    gametypes = ListField(IntField())
    min_players = IntField(default=2)
    max_players = IntField(default=12)
    times_played = IntField(default=0)
    last_played = DateTimeField()
class Votes(Document):
    """A single vote record linking a game, a player and a map/gametype."""
    game = ReferenceField('Games')
    player = ReferenceField('Players')
    gamemap = ReferenceField('Gamemaps')
    gametype = IntField()
    num_players = IntField()
    vote = IntField()  # numeric vote value; semantics not visible here -- confirm
class Games(Document):
    """A game session: map, participating players, cast votes and lifecycle state."""
    timestamp = StringField()
    mapname = StringField()
    gamemap = ReferenceField('Gamemaps')
    gametype = IntField()
    players = ListField(ReferenceField('Players'))
    votes = ListField(ReferenceField('Votes'))
    state = StringField()
    start = StringField()  # stored as strings, not DateTimeField -- intentional? confirm
    stop = StringField()
    num_players = IntField()
    current = BooleanField()
    options = DictField()
    # fields allowed to be updated by context info
    update_fields = ['mapname', 'gametype', 'state', 'start', 'stop', 'num_players', 'current']
class PlayerVotes(Document):
    """Key/value document with a dict primary key and a float value.

    NOTE(review): the id/value shape looks like a map-reduce output
    collection -- confirm against the code that writes it.
    """
    id = DictField(primary_key=True)
    value = FloatField()
class IntermediatePlaylist(Document):
    """Key/value document with a dict primary key and a dict value.

    NOTE(review): same id/value shape as PlayerVotes, likely a map-reduce
    output collection -- confirm against the code that writes it.
    """
    id = DictField(primary_key=True)
    value = DictField()
class PlaylistItems(Document):
    """A scored playlist entry for a map/gametype with its supporting votes."""
    gamemap = ReferenceField('Gamemaps')
    votes = ListField(ReferenceField('Votes'))
    gametype = IntField(default=0)
    score = FloatField(default=0)
    modifiers = ListField(DictField())
| {
"repo_name": "aequitas/munerator",
"path": "munerator/common/models.py",
"copies": "1",
"size": "2265",
"license": "mit",
"hash": 3877534242712547300,
"line_mean": 27.6708860759,
"line_max": 106,
"alpha_frac": 0.6635761589,
"autogenerated": false,
"ratio": 3.9186851211072664,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005445749343684328,
"num_lines": 79
} |
__all__ = ["PlotManager"]
import os
import math
import matplotlib.pyplot as plt
import pylab
from utility.log import VLOG
#import Tkinter
""" This class helps local controller machine process
the data renderring routines besides the WebGL renderring
which is implemented by JavaScript. This helper mainly use
the matplotlib as its low level data process """
class PlotManager(object):
    """Render antenna S21 measurement files with matplotlib.

    Every public method parses one or more CSV data files of
    "<angle>,<gain>" lines (a non-numeric first token is treated as the
    legend label) and draws a cartesian, polar or bar plot.  Call Show()
    at the end to display all figures.
    """

    # Gain readings at or below this threshold are discarded as noise.
    minor_threshold = -100.0

    @staticmethod
    def _ReadGainFile(path):
        """Parse one data file; return (angles, gains, label).

        label is None when the file contains no label line.  Raises
        IOError (after logging) if the file cannot be opened.  The file
        handle is always closed, even if a data line fails to parse.
        """
        angles = []
        gains = []
        label = None
        try:
            fd = open(path)
        except IOError:
            # Bug fix: previously the failure was only logged and execution
            # continued, crashing later with a NameError on the unbound 'fd'.
            VLOG(3, "Failed to find valid data file from: %s" % path)
            raise
        try:
            for line in fd:
                token = line[:-1].split(',')
                try:
                    angle = int(token[0])
                except ValueError:
                    # Non-numeric first token: this is the legend label line.
                    label = token[0]
                    continue
                gain = float(token[1])
                if gain > PlotManager.minor_threshold:
                    angles.append(angle)
                    gains.append(gain)
        finally:
            # Bug fix: close the handle even when a line fails to parse.
            fd.close()
        return angles, gains, label

    @staticmethod
    def Show():
        """Block and display all figures drawn so far."""
        plt.show()

    @staticmethod
    def Scatter(path):
        """Draw a single cartesian angle/gain plot from one data file."""
        x, y, legend_label = PlotManager._ReadGainFile(path)
        plt.figure(figsize=(10, 10))
        # NOTE(review): the first sample is skipped here but still included
        # in the axis limits below -- confirm this is intentional.
        plt.plot(x[1:], y[1:], label=legend_label, linewidth=2.0, color='b')
        plt.xlabel('Current Theta Angle')
        plt.ylabel('S21 Gain in Decibel')
        plt.title('Antenna S21 2D Plot')
        plt.axis([min(x), max(x) + 10, min(y), max(y) + 3])
        plt.grid(b=True, which='major', color='r', linestyle='-', alpha=0.5)
        plt.grid(b=True, which='minor', color='r', linestyle='-', alpha=0.2)
        plt.minorticks_on()
        # should not block multiple figure draw
        plt.draw()
        # add legend for plot
        plt.legend(loc='upper right')

    @staticmethod
    def MultiScatter(path_list):
        """Overlay several data files on one cartesian plot."""
        color_pool = ['b', 'r', 'g', 'y', 'k']
        plt.figure(figsize=(10, 10))
        plt.xlabel('Current Theta Angle')
        plt.ylabel('S21 Gain in Decibel')
        plt.title('Antenna S21 2D Plot')
        plt.grid(b=True, which='major', color='r', linestyle='-', alpha=0.5)
        plt.grid(b=True, which='minor', color='r', linestyle='-', alpha=0.2)
        plt.minorticks_on()
        for color_index, path in enumerate(path_list):
            x, y, legend_label = PlotManager._ReadGainFile(path)
            plt.plot(x, y, linewidth=2.0, label=legend_label,
                     color=color_pool[color_index])
        # should not block multiple figure draw
        plt.draw()
        # add legend for plot
        plt.legend(loc='upper right')

    @staticmethod
    def ShareScatter(path_list):
        """Draw up to four data files in a 2x2 grid of cartesian subplots."""
        color_pool = ['b', 'r', 'g', 'y', 'k']
        spot_label = [221, 222, 223, 224]
        plt.figure(figsize=(10, 10))
        for color_index, path in enumerate(path_list):
            plt.subplot(spot_label[color_index])
            plt.xlabel('Current Theta Angle', fontsize=10)
            plt.ylabel('S21 Gain in Decibel', fontsize=10)
            plt.title('Antenna S21 2D Plot', fontsize=10)
            plt.grid(b=True, which='major', color='r', linestyle='-', alpha=0.5)
            plt.grid(b=True, which='minor', color='r', linestyle='-', alpha=0.2)
            plt.minorticks_on()
            x, y, legend_label = PlotManager._ReadGainFile(path)
            plt.plot(x, y, linewidth=2.0, label=legend_label,
                     color=color_pool[color_index])
            # should not block multiple figure draw
            plt.draw()
            # add legend for plot
            plt.legend(loc='upper right')
            legend_text = plt.gca().get_legend().get_texts()
            plt.setp(legend_text, fontsize='small')

    @staticmethod
    def Polar(path):
        """Draw one file on a polar axis, rebased so all radii are positive."""
        plt.figure(figsize=(10, 10))
        plt.axes(polar=True)
        x, y, _ = PlotManager._ReadGainFile(path)
        print("min of db: %s" % str(min(y)))
        print("size of db: %d" % len(y))
        # rebase the minor value to be zero
        bias = 1.5 * min(y)
        for i in range(len(x)):
            y[i] = y[i] - bias
            x[i] = float(x[i]) * math.pi / 180.0
        plt.plot(x, y, linewidth=2.0, color='b')
        plt.title('Antenna S21 Polar Plot')
        plt.grid(b=True, which='major', color='r', linestyle='-', alpha=0.5)
        plt.grid(b=True, which='minor', color='r', linestyle='-', alpha=0.2)
        plt.minorticks_on()
        # should not block multiple figure draw
        plt.draw()

    @staticmethod
    def MultiPolar(path_list):
        """Overlay several files on one polar axis."""
        color_pool = ['b', 'r', 'g', 'y', 'k']
        plt.figure(figsize=(10, 10))
        plt.axes(polar=True)
        for color_index, path in enumerate(path_list):
            x, y, _ = PlotManager._ReadGainFile(path)
            # rebase the minor value to be zero
            bias = 1.5 * min(y)
            for i in range(len(x)):
                y[i] = y[i] - bias
                x[i] = float(x[i]) * math.pi / 180.0
            plt.plot(x, y, linewidth=2.0, color=color_pool[color_index])
        plt.title('Antenna S21 Polar Plot')
        plt.grid(b=True, which='major', color='r', linestyle='-', alpha=0.5)
        plt.grid(b=True, which='minor', color='r', linestyle='-', alpha=0.2)
        plt.minorticks_on()
        # should not block multiple figure draw
        # Bug fix: every sibling method calls plt.draw() after this comment,
        # but this one dropped the call.
        plt.draw()

    @staticmethod
    def SharePolar(path_list):
        """Draw up to four files in a 2x2 grid of polar subplots."""
        color_pool = ['b', 'r', 'g', 'y', 'k']
        spot_label = [221, 222, 223, 224]
        plt.figure(figsize=(10, 10))
        for color_index, path in enumerate(path_list):
            plt.subplot(spot_label[color_index], polar=True)
            plt.title('Antenna S21 2D Plot', fontsize=10)
            plt.grid(b=True, which='major', color='r', linestyle='-', alpha=0.5)
            plt.grid(b=True, which='minor', color='r', linestyle='-', alpha=0.2)
            plt.minorticks_on()
            x, y, _ = PlotManager._ReadGainFile(path)
            # rebase the minor value to be zero
            bias = 1.5 * min(y)
            for i in range(len(x)):
                y[i] = y[i] - bias
                x[i] = float(x[i]) * math.pi / 180.0
            plt.plot(x, y, linewidth=2.0, color=color_pool[color_index])
            # should not block multiple figure draw
            plt.draw()

    @staticmethod
    def Bar(path):
        """Draw one data file as a bar chart on a blue background."""
        x, y, legend_label = PlotManager._ReadGainFile(path)
        plt.figure(figsize=(10, 10))
        plt.gca().set_axis_bgcolor('blue')
        plt.bar(x, y, alpha=1, width=1.8, label=legend_label, align='center', color='w')
        plt.xlabel('Current Theta Angle')
        plt.ylabel('S21 Gain in Decibel')
        plt.axis([min(x), max(x), min(y), max(y) + 3])
        plt.title('Antenna S21 2D Plot')
        plt.grid(False)
        # should not block multiple figure draw
        plt.draw()
        # add legend for plot
        plt.legend(loc='upper right')

    @staticmethod
    def ShareBar(path_list):
        """Draw up to four files as bar charts in a 2x2 grid."""
        fig = plt.figure(figsize=(10, 10))
        color_pool = ['blue', 'red', 'green', 'yellow']
        spot_label = [221, 222, 223, 224]
        for color_index, path in enumerate(path_list):
            ax = fig.add_subplot(spot_label[color_index])
            ax.patch.set_facecolor(color_pool[color_index])
            x, y, legend_label = PlotManager._ReadGainFile(path)
            plt.bar(x, y, alpha=1, width=1.8, label=legend_label, align='center', color='w')
            plt.xlabel('Current Theta Angle', fontsize=10)
            plt.ylabel('S21 Gain in Decibel', fontsize=10)
            plt.axis([min(x), max(x), min(y), max(y) + 3])
            plt.title('Antenna S21 2D Plot', fontsize=10)
            plt.grid(False)
            # should not block multiple figure draw
            plt.draw()
            # add legend for plot
            plt.legend(loc='upper right')
            legend_text = plt.gca().get_legend().get_texts()
            plt.setp(legend_text, fontsize='small')
| {
"repo_name": "weibohit/tools",
"path": "utility/plot_manager.py",
"copies": "1",
"size": "9691",
"license": "mit",
"hash": -5149952545558977000,
"line_mean": 30.1607717042,
"line_max": 86,
"alpha_frac": 0.5629965948,
"autogenerated": false,
"ratio": 3.220671319375208,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42836679141752076,
"avg_score": null,
"num_lines": null
} |
__all__=['plotsascurve','guinierplot','kratkyplot']
from .io import getsascurve
import matplotlib.pyplot as plt
from sastool.libconfig import qunit, dunit
def plotsascurve(samplename, *args, **kwargs):
    """Plot a scattering curve on double-logarithmic axes.

    Recognized keyword arguments (removed before forwarding the rest to
    the curve's plotting method): dist (curve selector, default None),
    factor (multiplicative scaling, default 1), errorbar (draw error
    bars if true), label (legend label, default derived from samplename
    and dist).
    """
    requested_dist = kwargs.pop('dist', None)
    data1d, dist = getsascurve(samplename, requested_dist)
    factor = kwargs.pop('factor', 1)
    if 'label' not in kwargs:
        if isinstance(dist, str):
            kwargs['label'] = samplename + ' ' + dist
        else:
            kwargs['label'] = samplename + ' %g mm' % dist
    errorbars = bool(kwargs.pop('errorbar', False))
    scaled = data1d * factor
    if errorbars:
        ret = scaled.errorbar(*args, **kwargs)
        plt.xscale('log')
        plt.yscale('log')
    else:
        ret = scaled.loglog(*args, **kwargs)
    plt.xlabel('q (' + qunit() + ')')
    plt.ylabel('$d\\Sigma/d\\Omega$ (cm$^{-1}$ sr$^{-1}$)')
    plt.legend(loc='best')
    plt.grid(True, which='both')
    plt.axis('tight')
    return ret
def guinierplot(*args, **kwargs):
    """Make a Guinier plot. This is simply a wrapper around plotsascurve()."""
    curve = plotsascurve(*args, **kwargs)
    axes = plt.gca()
    axes.set_xscale('power', exponent=2)
    axes.set_yscale('log')
    return curve
def kratkyplot(samplename, *args, **kwargs):
    """Make a Kratky plot (q^2 * I(q) versus q) of a scattering curve.

    Recognized keyword arguments (removed before forwarding the rest to
    matplotlib): dist (curve selector, default None), factor
    (multiplicative scaling, default 1), errorbar (draw error bars with
    propagated uncertainties if true), label (legend label, default
    derived from samplename and dist).
    """
    if 'dist' not in kwargs:
        kwargs['dist'] = None
    data1d, dist = getsascurve(samplename, kwargs['dist'])
    del kwargs['dist']
    if 'factor' in kwargs:
        factor = kwargs['factor']
        del kwargs['factor']
    else:
        factor = 1
    if 'label' not in kwargs:
        if isinstance(dist, str):
            kwargs['label'] = samplename + ' ' + dist
        else:
            kwargs['label'] = samplename + ' %g mm' % dist
    if 'errorbar' in kwargs:
        errorbars = bool(kwargs['errorbar'])
        del kwargs['errorbar']
    else:
        errorbars = False
    data1dscaled = data1d * factor
    if errorbars:
        if hasattr(data1dscaled, 'dx'):
            dx = data1dscaled.qError
            # Gaussian error propagation for y = q^2 * I:
            # dy^2 = (q^2 dI)^2 + (2 q I dq)^2
            dy = (data1dscaled.Error ** 2 * data1dscaled.q ** 4 +
                  data1dscaled.Intensity ** 2 * data1dscaled.qError ** 2
                  * data1dscaled.q ** 2 * 4) ** 0.5
        else:
            dx = None
            dy = data1dscaled.Error
        ret = plt.errorbar(data1dscaled.q,
                           data1dscaled.q ** 2 * data1dscaled.Intensity,
                           dy, dx, *args, **kwargs)
    else:
        ret = plt.plot(data1dscaled.q,
                       data1dscaled.Intensity * data1dscaled.q ** 2,
                       *args, **kwargs)
    # Bug fix: the abscissa is q, so label it with the q unit (as in
    # plotsascurve) rather than the real-space unit.
    plt.xlabel('q (' + qunit() + ')')
    plt.ylabel('$q^2 d\\Sigma/d\\Omega$ (' +
               dunit() +
               '$^{-2}$ cm$^{-1}$ sr$^{-1}$)')
    plt.legend(loc='best')
    plt.grid(True, which='both')
    plt.axis('tight')
    return ret
def porodplot(samplename, *args, **kwargs):
    """Make a Porod plot (q^4 * I(q) versus q) of a scattering curve.

    Recognized keyword arguments (removed before forwarding the rest to
    matplotlib): dist (curve selector, default None), factor
    (multiplicative scaling, default 1), errorbar (draw error bars with
    propagated uncertainties if true), label (legend label, default
    derived from samplename and dist).
    """
    if 'dist' not in kwargs:
        kwargs['dist'] = None
    data1d, dist = getsascurve(samplename, kwargs['dist'])
    del kwargs['dist']
    if 'factor' in kwargs:
        factor = kwargs['factor']
        del kwargs['factor']
    else:
        factor = 1
    if 'label' not in kwargs:
        if isinstance(dist, str):
            kwargs['label'] = samplename + ' ' + dist
        else:
            kwargs['label'] = samplename + ' %g mm' % dist
    if 'errorbar' in kwargs:
        errorbars = bool(kwargs['errorbar'])
        del kwargs['errorbar']
    else:
        errorbars = False
    data1dscaled = data1d * factor
    if errorbars:
        if hasattr(data1dscaled, 'dx'):
            dx = data1dscaled.qError
            # Gaussian error propagation for y = q^4 * I:
            # dy^2 = (q^4 dI)^2 + (4 q^3 I dq)^2
            # Bug fix: the dq term carried a factor of 14; 4^2 = 16.
            dy = (data1dscaled.Error ** 2 * data1dscaled.q ** 8 +
                  data1dscaled.Intensity ** 2 * data1dscaled.qError ** 2
                  * data1dscaled.q ** 6 * 16) ** 0.5
        else:
            dx = None
            dy = data1dscaled.Error
        ret = plt.errorbar(data1dscaled.q,
                           data1dscaled.q ** 4 * data1dscaled.Intensity,
                           dy, dx, *args, **kwargs)
    else:
        # Bug fix: this branch plotted q^2 * I (copied from kratkyplot); a
        # Porod plot shows q^4 * I, matching the errorbar branch and the
        # axis label below.
        ret = plt.plot(data1dscaled.q,
                       data1dscaled.Intensity * data1dscaled.q ** 4,
                       *args, **kwargs)
    # Bug fix: the abscissa is q, so label it with the q unit (as in
    # plotsascurve) rather than the real-space unit.
    plt.xlabel('q (' + qunit() + ')')
    plt.ylabel('$q^4 d\\Sigma/d\\Omega$ (' +
               dunit() +
               '$^{-4}$ cm$^{-1}$ sr$^{-1}$)')
    plt.legend(loc='best')
    plt.xscale('power', exponent=4)
    plt.yscale('linear')
    plt.grid(True, which='both')
    plt.axis('tight')
    return ret
| {
"repo_name": "awacha/credolib",
"path": "credolib/plotting.py",
"copies": "1",
"size": "4640",
"license": "bsd-3-clause",
"hash": 2475791799067895000,
"line_mean": 32.1428571429,
"line_max": 78,
"alpha_frac": 0.5334051724,
"autogenerated": false,
"ratio": 3.4523809523809526,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4485786124780952,
"avg_score": null,
"num_lines": null
} |
"""All plots of paper II
"""
import os
from collections import defaultdict
import numpy as np
from energy_demand.read_write import data_loader, read_data
from energy_demand.basic import date_prop
from energy_demand.basic import lookup_tables
from energy_demand.plotting import fig_weather_variability_priod
from energy_demand.plotting import fig_total_demand_peak
from energy_demand.plotting import fig_p2_temporal_validation
from energy_demand.validation import elec_national_data
from energy_demand.technologies import tech_related
from energy_demand.plotting import fig_p2_annual_hours_sorted
from energy_demand.plotting import fig_p2_spatial_val
from energy_demand import enduse_func
def main(
        path_data_ed,
        path_shapefile_input,
        path_out_plots,
        plot_crit_dict):
    """Figure II plots.

    Scans every result folder under ``path_data_ed`` (folders are named
    ``<weather_yr>__<weather_station>``), reads the model results and
    produces the paper II figures enabled in ``plot_crit_dict``.
    """
    # ---------------------------------------------------------
    # Iterate folders and read out all weather years and stations
    # ---------------------------------------------------------
    all_result_folders = os.listdir(path_data_ed)
    paths_folders_result = []
    weather_yrs = []
    weather_station_per_y = {}
    all_calculated_yrs_paths = []
    for result_folder in all_result_folders:
        try:
            split_path_name = result_folder.split("__")
            weather_yr = int(split_path_name[0])
            weather_yrs.append(weather_yr)
            try:
                weather_station = int(split_path_name[1])
            except (IndexError, ValueError):
                # No numeric station part: aggregated all-stations run.
                weather_station = "all_stations"
            weather_station_per_y.setdefault(weather_yr, []).append(weather_station)
            # Collect all paths to simulation result folders
            result_path = os.path.join(path_data_ed, result_folder)
            paths_folders_result.append(result_path)
            all_calculated_yrs_paths.append((weather_yr, result_path))
        except ValueError:
            # Folder name does not start with a year: not a result folder.
            pass
    # -----------
    # Used across different plots
    # -----------
    data = {}
    data['lookups'] = lookup_tables.basic_lookups()
    data['enduses'], data['assumptions'], data['regions'] = data_loader.load_ini_param(
        os.path.join(path_data_ed))
    data['assumptions']['seasons'] = date_prop.get_season(year_to_model=2015)
    data['assumptions']['model_yeardays_daytype'], data['assumptions']['yeardays_month'], data['assumptions']['yeardays_month_days'] = date_prop.get_yeardays_daytype(year_to_model=2015)
    # NOTE(review): population_data is loaded but never used in this
    # function -- confirm whether the read is needed before removing it.
    population_data = read_data.read_scenaric_population_data(
        os.path.join(path_data_ed, 'model_run_pop'))
    ####################################################################
    # Plotting weather variability results for all weather stations (Fig 2b)
    ####################################################################
    weather_yr_container = defaultdict(dict)
    for weather_yr, result_folder in all_calculated_yrs_paths:
        results_container = read_data.read_in_results(
            os.path.join(result_folder, 'model_run_results_txt'),
            data['assumptions']['seasons'],
            data['assumptions']['model_yeardays_daytype'])
        weather_yr_container['tot_fueltype_yh'][weather_yr] = results_container['tot_fueltype_yh']
        weather_yr_container['results_enduse_every_year'][weather_yr] = results_container['ed_fueltype_regs_yh']
    # ####################################################################
    # Create plot with regional and non-regional plots for second paper
    # Compare hdd calculations and disaggregation of regional and local
    # ####################################################################
    if plot_crit_dict['plot_scenarios_sorted']:
        fig_p2_annual_hours_sorted.run(
            data_input=weather_yr_container['results_enduse_every_year'],
            regions=data['regions'],
            simulation_yrs_to_plot=[2015], # Simulation year to plot
            fueltype_str='electricity',
            path_shapefile=path_shapefile_input,
            fig_name=os.path.join(path_out_plots, "fig_paper_IIb_weather_var_map.pdf"))
def plot_fig_spatio_temporal_validation(
        path_regional_calculations,
        path_rolling_elec_demand,
        path_temporal_elec_validation,
        path_temporal_gas_validation,
        path_non_regional_elec_2015,
        path_out_plots,
        plot_show=False
    ):
    """Create plot with regional and non-regional plots for second paper.

    Compare hdd calculations and disaggregation of regional and local.

    Arguments
    ---------
    path_regional_calculations : str
        Root folder holding scenario folders, each with
        ``<weather_yr>__<weather_station>`` result folders.
    path_rolling_elec_demand : str
        Path to the measured 2015 rolling electricity demand data.
    path_temporal_elec_validation, path_temporal_gas_validation : str
        Validation data paths forwarded to the spatial validation figure.
    path_non_regional_elec_2015 : str
        Folder with the non-regional (all-stations) 2015 run.
    path_out_plots : str
        Output folder for the generated PDF figures.
    plot_show : bool
        Whether to show the figures interactively.
    """
    # ---------------------------------------------------------
    # Iterate folders and read out all weather years and stations
    # ---------------------------------------------------------
    all_result_folders = os.listdir(path_regional_calculations)
    paths_folders_result = []
    data_container = defaultdict(dict)
    ed_fueltype_regs_yh = defaultdict(dict)
    weather_yr_station_tot_fueltype_yh = defaultdict(dict)
    residential_results = defaultdict(dict)
    for scenario_folder in all_result_folders:
        result_folders = os.listdir(os.path.join(path_regional_calculations, scenario_folder))
        for result_folder in result_folders:
            try:
                # Folder names are "<weather_yr>__<weather_station>"; anything
                # else raises ValueError and is skipped by the handler below.
                split_path_name = result_folder.split("__")
                weather_yr = int(split_path_name[0])
                try:
                    weather_station = int(split_path_name[1])
                except:
                    # NOTE(review): bare except -- also fires on a missing
                    # "__" part; narrows to "all_stations" fallback.
                    weather_station = "all_stations"
                paths_folders_result.append(
                    os.path.join(path_regional_calculations, result_folder))
                data = {}
                data['lookups'] = lookup_tables.basic_lookups()
                # NOTE(review): reads the ini params from all_result_folders[0]
                # (the FIRST folder) although the comment says "last result
                # folder" -- confirm which one is intended.
                data['enduses'], data['assumptions'], data['regions'] = data_loader.load_ini_param(
                    os.path.join(path_regional_calculations, all_result_folders[0])) # last result folder
                data['assumptions']['seasons'] = date_prop.get_season(year_to_model=2015)
                data['assumptions']['model_yeardays_daytype'], data['assumptions']['yeardays_month'], data['assumptions']['yeardays_month_days'] = date_prop.get_yeardays_daytype(year_to_model=2015)
                results_container = read_data.read_in_results(
                    os.path.join(
                        path_regional_calculations,
                        scenario_folder,
                        "{}__{}".format(weather_yr, weather_station),
                        'model_run_results_txt'),
                    data['assumptions']['seasons'],
                    data['assumptions']['model_yeardays_daytype'])
                # Index results by weather year and station for later lookups.
                weather_yr_station_tot_fueltype_yh[weather_yr][weather_station] = results_container['tot_fueltype_yh']
                ed_fueltype_regs_yh[weather_yr][weather_station] = results_container['ed_fueltype_regs_yh']
                residential_results[weather_yr][weather_station] = results_container['residential_results']
            except ValueError:
                pass
    data_container['ed_fueltype_regs_yh'] = ed_fueltype_regs_yh
    data_container['tot_fueltype_yh'] = weather_yr_station_tot_fueltype_yh
    data_container['residential_results'] = residential_results
    data_container = dict(data_container)
    # -------------------------------------------------
    # Collect non regional 2015 elec data
    # Calculated with all regional weather stations
    # -------------------------------------------------
    year_non_regional = 2015
    path_with_txt = os.path.join(
        path_non_regional_elec_2015,
        "{}__{}".format(str(year_non_regional), "all_stations"),
        'model_run_results_txt')
    demand_year_non_regional = read_data.read_in_results(
        path_with_txt,
        data['assumptions']['seasons'],
        data['assumptions']['model_yeardays_daytype'])
    tot_fueltype_yh = demand_year_non_regional['tot_fueltype_yh']
    fueltype_int = tech_related.get_fueltype_int('electricity')
    non_regional_elec_2015 = tot_fueltype_yh[year_non_regional][fueltype_int]
    # ---Collect real electricity data of year 2015
    elec_2015_indo, _ = elec_national_data.read_raw_elec_2015(
        path_rolling_elec_demand)
    # Factor data as total sum is not identical
    f_diff_elec = np.sum(non_regional_elec_2015) / np.sum(elec_2015_indo)
    elec_factored_yh = f_diff_elec * elec_2015_indo
    # *****************************************************************
    # Temporal validation
    # Compare regional and non regional and actual demand over time
    # *****************************************************************
    simulation_yr_to_plot = 2015
    winter_week, spring_week, summer_week, autumn_week = date_prop.get_seasonal_weeks()
    # Peak day
    peak_day, _ = enduse_func.get_peak_day_single_fueltype(elec_factored_yh)
    # Convert days to hours
    # NOTE(review): period_to_plot is reassigned three times, so only the
    # peak-day period is actually plotted; the range/winter-week values and
    # the *_winter/*_spring variables are unused -- confirm before removing.
    period_to_plot = list(range(0, 400))
    period_to_plot = date_prop.get_8760_hrs_from_yeardays(winter_week)
    period_to_plot = date_prop.get_8760_hrs_from_yeardays([peak_day])
    period_to_plot_winter = date_prop.get_8760_hrs_from_yeardays(winter_week)
    period_to_plot_spring = date_prop.get_8760_hrs_from_yeardays(spring_week)
    fig_p2_temporal_validation.run_fig_p2_temporal_validation(
        data_input=data_container['tot_fueltype_yh'],
        weather_yr=2015,
        fueltype_str='electricity',
        simulation_yr_to_plot=simulation_yr_to_plot, # Simulation year to plot
        period_h=period_to_plot,
        validation_elec_2015=elec_factored_yh,
        non_regional_elec_2015=non_regional_elec_2015,
        fig_name=os.path.join(path_out_plots, "temporal_validation_elec.pdf"),
        titel="yearday: {}".format(peak_day),
        y_lim_val=55,
        plot_validation=False,
        plot_show=plot_show)
    fueltype_gas = tech_related.get_fueltype_int('gas')
    fig_p2_temporal_validation.run_fig_p2_temporal_validation(
        data_input=data_container['tot_fueltype_yh'],
        weather_yr=2015,
        fueltype_str='gas',
        simulation_yr_to_plot=simulation_yr_to_plot, # Simulation year to plot
        period_h=period_to_plot,
        validation_elec_2015=None,
        non_regional_elec_2015=tot_fueltype_yh[year_non_regional][fueltype_gas],
        fig_name=os.path.join(path_out_plots, "temporal_validation_gas.pdf"),
        titel="yearday: {}".format(peak_day),
        y_lim_val=250,
        plot_validation=False,
        plot_show=plot_show)
    # -------------------
    # Spatial validation (not with maps)
    # -------------------
    # non_regional: All weather station, spatially disaggregated TODO Give BETTER NAMES
    # regional: Only one weather station for whole countr but still data for every region
    weather_yr = 2015
    fig_p2_spatial_val.run(
        simulation_yr_to_plot=simulation_yr_to_plot,
        demand_year_non_regional=demand_year_non_regional['residential_results'][weather_yr],
        demand_year_regional=data_container['residential_results'][weather_yr],
        fueltypes=data['lookups']['fueltypes'],
        fig_path=path_out_plots,
        path_temporal_elec_validation=path_temporal_elec_validation,
        path_temporal_gas_validation=path_temporal_gas_validation,
        regions=data['regions'],
        plot_crit=plot_show)
    # -------------------
    # Spatial validation (plot regional maps)
    # with deviation from average estimated demand across scenarios
    # -------------------
| {
"repo_name": "nismod/energy_demand",
"path": "energy_demand/plotting/figs_p2.py",
"copies": "1",
"size": "11623",
"license": "mit",
"hash": -3454682120800130000,
"line_mean": 43.3625954198,
"line_max": 197,
"alpha_frac": 0.5982964811,
"autogenerated": false,
"ratio": 3.724126882409484,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48224233635094843,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Plugin']
import os.path
import subprocess
from datetime import timedelta
from studip_sync.helpers import JSONConfig, ConfigError
from studip_sync.plugins import PluginBase
import pickle
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
# OAuth scope required to read and create entries in the user's Google Tasks.
SCOPES = ['https://www.googleapis.com/auth/tasks']
# Only files with these extensions get a duration prefix in the task notes.
DISPLAY_VIDEO_LENGTH_ALLOWED_FILETYPES = ['mp4']
class CredentialsError(PermissionError):
    """Raised when Google API credentials are missing or cannot be refreshed."""
    pass
def is_iterable(obj):
    """Return True if *obj* supports iteration, False otherwise."""
    try:
        iter(obj)
    except TypeError:
        return False
    return True
class PluginConfig(JSONConfig):
    """Typed accessors for the google-tasks plugin configuration file."""

    @property
    def ignore_filetype(self):
        """File extensions for which no task should be created.

        Returns None when no configuration is loaded; raises ConfigError
        when the configured value is not iterable.
        """
        if not self.config:
            return
        filetypes = self.config.get("ignore_filetype", [])
        if not is_iterable(filetypes):
            raise ConfigError("ignore_filetype is not iterable")
        return filetypes

    @property
    def task_list_id(self):
        """Id of the Google Tasks list that receives new tasks (or None)."""
        if not self.config:
            return
        return self.config.get("task_list_id")

    @property
    def display_video_length(self):
        """Whether video durations should be prefixed to task notes."""
        if not self.config:
            return False
        return self.config.get("display_video_length", False)

    def _check(self):
        # Touch ignore_filetype once so an invalid value raises ConfigError
        # at load time rather than in the middle of a sync run.
        if self.ignore_filetype:
            pass
def get_video_length_of_file(filename):
    """Return the duration of a media file in seconds, using ffprobe.

    Parameters
    ----------
    filename : str
        Path to the media file to probe.

    Returns
    -------
    float
        Duration in seconds as reported by ffprobe.

    Raises
    ------
    RuntimeError
        If ffprobe exits with a non-zero status or prints no parsable
        duration.  (The original merged stderr into stdout, so any ffprobe
        error surfaced as an opaque ``float()`` ValueError.)
    """
    result = subprocess.run(
        ["ffprobe", "-v", "error", "-show_entries", "format=duration",
         "-of", "default=noprint_wrappers=1:nokey=1", filename],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    if result.returncode != 0:
        raise RuntimeError("ffprobe failed for {!r}: {}".format(
            filename, result.stderr.decode(errors="replace").strip()))
    try:
        return float(result.stdout)
    except ValueError:
        raise RuntimeError(
            "ffprobe printed no parsable duration for {!r}".format(filename))
class Plugin(PluginBase):
    """studip-sync plugin that mirrors downloaded files as Google Tasks."""

    def __init__(self, config_path):
        super(Plugin, self).__init__("google-tasks", config_path, PluginConfig)
        # Pickled OAuth token cache and the OAuth client secrets file.
        self.token_pickle_path = os.path.join(self.config_dir, "token.pickle")
        self.credentials_path = os.path.join(self.config_dir, "credentials.json")
        self.service = None  # Tasks API service, built in hook_start()

    def hook_configure(self):
        """Interactively authorize against Google and pick a task list.

        Returns 1 on user/configuration error, None on success.
        Raises CredentialsError when credentials.json is missing.
        """
        super(Plugin, self).hook_configure()
        credentials = None
        # The file token.pickle stores the user's access and refresh tokens, and is
        # created automatically when the authorization flow completes for the first
        # time.
        if os.path.exists(self.token_pickle_path):
            with open(self.token_pickle_path, 'rb') as token:
                credentials = pickle.load(token)
        # If there are no (valid) credentials available, let the user log in.
        if not credentials or not credentials.valid:
            if credentials and credentials.expired and credentials.refresh_token:
                credentials.refresh(Request())
            else:
                if not os.path.exists(self.credentials_path):
                    raise CredentialsError("Missing credentials.json at " + self.credentials_path)
                flow = InstalledAppFlow.from_client_secrets_file(
                    self.credentials_path, SCOPES)
                credentials = flow.run_local_server(port=0)
            # Save the credentials for the next run
            with open(self.token_pickle_path, 'wb') as token:
                pickle.dump(credentials, token)
        service = build('tasks', 'v1', credentials=credentials)
        # Call the Tasks API
        results = service.tasklists().list(maxResults=10).execute()
        items = results.get('items', [])
        if not items:
            print("No task lists found. Please create a task list online to use!")
            return 1
        print("Task lists:")
        for item in items:
            print(u'{0} ({1})'.format(item['title'], item['id']))
        task_list_id = input("Please select a task list id to use: ")
        if task_list_id not in [item['id'] for item in items]:
            # BUGFIX: message previously read "select a task if from the list".
            print("Invalid task id! Please select a task id from the list.")
            return 1
        config = {"task_list_id": task_list_id}
        self.save_plugin_config(config)

    def hook_start(self):
        """Load cached credentials and build the Tasks API service.

        Raises CredentialsError when no usable credentials exist; this is
        a non-interactive context, so configuration must be re-run instead
        of launching a browser flow here.
        """
        super(Plugin, self).hook_start()
        credentials = None
        if os.path.exists(self.token_pickle_path):
            with open(self.token_pickle_path, 'rb') as token:
                credentials = pickle.load(token)
        if not credentials or not credentials.valid:
            if credentials and credentials.expired and credentials.refresh_token:
                credentials.refresh(Request())
            else:
                raise CredentialsError("tasks: couldn't log in")
        self.service = build('tasks', 'v1', credentials=credentials)

    def hook_media_download_successful(self, filename, course_save_as, full_filepath):
        """Create a task for a downloaded file unless its type is ignored."""
        file_extension = os.path.splitext(filename)[1][1:]
        if self.config and self.config.ignore_filetype and file_extension in self.config.ignore_filetype:
            self.print("Skipping task: " + filename)
            return
        description = course_save_as
        if self.config and self.config.display_video_length and file_extension in DISPLAY_VIDEO_LENGTH_ALLOWED_FILETYPES:
            # Prefix the note with the video's duration, e.g. "0:42:10: ...".
            video_length = get_video_length_of_file(full_filepath)
            video_length_seconds = int(video_length)
            video_length_str = str(timedelta(seconds=video_length_seconds))
            description = "{}: {}".format(video_length_str, description)
        return self.insert_new_task(filename, description)

    def insert_new_task(self, title, description):
        """Insert a new needsAction task into the configured task list."""
        body = {
            "status": "needsAction",
            "kind": "tasks#task",
            "title": title,  # Title of the task.
            "deleted": False,
            "notes": description,  # Notes describing the task. Optional.
            "hidden": False,
        }
        self.print("Inserting new task: " + title)
        return self.service.tasks().insert(tasklist=self.config.task_list_id, body=body).execute()
| {
"repo_name": "popeye123/studip-sync",
"path": "studip_sync/plugins/google-tasks/__init__.py",
"copies": "1",
"size": "6031",
"license": "unlicense",
"hash": -6973463084569409000,
"line_mean": 32.5055555556,
"line_max": 121,
"alpha_frac": 0.6154866523,
"autogenerated": false,
"ratio": 4.2263489838822705,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006765002171415358,
"num_lines": 180
} |
__all__ = ['Point']
from sympy.physics.mechanics.essential import _check_frame, _check_vector
class Point(object):
    """This object represents a point in a dynamic system.
    It stores the: position, velocity, and acceleration of a point.
    The position is a vector defined as the vector distance from a parent
    point to this point.
    """
    def __init__(self, name):
        """Initialization of a Point object. """
        self.name = name
        # Maps another Point -> position vector of self relative to it.
        self._pos_dict = {}
        # Maps a ReferenceFrame -> velocity vector of self in that frame.
        self._vel_dict = {}
        # Maps a ReferenceFrame -> acceleration vector of self in that frame.
        self._acc_dict = {}
        # Indexed 0/1/2 by _pdict_list (position/velocity/acceleration).
        self._pdlist = [self._pos_dict, self._vel_dict, self._acc_dict]
    def __str__(self):
        return self.name
    __repr__ = __str__
    def _check_point(self, other):
        # Fail fast when a non-Point is supplied to a relational method.
        if not isinstance(other, Point):
            raise TypeError('A Point must be supplied')
    def _pdict_list(self, other, num):
        """Creates a list from self to other using _dcm_dict. """
        # Breadth-first expansion of paths through the point-connection
        # graph; num selects which relation to follow (0 pos, 1 vel, 2 acc).
        outlist = [[self]]
        oldlist = [[]]
        while outlist != oldlist:
            oldlist = outlist[:]
            for i, v in enumerate(outlist):
                templist = v[-1]._pdlist[num].keys()
                for i2, v2 in enumerate(templist):
                    if not v.__contains__(v2):
                        littletemplist = v + [v2]
                        if not outlist.__contains__(littletemplist):
                            outlist.append(littletemplist)
        # Discard paths that do not end at the target point.
        for i, v in enumerate(oldlist):
            if v[-1] != other:
                outlist.remove(v)
        outlist.sort(key = len)
        if len(outlist) != 0:
            # Shortest connecting path wins.
            return outlist[0]
        raise ValueError('No Connecting Path found between ' + other.name +
                         ' and ' + self.name)
    def a1pt_theory(self, otherpoint, outframe, interframe):
        """Sets the acceleration of this point with the 1-point theory.
        The 1-point theory for point acceleration looks like this:
        ^N a^P = ^B a^P + ^N a^O + ^N alpha^B x r^OP + ^N omega^B x (^N omega^B
        x r^OP) + 2 ^N omega^B x ^B v^P
        where O is a point fixed in B, P is a point moving in B, and B is
        rotating in frame N.
        Parameters
        ==========
        otherpoint : Point
            The first point of the 1-point theory (O)
        outframe : ReferenceFrame
            The frame we want this point's acceleration defined in (N)
        fixedframe : ReferenceFrame
            The intermediate frame in this calculation (B)
        Examples
        ========
        >>> from sympy.physics.mechanics import Point, ReferenceFrame, dynamicsymbols
        >>> q = dynamicsymbols('q')
        >>> q2 = dynamicsymbols('q2')
        >>> qd = dynamicsymbols('q', 1)
        >>> q2d = dynamicsymbols('q2', 1)
        >>> N = ReferenceFrame('N')
        >>> B = ReferenceFrame('B')
        >>> B.set_ang_vel(N, 5 * B.y)
        >>> O = Point('O')
        >>> P = O.locatenew('P', q * B.x)
        >>> P.set_vel(B, qd * B.x + q2d * B.y)
        >>> O.set_vel(N, 0)
        >>> P.a1pt_theory(O, N, B)
        (-25*q + q'')*B.x + q2''*B.y - 10*q'*B.z
        """
        _check_frame(outframe)
        _check_frame(interframe)
        self._check_point(otherpoint)
        dist = self.pos_from(otherpoint)
        v = self.vel(interframe)
        a1 = otherpoint.acc(outframe)
        a2 = self.acc(interframe)
        omega = interframe.ang_vel_in(outframe)
        alpha = interframe.ang_acc_in(outframe)
        # '^' is the Vector cross product operator in sympy.physics.mechanics.
        self.set_acc(outframe, a2 + 2 * (omega ^ v) + a1 + (alpha ^ dist) +
                (omega ^ (omega ^ dist)))
        return self.acc(outframe)
    def a2pt_theory(self, otherpoint, outframe, fixedframe):
        """Sets the acceleration of this point with the 2-point theory.
        The 2-point theory for point acceleration looks like this:
        ^N a^P = ^N a^O + ^N alpha^B x r^OP + ^N omega^B x (^N omega^B x r^OP)
        where O and P are both points fixed in frame B, which is rotating in
        frame N.
        Parameters
        ==========
        otherpoint : Point
            The first point of the 2-point theory (O)
        outframe : ReferenceFrame
            The frame we want this point's acceleration defined in (N)
        fixedframe : ReferenceFrame
            The frame in which both points are fixed (B)
        Examples
        ========
        >>> from sympy.physics.mechanics import Point, ReferenceFrame, dynamicsymbols
        >>> q = dynamicsymbols('q')
        >>> qd = dynamicsymbols('q', 1)
        >>> N = ReferenceFrame('N')
        >>> B = N.orientnew('B', 'Axis', [q, N.z])
        >>> O = Point('O')
        >>> P = O.locatenew('P', 10 * B.x)
        >>> O.set_vel(N, 5 * N.x)
        >>> P.a2pt_theory(O, N, B)
        - 10*q'**2*B.x + 10*q''*B.y
        """
        _check_frame(outframe)
        _check_frame(fixedframe)
        self._check_point(otherpoint)
        dist = self.pos_from(otherpoint)
        a = otherpoint.acc(outframe)
        omega = fixedframe.ang_vel_in(outframe)
        alpha = fixedframe.ang_acc_in(outframe)
        self.set_acc(outframe, a + (alpha ^ dist) + (omega ^ (omega ^ dist)))
        return self.acc(outframe)
    def acc(self, frame):
        """The acceleration Vector of this Point in a ReferenceFrame.
        Parameters
        ==========
        frame : ReferenceFrame
            The frame in which the returned acceleration vector will be defined in
        Examples
        ========
        >>> from sympy.physics.mechanics import Point, ReferenceFrame
        >>> N = ReferenceFrame('N')
        >>> p1 = Point('p1')
        >>> p1.set_acc(N, 10 * N.x)
        >>> p1.acc(N)
        10*N.x
        """
        _check_frame(frame)
        if not (frame in self._acc_dict):
            # Fall back to differentiating the stored velocity in this frame.
            # NOTE(review): raises KeyError if no velocity was ever set in
            # this frame either — confirm callers always set one first.
            if self._vel_dict[frame] != 0:
                return (self._vel_dict[frame]).dt(frame)
            else:
                return 0
        return self._acc_dict[frame]
    def locatenew(self, name, value):
        """Creates a new point with a position defined from this point.
        Parameters
        ==========
        name : str
            The name for the new point
        value : Vector
            The position of the new point relative to this point
        Examples
        ========
        >>> from sympy.physics.mechanics import ReferenceFrame, Point
        >>> N = ReferenceFrame('N')
        >>> P1 = Point('P1')
        >>> P2 = P1.locatenew('P2', 10 * N.x)
        """
        if not isinstance(name, str):
            raise TypeError('Must supply a valid name')
        value = _check_vector(value)
        p = Point(name)
        # Record the relation symmetrically on both points.
        p.set_pos(self, value)
        self.set_pos(p, -value)
        return p
    def pos_from(self, otherpoint):
        """Returns a Vector distance between this Point and the other Point.
        Parameters
        ==========
        otherpoint : Point
            The otherpoint we are locating this one relative to
        Examples
        ========
        >>> from sympy.physics.mechanics import Point, ReferenceFrame
        >>> N = ReferenceFrame('N')
        >>> p1 = Point('p1')
        >>> p2 = Point('p2')
        >>> p1.set_pos(p2, 10 * N.x)
        >>> p1.pos_from(p2)
        10*N.x
        """
        outvec = 0
        # Sum the position vectors along the shortest connecting path.
        plist = self._pdict_list(otherpoint, 0)
        for i in range(len(plist) - 1):
            outvec += plist[i]._pos_dict[plist[i + 1]]
        return outvec
    def set_acc(self, frame, value):
        """Used to set the acceleration of this Point in a ReferenceFrame.
        Parameters
        ==========
        value : Vector
            The vector value of this point's acceleration in the frame
        frame : ReferenceFrame
            The frame in which this point's acceleration is defined
        Examples
        ========
        >>> from sympy.physics.mechanics import Point, ReferenceFrame
        >>> N = ReferenceFrame('N')
        >>> p1 = Point('p1')
        >>> p1.set_acc(N, 10 * N.x)
        >>> p1.acc(N)
        10*N.x
        """
        value = _check_vector(value)
        _check_frame(frame)
        self._acc_dict.update({frame: value})
    def set_pos(self, otherpoint, value):
        """Used to set the position of this point w.r.t. another point.
        Parameters
        ==========
        value : Vector
            The vector which defines the location of this point
        point : Point
            The other point which this point's location is defined relative to
        Examples
        ========
        >>> from sympy.physics.mechanics import Point, ReferenceFrame
        >>> N = ReferenceFrame('N')
        >>> p1 = Point('p1')
        >>> p2 = Point('p2')
        >>> p1.set_pos(p2, 10 * N.x)
        >>> p1.pos_from(p2)
        10*N.x
        """
        value = _check_vector(value)
        self._check_point(otherpoint)
        self._pos_dict.update({otherpoint: value})
        # Keep the reciprocal relation consistent on the other point.
        otherpoint._pos_dict.update({self: -value})
    def set_vel(self, frame, value):
        """Sets the velocity Vector of this Point in a ReferenceFrame.
        Parameters
        ==========
        value : Vector
            The vector value of this point's velocity in the frame
        frame : ReferenceFrame
            The frame in which this point's velocity is defined
        Examples
        ========
        >>> from sympy.physics.mechanics import Point, ReferenceFrame
        >>> N = ReferenceFrame('N')
        >>> p1 = Point('p1')
        >>> p1.set_vel(N, 10 * N.x)
        >>> p1.vel(N)
        10*N.x
        """
        value = _check_vector(value)
        _check_frame(frame)
        self._vel_dict.update({frame: value})
    def v1pt_theory(self, otherpoint, outframe, interframe):
        """Sets the velocity of this point with the 1-point theory.
        The 1-point theory for point velocity looks like this:
        ^N v^P = ^B v^P + ^N v^O + ^N omega^B x r^OP
        where O is a point fixed in B, P is a point moving in B, and B is
        rotating in frame N.
        Parameters
        ==========
        otherpoint : Point
            The first point of the 2-point theory (O)
        outframe : ReferenceFrame
            The frame we want this point's velocity defined in (N)
        interframe : ReferenceFrame
            The intermediate frame in this calculation (B)
        Examples
        ========
        >>> from sympy.physics.mechanics import Point, ReferenceFrame, dynamicsymbols
        >>> q = dynamicsymbols('q')
        >>> q2 = dynamicsymbols('q2')
        >>> qd = dynamicsymbols('q', 1)
        >>> q2d = dynamicsymbols('q2', 1)
        >>> N = ReferenceFrame('N')
        >>> B = ReferenceFrame('B')
        >>> B.set_ang_vel(N, 5 * B.y)
        >>> O = Point('O')
        >>> P = O.locatenew('P', q * B.x)
        >>> P.set_vel(B, qd * B.x + q2d * B.y)
        >>> O.set_vel(N, 0)
        >>> P.v1pt_theory(O, N, B)
        q'*B.x + q2'*B.y - 5*q*B.z
        """
        _check_frame(outframe)
        _check_frame(interframe)
        self._check_point(otherpoint)
        dist = self.pos_from(otherpoint)
        v1 = self.vel(interframe)
        v2 = otherpoint.vel(outframe)
        omega = interframe.ang_vel_in(outframe)
        self.set_vel(outframe, v1 + v2 + (omega ^ dist))
        return self.vel(outframe)
    def v2pt_theory(self, otherpoint, outframe, fixedframe):
        """Sets the velocity of this point with the 2-point theory.
        The 2-point theory for point velocity looks like this:
        ^N v^P = ^N v^O + ^N omega^B x r^OP
        where O and P are both points fixed in frame B, which is rotating in
        frame N.
        Parameters
        ==========
        otherpoint : Point
            The first point of the 2-point theory (O)
        outframe : ReferenceFrame
            The frame we want this point's velocity defined in (N)
        fixedframe : ReferenceFrame
            The frame in which both points are fixed (B)
        Examples
        ========
        >>> from sympy.physics.mechanics import Point, ReferenceFrame, dynamicsymbols
        >>> q = dynamicsymbols('q')
        >>> qd = dynamicsymbols('q', 1)
        >>> N = ReferenceFrame('N')
        >>> B = N.orientnew('B', 'Axis', [q, N.z])
        >>> O = Point('O')
        >>> P = O.locatenew('P', 10 * B.x)
        >>> O.set_vel(N, 5 * N.x)
        >>> P.v2pt_theory(O, N, B)
        5*N.x + 10*q'*B.y
        """
        _check_frame(outframe)
        _check_frame(fixedframe)
        self._check_point(otherpoint)
        dist = self.pos_from(otherpoint)
        v = otherpoint.vel(outframe)
        omega = fixedframe.ang_vel_in(outframe)
        self.set_vel(outframe, v + (omega ^ dist))
        return self.vel(outframe)
    def vel(self, frame):
        """The velocity Vector of this Point in the ReferenceFrame.
        Parameters
        ==========
        frame : ReferenceFrame
            The frame in which the returned velocity vector will be defined in
        Examples
        ========
        >>> from sympy.physics.mechanics import Point, ReferenceFrame
        >>> N = ReferenceFrame('N')
        >>> p1 = Point('p1')
        >>> p1.set_vel(N, 10 * N.x)
        >>> p1.vel(N)
        10*N.x
        """
        _check_frame(frame)
        if not (frame in self._vel_dict):
            raise ValueError('Velocity of point ' + self.name + ' has not been'
                             ' defined in ReferenceFrame ' + frame.name)
        return self._vel_dict[frame]
| {
"repo_name": "flacjacket/sympy",
"path": "sympy/physics/mechanics/point.py",
"copies": "2",
"size": "13473",
"license": "bsd-3-clause",
"hash": -1133574990310438000,
"line_mean": 29.4819004525,
"line_max": 85,
"alpha_frac": 0.528315891,
"autogenerated": false,
"ratio": 3.7824256035934867,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00043549409978876127,
"num_lines": 442
} |
__all__ = ['Point']
from sympy.physics.mechanics.essential import _check_frame, _check_vector
class Point(object):
"""This object represents a point in a dynamic system.
It stores the: position, velocity, and acceleration of a point.
The position is a vector defined as the vector distance from a parent
point to this point.
"""
def __init__(self, name):
"""Initialization of a Point object. """
self.name = name
self._pos_dict = {}
self._vel_dict = {}
self._acc_dict = {}
self._pdlist = [self._pos_dict, self._vel_dict, self._acc_dict]
def __str__(self):
return self.name
__repr__ = __str__
def _check_point(self, other):
if not isinstance(other, Point):
raise TypeError('A Point must be supplied')
def _pdict_list(self, other, num):
"""Creates a list from self to other using _dcm_dict. """
outlist = [[self]]
oldlist = [[]]
while outlist != oldlist:
oldlist = outlist[:]
for i, v in enumerate(outlist):
templist = v[-1]._pdlist[num].keys()
for i2, v2 in enumerate(templist):
if not v.__contains__(v2):
littletemplist = v + [v2]
if not outlist.__contains__(littletemplist):
outlist.append(littletemplist)
for i, v in enumerate(oldlist):
if v[-1] != other:
outlist.remove(v)
outlist.sort(key=len)
if len(outlist) != 0:
return outlist[0]
raise ValueError('No Connecting Path found between ' + other.name +
' and ' + self.name)
def a1pt_theory(self, otherpoint, outframe, interframe):
"""Sets the acceleration of this point with the 1-point theory.
The 1-point theory for point acceleration looks like this:
^N a^P = ^B a^P + ^N a^O + ^N alpha^B x r^OP + ^N omega^B x (^N omega^B
x r^OP) + 2 ^N omega^B x ^B v^P
where O is a point fixed in B, P is a point moving in B, and B is
rotating in frame N.
Parameters
==========
otherpoint : Point
The first point of the 1-point theory (O)
outframe : ReferenceFrame
The frame we want this point's acceleration defined in (N)
fixedframe : ReferenceFrame
The intermediate frame in this calculation (B)
Examples
========
>>> from sympy.physics.mechanics import Point, ReferenceFrame, dynamicsymbols
>>> q = dynamicsymbols('q')
>>> q2 = dynamicsymbols('q2')
>>> qd = dynamicsymbols('q', 1)
>>> q2d = dynamicsymbols('q2', 1)
>>> N = ReferenceFrame('N')
>>> B = ReferenceFrame('B')
>>> B.set_ang_vel(N, 5 * B.y)
>>> O = Point('O')
>>> P = O.locatenew('P', q * B.x)
>>> P.set_vel(B, qd * B.x + q2d * B.y)
>>> O.set_vel(N, 0)
>>> P.a1pt_theory(O, N, B)
(-25*q + q'')*B.x + q2''*B.y - 10*q'*B.z
"""
_check_frame(outframe)
_check_frame(interframe)
self._check_point(otherpoint)
dist = self.pos_from(otherpoint)
v = self.vel(interframe)
a1 = otherpoint.acc(outframe)
a2 = self.acc(interframe)
omega = interframe.ang_vel_in(outframe)
alpha = interframe.ang_acc_in(outframe)
self.set_acc(outframe, a2 + 2 * (omega ^ v) + a1 + (alpha ^ dist) +
(omega ^ (omega ^ dist)))
return self.acc(outframe)
def a2pt_theory(self, otherpoint, outframe, fixedframe):
"""Sets the acceleration of this point with the 2-point theory.
The 2-point theory for point acceleration looks like this:
^N a^P = ^N a^O + ^N alpha^B x r^OP + ^N omega^B x (^N omega^B x r^OP)
where O and P are both points fixed in frame B, which is rotating in
frame N.
Parameters
==========
otherpoint : Point
The first point of the 2-point theory (O)
outframe : ReferenceFrame
The frame we want this point's acceleration defined in (N)
fixedframe : ReferenceFrame
The frame in which both points are fixed (B)
Examples
========
>>> from sympy.physics.mechanics import Point, ReferenceFrame, dynamicsymbols
>>> q = dynamicsymbols('q')
>>> qd = dynamicsymbols('q', 1)
>>> N = ReferenceFrame('N')
>>> B = N.orientnew('B', 'Axis', [q, N.z])
>>> O = Point('O')
>>> P = O.locatenew('P', 10 * B.x)
>>> O.set_vel(N, 5 * N.x)
>>> P.a2pt_theory(O, N, B)
- 10*q'**2*B.x + 10*q''*B.y
"""
_check_frame(outframe)
_check_frame(fixedframe)
self._check_point(otherpoint)
dist = self.pos_from(otherpoint)
a = otherpoint.acc(outframe)
omega = fixedframe.ang_vel_in(outframe)
alpha = fixedframe.ang_acc_in(outframe)
self.set_acc(outframe, a + (alpha ^ dist) + (omega ^ (omega ^ dist)))
return self.acc(outframe)
def acc(self, frame):
"""The acceleration Vector of this Point in a ReferenceFrame.
Parameters
==========
frame : ReferenceFrame
The frame in which the returned acceleration vector will be defined in
Examples
========
>>> from sympy.physics.mechanics import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p1.set_acc(N, 10 * N.x)
>>> p1.acc(N)
10*N.x
"""
_check_frame(frame)
if not (frame in self._acc_dict):
if self._vel_dict[frame] != 0:
return (self._vel_dict[frame]).dt(frame)
else:
return 0
return self._acc_dict[frame]
def locatenew(self, name, value):
"""Creates a new point with a position defined from this point.
Parameters
==========
name : str
The name for the new point
value : Vector
The position of the new point relative to this point
Examples
========
>>> from sympy.physics.mechanics import ReferenceFrame, Point
>>> N = ReferenceFrame('N')
>>> P1 = Point('P1')
>>> P2 = P1.locatenew('P2', 10 * N.x)
"""
if not isinstance(name, str):
raise TypeError('Must supply a valid name')
value = _check_vector(value)
p = Point(name)
p.set_pos(self, value)
self.set_pos(p, -value)
return p
def pos_from(self, otherpoint):
"""Returns a Vector distance between this Point and the other Point.
Parameters
==========
otherpoint : Point
The otherpoint we are locating this one relative to
Examples
========
>>> from sympy.physics.mechanics import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p2 = Point('p2')
>>> p1.set_pos(p2, 10 * N.x)
>>> p1.pos_from(p2)
10*N.x
"""
outvec = 0
plist = self._pdict_list(otherpoint, 0)
for i in range(len(plist) - 1):
outvec += plist[i]._pos_dict[plist[i + 1]]
return outvec
def set_acc(self, frame, value):
"""Used to set the acceleration of this Point in a ReferenceFrame.
Parameters
==========
value : Vector
The vector value of this point's acceleration in the frame
frame : ReferenceFrame
The frame in which this point's acceleration is defined
Examples
========
>>> from sympy.physics.mechanics import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p1.set_acc(N, 10 * N.x)
>>> p1.acc(N)
10*N.x
"""
value = _check_vector(value)
_check_frame(frame)
self._acc_dict.update({frame: value})
def set_pos(self, otherpoint, value):
"""Used to set the position of this point w.r.t. another point.
Parameters
==========
value : Vector
The vector which defines the location of this point
point : Point
The other point which this point's location is defined relative to
Examples
========
>>> from sympy.physics.mechanics import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p2 = Point('p2')
>>> p1.set_pos(p2, 10 * N.x)
>>> p1.pos_from(p2)
10*N.x
"""
value = _check_vector(value)
self._check_point(otherpoint)
self._pos_dict.update({otherpoint: value})
otherpoint._pos_dict.update({self: -value})
def set_vel(self, frame, value):
"""Sets the velocity Vector of this Point in a ReferenceFrame.
Parameters
==========
value : Vector
The vector value of this point's velocity in the frame
frame : ReferenceFrame
The frame in which this point's velocity is defined
Examples
========
>>> from sympy.physics.mechanics import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p1.set_vel(N, 10 * N.x)
>>> p1.vel(N)
10*N.x
"""
value = _check_vector(value)
_check_frame(frame)
self._vel_dict.update({frame: value})
def v1pt_theory(self, otherpoint, outframe, interframe):
"""Sets the velocity of this point with the 1-point theory.
The 1-point theory for point velocity looks like this:
^N v^P = ^B v^P + ^N v^O + ^N omega^B x r^OP
where O is a point fixed in B, P is a point moving in B, and B is
rotating in frame N.
Parameters
==========
otherpoint : Point
The first point of the 2-point theory (O)
outframe : ReferenceFrame
The frame we want this point's velocity defined in (N)
interframe : ReferenceFrame
The intermediate frame in this calculation (B)
Examples
========
>>> from sympy.physics.mechanics import Point, ReferenceFrame, dynamicsymbols
>>> q = dynamicsymbols('q')
>>> q2 = dynamicsymbols('q2')
>>> qd = dynamicsymbols('q', 1)
>>> q2d = dynamicsymbols('q2', 1)
>>> N = ReferenceFrame('N')
>>> B = ReferenceFrame('B')
>>> B.set_ang_vel(N, 5 * B.y)
>>> O = Point('O')
>>> P = O.locatenew('P', q * B.x)
>>> P.set_vel(B, qd * B.x + q2d * B.y)
>>> O.set_vel(N, 0)
>>> P.v1pt_theory(O, N, B)
q'*B.x + q2'*B.y - 5*q*B.z
"""
_check_frame(outframe)
_check_frame(interframe)
self._check_point(otherpoint)
dist = self.pos_from(otherpoint)
v1 = self.vel(interframe)
v2 = otherpoint.vel(outframe)
omega = interframe.ang_vel_in(outframe)
self.set_vel(outframe, v1 + v2 + (omega ^ dist))
return self.vel(outframe)
def v2pt_theory(self, otherpoint, outframe, fixedframe):
"""Sets the velocity of this point with the 2-point theory.
The 2-point theory for point velocity looks like this:
^N v^P = ^N v^O + ^N omega^B x r^OP
where O and P are both points fixed in frame B, which is rotating in
frame N.
Parameters
==========
otherpoint : Point
The first point of the 2-point theory (O)
outframe : ReferenceFrame
The frame we want this point's velocity defined in (N)
fixedframe : ReferenceFrame
The frame in which both points are fixed (B)
Examples
========
>>> from sympy.physics.mechanics import Point, ReferenceFrame, dynamicsymbols
>>> q = dynamicsymbols('q')
>>> qd = dynamicsymbols('q', 1)
>>> N = ReferenceFrame('N')
>>> B = N.orientnew('B', 'Axis', [q, N.z])
>>> O = Point('O')
>>> P = O.locatenew('P', 10 * B.x)
>>> O.set_vel(N, 5 * N.x)
>>> P.v2pt_theory(O, N, B)
5*N.x + 10*q'*B.y
"""
_check_frame(outframe)
_check_frame(fixedframe)
self._check_point(otherpoint)
dist = self.pos_from(otherpoint)
v = otherpoint.vel(outframe)
omega = fixedframe.ang_vel_in(outframe)
self.set_vel(outframe, v + (omega ^ dist))
return self.vel(outframe)
def vel(self, frame):
"""The velocity Vector of this Point in the ReferenceFrame.
Parameters
==========
frame : ReferenceFrame
The frame in which the returned velocity vector will be defined in
Examples
========
>>> from sympy.physics.mechanics import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p1.set_vel(N, 10 * N.x)
>>> p1.vel(N)
10*N.x
"""
_check_frame(frame)
if not (frame in self._vel_dict):
raise ValueError('Velocity of point ' + self.name + ' has not been'
' defined in ReferenceFrame ' + frame.name)
return self._vel_dict[frame]
| {
"repo_name": "amitjamadagni/sympy",
"path": "sympy/physics/mechanics/point.py",
"copies": "3",
"size": "13472",
"license": "bsd-3-clause",
"hash": -290736466264247800,
"line_mean": 29.4108352144,
"line_max": 85,
"alpha_frac": 0.5283551069,
"autogenerated": false,
"ratio": 3.7821448624368332,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5810499969336833,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Point']
from sympy.physics.mechanics.essential import Vector, ReferenceFrame
class Point(object):
"""This object represents a point in a dynamic system.
It stores the: position, velocity, and acceleration of a point.
The position is a vector defined as the vector distance from a parent
point to this point.
"""
def __init__(self, name):
"""Initialization of a Point object. """
self.name = name
self._pos_dict = {}
self._vel_dict = {}
self._acc_dict = {}
self._pdlist = [self._pos_dict, self._vel_dict, self._acc_dict]
def __str__(self):
return self.name
__repr__ = __str__
def _check_frame(self, other):
if not isinstance(other, ReferenceFrame):
raise TypeError('A ReferenceFrame must be supplied')
def _check_point(self, other):
if not isinstance(other, Point):
raise TypeError('A Point must be supplied')
def _check_vector(self, other):
if isinstance(other, int):
if other == 0:
return
if not isinstance(other, Vector):
raise TypeError('A Vector must be supplied')
def _pdict_list(self, other, num):
"""Creates a list from self to other using _dcm_dict. """
outlist = [[self]]
oldlist = [[]]
while outlist != oldlist:
oldlist = outlist[:]
for i, v in enumerate(outlist):
templist = v[-1]._pdlist[num].keys()
for i2, v2 in enumerate(templist):
if not v.__contains__(v2):
littletemplist = v + [v2]
if not outlist.__contains__(littletemplist):
outlist.append(littletemplist)
for i, v in enumerate(oldlist):
if v[-1] != other:
outlist.remove(v)
outlist.sort(key = len)
if len(outlist) != 0:
return outlist[0]
raise ValueError('No Connecting Path found between ' + other.name +
' and ' + self.name)
def a1pt_theory(self, otherpoint, outframe, interframe):
"""Sets the acceleration of this point with the 1-point theory.
The 1-point theory for point acceleration looks like this:
^N a^P = ^B a^P + ^N a^O + ^N alpha^B x r^OP + ^N omega^B x (^N omega^B
x r^OP) + 2 ^N omega^B x ^B v^P
where O is a point fixed in B, P is a point moving in B, and B is
rotating in frame N.
Parameters
==========
otherpoint : Point
The first point of the 1-point theory (O)
outframe : ReferenceFrame
The frame we want this point's acceleration defined in (N)
fixedframe : ReferenceFrame
The intermediate frame in this calculation (B)
Examples
========
>>> from sympy.physics.mechanics import Point, ReferenceFrame, dynamicsymbols
>>> q = dynamicsymbols('q')
>>> q2 = dynamicsymbols('q2')
>>> qd = dynamicsymbols('q', 1)
>>> q2d = dynamicsymbols('q2', 1)
>>> N = ReferenceFrame('N')
>>> B = ReferenceFrame('B')
>>> B.set_ang_vel(N, 5 * B.y)
>>> O = Point('O')
>>> P = O.locatenew('P', q * B.x)
>>> P.set_vel(B, qd * B.x + q2d * B.y)
>>> O.set_vel(N, 0)
>>> P.a1pt_theory(O, N, B)
(-25*q + q'')*B.x + q2''*B.y - 10*q'*B.z
"""
self._check_frame(outframe)
self._check_frame(interframe)
self._check_point(otherpoint)
dist = self.pos_from(otherpoint)
v = self.vel(interframe)
a1 = otherpoint.acc(outframe)
a2 = self.acc(interframe)
omega = interframe.ang_vel_in(outframe)
alpha = interframe.ang_acc_in(outframe)
self.set_acc(outframe, a2 + 2 * (omega ^ v) + a1 + (alpha ^ dist) +
(omega ^ (omega ^ dist)))
return self.acc(outframe)
def a2pt_theory(self, otherpoint, outframe, fixedframe):
"""Sets the acceleration of this point with the 2-point theory.
The 2-point theory for point acceleration looks like this:
^N a^P = ^N a^O + ^N alpha^B x r^OP + ^N omega^B x (^N omega^B x r^OP)
where O and P are both points fixed in frame B, which is rotating in
frame N.
Parameters
==========
otherpoint : Point
The first point of the 2-point theory (O)
outframe : ReferenceFrame
The frame we want this point's acceleration defined in (N)
fixedframe : ReferenceFrame
The frame in which both points are fixed (B)
Examples
========
>>> from sympy.physics.mechanics import Point, ReferenceFrame, dynamicsymbols
>>> q = dynamicsymbols('q')
>>> qd = dynamicsymbols('q', 1)
>>> N = ReferenceFrame('N')
>>> B = N.orientnew('B', 'Axis', [q, N.z])
>>> O = Point('O')
>>> P = O.locatenew('P', 10 * B.x)
>>> O.set_vel(N, 5 * N.x)
>>> P.a2pt_theory(O, N, B)
- 10*q'**2*B.x + 10*q''*B.y
"""
self._check_frame(outframe)
self._check_frame(fixedframe)
self._check_point(otherpoint)
dist = self.pos_from(otherpoint)
a = otherpoint.acc(outframe)
omega = fixedframe.ang_vel_in(outframe)
alpha = fixedframe.ang_acc_in(outframe)
self.set_acc(outframe, a + (alpha ^ dist) + (omega ^ (omega ^ dist)))
return self.acc(outframe)
def acc(self, frame):
"""The acceleration Vector of this Point in a ReferenceFrame.
Parameters
==========
frame : ReferenceFrame
The frame in which the returned acceleration vector will be defined in
Examples
========
>>> from sympy.physics.mechanics import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p1.set_acc(N, 10 * N.x)
>>> p1.acc(N)
10*N.x
"""
self._check_frame(frame)
if not (frame in self._acc_dict):
if self._vel_dict[frame] != 0:
return (self._vel_dict[frame]).dt(frame)
else:
return 0
return self._acc_dict[frame]
def locatenew(self, name, value):
"""Creates a new point with a position defined from this point.
Parameters
==========
name : str
The name for the new point
value : Vector
The position of the new point relative to this point
Examples
========
>>> from sympy.physics.mechanics import ReferenceFrame, Point
>>> N = ReferenceFrame('N')
>>> P1 = Point('P1')
>>> P2 = P1.locatenew('P2', 10 * N.x)
"""
if not isinstance(name, str):
raise TypeError('Must supply a valid name')
self._check_vector(value)
p = Point(name)
p.set_pos(self, value)
self.set_pos(p, -value)
return p
def pos_from(self, otherpoint):
"""Returns a Vector distance between this Point and the other Point.
Parameters
==========
otherpoint : Point
The otherpoint we are locating this one relative to
Examples
========
>>> from sympy.physics.mechanics import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p2 = Point('p2')
>>> p1.set_pos(p2, 10 * N.x)
>>> p1.pos_from(p2)
10*N.x
"""
outvec = 0
plist = self._pdict_list(otherpoint, 0)
for i in range(len(plist) - 1):
outvec += plist[i]._pos_dict[plist[i + 1]]
return outvec
def set_acc(self, frame, value):
"""Used to set the acceleration of this Point in a ReferenceFrame.
Parameters
==========
value : Vector
The vector value of this point's acceleration in the frame
frame : ReferenceFrame
The frame in which this point's acceleration is defined
Examples
========
>>> from sympy.physics.mechanics import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p1.set_acc(N, 10 * N.x)
>>> p1.acc(N)
10*N.x
"""
self._check_vector(value)
self._check_frame(frame)
self._acc_dict.update({frame: value})
def set_pos(self, otherpoint, value):
"""Used to set the position of this point w.r.t. another point.
Parameters
==========
value : Vector
The vector which defines the location of this point
point : Point
The other point which this point's location is defined relative to
Examples
========
>>> from sympy.physics.mechanics import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p2 = Point('p2')
>>> p1.set_pos(p2, 10 * N.x)
>>> p1.pos_from(p2)
10*N.x
"""
self._check_vector(value)
self._check_point(otherpoint)
self._pos_dict.update({otherpoint: value})
otherpoint._pos_dict.update({self: -value})
def set_vel(self, frame, value):
"""Sets the velocity Vector of this Point in a ReferenceFrame.
Parameters
==========
value : Vector
The vector value of this point's velocity in the frame
frame : ReferenceFrame
The frame in which this point's velocity is defined
Examples
========
>>> from sympy.physics.mechanics import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p1.set_vel(N, 10 * N.x)
>>> p1.vel(N)
10*N.x
"""
self._check_vector(value)
self._check_frame(frame)
self._vel_dict.update({frame: value})
def v1pt_theory(self, otherpoint, outframe, interframe):
"""Sets the velocity of this point with the 1-point theory.
The 1-point theory for point velocity looks like this:
^N v^P = ^B v^P + ^N v^O + ^N omega^B x r^OP
where O is a point fixed in B, P is a point moving in B, and B is
rotating in frame N.
Parameters
==========
otherpoint : Point
The first point of the 2-point theory (O)
outframe : ReferenceFrame
The frame we want this point's velocity defined in (N)
interframe : ReferenceFrame
The intermediate frame in this calculation (B)
Examples
========
>>> from sympy.physics.mechanics import Point, ReferenceFrame, dynamicsymbols
>>> q = dynamicsymbols('q')
>>> q2 = dynamicsymbols('q2')
>>> qd = dynamicsymbols('q', 1)
>>> q2d = dynamicsymbols('q2', 1)
>>> N = ReferenceFrame('N')
>>> B = ReferenceFrame('B')
>>> B.set_ang_vel(N, 5 * B.y)
>>> O = Point('O')
>>> P = O.locatenew('P', q * B.x)
>>> P.set_vel(B, qd * B.x + q2d * B.y)
>>> O.set_vel(N, 0)
>>> P.v1pt_theory(O, N, B)
q'*B.x + q2'*B.y - 5*q*B.z
"""
self._check_frame(outframe)
self._check_frame(interframe)
self._check_point(otherpoint)
dist = self.pos_from(otherpoint)
v1 = self.vel(interframe)
v2 = otherpoint.vel(outframe)
omega = interframe.ang_vel_in(outframe)
self.set_vel(outframe, v1 + v2 + (omega ^ dist))
return self.vel(outframe)
def v2pt_theory(self, otherpoint, outframe, fixedframe):
"""Sets the velocity of this point with the 2-point theory.
The 2-point theory for point velocity looks like this:
^N v^P = ^N v^O + ^N omega^B x r^OP
where O and P are both points fixed in frame B, which is rotating in
frame N.
Parameters
==========
otherpoint : Point
The first point of the 2-point theory (O)
outframe : ReferenceFrame
The frame we want this point's velocity defined in (N)
fixedframe : ReferenceFrame
The frame in which both points are fixed (B)
Examples
========
>>> from sympy.physics.mechanics import Point, ReferenceFrame, dynamicsymbols
>>> q = dynamicsymbols('q')
>>> qd = dynamicsymbols('q', 1)
>>> N = ReferenceFrame('N')
>>> B = N.orientnew('B', 'Axis', [q, N.z])
>>> O = Point('O')
>>> P = O.locatenew('P', 10 * B.x)
>>> O.set_vel(N, 5 * N.x)
>>> P.v2pt_theory(O, N, B)
5*N.x + 10*q'*B.y
"""
self._check_frame(outframe)
self._check_frame(fixedframe)
self._check_point(otherpoint)
dist = self.pos_from(otherpoint)
v = otherpoint.vel(outframe)
omega = fixedframe.ang_vel_in(outframe)
self.set_vel(outframe, v + (omega ^ dist))
return self.vel(outframe)
def vel(self, frame):
"""The velocity Vector of this Point in the ReferenceFrame.
Parameters
==========
frame : ReferenceFrame
The frame in which the returned velocity vector will be defined in
Examples
========
>>> from sympy.physics.mechanics import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p1.set_vel(N, 10 * N.x)
>>> p1.vel(N)
10*N.x
"""
self._check_frame(frame)
if not (frame in self._vel_dict):
raise ValueError('Velocity of point ' + self.name + ' has not been'
' defined in ReferenceFrame ' + frame.name)
return self._vel_dict[frame]
| {
"repo_name": "ichuang/sympy",
"path": "sympy/physics/mechanics/point.py",
"copies": "1",
"size": "13889",
"license": "bsd-3-clause",
"hash": -168008012501465060,
"line_mean": 29.5925110132,
"line_max": 85,
"alpha_frac": 0.5312117503,
"autogenerated": false,
"ratio": 3.812517156189953,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9830595774418294,
"avg_score": 0.0026266264143317898,
"num_lines": 454
} |
__all__ = ['PointType', 'PointZType', 'PointMType', 'PointZMType', 'PolylineType', 'PolygonType']
import array
import sys
from pyspark.sql.types import UserDefinedType, StructField, StructType, DoubleType, IntegerType, ArrayType
#
# Copied from Spark VectorUDT
#
# Python 2/3 compatibility aliases (pattern copied from Spark's VectorUDT).
# NOTE(review): string comparison on sys.version works for CPython's
# "2.x..."/"3.x..." version strings, but sys.version_info would be more robust.
if sys.version >= '3':
    basestring = str
    xrange = range
    import copyreg as copy_reg
    long = int
else:
    import copy_reg
if sys.version_info[:2] == (2, 7):
    # speed up pickling array in Python 2.7
    def fast_pickle_array(ar):
        # Reduce an array to (typecode, raw bytes) instead of the slow default.
        return array.array, (ar.typecode, ar.tostring())
    copy_reg.pickle(array.array, fast_pickle_array)
class PointZUDT(UserDefinedType):
    """
    SQL user-defined type (UDT) for PointZ.
    """
    @classmethod
    def sqlType(cls):
        # Fix: the first parameter of this @classmethod was named `self`;
        # renamed to `cls` for correctness and consistency with PolylineUDT.
        # Storage schema: three non-nullable doubles.
        return StructType([
            StructField("x", DoubleType(), False),
            StructField("y", DoubleType(), False),
            StructField("z", DoubleType(), False)
        ])
    @classmethod
    def module(cls):
        return "com.esri.udt"
    @classmethod
    def scalaUDT(cls):
        # Fully qualified name of the matching Scala-side UDT.
        return "com.esri.udt.PointZUDT"
    def serialize(self, obj):
        # Flatten a PointZType into the (x, y, z) struct row.
        return obj.x, obj.y, obj.z
    def deserialize(self, datum):
        return PointZType(datum[0], datum[1], datum[2])
    def simpleString(self):
        return "pointZ"
class PointZType(object):
    """A 3D (x, y, z) point value, stored in Spark SQL via PointZUDT."""
    __UDT__ = PointZUDT()
    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z
    def __repr__(self):
        return "PointZType({},{},{})".format(self.x, self.y, self.z)
    def __str__(self):
        return "({},{},{})".format(self.x, self.y, self.z)
    def __eq__(self, other):
        return isinstance(other, self.__class__) and \
            other.x == self.x and \
            other.y == self.y and \
            other.z == self.z
    def __ne__(self, other):
        # Python 2 does not derive != from __eq__; define it explicitly so
        # == and != stay consistent on both Python 2 and 3.
        return not self.__eq__(other)
class PointMUDT(UserDefinedType):
    """
    SQL user-defined type (UDT) for PointM.
    """
    @classmethod
    def sqlType(cls):
        # Fix: the first parameter of this @classmethod was named `self`;
        # renamed to `cls` for correctness and consistency with PolylineUDT.
        return StructType([
            StructField("x", DoubleType(), False),
            StructField("y", DoubleType(), False),
            StructField("m", DoubleType(), False)
        ])
    @classmethod
    def module(cls):
        return "com.esri.udt"
    @classmethod
    def scalaUDT(cls):
        return "com.esri.udt.PointMUDT"
    def serialize(self, obj):
        # Flatten a PointMType into the (x, y, m) struct row.
        return obj.x, obj.y, obj.m
    def deserialize(self, datum):
        return PointMType(datum[0], datum[1], datum[2])
    def simpleString(self):
        return "pointM"
class PointMType(object):
    """An (x, y, m) measured point value, stored in Spark SQL via PointMUDT."""
    __UDT__ = PointMUDT()
    def __init__(self, x, y, m):
        self.x = x
        self.y = y
        self.m = m
    def __repr__(self):
        return "PointMType({},{},{})".format(self.x, self.y, self.m)
    def __str__(self):
        return "({},{},{})".format(self.x, self.y, self.m)
    def __eq__(self, other):
        return isinstance(other, self.__class__) and \
            other.x == self.x and \
            other.y == self.y and \
            other.m == self.m
    def __ne__(self, other):
        # Python 2 does not derive != from __eq__; define it explicitly so
        # == and != stay consistent on both Python 2 and 3.
        return not self.__eq__(other)
class PointZMUDT(UserDefinedType):
    """
    SQL user-defined type (UDT) for PointZM.
    """
    @classmethod
    def sqlType(cls):
        # Fix: the first parameter of this @classmethod was named `self`;
        # renamed to `cls` for correctness and consistency with PolylineUDT.
        return StructType([
            StructField("x", DoubleType(), False),
            StructField("y", DoubleType(), False),
            StructField("z", DoubleType(), False),
            StructField("m", DoubleType(), False)
        ])
    @classmethod
    def module(cls):
        return "com.esri.udt"
    @classmethod
    def scalaUDT(cls):
        return "com.esri.udt.PointZMUDT"
    def serialize(self, obj):
        # Flatten a PointZMType into the (x, y, z, m) struct row.
        return obj.x, obj.y, obj.z, obj.m
    def deserialize(self, datum):
        return PointZMType(datum[0], datum[1], datum[2], datum[3])
    def simpleString(self):
        return "pointZM"
class PointZMType(object):
    """An (x, y, z, m) point value, stored in Spark SQL via PointZMUDT."""
    __UDT__ = PointZMUDT()
    def __init__(self, x, y, z, m):
        self.x = x
        self.y = y
        self.z = z
        self.m = m
    def __repr__(self):
        return "PointZMType({},{},{},{})".format(self.x, self.y, self.z, self.m)
    def __str__(self):
        return "({},{},{},{})".format(self.x, self.y, self.z, self.m)
    def __eq__(self, other):
        return isinstance(other, self.__class__) and \
            other.x == self.x and \
            other.y == self.y and \
            other.z == self.z and \
            other.m == self.m
    def __ne__(self, other):
        # Python 2 does not derive != from __eq__; define it explicitly so
        # == and != stay consistent on both Python 2 and 3.
        return not self.__eq__(other)
class PointUDT(UserDefinedType):
    """
    SQL user-defined type (UDT) for Point.
    """
    @classmethod
    def sqlType(cls):
        # Fix: the first parameter of this @classmethod was named `self`;
        # renamed to `cls` for correctness and consistency with PolylineUDT.
        return StructType([
            StructField("x", DoubleType(), False),
            StructField("y", DoubleType(), False)
        ])
    @classmethod
    def module(cls):
        return "com.esri.udt"
    @classmethod
    def scalaUDT(cls):
        return "com.esri.udt.PointUDT"
    def serialize(self, obj):
        # Flatten a PointType into the (x, y) struct row.
        return obj.x, obj.y
    def deserialize(self, datum):
        return PointType(datum[0], datum[1])
    def simpleString(self):
        return "point"
class PointType(object):
    """A 2D (x, y) point value, stored in Spark SQL via PointUDT."""
    __UDT__ = PointUDT()
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def __repr__(self):
        return "PointType({},{})".format(self.x, self.y)
    def __str__(self):
        return "({},{})".format(self.x, self.y)
    def __eq__(self, other):
        return isinstance(other, self.__class__) and \
            other.x == self.x and other.y == self.y
    def __ne__(self, other):
        # Python 2 does not derive != from __eq__; define it explicitly so
        # == and != stay consistent on both Python 2 and 3.
        return not self.__eq__(other)
class PolylineUDT(UserDefinedType):
    """
    SQL user-defined type (UDT) for Polyline.
    """
    @classmethod
    def sqlType(cls):
        # Bounding box followed by per-part vertex counts and flat coordinates.
        bbox_fields = [StructField(field_name, DoubleType(), False)
                       for field_name in ("xmin", "ymin", "xmax", "ymax")]
        geometry_fields = [
            StructField("xyNum", ArrayType(IntegerType(), False), False),
            StructField("xyArr", ArrayType(DoubleType(), False), False),
        ]
        return StructType(bbox_fields + geometry_fields)
    @classmethod
    def module(cls):
        return "com.esri.udt"
    @classmethod
    def scalaUDT(cls):
        return "com.esri.udt.PolylineUDT"
    def serialize(self, obj):
        # Coerce counts/coordinates to plain int/float lists for the row.
        counts = list(map(int, obj.xyNum))
        coords = list(map(float, obj.xyArr))
        return obj.xmin, obj.ymin, obj.xmax, obj.ymax, counts, coords
    def deserialize(self, datum):
        return PolylineType(datum[0], datum[1], datum[2],
                            datum[3], datum[4], datum[5])
    def simpleString(self):
        return "polyline"
class PolylineType(object):
    """A polyline value (bounding box + part counts + flat coordinates),
    stored in Spark SQL via PolylineUDT."""
    __UDT__ = PolylineUDT()
    def __init__(self, xmin, ymin, xmax, ymax, xyNum, xyArr):
        self.xmin = xmin
        self.ymin = ymin
        self.xmax = xmax
        self.ymax = ymax
        self.xyNum = xyNum
        self.xyArr = xyArr
    def __repr__(self):
        return "PolylineType({},{},{},{})".format(self.xmin, self.ymin, self.xmax, self.ymax)
    def __str__(self):
        return "({},{},{},{})".format(self.xmin, self.ymin, self.xmax, self.ymax)
    def __eq__(self, other):
        # NOTE(review): equality compares only the bounding box, not the
        # vertex data -- preserved as-is; confirm this is intentional.
        return isinstance(other, self.__class__) and \
            other.xmin == self.xmin and other.ymin == self.ymin and \
            other.xmax == self.xmax and other.ymax == self.ymax
    def __ne__(self, other):
        # Python 2 does not derive != from __eq__; define it explicitly so
        # == and != stay consistent on both Python 2 and 3.
        return not self.__eq__(other)
class PolygonUDT(UserDefinedType):
    """
    SQL user-defined type (UDT) for Polygon.
    """
    @classmethod
    def sqlType(cls):
        # Bounding box followed by per-ring vertex counts and flat coordinates.
        bbox_fields = [StructField(field_name, DoubleType(), False)
                       for field_name in ("xmin", "ymin", "xmax", "ymax")]
        geometry_fields = [
            StructField("xyNum", ArrayType(IntegerType(), False), False),
            StructField("xyArr", ArrayType(DoubleType(), False), False),
        ]
        return StructType(bbox_fields + geometry_fields)
    @classmethod
    def module(cls):
        return "com.esri.udt"
    @classmethod
    def scalaUDT(cls):
        return "com.esri.udt.PolygonUDT"
    def serialize(self, obj):
        # Coerce counts/coordinates to plain int/float lists for the row.
        counts = list(map(int, obj.xyNum))
        coords = list(map(float, obj.xyArr))
        return obj.xmin, obj.ymin, obj.xmax, obj.ymax, counts, coords
    def deserialize(self, datum):
        return PolygonType(datum[0], datum[1], datum[2],
                           datum[3], datum[4], datum[5])
    def simpleString(self):
        return "polygon"
class PolygonType(object):
    """A polygon value (bounding box + ring counts + flat coordinates),
    stored in Spark SQL via PolygonUDT."""
    __UDT__ = PolygonUDT()
    def __init__(self, xmin, ymin, xmax, ymax, xyNum, xyArr):
        self.xmin = xmin
        self.ymin = ymin
        self.xmax = xmax
        self.ymax = ymax
        self.xyNum = xyNum
        self.xyArr = xyArr
    def __repr__(self):
        return "PolygonType({},{},{},{})".format(self.xmin, self.ymin, self.xmax, self.ymax)
    def __str__(self):
        return "({},{},{},{})".format(self.xmin, self.ymin, self.xmax, self.ymax)
    def __eq__(self, other):
        # NOTE(review): equality compares only the bounding box, not the
        # vertex data -- preserved as-is; confirm this is intentional.
        return isinstance(other, self.__class__) and \
            other.xmin == self.xmin and other.ymin == self.ymin and \
            other.xmax == self.xmax and other.ymax == self.ymax
    def __ne__(self, other):
        # Python 2 does not derive != from __eq__; define it explicitly so
        # == and != stay consistent on both Python 2 and 3.
        return not self.__eq__(other)
| {
"repo_name": "mraad/spark-gdb",
"path": "src/main/python/com/esri/udt/__init__.py",
"copies": "1",
"size": "8974",
"license": "apache-2.0",
"hash": 6541661052133554000,
"line_mean": 24.7873563218,
"line_max": 106,
"alpha_frac": 0.5554936483,
"autogenerated": false,
"ratio": 3.5192156862745096,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45747093345745093,
"avg_score": null,
"num_lines": null
} |
__all__ = ['polygon_clip', 'polygon_area']
import numpy as np
from matplotlib import _path, path, transforms
def polygon_clip(rp, cp, r0, c0, r1, c1):
    """Clip a polygon to the given bounding box.

    Parameters
    ----------
    rp, cp : (N,) ndarray of double
        Row and column coordinates of the polygon.
    (r0, c0), (r1, c1) : double
        Top-left and bottom-right coordinates of the bounding box.

    Returns
    -------
    r_clipped, c_clipped : (M,) ndarray of double
        Coordinates of clipped polygon.

    Notes
    -----
    This makes use of Sutherland-Hodgman clipping as implemented in
    AGG 2.4 and exposed in Matplotlib.
    """
    vertices = np.vstack((rp, cp)).T
    bbox = transforms.Bbox([[r0, c0], [r1, c1]])
    clipped = path.Path(vertices, closed=True).clip_to_bbox(bbox).to_polygons()[0]
    # matplotlib <= 1.5 repeats the closing vertex; strip the duplicate.
    if np.all(clipped[-1] == clipped[-2]):
        clipped = clipped[:-1]
    return clipped[:, 0], clipped[:, 1]
def polygon_area(pr, pc):
    """Compute the area of a polygon.

    Parameters
    ----------
    pr, pc : (N,) array of float
        Polygon row and column coordinates.

    Returns
    -------
    a : float
        Area of the polygon.
    """
    rows = np.asarray(pr)
    cols = np.asarray(pc)
    # Shoelace formula over consecutive vertex pairs.
    cross = (cols[:-1] * rows[1:]) - (cols[1:] * rows[:-1])
    return 0.5 * np.abs(np.sum(cross))
| {
"repo_name": "rjeli/scikit-image",
"path": "skimage/_shared/_geometry.py",
"copies": "11",
"size": "1411",
"license": "bsd-3-clause",
"hash": 3442610144012886000,
"line_mean": 25.1296296296,
"line_max": 72,
"alpha_frac": 0.5839829908,
"autogenerated": false,
"ratio": 3.3044496487119437,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 54
} |
__all__ = ['polygon_clip', 'polygon_area']
import numpy as np
from matplotlib import path, transforms
def polygon_clip(rp, cp, r0, c0, r1, c1):
    """Clip a polygon to the given bounding box.

    Parameters
    ----------
    rp, cp : (N,) ndarray of double
        Row and column coordinates of the polygon.
    (r0, c0), (r1, c1) : double
        Top-left and bottom-right coordinates of the bounding box.

    Returns
    -------
    r_clipped, c_clipped : (M,) ndarray of double
        Coordinates of clipped polygon.

    Notes
    -----
    This makes use of Sutherland-Hodgman clipping as implemented in
    AGG 2.4 and exposed in Matplotlib.
    """
    polygon = path.Path(np.column_stack((rp, cp)), closed=True)
    bounding_box = transforms.Bbox([[r0, c0], [r1, c1]])
    clipped = polygon.clip_to_bbox(bounding_box).to_polygons()[0]
    # matplotlib <= 1.5 repeats the closing vertex; strip the duplicate.
    if np.all(clipped[-1] == clipped[-2]):
        clipped = clipped[:-1]
    return clipped[:, 0], clipped[:, 1]
def polygon_area(pr, pc):
    """Compute the area of a polygon.

    Parameters
    ----------
    pr, pc : (N,) array of float
        Polygon row and column coordinates.

    Returns
    -------
    a : float
        Area of the polygon.
    """
    rows = np.asarray(pr)
    cols = np.asarray(pc)
    # Twice the signed area, via the shoelace formula.
    signed2 = np.sum((cols[:-1] * rows[1:]) - (cols[1:] * rows[:-1]))
    return 0.5 * np.abs(signed2)
| {
"repo_name": "paalge/scikit-image",
"path": "skimage/_shared/_geometry.py",
"copies": "1",
"size": "1404",
"license": "bsd-3-clause",
"hash": 3386516754324987400,
"line_mean": 25,
"line_max": 72,
"alpha_frac": 0.584045584,
"autogenerated": false,
"ratio": 3.311320754716981,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43953663387169806,
"avg_score": null,
"num_lines": null
} |
__all__ = ['polygon_clip', 'polygon_area']
import numpy as np
def polygon_clip(rp, cp, r0, c0, r1, c1):
    """Clip a polygon to the given bounding box.

    Parameters
    ----------
    rp, cp : (N,) ndarray of double
        Row and column coordinates of the polygon.
    (r0, c0), (r1, c1) : double
        Top-left and bottom-right coordinates of the bounding box.

    Returns
    -------
    r_clipped, c_clipped : (M,) ndarray of double
        Coordinates of clipped polygon.

    Notes
    -----
    This makes use of Sutherland-Hodgman clipping as implemented in
    AGG 2.4 and exposed in Matplotlib.
    """
    # Bug fix: the lazy matplotlib import used to precede the docstring,
    # which turned the docstring into a plain no-op string expression.
    # (The unused `_path` import was dropped as well.)
    from matplotlib import path, transforms
    poly = path.Path(np.vstack((rp, cp)).T, closed=True)
    clip_rect = transforms.Bbox([[r0, c0], [r1, c1]])
    poly_clipped = poly.clip_to_bbox(clip_rect).to_polygons()[0]
    # This should be fixed in matplotlib >1.5
    if np.all(poly_clipped[-1] == poly_clipped[-2]):
        poly_clipped = poly_clipped[:-1]
    return poly_clipped[:, 0], poly_clipped[:, 1]
def polygon_area(pr, pc):
    """Compute the area of a polygon.

    Parameters
    ----------
    pr, pc : (N,) array of float
        Polygon row and column coordinates.

    Returns
    -------
    a : float
        Area of the polygon.
    """
    rows, cols = np.asarray(pr), np.asarray(pc)
    # Shoelace formula: half the absolute value of twice the signed area.
    shoelace = (cols[:-1] * rows[1:]) - (cols[1:] * rows[:-1])
    return np.abs(np.sum(shoelace)) / 2.0
| {
"repo_name": "ryfeus/lambda-packs",
"path": "Skimage_numpy/source/skimage/_shared/_geometry.py",
"copies": "1",
"size": "1414",
"license": "mit",
"hash": 3824778858697641500,
"line_mean": 25.679245283,
"line_max": 72,
"alpha_frac": 0.5827439887,
"autogenerated": false,
"ratio": 3.3114754098360657,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43942193985360656,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Pool']
#
# Imports
#
import threading
import Queue
import itertools
import collections
import time
import os
from pyvotune.log import logger
import multiprocessing
from multiprocessing import Process, cpu_count, TimeoutError
from multiprocessing.util import Finalize, log_to_stderr
from multiprocessing.pool import TERMINATE, CLOSE, RUN, Pool, IMapIterator, IMapUnorderedIterator, mapstar, ApplyResult, MapResult
from logging import DEBUG
log = logger()
def worker(proc_num, inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
    """Worker process main loop.

    Pulls (job, i, func, args, kwds) tasks from `inqueue`, announces each job
    with a 'started' message (so the result handler can time it), runs it and
    puts a (job, i, proc_num, (success, value)) message on `outqueue`.
    Exits after `maxtasks` tasks, on a None sentinel, or on a broken pipe.

    Fixes: removed a stray `print` debug statement (all other diagnostics go
    through log.debug), replaced Python-2-only `except Exception, e` with the
    portable `as` form, and used isinstance() instead of a type() comparison.
    """
    try:
        log.debug("Worker {0} asserting".format(maxtasks))
        assert maxtasks is None or (isinstance(maxtasks, int) and maxtasks > 0)
        put = outqueue.put
        get = inqueue.get
        log.debug("Worker {0} started".format(proc_num))
        if initializer is not None:
            initializer(*initargs)
        completed = 0
        while maxtasks is None or (maxtasks and completed < maxtasks):
            try:
                task = get()
            except (EOFError, IOError):
                log.debug('worker got EOFError or IOError -- exiting')
                break
            if task is None:
                log.debug('worker got sentinel -- exiting')
                break
            job, i, func, args, kwds = task
            try:
                # Announce the start so the result handler can enforce timeouts.
                put((job, i, proc_num, 'started'))
                log.debug("Worker {0} starting job {1} {2} {3}".format(
                    proc_num, job, func, args))
                result = (True, func(*args, **kwds))
                log.debug("Worker {0} finished job {1} {2} {3}".format(
                    proc_num, job, func, args))
            except Exception as e:
                log.exception("Exception in worker {0} - {1} - {2} {3} {4}".format(
                    proc_num, e, job, func, args))
                result = (False, e)
            put((job, i, proc_num, result))
            completed += 1
        log.debug('worker exiting after %d tasks' % completed)
    except:
        # Deliberately broad: last-chance logging inside a child process.
        log.exception("Worker excepted {0}".format(
            proc_num))
class TimeoutPool(object):
    '''
    Class which supports an async version of the `apply()` builtin
    '''
    # Overridable process factory (mirrors multiprocessing.Pool.Process).
    Process = Process
    def __init__(self, processes=None, initializer=None, initargs=(),
                 maxtasksperchild=None, timeout_seconds=30):
        log_to_stderr(level=DEBUG)
        self._setup_queues()
        self._taskqueue = Queue.Queue()
        # job id -> pending result object (ApplyResult/MapResult/...).
        self._cache = {}
        self._state = RUN
        self._maxtasksperchild = maxtasksperchild
        self._initializer = initializer
        self._initargs = initargs
        # Monotonically increasing id handed to each spawned worker process.
        self._proc_num = 0
        if processes is None:
            try:
                processes = cpu_count()
            except NotImplementedError:
                processes = 1
        if initializer is not None and not hasattr(initializer, '__call__'):
            raise TypeError('initializer must be a callable')
        self._processes = processes
        self._pool = []
        self._repopulate_pool()
        # Reuse the stock Pool handlers for worker upkeep and task dispatch...
        self._worker_handler = threading.Thread(
            target=Pool._handle_workers,
            args=(self, )
        )
        self._worker_handler.daemon = True
        self._worker_handler._state = RUN
        self._worker_handler.start()
        self._task_handler = threading.Thread(
            target=Pool._handle_tasks,
            args=(self._taskqueue, self._quick_put, self._outqueue, self._pool)
        )
        self._task_handler.daemon = True
        self._task_handler._state = RUN
        self._task_handler.start()
        self.timeout_seconds = timeout_seconds
        # ...but use our own result handler, which enforces per-job timeouts.
        self._result_handler = threading.Thread(
            target=TimeoutPool._handle_results,
            args=(self._outqueue, self._quick_get, self._cache, self._pool, self.timeout_seconds)
        )
        self._result_handler.daemon = True
        self._result_handler._state = RUN
        self._result_handler.start()
        # Finalizer tears everything down exactly once (see _terminate_pool).
        self._terminate = Finalize(
            self, self._terminate_pool,
            args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
                  self._worker_handler, self._task_handler,
                  self._result_handler, self._cache),
            exitpriority=15
        )
def _join_exited_workers(self):
"""Cleanup after any worker processes which have exited due to reaching
their specified lifetime. Returns True if any workers were cleaned up.
"""
cleaned = False
for i in reversed(range(len(self._pool))):
worker = self._pool[i]
if worker.exitcode is not None:
# worker exited
log.debug('cleaning up worker %d' % i)
worker.join()
cleaned = True
del self._pool[i]
return cleaned
    def _repopulate_pool(self):
        """Bring the number of pool processes up to the specified number,
        for use after reaping workers which have exited.
        """
        for i in range(self._processes - len(self._pool)):
            # Each worker gets a unique id so the result handler can find
            # (and terminate) it by number on timeout.
            proc_num = self._proc_num
            self._proc_num += 1
            w = self.Process(target=worker,
                             args=(proc_num,
                                   self._inqueue, self._outqueue,
                                   self._initializer,
                                   self._initargs,
                                   self._maxtasksperchild)
                             )
            w._proc_num = proc_num
            self._pool.append(w)
            w.name = w.name.replace('Process', 'PoolWorker')
            w.daemon = True
            w.start()
            log.debug("groups {0}".format(os.getgroups()))
            log.debug('added worker {0} {1}'.format(w.name, len(self._pool) - 1))
def _maintain_pool(self):
"""Clean up any exited workers and start replacements for them.
"""
if self._join_exited_workers():
self._repopulate_pool()
    def _setup_queues(self):
        # SimpleQueue feeds tasks to workers; a full multiprocessing.Queue is
        # used for results so the handler can get() with a timeout.
        from multiprocessing.queues import SimpleQueue
        self._inqueue = SimpleQueue()
        self._outqueue = multiprocessing.Queue()
        # Shortcuts that bypass wrapper overhead (same trick as stdlib Pool).
        self._quick_put = self._inqueue._writer.send
        self._quick_get = self._outqueue.get
def apply(self, func, args=(), kwds={}):
'''
Equivalent of `apply()` builtin
'''
assert self._state == RUN
return self.apply_async(func, args, kwds).get()
def map(self, func, iterable, chunksize=None):
'''
Equivalent of `map()` builtin
'''
assert self._state == RUN
return self.map_async(func, iterable, chunksize).get()
    def imap(self, func, iterable, chunksize=1):
        '''
        Equivalent of `itertools.imap()` -- can be MUCH slower than `Pool.map()`
        '''
        assert self._state == RUN
        if chunksize == 1:
            # Each queued task is (job, index, callable, args, kwargs).
            result = IMapIterator(self._cache)
            self._taskqueue.put((((result._job, i, func, (x,), {})
                                  for i, x in enumerate(iterable)), result._set_length))
            return result
        else:
            assert chunksize > 1
            # Batch the iterable and unpack each batch with mapstar.
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapIterator(self._cache)
            self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                                  for i, x in enumerate(task_batches)), result._set_length))
            return (item for chunk in result for item in chunk)
    def imap_unordered(self, func, iterable, chunksize=1):
        '''
        Like `imap()` method but ordering of results is arbitrary
        '''
        assert self._state == RUN
        if chunksize == 1:
            # Each queued task is (job, index, callable, args, kwargs).
            result = IMapUnorderedIterator(self._cache)
            self._taskqueue.put((((result._job, i, func, (x,), {})
                                  for i, x in enumerate(iterable)), result._set_length))
            return result
        else:
            assert chunksize > 1
            # Batch the iterable and unpack each batch with mapstar.
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapUnorderedIterator(self._cache)
            self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                                  for i, x in enumerate(task_batches)), result._set_length))
            return (item for chunk in result for item in chunk)
def apply_async(self, func, args=(), kwds={}, callback=None):
'''
Asynchronous equivalent of `apply()` builtin
'''
assert self._state == RUN
result = ApplyResult(self._cache, callback)
self._taskqueue.put(([(result._job, None, func, args, kwds)], None))
return result
    def map_async(self, func, iterable, chunksize=None, callback=None):
        '''
        Asynchronous equivalent of `map()` builtin
        '''
        assert self._state == RUN
        if not hasattr(iterable, '__len__'):
            iterable = list(iterable)
        if chunksize is None:
            # Default: roughly 4 chunks per worker, rounded up.
            chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
            if extra:
                chunksize += 1
        if len(iterable) == 0:
            chunksize = 0
        task_batches = Pool._get_tasks(func, iterable, chunksize)
        result = MapResult(self._cache, chunksize, len(iterable), callback)
        self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                              for i, x in enumerate(task_batches)), None))
        return result
    @staticmethod
    def _handle_workers(pool):
        # Keep the pool fully populated until the pool is closed/terminated.
        while pool._worker_handler._state == RUN and pool._state == RUN:
            pool._maintain_pool()
            time.sleep(0.1)
        # send sentinel to stop workers
        pool._taskqueue.put(None)
        log.debug('worker handler exiting')
    @staticmethod
    def _handle_tasks(taskqueue, put, outqueue, pool):
        # Task-handler thread: drains taskqueue onto the worker input pipe.
        thread = threading.current_thread()
        for taskseq, set_length in iter(taskqueue.get, None):
            i = -1
            for i, task in enumerate(taskseq):
                if thread._state:
                    log.debug('task handler found thread._state != RUN')
                    break
                try:
                    put(task)
                except IOError:
                    log.debug('could not put task on queue')
                    break
            else:
                # Whole sequence dispatched: report its length (imap uses this).
                if set_length:
                    log.debug('doing set_length()')
                    set_length(i + 1)
                continue
            # Inner loop broke out -> stop handling tasks entirely.
            break
        else:
            log.debug('task handler got sentinel')
        try:
            # tell result handler to finish when cache is empty
            log.debug('task handler sending sentinel to result handler')
            outqueue.put(None)
            # tell workers there is no more work
            log.debug('task handler sending sentinel to workers')
            for p in pool:
                put(None)
        except IOError:
            log.debug('task handler got IOError when sending sentinels')
        log.debug('task handler exiting')
@staticmethod
def _handle_results(outqueue, get, cache, pool, timeout_seconds):
thread = threading.current_thread()
running_jobs = {}
def _check_timeouts(running_jobs):
log.debug("{0} Jobs running".format(
len(running_jobs)))
for job, (start_time, i, proc_num) in running_jobs.items():
diff_time = (time.time() - start_time)
if diff_time > timeout_seconds:
log.debug("Proc {0} timed out on job {1} in {2} seconds".format(
proc_num, job, diff_time))
for w in pool:
if w._proc_num == proc_num:
log.debug("Terminating worker {0}".format(w))
w.terminate()
print "Before delete", len(running_jobs)
del running_jobs[job]
print "After delete", len(running_jobs)
try:
cache[job]._set(i, (False, TimeoutError()))
except KeyError:
pass
while 1:
try:
task = get(True, 0.2)
except (Queue.Empty):
#log.debug("result handler received nothing during wait period")
_check_timeouts(running_jobs)
continue
except (IOError, EOFError):
log.debug('result handler got EOFError/IOError -- exiting')
return
if thread._state:
assert thread._state == TERMINATE
log.debug('result handler found thread._state=TERMINATE')
break
_check_timeouts(running_jobs)
if task is None:
log.debug('result handler got sentinel')
break
job, i, proc_num, obj = task
if isinstance(obj, basestring) and obj == 'started':
log.debug("worker started job {0}".format(job))
running_jobs[job] = (time.time(), i, proc_num)
continue
if job not in running_jobs:
log.debug("Worker is already done, ignoring result for job {0}".format(
job))
continue
del running_jobs[job]
log.debug("Worker finished job {0}".format(job))
try:
cache[job]._set(i, obj)
except KeyError:
pass
while cache and thread._state != TERMINATE:
try:
task = get()
except (Queue.Empty):
#log.debug("result handler received nothing during wait period")
_check_timeouts(running_jobs)
continue
except (IOError, EOFError):
log.debug('result handler got EOFError/IOError -- exiting')
return
_check_timeouts(running_jobs)
if task is None:
log.debug('result handler ignoring extra sentinel')
continue
job, i, proc_num, obj = task
if isinstance(obj, basestring) and obj == 'started':
log.debug("worker started job {0}".format(job))
running_jobs[job] = (time.time(), i, proc_num)
continue
if job not in running_jobs:
log.debug("Worker is already done, ignoring result for job {0}".format(
job))
continue
del running_jobs[job]
log.debug("Worker finished job {0}".format(job))
try:
cache[job]._set(i, obj)
except KeyError:
pass
if hasattr(outqueue, '_reader'):
log.debug('ensuring that outqueue is not full')
# If we don't make room available in outqueue then
# attempts to add the sentinel (None) to outqueue may
# block. There is guaranteed to be no more than 2 sentinels.
try:
for i in range(10):
if not outqueue._reader.poll():
break
get()
except (IOError, EOFError):
pass
log.debug('result handler exiting: len(cache)=%s, thread._state=%s', len(cache), thread._state)
@staticmethod
def _get_tasks(func, it, size):
it = iter(it)
while 1:
x = tuple(itertools.islice(it, size))
if not x:
return
yield (func, x)
    def __reduce__(self):
        # Pools own threads, pipes and processes; pickling is meaningless.
        raise NotImplementedError(
            'pool objects cannot be passed between processes or pickled'
        )
def close(self):
log.debug('closing pool')
if self._state == RUN:
self._state = CLOSE
self._worker_handler._state = CLOSE
    def terminate(self):
        # Immediate shutdown: mark everything TERMINATE, then run the
        # one-shot finalizer (_terminate_pool).
        log.debug('terminating pool')
        self._state = TERMINATE
        self._worker_handler._state = TERMINATE
        self._terminate()
def join(self):
log.debug('joining pool')
assert self._state in (CLOSE, TERMINATE)
self._worker_handler.join()
self._task_handler.join()
self._result_handler.join()
for p in self._pool:
p.join()
    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # task_handler may be blocked trying to put items on inqueue
        log.debug('removing tasks from inqueue until task handler finished')
        inqueue._rlock.acquire()
        # Drain pending tasks so the task handler's put() can complete.
        while task_handler.is_alive() and inqueue._reader.poll():
            inqueue._reader.recv()
            time.sleep(0)
    @classmethod
    def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
                        worker_handler, task_handler, result_handler, cache):
        # this is guaranteed to only be called once
        log.debug('finalizing pool')
        worker_handler._state = TERMINATE
        task_handler._state = TERMINATE
        log.debug('helping task handler/workers to finish')
        cls._help_stuff_finish(inqueue, task_handler, len(pool))
        assert result_handler.is_alive() or len(cache) == 0
        result_handler._state = TERMINATE
        outqueue.put(None)                  # sentinel
        # We must wait for the worker handler to exit before terminating
        # workers because we don't want workers to be restarted behind our back.
        log.debug('joining worker handler')
        worker_handler.join()
        # Terminate workers which haven't already finished.
        if pool and hasattr(pool[0], 'terminate'):
            log.debug('terminating workers')
            for p in pool:
                if p.exitcode is None:
                    p.terminate()
        log.debug('joining task handler')
        # NOTE(review): huge finite timeout instead of a bare join --
        # presumably to keep the join interruptible on Python 2; confirm.
        task_handler.join(1e100)
        log.debug('joining result handler')
        result_handler.join(1e100)
        if pool and hasattr(pool[0], 'terminate'):
            log.debug('joining pool workers')
            for p in pool:
                if p.is_alive():
                    # worker has not yet exited
                    log.debug('cleaning up worker %d' % p.pid)
                    p.join()
| {
"repo_name": "aelaguiz/pyvotune",
"path": "pyvotune/util/timeout_pool.py",
"copies": "1",
"size": "18420",
"license": "mit",
"hash": 8076286120123463000,
"line_mean": 33.820415879,
"line_max": 130,
"alpha_frac": 0.5308903366,
"autogenerated": false,
"ratio": 4.486117876278617,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5517008212878617,
"avg_score": null,
"num_lines": null
} |
__all__ = ["PortReservation", \
"PortManager", \
"PortServer"]
import sys
import socket
import select
import os
import re
import random
from browser.status import *
from base.log import VLOG
from base.bind import Bind
class PortReservation(object):
def __init__(self, on_free_func, port):
self.on_free_func = on_free_func
# port type: integer
self.port = port
def __del__(self):
if callable(self.on_free_func):
return self.on_free_func.Run()
return
def Leak(self):
VLOG(0, "Port leaked: " + str(self.port))
self.on_free_func = None
return
class PortManager(object):
    """Hands out free localhost ports in [min_port, max_port], remembering
    which ones are currently taken."""

    def __init__(self, min_port, max_port):
        self.min_port = min_port
        self.max_port = max_port
        # Ports (ints) currently reserved and not yet released.
        self.taken = []

    # return status and port<string> and reservation<PortReservation>
    def ReservePort(self):
        """Find a bindable, untaken port and return (Status, port_str, reservation).

        Starts at a random port to reduce collisions between concurrent
        managers and scans the range once, wrapping at max_port.
        """
        start = random.randint(self.min_port, self.max_port)
        wrapped = False
        try_port = start
        while try_port != start or not wrapped:
            if try_port > self.max_port:
                wrapped = True
                if self.min_port == self.max_port:
                    break
                try_port = self.min_port
            if try_port in self.taken:
                try_port = try_port + 1
                continue
            # Probe availability by binding; the probe socket must be closed
            # again either way.
            sock = socket.socket()
            try:
                sock.bind(('localhost', try_port))
            except socket.error:
                try_port = try_port + 1
                continue
            finally:
                # BUGFIX: the probe socket was never closed, leaking one file
                # descriptor per reservation attempt.
                sock.close()
            self.taken.append(try_port)
            reservation = PortReservation(Bind(self.ReleasePort, [try_port]), try_port)
            return Status(kOk), str(try_port), reservation
        return Status(kUnknownError, "unable to find open port"), "", PortReservation(None, None)

    def ReleasePort(self, port):
        """Mark *port* (int) as available again."""
        self.taken.remove(port)
class PortServer(object):
    """Requests ports from an external portserver over a Linux
    abstract-namespace Unix socket and recycles released ports locally."""

    def __init__(self, path):
        # Abstract-namespace socket paths start with a NUL byte on Linux.
        if len(path) != 0 and path.startswith('\0'):
            self.path = path
            # BUGFIX: was "self,free = []", which tried to unpack an empty
            # list into two targets and raised ValueError at construction.
            self.free = []
        else:
            VLOG(3, "path must be for Linux abstract namespace")

    # return status and a valid port<string>
    def RequestPort(self):
        """Ask the portserver for a fresh port; return (Status, port_str)."""
        # The client sends its PID + \n, and the server responds with a port + \n,
        # which is valid for the lifetime of the referred process.
        port = ""
        # BUGFIX: accept any Linux platform tag ('linux2' on Python 2,
        # 'linux' on Python 3).
        if not sys.platform.startswith('linux'):
            return Status(kUnknownError, "not implemented for this platform"), port
        try:
            sock_fd = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock_fd.setblocking(0)
        except socket.error:
            return Status(kUnknownError, "unable to create socket"), port
        try:
            sock_fd.settimeout(10)
        except socket.error:
            return Status(kUnknownError, "unable to set socket timeout"), port
        try:
            sock_fd.connect(self.path)
        except socket.error:
            return Status(kUnknownError, "unable to connect"), port
        try:
            request = str(os.getpid()) + '\n'
            VLOG(0, "PORTSERVER REQUEST " + request)
            sock_fd.send(request)
            response = ""
            # BUGFIX: select.select() takes three sequences; passing the bare
            # socket plus None raised TypeError. Also test the readable list
            # (ready[0]) -- the returned 3-tuple itself is always truthy.
            ready = select.select([sock_fd], [], [], 10)
            if ready[0]:
                response = sock_fd.recv(1024)
            if not response:
                return Status(kUnknownError, "failed to receive portserver response"), port
            VLOG(0, "PORTSERVER RESPONSE " + response)
            # parse portserver response
            matchObj = re.search(r'([0-9]+)\n', response)
            if not matchObj:
                return Status(kUnknownError, "failed to parse portserver response"), port
            port = matchObj.groups()[0]
            return Status(kOk), port
        except socket.timeout:
            # Raised when a timeout occurs on a socket that had timeouts
            # enabled via a prior settimeout() call.
            return Status(kUnknownError, "socket timeout"), port
        except Exception:
            return Status(kUnknownError, "unable to send portserver request"), port
        finally:
            # BUGFIX: close the socket on every exit path (was leaked).
            sock_fd.close()

    def ReleasePort(self, port):
        """Recycle *port* so a later ReservePort() can reuse it."""
        self.free.append(port)

    # return status and port<string> and reservation<PortReservation>
    def ReservePort(self):
        """Return (Status, port_str, reservation), preferring recycled ports.

        BUGFIX: previously a recycled port was popped from self.free and then
        immediately discarded by an unconditional RequestPort() call.
        """
        port_reservation = PortReservation(None, None)
        if self.free:
            port = self.free.pop(0)
            status = Status(kOk)
        else:
            status, port = self.RequestPort()
            if status.IsError():
                return status, port, port_reservation
        port_reservation = PortReservation(Bind(self.ReleasePort, [port]), port)
        return status, port, port_reservation
| {
"repo_name": "PeterWangIntel/crosswalk-webdriver-python",
"path": "net/port_server.py",
"copies": "1",
"size": "4353",
"license": "bsd-3-clause",
"hash": 1865162224329519600,
"line_mean": 30.3165467626,
"line_max": 93,
"alpha_frac": 0.6420859178,
"autogenerated": false,
"ratio": 3.70468085106383,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48467667688638294,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Predicate']
from typing import Callable
import attr
@attr.s(frozen=True)
class Predicate(object):
    """
    A predicate is used to check if a condition is met. Predicates are used
    to implement preconditions, postconditions, and invariants for a variety
    of purposes.

    Instances are frozen (immutable) and callable: ``pred(a, s, e)`` is an
    alias for ``pred.check(a, s, e)``.
    """
    # Human-readable name identifying this predicate.
    name = attr.ib(type=str)
    # The underlying condition: called with (action, state, environment)
    # and expected to return a bool.
    predicate = attr.ib(type=Callable[['Action', 'State', 'Environment'],
                                      bool])

    def check(self,
              action: 'Action',
              state: 'State',
              environment: 'Environment'
              ) -> bool:
        """
        Determines whether this predicate is satisfied by a given action,
        state, and environment.

        Parameters:
            action: details of the command being executed.
            state: the state of the system.
            environment: the state of the environment.

        Returns:
            True if this predicate is satisfied by the given information.
        """
        return self.predicate(action, state, environment)

    # Make instances directly callable as an alias for check().
    __call__ = check
class Invariant(Predicate):
    """
    Invariants are used to express statements about the system in formal logic
    that always remain true throughout the execution of an associated action.

    Behaves identically to Predicate; the subclass exists only to label intent.
    """
class Postcondition(Predicate):
    """
    Predicate that should be met after the execution of an action.

    Behaves identically to Predicate; the subclass exists only to label intent.
    """
class Precondition(Predicate):
    """
    Precondition that should be met before the execution of an action.

    Behaves identically to Predicate; the subclass exists only to label intent.
    """
| {
"repo_name": "squaresLab/Houston",
"path": "houston/predicate.py",
"copies": "1",
"size": "1518",
"license": "mit",
"hash": 223586114024267700,
"line_mean": 25.6315789474,
"line_max": 78,
"alpha_frac": 0.6166007905,
"autogenerated": false,
"ratio": 4.788643533123029,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5905244323623029,
"avg_score": null,
"num_lines": null
} |
# Maps person (single-character name; uppercase = man, lowercase = woman)
# to their ordered list of preferred partners.
all_preferences = {}

# read a list of all_preferences from the .txt file
# this .txt file is a copy of the challenge input from Reddit
for line in open('preferences.txt').read().splitlines():
    person = line[0]  # the first character of the line names the person
    prefs = line.split(', ')
    prefs.pop(0)  # drop the leading token (the person label itself)
    all_preferences[person] = prefs
# not a particularly unique solution
# adapted https://en.wikipedia.org/wiki/Stable_marriage_problem#Algorithm
# O(n**2) time complexity
def stableMatching():
    """Gale-Shapley stable matching: men (uppercase keys) propose in order.

    Returns a dict mapping each wife to her husband. NOTE: consumes the men's
    preference lists in the module-level ``all_preferences`` in place.
    """
    unmatched_men = [name for name in all_preferences if name == name.upper()]
    unmatched_women = [name for name in all_preferences if name == name.lower()]
    engagements = {}
    while unmatched_men:
        suitor = unmatched_men[0]
        candidate = all_preferences[suitor].pop(0)
        if candidate in unmatched_women:
            # She is free: engage immediately.
            engagements[candidate] = suitor
            unmatched_women.remove(candidate)
            unmatched_men.remove(suitor)
        else:
            # She is engaged: switch only if she prefers the new suitor.
            current = engagements[candidate]
            her_prefs = all_preferences[candidate]
            if her_prefs.index(suitor) < her_prefs.index(current):
                unmatched_men.append(current)
                unmatched_men.remove(suitor)
                engagements[candidate] = suitor
    return engagements
# stableMatching() maps wife -> husband; print each pair as "(husband: wife)".
for wife,husband in stableMatching().items():
    print("({}: {})".format(husband,wife))
| {
"repo_name": "filipdanic/dailyprogrammer",
"path": "Hard/husbands_for_sisters/husbands.py",
"copies": "1",
"size": "1199",
"license": "unlicense",
"hash": -4151231244089407000,
"line_mean": 34.2647058824,
"line_max": 87,
"alpha_frac": 0.6196830692,
"autogenerated": false,
"ratio": 3.4257142857142857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45453973549142856,
"avg_score": null,
"num_lines": null
} |
__all__ = ['pretty_print_model_fields']
def _get_class_full_name(cls_):
    """Return the dotted "module.ClassName" path of *cls_*."""
    return '.'.join([cls_.__module__, cls_.__name__])
class _ModelFieldRow(object):
    """One printable table row describing a single Django model field."""

    def __init__(self, field):
        self.field = field
        self.name = field.name
        self.type_ = _get_class_full_name(type(field))
        # Relational fields expose the model on the far side of the relation.
        is_relation = (field.many_to_many
                       or field.many_to_one
                       or field.one_to_many
                       or field.one_to_one)
        if is_relation:
            self.related_model = _get_class_full_name(self.field.remote_field.model)
        else:
            self.related_model = 'N/A'

    def pretty_print(self, max_name_len, max_type_len, max_rel_model_len):
        """Print the name/type/related-model columns padded to the given widths."""
        cells = [
            self.name.ljust(max_name_len),
            self.type_.ljust(max_type_len),
            self.related_model.ljust(max_rel_model_len),
        ]
        print('|'.join(cells))
def pretty_print_model_fields(model):
    """Print a fixed-width table of *model*'s fields: name, type, related model."""
    rows = [_ModelFieldRow(field) for field in model._meta.get_fields()]
    # Column widths: widest name, type, and related-model string respectively.
    widths = [0, 0, 0]
    for row in rows:
        widths[0] = max(widths[0], len(row.name))
        widths[1] = max(widths[1], len(row.type_))
        widths[2] = max(widths[2], len(row.related_model))
    # Horizontal rule spanning all columns plus the '|' separators.
    rule = '=' * (sum(widths) + len(widths) - 1)
    print(rule)
    for row in rows:
        row.pretty_print(*widths)
    print(rule)
| {
"repo_name": "wwitzel3/awx",
"path": "tools/scripts/list_fields.py",
"copies": "4",
"size": "1645",
"license": "apache-2.0",
"hash": -8808457904600796000,
"line_mean": 34.7608695652,
"line_max": 84,
"alpha_frac": 0.5580547112,
"autogenerated": false,
"ratio": 3.0462962962962963,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5604351007496297,
"avg_score": null,
"num_lines": null
} |
__all__ = ['process_csv', 'process_from_web']
import csv
import logging
import requests
from hashlib import md5
from .processor import TasProcessor
from indra.util import read_unicode_csv
tas_data_url = 'https://bigmech.s3.amazonaws.com/indra-db/tas.csv'
tas_resource_md5 = '554ccba4617aae7b3b06a62893424c7f'
logger = logging.getLogger(__name__)
def _load_data(data_iter):
    """Convert ``[headers, row, row, ...]`` into a list of header->value dicts."""
    headers = data_iter[0]
    return [dict(zip(headers, row)) for row in data_iter[1:]]
def process_from_web(affinity_class_limit=2, named_only=False,
                     standardized_only=False):
    """Return a TasProcessor for the contents of the TAS dump online.

    Interactions are classified into the following classes based on affinity:

      | 1 -- Kd < 100nM
      | 2 -- 100nM < Kd < 1uM
      | 3 -- 1uM < Kd < 10uM
      | 10 -- Kd > 10uM

    By default, only classes 1 and 2 are extracted but the affinity_class_limit
    parameter can be used to change the upper limit of extracted classes.

    Parameters
    ----------
    affinity_class_limit : Optional[int]
        Defines the highest class of binding affinity that is included in the
        extractions. Default: 2
    named_only : Optional[bool]
        If True, only chemicals that have a name assigned in some name space
        (including ones that aren't fully standardized per INDRA's ontology,
        e.g., CHEMBL1234) are included. If False, chemicals whose name is
        assigned based on an ID (e.g., CHEMBL) rather than an actual name are
        also included. Default: False
    standardized_only : Optional[bool]
        If True, only chemicals that are fully standardized per INDRA's
        ontology (i.e., they have grounding appearing in one of the
        default_ns_order name spaces, and consequently have any
        groundings and their name standardized) are extracted.
        Default: False

    Returns
    -------
    TasProcessor
        A TasProcessor object which has a list of INDRA Statements extracted
        from the CSV file representing drug-target inhibitions in its
        statements attribute.

    Raises
    ------
    RuntimeError
        If the downloaded data does not match the expected md5 checksum.
    """
    logger.info('Downloading TAS data from %s' % tas_data_url)
    res = requests.get(tas_data_url)
    # BUGFIX: check the HTTP status *before* using the response body.
    # Previously raise_for_status() ran only after the checksum comparison,
    # so a failed download (e.g. an error page) surfaced as a confusing
    # checksum RuntimeError instead of an HTTPError.
    res.raise_for_status()
    logger.info('Verifying md5 checksum of data')
    observed_checksum = md5(res.text.encode('utf-8')).hexdigest()
    if tas_resource_md5 != observed_checksum:
        raise RuntimeError('Checksum for downloaded TAS data does not'
                           ' match expected value')
    logger.info('Finished downloading TAS data from %s' % tas_data_url)
    data_iter = list(csv.reader(res.text.splitlines(), delimiter=','))
    return TasProcessor(_load_data(data_iter),
                        affinity_class_limit=affinity_class_limit,
                        named_only=named_only,
                        standardized_only=standardized_only)
def process_csv(fname, affinity_class_limit=2, named_only=False,
                standardized_only=False):
    """Return a TasProcessor for the contents of a given CSV file.

    Interactions are classified into the following classes based on affinity:

      | 1 -- Kd < 100nM
      | 2 -- 100nM < Kd < 1uM
      | 3 -- 1uM < Kd < 10uM
      | 10 -- Kd > 10uM

    By default, only classes 1 and 2 are extracted but the affinity_class_limit
    parameter can be used to change the upper limit of extracted classes.

    Parameters
    ----------
    fname : str
        The path to a local CSV file containing the TAS data.
    affinity_class_limit : Optional[int]
        Defines the highest class of binding affinity that is included in the
        extractions. Default: 2
    named_only : Optional[bool]
        If True, only chemicals that have a name assigned in some name space
        (including ones that aren't fully standardized per INDRA's ontology,
        e.g., CHEMBL1234) are included. If False, chemicals whose name is
        assigned based on an ID (e.g., CHEMBL) rather than an actual name are
        also included. Default: False
    standardized_only : Optional[bool]
        If True, only chemicals that are fully standardized per INDRA's
        ontology (i.e., they have grounding appearing in one of the
        default_ns_order name spaces, and consequently have any
        groundings and their name standardized) are extracted.
        Default: False

    Returns
    -------
    TasProcessor
        A TasProcessor object which has a list of INDRA Statements extracted
        from the CSV file representing drug-target inhibitions in its
        statements attribute.
    """
    rows = list(read_unicode_csv(fname))
    return TasProcessor(
        _load_data(rows),
        affinity_class_limit=affinity_class_limit,
        named_only=named_only,
        standardized_only=standardized_only,
    )
| {
"repo_name": "johnbachman/belpy",
"path": "indra/sources/tas/api.py",
"copies": "4",
"size": "5077",
"license": "mit",
"hash": -359319698873610800,
"line_mean": 39.616,
"line_max": 79,
"alpha_frac": 0.660035454,
"autogenerated": false,
"ratio": 3.938712179984484,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 125
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.