text stringlengths 0 1.05M | meta dict |
|---|---|
"""Abstraction form AEMET OpenData sensors."""
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import ATTRIBUTION, SENSOR_DEVICE_CLASS, SENSOR_NAME, SENSOR_UNIT
from .weather_update_coordinator import WeatherUpdateCoordinator
class AbstractAemetSensor(CoordinatorEntity, SensorEntity):
    """Base entity shared by all AEMET OpenData sensors."""

    def __init__(
        self,
        name,
        unique_id,
        sensor_type,
        sensor_configuration,
        coordinator: WeatherUpdateCoordinator,
    ):
        """Store the sensor identity and configuration-derived settings."""
        super().__init__(coordinator)
        self._name = name
        self._unique_id = unique_id
        self._sensor_type = sensor_type
        self._sensor_name = sensor_configuration[SENSOR_NAME]
        # Unit and device class are optional per sensor type.
        self._unit_of_measurement = sensor_configuration.get(SENSOR_UNIT)
        self._device_class = sensor_configuration.get(SENSOR_DEVICE_CLASS)

    @property
    def unique_id(self):
        """Return the unique ID of this sensor."""
        return self._unique_id

    @property
    def name(self):
        """Return the display name, combining station and sensor names."""
        return "{} {}".format(self._name, self._sensor_name)

    @property
    def device_class(self):
        """Return the device class of this sensor, if any."""
        return self._device_class

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this sensor, if any."""
        return self._unit_of_measurement

    @property
    def attribution(self):
        """Return the data-source attribution string."""
        return ATTRIBUTION

    @property
    def extra_state_attributes(self):
        """Return extra state attributes (the attribution)."""
        return {ATTR_ATTRIBUTION: ATTRIBUTION}
| {
"repo_name": "w1ll1am23/home-assistant",
"path": "homeassistant/components/aemet/abstract_aemet_sensor.py",
"copies": "3",
"size": "1822",
"license": "apache-2.0",
"hash": 5317964490455318000,
"line_mean": 30.4137931034,
"line_max": 77,
"alpha_frac": 0.6586169045,
"autogenerated": false,
"ratio": 4.358851674641149,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6517468579141149,
"avg_score": null,
"num_lines": null
} |
"""Abstraction form AEMET OpenData sensors."""
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import ATTRIBUTION, SENSOR_DEVICE_CLASS, SENSOR_NAME, SENSOR_UNIT
from .weather_update_coordinator import WeatherUpdateCoordinator
class AbstractAemetSensor(CoordinatorEntity):
    """Base entity shared by all AEMET OpenData sensors."""

    def __init__(
        self,
        name,
        unique_id,
        sensor_type,
        sensor_configuration,
        coordinator: WeatherUpdateCoordinator,
    ):
        """Store the sensor identity and configuration-derived settings."""
        super().__init__(coordinator)
        self._name = name
        self._unique_id = unique_id
        self._sensor_type = sensor_type
        self._sensor_name = sensor_configuration[SENSOR_NAME]
        # Unit and device class are optional per sensor type.
        self._unit_of_measurement = sensor_configuration.get(SENSOR_UNIT)
        self._device_class = sensor_configuration.get(SENSOR_DEVICE_CLASS)

    @property
    def unique_id(self):
        """Return the unique ID of this sensor."""
        return self._unique_id

    @property
    def name(self):
        """Return the display name, combining station and sensor names."""
        return "{} {}".format(self._name, self._sensor_name)

    @property
    def device_class(self):
        """Return the device class of this sensor, if any."""
        return self._device_class

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this sensor, if any."""
        return self._unit_of_measurement

    @property
    def attribution(self):
        """Return the data-source attribution string."""
        return ATTRIBUTION

    @property
    def device_state_attributes(self):
        """Return device state attributes (the attribution)."""
        return {ATTR_ATTRIBUTION: ATTRIBUTION}
| {
"repo_name": "partofthething/home-assistant",
"path": "homeassistant/components/aemet/abstract_aemet_sensor.py",
"copies": "2",
"size": "1752",
"license": "mit",
"hash": 5093971263165801000,
"line_mean": 29.7368421053,
"line_max": 77,
"alpha_frac": 0.649543379,
"autogenerated": false,
"ratio": 4.325925925925926,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 57
} |
"""Abstraction form OWM sensors."""
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import ATTRIBUTION, SENSOR_DEVICE_CLASS, SENSOR_NAME, SENSOR_UNIT
class AbstractOpenWeatherMapSensor(SensorEntity):
    """Base entity shared by all OpenWeatherMap sensors."""

    def __init__(
        self,
        name,
        unique_id,
        sensor_type,
        sensor_configuration,
        coordinator: DataUpdateCoordinator,
    ):
        """Store the sensor identity and configuration-derived settings."""
        self._name = name
        self._unique_id = unique_id
        self._sensor_type = sensor_type
        self._sensor_name = sensor_configuration[SENSOR_NAME]
        # Unit and device class are optional per sensor type.
        self._unit_of_measurement = sensor_configuration.get(SENSOR_UNIT)
        self._device_class = sensor_configuration.get(SENSOR_DEVICE_CLASS)
        self._coordinator = coordinator

    @property
    def unique_id(self):
        """Return the unique ID of this sensor."""
        return self._unique_id

    @property
    def name(self):
        """Return the display name, combining location and sensor names."""
        return "{} {}".format(self._name, self._sensor_name)

    @property
    def should_poll(self):
        """Disable polling; the coordinator pushes updates."""
        return False

    @property
    def device_class(self):
        """Return the device class of this sensor, if any."""
        return self._device_class

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this sensor, if any."""
        return self._unit_of_measurement

    @property
    def attribution(self):
        """Return the data-source attribution string."""
        return ATTRIBUTION

    @property
    def extra_state_attributes(self):
        """Return extra state attributes (the attribution)."""
        return {ATTR_ATTRIBUTION: ATTRIBUTION}

    @property
    def available(self):
        """Report availability from the coordinator's last update result."""
        return self._coordinator.last_update_success

    async def async_added_to_hass(self):
        """Subscribe to coordinator updates for the entity's lifetime."""
        remove_listener = self._coordinator.async_add_listener(
            self.async_write_ha_state
        )
        self.async_on_remove(remove_listener)

    async def async_update(self):
        """Ask the coordinator to refresh its data."""
        await self._coordinator.async_request_refresh()
| {
"repo_name": "w1ll1am23/home-assistant",
"path": "homeassistant/components/openweathermap/abstract_owm_sensor.py",
"copies": "3",
"size": "2398",
"license": "apache-2.0",
"hash": -7493894730500949000,
"line_mean": 30.1428571429,
"line_max": 77,
"alpha_frac": 0.647206005,
"autogenerated": false,
"ratio": 4.424354243542435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 77
} |
"""Abstraction form OWM sensors."""
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import ATTRIBUTION, SENSOR_DEVICE_CLASS, SENSOR_NAME, SENSOR_UNIT
class AbstractOpenWeatherMapSensor(Entity):
    """Base entity shared by all OpenWeatherMap sensors."""

    def __init__(
        self,
        name,
        unique_id,
        sensor_type,
        sensor_configuration,
        coordinator: DataUpdateCoordinator,
    ):
        """Store the sensor identity and configuration-derived settings."""
        self._name = name
        self._unique_id = unique_id
        self._sensor_type = sensor_type
        self._sensor_name = sensor_configuration[SENSOR_NAME]
        # Unit and device class are optional per sensor type.
        self._unit_of_measurement = sensor_configuration.get(SENSOR_UNIT)
        self._device_class = sensor_configuration.get(SENSOR_DEVICE_CLASS)
        self._coordinator = coordinator

    @property
    def unique_id(self):
        """Return the unique ID of this sensor."""
        return self._unique_id

    @property
    def name(self):
        """Return the display name, combining location and sensor names."""
        return "{} {}".format(self._name, self._sensor_name)

    @property
    def should_poll(self):
        """Disable polling; the coordinator pushes updates."""
        return False

    @property
    def device_class(self):
        """Return the device class of this sensor, if any."""
        return self._device_class

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this sensor, if any."""
        return self._unit_of_measurement

    @property
    def attribution(self):
        """Return the data-source attribution string."""
        return ATTRIBUTION

    @property
    def device_state_attributes(self):
        """Return device state attributes (the attribution)."""
        return {ATTR_ATTRIBUTION: ATTRIBUTION}

    @property
    def available(self):
        """Report availability from the coordinator's last update result."""
        return self._coordinator.last_update_success

    async def async_added_to_hass(self):
        """Subscribe to coordinator updates for the entity's lifetime."""
        remove_listener = self._coordinator.async_add_listener(
            self.async_write_ha_state
        )
        self.async_on_remove(remove_listener)

    async def async_update(self):
        """Ask the coordinator to refresh its data."""
        await self._coordinator.async_request_refresh()
| {
"repo_name": "balloob/home-assistant",
"path": "homeassistant/components/openweathermap/abstract_owm_sensor.py",
"copies": "9",
"size": "2384",
"license": "apache-2.0",
"hash": -4179208670834003500,
"line_mean": 29.961038961,
"line_max": 77,
"alpha_frac": 0.6451342282,
"autogenerated": false,
"ratio": 4.4148148148148145,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9559949043014815,
"avg_score": null,
"num_lines": null
} |
"""Abstraction form OWM sensors."""
import logging
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import ATTRIBUTION, SENSOR_DEVICE_CLASS, SENSOR_NAME, SENSOR_UNIT
# Module-level logger (not referenced in this file's visible code —
# presumably kept for platform code elsewhere; confirm before removing).
_LOGGER = logging.getLogger(__name__)
class AbstractOpenWeatherMapSensor(Entity):
    """Base entity shared by all OpenWeatherMap sensors."""

    def __init__(
        self,
        name,
        unique_id,
        sensor_type,
        sensor_configuration,
        coordinator: DataUpdateCoordinator,
    ):
        """Store the sensor identity and configuration-derived settings."""
        self._name = name
        self._unique_id = unique_id
        self._sensor_type = sensor_type
        self._sensor_name = sensor_configuration[SENSOR_NAME]
        # Unit and device class are optional per sensor type.
        self._unit_of_measurement = sensor_configuration.get(SENSOR_UNIT)
        self._device_class = sensor_configuration.get(SENSOR_DEVICE_CLASS)
        self._coordinator = coordinator

    @property
    def unique_id(self):
        """Return the unique ID of this sensor."""
        return self._unique_id

    @property
    def name(self):
        """Return the display name, combining location and sensor names."""
        return "{} {}".format(self._name, self._sensor_name)

    @property
    def should_poll(self):
        """Disable polling; the coordinator pushes updates."""
        return False

    @property
    def device_class(self):
        """Return the device class of this sensor, if any."""
        return self._device_class

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this sensor, if any."""
        return self._unit_of_measurement

    @property
    def attribution(self):
        """Return the data-source attribution string."""
        return ATTRIBUTION

    @property
    def device_state_attributes(self):
        """Return device state attributes (the attribution)."""
        return {ATTR_ATTRIBUTION: ATTRIBUTION}

    @property
    def available(self):
        """Report availability from the coordinator's last update result."""
        return self._coordinator.last_update_success

    async def async_added_to_hass(self):
        """Subscribe to coordinator updates for the entity's lifetime."""
        remove_listener = self._coordinator.async_add_listener(
            self.async_write_ha_state
        )
        self.async_on_remove(remove_listener)

    async def async_update(self):
        """Ask the coordinator to refresh its data."""
        await self._coordinator.async_request_refresh()
| {
"repo_name": "tchellomello/home-assistant",
"path": "homeassistant/components/openweathermap/abstract_owm_sensor.py",
"copies": "1",
"size": "2439",
"license": "apache-2.0",
"hash": -4053187661174629000,
"line_mean": 29.1111111111,
"line_max": 77,
"alpha_frac": 0.6465764658,
"autogenerated": false,
"ratio": 4.394594594594595,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5541171060394594,
"avg_score": null,
"num_lines": null
} |
# abstraction layer for dfrotz
# parts for non-blocking communication are taken from
# https://gist.github.com/EyalAr/7915597
# this code is still somewhat shitty...
import os
import queue
import subprocess
import sys
import threading
class DFrotz():
    """Wrapper around a dfrotz interpreter subprocess.

    Spawns dfrotz with a game file and talks to it over stdin/stdout.
    Output is drained on a daemon thread into a queue so reads never
    block the caller (technique from
    https://gist.github.com/EyalAr/7915597).
    """

    def __init__(self, arg_frotz_path, arg_game_path):
        # Paths to the dfrotz binary and the z-machine game file.
        self.frotz_path = arg_frotz_path
        self.game_path = arg_game_path
        #print(os.path.abspath(self.frotz_path))
        try:
            self.frotz = subprocess.Popen(
                [self.frotz_path, self.game_path],
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                bufsize=1
            )
        except OSError:
            print('Couldn\'t run Frotz. Maybe wrong architecture?')
            sys.exit(0)
        # Reader thread copies every stdout line into this queue; marked
        # daemon so it does not keep the process alive at shutdown.
        self.queue = queue.Queue()
        self.thread = threading.Thread(target=self.enqueue, args=(self.frotz.stdout, self.queue))
        self.thread.daemon = True
        self.thread.start()

    def enqueue(self, out, queue):
        # Runs on the reader thread: push raw lines until EOF (b'' sentinel),
        # then close the pipe. Parameter `queue` shadows the module here.
        for line in iter(out.readline, b''):
            queue.put(line)
        out.close()

    def send(self, command):
        """Write a command to the game; the text is encoded as cp1252."""
        self.frotz.stdin.write(command.encode('cp1252'))
        try:
            self.frotz.stdin.flush()
        except BrokenPipeError:
            # Interpreter died; report instead of raising.
            debug_string = '[DEV] Pipe is broken. Please tell @mrtnb what you did.'
            return debug_string

    def generate_output(self):
        """Join lines collected by get() and tidy Frotz prompt artifacts."""
        self.raw_output = ''.join(self.lines)
        # clean up Frotz' output
        self.output = self.raw_output.replace('> > ', '')
        self.output = self.output.replace('\n.\n', '\n\n')
        return self.output

    def get(self):
        """Drain pending interpreter output (waiting up to 1s per line)."""
        self.lines = []
        while True:
            try:
                self.line = self.queue.get(timeout=1).decode('cp1252')
                # Collapse runs of whitespace inside each physical line.
                self.line = '\n'.join(' '.join(line_.split()) for line_ in self.line.split('\n'))
            except queue.Empty:
                # No more buffered output; stop draining.
                print('', end='')
                break
            else:
                self.lines.append(self.line)
        for index, line in enumerate(self.lines):
            # long line (> 70 chars) could be a part of
            # a text passage - removing \n there to
            # make output more readable
            if len(line) >= 70 and line.endswith('\n'):
                self.lines[index] = line.replace('\n', ' ')
        return self.generate_output()
def main():
    """Interactive driver: forward stdin commands to a Frotz session.

    Bug fix: DFrotz.__init__ requires the frotz binary path and a game
    file path, but this was called as DFrotz() with no arguments, which
    raised TypeError immediately. Take both paths from the command line.
    """
    if len(sys.argv) < 3:
        print('usage: {0} <frotz-path> <game-path>'.format(sys.argv[0]))
        sys.exit(1)
    f = DFrotz(sys.argv[1], sys.argv[2])
    while True:
        # Show accumulated game output, then send the user's next command
        # terminated with CRLF as the interpreter expects.
        print(f.get())
        cmd = '%s\r\n' % input()
        f.send(cmd)

if __name__ == '__main__':
    main()
"repo_name": "sneaksnake/z5bot",
"path": "dfrotz.py",
"copies": "1",
"size": "2614",
"license": "mit",
"hash": 382593423080654600,
"line_mean": 29.4069767442,
"line_max": 97,
"alpha_frac": 0.5443764346,
"autogenerated": false,
"ratio": 3.8216374269005846,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4866013861500585,
"avg_score": null,
"num_lines": null
} |
"""Abstraction layer for motors."""
import bbb.pwm as pwm_mod
import bbb.gpio as gpio_mod
import bot.lib.lib as lib
# Logical GPIO values for motor rotation direction; a motor's effective
# forward/reverse mapping may be swapped via Motor.invert().
FORWARD = 1
REVERSE = 0
class Motor(object):
    """Class for abstracting motor settings.

    Note that motors without GPIO pins are assumed to not need to change
    direction in-code. Their direction should be manually changed by
    switching the wires that drive them.
    """

    def __init__(self, pwm_num, gpio_num=None, inverted=False):
        """Build GPIO and PWM pins, set initial values.

        Note that the default gpio_num=None param implies that the motor
        has no direction. Its direction should be manually changed by
        swapping the wires that drive it.

        :param pwm_num: PWM number for this motor.
        :type pwm_num: int
        :param gpio_num: Optional GPIO number for this motor.
        :type gpio_num: int
        :param inverted: Whether to treat direction as inverted.
        :type inverted: bool
        """
        # Get and store logger object
        self.logger = lib.get_logger()
        # Store PWM and GPIO numbers of motor
        self.pwm_num = pwm_num
        self.gpio_num = gpio_num
        # Set motor-specific forward and reverse values based on inverted
        # TODO: Make this a config flag
        self.invert(inverted)
        # Load system configuration
        config = lib.get_config("bot/config.yaml")
        if config["test_mode"]["motor"]:
            # Test mode: back pins with simulated hardware files instead of
            # real BBB sysfs entries.
            # Get dir of simulated hardware files from config
            pwm_test_dir = config["test_pwm_base_dir"]
            # Build PWM object for BBB interaction, provide test dir
            self.pwm = pwm_mod.PWM(self.pwm_num, pwm_test_dir)
            if self.gpio_num is not None:
                # Build GPIO object for BBB interaction, provide test dir
                gpio_test_dir = config["test_gpio_base_dir"]
                self.gpio = gpio_mod.GPIO(self.gpio_num, gpio_test_dir)
        else:
            # Build PWM object for BBB interaction
            self.pwm = pwm_mod.PWM(self.pwm_num)
            if self.gpio_num is not None:
                # Build GPIO object for BBB interaction
                self.gpio = gpio_mod.GPIO(self.gpio_num)
        # Polarity should be 0 to get X% high at X PWM.
        self.pwm.polarity = 0
        # Setup initial speed and direction
        self.speed = 0
        if self.gpio_num is not None:
            self.direction = FORWARD
        self.logger.debug("Setup {}".format(self))

    def __str__(self):
        """Override string representation of this object for readability.

        :returns: Human readable representation of this object.
        """
        if self.gpio_num is None:
            # Directionless motor: no GPIO, direction or velocity to report.
            return "Motor PWM:{} GPIO:None speed:{}".format(
                self.pwm_num, self.speed)
        return "Motor PWM:{} GPIO:{} speed:{} dir:{} vel:{}".format(
            self.pwm_num,
            self.gpio_num,
            self.speed,
            self.direction,
            self.velocity)

    def invert(self, inverted):
        """Provides ability to invert motor direction.

        This is needed to account for the physical position of motors.

        :param inverted: True to swap typical forward and reverse directions.
        :type inverted: bool
        """
        self.inverted = inverted
        if self.inverted:
            self.forward = REVERSE
            self.reverse = FORWARD
        else:
            self.forward = FORWARD
            self.reverse = REVERSE

    def get_speed(self):
        """Getter for motor's speed as % of max (same as duty cycle).

        :returns: Current motor speed as percent of max speed.
        """
        return int(round((self.pwm.duty / float(self.pwm.period)) * 100))

    def set_speed(self, speed):
        """Setter for motor's speed as % of max (same as duty cycle).

        Values outside 0-100 are clamped (with a warning), not rejected.

        :param speed: Speed to set motor to in % of maximum.
        :type speed: float
        """
        speed = int(round(speed))
        if speed > 100:
            self.logger.warning("Invalid speed {}, using 100".format(speed))
            speed = 100
        elif speed < 0:
            self.logger.warning("Invalid speed {}, using 0".format(speed))
            speed = 0
        self.pwm.duty = int(round((speed / 100.) * self.pwm.period))
        self.logger.debug("Updated speed {}".format(self))

    # Expose speed as a property so `motor.speed = 50` drives the PWM.
    speed = property(get_speed, set_speed)

    def get_direction(self):
        """Getter for motor's direction.

        Motors that have no GPIO pin have no coded direction. This method
        will return None in that case. Set motor direction by manually
        switching the motor's wires.

        :returns: Direction of motor ("forward", "reverse" or None).
        """
        if self.gpio_num is None:
            self.logger.warning("{} doesn't own a GPIO".format(self))
            return None
        if self.gpio.value == self.forward:
            return "forward"
        elif self.gpio.value == self.reverse:
            return "reverse"
        else:
            # GPIO value matched neither mapping; implicitly returns None.
            self.logger.error("Invalid polarity: {}".format(self.gpio))

    def set_direction(self, direction):
        """Setter for motor's direction. Toggles a GPIO pin.

        Motors that have no GPIO pin have no coded direction. This method
        will return None in that case. Set motor direction by manually
        switching the motor's wires.

        :param direction: Dir to rotate motors (1="forward", 0="reverse").
        :type direction: int or string
        """
        if self.gpio_num is None:
            self.logger.warning("{} doesn't own a GPIO".format(self))
            return None
        if direction == "forward":
            direction = self.forward
        elif direction == "reverse":
            direction = self.reverse
        elif direction != 0 and direction != 1:
            self.logger.warning("Invalid dir {}, no update.".format(direction))
            return
        self.gpio.value = direction
        self.logger.debug("Updated direction {}".format(self))

    # Expose direction as a property so `motor.direction = "forward"` works.
    direction = property(get_direction, set_direction)

    def get_velocity(self):
        """Getter for motor's velocity as % of max (+ forward, - backward).

        Note that directionless motors (no assigned GPIO pin) will return +.

        :returns: Current motor velocity as % of max with signed direction.
        """
        if self.gpio_num is None:
            return self.speed
        return self.speed * (1 if self.gpio.value == self.forward else -1)

    # Read-only property: velocity is derived from speed and direction.
    velocity = property(get_velocity)
| {
"repo_name": "IEEERobotics/bot",
"path": "bot/hardware/motor.py",
"copies": "2",
"size": "6558",
"license": "bsd-2-clause",
"hash": 1589283851955610400,
"line_mean": 31.4653465347,
"line_max": 79,
"alpha_frac": 0.5946935041,
"autogenerated": false,
"ratio": 4.228239845261122,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5822933349361122,
"avg_score": null,
"num_lines": null
} |
"""Abstraction layer to deal with Django related changes in order to keep
compatibility with several Django versions simultaneously."""
from django import VERSION as DJANGO_VERSION
from django.db import transaction
from django.conf import settings as django_settings
# Django 1.5+
# Swappable user models exist only from Django 1.5 on; older versions can
# only ever use the built-in auth.User, so fall back to its label.
if DJANGO_VERSION >= (1,5):
    USER_MODEL = getattr(django_settings, 'AUTH_USER_MODEL', 'auth.User')
else:
    USER_MODEL = 'auth.User'
def get_user_model():
    """Return the active user model class, across Django versions."""
    if DJANGO_VERSION < (1, 5):
        # No swappable user models before 1.5 — always auth.User.
        from django.contrib.auth.models import User
        return User
    from django.contrib.auth import get_user_model as gum
    return gum()
# Django 1.6 transaction API, required for 1.8+
def nop_decorator(func):
    """Identity decorator: hand the decorated function back unchanged."""
    return func
# Where these are used in code, both old and new methods for transactions appear
# to be used, but only one will actually do anything. When only Django 1.8+
# is supported, transaction_commit_on_success can be deleted.
try:
    atomic = transaction.atomic # Does it exist?
    transaction_commit_on_success = nop_decorator
except AttributeError:
    # Pre-1.6 Django: no atomic(); make it a no-op and rely on the legacy
    # commit_on_success decorator instead.
    atomic = nop_decorator
    transaction_commit_on_success = transaction.commit_on_success
| {
"repo_name": "Si-elegans/Web-based_GUI_Tools",
"path": "wiki/core/compat.py",
"copies": "2",
"size": "1180",
"license": "apache-2.0",
"hash": -6985631712228756000,
"line_mean": 31.7777777778,
"line_max": 80,
"alpha_frac": 0.7228813559,
"autogenerated": false,
"ratio": 3.8436482084690553,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5566529564369056,
"avg_score": null,
"num_lines": null
} |
'''Abstraction model for our lab containers managed in Docker'''
from datetime import datetime
import functools
import logging
import os
import os.path
import shutil
from docker import Client
from docker import errors
import lsc.config as config
class Instance(object):
    '''Wraps all operations on Lab instances

    Each instance is defined by a record from the Lab Control
    Sheet. The instance can have a Docker container and an external
    directory, passed to the container as a volume.

    NOTE: written for Python 2 ("except X, e" syntax).
    '''

    log = logging.getLogger(__name__)
    # Lazily-created Docker client; class attribute, so once connected it
    # is shared by every Instance (see ensure_docker_api()).
    docker_api = None

    def __init__(self, rec):
        # rec: one Lab Control Sheet record; settings live on rec.cols.
        self.rec = rec

    # ------------------------------------------------------------------------
    # Properties

    def safe_name(self):
        '''Unique name for the instance, based on instance number'''
        return "instance_{0}".format(int(self.rec.cols.inst_nmbr))

    def instance_dir(self):
        '''Path to the instance directory'''
        return os.path.join(config.instance_data_dir, self.safe_name())

    def notebook_dir(self):
        '''Path to the instance's IPython notebook directory'''
        return os.path.join(self.instance_dir(), 'notebooks')

    def world_dir(self):
        '''Path to the instance's Minecraft world directory'''
        return os.path.join(self.instance_dir(), 'server-files', 'worlds')

    def ensure_docker_api(self):
        # Connect to Docker on first use only.
        if not self.docker_api:
            # NOTE(review): stray docstring below sits inside the if-body,
            # so it is a no-op expression, not the method's docstring.
            '''Connect to Docker and gets access to the Client API'''
            self.docker_api = Client(base_url=config.docker_control_url)

    # ------------------------------------------------------------------------
    # Status methods

    def server_status(self):
        # Returns "Paused"/"Running"/"Stopped", or the string "None" when
        # no container with this name exists (inspect raises APIError).
        self.ensure_docker_api()
        try:
            cd = self.docker_api.inspect_container(self.safe_name())
            if cd['State']['Paused']:
                return "Paused"
            elif cd['State']['Running']:
                return "Running"
            else:
                return "Stopped"
        except errors.APIError, ae:
            return "None"
            pass

    def world_status(self):
        # 'Available' when the Minecraft world directory exists on disk.
        if os.path.isdir(self.world_dir()):
            return 'Available'
        else:
            return 'Unavailable'

    def notebooks_status(self):
        # 'Available' when the notebook directory exists on disk.
        if os.path.isdir(self.notebook_dir()):
            return 'Available'
        else:
            return 'Unavailable'

    def container_ids(self):
        # Returns the container's ID, or the string "None" when absent.
        self.ensure_docker_api()
        try:
            cd = self.docker_api.inspect_container(self.safe_name())
            return cd['Id']
        except errors.APIError, ae:
            return "None"

    def gather_status(self):
        ''' Gather status from the parts managed by the LSC '''
        self.rec.replace_cols(servers=self.server_status(),
                              world=self.world_status(),
                              notebooks=self.notebooks_status(),
                              status_as_of=str(datetime.now()),
                              container_ids=self.container_ids() )

    def lsc_message(self, message):
        ''' Convenience method to set the LSC Message '''
        # The message template may reference record columns, e.g. "{r.command}".
        self.rec.replace_cols(lsc_message=message.format(r=self.rec.cols))

    # ------------------------------------------------------------------------
    # Dispatch and its commands

    def unimplemented(self):
        ''' Invoked command has not yet been implemented
        '''
        self.lsc_message("Unimplemented command '{r.command}'")

    def noop(self):
        ''' Take no action on the record
        '''
        pass

    def run(self):
        ''' Run the instance according to the settings in the LSC
        '''
        self.log.info("Preparing to launch instance {0.inst_nmbr}".format(self.rec.cols))
        self.ensure_docker_api()
        if self.server_status() == 'Running':
            self.lsc_message("Already running")
        elif self.server_status() == 'Paused':
            self.docker_api.unpause(self.safe_name())
            self.lsc_message("Unpaused")
        elif self.server_status() == 'Stopped':
            self.docker_api.restart(self.safe_name())
            self.lsc_message("Restarted")
        else:
            # No container yet: build one from scratch.
            # Prepare to run the instance
            # Create the instance directory, if needed
            if not os.path.isdir(self.instance_dir()):
                os.mkdir(self.instance_dir())
            # Create the container
            container = self.docker_api.create_container(
                name=self.safe_name(),
                image=config.docker_image, #'coderdojotc.org/python-minecraft-student',
                detach=True,
                volumes=['/home/student/minecraft-lab'],
                environment = {
                    'MOJANG_ACCOUNTS': self.rec.cols.mojang_accounts,
                    'STUDENT_PASSWORD': self.rec.cols.student_password,
                    'CODERDOJO_REPO': config.sourcecode_repo,
                },
                ports = [8888, 25565],
            )
            # Then start it
            response = self.docker_api.start(
                container=container.get('Id'),
                binds={
                    self.instance_dir(): {
                        'bind': '/home/student/minecraft-lab',
                        'ro': False,
                    },
                },
                port_bindings={
                    8888: self.rec.cols.ipython_port,
                    25565: self.rec.cols.minecraft_port,
                },
            )
            self.lsc_message("Run response {0}".format(response))

    def down(self):
        ''' Down the instance
        '''
        self.log.info("Preparing to down instance {0.inst_nmbr}".format(self.rec.cols))
        self.ensure_docker_api()
        if self.server_status() == 'Running':
            self.docker_api.stop(container=self.safe_name(), timeout=15)
            self.lsc_message("Stopped")
        elif self.server_status() == 'Paused':
            # Must unpause before a stop is honored.
            self.docker_api.unpause(self.safe_name())
            self.docker_api.stop(container=self.safe_name(), timeout=15)
            self.lsc_message("Stopped")
        else:
            self.lsc_message("Not running")

    def down_and_reset(self, world=False, notebooks=False, instance=False):
        '''Down the instance, and optionally reset some or all of the state.

        Use functools.partial() to create wrapper functions with one
        or more flags preset.

        When the instance is restarted, the pieces that were
        reset/destroyed will be recreated. For example, deleting the
        world while leaving the Python Notebook files will give a
        student a clean place to run their scripts again.
        '''
        self.log.info("Preparing to down instance {0.inst_nmbr}".format(self.rec.cols))
        self.ensure_docker_api()
        if self.server_status() == 'Running':
            self.docker_api.stop(container=self.safe_name(), timeout=15)
        elif self.server_status() == 'Paused':
            self.docker_api.unpause(self.safe_name())
            self.docker_api.stop(container=self.safe_name(), timeout=15)
        if instance and self.container_ids() != 'None':
            self.docker_api.remove_container(container=self.safe_name())
        if world and self.world_status() == 'Available':
            shutil.rmtree(self.world_dir(), ignore_errors=True)
        if notebooks and self.notebooks_status() == 'Available':
            shutil.rmtree(self.notebook_dir(), ignore_errors=True)
        # Report the most descriptive status message for the flag combination.
        if world and notebooks and instance:
            self.lsc_message("Destroyed instance")
        elif world:
            self.lsc_message("Reset world")
        elif notebooks:
            self.lsc_message("Reset notebooks")
        else:
            self.lsc_message("Instance stopped")

    # An empty command cell in the sheet means "do nothing".
    COMMAND_PLACEHOLDER = ''
    # Sheet command -> unbound method; partials preset down_and_reset flags.
    COMMANDS = {
        'RUN': run,
        'DOWN': functools.partial(down_and_reset),
        'RESETWORLD': functools.partial(down_and_reset, world=True),
        'RESETNOTEBOOKS': functools.partial(down_and_reset, notebooks=True),
        'DESTROY': functools.partial(down_and_reset, world=True, notebooks=True, instance=True),
        COMMAND_PLACEHOLDER: noop,
    }

    def dispatch(self):
        ''' Invoke the appropriate method, based on the command in the record
        '''
        self.gather_status()
        cmd = self.rec.cols.command.upper()
        if cmd in self.COMMANDS:
            try:
                self.COMMANDS[cmd](self)
                self.gather_status()
            except Exception, e:
                self.lsc_message("Exception occurred: {e}".format(e=e))
            # Clear the command cell so it is not re-run next pass.
            self.rec.replace_cols(command=self.COMMAND_PLACEHOLDER)
        else:
            self.lsc_message("Unrecognized command '{r.command}'")
| {
"repo_name": "CoderDojoTC/python-minecraft",
"path": "lab-server/controller/lsc/model/instance.py",
"copies": "1",
"size": "8775",
"license": "mit",
"hash": -1801139819291748000,
"line_mean": 33.5472440945,
"line_max": 96,
"alpha_frac": 0.5582905983,
"autogenerated": false,
"ratio": 4.243230174081238,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5301520772381237,
"avg_score": null,
"num_lines": null
} |
"""Abstraction of an OF table."""
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2017 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import struct
from faucet import valve_of
class ValveTable(object):
    """Wrapper for an OpenFlow table."""

    def __init__(self, table_id, name, restricted_match_types,
                 flow_cookie, notify_flow_removed=False):
        self.table_id = table_id
        self.name = name
        # When provided, match() asserts every used field is in this set.
        self.restricted_match_types = None
        if restricted_match_types:
            self.restricted_match_types = set(restricted_match_types)
        # Cookie stamped onto every flow built by flowmod().
        self.flow_cookie = flow_cookie
        self.notify_flow_removed = notify_flow_removed

    def match(self, in_port=None, vlan=None,
              eth_type=None, eth_src=None,
              eth_dst=None, eth_dst_mask=None,
              ipv6_nd_target=None, icmpv6_type=None,
              nw_proto=None, nw_src=None, nw_dst=None):
        """Compose an OpenFlow match rule."""
        match_dict = valve_of.build_match_dict(
            in_port, vlan, eth_type, eth_src,
            eth_dst, eth_dst_mask, ipv6_nd_target, icmpv6_type,
            nw_proto, nw_src, nw_dst)
        match = valve_of.match(match_dict)
        if self.restricted_match_types is not None:
            # Fail loudly if a rule uses a field this table doesn't allow.
            for match_type in match_dict:
                assert match_type in self.restricted_match_types, '%s match in table %s' % (
                    match_type, self.name)
        return match

    def flowmod(self, match=None, priority=None,
                inst=None, command=valve_of.ofp.OFPFC_ADD, out_port=0,
                out_group=0, hard_timeout=0, idle_timeout=0):
        """Helper function to construct a flow mod message with cookie."""
        if match is None:
            match = self.match()
        if priority is None:
            priority = 0 # self.dp.lowest_priority
        if inst is None:
            inst = []
        flags = 0
        if self.notify_flow_removed:
            # Ask the switch to report removal of this flow.
            flags = valve_of.ofp.OFPFF_SEND_FLOW_REM
        return valve_of.flowmod(
            self.flow_cookie,
            command,
            self.table_id,
            priority,
            out_port,
            out_group,
            match,
            inst,
            hard_timeout,
            idle_timeout,
            flags)

    def flowdel(self, match=None, priority=None, out_port=valve_of.ofp.OFPP_ANY, strict=False):
        """Delete matching flows from a table."""
        command = valve_of.ofp.OFPFC_DELETE
        if strict:
            # Strict delete matches priority exactly.
            command = valve_of.ofp.OFPFC_DELETE_STRICT
        return [
            self.flowmod(
                match=match,
                priority=priority,
                command=command,
                out_port=out_port,
                out_group=valve_of.ofp.OFPG_ANY)]

    def flowdrop(self, match=None, priority=None, hard_timeout=0):
        """Add drop matching flow to a table."""
        # An empty instruction list means matching packets are dropped.
        return self.flowmod(
            match=match,
            priority=priority,
            hard_timeout=hard_timeout,
            inst=[])

    def flowcontroller(self, match=None, priority=None, inst=None, max_len=96):
        """Add flow outputting to controller."""
        if inst is None:
            inst = []
        # Output-to-controller action is prepended to any caller instructions.
        return self.flowmod(
            match=match,
            priority=priority,
            inst=[valve_of.apply_actions(
                [valve_of.output_controller(max_len)])] + inst)
class ValveGroupEntry(object):
    """Abstraction for a single OpenFlow group entry."""

    def __init__(self, table, group_id, buckets):
        """Record the owning table and ID, then store the initial buckets."""
        self.table = table
        self.group_id = group_id
        self.update_buckets(buckets)

    def update_buckets(self, buckets):
        """Replace this entry's buckets (kept as an immutable tuple)."""
        self.buckets = tuple(buckets)

    def add(self):
        """Return flows to add this entry to the group table."""
        # Delete any stale entry first, then add and register ourselves.
        msgs = [
            self.delete(),
            valve_of.groupadd(group_id=self.group_id, buckets=self.buckets),
        ]
        self.table.entries[self.group_id] = self
        return msgs

    def modify(self):
        """Return flow to modify an existing group entry."""
        assert self.group_id in self.table.entries
        self.table.entries[self.group_id] = self
        return valve_of.groupmod(group_id=self.group_id, buckets=self.buckets)

    def delete(self):
        """Return flow to delete an existing group entry."""
        if self.group_id in self.table.entries:
            del self.table.entries[self.group_id]
        return valve_of.groupdel(group_id=self.group_id)
class ValveGroupTable(object):
    """Wrap access to the OpenFlow group table."""

    def __init__(self):
        # Per-instance map of group_id -> ValveGroupEntry. This was
        # previously a mutable *class* attribute, so every ValveGroupTable
        # instance silently shared one entries dict (until delete_all()
        # happened to rebind it on the instance).
        self.entries = {}

    @staticmethod
    def group_id_from_str(key_str):
        """Return a group ID based on a string key.

        The ID is the first 4 bytes of SHA-256(key_str), unpacked
        little-endian.
        """
        # TODO: does not handle collisions
        digest = hashlib.sha256(key_str.encode('utf-8')).digest()
        return struct.unpack('<L', digest[:4])[0]

    def get_entry(self, group_id, buckets):
        """Return the entry for group_id, creating it or replacing its
        buckets if it already exists."""
        if group_id in self.entries:
            self.entries[group_id].update_buckets(buckets)
        else:
            self.entries[group_id] = ValveGroupEntry(
                self, group_id, buckets)
        return self.entries[group_id]

    def delete_all(self):
        """Delete all groups."""
        self.entries = {}
        return valve_of.groupdel()
| {
"repo_name": "byllyfish/faucet",
"path": "faucet/valve_table.py",
"copies": "1",
"size": "5959",
"license": "apache-2.0",
"hash": 1335451933375703600,
"line_mean": 34.2603550296,
"line_max": 95,
"alpha_frac": 0.5982547407,
"autogenerated": false,
"ratio": 3.819871794871795,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4918126535571795,
"avg_score": null,
"num_lines": null
} |
"""Abstraction point allowing use with numpy or Numeric
Chooses numpy if available because when it's installed
Numeric tends to be a bit flaky...
"""
from numpy import *
try:
from vrml_accelerate import tmatrixaccel
from vrml_accelerate import frustcullaccel
except ImportError as err:
tmatrixaccel = frustcullaccel = None
# why did this get taken out? Is divide now safe?
# Re-export numpy names under the Numeric-era spellings this package uses.
amin = amin
amax = amax
divide_safe = divide  # Numeric's divide_safe; plain numpy divide stands in here
# Now deal with differing numpy APIs...
# Probe array used only to detect which typecode API is available below.
a = array([1,2,3],'i')
ArrayType = ndarray # alias removed in later versions
# Take's API changed from Numeric, we've updated to
# always provide axis now...
# Pick a typeCode() implementation based on which array attribute the
# installed numpy exposes (detected once, via the probe array above).
if hasattr( a, '__array_typestr__' ):
    def typeCode( a ):
        """Retrieve the typecode for the given array

        Depending on whether you access the classic or new API
        you have different access methods, so we have to use
        the typecode() method if __array_typestr__ isn't there.
        """
        try:
            return a.__array_typestr__
        except AttributeError as err:
            # Classic Numeric-style array: fall back to its typecode() method.
            return a.typecode()
else:
    def typeCode( a ):
        """Retrieve the typecode for the given array

        Depending on whether you access the classic or new API
        you have different access methods, so we have to use
        the typecode() method if .dtype.char isn't there.
        """
        try:
            return a.dtype.char
        except AttributeError as err:
            # Classic Numeric-style array: fall back to its typecode() method.
            return a.typecode()
del a  # probe array no longer needed once typeCode() is chosen
implementation_name = 'numpy'  # which numeric backend this module wraps
try:
    # PyVRML97 is from before numpy printed errors, we explicitly do not care
    # about the divide-by-zero, which commonly happens in mesh data processing
    # TODO: likely should rework the mesh processing to check manually and remove
    # this sledge-hammer approach
    seterr(all='ignore')
except Exception as err:
    # Best effort only: tolerate numpy builds/environments where seterr fails.
    pass
def safeCompare( first, second ):
    """Watch out for pointless numpy truth-value checks"""
    comparison = first == second
    try:
        return bool(comparison)
    except ValueError as err:
        # Multi-element arrays refuse plain truth testing; treat
        # "any element equal" as a match, as scalar bool() would.
        return bool(any(comparison))
def contiguous( a ):
    """Force to a contiguous array"""
    # Rebuilding via the array constructor guarantees contiguity while
    # preserving the source array's typecode.
    return array(a, typeCode(a))
| {
"repo_name": "alexus37/AugmentedRealityChess",
"path": "pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/vrml/arrays.py",
"copies": "2",
"size": "2169",
"license": "mit",
"hash": -8949563515783074000,
"line_mean": 32.3692307692,
"line_max": 82,
"alpha_frac": 0.6588289534,
"autogenerated": false,
"ratio": 4.1393129770992365,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02118812171387264,
"num_lines": 65
} |
""" Abstractions for handling resources via Amazon Web Services (AWS) API
The intention of these utilities is to allow other infrastructure to
interact with AWS without having to understand AWS APIs. Additionally,
this module provides helper functions for the most common queries required
to manipulate and test a DC/OS cluster, which would be otherwise cumbersome
to do with AWS API calls only
BotoWrapper: AWS credentials and region bound to various helper methods
CfStack: Generic representation of a CloudFormation stack
DcosCfStack: Represents DC/OS in a simple deployment
DcosZenCfStack: Represents DC/OS deployed from a zen template
MasterStack: thin wrapper for master stack in a zen template
PrivateAgentStack: thin wrapper for public agent stack in a zen template
PublicAgentStack: thin wrapper for public agent stack in a zen template
BareClusterCfStack: Represents a homogeneous cluster of hosts with a specific AMI
"""
import logging
import os
import boto3
import pkg_resources
from botocore.exceptions import ClientError, WaiterError
from retrying import retry
import dcos_launch
from dcos_test_utils.helpers import Host, SshInfo
log = logging.getLogger(__name__)
def template_by_instance_type(instance_type):
    """Return the decoded VPC cluster template text for an instance type.

    c4/t2/m4 families get the EBS-only template; every other family uses
    the instance-storage template.
    """
    family = instance_type.split('.')[0]
    if family in ('c4', 't2', 'm4'):
        resource = 'templates/vpc-ebs-only-cluster-template.json'
    else:
        resource = 'templates/vpc-cluster-template.json'
    raw_template = pkg_resources.resource_string(dcos_launch.__name__, resource)
    return raw_template.decode('utf-8')
def param_dict_to_aws_format(user_parameters):
    """Convert a {key: value} dict into CloudFormation's parameter-list
    format; keys and values are stringified since boto3 only accepts strings."""
    aws_parameters = []
    for key, value in user_parameters.items():
        aws_parameters.append(
            {'ParameterKey': str(key), 'ParameterValue': str(value)})
    return aws_parameters
def tag_dict_to_aws_format(tag_dict: dict):
    """Convert a {key: value} dict into the AWS tag-list format."""
    aws_tags = []
    for key, value in tag_dict.items():
        aws_tags.append({'Key': key, 'Value': value})
    return aws_tags
def retry_on_rate_limiting(e: Exception):
    """Retry predicate for @retry: True on AWS rate-limit errors, otherwise
    the exception is re-raised so the retry decorator propagates it."""
    if isinstance(e, ClientError):
        error_code = e.response['Error']['Code']
    elif isinstance(e, WaiterError):
        error_code = e.last_response['Error']['Code']
    else:
        raise e
    if error_code not in ('Throttling', 'RequestLimitExceeded'):
        raise e
    log.warning('AWS API Limiting error: {}'.format(error_code))
    return True
@retry(wait_exponential_multiplier=1000, wait_exponential_max=20 * 60 * 1000, retry_on_exception=retry_on_rate_limiting)
def instances_to_hosts(instances):
    """Map boto EC2 instance objects to Host(private_ip, public_ip) tuples."""
    hosts = []
    for instance in instances:
        hosts.append(Host(instance.private_ip_address, instance.public_ip_address))
    return hosts
@retry(wait_exponential_multiplier=1000, wait_exponential_max=20 * 60 * 1000, retry_on_exception=retry_on_rate_limiting)
def fetch_stack(stack_name, boto_wrapper):
    """Return the most specific stack wrapper for an existing CF stack.

    The concrete class is sniffed from the stack's logical resource IDs;
    unrecognized stacks fall back to the generic CfStack.
    """
    log.debug('Attemping to fetch AWS Stack: {}'.format(stack_name))
    markers = {
        'MasterStack': (DcosZenCfStack, 'Using Zen DC/OS Cloudformation interface'),
        'MasterServerGroup': (DcosCfStack, 'Using Basic DC/OS Cloudformation interface'),
        'BareServerAutoScale': (BareClusterCfStack, 'Using Bare Cluster Cloudformation interface'),
    }
    stack = boto_wrapper.resource('cloudformation').Stack(stack_name)
    for resource in stack.resource_summaries.all():
        if resource.logical_resource_id in markers:
            stack_class, debug_message = markers[resource.logical_resource_id]
            log.debug(debug_message)
            return stack_class(stack_name, boto_wrapper)
    log.warning('No recognized resources found; using generic stack')
    return CfStack(stack_name, boto_wrapper)
class BotoWrapper:
    """Bundles an AWS region with a boto3 session and wraps the EC2, S3 and
    CloudFormation calls this module needs, with retry on rate limiting."""

    def __init__(self, region):
        self.region = region
        self.session = boto3.session.Session()

    @retry(wait_exponential_multiplier=1000, wait_exponential_max=20 * 60 * 1000,
           retry_on_exception=retry_on_rate_limiting)
    def client(self, name):
        """Return a low-level boto3 client for service `name` in this region."""
        return self.session.client(service_name=name, region_name=self.region)

    def resource(self, name, region=None):
        """Return a boto3 resource for `name`; region defaults to self.region."""
        region = self.region if region is None else region
        return self.session.resource(service_name=name, region_name=region)

    @retry(wait_exponential_multiplier=1000, wait_exponential_max=20 * 60 * 1000,
           retry_on_exception=retry_on_rate_limiting)
    def create_key_pair(self, key_name):
        """Returns private key of newly generated pair
        """
        log.info('Creating KeyPair: {}'.format(key_name))
        key = self.client('ec2').create_key_pair(KeyName=key_name)
        return key['KeyMaterial']

    def get_service_resources(self, service, resource_name):
        """Return resources and boto wrapper in every region for the given boto3 service and resource type."""
        for region in aws_region_names:
            # line below is needed because function get_all_stacks needs to copy the boto wrapper with the correct
            # region when initializing each CfStack object
            # NOTE(review): this mutates self.region as a side effect and leaves
            # it pointing at the last region visited -- callers beware.
            self.region = region['id']
            # It is common to have access to an account, but not all regions. In that case, we still want to be able
            # to pull whatever resources we can from the regions we have access to
            try:
                yield from getattr(self.resource(service, region['id']), resource_name).all()
            except ClientError as e:
                if e.response['Error']['Code'] == 'UnauthorizedOperation':
                    log.debug("Failed getting resources ({}) for region {} with exception: {}".format(
                        resource_name, self.region, repr(e)))
                else:
                    raise e

    def get_all_vpcs(self):
        """Yield every EC2 VPC visible in every known region."""
        yield from self.get_service_resources('ec2', 'vpcs')

    def get_all_instances(self):
        """Yield every EC2 instance visible in every known region."""
        yield from self.get_service_resources('ec2', 'instances')

    def get_all_stacks(self):
        """Get all AWS CloudFormation stacks in all regions."""
        for stack in self.get_service_resources('cloudformation', 'stacks'):
            # self.region was updated by get_service_resources, so the copied
            # wrapper is bound to the region the stack was found in.
            boto_wrapper_copy = BotoWrapper(self.region)
            yield CfStack(stack.stack_name, boto_wrapper_copy)

    def get_all_buckets(self):
        """Get all S3 buckets in all regions."""
        yield from self.get_service_resources('s3', 'buckets')

    def get_all_keypairs(self):
        """Get all EC2 key pairs in all regions."""
        yield from self.get_service_resources('ec2', 'key_pairs')

    @retry(wait_exponential_multiplier=1000, wait_exponential_max=20 * 60 * 1000,
           retry_on_exception=retry_on_rate_limiting)
    def delete_key_pair(self, key_name):
        """Delete the named EC2 key pair in this wrapper's region."""
        log.info('Deleting KeyPair: {}'.format(key_name))
        self.resource('ec2').KeyPair(key_name).delete()

    def create_stack(
            self,
            name: str,
            parameters: dict,
            template_url: str=None,
            template_body: str=None,
            deploy_timeout: int=60,
            disable_rollback: bool=False,
            tags=None):
        """Pulls template and checks user params versus temlate params.

        Does simple casting of strings or numbers
        Starts stack creation if validation is successful

        Exactly one of template_url/template_body must be given; an optional
        DCOS_LAUNCH_ROLE_ARN env var supplies the service role for the stack.
        """
        log.info('Requesting AWS CloudFormation: {}'.format(name))
        role_arn = os.getenv('DCOS_LAUNCH_ROLE_ARN')
        args = {
            'StackName': name,
            'DisableRollback': disable_rollback,
            'TimeoutInMinutes': deploy_timeout,
            'Capabilities': ['CAPABILITY_IAM'],
            # this python API only accepts data in string format; cast as string here
            # so that we may pass parameters directly from yaml (which parses numbers as non-strings)
            'Parameters': param_dict_to_aws_format(parameters)}
        if template_body is not None:
            assert template_url is None, 'tempate_body and template_url cannot be supplied simultaneously'
            args['TemplateBody'] = template_body
        else:
            assert template_url is not None, 'template_url must be set if template_body is not provided'
            args['TemplateURL'] = template_url
        if tags is not None:
            args['Tags'] = tag_dict_to_aws_format(tags)
        if role_arn is not None:
            log.info('Passing effective role as per DCOS_LAUNCH_ROLE_ARN')
            args['RoleARN'] = role_arn
        return self.resource('cloudformation').create_stack(**args)

    def create_vpc_tagged(self, cidr, name_tag):
        """Create a VPC with the given CIDR, wait for it, tag it, return its ID."""
        ec2 = self.client('ec2')
        log.info('Creating new VPC...')
        vpc_id = ec2.create_vpc(CidrBlock=cidr, InstanceTenancy='default')['Vpc']['VpcId']
        ec2.get_waiter('vpc_available').wait(VpcIds=[vpc_id])
        ec2.create_tags(Resources=[vpc_id], Tags=[{'Key': 'Name', 'Value': name_tag}])
        log.info('Created VPC with ID: {}'.format(vpc_id))
        return vpc_id

    def create_internet_gateway_tagged(self, vpc_id, name_tag):
        """Create an internet gateway, attach it to vpc_id, tag it, return its ID."""
        ec2 = self.client('ec2')
        log.info('Creating new InternetGateway...')
        gateway_id = ec2.create_internet_gateway()['InternetGateway']['InternetGatewayId']
        ec2.attach_internet_gateway(InternetGatewayId=gateway_id, VpcId=vpc_id)
        ec2.create_tags(Resources=[gateway_id], Tags=[{'Key': 'Name', 'Value': name_tag}])
        log.info('Created internet gateway with ID: {}'.format(gateway_id))
        return gateway_id

    def create_subnet_tagged(self, vpc_id, cidr, name_tag):
        """Create a subnet in vpc_id with the given CIDR, tag it, wait for it,
        and return its ID."""
        ec2 = self.client('ec2')
        log.info('Creating new Subnet...')
        subnet_id = ec2.create_subnet(VpcId=vpc_id, CidrBlock=cidr)['Subnet']['SubnetId']
        ec2.create_tags(Resources=[subnet_id], Tags=[{'Key': 'Name', 'Value': name_tag}])
        ec2.get_waiter('subnet_available').wait(SubnetIds=[subnet_id])
        log.info('Created subnet with ID: {}'.format(subnet_id))
        return subnet_id

    @retry(wait_exponential_multiplier=1000, wait_exponential_max=20 * 60 * 1000,
           retry_on_exception=retry_on_rate_limiting)
    def delete_subnet(self, subnet_id):
        """Delete the given subnet."""
        log.info('Deleting subnet: {}'.format(subnet_id))
        self.client('ec2').delete_subnet(SubnetId=subnet_id)

    @retry(wait_exponential_multiplier=1000, wait_exponential_max=20 * 60 * 1000,
           retry_on_exception=retry_on_rate_limiting)
    def delete_internet_gateway(self, gateway_id):
        """Detach the gateway from any attached VPCs, then delete it."""
        ig = self.resource('ec2').InternetGateway(gateway_id)
        for vpc in ig.attachments:
            vpc_id = vpc['VpcId']
            log.info('Detaching gateway {} from vpc {}'.format(gateway_id, vpc_id))
            ig.detach_from_vpc(VpcId=vpc_id)
        log.info('Deleting internet gateway: {}'.format(gateway_id))
        ig.delete()

    @retry(wait_exponential_multiplier=1000, wait_exponential_max=20 * 60 * 1000,
           retry_on_exception=retry_on_rate_limiting)
    def delete_vpc(self, vpc_id):
        """Delete the given VPC (must already be empty of dependencies)."""
        log.info('Deleting vpc: {}'.format(vpc_id))
        self.client('ec2').delete_vpc(VpcId=vpc_id)

    @retry(wait_exponential_multiplier=1000, wait_exponential_max=20 * 60 * 1000,
           retry_on_exception=retry_on_rate_limiting)
    def get_auto_scaling_instances(self, asg_physical_resource_id):
        """ Returns instance objects as described here:
        http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#instance
        """
        ec2 = self.resource('ec2')
        return [ec2.Instance(i['InstanceId']) for asg in self.client('autoscaling').
                describe_auto_scaling_groups(
                    AutoScalingGroupNames=[asg_physical_resource_id])
                ['AutoScalingGroups'] for i in asg['Instances']]

    @retry(wait_exponential_multiplier=1000, wait_exponential_max=20 * 60 * 1000,
           retry_on_exception=retry_on_rate_limiting)
    def empty_and_delete_bucket(self, bucket_id):
        """ Buckets must be empty to be deleted. Additionally, there is no high-level
        method to check if buckets exist, so the try/except statement is required
        """
        try:
            # just check to see if the head is accessible before continuing
            self.resource('s3').meta.client.head_bucket(Bucket=bucket_id)
            bucket = self.resource('s3').Bucket(bucket_id)
        except ClientError:
            log.exception('Bucket could not be fetched')
            log.warning('S3 bucket not found when expected during delete, moving on...')
            return
        log.info('Starting bucket {} deletion'.format(bucket))
        # S3 requires the bucket be emptied object-by-object before deletion.
        for obj in bucket.objects.all():
            obj.delete()
        log.info('Trying deleting bucket {} itself'.format(bucket))
        bucket.delete()
class CfStack:
    """Generic wrapper around one CloudFormation stack: status polling,
    tagging, parameter lookup and deletion."""

    def __init__(self, stack_name, boto_wrapper):
        self.boto_wrapper = boto_wrapper
        self.stack = self.boto_wrapper.resource('cloudformation').Stack(stack_name)

    @property
    def name(self):
        # The stack's CloudFormation name (not its ARN/stack_id).
        return self.stack.stack_name

    def wait_for_complete(self, transition_states: list, end_states: list) -> str:
        """
        Note: Do not use unwrapped boto waiter class, it has very poor error handling

        Stacks can have one of the following statuses. See:
        http://boto3.readthedocs.io/en/latest/reference/
        services/cloudformation.html#CloudFormation.Client.describe_stacks

        CREATE_IN_PROGRESS, CREATE_FAILED, CREATE_COMPLETE
        ROLLBACK_IN_PROGRESS, ROLLBACK_FAILED, ROLLBACK_COMPLETE
        DELETE_IN_PROGRESS, DELETE_FAILED, DELETE_COMPLETE
        UPDATE_IN_PROGRESS, UPDATE_COMPLETE_CLEANUP_IN_PROGRESS
        UPDATE_COMPLETE, UPDATE_ROLLBACK_IN_PROGRESS
        UPDATE_ROLLBACK_FAILED, UPDATE_ROLLBACK_COMPLETE
        UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS

        :param transition_states: as long as the current state is in one of these, the wait continues
        :param end_states: when the current state becomes one of these, the wait stops as the operation completed
        """
        log.info('Waiting for stack operation to complete')

        # wait for 60 seconds before retry
        # wait_loop returns None while still transitioning, which triggers the
        # retry; an unexpected state raises after dumping the stack events.
        @retry(wait_fixed=60 * 1000, retry_on_result=lambda result: result is None, retry_on_exception=lambda ex: False)
        def wait_loop():
            self.refresh_stack()
            stack_status = self.get_status()
            if stack_status in end_states:
                log.info("Final stack status: " + stack_status)
                return stack_status
            log.info("Stack status {status}. Continuing to wait... ".format(status=stack_status))
            if stack_status not in transition_states:
                for event in self.get_stack_events():
                    log.error('Stack Events: {}'.format(event))
                raise Exception('StackStatus changed unexpectedly to: {}'.format(stack_status))
        status = wait_loop()
        return status

    @retry(wait_exponential_multiplier=1000, wait_exponential_max=20 * 60 * 1000,
           retry_on_exception=retry_on_rate_limiting)
    def get_stack_events(self):
        """Return the raw CloudFormation event list for this stack."""
        log.debug('Requesting stack events')
        return self.boto_wrapper.client('cloudformation').describe_stack_events(
            StackName=self.stack.stack_id)['StackEvents']

    @retry(wait_exponential_multiplier=1000, wait_exponential_max=20 * 60 * 1000,
           retry_on_exception=retry_on_rate_limiting)
    def update_tags(self, tags: dict):
        """Merge `tags` into the stack's existing tags (new keys win) and
        push the update; parameters and template are left unchanged."""
        cf_tags = tag_dict_to_aws_format(tags)
        new_keys = tags.keys()
        # Preserve any existing tag whose key is not being overwritten.
        for tag in self.stack.tags:
            if tag['Key'] not in new_keys:
                cf_tags.append(tag)
        log.info('Updating tags of stack {} to {}'.format(self.stack.name, tags))
        return self.stack.update(Capabilities=['CAPABILITY_IAM'],
                                 Parameters=self.stack.parameters,
                                 UsePreviousTemplate=True,
                                 Tags=cf_tags)

    @retry(wait_exponential_multiplier=1000, wait_exponential_max=20 * 60 * 1000,
           retry_on_exception=retry_on_rate_limiting)
    def refresh_stack(self):
        # we need to refresh the stack to get the latest info
        self.stack = self.boto_wrapper.resource('cloudformation').Stack(self.name)
        return self.stack

    @retry(wait_exponential_multiplier=1000, wait_exponential_max=20 * 60 * 1000,
           retry_on_exception=retry_on_rate_limiting)
    def get_status(self):
        """Return the stack's current status string (refreshes first)."""
        self.refresh_stack()
        return self.stack.stack_status

    def get_parameter(self, param):
        """Returns param if in stack parameters, else returns None
        """
        # NOTE(review): contrary to the docstring, a missing key raises
        # KeyError rather than returning None.
        for p in self.stack.parameters:
            if p['ParameterKey'] == param:
                return p['ParameterValue']
        raise KeyError('Key not found in template parameters: {}. Parameters: {}'.
                       format(param, self.stack.parameters))

    @retry(wait_exponential_multiplier=1000, wait_exponential_max=20 * 60 * 1000,
           retry_on_exception=retry_on_rate_limiting)
    def delete(self):
        """Initiate (but do not wait for) deletion of this stack."""
        log.info('Deleting stack: {}'.format(self.stack.stack_name))
        self.stack.delete()
        log.info('Delete successfully initiated for {}'.format(self.stack.stack_name))
class CleanupS3BucketMixin(CfStack):
    """ Exhibitor S3 Buckets are not deleted with the rest of the resources
    in the cloudformation template so this method must be used to prevent
    leaking cloud resources.
    """
    def delete(self):
        """Empty and remove the exhibitor bucket, then delete the stack itself."""
        try:
            bucket_id = self.stack.Resource('ExhibitorS3Bucket').physical_resource_id
            self.boto_wrapper.empty_and_delete_bucket(bucket_id)
        except Exception:
            # Exhibitor S3 Bucket might not be a resource
            log.exception('Failed to get S3 bucket physical ID')
        super().delete()
class DcosCfStack(CleanupS3BucketMixin):
    """ This abstraction will work for a simple DC/OS template.
    A simple template has its exhibitor bucket and auto scaling groups
    for each of the master, public agent, and private agent groups
    """
    @classmethod
    def create(cls, stack_name: str, template_url: str, public_agents: int, private_agents: int,
               admin_location: str, key_pair_name: str, boto_wrapper: BotoWrapper):
        """Launch a simple DC/OS stack; returns (stack wrapper, ssh_info)."""
        parameters = {
            'KeyName': key_pair_name,
            'AdminLocation': admin_location,
            'PublicSlaveInstanceCount': str(public_agents),
            'SlaveInstanceCount': str(private_agents)}
        boto_wrapper.create_stack(stack_name, parameters, template_url=template_url)
        # Simple templates always deploy CoreOS hosts, hence the fixed SSH info.
        return cls(stack_name, boto_wrapper), SSH_INFO['coreos']

    @property
    def master_instances(self):
        # Boto EC2 instances of the master auto scaling group.
        yield from self.boto_wrapper.get_auto_scaling_instances(
            self.stack.Resource('MasterServerGroup').physical_resource_id)

    @property
    def private_agent_instances(self):
        # Boto EC2 instances of the private agent auto scaling group.
        yield from self.boto_wrapper.get_auto_scaling_instances(
            self.stack.Resource('SlaveServerGroup').physical_resource_id)

    @property
    def public_agent_instances(self):
        # Boto EC2 instances of the public agent auto scaling group.
        yield from self.boto_wrapper.get_auto_scaling_instances(
            self.stack.Resource('PublicSlaveServerGroup').physical_resource_id)

    def get_master_ips(self):
        """Return Host(private_ip, public_ip) tuples for all masters."""
        return instances_to_hosts(self.master_instances)

    def get_private_agent_ips(self):
        """Return Host(private_ip, public_ip) tuples for all private agents."""
        return instances_to_hosts(self.private_agent_instances)

    def get_public_agent_ips(self):
        """Return Host(private_ip, public_ip) tuples for all public agents."""
        return instances_to_hosts(self.public_agent_instances)
class MasterStack(CleanupS3BucketMixin):
    """Thin wrapper for the master sub-stack of a zen template."""

    @property
    def instances(self):
        # Boto EC2 instances of the master auto scaling group.
        yield from self.boto_wrapper.get_auto_scaling_instances(
            self.stack.Resource('MasterServerGroup').physical_resource_id)
class PrivateAgentStack(CfStack):
    """Thin wrapper for the private agent sub-stack of a zen template."""

    @property
    def instances(self):
        # Boto EC2 instances of the private agent auto scaling group.
        yield from self.boto_wrapper.get_auto_scaling_instances(
            self.stack.Resource('PrivateAgentServerGroup').physical_resource_id)
class PublicAgentStack(CfStack):
    """Thin wrapper for the public agent sub-stack of a zen template."""

    @property
    def instances(self):
        # Boto EC2 instances of the public agent auto scaling group.
        yield from self.boto_wrapper.get_auto_scaling_instances(
            self.stack.Resource('PublicAgentServerGroup').physical_resource_id)
class DcosZenCfStack(CfStack):
    """Zen stacks are stacks that have the masters, infra, public agents, and private
    agents split into resources stacks under one zen stack
    """
    @classmethod
    def create(cls, stack_name, boto_wrapper, template_url,
               public_agents, private_agents, key_pair_name,
               private_agent_type, public_agent_type, master_type,
               gateway, vpc, private_subnet, public_subnet):
        """Launch a zen DC/OS stack; returns (stack wrapper, ssh_info).

        SSH info is derived from an OS token parsed out of the template URL's
        filename; an unrecognized URL format raises after logging.
        """
        parameters = {
            'KeyName': key_pair_name,
            'Vpc': vpc,
            'InternetGateway': gateway,
            'MasterInstanceType': master_type,
            'PublicAgentInstanceCount': public_agents,
            'PublicAgentInstanceType': public_agent_type,
            'PublicSubnet': public_subnet,
            'PrivateAgentInstanceCount': private_agents,
            'PrivateAgentInstanceType': private_agent_type,
            'PrivateSubnet': private_subnet}
        boto_wrapper.create_stack(stack_name, parameters, template_url=template_url)
        os_string = None
        try:
            # e.g. '.../cloudformation/el7-advanced.json' -> 'el7'
            os_string = template_url.split('/')[-1].split('.')[-2].split('-')[0]
            ssh_info = CF_OS_SSH_INFO[os_string]
        except (KeyError, IndexError):
            log.critical('Unexpected template URL: {}'.format(template_url))
            if os_string is not None:
                log.critical('No SSH info for OS string: {}'.format(os_string))
            raise
        return cls(stack_name, boto_wrapper), ssh_info

    @property
    def master_stack(self):
        """Wrapper for the nested master stack."""
        return MasterStack(
            self.stack.Resource('MasterStack').physical_resource_id, self.boto_wrapper)

    @property
    def private_agent_stack(self):
        """Wrapper for the nested private agent stack."""
        return PrivateAgentStack(
            self.stack.Resource('PrivateAgentStack').physical_resource_id, self.boto_wrapper)

    @property
    def public_agent_stack(self):
        """Wrapper for the nested public agent stack."""
        return PublicAgentStack(
            self.stack.Resource('PublicAgentStack').physical_resource_id, self.boto_wrapper)

    @property
    def infrastructure(self):
        """Wrapper for the nested infrastructure stack."""
        return CfStack(self.stack.Resource('Infrastructure').physical_resource_id, self.boto_wrapper)

    def delete(self):
        """Best-effort delete of every nested stack, then the zen stack itself."""
        log.info('Starting deletion of Zen CF stack')
        # These resources might have failed to create or been removed prior, except their
        # failures and log it out
        for nested_stack in ['infrastructure', 'master_stack', 'private_agent_stack', 'public_agent_stack']:
            try:
                s = getattr(self, nested_stack)
                s.delete()
            except Exception:
                log.exception('Delete encountered an error!')
        super().delete()

    @property
    def master_instances(self):
        yield from self.master_stack.instances

    @property
    def private_agent_instances(self):
        yield from self.private_agent_stack.instances

    @property
    def public_agent_instances(self):
        yield from self.public_agent_stack.instances

    def get_master_ips(self):
        """Return Host(private_ip, public_ip) tuples for all masters."""
        return instances_to_hosts(self.master_instances)

    def get_private_agent_ips(self):
        """Return Host(private_ip, public_ip) tuples for all private agents."""
        return instances_to_hosts(self.private_agent_instances)

    def get_public_agent_ips(self):
        """Return Host(private_ip, public_ip) tuples for all public agents."""
        return instances_to_hosts(self.public_agent_instances)
class BareClusterCfStack(CfStack):
    """Homogeneous cluster of plain hosts (no DC/OS) from a bundled template."""

    @classmethod
    def create(cls, stack_name, instance_type, instance_os, instance_count,
               admin_location, key_pair_name, boto_wrapper):
        """Launch a bare cluster for a named OS; returns (stack, ssh_info).

        The AMI is looked up from the OS_AMIS table for the wrapper's region.
        """
        stack = cls.create_from_ami(
            stack_name,
            instance_type,
            OS_AMIS[instance_os][boto_wrapper.region],
            instance_count,
            admin_location,
            key_pair_name,
            boto_wrapper,
        )
        return stack, OS_SSH_INFO[instance_os]

    @classmethod
    def create_from_ami(cls, stack_name, instance_type, instance_ami, instance_count,
                        admin_location, key_pair_name, boto_wrapper):
        """Launch a bare cluster from an explicit AMI; returns the stack wrapper."""
        template = template_by_instance_type(instance_type)
        parameters = {
            'KeyName': key_pair_name,
            'AllowAccessFrom': admin_location,
            'ClusterSize': instance_count,
            'InstanceType': instance_type,
            'AmiCode': instance_ami,
        }
        boto_wrapper.create_stack(stack_name, parameters, template_body=template)
        return cls(stack_name, boto_wrapper)

    @property
    def instances(self):
        """ only represents the cluster instances (i.e. NOT bootstrap)
        """
        yield from self.boto_wrapper.get_auto_scaling_instances(
            self.stack.Resource('BareServerAutoScale').physical_resource_id)

    @property
    def bootstrap_instances(self):
        # The (single-instance) bootstrap host's auto scaling group.
        yield from self.boto_wrapper.get_auto_scaling_instances(
            self.stack.Resource('BootstrapServerPlaceholderAutoScale').physical_resource_id)

    def get_cluster_host_ips(self):
        """Return Host(private_ip, public_ip) tuples for all cluster hosts."""
        return instances_to_hosts(self.instances)

    def get_bootstrap_ip(self):
        """Return the Host tuple of the first (only) bootstrap instance."""
        return instances_to_hosts(self.bootstrap_instances)[0]
# Default SSH login user and home directory per base OS family.
SSH_INFO = {
    'centos': SshInfo(
        user='centos',
        home_dir='/home/centos',
    ),
    'coreos': SshInfo(
        user='core',
        home_dir='/home/core',
    ),
    'debian': SshInfo(
        user='admin',
        home_dir='/home/admin',
    ),
    'rhel': SshInfo(
        user='ec2-user',
        home_dir='/home/ec2-user',
    ),
    'ubuntu': SshInfo(
        user='ubuntu',
        home_dir='/home/ubuntu',
    ),
}

# Update these mappings to expand OS support
# Maps the instance_os names accepted by BareClusterCfStack.create to SSH info.
OS_SSH_INFO = {
    'cent-os-7.4': SSH_INFO['centos'],
    'cent-os-7-dcos-prereqs': SSH_INFO['centos'],
    'cent-os-7.4-with-docker-selinux-disabled': SSH_INFO['centos'],
    'cent-os-7.4-with-docker-selinux-enforcing': SSH_INFO['centos'],
    'cent-os-8.0-with-docker-selinux-permissive': SSH_INFO['centos'],
    'coreos': SSH_INFO['coreos'],
    'debian-8': SSH_INFO['debian'],
    'rhel-7-dcos-prereqs': SSH_INFO['rhel'],
    'ubuntu-16-04': SSH_INFO['ubuntu'],
}

# Maps the OS token parsed from a CF template URL (DcosZenCfStack.create) to SSH info.
CF_OS_SSH_INFO = {
    'el7': SSH_INFO['centos'],
    'coreos': SSH_INFO['coreos']
}
CENTOS_74_WITH_DOCKER_SELINUX_ENFORCING = {'ap-northeast-1': 'ami-0bc386484490ade7f',
'ap-northeast-2': 'ami-04be7998b246727cb',
'ap-south-1': 'ami-05df5a77e02a3e66f',
'ap-southeast-1': 'ami-0a7ca9fe50e8b6882',
'ap-southeast-2': 'ami-0fe85a17db4dc8cd3',
'ca-central-1': 'ami-0af6de696e00750aa',
'eu-central-1': 'ami-0fd78465e18a6450a',
'eu-west-1': 'ami-056a9758ebedad71a',
'eu-west-2': 'ami-06267aa2f48954032',
'eu-west-3': 'ami-0760a4919cd3e034f',
'sa-east-1': 'ami-047b3e4ef6a6d7be7',
'us-east-1': 'ami-079bfc2b0c5f1db87',
'us-east-2': 'ami-0f0494bd2aad99db9',
'us-west-1': 'ami-02af2dc49f253922c',
'us-west-2': 'ami-0ff76065de2567eec'}
OS_AMIS = {
'cent-os-7.4': {'ap-northeast-1': 'ami-965345f8',
'ap-southeast-1': 'ami-8af586e9',
'ap-southeast-2': 'ami-427d9c20',
'eu-central-1': 'ami-2d0cbc42',
'eu-west-1': 'ami-e46ea69d',
'sa-east-1': 'ami-a5acd0c9',
'us-east-1': 'ami-771beb0d',
'us-west-1': 'ami-866151e6',
'us-west-2': 'ami-a9b24bd1'},
'cent-os-7.4-with-docker-selinux-disabled': {'ap-northeast-1': 'ami-023fe9ba88dfc1339',
'ap-northeast-2': 'ami-085f7275040429a2f',
'ap-south-1': 'ami-07b913395ee5282df',
'ap-southeast-1': 'ami-06890ad7295bd4e4b',
'ap-southeast-2': 'ami-01a1c6ded405b43a9',
'ca-central-1': 'ami-010bd16a1ea7d010a',
'eu-central-1': 'ami-0b6a8b2453889f012',
'eu-west-1': 'ami-0f4101e8c6c46f86a',
'eu-west-2': 'ami-0c64993daba80da53',
'eu-west-3': 'ami-02f3169248abeab2f',
'sa-east-1': 'ami-0c6bf10f43f4ab65c',
'us-east-1': 'ami-0df90d83033b1c207',
'us-east-2': 'ami-07f48e9948906d95d',
'us-west-1': 'ami-0b1320a3d397fa07a',
'us-west-2': 'ami-0116dcbe0583de7ca'},
# https://github.com/dcos/dcos-images/blob/master/centos/8.0/aws/DCOS-1.14.0-beta/docker-18.09.1-ce/selinux_permissive/dcos_images.yaml # noqa
'cent-os-8.0-with-docker-selinux-permissive': {'us-east-1': 'ami-0c298469d9dcbba34',
'us-west-2': 'ami-0009a3f7859e07083'},
'cent-os-7.4-with-docker-selinux-enforcing': CENTOS_74_WITH_DOCKER_SELINUX_ENFORCING,
# cent-os-7-dcos-prereqs (CentOS 7.5) uses manually built AMIs for now, see DCOS-51289.
# All regions except us-west-2 & us-east-1 have been disabled.
'cent-os-7-dcos-prereqs': {'us-east-1': 'ami-006219aba10688d0b',
'us-west-2': 'ami-03daaf0b90fc7c71b'},
'coreos': {'ap-northeast-1': 'ami-884835ee',
'ap-southeast-1': 'ami-b9c280c5',
'ap-southeast-2': 'ami-04be7b66',
'eu-central-1': 'ami-862140e9',
'eu-west-1': 'ami-022d646e',
'sa-east-1': 'ami-022d646e',
'us-east-1': 'ami-3f061b45',
'us-west-1': 'ami-cc0900ac',
'us-west-2': 'ami-692faf11'},
'debian-8': {'ap-northeast-1': 'ami-fe54f3fe',
'ap-southeast-1': 'ami-60989c32',
'ap-southeast-2': 'ami-07e3993d',
'eu-central-1': 'ami-b092aaad',
'eu-west-1': 'ami-0ed89d79',
'sa-east-1': 'ami-a5bd3fb8',
'us-east-1': 'ami-8b9a63e0',
'us-west-1': 'ami-a5d621e1',
'us-west-2': 'ami-3d56520d'},
# Red Hat 7.4
'rhel-7-dcos-prereqs': {'ap-northeast-1': 'ami-9f2b90f9',
'ap-southeast-1': 'ami-56154835',
'ap-southeast-2': 'ami-4e52a72c',
'eu-central-1': 'ami-b78906d8',
'eu-west-1': 'ami-b372cfca',
'sa-east-1': 'ami-38b1f554',
'us-east-1': 'ami-78ed7402',
'us-west-1': 'ami-c96b51a9',
'us-west-2': 'ami-23aa725b'},
'ubuntu-16-04': {'ap-northeast-1': 'ami-0919cd68',
'ap-southeast-1': 'ami-42934921',
'ap-southeast-2': 'ami-623c0d01',
'eu-central-1': 'ami-a9a557c6',
'eu-west-1': 'ami-643d4217',
'sa-east-1': 'ami-60bd2d0c',
'us-east-1': 'ami-2ef48339',
'us-west-1': 'ami-a9a8e4c9',
'us-west-2': 'ami-746aba14'}
}
aws_region_names = [
{
'name': 'US West (N. California)',
'id': 'us-west-1'
},
{
'name': 'US West (Oregon)',
'id': 'us-west-2'
},
{
'name': 'US East (N. Virginia)',
'id': 'us-east-1'
},
{
'name': 'South America (Sao Paulo)',
'id': 'sa-east-1'
},
{
'name': 'EU (Ireland)',
'id': 'eu-west-1'
},
{
'name': 'EU (Frankfurt)',
'id': 'eu-central-1'
},
{
'name': 'Asia Pacific (Tokyo)',
'id': 'ap-northeast-1'
},
{
'name': 'Asia Pacific (Singapore)',
'id': 'ap-southeast-1'
},
{
'name': 'Asia Pacific (Sydney)',
'id': 'ap-southeast-2'
},
{
'name': 'Asia Pacific (Seoul)',
'id': 'ap-northeast-2'
},
{
'name': 'Asia Pacific (Mumbai)',
'id': 'ap-south-1'
},
{
'name': 'US East (Ohio)',
'id': 'us-east-2'
}]
| {
"repo_name": "dcos/dcos-launch",
"path": "dcos_launch/platforms/aws.py",
"copies": "1",
"size": "32722",
"license": "apache-2.0",
"hash": 7998396213300687000,
"line_mean": 41.9422572178,
"line_max": 146,
"alpha_frac": 0.5986492268,
"autogenerated": false,
"ratio": 3.7032593933906743,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4801908620190674,
"avg_score": null,
"num_lines": null
} |
# abstractions for LED strip
import time
import colorsys
import random as Random
from neopixel import *
# LED strip configuration:
# LED_COUNT = 300 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).
## LED_PIN = 10 # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 5 # DMA channel to use for generating signal (try 5)
# LED_BRIGHTNESS = 150 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53
LED_STRIP = ws.WS2811_STRIP_GRB # Strip type and colour ordering
# useful colors:
# Color(red, green, blue) -- the GRB wire order is presumably handled by the
# LED_STRIP type passed to Adafruit_NeoPixel (confirm against the library).
RED = Color(127, 0, 0)
BLUE = Color(0, 0, 255)  # was Color(0, 255, 0), which is identical to GREEN
GREEN = Color(0, 255, 0)
class Strip:
    def __init__(self, count, brightness):
        """Create and start a NeoPixel strip with `count` LEDs at `brightness` (0-255)."""
        self.count = count
        self.brightness = brightness
        self.strip = Adafruit_NeoPixel(self.count, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, self.brightness, LED_CHANNEL, LED_STRIP)
        self.strip.begin()
    def set_pixel_color(self, i, r, g, b):
        """Buffer pixel i with the given 8-bit r/g/b components (call show() to display)."""
        self.strip.setPixelColor(i, Color(r, g, b))
    def show(self):
        """Push the buffered pixel colors out to the LEDs."""
        self.strip.show()
def loop(self, color1, color2, wait_ms=100):
for i in range(self.strip.numPixels() / 2):
self.strip.setPixelColor(i, color1)
self.strip.setPixelColor(self.strip.numPixels() - i, color1)
self.strip.show()
time.sleep(wait_ms/1000.0)
for i in range(self.strip.numPixels() / 2):
self.strip.setPixelColor(self.strip.numPixels() / 2 - i, color2)
self.strip.setPixelColor(self.strip.numPixels() / 2 + i, color2)
self.strip.show()
time.sleep(wait_ms/1000.0)
def random(self):
for i in range(30):
color = Color(Random.randint(0, 255), Random.randint(0, 255), Random.randint(0, 255), Random.randint(0, 255))
for i in range(Random.randint(0,5)):
self.strip.setPixelColor(Random.randint(0, self.strip.numPixels()), color)
self.strip.show()
def allOneColor(self, color):
for i in range(self.strip.numPixels()):
self.strip.setPixelColor(i, color)
self.strip.show()
def twinkle(self, color, wait_ms=100):
for i in range(self.strip.numPixels() / 30):
self.strip.setPixelColor(Random.randint(0, self.strip.numPixels()), color)
self.strip.show()
time.sleep(.5)
def colorWipe(self, color, wait_ms=50):
"""Wipe color across display a pixel at a time."""
for i in range(self.strip.numPixels()):
self.strip.setPixelColor(i, color)
self.strip.show()
time.sleep(wait_ms/1000.0)
def theaterChase(self, color, wait_ms=50, iterations=10):
"""Movie theater light style chaser animation."""
for j in range(iterations):
for q in range(3):
for i in range(0, self.strip.numPixels(), 3):
self.strip.setPixelColor(i+q, color)
self.strip.show()
time.sleep(wait_ms/1000.0)
for i in range(0, self.strip.numPixels(), 3):
self.strip.setPixelColor(i+q, 0)
def rainbow(self, wait_ms=20, iterations=1):
"""Draw rainbow that fades across all pixels at once."""
for j in range(256*iterations):
for i in range(self.strip.numPixels()):
self.strip.setPixelColor(i, wheel((i+j) & 255))
self.strip.show()
time.sleep(wait_ms/1000.0)
def rainbowCycle(self, wait_ms=20, iterations=5):
"""Draw rainbow that uniformly distributes itself across all pixels."""
for j in range(256*iterations):
for i in range(self.strip.numPixels()):
self.strip.setPixelColor(i, self.wheel((int(i * 256 / self.strip.numPixels()) + j) & 255))
self.strip.show()
time.sleep(wait_ms/1000.0)
def theaterChaseRainbow(self, wait_ms=50):
"""Rainbow movie theater light style chaser animation."""
for j in range(256):
for q in range(3):
for i in range(0, self.strip.numPixels(), 3):
self.strip.setPixelColor(i+q, wheel((i+j) % 255))
self.strip.show()
time.sleep(wait_ms/1000.0)
for i in range(0, self.strip.numPixels(), 3):
self.strip.setPixelColor(i+q, 0)
def HLSColor(hue, lightness, saturation):
    """Build a strip Color from HLS given as hue 0-360, lightness/saturation 0-100."""
    r, g, b = colorsys.hls_to_rgb(hue / 360.0, lightness / 100.0, saturation / 100.0)
    return Color(int(r * 255), int(g * 255), int(b * 255))
def wheel(pos):
    """Generate rainbow colors across 0-255 positions."""
    if pos < 85:
        # Red -> green third of the wheel.
        return Color(pos * 3, 255 - pos * 3, 0)
    if pos < 170:
        # Green -> blue third.
        offset = pos - 85
        return Color(255 - offset * 3, 0, offset * 3)
    # Blue -> red third.
    offset = pos - 170
    return Color(0, offset * 3, 255 - offset * 3)
"repo_name": "ethanacm/the_snake_lights",
"path": "StrandMethods.py",
"copies": "1",
"size": "5211",
"license": "mit",
"hash": 9036758327119945000,
"line_mean": 37.8955223881,
"line_max": 134,
"alpha_frac": 0.5810784878,
"autogenerated": false,
"ratio": 3.3511254019292607,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9418510976719416,
"avg_score": 0.002738582601968823,
"num_lines": 134
} |
"""Abstractions for Swimlane app field types to simplify getting/setting values on records"""
from six import string_types as _string_types
from swimlane.core.fields.base import Field
from swimlane.utils import (
get_recursive_subclasses as _get_recursive_subclasses,
import_submodules as _import_submodules
)
# Import every field submodule so each Field subclass registers itself
# before the $type map is built below.
_import_submodules(__name__)
def _build_field_type_map(base_class):
    """Create mapping from all $type values to their respective Field classes"""
    type_map = {}
    for subclass in _get_recursive_subclasses(base_class):
        declared = subclass.field_type
        if not declared:
            # Abstract/base field classes declare no $type; skip them.
            continue
        if isinstance(declared, tuple):
            # A class may serve several $type identifiers.
            for type_id in declared:
                type_map[type_id] = subclass
        elif isinstance(declared, _string_types):
            type_map[declared] = subclass
        else:
            raise ValueError('Field type must be str or tuple, cannot understand type "{}" on class "{}"'.format(
                type(declared),
                subclass
            ))
    return type_map
# Swimlane "$type" identifier -> Field subclass, built once at import time.
_FIELD_TYPE_MAP = _build_field_type_map(Field)
def resolve_field_class(field_definition):
    """Return field class most fitting of provided Swimlane field definition"""
    try:
        return _FIELD_TYPE_MAP[field_definition['$type']]
    except KeyError as lookup_error:
        # Attach a friendlier message, then re-raise the original KeyError
        # (covers both a missing '$type' key and an unknown $type value).
        lookup_error.message = 'No field available to handle Swimlane $type "{}"'.format(field_definition)
        raise
# Public API: the resolver plus every discovered Field subclass.
# Fixed: the map's values are classes, so their own __name__ is wanted;
# f.__class__.__name__ yielded the metaclass name ('type') for every entry.
__all__ = ['resolve_field_class'] + [f.__name__ for f in _FIELD_TYPE_MAP.values()]
| {
"repo_name": "Swimlane/sw-python-client",
"path": "swimlane/core/fields/__init__.py",
"copies": "1",
"size": "1544",
"license": "mit",
"hash": 7006865815795158000,
"line_mean": 32.5652173913,
"line_max": 117,
"alpha_frac": 0.6347150259,
"autogenerated": false,
"ratio": 3.969151670951157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011350269853894687,
"num_lines": 46
} |
"""Abstractions for working with powerpoint files"""
import tempfile
import shutil
import re
import os
from win32 import run
def runpowerpoint():
    """Launch (or attach to) the PowerPoint COM application."""
    # def instead of a lambda assignment (PEP 8 E731); same call signature.
    return run('PowerPoint.Application')
def new(app, filename):
    """Create a new presentation in *app*, save it as *filename*, return it."""
    created = app.Presentations.Add()
    created.SaveAs(filename)
    return created
def addslide(pptx, idx, style):
    """Insert a slide of layout *style* at position *idx* and return it."""
    slides = pptx.Slides
    return slides.AddSlide(idx, style)
def open_pptx(app, path):
    """Open the presentation at *path* via the given PowerPoint app."""
    presentations = app.Presentations
    return presentations.Open(path)
def slide(pptx, index):
    """Return the slide at zero-based *index* (COM collections are 1-based)."""
    return pptx.Slides(index + 1)
class SlideTemplate(object):
    """Handle the logic for working with pptx files
    as single-slide templates, so it can be copied
    to a final slideshow.
    """

    def __init__(self, template_path):
        self.template_path = template_path

    def pptx_from_template(self):
        """Copy the template to a temp file and open that working copy."""
        tmp_fd, tmp_path = tempfile.mkstemp(prefix='pptxtmp')
        # Close the OS-level descriptor mkstemp returns; only the path is
        # needed for the copy (the fd was previously leaked).
        os.close(tmp_fd)
        shutil.copyfile(self.template_path, tmp_path)
        self.pptx = open_pptx(self.app, tmp_path)
        return self.pptx

    def open_pptx(self):
        """Open the template file itself (no working copy)."""
        self.pptx = open_pptx(self.app, self.template_path)
        return self.pptx

    @property
    def app(self):
        """Lazily started PowerPoint COM application."""
        if not hasattr(self, '_app'):
            self._app = runpowerpoint()
        return self._app

    def append_to(self, destination_pptx):
        """Copy this template's single slide (keeping its design) onto the
        end of *destination_pptx*."""
        # Copy the only slide
        source_slide = slide(self.pptx, 0)
        source_slide.Copy()
        # Source style
        design = source_slide.Design
        # Append copied slide to target presentation with original style
        destination_pptx.Slides.Paste()
        slide(destination_pptx,
              destination_pptx.Slides.Count - 1).Design = design

    def clean(self):
        """Close the presentation without prompting to save changes."""
        self.app.DisplayAlerts = False
        self.pptx.Saved = True
        self.pptx.Close()
class SlideshowJoiner(object):
    """Manages the logic for joining a set of SlideTemplate
    objects into a final slideshow."""

    def __init__(self, builders):
        """Intialize the joiner object. The sources
        are instances of the SlideBuilder class
        and should be provided in the same order
        of the final slideshow.
        """
        self.sources = builders

    def create_document(self):
        """Create the destination presentation, saved under a temp path."""
        self.app.Presentations.Add(1)
        self.pptx = self.app.ActivePresentation
        fd, path = tempfile.mkstemp()
        # mkstemp's descriptor was previously leaked; only the path is used.
        os.close(fd)
        self.pptx.SaveAs(path)

    def setup(self):
        """Match the destination page size to the first source's template."""
        self.pptx.PageSetup.SlideHeight = self.sources[
            0].pptx.PageSetup.SlideHeight
        self.pptx.PageSetup.SlideWidth = self.sources[
            0].pptx.PageSetup.SlideWidth

    @property
    def app(self):
        """Lazily started PowerPoint COM application."""
        if not hasattr(self, '_app'):
            self._app = runpowerpoint()
        return self._app

    def save(self, path):
        """Save the joined presentation under *path*."""
        self.pptx.SaveAs(path)

    def build(self):
        """Build every source and append its slide to the destination."""
        self.create_document()
        for index, source in enumerate(self.sources):
            if index == 0:
                # Page size can only be copied once a source pptx exists.
                self.setup()
            source.build()
            source.template.append_to(self.pptx)
            source.template.clean()

    def quit(self):
        """Shut PowerPoint down without save prompts."""
        self.app.DisplayAlerts = False
        self.app.Quit()
class SlideSourceOrdering(object):
    """Encapsulate logic for ordering slide order
    relative to their names"""

    def __init__(self, filename_list=None):
        # An explicit filename list dictates the order; otherwise
        # directories are sorted by the number their names end with.
        if filename_list is None:
            self.strategy = self.numeric_ordering
        else:
            self.filename_list = filename_list
            self.strategy = self.list_ordering

    def numeric_ordering(self, paths):
        """Sort directory paths by the integer suffix of their names."""
        trailing_number = re.compile(r'\d+$')
        directories = (p for p in paths if os.path.isdir(p))
        return sorted(directories,
                      key=lambda p: int(trailing_number.search(p).group(0)))

    def list_ordering(self, paths):
        """Return *paths* reordered to match the configured filename list."""
        known = list(paths)
        base_dir = os.path.dirname(known[0])
        expected = [os.path.join(base_dir, name)
                    for name in self.filename_list]
        if set(known) != set(expected):
            raise ValueError('Mismatch with order specification')
        return expected

    def __call__(self, paths):
        return self.strategy(paths)
| {
"repo_name": "Intelimetrica/coati",
"path": "coati/powerpoint.py",
"copies": "1",
"size": "4177",
"license": "mit",
"hash": 1417743327286144500,
"line_mean": 27.4149659864,
"line_max": 72,
"alpha_frac": 0.6191046205,
"autogenerated": false,
"ratio": 3.7095914742451153,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9823659409648906,
"avg_score": 0.0010073370192417812,
"num_lines": 147
} |
"""Abstractions related to the game and game logic"""
class Cell:
    """Represents one element of the board"""

    def __init__(self, z=0, active=False, lit=False):
        # Defaults: height 0, not active, not lit up.
        self.z = z
        self.active = active
        self.lit = lit

    def __repr__(self):
        return f'z = {self.z}, active = {self.active}, lit up = {self.lit}'
class Board:
    """Represents the board"""

    def __init__(self, filename: str):
        """Reads a board file from disk and stores it as a Board object."""
        with open(filename) as f:
            header = f.readline().strip()
            # Making massive tuple unpacking here
            (self.width, self.height, self.start_x,
             self.start_y, self.angle) = map(int, header.split())
            # Build an empty board.  Each row must be a *distinct* list:
            # the previous code built one list and reused the same object
            # for every row, so any write to cells[x][y] showed up in all
            # rows at once.
            self.cells = [[Cell() for _ in range(self.width)]
                          for _ in range(self.height)]
            for x, line in enumerate(f.read().splitlines()):
                for y, curr_cell in enumerate(map(int, line.split())):
                    # Negative values mark active cells; height is |value|.
                    self.cells[x][y] = Cell(abs(curr_cell), curr_cell < 0)

    def draw(self) -> None:
        """Prettyprinting a board."""
        # NOTE(review): iterates width x height while rows are filled one
        # per input line — this assumes a square board; confirm for w != h.
        for x in range(self.width):
            for y in range(self.height):
                print(self.cells[x][y])
            print()
# Manual smoke test: load a sample board from disk and dump it to stdout.
if __name__ == '__main__':
    board = Board('board.txt')
    board.draw()
| {
"repo_name": "5tr1k3r/lightbot-python",
"path": "lightbot/game.py",
"copies": "1",
"size": "1455",
"license": "mit",
"hash": -6713374500320594000,
"line_mean": 32.8372093023,
"line_max": 87,
"alpha_frac": 0.5450171821,
"autogenerated": false,
"ratio": 3.75968992248062,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.480470710458062,
"avg_score": null,
"num_lines": null
} |
"""Abstractions to interact with service models."""
from collections import defaultdict
from botocore_eb.utils import CachedProperty
from botocore_eb.compat import OrderedDict
# Sentinel distinguishing "not yet computed" from a legitimate None value.
NOT_SET = object()


class NoShapeFoundError(Exception):
    """Raised when a shape name is not present in the shape map."""
    pass


class InvalidShapeError(Exception):
    """Raised when a shape definition is malformed (e.g. missing 'type')."""
    pass


class OperationNotFoundError(Exception):
    """Raised when an operation name is not defined by the service model."""
    pass


class InvalidShapeReferenceError(Exception):
    """Raised when a shape reference dict lacks its 'shape' key."""
    pass


class UndefinedModelAttributeError(Exception):
    """Raised when requested metadata is absent from the model."""
    pass
class Shape(object):
    """Object representing a shape from the service model."""
    # To simplify serialization logic, all shape params that are
    # related to serialization are moved from the top level hash into
    # a 'serialization' hash. This list below contains the names of all
    # the attributes that should be moved.
    SERIALIZED_ATTRS = ['locationName', 'queryName', 'flattened', 'location',
                        'payload', 'streaming', 'timestampFormat',
                        'xmlNamespace', 'resultWrapper', 'xmlAttribute']
    # Optional attributes copied verbatim into the ``metadata`` property.
    METADATA_ATTRS = ['required', 'min', 'max', 'sensitive', 'enum']
    # Mapping type for member dicts; ordered so the member order from the
    # service JSON is preserved.
    MAP_TYPE = OrderedDict

    def __init__(self, shape_name, shape_model, shape_resolver=None):
        """
        :type shape_name: string
        :param shape_name: The name of the shape.
        :type shape_model: dict
        :param shape_model: The shape model. This would be the value
            associated with the key in the "shapes" dict of the
            service model (i.e ``model['shapes'][shape_name]``)
        :type shape_resolver: botocore.model.ShapeResolver
        :param shape_resolver: A shape resolver object. This is used to
            resolve references to other shapes. For scalar shape types
            (string, integer, boolean, etc.), this argument is not
            required. If a shape_resolver is not provided for a complex
            type, then a ``ValueError`` will be raised when an attempt
            to resolve a shape is made.
        """
        self.name = shape_name
        self.type_name = shape_model['type']
        self.documentation = shape_model.get('documentation', '')
        self._shape_model = shape_model
        if shape_resolver is None:
            # If a shape_resolver is not provided, we create an object
            # that will throw errors if you attempt to resolve
            # a shape. This is actually ok for scalar shapes
            # because they don't need to resolve shapes and shouldn't
            # be required to provide an object they won't use.
            shape_resolver = UnresolvableShapeMap()
        self._shape_resolver = shape_resolver
        # Scratch storage used by the CachedProperty descriptors below.
        self._cache = {}

    @CachedProperty
    def serialization(self):
        """Serialization information about the shape.
        This contains information that may be needed for input serialization
        or response parsing. This can include:
            * name
            * queryName
            * flattened
            * location
            * payload
            * streaming
            * xmlNamespace
            * resultWrapper
            * xmlAttribute
        :rtype: dict
        :return: Serialization information about the shape.
        """
        model = self._shape_model
        serialization = {}
        for attr in self.SERIALIZED_ATTRS:
            if attr in self._shape_model:
                serialization[attr] = model[attr]
        # For consistency, locationName is renamed to just 'name'.
        if 'locationName' in serialization:
            serialization['name'] = serialization.pop('locationName')
        return serialization

    @CachedProperty
    def metadata(self):
        """Metadata about the shape.
        This requires optional information about the shape, including:
            * min
            * max
            * enum
            * sensitive
            * required
        :rtype: dict
        :return: Metadata about the shape.
        """
        model = self._shape_model
        metadata = {}
        for attr in self.METADATA_ATTRS:
            if attr in self._shape_model:
                metadata[attr] = model[attr]
        return metadata

    @CachedProperty
    def required_members(self):
        """A list of members that are required.
        A structure shape can define members that are required.
        This value will return a list of required members. If there
        are no required members an empty list is returned.
        """
        return self.metadata.get('required', [])

    def _resolve_shape_ref(self, shape_ref):
        # Delegate to the resolver supplied at construction time (or to the
        # erroring UnresolvableShapeMap default used for scalar shapes).
        return self._shape_resolver.resolve_shape_ref(shape_ref)

    def __repr__(self):
        return "<%s(%s)>" % (self.__class__.__name__,
                             self.name)
class StructureShape(Shape):
    @CachedProperty
    def members(self):
        """Ordered mapping of member name -> resolved Shape object."""
        # The raw model maps each member name to a shape *reference*
        # ({'shape': 'shapeName', ...}); resolve each into a Shape while
        # preserving the declaration order.
        resolved = self.MAP_TYPE()
        for member_name, reference in self._shape_model['members'].items():
            resolved[member_name] = self._resolve_shape_ref(reference)
        return resolved
class ListShape(Shape):
    @CachedProperty
    def member(self):
        """The resolved Shape of the list's element type."""
        element_ref = self._shape_model['member']
        return self._resolve_shape_ref(element_ref)
class MapShape(Shape):
    @CachedProperty
    def key(self):
        """The resolved Shape of the map's key type."""
        key_ref = self._shape_model['key']
        return self._resolve_shape_ref(key_ref)

    @CachedProperty
    def value(self):
        """The resolved Shape of the map's value type."""
        value_ref = self._shape_model['value']
        return self._resolve_shape_ref(value_ref)
class ServiceModel(object):
    """
    :ivar service_description: The parsed service description dictionary.
    """
    # Any type not in this mapping will default to the Shape class.
    SHAPE_CLASSES = {
        'structure': StructureShape,
        'list': ListShape,
        'map': MapShape,
    }

    def __init__(self, service_description):
        """
        :type service_description: dict
        :param service_description: The service description model. This value
            is obtained from a botocore.loader.Loader, or from directly loading
            the file yourself::
                service_description = json.load(
                    open('/path/to/service-description-model.json'))
                model = ServiceModel(service_description)
        """
        self._service_description = service_description
        # We want clients to be able to access metadata directly.
        self.metadata = service_description.get('metadata', {})
        self._shape_resolver = ShapeResolver(
            service_description.get('shapes', {}))
        # NOT_SET sentinel lets signature_version be read lazily from
        # metadata while still allowing an explicit override (even to None).
        self._signature_version = NOT_SET

    def shape_for(self, shape_name, member_traits=None):
        # Look up a shape by name, optionally overlaying member traits.
        return self._shape_resolver.get_shape_by_name(
            shape_name, member_traits)

    def resolve_shape_ref(self, shape_ref):
        return self._shape_resolver.resolve_shape_ref(shape_ref)

    def operation_model(self, operation_name):
        # Raises OperationNotFoundError for names absent from the model.
        try:
            model = self._service_description['operations'][operation_name]
        except KeyError:
            raise OperationNotFoundError(operation_name)
        return OperationModel(model, self)

    @CachedProperty
    def operation_names(self):
        return list(self._service_description.get('operations', []))

    @CachedProperty
    def signing_name(self):
        """The name to use when computing signatures.
        If the model does not define a signing name, this
        value will be the endpoint prefix defined in the model.
        """
        signing_name = self.metadata.get('signingName')
        if signing_name is None:
            signing_name = self.endpoint_prefix
        return signing_name

    @CachedProperty
    def api_version(self):
        return self._get_metadata_property('apiVersion')

    @CachedProperty
    def protocol(self):
        return self._get_metadata_property('protocol')

    @CachedProperty
    def endpoint_prefix(self):
        return self._get_metadata_property('endpointPrefix')

    def _get_metadata_property(self, name):
        # Raises UndefinedModelAttributeError when the key is missing.
        try:
            return self.metadata[name]
        except KeyError:
            raise UndefinedModelAttributeError(
                '"%s" not defined in the metadata of the the model: %s' %
                (name, self))

    # Signature version is one of the rare properties
    # than can be modified so a CachedProperty is not used here.
    @property
    def signature_version(self):
        if self._signature_version is NOT_SET:
            signature_version = self.metadata.get('signatureVersion')
            self._signature_version = signature_version
        return self._signature_version

    @signature_version.setter
    def signature_version(self, value):
        self._signature_version = value
class OperationModel(object):
    """Model of a single API operation within a service."""

    def __init__(self, operation_model, service_model):
        """
        :type operation_model: dict
        :param operation_model: The operation model. This comes from the
            service model, and is the value associated with the operation
            name in the service model (i.e ``model['operations'][op_name]``).
        :type service_model: botocore.model.ServiceModel
        :param service_model: The service model associated with the operation.
        """
        self._operation_model = operation_model
        self._service_model = service_model
        # Clients can access '.name' to get the operation name
        # and '.metadata' to get the top level metdata of the service.
        self.name = operation_model.get('name')
        self.metadata = service_model.metadata
        # HTTP binding info (method, requestUri, ...) if declared.
        self.http = operation_model.get('http', {})

    @CachedProperty
    def input_shape(self):
        if 'input' not in self._operation_model:
            # Some operations do not accept any input and do not define an
            # input shape.
            return None
        return self._service_model.resolve_shape_ref(
            self._operation_model['input'])

    @CachedProperty
    def output_shape(self):
        if 'output' not in self._operation_model:
            # Some operations do not define an output shape,
            # in which case we return None to indicate the
            # operation has no expected output.
            return None
        return self._service_model.resolve_shape_ref(
            self._operation_model['output'])

    @CachedProperty
    def has_streaming_output(self):
        # True when the output payload member is a blob (streamed body).
        output_shape = self.output_shape
        if output_shape is None:
            return False
        payload = output_shape.serialization.get('payload')
        if payload is not None:
            payload_shape = output_shape.members[payload]
            if payload_shape.type_name == 'blob':
                return True
        return False
class ShapeResolver(object):
    """Resolves shape references."""
    # Any type not in this mapping will default to the Shape class.
    SHAPE_CLASSES = {
        'structure': StructureShape,
        'list': ListShape,
        'map': MapShape,
    }

    def __init__(self, shape_map):
        # shape_map is the raw 'shapes' dict from the service JSON.
        self._shape_map = shape_map
        self._shape_cache = {}

    def get_shape_by_name(self, shape_name, member_traits=None):
        """Return a Shape for *shape_name*, overlaying *member_traits*.

        Raises NoShapeFoundError for unknown names and InvalidShapeError
        when the shape model lacks a 'type' key.
        """
        try:
            shape_model = self._shape_map[shape_name]
        except KeyError:
            raise NoShapeFoundError(shape_name)
        try:
            shape_cls = self.SHAPE_CLASSES.get(shape_model['type'], Shape)
        except KeyError:
            raise InvalidShapeError("Shape is missing required key 'type': %s"
                                    % shape_model)
        if member_traits:
            # Traits from the referring member (e.g. locationName) override
            # the base definition; copy so the shared map stays untouched.
            shape_model = shape_model.copy()
            shape_model.update(member_traits)
        return shape_cls(shape_name, shape_model, self)

    def resolve_shape_ref(self, shape_ref):
        # A shape_ref is a dict that has a 'shape' key that
        # refers to a shape name as well as any additional
        # member traits that are then merged over the shape
        # definition. For example:
        # {"shape": "StringType", "locationName": "Foobar"}
        if len(shape_ref) == 1 and 'shape' in shape_ref:
            # It's just a shape ref with no member traits, we can avoid
            # a .copy(). This is the common case so it's specifically
            # called out here.
            return self.get_shape_by_name(shape_ref['shape'])
        else:
            member_traits = shape_ref.copy()
            try:
                shape_name = member_traits.pop('shape')
            except KeyError:
                raise InvalidShapeReferenceError(
                    "Invalid model, missing shape reference: %s" % shape_ref)
            return self.get_shape_by_name(shape_name, member_traits)
class UnresolvableShapeMap(object):
    """A ShapeResolver that will throw ValueErrors when shapes are resolved.

    Used as the default resolver for scalar shapes, which never need to
    resolve sub-shapes.
    """

    def get_shape_by_name(self, shape_name, member_traits=None):
        # Fixed: the '%s' placeholder was never interpolated, so the error
        # message literally contained "%s" instead of the shape name.
        raise ValueError("Attempted to lookup shape '%s', but no shape "
                         "map was provided." % shape_name)

    def resolve_shape_ref(self, shape_ref):
        # Same fix as above for the shape-reference variant.
        raise ValueError("Attempted to resolve shape '%s', but no shape "
                         "map was provided." % shape_ref)
class DenormalizedStructureBuilder(object):
    """Build a StructureShape from a denormalized model.
    This is a convenience builder class that makes it easy to construct
    ``StructureShape``s based on a denormalized model.
    It will handle the details of creating unique shape names and creating
    the appropriate shape map needed by the ``StructureShape`` class.
    Example usage::
        builder = DenormalizedStructureBuilder()
        shape = builder.with_members({
            'A': {
                'type': 'structure',
                'members': {
                    'B': {
                        'type': 'structure',
                        'members': {
                            'C': {
                                'type': 'string',
                            }
                        }
                    }
                }
            }
        }).build_model()
        # ``shape`` is now an instance of botocore.model.StructureShape
    """

    def __init__(self, name=None):
        # Fixes relative to the original:
        #  * a caller-supplied ``name`` was silently dropped (``self.name``
        #    was only assigned in the ``name is None`` branch), making
        #    ``build_model()`` fail with AttributeError;
        #  * ``self.members`` was initialized but the class actually reads
        #    ``self._members``, so ``build_model()`` before
        #    ``with_members()`` crashed.  Both attributes are kept.
        self.members = {}
        self._members = {}
        self._name_generator = ShapeNameGenerator()
        if name is None:
            name = self._name_generator.new_shape_name('structure')
        self.name = name

    def with_members(self, members):
        """
        :type members: dict
        :param members: The denormalized members.
        :return: self
        """
        self._members = members
        return self

    def build_model(self):
        """Build the model based on the provided members.
        :rtype: botocore.model.StructureShape
        :return: The built StructureShape object.
        """
        shapes = {}
        denormalized = {
            'type': 'structure',
            'members': self._members,
        }
        self._build_model(denormalized, shapes, self.name)
        resolver = ShapeResolver(shape_map=shapes)
        return StructureShape(shape_name=self.name,
                              shape_model=shapes[self.name],
                              shape_resolver=resolver)

    def _build_model(self, model, shapes, shape_name):
        # Dispatch on the denormalized 'type', normalizing into *shapes*.
        if model['type'] == 'structure':
            shapes[shape_name] = self._build_structure(model, shapes)
        elif model['type'] == 'list':
            shapes[shape_name] = self._build_list(model, shapes)
        elif model['type'] == 'map':
            shapes[shape_name] = self._build_map(model, shapes)
        elif model['type'] in ['string', 'integer', 'boolean', 'blob',
                               'timestamp', 'long', 'double', 'char']:
            shapes[shape_name] = self._build_scalar(model)
        else:
            raise InvalidShapeError("Unknown shape type: %s" % model['type'])

    def _build_structure(self, model, shapes):
        # Recursively normalize each member into its own named shape.
        members = {}
        shape = self._build_initial_shape(model)
        shape['members'] = members
        for name, member_model in model['members'].items():
            member_shape_name = self._get_shape_name(member_model)
            members[name] = {'shape': member_shape_name}
            self._build_model(member_model, shapes, member_shape_name)
        return shape

    def _build_list(self, model, shapes):
        # Normalize the element type into its own named shape.
        member_shape_name = self._get_shape_name(model)
        shape = self._build_initial_shape(model)
        shape['member'] = {'shape': member_shape_name}
        self._build_model(model['member'], shapes, member_shape_name)
        return shape

    def _build_map(self, model, shapes):
        # Normalize key and value types into their own named shapes.
        key_shape_name = self._get_shape_name(model['key'])
        value_shape_name = self._get_shape_name(model['value'])
        shape = self._build_initial_shape(model)
        shape['key'] = {'shape': key_shape_name}
        shape['value'] = {'shape': value_shape_name}
        self._build_model(model['key'], shapes, key_shape_name)
        self._build_model(model['value'], shapes, value_shape_name)
        return shape

    def _build_initial_shape(self, model):
        # Common skeleton shared by every normalized shape.
        shape = {
            'type': model['type'],
        }
        if 'documentation' in model:
            shape['documentation'] = model['documentation']
        if 'enum' in model:
            shape['enum'] = model['enum']
        return shape

    def _build_scalar(self, model):
        return self._build_initial_shape(model)

    def _get_shape_name(self, model):
        # An explicit 'shape_name' wins; otherwise generate a unique one.
        if 'shape_name' in model:
            return model['shape_name']
        else:
            return self._name_generator.new_shape_name(model['type'])
class ShapeNameGenerator(object):
    """Generate unique shape names for a type.
    This class can be used in conjunction with the DenormalizedStructureBuilder
    to generate unique shape names for a given type.
    """

    def __init__(self):
        # Per-type counter of how many names have been handed out.
        self._name_cache = defaultdict(int)

    def new_shape_name(self, type_name):
        """Generate a unique shape name.
        This method will guarantee a unique shape name each time it is
        called with the same type.
        ::
            >>> s = ShapeNameGenerator()
            >>> s.new_shape_name('structure')
            'StructureType1'
            >>> s.new_shape_name('structure')
            'StructureType2'
            >>> s.new_shape_name('list')
            'ListType1'
            >>> s.new_shape_name('list')
            'ListType2'
        :type type_name: string
        :param type_name: The type name (structure, list, map, string, etc.)
        :rtype: string
        :return: A unique shape name for the given type
        """
        self._name_cache[type_name] += 1
        return '{0}Type{1}'.format(type_name.capitalize(),
                                   self._name_cache[type_name])
| {
"repo_name": "raulanatol/awsebcli",
"path": "botocore_eb/model.py",
"copies": "2",
"size": "18977",
"license": "apache-2.0",
"hash": -1002783745928076800,
"line_mean": 32.8875,
"line_max": 79,
"alpha_frac": 0.5921378511,
"autogenerated": false,
"ratio": 4.448429442100328,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003968253968253969,
"num_lines": 560
} |
"""Abstractions to interact with service models."""
from collections import defaultdict
from botocore.utils import CachedProperty
from botocore.compat import OrderedDict
# Sentinel distinguishing "not yet computed" from a legitimate None value.
NOT_SET = object()


class NoShapeFoundError(Exception):
    """Raised when a shape name is not present in the shape map."""
    pass


class InvalidShapeError(Exception):
    """Raised when a shape definition is malformed (e.g. missing 'type')."""
    pass


class OperationNotFoundError(Exception):
    """Raised when an operation name is not defined by the service model."""
    pass


class InvalidShapeReferenceError(Exception):
    """Raised when a shape reference dict lacks its 'shape' key."""
    pass


class UndefinedModelAttributeError(Exception):
    """Raised when requested metadata is absent from the model."""
    pass
class Shape(object):
    """Object representing a shape from the service model."""
    # To simplify serialization logic, all shape params that are
    # related to serialization are moved from the top level hash into
    # a 'serialization' hash. This list below contains the names of all
    # the attributes that should be moved.
    SERIALIZED_ATTRS = ['locationName', 'queryName', 'flattened', 'location',
                        'payload', 'streaming', 'timestampFormat',
                        'xmlNamespace', 'resultWrapper', 'xmlAttribute']
    # Optional attributes copied verbatim into the ``metadata`` property.
    METADATA_ATTRS = ['required', 'min', 'max', 'sensitive', 'enum']
    # Mapping type for member dicts; ordered so the member order from the
    # service JSON is preserved.
    MAP_TYPE = OrderedDict

    def __init__(self, shape_name, shape_model, shape_resolver=None):
        """
        :type shape_name: string
        :param shape_name: The name of the shape.
        :type shape_model: dict
        :param shape_model: The shape model. This would be the value
            associated with the key in the "shapes" dict of the
            service model (i.e ``model['shapes'][shape_name]``)
        :type shape_resolver: botocore.model.ShapeResolver
        :param shape_resolver: A shape resolver object. This is used to
            resolve references to other shapes. For scalar shape types
            (string, integer, boolean, etc.), this argument is not
            required. If a shape_resolver is not provided for a complex
            type, then a ``ValueError`` will be raised when an attempt
            to resolve a shape is made.
        """
        self.name = shape_name
        self.type_name = shape_model['type']
        self.documentation = shape_model.get('documentation', '')
        self._shape_model = shape_model
        if shape_resolver is None:
            # If a shape_resolver is not provided, we create an object
            # that will throw errors if you attempt to resolve
            # a shape. This is actually ok for scalar shapes
            # because they don't need to resolve shapes and shouldn't
            # be required to provide an object they won't use.
            shape_resolver = UnresolvableShapeMap()
        self._shape_resolver = shape_resolver
        # Scratch storage used by the CachedProperty descriptors below.
        self._cache = {}

    @CachedProperty
    def serialization(self):
        """Serialization information about the shape.
        This contains information that may be needed for input serialization
        or response parsing. This can include:
            * name
            * queryName
            * flattened
            * location
            * payload
            * streaming
            * xmlNamespace
            * resultWrapper
            * xmlAttribute
        :rtype: dict
        :return: Serialization information about the shape.
        """
        model = self._shape_model
        serialization = {}
        for attr in self.SERIALIZED_ATTRS:
            if attr in self._shape_model:
                serialization[attr] = model[attr]
        # For consistency, locationName is renamed to just 'name'.
        if 'locationName' in serialization:
            serialization['name'] = serialization.pop('locationName')
        return serialization

    @CachedProperty
    def metadata(self):
        """Metadata about the shape.
        This requires optional information about the shape, including:
            * min
            * max
            * enum
            * sensitive
            * required
        :rtype: dict
        :return: Metadata about the shape.
        """
        model = self._shape_model
        metadata = {}
        for attr in self.METADATA_ATTRS:
            if attr in self._shape_model:
                metadata[attr] = model[attr]
        return metadata

    @CachedProperty
    def required_members(self):
        """A list of members that are required.
        A structure shape can define members that are required.
        This value will return a list of required members. If there
        are no required members an empty list is returned.
        """
        return self.metadata.get('required', [])

    def _resolve_shape_ref(self, shape_ref):
        # Delegate to the resolver supplied at construction time (or to the
        # erroring UnresolvableShapeMap default used for scalar shapes).
        return self._shape_resolver.resolve_shape_ref(shape_ref)

    def __repr__(self):
        return "<%s(%s)>" % (self.__class__.__name__,
                             self.name)
class StructureShape(Shape):
    @CachedProperty
    def members(self):
        """Ordered mapping of member name -> resolved Shape object."""
        # The raw model maps each member name to a shape *reference*
        # ({'shape': 'shapeName', ...}); resolve each into a Shape while
        # preserving the declaration order.
        resolved = self.MAP_TYPE()
        for member_name, reference in self._shape_model['members'].items():
            resolved[member_name] = self._resolve_shape_ref(reference)
        return resolved
class ListShape(Shape):
    @CachedProperty
    def member(self):
        """The resolved Shape of the list's element type."""
        element_ref = self._shape_model['member']
        return self._resolve_shape_ref(element_ref)
class MapShape(Shape):
    @CachedProperty
    def key(self):
        """The resolved Shape of the map's key type."""
        key_ref = self._shape_model['key']
        return self._resolve_shape_ref(key_ref)

    @CachedProperty
    def value(self):
        """The resolved Shape of the map's value type."""
        value_ref = self._shape_model['value']
        return self._resolve_shape_ref(value_ref)
class ServiceModel(object):
    """
    :ivar service_description: The parsed service description dictionary.
    """
    # Any type not in this mapping will default to the Shape class.
    SHAPE_CLASSES = {
        'structure': StructureShape,
        'list': ListShape,
        'map': MapShape,
    }

    def __init__(self, service_description, service_name=None):
        """
        :type service_description: dict
        :param service_description: The service description model. This value
            is obtained from a botocore.loader.Loader, or from directly loading
            the file yourself::
                service_description = json.load(
                    open('/path/to/service-description-model.json'))
                model = ServiceModel(service_description)
        :type service_name: str
        :param service_name: The name of the service. Normally this is
            the endpoint prefix defined in the service_description. However,
            you can override this value to provide a more convenient name.
            This is done in a few places in botocore (ses instead of email,
            emr instead of elasticmapreduce). If this value is not provided,
            it will default to the endpointPrefix defined in the model.
        """
        self._service_description = service_description
        # We want clients to be able to access metadata directly.
        self.metadata = service_description.get('metadata', {})
        self._shape_resolver = ShapeResolver(
            service_description.get('shapes', {}))
        # NOT_SET sentinel lets signature_version be read lazily from
        # metadata while still allowing an explicit override (even to None).
        self._signature_version = NOT_SET
        self._service_name = service_name

    def shape_for(self, shape_name, member_traits=None):
        # Look up a shape by name, optionally overlaying member traits.
        return self._shape_resolver.get_shape_by_name(
            shape_name, member_traits)

    def resolve_shape_ref(self, shape_ref):
        return self._shape_resolver.resolve_shape_ref(shape_ref)

    def operation_model(self, operation_name):
        # Raises OperationNotFoundError for names absent from the model.
        try:
            model = self._service_description['operations'][operation_name]
        except KeyError:
            raise OperationNotFoundError(operation_name)
        return OperationModel(model, self)

    @CachedProperty
    def operation_names(self):
        return list(self._service_description.get('operations', []))

    @CachedProperty
    def service_name(self):
        """The name of the service.
        This defaults to the endpointPrefix defined in the service model.
        However, this value can be overriden when a ``ServiceModel`` is
        created. If a service_name was not provided when the ``ServiceModel``
        was created and if there is no endpointPrefix defined in the
        service model, then an ``UndefinedModelAttributeError`` exception
        will be raised.
        """
        if self._service_name is not None:
            return self._service_name
        else:
            return self.endpoint_prefix

    @CachedProperty
    def signing_name(self):
        """The name to use when computing signatures.
        If the model does not define a signing name, this
        value will be the endpoint prefix defined in the model.
        """
        signing_name = self.metadata.get('signingName')
        if signing_name is None:
            signing_name = self.endpoint_prefix
        return signing_name

    @CachedProperty
    def api_version(self):
        return self._get_metadata_property('apiVersion')

    @CachedProperty
    def protocol(self):
        return self._get_metadata_property('protocol')

    @CachedProperty
    def endpoint_prefix(self):
        return self._get_metadata_property('endpointPrefix')

    def _get_metadata_property(self, name):
        # Raises UndefinedModelAttributeError when the key is missing.
        try:
            return self.metadata[name]
        except KeyError:
            raise UndefinedModelAttributeError(
                '"%s" not defined in the metadata of the the model: %s' %
                (name, self))

    # Signature version is one of the rare properties
    # than can be modified so a CachedProperty is not used here.
    @property
    def signature_version(self):
        if self._signature_version is NOT_SET:
            signature_version = self.metadata.get('signatureVersion')
            self._signature_version = signature_version
        return self._signature_version

    @signature_version.setter
    def signature_version(self, value):
        self._signature_version = value
class OperationModel(object):
    def __init__(self, operation_model, service_model):
        """
        :type operation_model: dict
        :param operation_model: The operation model. This comes from the
            service model, and is the value associated with the operation
            name in the service model (i.e ``model['operations'][op_name]``).

        :type service_model: botocore.model.ServiceModel
        :param service_model: The service model associated with the operation.
        """
        self._operation_model = operation_model
        self._service_model = service_model
        # Clients can access '.name' to get the operation name
        # and '.metadata' to get the top level metadata of the service.
        self.name = operation_model.get('name')
        self.metadata = service_model.metadata
        self.http = operation_model.get('http', {})

    @CachedProperty
    def input_shape(self):
        """The resolved input Shape, or None if the operation takes no input."""
        if 'input' in self._operation_model:
            return self._service_model.resolve_shape_ref(
                self._operation_model['input'])
        # Some operations do not accept any input and define no input shape.
        return None

    @CachedProperty
    def output_shape(self):
        """The resolved output Shape, or None if the operation has no output."""
        if 'output' in self._operation_model:
            return self._service_model.resolve_shape_ref(
                self._operation_model['output'])
        # Some operations define no output shape; None indicates the
        # operation has no expected output.
        return None

    @CachedProperty
    def has_streaming_output(self):
        """True when the output payload member is a streaming blob."""
        shape = self.output_shape
        if shape is None:
            return False
        payload_name = shape.serialization.get('payload')
        if payload_name is None:
            return False
        return shape.members[payload_name].type_name == 'blob'
class ShapeResolver(object):
    """Resolves shape references."""
    # Any type not in this mapping will default to the Shape class.
    SHAPE_CLASSES = {
        'structure': StructureShape,
        'list': ListShape,
        'map': MapShape,
    }

    def __init__(self, shape_map):
        self._shape_map = shape_map
        self._shape_cache = {}

    def get_shape_by_name(self, shape_name, member_traits=None):
        """Return a Shape instance for ``shape_name``.

        :raises NoShapeFoundError: if the name is not in the shape map.
        :raises InvalidShapeError: if the shape model lacks a 'type' key.
        """
        if shape_name not in self._shape_map:
            raise NoShapeFoundError(shape_name)
        shape_model = self._shape_map[shape_name]
        try:
            shape_cls = self.SHAPE_CLASSES.get(shape_model['type'], Shape)
        except KeyError:
            raise InvalidShapeError("Shape is missing required key 'type': %s"
                                    % shape_model)
        if member_traits:
            # Merge the traits over a copy so the shared map stays pristine.
            shape_model = shape_model.copy()
            shape_model.update(member_traits)
        return shape_cls(shape_name, shape_model, self)

    def resolve_shape_ref(self, shape_ref):
        """Resolve a shape reference into a Shape object.

        A shape_ref is a dict with a 'shape' key naming a shape, plus any
        additional member traits that are merged over the shape definition,
        e.g. ``{"shape": "StringType", "locationName": "Foobar"}``.
        """
        if len(shape_ref) == 1 and 'shape' in shape_ref:
            # Plain reference with no member traits; skip the .copy()
            # since this is the common case.
            return self.get_shape_by_name(shape_ref['shape'])
        member_traits = shape_ref.copy()
        try:
            shape_name = member_traits.pop('shape')
        except KeyError:
            raise InvalidShapeReferenceError(
                "Invalid model, missing shape reference: %s" % shape_ref)
        return self.get_shape_by_name(shape_name, member_traits)
class UnresolvableShapeMap(object):
    """A ShapeResolver that raises ValueError when shapes are resolved.

    Used as the placeholder resolver for scalar shapes, which never need
    to resolve member shapes.
    """
    def get_shape_by_name(self, shape_name, member_traits=None):
        # Bug fix: the '%s' placeholder was never interpolated, so the
        # error message literally contained '%s' instead of the name.
        raise ValueError("Attempted to lookup shape '%s', but no shape "
                         "map was provided." % shape_name)

    def resolve_shape_ref(self, shape_ref):
        # Bug fix: same missing interpolation as above.
        raise ValueError("Attempted to resolve shape '%s', but no shape "
                         "map was provided." % shape_ref)
class DenormalizedStructureBuilder(object):
    """Build a StructureShape from a denormalized model.

    This is a convenience builder class that makes it easy to construct
    ``StructureShape``s based on a denormalized model.
    It will handle the details of creating unique shape names and creating
    the appropriate shape map needed by the ``StructureShape`` class.

    Example usage::

        builder = DenormalizedStructureBuilder()
        shape = builder.with_members({
            'A': {
                'type': 'structure',
                'members': {
                    'B': {
                        'type': 'structure',
                        'members': {
                            'C': {
                                'type': 'string',
                            }
                        }
                    }
                }
            }
        }).build_model()
        # ``shape`` is now an instance of botocore.model.StructureShape

    """
    def __init__(self, name=None):
        self.members = {}
        self._name_generator = ShapeNameGenerator()
        # Robustness: default to an empty member map so build_model() on a
        # builder that never had with_members() called yields an empty
        # structure instead of raising AttributeError.
        self._members = {}
        # Bug fix: the original only assigned ``self.name`` inside the
        # ``name is None`` branch, so passing an explicit name left the
        # attribute undefined and build_model() raised AttributeError.
        if name is None:
            name = self._name_generator.new_shape_name('structure')
        self.name = name

    def with_members(self, members):
        """
        :type members: dict
        :param members: The denormalized members.
        :return: self
        """
        self._members = members
        return self

    def build_model(self):
        """Build the model based on the provided members.

        :rtype: botocore.model.StructureShape
        :return: The built StructureShape object.
        """
        shapes = {}
        denormalized = {
            'type': 'structure',
            'members': self._members,
        }
        self._build_model(denormalized, shapes, self.name)
        resolver = ShapeResolver(shape_map=shapes)
        return StructureShape(shape_name=self.name,
                              shape_model=shapes[self.name],
                              shape_resolver=resolver)

    def _build_model(self, model, shapes, shape_name):
        # Recursively normalize ``model`` into ``shapes`` under
        # ``shape_name``, dispatching on the declared shape type.
        if model['type'] == 'structure':
            shapes[shape_name] = self._build_structure(model, shapes)
        elif model['type'] == 'list':
            shapes[shape_name] = self._build_list(model, shapes)
        elif model['type'] == 'map':
            shapes[shape_name] = self._build_map(model, shapes)
        elif model['type'] in ['string', 'integer', 'boolean', 'blob', 'float',
                               'timestamp', 'long', 'double', 'char']:
            shapes[shape_name] = self._build_scalar(model)
        else:
            raise InvalidShapeError("Unknown shape type: %s" % model['type'])

    def _build_structure(self, model, shapes):
        # Normalize a structure: each member becomes a {'shape': name} ref.
        members = {}
        shape = self._build_initial_shape(model)
        shape['members'] = members
        for name, member_model in model['members'].items():
            member_shape_name = self._get_shape_name(member_model)
            members[name] = {'shape': member_shape_name}
            self._build_model(member_model, shapes, member_shape_name)
        return shape

    def _build_list(self, model, shapes):
        # Normalize a list: the element model becomes a named shape ref.
        member_shape_name = self._get_shape_name(model)
        shape = self._build_initial_shape(model)
        shape['member'] = {'shape': member_shape_name}
        self._build_model(model['member'], shapes, member_shape_name)
        return shape

    def _build_map(self, model, shapes):
        # Normalize a map: key and value models become named shape refs.
        key_shape_name = self._get_shape_name(model['key'])
        value_shape_name = self._get_shape_name(model['value'])
        shape = self._build_initial_shape(model)
        shape['key'] = {'shape': key_shape_name}
        shape['value'] = {'shape': value_shape_name}
        self._build_model(model['key'], shapes, key_shape_name)
        self._build_model(model['value'], shapes, value_shape_name)
        return shape

    def _build_initial_shape(self, model):
        # Carry over the type plus optional traits common to all shapes.
        shape = {
            'type': model['type'],
        }
        if 'documentation' in model:
            shape['documentation'] = model['documentation']
        if 'enum' in model:
            shape['enum'] = model['enum']
        return shape

    def _build_scalar(self, model):
        return self._build_initial_shape(model)

    def _get_shape_name(self, model):
        # Honor an explicit 'shape_name'; otherwise generate a unique one.
        if 'shape_name' in model:
            return model['shape_name']
        else:
            return self._name_generator.new_shape_name(model['type'])
class ShapeNameGenerator(object):
    """Generate unique shape names for a type.

    This class can be used in conjunction with the
    DenormalizedStructureBuilder to generate unique shape names for a
    given type.
    """
    def __init__(self):
        # Per-type counters; unseen types implicitly start at 0.
        self._name_cache = defaultdict(int)

    def new_shape_name(self, type_name):
        """Generate a unique shape name.

        Guarantees a unique name each time it is called with the same
        type, e.g. ``'structure'`` yields ``'StructureType1'``, then
        ``'StructureType2'``, and ``'list'`` yields ``'ListType1'``,
        ``'ListType2'``, ...

        :type type_name: string
        :param type_name: The type name (structure, list, map, string, etc.)

        :rtype: string
        :return: A unique shape name for the given type
        """
        self._name_cache[type_name] += 1
        return '%sType%s' % (type_name.capitalize(),
                             self._name_cache[type_name])
| {
"repo_name": "ianblenke/awsebcli",
"path": "ebcli/bundled/botocore/model.py",
"copies": "1",
"size": "20151",
"license": "apache-2.0",
"hash": -4525155215634181000,
"line_mean": 33.32879046,
"line_max": 79,
"alpha_frac": 0.5956031959,
"autogenerated": false,
"ratio": 4.473029966703662,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5568633162603662,
"avg_score": null,
"num_lines": null
} |
"""Abstractions to interact with service models."""
from collections import defaultdict
from .utils import CachedProperty
from .compat import OrderedDict
NOT_SET = object()
class NoShapeFoundError(Exception):
    """Raised when a shape name is not present in the resolver's shape map."""
    pass


class InvalidShapeError(Exception):
    """Raised when a shape definition is malformed (e.g. missing 'type' or
    an unknown type)."""
    pass


class OperationNotFoundError(Exception):
    """Raised when an operation name is not in the service description."""
    pass


class InvalidShapeReferenceError(Exception):
    """Raised when a shape reference dict is missing its 'shape' key."""
    pass


class UndefinedModelAttributeError(Exception):
    """Raised when a required metadata attribute is absent from the model."""
    pass
class Shape(object):
    """Object representing a shape from the service model."""
    # To simplify serialization logic, all shape params that are
    # related to serialization are moved from the top level hash into
    # a 'serialization' hash. This list below contains the names of all
    # the attributes that should be moved.
    SERIALIZED_ATTRS = ['locationName', 'queryName', 'flattened', 'location',
                        'payload', 'streaming', 'timestampFormat',
                        'xmlNamespace', 'resultWrapper', 'xmlAttribute']
    # Attributes surfaced through the ``metadata`` property instead of
    # ``serialization``.
    METADATA_ATTRS = ['required', 'min', 'max', 'sensitive', 'enum']
    # Mapping type used for resolved structure members; OrderedDict keeps
    # the member order from the model file.
    MAP_TYPE = OrderedDict
    def __init__(self, shape_name, shape_model, shape_resolver=None):
        """
        :type shape_name: string
        :param shape_name: The name of the shape.
        :type shape_model: dict
        :param shape_model: The shape model. This would be the value
            associated with the key in the "shapes" dict of the
            service model (i.e ``model['shapes'][shape_name]``)
        :type shape_resolver: botocore.model.ShapeResolver
        :param shape_resolver: A shape resolver object. This is used to
            resolve references to other shapes. For scalar shape types
            (string, integer, boolean, etc.), this argument is not
            required. If a shape_resolver is not provided for a complex
            type, then a ``ValueError`` will be raised when an attempt
            to resolve a shape is made.
        """
        self.name = shape_name
        self.type_name = shape_model['type']
        self.documentation = shape_model.get('documentation', '')
        self._shape_model = shape_model
        if shape_resolver is None:
            # If a shape_resolver is not provided, we create an object
            # that will throw errors if you attempt to resolve
            # a shape. This is actually ok for scalar shapes
            # because they don't need to resolve shapes and shouldn't
            # be required to provide an object they won't use.
            shape_resolver = UnresolvableShapeMap()
        self._shape_resolver = shape_resolver
        self._cache = {}
    @CachedProperty
    def serialization(self):
        """Serialization information about the shape.
        This contains information that may be needed for input serialization
        or response parsing. This can include:
            * name
            * queryName
            * flattened
            * location
            * payload
            * streaming
            * xmlNamespace
            * resultWrapper
            * xmlAttribute
        :rtype: dict
        :return: Serialization information about the shape.
        """
        model = self._shape_model
        serialization = {}
        for attr in self.SERIALIZED_ATTRS:
            if attr in self._shape_model:
                serialization[attr] = model[attr]
        # For consistency, locationName is renamed to just 'name'.
        if 'locationName' in serialization:
            serialization['name'] = serialization.pop('locationName')
        return serialization
    @CachedProperty
    def metadata(self):
        """Metadata about the shape.
        This contains optional information about the shape, including:
            * min
            * max
            * enum
            * sensitive
            * required
        :rtype: dict
        :return: Metadata about the shape.
        """
        model = self._shape_model
        metadata = {}
        for attr in self.METADATA_ATTRS:
            if attr in self._shape_model:
                metadata[attr] = model[attr]
        return metadata
    @CachedProperty
    def required_members(self):
        """A list of members that are required.
        A structure shape can define members that are required.
        This value will return a list of required members. If there
        are no required members an empty list is returned.
        """
        return self.metadata.get('required', [])
    def _resolve_shape_ref(self, shape_ref):
        # Delegate to the resolver supplied at construction time (or to the
        # UnresolvableShapeMap placeholder for scalar shapes).
        return self._shape_resolver.resolve_shape_ref(shape_ref)
    def __repr__(self):
        return "<%s(%s)>" % (self.__class__.__name__,
                             self.name)
class StructureShape(Shape):
    @CachedProperty
    def members(self):
        """An ordered mapping of member name to resolved Shape object.

        The underlying model looks like::

            'members': {
                'MemberName': {'shape': 'shapeName'},
                'MemberName2': {'shape': 'shapeName'},
            }
        """
        resolved = self.MAP_TYPE()
        for member_name, shape_ref in self._shape_model['members'].items():
            resolved[member_name] = self._resolve_shape_ref(shape_ref)
        return resolved
class ListShape(Shape):
    @CachedProperty
    def member(self):
        """The resolved Shape of each element of the list."""
        member_ref = self._shape_model['member']
        return self._resolve_shape_ref(member_ref)
class MapShape(Shape):
    @CachedProperty
    def key(self):
        """The resolved Shape of the map's keys."""
        key_ref = self._shape_model['key']
        return self._resolve_shape_ref(key_ref)

    @CachedProperty
    def value(self):
        """The resolved Shape of the map's values."""
        value_ref = self._shape_model['value']
        return self._resolve_shape_ref(value_ref)
class ServiceModel(object):
    """
    :ivar service_description: The parsed service description dictionary.
    """
    # Any type not in this mapping will default to the Shape class.
    SHAPE_CLASSES = {
        'structure': StructureShape,
        'list': ListShape,
        'map': MapShape,
    }

    def __init__(self, service_description, service_name=None):
        """
        :type service_description: dict
        :param service_description: The service description model. This value
            is obtained from a botocore.loader.Loader, or from directly loading
            the file yourself::

                service_description = json.load(
                    open('/path/to/service-description-model.json'))
                model = ServiceModel(service_description)

        :type service_name: str
        :param service_name: The name of the service. Normally this is
            the endpoint prefix defined in the service_description. However,
            you can override this value to provide a more convenient name.
            This is done in a few places in botocore (ses instead of email,
            emr instead of elasticmapreduce). If this value is not provided,
            it will default to the endpointPrefix defined in the model.
        """
        self._service_description = service_description
        # We want clients to be able to access metadata directly.
        self.metadata = service_description.get('metadata', {})
        self._shape_resolver = ShapeResolver(
            service_description.get('shapes', {}))
        self._signature_version = NOT_SET
        self._service_name = service_name

    def shape_for(self, shape_name, member_traits=None):
        """Return the Shape registered under ``shape_name``, optionally
        merged with ``member_traits``."""
        return self._shape_resolver.get_shape_by_name(
            shape_name, member_traits)

    def resolve_shape_ref(self, shape_ref):
        """Resolve a shape reference dict (``{'shape': ...}``) to a Shape."""
        return self._shape_resolver.resolve_shape_ref(shape_ref)

    def operation_model(self, operation_name):
        """Return the OperationModel for ``operation_name``.

        :raises OperationNotFoundError: If the operation is not defined
            in the service description.
        """
        try:
            model = self._service_description['operations'][operation_name]
        except KeyError:
            raise OperationNotFoundError(operation_name)
        return OperationModel(model, self)

    @CachedProperty
    def operation_names(self):
        """A list of all operation names defined for this service."""
        return list(self._service_description.get('operations', []))

    @CachedProperty
    def service_name(self):
        """The name of the service.
        This defaults to the endpointPrefix defined in the service model.
        However, this value can be overriden when a ``ServiceModel`` is
        created. If a service_name was not provided when the ``ServiceModel``
        was created and if there is no endpointPrefix defined in the
        service model, then an ``UndefinedModelAttributeError`` exception
        will be raised.
        """
        if self._service_name is not None:
            return self._service_name
        else:
            return self.endpoint_prefix

    @CachedProperty
    def signing_name(self):
        """The name to use when computing signatures.
        If the model does not define a signing name, this
        value will be the endpoint prefix defined in the model.
        """
        signing_name = self.metadata.get('signingName')
        if signing_name is None:
            signing_name = self.endpoint_prefix
        return signing_name

    @CachedProperty
    def api_version(self):
        """The apiVersion string from the model metadata."""
        return self._get_metadata_property('apiVersion')

    @CachedProperty
    def protocol(self):
        """The wire protocol name from the model metadata."""
        return self._get_metadata_property('protocol')

    @CachedProperty
    def endpoint_prefix(self):
        """The endpointPrefix from the model metadata."""
        return self._get_metadata_property('endpointPrefix')

    def _get_metadata_property(self, name):
        # Surface a model-specific error rather than a bare KeyError so
        # callers get a clear message about what the description lacks.
        try:
            return self.metadata[name]
        except KeyError:
            # Bug fix: message previously read "of the the model".
            raise UndefinedModelAttributeError(
                '"%s" not defined in the metadata of the model: %s' %
                (name, self))

    # Signature version is one of the rare properties
    # than can be modified so a CachedProperty is not used here.
    @property
    def signature_version(self):
        if self._signature_version is NOT_SET:
            signature_version = self.metadata.get('signatureVersion')
            self._signature_version = signature_version
        return self._signature_version

    @signature_version.setter
    def signature_version(self, value):
        self._signature_version = value
class OperationModel(object):
    def __init__(self, operation_model, service_model):
        """
        :type operation_model: dict
        :param operation_model: The operation model. This comes from the
            service model, and is the value associated with the operation
            name in the service model (i.e ``model['operations'][op_name]``).

        :type service_model: botocore.model.ServiceModel
        :param service_model: The service model associated with the operation.
        """
        self._operation_model = operation_model
        self._service_model = service_model
        # Clients can access '.name' to get the operation name
        # and '.metadata' to get the top level metadata of the service.
        self.name = operation_model.get('name')
        self.metadata = service_model.metadata
        self.http = operation_model.get('http', {})

    @CachedProperty
    def input_shape(self):
        """The resolved input Shape, or None if the operation takes no input."""
        if 'input' in self._operation_model:
            return self._service_model.resolve_shape_ref(
                self._operation_model['input'])
        # Some operations do not accept any input and define no input shape.
        return None

    @CachedProperty
    def output_shape(self):
        """The resolved output Shape, or None if the operation has no output."""
        if 'output' in self._operation_model:
            return self._service_model.resolve_shape_ref(
                self._operation_model['output'])
        # Some operations define no output shape; None indicates the
        # operation has no expected output.
        return None

    @CachedProperty
    def has_streaming_output(self):
        """True when the output payload member is a streaming blob."""
        shape = self.output_shape
        if shape is None:
            return False
        payload_name = shape.serialization.get('payload')
        if payload_name is None:
            return False
        return shape.members[payload_name].type_name == 'blob'
class ShapeResolver(object):
    """Resolves shape references."""
    # Any type not in this mapping will default to the Shape class.
    SHAPE_CLASSES = {
        'structure': StructureShape,
        'list': ListShape,
        'map': MapShape,
    }

    def __init__(self, shape_map):
        self._shape_map = shape_map
        self._shape_cache = {}

    def get_shape_by_name(self, shape_name, member_traits=None):
        """Return a Shape instance for ``shape_name``.

        :raises NoShapeFoundError: if the name is not in the shape map.
        :raises InvalidShapeError: if the shape model lacks a 'type' key.
        """
        if shape_name not in self._shape_map:
            raise NoShapeFoundError(shape_name)
        shape_model = self._shape_map[shape_name]
        try:
            shape_cls = self.SHAPE_CLASSES.get(shape_model['type'], Shape)
        except KeyError:
            raise InvalidShapeError("Shape is missing required key 'type': %s"
                                    % shape_model)
        if member_traits:
            # Merge the traits over a copy so the shared map stays pristine.
            shape_model = shape_model.copy()
            shape_model.update(member_traits)
        return shape_cls(shape_name, shape_model, self)

    def resolve_shape_ref(self, shape_ref):
        """Resolve a shape reference into a Shape object.

        A shape_ref is a dict with a 'shape' key naming a shape, plus any
        additional member traits that are merged over the shape definition,
        e.g. ``{"shape": "StringType", "locationName": "Foobar"}``.
        """
        if len(shape_ref) == 1 and 'shape' in shape_ref:
            # Plain reference with no member traits; skip the .copy()
            # since this is the common case.
            return self.get_shape_by_name(shape_ref['shape'])
        member_traits = shape_ref.copy()
        try:
            shape_name = member_traits.pop('shape')
        except KeyError:
            raise InvalidShapeReferenceError(
                "Invalid model, missing shape reference: %s" % shape_ref)
        return self.get_shape_by_name(shape_name, member_traits)
class UnresolvableShapeMap(object):
    """A ShapeResolver that raises ValueError when shapes are resolved.

    Used as the placeholder resolver for scalar shapes, which never need
    to resolve member shapes.
    """
    def get_shape_by_name(self, shape_name, member_traits=None):
        # Bug fix: the '%s' placeholder was never interpolated, so the
        # error message literally contained '%s' instead of the name.
        raise ValueError("Attempted to lookup shape '%s', but no shape "
                         "map was provided." % shape_name)

    def resolve_shape_ref(self, shape_ref):
        # Bug fix: same missing interpolation as above.
        raise ValueError("Attempted to resolve shape '%s', but no shape "
                         "map was provided." % shape_ref)
class DenormalizedStructureBuilder(object):
    """Build a StructureShape from a denormalized model.

    This is a convenience builder class that makes it easy to construct
    ``StructureShape``s based on a denormalized model.
    It will handle the details of creating unique shape names and creating
    the appropriate shape map needed by the ``StructureShape`` class.

    Example usage::

        builder = DenormalizedStructureBuilder()
        shape = builder.with_members({
            'A': {
                'type': 'structure',
                'members': {
                    'B': {
                        'type': 'structure',
                        'members': {
                            'C': {
                                'type': 'string',
                            }
                        }
                    }
                }
            }
        }).build_model()
        # ``shape`` is now an instance of botocore.model.StructureShape

    """
    def __init__(self, name=None):
        self.members = {}
        self._name_generator = ShapeNameGenerator()
        # Robustness: default to an empty member map so build_model() on a
        # builder that never had with_members() called yields an empty
        # structure instead of raising AttributeError.
        self._members = {}
        # Bug fix: the original only assigned ``self.name`` inside the
        # ``name is None`` branch, so passing an explicit name left the
        # attribute undefined and build_model() raised AttributeError.
        if name is None:
            name = self._name_generator.new_shape_name('structure')
        self.name = name

    def with_members(self, members):
        """
        :type members: dict
        :param members: The denormalized members.
        :return: self
        """
        self._members = members
        return self

    def build_model(self):
        """Build the model based on the provided members.

        :rtype: botocore.model.StructureShape
        :return: The built StructureShape object.
        """
        shapes = {}
        denormalized = {
            'type': 'structure',
            'members': self._members,
        }
        self._build_model(denormalized, shapes, self.name)
        resolver = ShapeResolver(shape_map=shapes)
        return StructureShape(shape_name=self.name,
                              shape_model=shapes[self.name],
                              shape_resolver=resolver)

    def _build_model(self, model, shapes, shape_name):
        # Recursively normalize ``model`` into ``shapes`` under
        # ``shape_name``, dispatching on the declared shape type.
        if model['type'] == 'structure':
            shapes[shape_name] = self._build_structure(model, shapes)
        elif model['type'] == 'list':
            shapes[shape_name] = self._build_list(model, shapes)
        elif model['type'] == 'map':
            shapes[shape_name] = self._build_map(model, shapes)
        elif model['type'] in ['string', 'integer', 'boolean', 'blob', 'float',
                               'timestamp', 'long', 'double', 'char']:
            shapes[shape_name] = self._build_scalar(model)
        else:
            raise InvalidShapeError("Unknown shape type: %s" % model['type'])

    def _build_structure(self, model, shapes):
        # Normalize a structure: each member becomes a {'shape': name} ref.
        members = {}
        shape = self._build_initial_shape(model)
        shape['members'] = members
        for name, member_model in model['members'].items():
            member_shape_name = self._get_shape_name(member_model)
            members[name] = {'shape': member_shape_name}
            self._build_model(member_model, shapes, member_shape_name)
        return shape

    def _build_list(self, model, shapes):
        # Normalize a list: the element model becomes a named shape ref.
        member_shape_name = self._get_shape_name(model)
        shape = self._build_initial_shape(model)
        shape['member'] = {'shape': member_shape_name}
        self._build_model(model['member'], shapes, member_shape_name)
        return shape

    def _build_map(self, model, shapes):
        # Normalize a map: key and value models become named shape refs.
        key_shape_name = self._get_shape_name(model['key'])
        value_shape_name = self._get_shape_name(model['value'])
        shape = self._build_initial_shape(model)
        shape['key'] = {'shape': key_shape_name}
        shape['value'] = {'shape': value_shape_name}
        self._build_model(model['key'], shapes, key_shape_name)
        self._build_model(model['value'], shapes, value_shape_name)
        return shape

    def _build_initial_shape(self, model):
        # Carry over the type plus optional traits common to all shapes.
        shape = {
            'type': model['type'],
        }
        if 'documentation' in model:
            shape['documentation'] = model['documentation']
        if 'enum' in model:
            shape['enum'] = model['enum']
        return shape

    def _build_scalar(self, model):
        return self._build_initial_shape(model)

    def _get_shape_name(self, model):
        # Honor an explicit 'shape_name'; otherwise generate a unique one.
        if 'shape_name' in model:
            return model['shape_name']
        else:
            return self._name_generator.new_shape_name(model['type'])
class ShapeNameGenerator(object):
    """Generate unique shape names for a type.

    This class can be used in conjunction with the
    DenormalizedStructureBuilder to generate unique shape names for a
    given type.
    """
    def __init__(self):
        # Per-type counters; unseen types implicitly start at 0.
        self._name_cache = defaultdict(int)

    def new_shape_name(self, type_name):
        """Generate a unique shape name.

        Guarantees a unique name each time it is called with the same
        type, e.g. ``'structure'`` yields ``'StructureType1'``, then
        ``'StructureType2'``, and ``'list'`` yields ``'ListType1'``,
        ``'ListType2'``, ...

        :type type_name: string
        :param type_name: The type name (structure, list, map, string, etc.)

        :rtype: string
        :return: A unique shape name for the given type
        """
        self._name_cache[type_name] += 1
        return '%sType%s' % (type_name.capitalize(),
                             self._name_cache[type_name])
| {
"repo_name": "gdm/aws-cfn-resource-bridge",
"path": "aws/cfn/bridge/vendored/botocore/model.py",
"copies": "3",
"size": "20135",
"license": "apache-2.0",
"hash": 5658549569592250000,
"line_mean": 33.3015332198,
"line_max": 79,
"alpha_frac": 0.5952818475,
"autogenerated": false,
"ratio": 4.471463468798579,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6566745316298579,
"avg_score": null,
"num_lines": null
} |
"""Abstraction to send a TunnelingRequest and wait for TunnelingResponse."""
from __future__ import annotations
from typing import TYPE_CHECKING
from xknx.knxip import (
CEMIFrame,
CEMIMessageCode,
KNXIPFrame,
TunnellingAck,
TunnellingRequest,
)
from .request_response import RequestResponse
if TYPE_CHECKING:
from xknx.io.udp_client import UDPClient
from xknx.telegram import IndividualAddress, Telegram
from xknx.xknx import XKNX
class Tunnelling(RequestResponse):
    """Class to TunnelingRequest and wait for TunnelingResponse."""

    def __init__(
        self,
        xknx: XKNX,
        udp_client: UDPClient,
        telegram: Telegram,
        src_address: IndividualAddress,
        sequence_counter: int,
        communication_channel_id: int,
    ):
        """Initialize Tunnelling class."""
        self.xknx = xknx
        self.udp_client = udp_client
        self.src_address = src_address
        super().__init__(xknx, self.udp_client, TunnellingAck)
        self.telegram = telegram
        self.sequence_counter = sequence_counter
        self.communication_channel_id = communication_channel_id

    def create_knxipframe(self) -> KNXIPFrame:
        """Create KNX/IP Frame object to be sent to device."""
        # Wrap the telegram in a cEMI frame, then in a tunnelling request.
        request = TunnellingRequest(
            self.xknx,
            communication_channel_id=self.communication_channel_id,
            sequence_counter=self.sequence_counter,
            cemi=CEMIFrame.init_from_telegram(
                self.xknx,
                telegram=self.telegram,
                code=CEMIMessageCode.L_DATA_REQ,
                src_addr=self.src_address,
            ),
        )
        return KNXIPFrame.init_from_body(request)
| {
"repo_name": "XKNX/xknx",
"path": "xknx/io/request_response/tunnelling.py",
"copies": "1",
"size": "1752",
"license": "mit",
"hash": 7131248798359292000,
"line_mean": 28.6949152542,
"line_max": 76,
"alpha_frac": 0.6484018265,
"autogenerated": false,
"ratio": 3.5609756097560976,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9709377436256097,
"avg_score": 0,
"num_lines": 59
} |
"""Abstraction to send ConnectonStateRequest and wait for ConnectionStateResponse."""
from __future__ import annotations
from typing import TYPE_CHECKING
from xknx.io.const import CONNECTIONSTATE_REQUEST_TIMEOUT
from xknx.knxip import HPAI, ConnectionStateRequest, ConnectionStateResponse, KNXIPFrame
from .request_response import RequestResponse
if TYPE_CHECKING:
from xknx.io.udp_client import UDPClient
from xknx.xknx import XKNX
class ConnectionState(RequestResponse):
    """Class to send ConnectionStateRequest and wait for ConnectionStateResponse."""

    def __init__(
        self,
        xknx: XKNX,
        udp_client: UDPClient,
        communication_channel_id: int,
        route_back: bool = False,
    ):
        """Initialize ConnectionState class."""
        self.udp_client = udp_client
        self.route_back = route_back
        super().__init__(
            xknx,
            self.udp_client,
            ConnectionStateResponse,
            timeout_in_seconds=CONNECTIONSTATE_REQUEST_TIMEOUT,
        )
        self.communication_channel_id = communication_channel_id

    def create_knxipframe(self) -> KNXIPFrame:
        """Create KNX/IP Frame object to be sent to device."""
        if self.route_back:
            # Route-back: leave the HPAI empty so the server replies to the
            # source address of the request.
            endpoint = HPAI()
        else:
            # Bug fix: this previously read ``self.udpclient`` (an attribute
            # that is never set); __init__ assigns ``self.udp_client``, which
            # is also what the sibling Connect class uses.
            (local_addr, local_port) = self.udp_client.getsockname()
            endpoint = HPAI(ip_addr=local_addr, port=local_port)
        connectionstate_request = ConnectionStateRequest(
            self.xknx,
            communication_channel_id=self.communication_channel_id,
            control_endpoint=endpoint,
        )
        return KNXIPFrame.init_from_body(connectionstate_request)
| {
"repo_name": "XKNX/xknx",
"path": "xknx/io/request_response/connectionstate.py",
"copies": "1",
"size": "1686",
"license": "mit",
"hash": -1829749085372579600,
"line_mean": 33.4081632653,
"line_max": 88,
"alpha_frac": 0.662514828,
"autogenerated": false,
"ratio": 4.052884615384615,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007095637538303238,
"num_lines": 49
} |
"""Abstraction to send ConnectRequest and wait for ConnectResponse."""
from __future__ import annotations
from typing import TYPE_CHECKING
from xknx.knxip import (
HPAI,
ConnectRequest,
ConnectRequestType,
ConnectResponse,
KNXIPFrame,
)
from .request_response import RequestResponse
if TYPE_CHECKING:
from xknx.io.udp_client import UDPClient
from xknx.xknx import XKNX
class Connect(RequestResponse):
    """Class to send a ConnectRequest and wait for ConnectResponse.."""

    def __init__(self, xknx: XKNX, udp_client: UDPClient, route_back: bool = False):
        """Initialize Connect class."""
        self.udp_client = udp_client
        self.route_back = route_back
        super().__init__(xknx, self.udp_client, ConnectResponse)
        self.communication_channel = 0
        self.identifier = 0

    def create_knxipframe(self) -> KNXIPFrame:
        """Create KNX/IP Frame object to be sent to device."""
        # Control endpoint and data endpoint share the same udp connection.
        if self.route_back:
            endpoint = HPAI()
        else:
            local_addr, local_port = self.udp_client.getsockname()
            endpoint = HPAI(ip_addr=local_addr, port=local_port)
        return KNXIPFrame.init_from_body(
            ConnectRequest(
                self.xknx,
                request_type=ConnectRequestType.TUNNEL_CONNECTION,
                control_endpoint=endpoint,
                data_endpoint=endpoint,
            )
        )

    def on_success_hook(self, knxipframe: KNXIPFrame) -> None:
        """Set communication channel and identifier after having received a valid answer."""
        assert isinstance(knxipframe.body, ConnectResponse)
        assert isinstance(knxipframe.body.identifier, int)
        self.communication_channel = knxipframe.body.communication_channel
        self.identifier = knxipframe.body.identifier
| {
"repo_name": "XKNX/xknx",
"path": "xknx/io/request_response/connect.py",
"copies": "1",
"size": "1907",
"license": "mit",
"hash": -4680766646573685000,
"line_mean": 34.9811320755,
"line_max": 92,
"alpha_frac": 0.6717357105,
"autogenerated": false,
"ratio": 3.9238683127572016,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5095604023257202,
"avg_score": null,
"num_lines": null
} |
"""Abstraction to send DisconnectRequest and wait for DisconnectResponse."""
from __future__ import annotations
from typing import TYPE_CHECKING
from xknx.knxip import HPAI, DisconnectRequest, DisconnectResponse, KNXIPFrame
from .request_response import RequestResponse
if TYPE_CHECKING:
from xknx.io.udp_client import UDPClient
from xknx.xknx import XKNX
class Disconnect(RequestResponse):
    """Class to send a DisconnectRequest and wait for a DisconnectResponse."""

    def __init__(
        self,
        xknx: XKNX,
        udp_client: UDPClient,
        communication_channel_id: int,
        route_back: bool = False,
    ):
        """Initialize Disconnect class.

        communication_channel_id: id of the tunnelling channel to tear down.
        route_back: if True, announce an empty endpoint so the gateway
            replies to the request's source address.
        """
        self.xknx = xknx
        self.udp_client = udp_client
        self.route_back = route_back
        super().__init__(xknx, self.udp_client, DisconnectResponse)
        self.communication_channel_id = communication_channel_id

    def create_knxipframe(self) -> KNXIPFrame:
        """Create KNX/IP Frame object to be sent to device."""
        if self.route_back:
            endpoint = HPAI()
        else:
            # Bug fix: the attribute is ``udp_client`` — the previous
            # ``self.udpclient`` raised AttributeError whenever
            # route_back was False.
            (local_addr, local_port) = self.udp_client.getsockname()
            endpoint = HPAI(ip_addr=local_addr, port=local_port)
        disconnect_request = DisconnectRequest(
            self.xknx,
            communication_channel_id=self.communication_channel_id,
            control_endpoint=endpoint,
        )
        return KNXIPFrame.init_from_body(disconnect_request)
| {
"repo_name": "XKNX/xknx",
"path": "xknx/io/request_response/disconnect.py",
"copies": "1",
"size": "1488",
"license": "mit",
"hash": -4916549600577353000,
"line_mean": 32.8181818182,
"line_max": 78,
"alpha_frac": 0.6572580645,
"autogenerated": false,
"ratio": 3.8449612403100777,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5002219304810077,
"avg_score": null,
"num_lines": null
} |
"""Abstract linear algebra library.
This module defines a class hierarchy that implements a kind of "lazy"
matrix representation, called the ``LinearOperator``. It can be used to do
linear algebra with extremely large sparse or structured matrices, without
representing those explicitly in memory. Such matrices can be added,
multiplied, transposed, etc.
As a motivating example, suppose you want have a matrix where almost all of
the elements have the value one. The standard sparse matrix representation
skips the storage of zeros, but not ones. By contrast, a LinearOperator is
able to represent such matrices efficiently. First, we need a compact way to
represent an all-ones matrix::
>>> import numpy as np
>>> class Ones(LinearOperator):
... def __init__(self, shape):
... super().__init__(dtype=None, shape=shape)
... def _matvec(self, x):
... return np.repeat(x.sum(), self.shape[0])
Instances of this class emulate ``np.ones(shape)``, but using a constant
amount of storage, independent of ``shape``. The ``_matvec`` method specifies
how this linear operator multiplies with (operates on) a vector. We can now
add this operator to a sparse matrix that stores only offsets from one::
>>> from scipy.sparse import csr_matrix
>>> offsets = csr_matrix([[1, 0, 2], [0, -1, 0], [0, 0, 3]])
>>> A = aslinearoperator(offsets) + Ones(offsets.shape)
>>> A.dot([1, 2, 3])
array([13, 4, 15])
The result is the same as that given by its dense, explicitly-stored
counterpart::
>>> (np.ones(A.shape, A.dtype) + offsets.toarray()).dot([1, 2, 3])
array([13, 4, 15])
Several algorithms in the ``scipy.sparse`` library are able to operate on
``LinearOperator`` instances.
"""
import warnings
import numpy as np
from scipy.sparse import isspmatrix
from scipy.sparse.sputils import isshape, isintlike, asmatrix, is_pydata_spmatrix
__all__ = ['LinearOperator', 'aslinearoperator']
class LinearOperator:
    """Common interface for performing matrix vector products

    Many iterative methods (e.g. cg, gmres) do not need to know the
    individual entries of a matrix to solve a linear system A*x=b.
    Such solvers only require the computation of matrix vector
    products, A*v where v is a dense vector.  This class serves as
    an abstract interface between iterative solvers and matrix-like
    objects.

    To construct a concrete LinearOperator, either pass appropriate
    callables to the constructor of this class, or subclass it.

    A subclass must implement either one of the methods ``_matvec``
    and ``_matmat``, and the attributes/properties ``shape`` (pair of
    integers) and ``dtype`` (may be None). It may call the ``__init__``
    on this class to have these attributes validated. Implementing
    ``_matvec`` automatically implements ``_matmat`` (using a naive
    algorithm) and vice-versa.

    Optionally, a subclass may implement ``_rmatvec`` or ``_adjoint``
    to implement the Hermitian adjoint (conjugate transpose). As with
    ``_matvec`` and ``_matmat``, implementing either ``_rmatvec`` or
    ``_adjoint`` implements the other automatically. Implementing
    ``_adjoint`` is preferable; ``_rmatvec`` is mostly there for
    backwards compatibility.

    Parameters
    ----------
    shape : tuple
        Matrix dimensions (M, N).
    matvec : callable f(v)
        Returns A * v.
    rmatvec : callable f(v)
        Returns A^H * v, where A^H is the conjugate transpose of A.
    matmat : callable f(V)
        Returns A * V, where V is a dense matrix with dimensions (N, K).
    dtype : dtype
        Data type of the matrix.
    rmatmat : callable f(V)
        Returns A^H * V, where V is a dense matrix with dimensions (M, K).

    Attributes
    ----------
    args : tuple
        For linear operators describing products etc. of other linear
        operators, the operands of the binary operation.
    ndim : int
        Number of dimensions (this is always 2)

    See Also
    --------
    aslinearoperator : Construct LinearOperators

    Notes
    -----
    The user-defined matvec() function must properly handle the case
    where v has shape (N,) as well as the (N,1) case.  The shape of
    the return type is handled internally by LinearOperator.

    LinearOperator instances can also be multiplied, added with each
    other and exponentiated, all lazily: the result of these operations
    is always a new, composite LinearOperator, that defers linear
    operations to the original operators and combines the results.

    More details regarding how to subclass a LinearOperator and several
    examples of concrete LinearOperator instances can be found in the
    external project `PyLops <https://pylops.readthedocs.io>`_.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse.linalg import LinearOperator
    >>> def mv(v):
    ...     return np.array([2*v[0], 3*v[1]])
    ...
    >>> A = LinearOperator((2,2), matvec=mv)
    >>> A
    <2x2 _CustomLinearOperator with dtype=float64>
    >>> A.matvec(np.ones(2))
    array([ 2.,  3.])
    >>> A * np.ones(2)
    array([ 2.,  3.])

    """

    # LinearOperators always model 2-d (matrix-like) objects.
    ndim = 2

    def __new__(cls, *args, **kwargs):
        # Calling LinearOperator(...) directly constructs a
        # _CustomLinearOperator from user-supplied callables; subclasses
        # are instantiated normally, with a sanity check that they
        # override at least one multiplication primitive.
        if cls is LinearOperator:
            # Operate as _CustomLinearOperator factory.
            return super(LinearOperator, cls).__new__(_CustomLinearOperator)
        else:
            obj = super(LinearOperator, cls).__new__(cls)
            if (type(obj)._matvec == LinearOperator._matvec
                    and type(obj)._matmat == LinearOperator._matmat):
                warnings.warn("LinearOperator subclass should implement"
                              " at least one of _matvec and _matmat.",
                              category=RuntimeWarning, stacklevel=2)
            return obj

    def __init__(self, dtype, shape):
        """Initialize this LinearOperator.

        To be called by subclasses. ``dtype`` may be None; ``shape`` should
        be convertible to a length-2 tuple.
        """
        if dtype is not None:
            dtype = np.dtype(dtype)
        shape = tuple(shape)
        if not isshape(shape):
            raise ValueError("invalid shape %r (must be 2-d)" % (shape,))
        self.dtype = dtype
        self.shape = shape

    def _init_dtype(self):
        """Called from subclasses at the end of the __init__ routine.
        """
        if self.dtype is None:
            # Infer the dtype by applying the operator to a zero vector.
            v = np.zeros(self.shape[-1])
            self.dtype = np.asarray(self.matvec(v)).dtype

    def _matmat(self, X):
        """Default matrix-matrix multiplication handler.

        Falls back on the user-defined _matvec method, so defining that will
        define matrix multiplication (though in a very suboptimal way).
        """
        # One matvec per column of X, results stacked horizontally.
        return np.hstack([self.matvec(col.reshape(-1,1)) for col in X.T])

    def _matvec(self, x):
        """Default matrix-vector multiplication handler.

        If self is a linear operator of shape (M, N), then this method will
        be called on a shape (N,) or (N, 1) ndarray, and should return a
        shape (M,) or (M, 1) ndarray.

        This default implementation falls back on _matmat, so defining that
        will define matrix-vector multiplication as well.
        """
        return self.matmat(x.reshape(-1, 1))

    def matvec(self, x):
        """Matrix-vector multiplication.

        Performs the operation y=A*x where A is an MxN linear
        operator and x is a column vector or 1-d array.

        Parameters
        ----------
        x : {matrix, ndarray}
            An array with shape (N,) or (N,1).

        Returns
        -------
        y : {matrix, ndarray}
            A matrix or ndarray with shape (M,) or (M,1) depending
            on the type and shape of the x argument.

        Notes
        -----
        This matvec wraps the user-specified matvec routine or overridden
        _matvec method to ensure that y has the correct shape and type.

        """
        x = np.asanyarray(x)
        M,N = self.shape
        if x.shape != (N,) and x.shape != (N,1):
            raise ValueError('dimension mismatch')
        y = self._matvec(x)
        # Mirror the container type of the input (np.matrix vs ndarray).
        if isinstance(x, np.matrix):
            y = asmatrix(y)
        else:
            y = np.asarray(y)
        # Normalize the output shape to match the input's dimensionality.
        if x.ndim == 1:
            y = y.reshape(M)
        elif x.ndim == 2:
            y = y.reshape(M,1)
        else:
            raise ValueError('invalid shape returned by user-defined matvec()')
        return y

    def rmatvec(self, x):
        """Adjoint matrix-vector multiplication.

        Performs the operation y = A^H * x where A is an MxN linear
        operator and x is a column vector or 1-d array.

        Parameters
        ----------
        x : {matrix, ndarray}
            An array with shape (M,) or (M,1).

        Returns
        -------
        y : {matrix, ndarray}
            A matrix or ndarray with shape (N,) or (N,1) depending
            on the type and shape of the x argument.

        Notes
        -----
        This rmatvec wraps the user-specified rmatvec routine or overridden
        _rmatvec method to ensure that y has the correct shape and type.

        """
        x = np.asanyarray(x)
        M,N = self.shape
        if x.shape != (M,) and x.shape != (M,1):
            raise ValueError('dimension mismatch')
        y = self._rmatvec(x)
        if isinstance(x, np.matrix):
            y = asmatrix(y)
        else:
            y = np.asarray(y)
        if x.ndim == 1:
            y = y.reshape(N)
        elif x.ndim == 2:
            y = y.reshape(N,1)
        else:
            raise ValueError('invalid shape returned by user-defined rmatvec()')
        return y

    def _rmatvec(self, x):
        """Default implementation of _rmatvec; defers to adjoint."""
        if type(self)._adjoint == LinearOperator._adjoint:
            # _adjoint not overridden, prevent infinite recursion
            raise NotImplementedError
        else:
            return self.H.matvec(x)

    def matmat(self, X):
        """Matrix-matrix multiplication.

        Performs the operation y=A*X where A is an MxN linear
        operator and X dense N*K matrix or ndarray.

        Parameters
        ----------
        X : {matrix, ndarray}
            An array with shape (N,K).

        Returns
        -------
        Y : {matrix, ndarray}
            A matrix or ndarray with shape (M,K) depending on
            the type of the X argument.

        Notes
        -----
        This matmat wraps any user-specified matmat routine or overridden
        _matmat method to ensure that y has the correct type.

        """
        X = np.asanyarray(X)
        if X.ndim != 2:
            raise ValueError('expected 2-d ndarray or matrix, not %d-d'
                             % X.ndim)
        if X.shape[0] != self.shape[1]:
            raise ValueError('dimension mismatch: %r, %r'
                             % (self.shape, X.shape))
        Y = self._matmat(X)
        if isinstance(Y, np.matrix):
            Y = asmatrix(Y)
        return Y

    def rmatmat(self, X):
        """Adjoint matrix-matrix multiplication.

        Performs the operation y = A^H * x where A is an MxN linear
        operator and x is a column vector or 1-d array, or 2-d array.
        The default implementation defers to the adjoint.

        Parameters
        ----------
        X : {matrix, ndarray}
            A matrix or 2D array.

        Returns
        -------
        Y : {matrix, ndarray}
            A matrix or 2D array depending on the type of the input.

        Notes
        -----
        This rmatmat wraps the user-specified rmatmat routine.

        """
        X = np.asanyarray(X)
        if X.ndim != 2:
            raise ValueError('expected 2-d ndarray or matrix, not %d-d'
                             % X.ndim)
        if X.shape[0] != self.shape[0]:
            raise ValueError('dimension mismatch: %r, %r'
                             % (self.shape, X.shape))
        Y = self._rmatmat(X)
        if isinstance(Y, np.matrix):
            Y = asmatrix(Y)
        return Y

    def _rmatmat(self, X):
        """Default implementation of _rmatmat defers to rmatvec or adjoint."""
        if type(self)._adjoint == LinearOperator._adjoint:
            return np.hstack([self.rmatvec(col.reshape(-1, 1)) for col in X.T])
        else:
            return self.H.matmat(X)

    def __call__(self, x):
        return self*x

    def __mul__(self, x):
        return self.dot(x)

    def dot(self, x):
        """Matrix-matrix or matrix-vector multiplication.

        Parameters
        ----------
        x : array_like
            1-d or 2-d array, representing a vector or matrix.

        Returns
        -------
        Ax : array
            1-d or 2-d array (depending on the shape of x) that represents
            the result of applying this linear operator on x.

        """
        # Dispatch on the operand: operator -> lazy product,
        # scalar -> lazy scaling, array -> actual multiplication.
        if isinstance(x, LinearOperator):
            return _ProductLinearOperator(self, x)
        elif np.isscalar(x):
            return _ScaledLinearOperator(self, x)
        else:
            x = np.asarray(x)
            if x.ndim == 1 or x.ndim == 2 and x.shape[1] == 1:
                return self.matvec(x)
            elif x.ndim == 2:
                return self.matmat(x)
            else:
                raise ValueError('expected 1-d or 2-d array or matrix, got %r'
                                 % x)

    def __matmul__(self, other):
        # The @ operator explicitly disallows scalars (as np.matmul does).
        if np.isscalar(other):
            raise ValueError("Scalar operands are not allowed, "
                             "use '*' instead")
        return self.__mul__(other)

    def __rmatmul__(self, other):
        if np.isscalar(other):
            raise ValueError("Scalar operands are not allowed, "
                             "use '*' instead")
        return self.__rmul__(other)

    def __rmul__(self, x):
        if np.isscalar(x):
            return _ScaledLinearOperator(self, x)
        else:
            return NotImplemented

    def __pow__(self, p):
        if np.isscalar(p):
            return _PowerLinearOperator(self, p)
        else:
            return NotImplemented

    def __add__(self, x):
        if isinstance(x, LinearOperator):
            return _SumLinearOperator(self, x)
        else:
            return NotImplemented

    def __neg__(self):
        return _ScaledLinearOperator(self, -1)

    def __sub__(self, x):
        return self.__add__(-x)

    def __repr__(self):
        M,N = self.shape
        if self.dtype is None:
            dt = 'unspecified dtype'
        else:
            dt = 'dtype=' + str(self.dtype)
        return '<%dx%d %s with %s>' % (M, N, self.__class__.__name__, dt)

    def adjoint(self):
        """Hermitian adjoint.

        Returns the Hermitian adjoint of self, aka the Hermitian
        conjugate or Hermitian transpose. For a complex matrix, the
        Hermitian adjoint is equal to the conjugate transpose.

        Can be abbreviated self.H instead of self.adjoint().

        Returns
        -------
        A_H : LinearOperator
            Hermitian adjoint of self.
        """
        return self._adjoint()

    H = property(adjoint)

    def transpose(self):
        """Transpose this linear operator.

        Returns a LinearOperator that represents the transpose of this one.
        Can be abbreviated self.T instead of self.transpose().
        """
        return self._transpose()

    T = property(transpose)

    def _adjoint(self):
        """Default implementation of _adjoint; defers to rmatvec."""
        return _AdjointLinearOperator(self)

    def _transpose(self):
        """ Default implementation of _transpose; defers to rmatvec + conj"""
        return _TransposedLinearOperator(self)
class _CustomLinearOperator(LinearOperator):
    """Linear operator defined in terms of user-specified operations."""

    def __init__(self, shape, matvec, rmatvec=None, matmat=None,
                 dtype=None, rmatmat=None):
        super().__init__(dtype, shape)
        self.args = ()
        # Keep the user callables; None marks an operation as "not provided".
        self.__matvec_impl = matvec
        self.__rmatvec_impl = rmatvec
        self.__rmatmat_impl = rmatmat
        self.__matmat_impl = matmat
        self._init_dtype()

    def _matmat(self, X):
        impl = self.__matmat_impl
        return super()._matmat(X) if impl is None else impl(X)

    def _matvec(self, x):
        return self.__matvec_impl(x)

    def _rmatvec(self, x):
        func = self.__rmatvec_impl
        if func is None:
            raise NotImplementedError("rmatvec is not defined")
        return func(x)

    def _rmatmat(self, X):
        impl = self.__rmatmat_impl
        return super()._rmatmat(X) if impl is None else impl(X)

    def _adjoint(self):
        # The adjoint simply swaps the forward and reverse callables.
        return _CustomLinearOperator(shape=(self.shape[1], self.shape[0]),
                                     matvec=self.__rmatvec_impl,
                                     rmatvec=self.__matvec_impl,
                                     matmat=self.__rmatmat_impl,
                                     rmatmat=self.__matmat_impl,
                                     dtype=self.dtype)
class _AdjointLinearOperator(LinearOperator):
    """Adjoint of arbitrary Linear Operator"""

    def __init__(self, A):
        # A^H has the transposed shape of A.
        super().__init__(dtype=A.dtype, shape=(A.shape[1], A.shape[0]))
        self.A = A
        self.args = (A,)

    def _matvec(self, x):
        # Applying A^H is applying A's reverse multiplication.
        return self.A._rmatvec(x)

    def _rmatvec(self, x):
        return self.A._matvec(x)

    def _matmat(self, x):
        return self.A._rmatmat(x)

    def _rmatmat(self, x):
        return self.A._matmat(x)
class _TransposedLinearOperator(LinearOperator):
    """Transposition of arbitrary Linear Operator"""

    def __init__(self, A):
        super().__init__(dtype=A.dtype, shape=(A.shape[1], A.shape[0]))
        self.A = A
        self.args = (A,)

    # A.T @ x == conj(A.H @ conj(x)); np.conj also works on sparse matrices.

    def _matvec(self, x):
        return np.conj(self.A._rmatvec(np.conj(x)))

    def _rmatvec(self, x):
        return np.conj(self.A._matvec(np.conj(x)))

    def _matmat(self, x):
        return np.conj(self.A._rmatmat(np.conj(x)))

    def _rmatmat(self, x):
        return np.conj(self.A._matmat(np.conj(x)))
def _get_dtype(operators, dtypes=None):
if dtypes is None:
dtypes = []
for obj in operators:
if obj is not None and hasattr(obj, 'dtype'):
dtypes.append(obj.dtype)
return np.find_common_type(dtypes, [])
class _SumLinearOperator(LinearOperator):
    """Lazy elementwise sum of two LinearOperators."""

    def __init__(self, A, B):
        if not (isinstance(A, LinearOperator) and
                isinstance(B, LinearOperator)):
            raise ValueError('both operands have to be a LinearOperator')
        if A.shape != B.shape:
            raise ValueError('cannot add %r and %r: shape mismatch'
                             % (A, B))
        self.args = (A, B)
        super().__init__(_get_dtype([A, B]), A.shape)

    def _matvec(self, x):
        A, B = self.args
        return A.matvec(x) + B.matvec(x)

    def _rmatvec(self, x):
        A, B = self.args
        return A.rmatvec(x) + B.rmatvec(x)

    def _rmatmat(self, x):
        A, B = self.args
        return A.rmatmat(x) + B.rmatmat(x)

    def _matmat(self, x):
        A, B = self.args
        return A.matmat(x) + B.matmat(x)

    def _adjoint(self):
        # (A + B)^H == A^H + B^H
        A, B = self.args
        return A.H + B.H
class _ProductLinearOperator(LinearOperator):
    """Lazy composition (matrix product) of two LinearOperators."""

    def __init__(self, A, B):
        if not (isinstance(A, LinearOperator) and
                isinstance(B, LinearOperator)):
            raise ValueError('both operands have to be a LinearOperator')
        if A.shape[1] != B.shape[0]:
            raise ValueError('cannot multiply %r and %r: shape mismatch'
                             % (A, B))
        super().__init__(_get_dtype([A, B]),
                         (A.shape[0], B.shape[1]))
        self.args = (A, B)

    def _matvec(self, x):
        A, B = self.args
        return A.matvec(B.matvec(x))

    def _rmatvec(self, x):
        # (A B)^H = B^H A^H, so the reverse order applies.
        A, B = self.args
        return B.rmatvec(A.rmatvec(x))

    def _rmatmat(self, x):
        A, B = self.args
        return B.rmatmat(A.rmatmat(x))

    def _matmat(self, x):
        A, B = self.args
        return A.matmat(B.matmat(x))

    def _adjoint(self):
        A, B = self.args
        return B.H * A.H
class _ScaledLinearOperator(LinearOperator):
    """Lazy scalar multiple of a LinearOperator."""

    def __init__(self, A, alpha):
        if not isinstance(A, LinearOperator):
            raise ValueError('LinearOperator expected as A')
        if not np.isscalar(alpha):
            raise ValueError('scalar expected as alpha')
        # Promote the operator dtype with the scalar's Python type.
        super().__init__(_get_dtype([A], [type(alpha)]), A.shape)
        self.args = (A, alpha)

    def _matvec(self, x):
        A, alpha = self.args
        return alpha * A.matvec(x)

    def _rmatvec(self, x):
        # The adjoint scales by the conjugate of alpha.
        A, alpha = self.args
        return np.conj(alpha) * A.rmatvec(x)

    def _rmatmat(self, x):
        A, alpha = self.args
        return np.conj(alpha) * A.rmatmat(x)

    def _matmat(self, x):
        A, alpha = self.args
        return alpha * A.matmat(x)

    def _adjoint(self):
        A, alpha = self.args
        return A.H * np.conj(alpha)
class _PowerLinearOperator(LinearOperator):
    """Lazy non-negative integer power of a square LinearOperator."""

    def __init__(self, A, p):
        if not isinstance(A, LinearOperator):
            raise ValueError('LinearOperator expected as A')
        if A.shape[0] != A.shape[1]:
            raise ValueError('square LinearOperator expected, got %r' % A)
        if not isintlike(p) or p < 0:
            raise ValueError('non-negative integer expected as p')
        super().__init__(_get_dtype([A]), A.shape)
        self.args = (A, p)

    def _power(self, fun, x):
        # Apply ``fun`` p times, starting from a copy of x.
        res = np.array(x, copy=True)
        for _ in range(self.args[1]):
            res = fun(res)
        return res

    def _matvec(self, x):
        return self._power(self.args[0].matvec, x)

    def _rmatvec(self, x):
        return self._power(self.args[0].rmatvec, x)

    def _rmatmat(self, x):
        return self._power(self.args[0].rmatmat, x)

    def _matmat(self, x):
        return self._power(self.args[0].matmat, x)

    def _adjoint(self):
        # (A^p)^H == (A^H)^p
        A, p = self.args
        return A.H ** p
class MatrixLinearOperator(LinearOperator):
    """LinearOperator wrapping an explicitly stored (dense or sparse) matrix."""

    def __init__(self, A):
        super().__init__(A.dtype, A.shape)
        self.A = A
        # Lazily-created adjoint wrapper; built on first _adjoint() call.
        self.__adj = None
        self.args = (A,)

    def _matmat(self, X):
        return self.A.dot(X)

    def _adjoint(self):
        # Cache the adjoint so repeated .H accesses return the same object.
        if self.__adj is None:
            self.__adj = _AdjointMatrixOperator(self)
        return self.__adj
class _AdjointMatrixOperator(MatrixLinearOperator):
    # Adjoint of a MatrixLinearOperator.
    #
    # Deliberately does NOT call super().__init__(): ``shape`` is assigned
    # by hand and ``dtype`` is proxied from the parent operator via the
    # property below, so the two objects stay in sync.
    def __init__(self, adjoint):
        # Materialize the conjugate transpose of the parent's matrix.
        self.A = adjoint.A.T.conj()
        self.__adjoint = adjoint
        self.args = (adjoint,)
        self.shape = adjoint.shape[1], adjoint.shape[0]

    @property
    def dtype(self):
        # Always reflect the parent operator's dtype.
        return self.__adjoint.dtype

    def _adjoint(self):
        # The adjoint of the adjoint is the original operator.
        return self.__adjoint
class IdentityOperator(LinearOperator):
    """LinearOperator that maps every input to itself (the identity map)."""

    def __init__(self, shape, dtype=None):
        super().__init__(dtype, shape)

    # All multiplication primitives return the operand unchanged.

    def _matvec(self, x):
        return x

    def _rmatvec(self, x):
        return x

    def _rmatmat(self, x):
        return x

    def _matmat(self, x):
        return x

    def _adjoint(self):
        # The identity is self-adjoint.
        return self
def aslinearoperator(A):
    """Return A as a LinearOperator.

    'A' may be any of the following types:
     - ndarray
     - matrix
     - sparse matrix (e.g. csr_matrix, lil_matrix, etc.)
     - LinearOperator
     - An object with .shape and .matvec attributes

    See the LinearOperator documentation for additional information.

    Notes
    -----
    If 'A' has no .dtype attribute, the data type is determined by calling
    :func:`LinearOperator.matvec()` - set the .dtype attribute to prevent this
    call upon the linear operator creation.

    Examples
    --------
    >>> from scipy.sparse.linalg import aslinearoperator
    >>> M = np.array([[1,2,3],[4,5,6]], dtype=np.int32)
    >>> aslinearoperator(M)
    <2x3 MatrixLinearOperator with dtype=int32>
    """
    # Already an operator: pass through unchanged.
    if isinstance(A, LinearOperator):
        return A

    # Dense arrays/matrices are wrapped as 2-d MatrixLinearOperators.
    if isinstance(A, (np.ndarray, np.matrix)):
        if A.ndim > 2:
            raise ValueError('array must have ndim <= 2')
        return MatrixLinearOperator(np.atleast_2d(np.asarray(A)))

    if isspmatrix(A) or is_pydata_spmatrix(A):
        return MatrixLinearOperator(A)

    # Duck typing: anything exposing .shape and .matvec.
    if hasattr(A, 'shape') and hasattr(A, 'matvec'):
        rmatvec = getattr(A, 'rmatvec', None)
        rmatmat = getattr(A, 'rmatmat', None)
        dtype = getattr(A, 'dtype', None)
        return LinearOperator(A.shape, A.matvec, rmatvec=rmatvec,
                              rmatmat=rmatmat, dtype=dtype)

    raise TypeError('type not understood')
| {
"repo_name": "scipy/scipy",
"path": "scipy/sparse/linalg/interface.py",
"copies": "12",
"size": "25267",
"license": "bsd-3-clause",
"hash": -2462375166692827600,
"line_mean": 29.5895883777,
"line_max": 81,
"alpha_frac": 0.5721296553,
"autogenerated": false,
"ratio": 3.985959930588421,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""Abstract model describing WorldMap Layers"""
from __future__ import print_function
from abc import abstractmethod
from urlparse import urlparse
import logging
from django.conf import settings
from django.db import models
from django import forms
import jsonfield # using jsonfield.JSONField
from shared_dataverse_information.dataverse_info.forms_existing_layer\
import CheckForExistingLayerForm
from shared_dataverse_information.map_layer_metadata.forms import\
MapLayerMetadataValidationForm,\
GeoconnectToDataverseMapLayerMetadataValidationForm,\
GeoconnectToDataverseDeleteMapLayerMetadataForm
from .download_link_formatter import DownloadLinkFormatter
from gc_apps.core.models import TimeStampedModel
from gc_apps.geo_utils.json_field_reader import JSONHelper
LOGGER = logging.getLogger(__name__)
class WorldMapLayerInfo(TimeStampedModel):
"""
Store the results of a new layer created by mapping a Dataverse file
Abstract model used as a mix-in
"""
layer_name = models.CharField(max_length=255, blank=True, help_text='auto-filled on save')
core_data = jsonfield.JSONField()
attribute_data = jsonfield.JSONField()
download_links = jsonfield.JSONField(blank=True)
# for object identification
md5 = models.CharField(max_length=40,\
blank=True,\
db_index=True,\
help_text='auto-filled on save')
class Meta:
"""model meta info"""
abstract = True
ordering = ('-modified', '-created')
verbose_name = 'WorldMapLayerInfo'
verbose_name_plural = verbose_name
def __str__(self):
"""string representation"""
return self.layer_name
@abstractmethod
def get_layer_type(self):
"""return type such as:
TYPE_JOIN_LAYER, TYPE_LAT_LNG_LAYER, TYPE_SHAPEFILE_LAYER, etc"""
@abstractmethod
def get_gis_data_info(self):
"""Return the attribute holding gis_data_file
e.g. return self.tabular_info
OR return self.shapefile_info, etc"""
@abstractmethod
def get_description_for_core_data(self):
"""Return a description of the map layer source.
e.g. 'Layer created from tabular file'"""
@abstractmethod
def get_failed_rows(self):
"""Return a list of rows which failed to map.
e.g. 'Layer created from tabular file'"""
def is_shapefile_layer(self):
"""Is this the result of mapping a zipped shapefile?"""
return False
#self.get_layer_type() == DV_MAP_TYPE_SHAPEFILE
def is_lat_lng_layer(self):
"""Is this the result of mapping Lat/Lng columns?"""
return False
#return self.get_layer_type() == TYPE_LAT_LNG_LAYER
def is_join_layer(self):
"""Is this the result of joining an existing layer?"""
return False
#return self.get_layer_type() == TYPE_JOIN_LAYER
def get_core_data_dict_for_views(self):
"""
Parameters used for HTML views of map data:
core_data, attribute_data, download_links
"""
return dict(worldmap_layerinfo=self,
core_data=self.core_data,
attribute_data=self.attribute_data,
download_links=self.get_formatted_download_links())
def get_dict_for_classify_form(self):
"""
Parameters used for populating the classification form
# Override in concrete class
"""
return dict(layer_name=self.layer_name,\
data_source_type=self.get_layer_type(),\
raw_attribute_info=self.attribute_data)
def get_download_link(self, link_type='png'):
"""
See download_link_formatter.DownloadLinkFormatter
for different format types
"""
if not self.download_links:
return None
return self.download_links.get('png', None)
@staticmethod
def build_dict_from_worldmap_json(json_dict):
"""Given json_dict containing WorldMap layer information,
split out the core_data, attribute_data, and download_link information
Used for creating subclass objects:
- WorldMapShapefileLayerInfo
- WorldMapJoinLayerInfo
- WorldMapLatLngInfo
"""
if json_dict is None:
LOGGER.error('json_dict cannot be None')
return None
if not hasattr(json_dict, 'has_key'):
LOGGER.error('json_dict must be a dict. not type: [%s]', type(json_dict))
return None
# -----------------------------------------
# Get core data (required)
# -----------------------------------------
if not 'data' in json_dict:
LOGGER.error('The json_dict must have a "data" key')
return None
core_data = json_dict['data']
# -----------------------------------------
# Get attribute data (required)
# Note: Currently this is an escaped string within core data...
# -----------------------------------------
if not 'attribute_info' in core_data:
LOGGER.error('The core_data must have a "attribute_info" key')
return None
attribute_data = JSONHelper.to_python_or_none(core_data['attribute_info'])
if attribute_data is None:
LOGGER.error(('Failed to convert core_data'
' "attribute_info" from string'
' to python object (list)'))
return None
# -----------------------------------------
# Get download_links (optional)
# Note: Currently this is an escaped string within core data...
# -----------------------------------------
if 'download_links' in core_data:
download_links = JSONHelper.to_python_or_none(core_data['download_links'])
if download_links is None:
LOGGER.error(('Failed to convert core_data "download_links"'
' from string to python object (list)'))
download_links = ''
else:
download_links = ''
# -----------------------------------------
# Gather initial values
# -----------------------------------------
init_data = dict(core_data=core_data,\
attribute_data=attribute_data,\
download_links=download_links)
return init_data
@staticmethod
def clear_duplicate_worldmapinfo(worldmap_info):
"""
Remove any duplicate objects of the same subclass.
Subclass objects include:
- WorldMapShapefileLayerInfo
- WorldMapJoinLayerInfo
- WorldMapLatLngInfo
worldmap_info - instance of a subclass above
"""
if worldmap_info is None or not worldmap_info.id:
# Make sure the object has been saved -- e.g. has an 'id'
return
assert isinstance(worldmap_info, WorldMapLayerInfo),\
("worldmap_info must be an instance/subclass of"
" WorldMapJoinLayerInfo")
WorldMapLayerInfoType = worldmap_info.__class__
if worldmap_info.is_shapefile_layer():
filters = dict(shapefile_info=worldmap_info.get_gis_data_info())
else:
filters = dict(tabular_info=worldmap_info.get_gis_data_info())
# Pull objects except the current "worldmap_info"
#
older_info_objects = WorldMapLayerInfoType.objects\
.filter(**filters)\
.exclude(id=worldmap_info.id)
# Delete the older objects
older_info_objects.delete()
def get_layer_url_base(self):
"""
Return the layer url base. Examples:
- http://worldmap.harvard.edu
- http(s)://worldmap.harvard.edu
"""
if not self.core_data:
return None
layer_link = self.core_data.get('layer_link', None)
if layer_link is None:
return None
parsed_url = urlparse(layer_link)
return '%s://%s' % (parsed_url.scheme, parsed_url.netloc)
def get_params_to_check_for_existing_layer_metadata(self):
"""Return dict of params used to check WorldMap
for existing layer metadata"""
gis_data_info = self.get_gis_data_info()
assert gis_data_info is not None, "gis_data_info cannot be None"
f = CheckForExistingLayerForm(gis_data_info.__dict__)
if not f.is_valid():
raise forms.ValidationError(\
'CheckForExistingLayerForm params did not validate: %s'\
% f.errors)
return f.cleaned_data
def get_params_for_dv_delete_layer_metadata(self):
"""Return dict of params used to delete an
WorldMap metadata from Dataverse"""
gis_data_info = self.get_gis_data_info()
assert gis_data_info is not None, "gis_data_info cannot be None"
f = GeoconnectToDataverseDeleteMapLayerMetadataForm(\
{'dv_session_token' : gis_data_info.dv_session_token})
if not f.is_valid():
raise forms.ValidationError(\
'WorldMapLayerInfo DELETE params did not validate: %s' %\
f.errors)
return f.format_for_dataverse_api()
def get_legend_img_url(self):
"""
Construct a url that returns a Legend for a Worldmap layer in the form of PNG file
"""
if not self.core_data:
return None
params = (('request', 'GetLegendGraphic'),
('format', 'image/png'),
('width', 20),
('height', 20),
('layer', self.layer_name),
('legend_options', 'fontAntiAliasing:true;fontSize:11;'))
param_str = '&'.join(['%s=%s' % (k, v) for k, v in params])
legend_img_url = '%s/geoserver/wms?%s' %\
(self.get_layer_url_base(),
param_str)
if settings.WORLDMAP_EMBED_FORCE_HTTPS:
return legend_img_url.replace('http://', 'https://', 1)
else:
return legend_img_url
"""
Example of how an image tag is formed:
<img src="{{ worldmap_layerinfo.get_layer_url_base }}
/geoserver/wms?request=GetLegendGraphic&format=image/png&width=20&height=20
&layer={{ worldmap_layerinfo.layer_name }}
&legend_options=fontAntiAliasing:true;fontSize:12;
&trefresh={% now "U" %}" id="legend_img" alt="legend" />
"""
def get_dataverse_server_url(self):
"""
Retrieve the Dataverse base url to be used
for using the Dataverse API
"""
wm_info = self.get_gis_data_info()
if not wm_info:
return None
return wm_info.get_dataverse_server_url()
def get_data_dict(self, json_format=False):
"""
Used for processing model data.
"""
f = MapLayerMetadataValidationForm(self.core_data)
if not f.is_valid():
raise forms.ValidationError('WorldMapLayerInfo params did not validate: %s' % f.errors)
if not json_format:
return f.cleaned_data
try:
return json.dumps(f.cleaned_data)
except:
raise ValueError('Failed to convert data to json\ndata: %s' % f.cleaned_data)
def get_layer_link(self):
if not self.core_data:
return None
layer_link = self.core_data.get('layer_link', None)
if not layer_link:
return None
if layer_link.startswith('/'):
return settings.WORLDMAP_SERVER_URL + layer_link
return layer_link
def get_params_for_dv_update(self):
"""
Format data to send to the Dataverse
"""
self.verify_layer_link_format()
if self.core_data and self.core_data.get('joinDescription') is None:
self.core_data['joinDescription'] = self.get_description_for_core_data()
self.save()
f = GeoconnectToDataverseMapLayerMetadataValidationForm(self.core_data)
if not f.is_valid():
raise forms.ValidationError('WorldMapLayerInfo params did not validate: %s' % f.errors)
gis_data_info = self.get_gis_data_info()
if gis_data_info is None:
raise forms.ValidationError(\
'Serious error! Could not find gis_data_info: %s' % f.errors)
return f.format_data_for_dataverse_api(gis_data_info.dv_session_token,\
join_description=self.get_description_for_core_data())
def get_formatted_download_links(self):
    """Return the WorldMap download links in display-ready form.

    Returns None when no download links are stored.
    """
    links = self.download_links
    if not links:
        return None
    return DownloadLinkFormatter(links).get_formatted_links()
def get_embed_map_link(self):
    """Return the WorldMap embed link, or None when unavailable.

    When the WORLDMAP_EMBED_FORCE_HTTPS setting is on, an 'http://'
    link is upgraded to 'https://'.
    """
    if not self.core_data:
        return None

    link = self.core_data.get('embed_map_link', None)
    if not link:
        return None

    force_https = settings.WORLDMAP_EMBED_FORCE_HTTPS
    if force_https and link.startswith('http://'):
        link = link.replace('http://', 'https://', 1)
    return link
def verify_layer_link_format(self):
    """
    Hack to make sure the layer_link is a full url and not just a path.

    If only a path was stored, the scheme and host are borrowed from the
    'embed_link' (or, failing that, the 'map_image_link'). The repaired
    link is written back to core_data and persisted via save().
    """
    layer_link = self.core_data.get('layer_link', None)

    # Is it just a path?
    if layer_link and layer_link.startswith('/'):
        # Borrow scheme/host from another stored full url.
        full_link = self.core_data.get('embed_link', None)
        if not full_link:
            full_link = self.core_data.get('map_image_link', None)

        # Is the borrowed link actually a full url?
        if full_link and full_link.lower().startswith('http'):
            # Parse the full url and reuse its scheme/host to turn
            # the stored path into an absolute layer link.
            url_parts = urlparse(full_link)

            # Full layer link
            layer_link = '%s://%s%s' % (url_parts.scheme,
                                        url_parts.netloc,
                                        layer_link)
            self.core_data['layer_link'] = layer_link
            self.save()
| {
"repo_name": "IQSS/geoconnect",
"path": "gc_apps/worldmap_layers/models.py",
"copies": "1",
"size": "14537",
"license": "apache-2.0",
"hash": -686569487382985700,
"line_mean": 32.8857808858,
"line_max": 99,
"alpha_frac": 0.5721262984,
"autogenerated": false,
"ratio": 4.205091119467746,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002950181103524847,
"num_lines": 429
} |
"""Abstract models for use in other applications"""
import datetime
from django.contrib.auth.models import User
from django.contrib.postgres.fields import JSONField
from django.db import transaction
from django.db.models import DateTimeField, ForeignKey, Manager, Model, SET_NULL
from django.db.models.query import QuerySet
import pytz
class TimestampedModelQuerySet(QuerySet):
    """QuerySet that keeps ``updated_on`` fresh on bulk updates."""

    def update(self, **kwargs):
        """
        Inject an ``updated_on`` timestamp into bulk updates.

        ``QuerySet.update()`` works at the database level without loading
        objects or calling ``save()``, so ``auto_now`` never fires; supply
        the timestamp explicitly unless the caller already set one.
        """
        kwargs.setdefault("updated_on", datetime.datetime.now(tz=pytz.UTC))
        return super().update(**kwargs)
class TimestampedModelManager(Manager):
    """Manager that exposes TimestampedModelQuerySet's behavior."""

    def update(self, **kwargs):
        """Proxy ``update`` to the timestamp-aware queryset."""
        return self.get_queryset().update(**kwargs)

    def get_queryset(self):
        """Return a TimestampedModelQuerySet bound to this manager's db."""
        return TimestampedModelQuerySet(self.model, using=self._db)
class TimestampedModel(Model):
    """
    Base model for create/update timestamps
    """

    objects = TimestampedModelManager()
    # Both timestamps are stored in UTC. 'updated_on' is also maintained by
    # TimestampedModelQuerySet.update() for bulk updates, which bypass
    # save() and therefore auto_now.
    created_on = DateTimeField(auto_now_add=True)  # UTC
    updated_on = DateTimeField(auto_now=True)  # UTC

    class Meta:
        abstract = True
class AuditModel(TimestampedModel):
    """An abstract base class for audit models"""

    # User who performed the change; nulled (not cascaded) if that user
    # is later deleted.
    acting_user = ForeignKey(User, null=True, on_delete=SET_NULL)
    # Serialized snapshots of the audited object before/after the change.
    # 'data_before' is null for the initial creation (see
    # AuditableModel.save_and_log).
    data_before = JSONField(blank=True, null=True)
    data_after = JSONField(blank=True, null=True)

    class Meta:
        abstract = True

    @classmethod
    def get_related_field_name(cls):
        """
        Returns:
            str: A field name which links the Auditable model to this model
        """
        raise NotImplementedError
class AuditableModel(Model):
    """Abstract base for models whose changes are recorded in an audit table."""

    class Meta:
        abstract = True

    def to_dict(self):
        """
        Returns:
            dict:
                A serialized representation of the model object
        """
        raise NotImplementedError

    @classmethod
    def get_audit_class(cls):
        """
        Returns:
            class of Model:
                A class of a Django model used as the audit table
        """
        raise NotImplementedError

    @transaction.atomic
    def save_and_log(self, acting_user, *args, **kwargs):
        """
        Save the object and record an audit entry in the same transaction.

        Args:
            acting_user (django.contrib.auth.models.User or None):
                The user who made the change to the model. May be None if
                inapplicable.
        """
        # Snapshot the currently persisted state (None on first save).
        previous = self.__class__.objects.filter(id=self.id).first()
        self.save(*args, **kwargs)
        self.refresh_from_db()

        snapshot_before = previous.to_dict() if previous is not None else None
        audit_class = self.get_audit_class()
        audit_kwargs = {
            "acting_user": acting_user,
            "data_before": snapshot_before,
            "data_after": self.to_dict(),
            # Link the audit row back to the audited object.
            audit_class.get_related_field_name(): self,
        }
        audit_class.objects.create(**audit_kwargs)
class ValidateOnSaveMixin(Model):
    """Mixin that runs full field/model validation before saving."""

    class Meta:
        abstract = True

    def save(
        self, force_insert=False, force_update=False, **kwargs
    ):  # pylint: disable=arguments-differ
        """Validate via full_clean() unless a forced insert/update was requested."""
        # Forced inserts/updates deliberately bypass validation.
        if not force_insert and not force_update:
            self.full_clean()
        super().save(force_insert=force_insert, force_update=force_update, **kwargs)
| {
"repo_name": "mitodl/bootcamp-ecommerce",
"path": "main/models.py",
"copies": "1",
"size": "3965",
"license": "bsd-3-clause",
"hash": 4307491312650223000,
"line_mean": 28.1544117647,
"line_max": 91,
"alpha_frac": 0.6315258512,
"autogenerated": false,
"ratio": 4.272629310344827,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5404155161544828,
"avg_score": null,
"num_lines": null
} |
"""ABSTRACT:
This module analyses paths of a state machine. For each path through a state a
PathTrace object is created. A PathTrace object tells something about the
acceptance behavior of a state in terms of a list of AcceptInfo objects. As a
basic result of this process a map is generated with the following property:
map: state index --> list of PathTrace objects.
Based on the information in the AcceptInfo objects requirements on entry and
drop_out behaviors of a state can be derived, as done by module 'core.py'.
-------------------------------------------------------------------------------
FURTHER INFO:
class TrackAnalysis
class PathTrace
class AcceptInfo
-------------------------------------------------------------------------------
(C) 2010-2011 Frank-Rene Schaefer
ABSOLUTELY NO WARRANTY
"""
from quex.blackboard import E_AcceptanceIDs, E_PreContextIDs, E_TransitionN
from quex.engine.misc.tree_walker import TreeWalker
from itertools import izip
from copy import copy
from zlib import crc32
def do(SM):
    """RETURNS: Acceptance trace database:

                map: state_index --> list of PathTrace objects.

       This function walks down each possible path through a given state
       machine. During the process of walking down the paths it develops for
       each state its list of PathTrace objects.

       The result of the process is presented by property 'map_state_to_trace'.
       It delivers for each state of the state machine a trace object that maps:

                state index --> list of PathTrace objects

       Another result of the walk is the 'dangerous_positioning_state_set' which
       collects some positioning states that have to store the position for
       any successor state (knot analysis, see below).
    """
    def print_path(x):
        # Debug helper: prints the state indices along a path, root-wards.
        print x.state_index, " ",
        if x.parent is not None: print_path(x.parent)
        else:                    print

    class TraceFinder(TreeWalker):
        """Determines PathTrace objects for each state. The heart of this walker
           is the call to 'PathTrace.next_step()' which incrementally develops
           the acceptance and position storage history of a path.

           Recursion Terminal: When a state has no target state that has not yet
                               been handled in the 'path' in the same manner.
                               That means, that if a state appears again in the
                               path, its trace must be different or the
                               recursion terminates.
        """
        def __init__(self, state_machine):
            self.__depth   = 0
            self.sm        = state_machine
            self.empty_list = []
            # state index --> list of PathTrace objects seen for that state
            self.result    = dict((i, []) for i in self.sm.states.iterkeys())
            self.dangerous_positioning_state_set = set()
            TreeWalker.__init__(self)

        def on_enter(self, Args):
            PreviousTrace = Args[0]
            StateIndex    = Args[1]

            # (*) Update the information about the 'trace of acceptances'
            State = self.sm.states[StateIndex]

            if self.__depth == 0: trace = PathTrace(self.sm.init_state_index)
            else:                 trace = PreviousTrace.next_step(StateIndex, State)

            trigger_map = self.sm.states[StateIndex].transitions().get_map()

            # (*) Recursion Termination:
            #
            # If a state has been analyzed before with the same trace as result,
            # then it is not necessary to dive into deeper investigations again.
            # All of its successor paths have been walked along before. This
            # catches two scenarios:
            #
            #   (1) Loops: A state is reached through a loop and nothing
            #       changed during the walk through the loop since the last
            #       passing. There may be connected loops, so it is not
            #       sufficient to detect a loop and stop.
            #
            #   (2) Knots: A state is reached through different branches, but
            #       the traces through those branches are indifferent in their
            #       positioning and accepting behavior. Only one branch needs
            #       to consider the subsequent states.
            #
            #       (There were cases where this blew the computation time,
            #        see bug-2257908.sh in $QUEX_PATH/TEST).
            #
            existing_trace_list = self.result.get(StateIndex)
            if len(existing_trace_list) != 0:
                end_of_road_f = (len(trigger_map) == 0)
                for pioneer in existing_trace_list:
                    if not trace.is_equivalent(pioneer, end_of_road_f):
                        continue
                    if trace.has_parent(pioneer):
                        # Loop detected -- continuation unnecessary.
                        # Nothing new happened since the last passage. If the
                        # trace were not equivalent, the loop would have to be
                        # stepped through again.
                        return None
                    else:
                        # Knot detected -- continuation abbreviated.
                        # A state is reached twice via two separate paths with
                        # the same positioning and acceptance states. The
                        # analysis of subsequent states on the path is
                        # therefore complete. Almost: alternative paths from
                        # store to restore must be added later on.
                        self.dangerous_positioning_state_set.update(accept_info.positioning_state_index \
                                                                    for accept_info in trace.acceptance_trace)
                        return None

            # (*) Mark the current state with its acceptance trace
            self.result[StateIndex].append(trace)

            # (*) Add current state to path
            self.__depth += 1

            # (*) Recurse to all (undone) target states.
            return [(trace, target_i) for target_i in trigger_map.iterkeys() ]

        def on_finished(self, Args):
            self.__depth -= 1

    trace_finder = TraceFinder(SM)
    trace_finder.do((None, SM.init_state_index))

    # Generate 'Doodle' objects that expose only the acceptance trace member.
    # This ensures that no other member of the PathTrace objects is used
    # by the caller.
    class Doodle:
        def __init__(self, X):
            self.acceptance_trace = X.acceptance_trace

    return dict( (key, [Doodle(x) for x in trace_list]) for key, trace_list in trace_finder.result.iteritems()), \
           trace_finder.dangerous_positioning_state_set
class PathTrace(object):
    """An object of this class documents the impact of actions that happen
       along ONE specific path from the init state to a specific state.

       During a path from the init state to 'this state', the following things
       may happen or may have happened:

            -- The input position has been stored in a position register
               (for post context management or on accepting a pattern).

            -- A pattern has been accepted. Acceptance may depend on a
               pre-context being fulfilled.

       Storing the input position can be a costly operation. If the length of
       the path from storing to restoring can be determined from the number of
       transitions, then it actually does not have to be stored. Instead, it
       can be obtained by 'input position -= transition number since
       positioning.' In any case, the restoring of an input position is
       triggered by an acceptance event.

       Acceptance of a pattern occurs if one drops out of a state, i.e. there
       are no further transitions possible. Later analysis will focus on these
       acceptance events. They are stored in a sorted member '.acceptance_trace'.

       The sort order of the acceptance trace reflects the philosophy of
       'longest match': the last acceptance along a path has a higher
       precedence than an even higher prioritized pattern before. All patterns
       without any pre-context remove any AcceptInfo object that preceded
       along the path.

       During the process of building path traces, the function '.next_step()'
       is called. It assumes that the current object represents the path trace
       before 'this state'. Based on the given arguments it produces a clone
       that represents the trace for 'this state'.
    """
    __slots__ = ("__acceptance_trace",      # List of AcceptInfo objects
                 "__storage_db",            # Map: pattern_id --> StoreInfo objects
                 "__parent",                # Preceding PathTrace on the path
                 "__state_index",           # State which this trace belongs to
                 "__equivalence_hash",      # CRC32 over the trace's content
                 "__equivalence_hint",      # pattern_id, if it alone suffices for equivalence
                 "__acceptance_trace_len",  # Cached len of '__acceptance_trace'
                 "__storage_db_len")        # Cached len of '__storage_db'

    def __init__(self, InitStateIndex=None, HashF=True):
        if InitStateIndex is None: # 'None' --> call from '.reproduce()'
            self.__acceptance_trace = []
        else:
            # Start with the 'FAILURE' acceptance which restores the
            # position to 'lexeme start + 1'.
            self.__acceptance_trace = [
                AcceptInfo(PreContextID                = E_PreContextIDs.NONE,
                           PatternID                   = E_AcceptanceIDs.FAILURE,
                           AcceptingStateIndex         = InitStateIndex,
                           PathSincePositioning        = [InitStateIndex],
                           TransitionNSincePositioning = 0)
            ]
        self.__storage_db       = {}
        self.__state_index      = InitStateIndex
        self.__parent           = None
        self.__equivalence_hash = None
        self.__equivalence_hint = None
        if HashF:
            self.__compute_equivalence_hash()

    def reproduce(self, StateIndex):
        """Reproduce: Clone + update for additional StateIndex in the path."""
        result = PathTrace(HashF=False) # We compute 'hash' later.
        result.__acceptance_trace = [ x.reproduce(StateIndex) for x in self.__acceptance_trace ]
        result.__storage_db       = dict( (i, x.reproduce(StateIndex))
                                          for i, x in self.__storage_db.iteritems() )
        result.__state_index      = StateIndex
        result.__parent           = self
        result.__compute_equivalence_hash()
        return result

    def next_step(self, StateIndex, State):
        """The present object represents the history of events along a path
           from the init state to the state BEFORE 'this state'. Applying the
           events of 'this state' on the current history results in a
           PathTrace object that represents the history of events until
           'this state'.

           RETURNS: Altered clone of the present object.
        """
        # Selective cloning is meaningless: '.path_since_positioning' has
        # almost always to be adapted anyway. So, clone unconditionally;
        # the path update happens during 'reproduction'.
        result = self.reproduce(StateIndex)

        # Update '.__acceptance_trace' and '.__storage_db' according to
        # occurring acceptance and store-input-position events. Origins are
        # sorted with the highest priority LAST, so that they end up on top
        # of the acceptance trace list.
        for origin in sorted(State.origins(), key=lambda x: x.pattern_id(), reverse=True):
            # Acceptance
            if origin.is_acceptance():
                result.__acceptance_trace_add_at_front(origin, StateIndex)
            # Store Input Position Information
            if origin.input_position_store_f():
                result.__storage_db[origin.pattern_id()] = StoreInfo([StateIndex], 0)

        assert len(result.__acceptance_trace) >= 1
        result.__compute_equivalence_hash()
        return result

    def __acceptance_trace_add_at_front(self, Origin, StateIndex):
        """Assume that the 'Origin' belongs to a state with index 'StateIndex'
           that comes after all states on the before considered path, and that
           'Origin' talks about 'acceptance'.
        """
        # An unconditional acceptance dominates all previously occurred
        # acceptances (philosophy of longest match).
        if Origin.pre_context_id() == E_PreContextIDs.NONE:
            del self.__acceptance_trace[:]

        # Input Position Store/Restore
        pattern_id = Origin.pattern_id()
        if Origin.input_position_restore_f():
            # Restorage of Input Position (Post Contexts): refer to the
            # input position at the time when it was stored.
            entry                          = self.__storage_db[pattern_id]
            path_since_positioning         = entry.path_since_positioning
            transition_n_since_positioning = entry.transition_n_since_positioning
        else:
            # Normally accepted patterns refer to the input position at
            # the time of acceptance.
            path_since_positioning         = [ StateIndex ]
            transition_n_since_positioning = 0

        # Reoccurring information about an acceptance overwrites previous
        # occurrences. From this rule it follows that there is at most one
        # entry per pattern_id -- so stop after the first hit.
        for entry_i in (i for i, x in enumerate(self.__acceptance_trace) \
                        if x.pattern_id == pattern_id):
            del self.__acceptance_trace[entry_i]
            break

        entry = AcceptInfo(Origin.pre_context_id(), pattern_id,
                           AcceptingStateIndex         = StateIndex,
                           PathSincePositioning        = path_since_positioning,
                           TransitionNSincePositioning = transition_n_since_positioning)

        # Insert at the beginning, because what comes last has the highest
        # priority (philosophy of longest match). The calling function must
        # ensure that for one step on the path, the higher prioritized
        # patterns appear AFTER the lower prioritized ones.
        self.__acceptance_trace.insert(0, entry)

    @property
    def state_index(self):
        return self.__state_index

    @property
    def parent(self):
        return self.__parent

    def has_parent(self, Candidate):
        """True if 'Candidate' appears anywhere in this trace's parent chain."""
        parent = self.__parent
        while parent is not None:
            if id(parent) == id(Candidate): return True
            parent = parent.parent
        return False

    @property
    def acceptance_trace(self):
        return self.__acceptance_trace

    def get(self, PreContextID):
        """RETURNS: AcceptInfo object for a given PreContextID."""
        for entry in self.__acceptance_trace:
            if entry.pre_context_id == PreContextID: return entry
        return None

    def __compute_equivalence_hash(self):
        """Computes a numeric value 'self.__equivalence_hash' to identify the
           current setting of the object. This hash value may be used in a
           necessary condition to compare for 'equivalence' with another
           object: if the hash values of two objects differ, the objects MUST
           differ. If they are the same, a detailed check must investigate if
           they are equivalent or not -- see 'is_equivalent()', which assumes
           that this function has been called before.
        """
        data = []
        for x in self.__acceptance_trace:
            # Enum members are folded into fixed marker constants so that
            # they hash deterministically alongside plain integers.
            if isinstance(x.pattern_id, long):             data.append(x.pattern_id)
            elif x.pattern_id == E_AcceptanceIDs.FAILURE:  data.append(0x5A5A5A5A)
            else:                                          data.append(0xA5A5A5A5)
            if isinstance(x.accepting_state_index, long):  data.append(x.accepting_state_index)
            else:                                          data.append(0x5B5B5B5B)
            if isinstance(x.positioning_state_index, long): data.append(x.positioning_state_index)
            else:                                           data.append(0x5C5C5C5C)
            if isinstance(x.transition_n_since_positioning, long): data.append(x.transition_n_since_positioning)
            else:                                                  data.append(0x5D5D5D5D)

        for pattern_id, info in sorted(self.__storage_db.iteritems()):
            if info.loop_f:                                              data.append(0x48484848)
            elif isinstance(info.transition_n_since_positioning, long):  data.append(info.transition_n_since_positioning)
            else:                                                        data.append(0x4D4D4D4D)

        self.__equivalence_hash = crc32(str(data))

        # HINT: -- One single acceptance on the current state.
        #       -- No restore of position from previous states.
        #       => Store the pattern_id of the winning pattern.
        #
        # This hint may be used as a SUFFICIENT condition to determine
        # equivalence IF the state has no subsequent transitions. Then there
        # is no restore involved and '__storage_db' can be neglected.
        if len(self.__acceptance_trace) == 1:
            x = self.__acceptance_trace[0]
            if x.transition_n_since_positioning == 0 \
               and x.positioning_state_index == x.accepting_state_index:
                # From:    transition_n_since_positioning == 0
                # Follows: x.accepting_state_index == current state index
                self.__equivalence_hint = x.pattern_id
            else:
                self.__equivalence_hint = None
        else:
            self.__equivalence_hint = None

        self.__acceptance_trace_len = len(self.__acceptance_trace)
        # BUG FIX: this previously assigned len(self.__acceptance_trace),
        # so the storage-db length pre-check in 'is_equivalent()' compared
        # acceptance-trace lengths twice and never the storage db's size.
        self.__storage_db_len       = len(self.__storage_db)

    def is_equivalent(self, Other, EndOfRoadF=False):
        """This function determines whether the path trace described in Other
           is equivalent to this trace.
        """
        if self.__equivalence_hash != Other.__equivalence_hash: return False

        if self.__equivalence_hint is not None:
            if self.__equivalence_hint == Other.__equivalence_hint: return True

        if   self.__acceptance_trace_len != Other.__acceptance_trace_len: return False
        elif self.__storage_db_len       != Other.__storage_db_len:       return False

        for x, y in izip(self.__acceptance_trace, Other.__acceptance_trace):
            if   x.pattern_id                     != y.pattern_id:                     return False
            elif x.accepting_state_index          != y.accepting_state_index:          return False
            elif x.positioning_state_index        != y.positioning_state_index:        return False
            elif x.transition_n_since_positioning != y.transition_n_since_positioning: return False

        # When there are no further transitions to other states, no restore
        # may happen. Then, considering '__storage_db' is not necessary.
        if not EndOfRoadF:
            for x_pattern_id, x in self.__storage_db.iteritems():
                y = Other.__storage_db.get(x_pattern_id)
                if   y is None:                                              return False
                if   x.loop_f                  != y.loop_f:                  return False
                elif x.positioning_state_index != y.positioning_state_index: return False

        return True

    def __eq__(self, Other):
        if self.__acceptance_trace != Other.__acceptance_trace: return False
        if len(self.__storage_db) != len(Other.__storage_db):   return False
        for pattern_id, entry in self.__storage_db.iteritems():
            other_entry = Other.__storage_db.get(pattern_id)
            if other_entry is None:             return False
            if not entry.is_equal(other_entry): return False
        return True

    def __ne__(self, Other):
        # BUG FIX: this was defined as '__neq__(self)', a misnamed dunder
        # that Python never invokes -- and it compared the object with
        # itself instead of with 'Other'.
        return not self.__eq__(Other)

    def __repr__(self):
        return   "".join([repr(x) for x in self.__acceptance_trace]) \
               + "".join([repr(x) for x in self.__storage_db.iteritems()])
class StoreInfo(object):
    """Informs about a 'positioning action' that happened during the walk
       along a specific path from the init state to 'this state'. Used in
       function 'PathTrace.next_step()'.

       A 'positioning action' is the storage of the current input position
       into a dedicated position register. Objects of class 'StoreInfo' are
       stored in dictionaries where the key represents the pattern-id, which
       is at the same time the identifier of the position storage register.
       (Later the position register is remapped according to required
       entries.)

       'This state' means the state where the trace lead to. The member
       '.path_since_positioning' gets one more state index appended at each
       transition along a path.

       The member '.positioning_state_index' is the state where the
       positioning happened. If there is a loop along the path from
       '.positioning_state_index' to 'this state', then
       '.transition_n_since_positioning' is set to 'E_TransitionN.VOID'
       (the distance can no longer be derived from the state machine).
    """
    __slots__ = ('path_since_positioning', '__transition_n_since_positioning', '__loop_f')

    def __init__(self, PathSincePositioning, TransitionNSincePositioning=None):
        self.path_since_positioning = PathSincePositioning
        if TransitionNSincePositioning is None:
            # Derive loop-ness from the path itself: a repeated state index
            # means the path went through a loop.
            if len(PathSincePositioning) != len(set(PathSincePositioning)):
                # BUG FIX: was 'self.__loopf = True'. That name is not in
                # __slots__ (it mangles to '_StoreInfo__loopf'), so this
                # branch raised AttributeError on every looped path.
                self.__loop_f                         = True
                self.__transition_n_since_positioning = E_TransitionN.VOID
            else:
                self.__loop_f                         = False
                self.__transition_n_since_positioning = len(PathSincePositioning) - 1
        else:
            if TransitionNSincePositioning == E_TransitionN.VOID:
                self.__loop_f = True
            else:
                self.__loop_f = False
            self.__transition_n_since_positioning = TransitionNSincePositioning

    def reproduce(self, StateIndex):
        """Reproduce: Clone + update for additional StateIndex in the path."""
        path_since_positioning         = copy(self.path_since_positioning)
        transition_n_since_positioning = self.get_transition_n_since_positioning_update(StateIndex)
        path_since_positioning.append(StateIndex)
        return StoreInfo(path_since_positioning, transition_n_since_positioning)

    def get_transition_n_since_positioning_update(self, StateIndex):
        """RETURNS: Value of 'transition_n_since_positioning' when
                    'StateIndex' is put on the path.
        """
        if     StateIndex in self.path_since_positioning \
           and self.__transition_n_since_positioning != E_TransitionN.LEXEME_START_PLUS_ONE:
            # Loop detected: the distance can no longer be computed.
            return E_TransitionN.VOID
        elif isinstance(self.__transition_n_since_positioning, (int, long)):
            return self.__transition_n_since_positioning + 1
        else:
            # Symbolic value (e.g. VOID, LEXEME_START_PLUS_ONE) stays as-is.
            return self.__transition_n_since_positioning

    @property
    def loop_f(self):
        # NOT: 'self.__transition_n_since_positioning == E_TransitionN.VOID',
        # because that comparison is much slower than returning a plain
        # boolean. THIS FUNCTION MAY BE CALLED EXTENSIVELY!
        return self.__loop_f

    @property
    def transition_n_since_positioning(self):
        return self.__transition_n_since_positioning

    @property
    def positioning_state_index(self):
        # The path starts at the state where the position was stored.
        return self.path_since_positioning[0]

    def is_equal(self, Other):
        return     self.transition_n_since_positioning == Other.transition_n_since_positioning \
               and self.positioning_state_index == Other.positioning_state_index

    def __repr__(self):
        txt = ["---\n"]
        txt.append(" .path_since_positioning = %s\n" % repr(self.path_since_positioning))
        return "".join(txt)
class AcceptInfo(StoreInfo):
    """Information about the acceptance behavior in a state, resulting from
       events that happened before on a specific path from the init state to
       'this_state'.

       Acceptance of a pattern occurs when the state machine can proceed no
       further on a given input (philosophy of 'longest match'), i.e. on
       'drop-out'. An AcceptInfo object tells about the acceptance of one
       particular pattern:

       .pattern_id             -- Identifies the pattern that is concerned.
       .pre_context_id         -- E_PreContextIDs.NONE => always accepted;
                                  otherwise, the pre-context must be checked
                                  before the pattern can be accepted.
       .accepting_state_index  -- State that caused the acceptance somewhere
                                  before on the path (may be 'this state').
       .transition_n_since_positioning
                               -- Transitions since the input position was
                                  stored; E_TransitionN.VOID if a loop made
                                  the number undeterminable.
       .positioning_state_index
                               -- State where the position was stored. For
                                  post-context patterns this differs from
                                  the accepting state.
    """
    __slots__ = ("pre_context_id", "pattern_id", "accepting_state_index")

    def __init__(self, PreContextID, PatternID,
                 AcceptingStateIndex, PathSincePositioning,
                 TransitionNSincePositioning):
        self.pre_context_id        = PreContextID
        self.pattern_id            = PatternID
        self.accepting_state_index = AcceptingStateIndex
        # FAILURE always restores to one behind the lexeme start.
        if PatternID == E_AcceptanceIDs.FAILURE:
            transition_n = E_TransitionN.LEXEME_START_PLUS_ONE
        else:
            transition_n = TransitionNSincePositioning
        StoreInfo.__init__(self, PathSincePositioning, transition_n)

    def reproduce(self, StateIndex):
        """Reproduce: Clone + update for additional StateIndex in the path."""
        new_transition_n = self.get_transition_n_since_positioning_update(StateIndex)
        new_path         = copy(self.path_since_positioning)
        new_path.append(StateIndex)
        return AcceptInfo(self.pre_context_id,
                          self.pattern_id,
                          self.accepting_state_index,
                          new_path,
                          new_transition_n)

    def index_of_last_acceptance_on_path_since_positioning(self):
        """Index (in '.path_since_positioning') of the LAST occurrence of
           the accepting state."""
        offset_from_end = self.path_since_positioning[::-1].index(self.accepting_state_index)
        return len(self.path_since_positioning) - 1 - offset_from_end

    @property
    def positioning_state_index(self):
        # The path starts at the state where the position was stored.
        return self.path_since_positioning[0]

    def is_equal(self, Other):
        return     self.pre_context_id                 == Other.pre_context_id \
               and self.pattern_id                     == Other.pattern_id \
               and self.accepting_state_index          == Other.accepting_state_index \
               and self.transition_n_since_positioning == Other.transition_n_since_positioning \
               and self.positioning_state_index        == Other.positioning_state_index

    def __eq__(self, Other):
        return self.is_equal(Other)

    def __repr__(self):
        parts = ["---\n",
                 " .pre_context_id = %s\n" % repr(self.pre_context_id),
                 " .pattern_id = %s\n" % repr(self.pattern_id),
                 " .transition_n_since_positioning = %s\n" % repr(self.transition_n_since_positioning),
                 " .accepting_state_index = %s\n" % repr(self.accepting_state_index),
                 " .positioning_state_index = %s\n" % repr(self.positioning_state_index),
                 StoreInfo.__repr__(self)]
        return "".join(parts)
| {
"repo_name": "coderjames/pascal",
"path": "quex-0.63.1/quex/engine/analyzer/track_analysis.py",
"copies": "1",
"size": "31429",
"license": "bsd-2-clause",
"hash": 2818694341615683600,
"line_mean": 47.3523076923,
"line_max": 126,
"alpha_frac": 0.5748830698,
"autogenerated": false,
"ratio": 4.507888697647734,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5582771767447734,
"avg_score": null,
"num_lines": null
} |
"""ABSTRACT:
This module produces an object of class Analyzer. It is a representation of an
analyzer state machine (object of class StateMachine) that is suited for code
generation. In particular, track analysis results in 'decorations' for states
which help to implement efficient code.
Formally an Analyzer consists of a set of states that are related by their
transitions. Each state is an object of class AnalyzerState and has the
following components:
* entry: actions to be performed at the entry of the state.
* input: what happens to get the next character.
* transition_map: a map that tells what state is to be entered
as a reaction to the current input character.
* drop_out: what has to happen if no character triggers.
For administrative purposes, other data such as the 'state_index' is stored
along with the AnalyzerState object.
The goal of track analysis is to reduce the run-time effort of the lexical
analyzer. In particular, acceptance and input position storages may be spared
depending on the constitution of the state machine.
-------------------------------------------------------------------------------
(C) 2010-2011 Frank-Rene Schaefer
ABSOLUTELY NO WARRANTY
"""
import quex.engine.analyzer.track_analysis as track_analysis
import quex.engine.analyzer.optimizer as optimizer
from quex.engine.analyzer.state.core import AnalyzerState
from quex.engine.analyzer.state.drop_out import DropOut
import quex.engine.analyzer.mega_state.analyzer as mega_state_analyzer
import quex.engine.analyzer.position_register_map as position_register_map
from quex.engine.state_machine.core import StateMachine
from quex.blackboard import setup as Setup
from quex.blackboard import E_AcceptanceIDs, \
E_EngineTypes, \
E_TransitionN, \
E_PreContextIDs
from collections import defaultdict
from operator import itemgetter
from itertools import islice, imap, izip
def do(SM, EngineType=E_EngineTypes.FORWARD):
    """Translate a StateMachine into an Analyzer ready for code generation.

    Builds the Analyzer, runs the optimizer over it, registers the result
    with the language database (which needs it for labels etc.), and finally
    lets the mega-state analyzer combine states where the user requested it.
    """
    result = optimizer.do(Analyzer(SM, EngineType))
    # The language database requires the analyzer for labels etc.
    if Setup.language_db is not None:
        Setup.language_db.register_analyzer(result)
    # If required by the user: combine some states into mega states.
    mega_state_analyzer.do(result)
    return result
class Analyzer:
    """A representation of a pattern analyzing StateMachine suitable for
    effective code generation.

    Holds one AnalyzerState per state of the given StateMachine, decorated
    with drop-out and entry actions derived from track analysis.

    NOTE(review): this module uses Python 2 APIs throughout
    (dict.iteritems/iterkeys/itervalues, itertools.imap/izip).
    """
    def __init__(self, SM, EngineType):
        # SM         -- the StateMachine to be represented.
        # EngineType -- one of E_EngineTypes (FORWARD triggers the full
        #               drop-out/entry construction below).
        assert EngineType in E_EngineTypes
        assert isinstance(SM, StateMachine)
        self.__acceptance_state_index_list = SM.get_acceptance_state_index_list()
        self.__init_state_index = SM.init_state_index
        self.__state_machine_id = SM.get_id()
        self.__engine_type = EngineType
        # (*) PathTrace database, Successor database
        self.__trace_db, self.__dangerous_positioning_state_set = track_analysis.do(SM)
        # (*) From/To Databases
        #
        # from_db: state_index --> states from which it is entered.
        # to_db: state_index --> states which it enters
        #
        from_db = defaultdict(set)
        to_db = defaultdict(set)
        for from_index, state in SM.states.iteritems():
            to_db[from_index] = set(state.transitions().get_map().iterkeys())
            for to_index in state.transitions().get_map().iterkeys():
                from_db[to_index].add(from_index)
        self.__from_db = from_db
        self.__to_db = to_db
        # (*) Prepare AnalyzerState Objects
        self.__state_db = dict([(state_index, AnalyzerState(SM.states[state_index], state_index,
                                                            state_index == SM.init_state_index,
                                                            EngineType,
                                                            from_db[state_index]))
                                for state_index in self.__trace_db.iterkeys()])
        if EngineType != E_EngineTypes.FORWARD:
            # BACKWARD_INPUT_POSITION, BACKWARD_PRE_CONTEXT:
            #
            # DropOut and Entry do not require any construction beyond what is
            # accomplished inside the constructor of 'AnalyzerState'. No positions
            # need to be stored and restored.
            self.__position_register_map = None
            self.__position_info_db = None
            return
        # (*) Positioning info:
        #
        # map: (state_index) --> (pattern_id) --> positioning info
        #
        self.__position_info_db = {}
        for state_index, trace_list in self.__trace_db.iteritems():
            self.__position_info_db[state_index] = self.multi_path_positioning_analysis(trace_list)
        # (*) Drop Out Behavior
        # The PathTrace objects tell what to do at drop_out. From this, the
        # required entry actions of states can be derived.
        self.__require_acceptance_storage_list = []
        self.__require_position_storage_list = []
        for state_index, trace_list in self.__trace_db.iteritems():
            state = self.__state_db[state_index]
            # trace_list: PathTrace objects for each path that guides to state.
            self.configure_drop_out(state, trace_list)
        # (*) Entry Behavior
        # Implement the required entry actions.
        self.configure_entries()
        # (*) Position Register Map (Used in 'optimizer.py')
        self.__position_register_map = position_register_map.do(self)
    # Read-only accessors for the databases built in __init__.
    @property
    def trace_db(self): return self.__trace_db
    @property
    def state_db(self): return self.__state_db
    @property
    def init_state_index(self): return self.__init_state_index
    @property
    def position_register_map(self): return self.__position_register_map
    @property
    def state_machine_id(self): return self.__state_machine_id
    @property
    def engine_type(self): return self.__engine_type
    @property
    def position_info_db(self): return self.__position_info_db
    @property
    def acceptance_state_index_list(self): return self.__acceptance_state_index_list
    @property
    def to_db(self):
        """Map: state_index --> list of states that it enters."""
        return self.__to_db
    @property
    def from_db(self):
        """Map: state_index --> list of states that enter it."""
        return self.__from_db
    def last_acceptance_variable_required(self):
        """If one entry stores the last_acceptance, then the
        correspondent variable is required to be defined.

        Only meaningful for FORWARD engines; others never store acceptance.
        """
        if self.__engine_type != E_EngineTypes.FORWARD: return False
        for entry in imap(lambda x: x.entry, self.__state_db.itervalues()):
            if entry.has_accepter(): return True
        return False
    def configure_drop_out(self, state, ThePathTraceList):
        """Every analysis step ends with a 'drop-out'. At this moment it is
        decided what pattern has won. Also, the input position pointer
        must be set so that it indicates the right location where the
        next step starts the analysis.

        Consequently, a drop-out action contains two elements:

        -- Acceptance Checker: Dependent on the fulfilled pre-contexts a
           winning pattern is determined.
           If acceptance depends on stored acceptances, a request is raised
           at each accepting state that is has to store its acceptance in
           variable 'last_acceptance'.

        -- Terminal Router: Dependent on the accepted pattern the input
           position is modified and the terminal containing the pattern
           action is entered.
           If the input position is restored from a position register,
           then the storing states are requested to store the input
           position.

        --------------------------------------------------------------------
        HINT:
        A state may be reached via multiple paths. For each path there is a
        separate PathTrace. Each PathTrace tells what has to happen in the state
        depending on the pre-contexts being fulfilled or not (if there are
        even any pre-context patterns).
        """
        assert len(ThePathTraceList) != 0
        result = DropOut()
        # (*) Acceptance Checker
        uniform_f = self.multi_path_acceptance_analysis(ThePathTraceList)
        if uniform_f:
            # (i) Uniform Acceptance Pattern for all paths through the state.
            #
            # Use one trace as prototype. No related state needs to store
            # acceptance at entry.
            prototype = ThePathTraceList[0]
            for x in prototype.acceptance_trace:
                result.accept(x.pre_context_id, x.pattern_id)
                # No further checks after unconditional acceptance necessary
                if x.pre_context_id == E_PreContextIDs.NONE \
                   and x.pattern_id != E_AcceptanceIDs.FAILURE: break
        else:
            # (ii) Non-Uniform Acceptance Patterns
            #
            # Different paths to one state result in different acceptances.
            # There is only one way to handle this:
            #
            # -- The acceptance must be stored in the state where it occurs, and
            # -- It must be restored here.
            #
            result.accept(E_PreContextIDs.NONE, E_AcceptanceIDs.VOID)
            # Dependency: Related states are required to store acceptance at state entry.
            for trace in ThePathTraceList:
                self.__require_acceptance_storage_list.extend(trace.acceptance_trace)
            # Later on, a function will use the '__require_acceptance_storage_list' to
            # implement the acceptance storage.
        # (*) Terminal Router
        for pattern_id, info in self.__position_info_db[state.index].iteritems():
            result.route_to_terminal(pattern_id, info.transition_n_since_positioning)
            if info.transition_n_since_positioning == E_TransitionN.VOID:
                # Request the storage of the position from related states.
                self.__require_position_storage_list.append((state.index, pattern_id, info))
        # Later on, a function will use the '__require_position_storage_list' to
        # implement the position storage.
        result.trivialize()
        state.drop_out = result
    def configure_entries(self):
        """DropOut objects may rely on acceptances and input positions being
        stored. This storage happens at state entries.

        Function 'configure_drop_out()' registers which states have to store
        the input position and which ones have to store acceptances. These
        tasks are specified in the two members:

             self.__require_acceptance_storage_list
             self.__require_position_storage_list

        It is tried to postpone the storing as much as possible along the
        state paths from store to restore. Thus, some states may not have to
        store, and thus the lexical analyzer becomes a little faster.
        """
        self.implement_required_acceptance_storage()
        self.implement_required_position_storage()
    def implement_required_acceptance_storage(self):
        """
        Storing Acceptance / Postpone as much as possible.

        The stored 'last_acceptance' is only needed at the first time
        when it is restored. So, we could walk along the path from the
        accepting state to the end of the path and see when this happens.

        Critical case:

          State V --- "acceptance = A" -->-.
                                            \
                                             State Y ----> State Z
                                            /
          State W --- "acceptance = B" -->-'

        That is, if state Y is entered from state V is shall store 'A'
        as accepted, if it is entered from state W is shall store 'B'.
        In this case, we cannot walk the path further, and say that when
        state Z is entered it has to store 'A'. This would cancel the
        possibility of having 'B' accepted here. There is good news:

        ! During the 'configure_drop_out()' the last acceptance is restored    !
        ! if and only if there are at least two paths with differing           !
        ! acceptance patterns. Thus, it is sufficient to consider the restore  !
        ! of acceptance in the drop_out as a terminal condition.               !
        """
        postponed_db = defaultdict(set)
        for acceptance_trace in self.__require_acceptance_storage_list:
            accepting_state_index = acceptance_trace.accepting_state_index
            path_since_positioning = acceptance_trace.path_since_positioning
            pre_context_id = acceptance_trace.pre_context_id
            pattern_id = acceptance_trace.pattern_id
            # Find the first place on the path where the acceptance is restored
            # - starting from the last accepting state.
            begin_i = acceptance_trace.index_of_last_acceptance_on_path_since_positioning()
            prev_state_index = None
            for state_index in islice(path_since_positioning, begin_i, None):
                if self.__state_db[state_index].drop_out.restore_acceptance_f:
                    break
                prev_state_index = state_index
            if prev_state_index is not None:
                # Deferred storage: the state BEFORE the restoring state
                # stores the acceptance at its entry doors.
                entry = self.__state_db[state_index].entry
                path_trace_list = self.__trace_db[prev_state_index]
                for path_trace in path_trace_list:
                    entry.doors_accept(FromStateIndex=prev_state_index, PathTraceList=path_trace.acceptance_trace)
            else:
                # Postpone:
                #
                # Here, storing Acceptance cannot be deferred to subsequent states, because
                # the first state that restores acceptance is the acceptance state itself.
                #
                # (1) Restore only happens if there is non-uniform acceptance. See
                #     function 'configure_drop_out(...)'.
                # (2) Non-uniform acceptance only happens, if there are multiple paths
                #     to the same state with different trailing acceptances.
                # (3) If there was an absolute acceptance, then all previous trailing
                #     acceptance were deleted (longest match). This contradicts (2).
                #
                # (4) => Thus, there are only pre-contexted acceptances in such a state.
                assert pre_context_id != E_PreContextIDs.NONE
                postponed_db[accepting_state_index].add((pre_context_id, pattern_id))
        # Postponed: Collected acceptances to be stored in the acceptance states itself.
        #
        # It is possible that a deferred acceptance are already present in the doors. But,
        # since they all come from trailing acceptances, we know that the acceptance of
        # this state preceeds (longest match). Thus, all the acceptances we add here
        # preceed the already mentioned ones. Since they all trigger on lexemes of the
        # same length, the only precendence criteria is the pattern_id.
        #
        for state_index, info_set in postponed_db.iteritems():
            entry = self.__state_db[state_index].entry
            for pre_context_id, pattern_id in sorted(list(info_set), key=itemgetter(1), reverse=True):
                entry.doors_accepter_add_front(pre_context_id, pattern_id)
    def implement_required_position_storage(self):
        """
        Store Input Position / Postpone as much as possible.

        Before we do not reach a state that actually restores the position, it
        does make little sense to store the input position.

        Critical Point: Loops and Forks

        If a loop is reached then the input position can no longer be determined
        by the transition number. The good news is that during 'configure_drop_out'
        any state that has undetermined positioning restores the input position.
        Thus 'restore_position_f(register)' is enough to catch this case.
        """
        def get_positioning_state_iterable(from_state_index, path):
            # 'Dangerous' positioning states fan out to ALL successors;
            # otherwise only the next state on the concrete path is used.
            if from_state_index in self.__dangerous_positioning_state_set:
                for to_state_index in self.__to_db[from_state_index]:
                    yield to_state_index
            else:
                yield path[1]
        for state_index, pattern_id, info in self.__require_position_storage_list:
            # state_index --> state that restores the input position
            # pattern_id --> pattern which is concerned
            for path in info.path_list_since_positioning:
                # Never store the input position in the state itself. The input position
                # is reached after the entries have been passed.
                from_state_index = path[0]
                for to_state_index in get_positioning_state_iterable(from_state_index, path):
                    state = self.__state_db[to_state_index]
                    # Never store the input position in the state itself. The input position
                    # is reached after the entries have been passed.
                    state.entry.doors_store(FromStateIndex = from_state_index,
                                            PreContextID = info.pre_context_id,
                                            PositionRegister = pattern_id,
                                            Offset = 0)
                # offset = -1
                # for state_index in islice(path, 1, None):
                # offset += 1
                # state = self.__state_db[state_index]
                # if not state.drop_out.restore_position_f(pattern_id):
                # prev_state_index = state_index
                # continue
                # state.entry.doors_store(FromStateIndex = prev_state_index,
                # PreContextID = info.pre_context_id,
                # PositionRegister = pattern_id,
                # Offset = offset)
                # break
    def multi_path_positioning_analysis(self, ThePathTraceList):
        """
        This function draws conclusions on the input positioning behavior at
        drop-out based on different paths through the same state. Basis for
        the analysis are the PathTrace objects of a state specified as
        'ThePathTraceList'.

        RETURNS: For a given state's PathTrace list a dictionary that maps:

                     pattern_id --> PositioningInfo

        --------------------------------------------------------------------
        There are the following alternatives for setting the input position:

        (1) 'lexeme_start_p + 1' in case of failure.
        (2) 'input_p + offset' if the number of transitions between
            any storing state and the current state is does not differ
            dependent on the path taken (and does not contain loops).
        (3) 'input_p = position_register[i]' if (1) and (2) are not
            not the case.

        The detection of loops has been accomplished during the construction
        of the PathTrace objects for each state. This function focusses on
        the possibility to have different paths to the same state with
        different positioning behaviors.
        """
        class PositioningInfo(object):
            # Aggregates positioning data of all paths for ONE pattern_id.
            __slots__ = ("transition_n_since_positioning",
                         "pre_context_id",
                         "path_list_since_positioning")
            def __init__(self, PathTraceElement):
                self.transition_n_since_positioning = PathTraceElement.transition_n_since_positioning
                self.path_list_since_positioning = [ PathTraceElement.path_since_positioning ]
                self.pre_context_id = PathTraceElement.pre_context_id
            @property
            def positioning_state_index_set(self):
                return set(path[0] for path in self.path_list_since_positioning)
            def add(self, PathTraceElement):
                # Diverging transition counts over paths => positioning VOID.
                self.path_list_since_positioning.append(PathTraceElement.path_since_positioning)
                if self.transition_n_since_positioning != PathTraceElement.transition_n_since_positioning:
                    self.transition_n_since_positioning = E_TransitionN.VOID
            def __repr__(self):
                txt = ".transition_n_since_positioning = %s\n" % repr(self.transition_n_since_positioning)
                txt += ".positioning_state_index_set = %s\n" % repr(self.positioning_state_index_set)
                txt += ".pre_context_id = %s\n" % repr(self.pre_context_id)
                return txt
        positioning_info_by_pattern_id = {}
        # -- If the positioning differs for one element in the trace list, or
        # -- one element has undetermined positioning,
        # => then the acceptance relates to undetermined positioning.
        for trace in ThePathTraceList:
            for element in trace.acceptance_trace:
                assert element.pattern_id != E_AcceptanceIDs.VOID
                prototype = positioning_info_by_pattern_id.get(element.pattern_id)
                if prototype is None:
                    positioning_info_by_pattern_id[element.pattern_id] = PositioningInfo(element)
                else:
                    prototype.add(element)
        return positioning_info_by_pattern_id
    def multi_path_acceptance_analysis(self, ThePathTraceList):
        """
        This function draws conclusions on the input positioning behavior at
        drop-out based on different paths through the same state. Basis for
        the analysis are the PathTrace objects of a state specified as
        'ThePathTraceList'.

        Acceptance Uniformity:

            For any possible path to 'this' state the acceptance pattern is
            the same. That is, it accepts exactly the same pattern under the
            same pre contexts and in the same sequence of precedence.

        The very nice thing is that the 'acceptance_trace' of a PathTrace
        object reflects the precedence of acceptance. Thus, one can simply
        compare the acceptance trace objects of each PathTrace.

        RETURNS: True - uniform acceptance pattern.
                 False - acceptance pattern is not uniform.
        """
        prototype = ThePathTraceList[0].acceptance_trace
        # Check (1) and (2)
        for path_trace in islice(ThePathTraceList, 1, None):
            acceptance_trace = path_trace.acceptance_trace
            if len(prototype) != len(acceptance_trace): return False
            for x, y in izip(prototype, acceptance_trace):
                if x.pre_context_id != y.pre_context_id: return False
                elif x.pattern_id != y.pattern_id: return False
        return True
    def __iter__(self):
        # Iterate over the AnalyzerState objects (order unspecified).
        for x in self.__state_db.values():
            yield x
    def __repr__(self):
        # Provide some type of order that is oriented towards the content of the states.
        # This helps to compare analyzers where the state identifiers differ, but the
        # states should be the same.
        def order(X):
            side_info = 0
            if len(X.transition_map) != 0: side_info = max(trigger_set.size() for trigger_set, t in X.transition_map)
            return (len(X.transition_map), side_info, X.index)
        txt = [ repr(state) for state in sorted(self.__state_db.itervalues(), key=order) ]
        return "".join(txt)
| {
"repo_name": "coderjames/pascal",
"path": "quex-0.63.1/quex/engine/analyzer/core.py",
"copies": "1",
"size": "24510",
"license": "bsd-2-clause",
"hash": 277877732692961300,
"line_mean": 46.87109375,
"line_max": 117,
"alpha_frac": 0.5943288454,
"autogenerated": false,
"ratio": 4.516307352128248,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009547101378308784,
"num_lines": 512
} |
# Directory holding the input abstract text files.
abstractPath = './abstracts/'
# Output file for the topic-model results (name suggests LDA, 50 passes,
# 10 topics -- confirm against the consumer of this path).
logFile = './output/all_no_data_abstracts_lda_50_passes_10_topics.txt'
# Serialized word dictionary and corpus (file extensions .dict / .mm suggest
# gensim Dictionary / Matrix Market corpus formats -- verify in the caller).
dictFile = './output/agu_no_data_wordDict.dict'
corpusFile = './output/agu_no_data_topicCorpus.mm'
# English stop words (plus the domain-specific word 'data') to be filtered
# out before building the dictionary/corpus. Stored as a set for O(1)
# membership tests; duplicate literals in the list below (e.g. 'above',
# 'we', 'were', 'what') are collapsed automatically by set().
stoplist = set(
    ['data','a', 'about', 'above', 'above', 'across', 'after', 'afterwards', 'again', 'against', 'all', 'almost',
     'alone', 'along', 'already', 'also','although','always','am','among', 'amongst', 'amoungst', 'amount',
     'an', 'and', 'another', 'any','anyhow','anyone','anything','anyway', 'anywhere', 'are', 'around', 'as',
     'at', 'back','be','became', 'because','become','becomes', 'becoming', 'been', 'before', 'beforehand',
     'behind', 'being', 'below', 'beside', 'besides', 'between', 'beyond', 'bill', 'both', 'bottom','but',
     'by', 'call', 'can', 'cannot', 'cant', 'co', 'con', 'could', 'couldnt', 'cry', 'de', 'describe', 'detail',
     'do', 'done', 'down', 'due', 'during', 'each', 'eg', 'eight', 'either', 'eleven','else', 'elsewhere',
     'empty', 'enough', 'etc', 'even', 'ever', 'every', 'everyone', 'everything', 'everywhere', 'except', 'few',
     'fifteen', 'fify', 'fill', 'find', 'fire', 'first', 'five', 'for', 'former', 'formerly', 'forty', 'found',
     'four', 'from', 'front', 'full', 'further', 'get', 'give', 'go', 'had', 'has', 'hasnt', 'have', 'he', 'hence',
     'her', 'here', 'hereafter', 'hereby', 'herein', 'hereupon', 'hers', 'herself', 'him', 'himself', 'his', 'how',
     'however', 'hundred', 'ie', 'if', 'in', 'inc', 'indeed', 'interest', 'into', 'is', 'it', 'its', 'itself', 'keep',
     'last', 'latter', 'latterly', 'least', 'less', 'ltd', 'made', 'many', 'may', 'me', 'meanwhile', 'might', 'mill',
     'mine', 'more', 'moreover', 'most', 'mostly', 'move', 'much', 'must', 'my', 'myself', 'name', 'namely', 'neither',
     'never', 'nevertheless', 'next', 'nine', 'no', 'nobody', 'none', 'noone', 'nor', 'not', 'nothing', 'now', 'nowhere',
     'of', 'off', 'often', 'on', 'once', 'one', 'only', 'onto', 'or', 'other', 'others', 'otherwise', 'our', 'ours',
     'ourselves', 'out', 'over', 'own','part', 'per', 'perhaps', 'please', 'put', 'rather', 're', 'same', 'see', 'seem',
     'seemed', 'seeming', 'seems', 'serious', 'several', 'she', 'should', 'show', 'side', 'since', 'sincere', 'six',
     'sixty', 'so', 'some', 'somehow', 'someone', 'something', 'sometime', 'sometimes', 'somewhere', 'still', 'such',
     'system', 'take', 'ten', 'than', 'that', 'the', 'their', 'them', 'themselves', 'then', 'thence', 'there',
     'thereafter', 'thereby', 'therefore', 'therein', 'thereupon', 'these', 'they', 'thickv', 'thin', 'third', 'this',
     'those', 'though', 'three', 'through', 'throughout', 'thru', 'thus', 'to', 'together', 'too', 'top', 'toward',
     'towards', 'twelve', 'twenty', 'two', 'un', 'under', 'until', 'up', 'upon', 'us', 'very', 'via', 'was', 'we', 'well',
     'were', 'what', 'whatever', 'when', 'whence', 'whenever', 'where', 'whereafter', 'whereas', 'whereby', 'wherein',
     'whereupon', 'wherever', 'whether', 'which', 'while', 'whither', 'who', 'whoever', 'whole', 'whom', 'whose', 'why',
     'will', 'with', 'within', 'without', 'would', 'yet', 'you', 'your', 'yours', 'yourself', 'yourselves',
     'aren\'t', 'can\'t', 'couldn\'t', 'didn\'t', 'doesn\'t', 'don\'t', 'hadn\'t', 'hasn\'t', 'haven\'t', 'he\'d',
     'he\'ll', 'he\'s', 'here\'s', 'how\'s', 'i', 'i\'d', 'i\'ll', 'i\'m', 'i\'ve', 'isn\'t', 'it\'s',
     'let\'s', 'mustn\'t', 'shan\'t', 'she\'d', 'she\'ll', 'she\'s','shouldn\'t', 'that\'s', 'there\'s', 'they\'d',
     'they\'ll','they\'re', 'they\'ve', 'wasn\'t', 'we', 'we\'d', 'we\'ll', 'we\'re', 'we\'ve', 'were', 'weren\'t',
     'what', 'what\'s', 'when\'s', 'where\'s','who\'s', 'why\'s', 'won\'t', 'wouldn\'t', 'you\'d', 'you\'ll', 'you\'re',
     'you\'ve']
) | {
"repo_name": "narock/narock.github.io",
"path": "research/agu_scientometrics/utils.py",
"copies": "1",
"size": "3868",
"license": "mit",
"hash": 2147262334741369000,
"line_mean": 95.725,
"line_max": 124,
"alpha_frac": 0.5175801448,
"autogenerated": false,
"ratio": 2.6348773841961854,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.3652457528996185,
"avg_score": null,
"num_lines": null
} |
"""Abstract permissions container.
"""
from __future__ import print_function
from __future__ import unicode_literals
import typing
from typing import Iterable
import six
from ._typing import Text
if typing.TYPE_CHECKING:
from typing import Iterator, List, Optional, Tuple, Type, Union
def make_mode(init):
    # type: (Union[int, Iterable[Text], None]) -> int
    """Return a mode integer built from an initial value.

    ``init`` may be ``None``, a mode integer, or an iterable of permission
    names; conversion is delegated to `Permissions.get_mode`.
    """
    mode = Permissions.get_mode(init)
    return mode
class _PermProperty(object):
    """Data descriptor exposing one named permission as a boolean attribute."""

    def __init__(self, name):
        # type: (Text) -> None
        self._name = name
        self.__doc__ = "Boolean for '{}' permission.".format(name)

    def __get__(self, obj, obj_type=None):
        # type: (Permissions, Optional[Type[Permissions]]) -> bool
        # Membership in the owning object decides whether the flag is set.
        return self._name in obj

    def __set__(self, obj, value):
        # type: (Permissions, bool) -> None
        # Truthy value grants the permission; falsy value revokes it.
        (obj.add if value else obj.remove)(self._name)
@six.python_2_unicode_compatible
class Permissions(object):
    """An abstraction for file system permissions.

    Permissions objects store information regarding the permissions
    on a resource. It supports Linux permissions, but is generic enough
    to manage permission information from almost any filesystem.

    Example:
        >>> from fs.permissions import Permissions
        >>> p = Permissions(user='rwx', group='rw-', other='r--')
        >>> print(p)
        rwxrw-r--
        >>> p.mode
        500
        >>> oct(p.mode)
        '0o764'

    """

    # Permission names paired with their Linux mode bit masks, ordered
    # from the highest bit (setuid, 0o4000) down to 'other execute' (0o1).
    _LINUX_PERMS = [
        ("setuid", 2048),
        ("setguid", 1024),
        ("sticky", 512),
        ("u_r", 256),
        ("u_w", 128),
        ("u_x", 64),
        ("g_r", 32),
        ("g_w", 16),
        ("g_x", 8),
        ("o_r", 4),
        ("o_w", 2),
        ("o_x", 1),
    ]  # type: List[Tuple[Text, int]]
    _LINUX_PERMS_NAMES = [_name for _name, _mask in _LINUX_PERMS]  # type: List[Text]

    def __init__(
        self,
        names=None,  # type: Optional[Iterable[Text]]
        mode=None,  # type: Optional[int]
        user=None,  # type: Optional[Text]
        group=None,  # type: Optional[Text]
        other=None,  # type: Optional[Text]
        sticky=None,  # type: Optional[bool]
        setuid=None,  # type: Optional[bool]
        setguid=None,  # type: Optional[bool]
    ):
        # type: (...) -> None
        """Create a new `Permissions` instance.

        Arguments:
            names (list, optional): A list of permissions.
            mode (int, optional): A mode integer.
            user (str, optional): A triplet of *user* permissions, e.g.
                ``"rwx"`` or ``"r--"``
            group (str, optional): A triplet of *group* permissions, e.g.
                ``"rwx"`` or ``"r--"``
            other (str, optional): A triplet of *other* permissions, e.g.
                ``"rwx"`` or ``"r--"``
            sticky (bool, optional): A boolean for the *sticky* bit.
            setuid (bool, optional): A boolean for the *setuid* bit.
            setguid (bool, optional): A boolean for the *setguid* bit.

        """
        if names is not None:
            self._perms = set(names)
        elif mode is not None:
            self._perms = {name for name, mask in self._LINUX_PERMS if mode & mask}
        else:
            # Build from 'rwx'-style triplets; '-' placeholders are skipped.
            perms = self._perms = set()
            perms.update("u_" + p for p in user or "" if p != "-")
            perms.update("g_" + p for p in group or "" if p != "-")
            perms.update("o_" + p for p in other or "" if p != "-")
        if sticky:
            self._perms.add("sticky")
        if setuid:
            self._perms.add("setuid")
        if setguid:
            self._perms.add("setguid")

    def __repr__(self):
        # type: () -> Text
        # Non-Linux permission names cannot be rendered as triplets;
        # fall back to the generic 'names=[...]' form.
        if not self._perms.issubset(self._LINUX_PERMS_NAMES):
            _perms_str = ", ".join("'{}'".format(p) for p in sorted(self._perms))
            return "Permissions(names=[{}])".format(_perms_str)

        def _check(perm, name):
            # type: (Text, Text) -> Text
            return name if perm in self._perms else ""

        user = "".join((_check("u_r", "r"), _check("u_w", "w"), _check("u_x", "x")))
        group = "".join((_check("g_r", "r"), _check("g_w", "w"), _check("g_x", "x")))
        other = "".join((_check("o_r", "r"), _check("o_w", "w"), _check("o_x", "x")))
        args = []
        _fmt = "user='{}', group='{}', other='{}'"
        basic = _fmt.format(user, group, other)
        args.append(basic)
        if self.sticky:
            args.append("sticky=True")
        if self.setuid:
            args.append("setuid=True")
        # BUG FIX: this previously re-tested `self.setuid`, so 'setguid=True'
        # was reported whenever setuid was set and never for setguid alone.
        if self.setguid:
            args.append("setguid=True")
        return "Permissions({})".format(", ".join(args))

    def __str__(self):
        # type: () -> Text
        return self.as_str()

    def __iter__(self):
        # type: () -> Iterator[Text]
        return iter(self._perms)

    def __contains__(self, permission):
        # type: (object) -> bool
        return permission in self._perms

    def __eq__(self, other):
        # type: (object) -> bool
        # A Permissions object compares equal to another Permissions object
        # with the same permission set, or to such a set serialized by dump().
        if isinstance(other, Permissions):
            names = other.dump()  # type: object
        else:
            names = other
        return self.dump() == names

    def __ne__(self, other):
        # type: (object) -> bool
        return not self.__eq__(other)

    @classmethod
    def parse(cls, ls):
        # type: (Text) -> Permissions
        """Parse permissions in Linux notation (nine ``rwx``-style chars)."""
        user = ls[:3]
        group = ls[3:6]
        other = ls[6:9]
        return cls(user=user, group=group, other=other)

    @classmethod
    def load(cls, permissions):
        # type: (List[Text]) -> Permissions
        """Load a serialized permissions object."""
        return cls(names=permissions)

    @classmethod
    def create(cls, init=None):
        # type: (Union[int, Iterable[Text], None]) -> Permissions
        """Create a permissions object from an initial value.

        Arguments:
            init (int or list, optional): May be None to use `0o777`
                permissions, a mode integer, or a list of permission names.

        Returns:
            Permissions: a new permissions instance.

        Example:
            >>> Permissions.create(None)
            Permissions(user='rwx', group='rwx', other='rwx')
            >>> Permissions.create(0o700)
            Permissions(user='rwx', group='', other='')
            >>> Permissions.create(['u_r', 'u_w', 'u_x'])
            Permissions(user='rwx', group='', other='')

        """
        if init is None:
            return cls(mode=0o777)
        if isinstance(init, cls):
            return init
        if isinstance(init, int):
            return cls(mode=init)
        if isinstance(init, list):
            return cls(names=init)
        raise ValueError("permissions is invalid")

    @classmethod
    def get_mode(cls, init):
        # type: (Union[int, Iterable[Text], None]) -> int
        """Convert an initial value to a mode integer."""
        return cls.create(init).mode

    def copy(self):
        # type: () -> Permissions
        """Make a copy of this permissions object."""
        return Permissions(names=list(self._perms))

    def dump(self):
        # type: () -> List[Text]
        """Get a list suitable for serialization."""
        return sorted(self._perms)

    def as_str(self):
        # type: () -> Text
        """Get a Linux-style string representation of permissions."""
        perms = [
            c if name in self._perms else "-"
            for name, c in zip(self._LINUX_PERMS_NAMES[-9:], "rwxrwxrwx")
        ]
        # 's'/'t' replace 'x' when the special bit and execute are both set;
        # the capital form marks the special bit without execute.
        if "setuid" in self._perms:
            perms[2] = "s" if "u_x" in self._perms else "S"
        if "setguid" in self._perms:
            perms[5] = "s" if "g_x" in self._perms else "S"
        if "sticky" in self._perms:
            perms[8] = "t" if "o_x" in self._perms else "T"
        perm_str = "".join(perms)
        return perm_str

    @property
    def mode(self):
        # type: () -> int
        """`int`: mode integer."""
        mode = 0
        for name, mask in self._LINUX_PERMS:
            if name in self._perms:
                mode |= mask
        return mode

    @mode.setter
    def mode(self, mode):
        # type: (int) -> None
        self._perms = {name for name, mask in self._LINUX_PERMS if mode & mask}

    # Boolean attribute access to the individual Linux permission bits.
    u_r = _PermProperty("u_r")
    u_w = _PermProperty("u_w")
    u_x = _PermProperty("u_x")
    g_r = _PermProperty("g_r")
    g_w = _PermProperty("g_w")
    g_x = _PermProperty("g_x")
    o_r = _PermProperty("o_r")
    o_w = _PermProperty("o_w")
    o_x = _PermProperty("o_x")
    sticky = _PermProperty("sticky")
    setuid = _PermProperty("setuid")
    setguid = _PermProperty("setguid")

    def add(self, *permissions):
        # type: (*Text) -> None
        """Add permission(s).

        Arguments:
            *permissions (str): Permission name(s), such as ``'u_w'``
                or ``'u_x'``.

        """
        self._perms.update(permissions)

    def remove(self, *permissions):
        # type: (*Text) -> None
        """Remove permission(s).

        Arguments:
            *permissions (str): Permission name(s), such as ``'u_w'``
                or ``'u_x'``.

        """
        self._perms.difference_update(permissions)

    def check(self, *permissions):
        # type: (*Text) -> bool
        """Check if one or more permissions are enabled.

        Arguments:
            *permissions (str): Permission name(s), such as ``'u_w'``
                or ``'u_x'``.

        Returns:
            bool: `True` if all given permissions are set.

        """
        return self._perms.issuperset(permissions)
| {
"repo_name": "PyFilesystem/pyfilesystem2",
"path": "fs/permissions.py",
"copies": "1",
"size": "9840",
"license": "mit",
"hash": -7365286591246904000,
"line_mean": 29.8463949843,
"line_max": 85,
"alpha_frac": 0.5217479675,
"autogenerated": false,
"ratio": 3.786071565986918,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4807819533486918,
"avg_score": null,
"num_lines": null
} |
""" **abstract_phasechange_simulation.py** provides an abstract class for phase-change simulations,
including the natural and compositional convection of binary alloys.
"""
import phaseflow
import fenics
import matplotlib
import math
import sys
class AbstractPhaseChangeSimulation(phaseflow.abstract_simulation.AbstractSimulation):
""" Implement the general model for phase-change coupled with natural and compositional convection.
This class is abstract, because an instantiable simulation still requires
definitions for the mesh, initial values, and boundary conditions. """
    def __init__(self, time_order = 1, integration_measure = fenics.dx, setup_solver = True):
        """Initialize the dimensionless model parameters and delegate to the parent.

        Args:
            time_order: temporal discretization order, forwarded to the parent.
            integration_measure: measure for the variational form (default fenics.dx).
            setup_solver: forwarded to the parent; when True, Newton solver
                parameters are tightened below (presumably the parent creates
                ``self.solver`` in that case -- confirm in AbstractSimulation).
        """
        # Dimensionless model parameters, each wrapped as a named fenics.Constant.
        self._temperature_rayleigh_number = fenics.Constant(1., name = "Ra_T")
        self._concentration_rayleigh_number = fenics.Constant(-1., name = "Ra_C")
        self._prandtl_number = fenics.Constant(1., name = "Pr")
        self._stefan_number = fenics.Constant(1., name = "Ste")
        self._schmidt_number = fenics.Constant(1., name = "Sc")
        self._pure_liquidus_temperature = fenics.Constant(0., name = "T_m")
        self._liquidus_slope = fenics.Constant(-1., name = "m_L")
        self._liquid_viscosity = fenics.Constant(1., name = "mu_L")
        # NOTE(review): mu_S >> mu_L -- presumably to immobilize the solid
        # phase via a large viscosity; confirm against the variational form.
        self._solid_viscosity = fenics.Constant(1.e8, name = "mu_S")
        self._pressure_penalty_factor = fenics.Constant(1.e-7, name = "gamma")
        self._regularization_central_temperature_offset = fenics.Constant(
            0., name = "delta_T")
        self._regularization_smoothing_parameter = fenics.Constant(1./64., name = "r")
        # Optional sequence of regularization parameters; not set up here.
        self.regularization_sequence = None
        super().__init__(
            time_order = time_order,
            integration_measure = integration_measure,
            setup_solver = setup_solver)
        self.nonlinear_solver_table_filename = "NonlinearSolverTable.txt"
        if setup_solver:
            self.solver.parameters["newton_solver"]["maximum_iterations"] = 20
            self.solver.parameters["newton_solver"]["absolute_tolerance"] = 1.e-9
@property
def temperature_rayleigh_number(self):
return self._temperature_rayleigh_number
@property
def concentration_rayleigh_number(self):
return self._concentration_rayleigh_number
@property
def prandtl_number(self):
return self._prandtl_number
@property
def stefan_number(self):
return self._stefan_number
@property
def schmidt_number(self):
return self._schmidt_number
@property
def prandtl_number(self):
return self._prandtl_number
@property
def pure_liquidus_temperature(self):
return self._pure_liquidus_temperature
@property
def liquidus_slope(self):
return self._liquidus_slope
@property
def prandtl_number(self):
return self._prandtl_number
@property
def liquid_viscosity(self):
return self._liquid_viscosity
@property
def solid_viscosity(self):
return self._solid_viscosity
@property
def pressure_penalty_factor(self):
return self._pressure_penalty_factor
@property
def regularization_central_temperature_offset(self):
return self._regularization_central_temperature_offset
@property
def regularization_smoothing_parameter(self):
return self._regularization_smoothing_parameter
def phi(self, T, C, T_m, m_L, delta_T, s):
""" The regularized semi-phasefield. """
T_L = delta_T + T_m + m_L*C
tanh = fenics.tanh
return 0.5*(1. + tanh((T_L - T)/s))
def semi_phasefield(self, T, C):
""" The semi-phasefield $phi$ given in UFL. """
T_m = self.pure_liquidus_temperature
m_L = self.liquidus_slope
delta_T = self.regularization_central_temperature_offset
s = self.regularization_smoothing_parameter
return self.phi(T = T, C = C, T_m = T_m, m_L = m_L, delta_T = delta_T, s = s)
def point_value_from_semi_phasefield(self, T, C):
""" The semi-phasefield $phi$ sutiable for evaluation given a single $T$ and $C$.
Maybe there is a way to evaluate the UFL expression rather than having to provide this
redundant function.
"""
T_m = self.pure_liquidus_temperature.values()[0]
m_L = self.liquidus_slope.values()[0]
delta_T = self.regularization_central_temperature_offset.values()[0]
s = self.regularization_smoothing_parameter.values()[0]
return self.phi(T = T, C = C, T_m = T_m, m_L = m_L, delta_T = delta_T, s = s)
    def time_discrete_terms(self):
        """ Return the discrete time derivatives which are needed for the variational form.

        Extends the parent's (u, T, C) time derivatives with the discrete time
        derivative of the semi-phasefield phi, built with the same backward
        difference formula (backward Euler or BDF2) as the other fields.
        """
        p_t, u_t, T_t, C_t = super().time_discrete_terms()
        # Split the monolithic solutions at the latest time levels into their
        # (pressure, velocity, temperature, liquid concentration) components.
        pnp1, unp1, Tnp1, Cnp1_L = fenics.split(self._solutions[0].leaf_node())
        pn, un, Tn, Cn_L = fenics.split(self._solutions[1].leaf_node())
        phinp1 = self.semi_phasefield(T = Tnp1, C = Cnp1_L)
        phin = self.semi_phasefield(T = Tn, C = Cn_L)
        if self.time_order == 1:
            # Backward Euler: first order, needs only the two latest levels.
            phi_t = phaseflow.backward_difference_formulas.apply_backward_euler(
                Delta_t = self._timestep_sizes[0],
                u = (phinp1, phin))
        if self.time_order > 1:
            # NOTE(review): unlike the solutions above, this one is not accessed
            # via `.leaf_node()` -- confirm whether that is intentional.
            pnm1, unm1, Tnm1, Cnm1_L = fenics.split(self._solutions[2])
            phinm1 = self.semi_phasefield(T = Tnm1, C = Cnm1_L)
            if self.time_order == 2:
                # BDF2: second order, needs the three latest levels.
                phi_t = phaseflow.backward_difference_formulas.apply_bdf2(
                    Delta_t = (self._timestep_sizes[0], self._timestep_sizes[1]),
                    u = (phinp1, phin, phinm1))
            if self.time_order > 2:
                raise NotImplementedError()
        return u_t, T_t, C_t, phi_t
def element(self):
""" Return a P1P2P1P1 element for the monolithic solution. """
P1 = fenics.FiniteElement('P', self.mesh.ufl_cell(), 1)
P2 = fenics.VectorElement('P', self.mesh.ufl_cell(), 2)
return fenics.MixedElement([P1, P2, P1, P1])
def buoyancy(self, T, C):
""" Extend the model from @cite{zimmerman2018monolithic} with a solute concentration. """
Pr = self.prandtl_number
Ra_T = self.temperature_rayleigh_number
Ra_C = self.concentration_rayleigh_number
ghat = fenics.Constant((0., -1.), name = "ghat")
return 1./Pr*(Ra_T*T + Ra_C*C)*ghat
    def governing_form(self):
        """ Extend the model from @cite{zimmerman2018monolithic} with a solute concentration balance.

        Returns the nonlinear variational form F whose root is the monolithic
        solution (p, u, T, C).
        """
        Pr = self.prandtl_number
        Ste = self.stefan_number
        Sc = self.schmidt_number
        gamma = self.pressure_penalty_factor
        mu_L = self.liquid_viscosity
        mu_S = self.solid_viscosity
        p, u, T, C = fenics.split(self.solution.leaf_node())
        u_t, T_t, C_t, phi_t = self.time_discrete_terms()
        b = self.buoyancy(T = T, C = C)
        phi = self.semi_phasefield(T = T, C = C)
        # Phase-dependent viscosity: interpolates between the liquid value and
        # the (large, see default mu_S = 1e8) solid value as phi goes 0 -> 1.
        mu = mu_L + (mu_S - mu_L)*phi
        psi_p, psi_u, psi_T, psi_C = fenics.TestFunctions(self.function_space)
        inner, dot, grad, div, sym = \
            fenics.inner, fenics.dot, fenics.grad, fenics.div, fenics.sym
        # Mass conservation (weak incompressibility constraint).
        mass = -psi_p*div(u)
        # Momentum: time derivative, buoyancy, advection, pressure, viscous stress.
        momentum = dot(psi_u, u_t + b + dot(grad(u), u)) - div(psi_u)*p \
            + 2.*mu*inner(sym(grad(psi_u)), sym(grad(u)))
        # Energy: sensible heat plus the phi_t term scaled by 1/Ste, with
        # diffusion and advection.
        enthalpy = psi_T*(T_t - 1./Ste*phi_t) \
            + dot(grad(psi_T), 1./Pr*grad(T) - T*u)
        # Solute balance over the liquid fraction (1 - phi), with the C*phi_t
        # term coupling concentration to phase change.
        concentration = \
            psi_C*((1. - phi)*C_t - C*phi_t) \
            + dot(grad(psi_C), 1./Sc*(1. - phi)*grad(C) - C*u)
        # Pressure penalty term scaled by gamma (see pressure_penalty_factor).
        stabilization = -gamma*psi_p*p
        dx = self.integration_measure
        F = (mass + momentum + enthalpy + concentration + stabilization)*dx
        return F
def adaptive_goal(self):
""" Choose the solid area as the goal for AMR. """
return self.solid_area_integrand()
def solid_area_integrand(self):
p, u, T, C = fenics.split(self.solution.leaf_node())
phi = self.semi_phasefield(T = T, C = C)
dx = self.integration_measure
return phi*dx
def solute_mass_integrand(self):
p, u, T, C = fenics.split(self.solution.leaf_node())
phi = self.semi_phasefield(T = T, C = C)
dx = self.integration_measure
return (1. - phi)*C*dx
    def area_above_critical_phi_integrand(self, critical_phi = 1.e-6):
        """Return a form measuring the area where phi exceeds `critical_phi`.

        Marks every cell whose pointwise semi-phasefield value exceeds the
        threshold and integrates unity over that marked subdomain.
        """
        # NOTE(review): this symbolic split appears unused below; only the
        # pointwise-evaluable split (`_T`, `_C`) is needed -- confirm.
        p, u, T, C = fenics.split(self.solution.leaf_node())
        _p, _u, _T, _C = self.solution.leaf_node().split()
        cell_markers = fenics.MeshFunction("size_t", self.mesh.leaf_node(), self.mesh.topology().dim())
        def phi(x):
            # Evaluate the semi-phasefield at a spatial point (assumes 2D coordinates).
            p = fenics.Point(x[0], x[1])
            return self.point_value_from_semi_phasefield(T = _T(p), C = _C(p))
        class AboveCriticalPhi(fenics.SubDomain):
            # Subdomain of points whose phi exceeds the threshold.
            def inside(self, x, on_boundary):
                return phi(x) > critical_phi
        subdomain_id = 2
        AboveCriticalPhi().mark(cell_markers, subdomain_id)
        # Integration measure restricted to the marked cells.
        dx_phistar = fenics.dx(
            domain = self.mesh.leaf_node(),
            subdomain_data = cell_markers,
            subdomain_id = subdomain_id)
        # Integrate the constant 1 over the marked subdomain to get its area.
        P1 = fenics.FiniteElement("P", self.mesh.ufl_cell(), 1)
        V = fenics.FunctionSpace(self.mesh.leaf_node(), P1)
        unity = fenics.interpolate(fenics.Expression("1.", element = P1), V)
        return unity*dx_phistar
def solve_with_auto_regularization(self,
goal_tolerance = None,
max_regularization_threshold = 4.,
max_attempts = 16,
enable_newton_solution_backup = False):
""" Catch solver failure and automatically over-regularize the problem,
then successively return to desired regularization.
If not using AMR, then the latest successful Newton solution can be saved/loaded to be more efficient
with `enable_newton_solution_backup = True`.
"""
if self.regularization_sequence == None:
self.regularization_sequence = (self.regularization_smoothing_parameter.__float__(),)
first_s_to_solve = self.regularization_sequence[0]
attempts = range(max_attempts)
solved = False
for attempt in attempts:
s_start_index = self.regularization_sequence.index(first_s_to_solve)
try:
for s in self.regularization_sequence[s_start_index:]:
self.regularization_smoothing_parameter.assign(s)
if enable_newton_solution_backup:
self.save_newton_solution()
""" Try/catch block prevents us from directly checking the
number of iterations when the solver fails."""
self.solver_status["iterations"] = \
self.solver.parameters["newton_solver"]["maximum_iterations"]
self.solver_status["solved"] = False
self.solve(goal_tolerance = goal_tolerance)
self.solver_status["solved"] = True
self.write_nonlinear_solver_table_row()
break
except RuntimeError:
if "Newton solver did not converge" not in str(sys.exc_info()):
raise
self.write_nonlinear_solver_table_row()
current_s = self.regularization_smoothing_parameter.__float__()
ss = self.regularization_sequence
print("Failed to solve with s = " + str(current_s) +
" from the sequence " + str(ss))
if attempt == attempts[-1]:
break
if current_s >= max_regularization_threshold:
print("Exceeded maximum regularization (s_max = " + str(max_regularization_threshold) + ")")
break
index = ss.index(current_s)
if index == 0:
s_to_insert = 2.*ss[0]
new_ss = (s_to_insert,) + ss
else:
s_to_insert = (current_s + ss[index - 1])/2.
new_ss = ss[:index] + (s_to_insert,) + ss[index:]
self.regularization_sequence = new_ss
print("Inserted new value of " + str(s_to_insert))
if enable_newton_solution_backup:
self.load_newton_solution()
first_s_to_solve = s_to_insert
else:
self.reset_initial_guess()
first_s_to_solve = self.regularization_sequence[0]
self.regularization_smoothing_parameter.assign(self.regularization_sequence[-1])
assert(self.solver_status["solved"])
def coarsen(self,
absolute_tolerances = (1.e-2, 1.e-2, 1.e-2, 1.e-2, 1.e-2),
maximum_refinement_cycles = 6,
circumradius_threshold = 0.01):
""" Re-mesh while preserving pointwise accuracy of solution variables. """
finesim = self.deepcopy()
adapted_coares_mesh = self.coarse_mesh()
adapted_coarse_function_space = fenics.FunctionSpace(adapted_coares_mesh, self._element)
adapted_coarse_solution = fenics.Function(adapted_coarse_function_space)
assert(self.mesh.topology().dim() == 2)
def u0(solution, point):
return solution(point)[1]
def u1(solution, point):
return solution(point)[2]
def T(solution, point):
return solution(point)[3]
def C(solution, point):
return solution(point)[4]
def phi(solution, point):
return self.point_value_from_semi_phasefield(T = T(solution, point), C = C(solution, point))
scalars = (u0, u1, T, C, phi)
for scalar, tolerance in zip(scalars, absolute_tolerances):
adapted_coarse_solution, adapted_coarse_function_space, adapted_coarse_mesh = \
phaseflow.refinement.adapt_coarse_solution_to_fine_solution(
scalar = scalar,
coarse_solution = adapted_coarse_solution,
fine_solution = finesim.solution,
element = self._element,
absolute_tolerance = tolerance,
maximum_refinement_cycles = maximum_refinement_cycles,
circumradius_threshold = circumradius_threshold)
self._mesh = adapted_coarse_mesh
self._function_space = fenics.FunctionSpace(self._mesh, self._element)
for i in range(len(self._solutions)):
self._solutions[i] = fenics.project(
finesim._solutions[i].leaf_node(), self._function_space.leaf_node())
self.setup_solver()
def deepcopy(self):
""" Extends the parent deepcopy method with attributes for this derived class """
sim = super().deepcopy()
sim.temperature_rayleigh_number.assign(self.temperature_rayleigh_number)
sim.concentration_rayleigh_number.assign(self.concentration_rayleigh_number)
sim.prandtl_number.assign(self.prandtl_number)
sim.stefan_number.assign(self.stefan_number)
sim.schmidt_number.assign(self.schmidt_number)
sim.pure_liquidus_temperature.assign(self.pure_liquidus_temperature)
sim.liquidus_slope.assign(self.liquidus_slope)
sim.liquid_viscosity.assign(self.liquid_viscosity)
sim.solid_viscosity.assign(self.solid_viscosity)
sim.pressure_penalty_factor.assign(self.pressure_penalty_factor)
sim.regularization_central_temperature_offset.assign(
self.regularization_central_temperature_offset)
sim.regularization_smoothing_parameter.assign(self.regularization_smoothing_parameter)
return sim
    def _plot(self, solution, time, savefigs = False):
        """ Plot the adaptive mesh, velocity vector field, temperature field, and phase field.

        Parameters
        ----------
        solution : monolithic solution Function to plot.
        time : simulation time, used in titles and output file names.
        savefigs : when True, also save each figure as a PNG in `self.output_dir`.
        """
        p, u, T, C = solution.leaf_node().split()
        phi = fenics.project(self.semi_phasefield(T = T, C = C), mesh = self.mesh.leaf_node())
        # Plot the bulk concentration (1 - phi)*C rather than the liquid concentration C.
        Cbar = fenics.project(C*(1. - phi), mesh = self.mesh.leaf_node())
        for var, label, colorbar, varname in zip(
                (solution.function_space().mesh().leaf_node(), p, u, T, Cbar, phi),
                ("$\Omega_h$", "$p$", "$\mathbf{u}$", "$T$", "$\overline{C}$", "$\phi$"),
                (False, True, True, True, True, True),
                ("mesh", "p", "u", "T", "Cbar", "phi")):
            some_mappable_thing = phaseflow.plotting.plot(var)
            # A colorbar (and a y axis label) only makes sense for 2D+ fields.
            if colorbar and (self.mesh.topology().dim() > 1):
                matplotlib.pyplot.colorbar(some_mappable_thing)
            matplotlib.pyplot.title(label + ", $t = " + str(time) + "$")
            matplotlib.pyplot.xlabel("$x$")
            if colorbar and (self.mesh.topology().dim() > 1):
                matplotlib.pyplot.ylabel("$y$")
            if savefigs:
                matplotlib.pyplot.savefig(self.output_dir + varname + "_t" + str(time) + ".png")
            matplotlib.pyplot.show()
def write_solution(self, file, solution_index = 0):
""" Write the solution to a file.
Parameters
----------
file : phaseflow.helpers.SolutionFile
This method should have been called from within the context of the open `file`.
"""
for var, symbol, label in zip(
self._solutions[solution_index].leaf_node().split(),
("p", "u", "T", "C"),
("pressure", "velocity", "temperature", "concentration")):
var.rename(symbol, label)
file.write(var, self._times[solution_index])
def write_nonlinear_solver_table_header(self):
with open(self.output_dir + self.nonlinear_solver_table_filename, "a") as table_file:
table_file.write(
"AbsoluteTolerance, RelativeTolerance, Time, SmoothingParameter, IterationCount, Solved\n")
def write_nonlinear_solver_table_row(self):
with open(self.output_dir + self.nonlinear_solver_table_filename, "a") as table_file:
table_file.write(
str(self.solver.parameters["newton_solver"]["absolute_tolerance"]) + ", " + \
str(self.solver.parameters["newton_solver"]["relative_tolerance"]) + ", " + \
str(self.time) + ", " + \
str(self.regularization_smoothing_parameter.__float__()) + ", " + \
str(self.solver_status["iterations"]) + ", " + \
str(self.solver_status["solved"]) + "\n")
| {
"repo_name": "geo-fluid-dynamics/phaseflow-fenics",
"path": "phaseflow/abstract_phasechange_simulation.py",
"copies": "1",
"size": "20958",
"license": "mit",
"hash": 4807334492384036000,
"line_mean": 34.0484949833,
"line_max": 112,
"alpha_frac": 0.5229983777,
"autogenerated": false,
"ratio": 4.078225335668418,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9857592275333722,
"avg_score": 0.048726287606939145,
"num_lines": 598
} |
''' Abstract request handler that handles bokeh-session-id
'''
from __future__ import absolute_import, print_function
import logging
log = logging.getLogger(__name__)
from tornado import gen
from tornado.web import RequestHandler, HTTPError
from bokeh.util.session_id import generate_session_id, check_session_id_signature
class SessionHandler(RequestHandler):
    ''' Implements a custom Tornado handler for document display page
    '''
    def __init__(self, tornado_app, *args, **kw):
        # Keep the bokeh-specific context alongside the Tornado application
        # (which the base class stores as self.application).
        self.application_context = kw['application_context']
        self.bokeh_websocket_path = kw['bokeh_websocket_path']
        super(SessionHandler, self).__init__(tornado_app, *args, **kw)

    def initialize(self, *args, **kw):
        pass

    @gen.coroutine
    def get_session(self):
        ''' Look up (or create) the server session named by the
        "bokeh-session-id" request argument, validating its signature. '''
        session_id = self.get_argument("bokeh-session-id", default=None)
        if session_id is None:
            # No id supplied: generate one, unless the server forbids it.
            if not self.application.generate_session_ids:
                log.debug("Server configured not to generate session IDs and none was provided")
                raise HTTPError(status_code=403, reason="No bokeh-session-id provided")
            session_id = generate_session_id(secret_key=self.application.secret_key,
                                             signed=self.application.sign_sessions)
        else:
            # An id was supplied: reject it when the signature does not check out.
            valid = check_session_id_signature(session_id,
                                               secret_key=self.application.secret_key,
                                               signed=self.application.sign_sessions)
            if not valid:
                log.error("Session id had invalid signature: %r", session_id)
                raise HTTPError(status_code=403, reason="Invalid session ID")
        session = yield self.application_context.create_session_if_needed(session_id, self.request)
        raise gen.Return(session)
| {
"repo_name": "dennisobrien/bokeh",
"path": "bokeh/server/views/session_handler.py",
"copies": "9",
"size": "1894",
"license": "bsd-3-clause",
"hash": -8860683203927499000,
"line_mean": 41.0888888889,
"line_max": 99,
"alpha_frac": 0.6372756072,
"autogenerated": false,
"ratio": 4.354022988505747,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004232327333028482,
"num_lines": 45
} |
""" Abstracts all Widgets of the pySUMO GUI.
This module contains:
- Widget: The main widget representation in the GUI.
- RWidget: The class of widgets which only have access to the IndexAbstractor and therefore cannot modify the Ontologies.
- RWWidget: The class of widgets which have access to the SyntaxController and the IndexAbstractor.
"""
from PySide.QtCore import Signal, Slot, QEvent, Qt
from PySide.QtGui import QDockWidget
from pysumo.indexabstractor import IndexAbstractor
from pysumo.syntaxcontroller import SyntaxController
import logging
class PySUMOWidget(QDockWidget):
    """ The main class representing a widget in the pySUMO GUI.
    It catches the focus event on components in widget to
    connect them to the application actions like cut, copy,
    paste, or undo, ...
    Methods:
    - refresh: Refreshes the view of the current widget according to the IndexAbstractor.
    """

    def __init__(self, mainwindow):
        """ Initializes the Widget object.
        Initializes a pysumo widget with the parent which is the main window.
        Parameter:
        - mainwindow : The main window.
        """
        super(PySUMOWidget, self).__init__()
        # NOTE(review): the main window is stored under two names (`mw` and
        # `mainWindow`); both always refer to the same object.
        self.mw = mainwindow
        self.mainWindow = mainwindow
        self.isPopedOut = False
        # React whenever the dock widget is floated or re-docked.
        self.topLevelChanged.connect(self.setPopedOut)
        self.widget = None
        self.callback = None
        # prefixName is the fixed part of the window title; suffixName is an
        # optional addition (e.g. the name of the active ontology).
        self.prefixName = None
        self.suffixName = None

    def _setSuffixName_(self, s):
        """ QT Slot which sets a suffix name to the title of the dock widget,
        like the name of the active ontology in the widget.
        Parameter:
        - s : The suffix name as a string.
        """
        if s is None:
            return
        s = s.strip()
        # An all-whitespace suffix clears the suffix entirely.
        if "" == s:
            s = None
        self.suffixName = s
        self.updateTitle()

    def setPrefixName(self, s):
        """
        Sets the prefix name which is the default title of a pysumo widget.
        Parameter:
        - s : The prefix or default name as a string.
        """
        if s is None:
            return
        s = s.strip()
        # Unlike the suffix, an empty prefix is ignored rather than stored.
        if "" == s:
            return
        self.prefixName = s
        self.updateTitle()

    def updateTitle(self):
        """
        Updates the title of the pysumo widget according to its prefix and suffix name.
        """
        assert self.prefixName is not None
        title = self.prefixName
        if self.suffixName is not None:
            title = title + " | " + self.suffixName
        self.setWindowTitle(title)

    @Slot()
    def setPopedOut(self):
        """
        Qt Slot which customizes the pop out of a pysumo widget.
        """
        if not self.isPopedOut:
            # Turn the floating dock into a real top-level window.
            self.setWindowFlags(Qt.Window)
            self.show()
            self.isPopedOut = True
        else:
            self.isPopedOut = False

    def eventFilter(self, source, event):
        """
        Filters event on a component where the pysumo was installed as event filter.
        Override from QObject.

        Connects this widget to the application-wide actions when a watched
        component gains focus, and disconnects it on focus loss.
        """
        if event.type() == QEvent.FocusIn:
            self.callback = self.mainWindow.connectWidget(self)
        elif event.type() == QEvent.FocusOut:
            self.mainWindow.disconnectWidget(self, self.callback)
        return super(PySUMOWidget, self).eventFilter(source, event)

    # A single IndexAbstractor instance shared by all widgets (class attribute).
    IA = IndexAbstractor()

    @classmethod
    def getIndexAbstractor(cls):
        """Return the shared IndexAbstractor instance."""
        return cls.IA

    def refresh(self):
        """ Uses the IndexAbstractor to refresh the widget. """

    def getWidget(self):
        """Return the inner widget. No-op placeholder for subclasses."""
        pass

    def _print_(self):
        """Print the widget content. No-op placeholder for subclasses."""
        pass

    def _quickPrint_(self):
        """Print without a dialog. No-op placeholder for subclasses."""
        pass

    def _printPreview_(self):
        """Show a print preview. No-op placeholder for subclasses."""
        pass

    def zoomIn(self):
        """Zoom in the view. No-op placeholder for subclasses."""
        pass

    def zoomOut(self):
        """Zoom out the view. No-op placeholder for subclasses."""
        pass

    def expandAll(self):
        """Expand all items in the view. No-op placeholder for subclasses."""
        pass

    def collapseAll(self):
        """Collapse all items in the view. No-op placeholder for subclasses."""
        pass

    def setSettings(self, settings):
        """Store the settings object on the widget."""
        self.settings = settings
class RWidget(PySUMOWidget):
    """A widget with read-only access to the Ontologies.

    Do not use this class directly; subclass it instead.
    """

    def __init__(self, mainwindow):
        """Initialize the read-only widget with the main window."""
        super(RWidget, self).__init__(mainwindow)
class RWWidget(PySUMOWidget):
    """A widget with modify-access to the Ontologies.

    Do not use this class directly; subclass it instead.

    Methods:
    - commit: Commits the modifications on the ontology and notifies the other
      widgets of changes.
    """

    # One SyntaxController shared by all read/write widgets, wired to the
    # globally shared IndexAbstractor.
    SyntaxController = SyntaxController(PySUMOWidget.getIndexAbstractor())
    ontologyChanged = Signal()

    def __init__(self, mainwindow):
        """ Initializes the read/write widget """
        super(RWWidget, self).__init__(mainwindow)
        self.log = logging.getLogger('.' + __name__)

    def commit(self):
        """Commit modifications to the ontology to the SyntaxController, and
        if successful update the IndexAbstractor and notify all other widgets
        that the Ontology has been modified."""
        self.ontologyChanged.emit()

    def getActiveOntology(self):
        """Return the ontology currently edited by this widget (stub)."""
        pass

    def setActiveOntology(self, ontology):
        """Set the ontology currently edited by this widget (stub)."""
        pass

    def saveOntology(self):
        """Persist the active ontology, if it is one of the known ontologies."""
        ontology = self.getActiveOntology()
        if ontology not in self.IA.ontologies:
            return
        self.log.info('Saving Ontology: %s' % str(ontology))
        ontology.save()

    def redo(self):
        """Redo the last undone change on the active ontology and notify widgets."""
        ontology = self.getActiveOntology()
        if ontology not in self.IA.ontologies:
            return
        self.SyntaxController.redo(ontology)
        self.ontologyChanged.emit()

    def undo(self):
        """Undo the last change on the active ontology and notify widgets."""
        ontology = self.getActiveOntology()
        if ontology not in self.IA.ontologies:
            return
        self.SyntaxController.undo(ontology)
        self.ontologyChanged.emit()
| {
"repo_name": "pySUMO/pysumo",
"path": "src/pySUMOQt/Widget/Widget.py",
"copies": "1",
"size": "5959",
"license": "bsd-2-clause",
"hash": 7266475493619759000,
"line_mean": 27.5119617225,
"line_max": 121,
"alpha_frac": 0.6042960228,
"autogenerated": false,
"ratio": 4.262517882689557,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.019638200034973505,
"num_lines": 209
} |
"""Abstracts away file storage and retrieval.
It's not uncommon to want to switch between a local filesystem or mogilefs or
some kind of cloud thing, and that's a pain when the relevant code is strewn
everywhere. So, this class is a wrapper around the operations needed on
uploaded files. It also provides the necessary machinery to be used with a
transaction manager.
Files are all identified by `class_`, a sort of rough category or namespace,
and `key`, which is just an identifier. For example, with local storage, files
are stored under ``/class_/k/e/y/key``.
A storage object factory can be retrieved through :func:`get_storage_factory`.
"""
from __future__ import absolute_import
from cStringIO import StringIO
import shutil
from pyramid.util import DottedNameResolver
import transaction
def get_storage_factory(settings, prefix='filestore'):
    """Uses a Pyramid deployment settings dictionary to construct and return
    an instance of a :class:`FileStorage` subclass.

    The `prefix` will be used to extract storage configuration. The package to
    use is determined by the `$prefix` config setting, and any setting named
    `$prefix.$key` will be passed to the subclass constructor as `$key`.

    The `$prefix` config setting may be either the full dotted python name of a
    data manager class, or the name of one of the modules in the
    floof.model.filestore namespace, currently `local` or `mogilefs`.
    """
    # Pull "$prefix.$key" settings out of the config object.
    # Fixed: the previous check (key[0:plen] == prefix) also matched unrelated
    # keys such as "filestorage.x"; require the literal "$prefix." here.
    kwargs = {}
    dotted_prefix = prefix + '.'
    for key, val in settings.iteritems():
        if key.startswith(dotted_prefix):
            kwargs[key[len(dotted_prefix):]] = val

    # Resolve the data manager class: try the short module name within
    # floof.model.filestore first, then fall back to treating the setting as a
    # full dotted path.
    resolver = DottedNameResolver(None)
    manager = settings[prefix]
    name = 'floof.model.filestore.' + manager + '.FileStorage'
    try:
        storage = resolver.resolve(name)
    except ImportError:
        storage = resolver.resolve(manager)

    def storage_factory():
        """Returns an instance of the chosen data manager for attachment to a
        request object and use over the life of a single request. The instance
        must be manually joined to a Zope-style transaction."""
        return storage(transaction.manager, **kwargs)
    return storage_factory
# TODO: several steps here
# 3. add notion of file class for all filestorages; local can either ignore or use subdirectories
# fix this impl-per-module nonsense
class FileStorage(object):
    """Base class for file storage backends.

    Keeps a staging dictionary of in-memory copies of every file object passed
    to :meth:`put`, so that a backend can commit them later as part of a
    Zope-style transaction.

    Child classes must implement :meth:`url` and the Zope transaction `data
    manager` methods according to their actual backends.

    See: http://www.zodb.org/zodbbook/transactions.html
    """

    def __init__(self, transaction_manager, **kwargs):
        self.transaction_manager = transaction_manager
        self.stage = {}

    def put(self, class_, key, fileobj):
        """Stage the data in `fileobj` for subsequent commital under the given
        `class_` and `key`."""
        buffered = StringIO()
        shutil.copyfileobj(fileobj, buffered)
        # Rewind both: the caller may re-read its file, and the staged copy
        # must be readable from the start at commit time.
        fileobj.seek(0)
        buffered.seek(0)
        self.stage[self._idx(class_, key)] = (class_, key, buffered)

    def _idx(self, class_, key):
        """Key under which a (class_, key) pair lives in the staging dict."""
        return u':'.join((class_, key))

    def _finish(self):
        """Drop the temporary in-memory file copies.

        Should be run at the end of any abort or commit, regardless of the
        outcome (i.e. at the end of :meth:`abort`, :meth:`tpc_finish` and
        :meth:`tpc_abort`).
        """
        self.stage = {}

    def sortKey(self):
        """String by which to sort the commit order for transactions with
        multiple data managers."""
        return 'filestore:' + str(id(self.stage))

    def url(self, class_, key):
        """Return a fully-qualified URL for accessing this file, or None if
        the file doesn't seem to exist. Local files can be served with
        file:// URLs."""
        raise NotImplementedError

    def abort(self, transaction):
        """Run if the transaction aborts before two-phase commit begins."""
        raise NotImplementedError

    def tpc_begin(self, transaction):
        """Run at the start of the two-phase commit."""
        raise NotImplementedError

    def commit(self, transaction):
        """Run during two-phase commit; should commit the files non-permanently."""
        raise NotImplementedError

    def tpc_vote(self, transaction):
        """Run during two-phase commit; should raise to abort the commit."""
        raise NotImplementedError

    def tpc_finish(self, transaction):
        """Run on successful completion of the two-phase commit; should
        strenuously avoid failing."""
        raise NotImplementedError

    def tpc_abort(self, transaction):
        """Run if the two-phase commit is aborted. Should not fail."""
        raise NotImplementedError
| {
"repo_name": "eevee/floof",
"path": "floof/model/filestore/__init__.py",
"copies": "1",
"size": "5219",
"license": "isc",
"hash": -6775711562192942000,
"line_mean": 35.7535211268,
"line_max": 97,
"alpha_frac": 0.6754167465,
"autogenerated": false,
"ratio": 4.309661436829067,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00041538429181359936,
"num_lines": 142
} |
# abstractsession.py
"""A common interface shared between publish and consumer sessions.
This file defines a class 'AbstractSession' - an interface which is shared
between its concrete implementations 'Session' and 'ProviderSession'.
"""
from __future__ import absolute_import
from .exception import _ExceptionUtil
from .identity import Identity
from .service import Service
from . import internals
from .internals import CorrelationId
from . import utils
class AbstractSession(object):
"""A common interface shared between publish and consumer sessions.
This class provides an abstract session which defines shared interface
between publish and consumer requests for Bloomberg
Sessions manage access to services either by requests and
responses or subscriptions. A Session can dispatch events and
replies in either a synchronous or asynchronous mode. The mode
of a Session is determined when it is constructed and cannot be
changed subsequently.
A Session is asynchronous if an EventHandler object is
supplied when it is constructed. The nextEvent() method may not be called.
All incoming events are delivered to the EventHandler supplied on
construction.
If supplied, EventHandler is a callable object that takes two arguments:
received event and related session.
A Session is synchronous if an EventHandler object is not
supplied when it is constructed. The nextEvent() method must be
called to read incoming events.
Several methods in Session take a CorrelationId parameter. The
application may choose to supply its own CorrelationId values
or allow the Session to create values. If the application
supplies its own CorrelationId values it must manage their
lifetime such that the same value is not reused for more than
one operation at a time. The lifetime of a CorrelationId begins
when it is supplied in a method invoked on a Session and ends
either when it is explicitly cancelled using cancel() or
unsubscribe(), when a RESPONSE Event (not a PARTIAL_RESPONSE)
containing it is received or when a SUBSCRIPTION_STATUS Event
which indicates that the subscription it refers to has been
terminated is received.
When using an asynchronous Session the application must be
aware that because the callbacks are generated from another
thread they may be processed before the call which generates
them has returned. For example, the SESSION_STATUS Event
generated by a startAsync() may be processed before
startAsync() has returned (even though startAsync() itself will
not block).
This becomes more significant when Session generated
CorrelationIds are in use. For example, if a call to
subscribe() which returns a Session generated CorrelationId has
not completed before the first Events which contain that
CorrelationId arrive the application may not be able to
interpret those events correctly. For this reason, it is
preferable to use user generated CorrelationIds when using
asynchronous Sessions. This issue does not arise when using a
synchronous Session as long as the calls to subscribe() etc are
made on the same thread as the calls to nextEvent().
"""
    def __init__(self, handle=None):
        """Instantiate an 'AbstractSession' with the specified handle.

        This function is for internal use only. Clients should create sessions
        using one of the concrete subclasses of 'AbstractSession'.
        """
        # Guard against direct instantiation of the abstract base class.
        if self.__class__ is AbstractSession:
            raise NotImplementedError("Don't instantiate this class directly.\
Create sessions using one of the concrete subclasses of this class.")
        # Low-level blpapi session handle used by all wrapper methods below.
        self.__handle = handle
def openService(self, serviceName):
"""Open the service identified by the specified 'serviceName'.
Attempt to open the service identified by the specified
'serviceName' and block until the service is either opened
successfully or has failed to be opened. Return 'True' if
the service is opened successfully and 'False' if the
service cannot be successfully opened.
The 'serviceName' must contain a fully qualified service name. That
is, it must be of the form "//<namespace>/<service-name>".
Before openService() returns a SERVICE_STATUS Event is
generated. If this is an asynchronous Session then this
Event may be processed by the registered EventHandler
before openService() has returned.
"""
return internals.blpapi_AbstractSession_openService(
self.__handle,
serviceName) == 0
def openServiceAsync(self, serviceName, correlationId=None):
"""Begin the process to open the service and return immediately.
Begin the process to open the service identified by the
specified 'serviceName' and return immediately. The optional
specified 'correlationId' is used to track Events generated
as a result of this call. The actual correlationId which
will identify Events generated as a result of this call is
returned.
The 'serviceName' must contain a fully qualified service name. That
is, it must be of the form "//<namespace>/<service-name>".
The application must monitor events for a SERVICE_STATUS
Event which will be generated once the service has been
successfully opened or the opening has failed.
"""
if correlationId is None:
correlationId = CorrelationId()
_ExceptionUtil.raiseOnError(
internals.blpapi_AbstractSession_openServiceAsync(
self.__handle,
serviceName,
correlationId._handle()))
return correlationId
def sendAuthorizationRequest(self,
request,
identity,
correlationId=None,
eventQueue=None):
"""Send the specified 'authorizationRequest'.
Send the specified 'authorizationRequest' and update the
specified 'identity' with the results. If the optionally
specified 'correlationId' is supplied, it is used; otherwise
create a CorrelationId. The actual CorrelationId used is
returned. If the optionally specified 'eventQueue' is
supplied all Events relating to this Request will arrive on
that EventQueue.
The underlying user information must remain valid until the
Request has completed successfully or failed.
A successful request will generate zero or more
PARTIAL_RESPONSE Messages followed by exactly one RESPONSE
Message. Once the final RESPONSE Message has been received
the specified 'identity' will have been updated to contain
the users entitlement information and the CorrelationId
associated with the request may be re-used. If the request
fails at any stage a REQUEST_STATUS will be generated, the
specified 'identity' will not be modified and the
CorrelationId may be re-used.
The 'identity' supplied must have been returned from this
Session's createIdentity() method.
"""
if correlationId is None:
correlationId = CorrelationId()
_ExceptionUtil.raiseOnError(
internals.blpapi_AbstractSession_sendAuthorizationRequest(
self.__handle,
request._handle(),
identity._handle(),
correlationId._handle(),
None if eventQueue is None else eventQueue._handle(),
None, # no request label
0)) # request label length 0
if eventQueue is not None:
eventQueue._registerSession(self)
return correlationId
    def cancel(self, correlationId):
        """Cancel 'correlationId' request.

        If the specified 'correlationId' identifies a current
        request then cancel that request.

        Once this call returns the specified 'correlationId' will
        not be seen in any subsequent Message obtained from a
        MessageIterator by calling next(). However, any Message
        currently pointed to by a MessageIterator when
        cancel() is called is not affected even if it has the
        specified 'correlationId'. Also any Message where a
        reference has been retained by the application may still
        contain the 'correlationId'. For these reasons, although
        technically an application is free to re-use
        'correlationId' as soon as this method returns it is
        preferable not to aggressively re-use correlation IDs,
        particularly with an asynchronous Session.

        'correlationId' should be either a correlation Id or a list of
        correlation Ids.
        """
        # NOTE(review): the docstring advertises accepting a list of
        # correlation Ids, but `correlationId._handle()` below assumes a
        # single CorrelationId object (a plain list has no _handle attribute)
        # and the count argument is hard-coded to 1 -- confirm the intended
        # contract before passing a list here.
        _ExceptionUtil.raiseOnError(internals.blpapi_AbstractSession_cancel(
            self.__handle,
            correlationId._handle(),
            1,  # number of correlation IDs supplied
            None,  # no request label
            0))  # request label length 0
def generateToken(self, correlationId=None, eventQueue=None):
"""Generate a token to be used for authorization.
If invalid authentication option is specified in session option or
there is failure to get authentication information based on
authentication option, then an InvalidArgumentException is raised.
"""
if correlationId is None:
correlationId = CorrelationId()
_ExceptionUtil.raiseOnError(
internals.blpapi_AbstractSession_generateToken(
self.__handle,
correlationId._handle(),
None if eventQueue is None else eventQueue._handle()))
if eventQueue is not None:
eventQueue._registerSession(self)
return correlationId
def getService(self, serviceName):
"""Return a Service object representing the service.
Return a Service object representing the service identified by the
specified 'serviceName'.
The 'serviceName' must contain a fully qualified service name. That
is, it must be of the form "//<namespace>/<service-name>".
If the service identified by the specified 'serviceName' is not open
already then an InvalidStateException is raised.
"""
errorCode, service = internals.blpapi_AbstractSession_getService(
self.__handle,
serviceName)
_ExceptionUtil.raiseOnError(errorCode)
return Service(service, self)
def createIdentity(self):
"""Return a Identity which is valid but has not been authorized."""
return Identity(
internals.blpapi_AbstractSession_createIdentity(self.__handle),
self)
# Protect enumeration constant(s) defined in this class and in classes
# derived from this class from changes:
__metaclass__ = utils.MetaClassForClassesWithEnums
__copyright__ = """
Copyright 2012. Bloomberg Finance L.P.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: The above
copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
| {
"repo_name": "anshulkgupta/viznow",
"path": "Mayank/blpapi_python3.5.5/build/lib.linux-i686-2.7/blpapi/abstractsession.py",
"copies": "2",
"size": "12218",
"license": "mit",
"hash": 1118100056301955500,
"line_mean": 42.6357142857,
"line_max": 78,
"alpha_frac": 0.6966770339,
"autogenerated": false,
"ratio": 5.144421052631579,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6841098086531578,
"avg_score": null,
"num_lines": null
} |
""" **abstract_simulation.py** provides an abstract class for unsteady adaptive FEM simulations.
We define a simulation as a sequence of time-dependent initial boundary value problems.
This module only provides an abstract base class for simulations.
For an example of implementing a specific set of governing equations,
see the `abstract_phasechange_simulation` module.
For examples of implementing instantiable simulation classes,
see e.g. `cavity_melting_simulation` and `cavity_freezing_simulation`.
This module essentially contains many lessons learned by a PhD student*
applying FEniCS to time-dependent mixed FE problems with goal-oriented AMR.
If you are not solving time-dependent problems,
not using mixed finite elements,
or not using goal-oriented AMR,
then perhaps you do not need to consider much of this module.
The bulk of the difficult work is done by the `fenics` module;
but `fenics` is mostly only a library for finite element spatial discretization of PDE's.
In that context,
little attention is paid to the time discretization of the initial value problem.
Furthermore, we use some advanced features of `fenics`,
namely mixed finite elements and goal-oriented adaptive mesh refinement (AMR),
the latter of which is not among the better supported "core features" of `fenics`.
* Alexander G. Zimmerman, AICES Graduate School, RWTH Aachen University
"""
import phaseflow
import fenics
import abc
import numpy
import matplotlib
import os
class AbstractSimulation(metaclass = abc.ABCMeta):
""" A class for time-dependent simulations with goal-oriented AMR using FeniCS """
    def __init__(self, time_order = 1, integration_measure = fenics.dx, setup_solver = True):
        """ Initialize the mesh, function space, solution queue, and (optionally) the solvers.

        Parameters
        ----------
        time_order : int
            Order of the backward difference formula in time (1 or 2 are handled).
        integration_measure : fenics.Measure
            Measure used when assembling the variational forms.
        setup_solver : bool
            If False, defer solver construction (used e.g. by `deepcopy`).
        """
        self.integration_measure = integration_measure
        self.time_order = time_order
        # One discrete time per stored solution: t^{n+1}, t^n, (t^{n-1} for BDF2).
        self._times = [0.,]*(time_order + 1)
        # One timestep size per backward-difference level.
        self._timestep_sizes = []
        for i in range(time_order):
            self._timestep_sizes.append(fenics.Constant(1.))
        self._mesh = self.initial_mesh()
        self._element = self.element()
        self._function_space = fenics.FunctionSpace(self._mesh, self._element)
        # Solution queue: index 0 is the latest (unknown) solution.
        self._solutions = []
        for i in range(time_order + 1):
            self._solutions.append(fenics.Function(self.function_space))
        # Backup buffer for the latest successful Newton iterate (non-AMR runs).
        self.newton_solution = fenics.Function(self.function_space)
        self.adaptive_solver = None
        self.solver_status = {"iterations": 0, "solved": False}
        self.solver_needs_setup = True
        if setup_solver:
            self.setup_solver()
        self.output_dir = ""
    @property
    def timestep_size(self):
        # Latest timestep size (a fenics.Constant so forms stay valid when it changes).
        return self._timestep_sizes[0]

    @property
    def mesh(self):
        # Always expose the most refined mesh in the AMR hierarchy.
        return self._mesh.leaf_node()

    @mesh.setter
    def mesh(self, value):
        """ Automatically redefine the function space and solutions when the mesh is redefined. """
        self._mesh = value
        self.solver_needs_setup = True
        self.reinit_solutions()

    @property
    def function_space(self):
        # Leaf node of the (possibly adapted) function space hierarchy.
        return self._function_space.leaf_node()

    @property
    def solution(self):
        # Latest solution in the time-discretization queue.
        return self._solutions[0].leaf_node()

    @property
    def time(self):
        # Latest discrete time value.
        return self._times[0]
    @abc.abstractmethod
    def coarse_mesh(self):
        """ Redefine this to return a `fenics.Mesh`. """

    @abc.abstractmethod
    def element(self):
        """ Redefine this to return a `fenics.MixedElement`. """

    @abc.abstractmethod
    def governing_form(self):
        """ Redefine this to return a nonlinear variational form for the governing equations. """

    @abc.abstractmethod
    def initial_values(self):
        """ Redefine this to return a `fenics.Function` containing the initial values. """

    @abc.abstractmethod
    def boundary_conditions(self):
        """ Redefine this to return a list of `fenics.DirichletBC`. """

    def adaptive_goal(self):
        """ Redefine this to return an adaptive goal.

        Returning None (the default) disables goal-oriented AMR.
        """

    def initial_mesh(self):
        """ Redefine this to refine the mesh before adaptive mesh refinement. """
        return self.coarse_mesh()
def reinit_solutions(self):
""" Create the function space and solution functions for the current mesh and element. """
self._function_space = fenics.FunctionSpace(self._mesh, self._element)
for i in range(len(self._solutions)):
self._solutions[i] = fenics.Function(self._function_space)
    def setup_solver(self):
        """ Sometimes it is necessary to set up the solver again after breaking
        important references, e.g. after re-meshing.

        User-modified solver parameters are preserved across re-construction.
        """
        self._governing_form = self.governing_form()
        self._boundary_conditions = self.boundary_conditions()
        self._problem = fenics.NonlinearVariationalProblem(
            F = self._governing_form,
            u = self.solution,
            bcs = self._boundary_conditions,
            J = fenics.derivative(
                form = self._governing_form,
                u = self.solution))
        # Save the plain solver's parameters (if a solver exists) before
        # replacing it, then restore them onto the new solver.
        save_parameters = False
        if hasattr(self, "solver"):
            save_parameters = True
        if save_parameters:
            solver_parameters = self.solver.parameters.copy()
        self.solver = fenics.NonlinearVariationalSolver(problem = self._problem)
        if save_parameters:
            self.solver.parameters = solver_parameters.copy()
        self._adaptive_goal = self.adaptive_goal()
        if self._adaptive_goal is not None:
            # Same parameter-preserving dance for the adaptive solver.
            save_parameters = False
            if self.adaptive_solver is not None:
                save_parameters = True
            if save_parameters:
                adaptive_solver_parameters = self.adaptive_solver.parameters.copy()
            self.adaptive_solver = fenics.AdaptiveNonlinearVariationalSolver(
                problem = self._problem,
                goal = self._adaptive_goal)
            if save_parameters:
                self.adaptive_solver.parameters = adaptive_solver_parameters.copy()
        self.solver_needs_setup = False
""" The following methods are used to solve time steps and advance the unsteady simulation. """
def solve(self, goal_tolerance = None):
""" Solve the nonlinear variational problem.
Optionally provide `goal_tolerance` to use the adaptive solver.
"""
if self.solver_needs_setup:
self.setup_solver()
self._times[0] = self._times[1] + self.timestep_size.__float__()
if goal_tolerance is None:
solver_status = self.solver.solve()
self.solver_status["iterations"] = solver_status[0]
else:
share_solver_parameters(
self.adaptive_solver.parameters["nonlinear_variational_solver"],
self.solver.parameters)
self.adaptive_solver.solve(goal_tolerance)
""" `fenics.AdaptiveNonlinearVariationalSolver` does not return status."""
self.solver_status["iterations"] = "NA"
self.solver_status["solved"] = True
return self.solver_status
    def advance(self):
        """ Move solutions backward in the queue to prepare for a new time step.

        This is a separate method, since one may want to call `solve` multiple times
        before being satisfied with the solution state.
        """
        if self.time_order > 1:
            # Shift level n -> n-1 first so level n is not overwritten below.
            self._solutions[2].leaf_node().assign(self._solutions[1].leaf_node())
            self._times[2] = 0. + self._times[1]
            self._timestep_sizes[1].assign(self._timestep_sizes[0])
        self._solutions[1].leaf_node().assign(self._solutions[0].leaf_node())
        self._times[1] = 0. + self._times[0]
""" The following are some utility methods. """
def time_discrete_terms(self):
""" Apply first-order implicit Euler finite difference method. """
wnp1 = fenics.split(self._solutions[0].leaf_node())
wn = fenics.split(self._solutions[1].leaf_node())
if self.time_order == 1:
return tuple([
phaseflow.backward_difference_formulas.apply_backward_euler(
self._timestep_sizes[0],
(wnp1[i], wn[i]))
for i in range(len(wn))])
if self.time_order > 1:
wnm1 = fenics.split(self._solutions[2].leaf_node())
if self.time_order == 2:
return tuple([
phaseflow.backward_difference_formulas.apply_bdf2(
(self._timestep_sizes[0], self._timestep_sizes[1]),
(wnp1[i], wn[i], wnm1[i]))
for i in range(len(wn))])
if self.time_order > 2:
raise NotImplementedError()
    def assign_initial_values(self):
        """ Set values of all solutions from `self.initial_values()`. """
        initial_values = self.initial_values()
        for i in range(len(self._solutions)):
            self._solutions[i].assign(initial_values)

    def reset_initial_guess(self):
        """ Set the values of the latest solution from the next latest solution. """
        self._solutions[0].leaf_node().vector()[:] = self._solutions[1].leaf_node().vector()

    def save_newton_solution(self):
        """ When not using AMR, we can save a copy of the solution from the latest successful Newton iteration.

        This can be useful, since a failed Newton iteration will blow up the solution, replacing it with garbage.

        This will fail if the mesh has been changed by the adaptive solver
        and `self.newton_solution` has not been reinitialized with
        `self.newton_solution = fenics.Function(self.function_space)`.
        """
        self.newton_solution.vector()[:] = self._solutions[0].vector()

    def load_newton_solution(self):
        """ When not using AMR, we can load a copy of the solution from the latest successful Newton iteration.

        This can be useful, since a failed Newton iteration will blow up the solution, replacing it with garbage.

        This will fail if the mesh has been changed by the adaptive solver
        and `self.newton_solution` has not been reinitialized with
        `self.newton_solution = fenics.Function(self.function_space)`.
        """
        self._solutions[0].vector()[:] = self.newton_solution.vector()
    def set_solution_on_subdomain(self, subdomain, values):
        """ Abuse `fenics.DirichletBC` to set values of a function on a subdomain.

        Parameters
        ----------
        subdomain
            `fenics.SubDomain`
        values
            container of objects that would typically be passed to
            `fenics.DirichletBC` as the values of the boundary condition,
            one for each subspace of the mixed finite element solution space
        """
        # Work on a copy in a freshly-built function space, then write back.
        function_space = fenics.FunctionSpace(self.mesh.leaf_node(), self.element())
        new_solution = fenics.Function(function_space)
        new_solution.vector()[:] = self.solution.vector()
        for function_subspace_index in range(len(fenics.split(self.solution))):
            # DirichletBC.apply overwrites the dofs inside `subdomain`.
            hack = fenics.DirichletBC(
                function_space.sub(function_subspace_index),
                values[function_subspace_index],
                subdomain)
            hack.apply(new_solution.vector())
        self.solution.vector()[:] = new_solution.vector()
    def deepcopy(self):
        """ Return an entire deep copy of `self`.

        For example, this is useful for checkpointing small problems in memory,
        or for running a batch of simulations with parameter changes.
        """
        # NOTE(review): `self.integration_measure` is stored as a value in
        # __init__ but *called* here -- presumably fenics measures are
        # callable and this yields an equivalent measure; confirm.
        sim = type(self)(
            time_order = self.time_order,
            integration_measure = self.integration_measure(),
            setup_solver = False)
        sim._mesh = fenics.Mesh(self.mesh)
        sim._function_space = fenics.FunctionSpace(sim.mesh, sim._element)
        for i in range(len(self._solutions)):
            sim._solutions[i] = fenics.Function(sim.function_space)
            sim._solutions[i].leaf_node().vector()[:] = self._solutions[i].leaf_node().vector()
            sim._times[i] = 0. + self._times[i]
        # NOTE(review): the fenics.Constant timestep sizes are shared by
        # reference with `self`, not copied -- mutating one simulation's
        # timestep size will affect the other; confirm this is intended
        # for a "deep" copy.
        for i in range(len(self._timestep_sizes)):
            sim._timestep_sizes[i] = self._timestep_sizes[i]
        sim.setup_solver()
        sim.solver.parameters = self.solver.parameters.copy()
        return sim
def print_constants(self):
""" Print the names and values of all `fenics.Constant` attributes.
For example, this is useful for verifying that the correct parameters
have been set.
"""
for key in self.__dict__.keys():
attribute = self.__dict__[key]
if type(attribute) is type(fenics.Constant(0.)):
print(attribute.name() + " = " + str(attribute.values()))
    def write_checkpoint(self, filepath):
        """Write the mesh, all queued solutions, and their times to a checkpoint file."""
        print("Writing checkpoint to " + filepath)
        with fenics.HDF5File(self.mesh.mpi_comm(), filepath, "w") as h5:
            h5.write(self._solutions[0].function_space().mesh().leaf_node(), "mesh")
            for i in range(len(self._solutions)):
                h5.write(self._solutions[i].leaf_node(), "solution" + str(i))
                """ The fenics.HDF5File interface does not allow us to write floats,
                but rather only a numpy array. """
                h5.write(numpy.array((self._times[i],)), "time" + str(i))
    def read_checkpoint(self, filepath):
        """Read solutions and times from a checkpoint file.

        Rebuilds the mesh, function space, and solution queue, then
        re-constructs the solvers against the restored state.
        """
        self._mesh = fenics.Mesh()
        print("Reading checkpoint from " + filepath)
        with fenics.HDF5File(self.mesh.mpi_comm(), filepath, "r") as h5:
            h5.read(self._mesh, "mesh", True)
            self._function_space = fenics.FunctionSpace(self.mesh, self._element)
            for i in range(self.time_order + 1):
                self._solutions[i] = fenics.Function(self.function_space)
                h5.read(self._solutions[i], "solution" + str(i))
                """ fenics.HDF5File doesn't implement read methods for every write method.
                Our only option here seems to be to use a fenics.Vector to store values,
                because a reader is implemented for GenericVector, which Vector inherits from.
                Furthermore, for the correct read method to be called, we must pass a boolean
                as a third argument related to distributed memory.
                """
                time = fenics.Vector(fenics.mpi_comm_world(), 1)
                h5.read(time, "time" + str(i), False)
                self._times[i] = time.get_local()[0]
        # The old newton_solution lives on the old function space; rebuild it.
        self.newton_solution = fenics.Function(self.function_space)
        self.setup_solver()
    def write_solution(self, file, solution_index = 0):
        """ Write the solution to a file.

        Parameters
        ----------
        file : fenics.XDMFFile
            This method should have been called from within the context of the open `file`.
        solution_index : int
            Index into the solution queue (0 is the latest solution).
        """
        print("Writing solution to " + file.path)
        # Write each component of the mixed FE solution at its time.
        for var in self._solutions[solution_index].leaf_node().split():
            file.write(var, self._times[solution_index])
def convert_checkpoints_to_xdmf_solution(self, checkpoint_dir, xdmf_solution_filepath):
with phaseflow.helpers.SolutionFile(xdmf_solution_filepath) as xdmf_solution_file:
for filename in os.listdir(checkpoint_dir):
if ("checkpoint" in filename) and filename.endswith(".h5"):
self.read_checkpoint(checkpoint_dir + "/" + filename)
self.write_solution(xdmf_solution_file)
def plot(self, solution_index = 0, savefigs = False):
""" Plot the adaptive mesh and all parts of the mixed finite element solution. """
if not (self.output_dir == ""):
phaseflow.helpers.mkdir_p(self.output_dir)
self._plot(
solution = self._solutions[solution_index],
time = self._times[solution_index],
savefigs = savefigs)
def _plot(self, solution, time, savefigs = False):
phaseflow.plotting.plot(solution.function_space().mesh().leaf_node())
matplotlib.pyplot.title("$\Omega_h, t = " + str(time) + "$")
matplotlib.pyplot.xlabel("$x$")
matplotlib.pyplot.ylabel("$y$")
if savefigs:
matplotlib.pyplot.savefig(fname = self.output_dir + "mesh_t" + str(time) + ".png")
matplotlib.pyplot.show()
w = solution.leaf_node().split()
for i in range(len(w)):
some_mappable_thing = phaseflow.plotting.plot(w[i])
matplotlib.pyplot.colorbar(some_mappable_thing)
matplotlib.pyplot.title("$w_" + str(i) + ", t = " + str(time) + "$")
matplotlib.pyplot.xlabel("$x$")
matplotlib.pyplot.ylabel("$y$")
if savefigs:
matplotlib.pyplot.savefig(fname = self.output_dir + "w" + str(i) + "_t" + str(time) + ".png")
matplotlib.pyplot.show()
def share_solver_parameters(share_to_parameters, share_from_parameters):
    """ Recursively copy solver parameters from one parameter set into another.

    FEniCS implements a setter for the solver parameters which does not allow
    `adaptive_solver.parameters["nonlinear_variational_solver"] = solver.parameters`,
    so we catch the resulting KeyError and recurse into the nested parameter set.
    """
    for name in share_from_parameters:
        try:
            share_to_parameters[name] = share_from_parameters[name]
        except KeyError:
            # `name` refers to a nested parameter set; copy it entry by entry.
            share_solver_parameters(
                share_to_parameters[name], share_from_parameters[name])
| {
"repo_name": "geo-fluid-dynamics/phaseflow-fenics",
"path": "phaseflow/abstract_simulation.py",
"copies": "1",
"size": "19299",
"license": "mit",
"hash": 4228228136344768500,
"line_mean": 35.2763157895,
"line_max": 113,
"alpha_frac": 0.5685786828,
"autogenerated": false,
"ratio": 4.4122085048010975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.040655865919558165,
"num_lines": 532
} |
""" Abstract sql statements to query schema """
import asyncio
import logging
from zope.interface import implementer
from aiorm import registry
from . import interfaces
log = logging.getLogger(__name__)
class _Query:
    """ Base class for SQL queries: renders a dialect statement and runs it on a cursor. """

    def __init__(self, *args, **kwargs):
        # args[0] is expected to be the model class (or instance) being queried.
        self._args = args
        self._kwargs = kwargs
        self._child = None

    def __getattr__(self, key):
        """
        Use zope.interface for easy extensibility.

        If you want to add your own statement, register your own interface,
        with your own implementation in the aiorm registry.
        """
        # e.g. "left_join" -> interfaces.ILeftJoin.
        iface = 'I' + ''.join(txt.capitalize() for txt in key.split('_'))
        iface = getattr(interfaces, iface)
        self._child = registry.get(iface)(self)
        return self._child

    @asyncio.coroutine
    def run(self, cursor=None, fetchall=True):
        """ Execute the rendered SQL, optionally on an explicit cursor.

        Returns all rows when `fetchall` is true, otherwise a single row.
        """
        @asyncio.coroutine
        def wrapped(cursor):
            sql_statement = self.render_sql()
            if log.isEnabledFor(logging.DEBUG):
                log.debug('{} % {!r}'.format(*sql_statement))
            yield from cursor.execute(*sql_statement)
            return ((yield from cursor.fetchall())
                    if fetchall else (yield from cursor.fetchone()))
        if cursor:
            return (yield from wrapped(cursor))
        else:
            # No cursor supplied: acquire one from the model's registered driver.
            driver = registry.get_driver(self._args[0].__meta__['database'])
            with (yield from driver.cursor()) as cursor:
                return (yield from wrapped(cursor))
class _NoResultQuery(_Query):
    """ Query that produces no rows (e.g. DELETE); `run` returns True on success. """

    @asyncio.coroutine
    def run(self, cursor=None):
        @asyncio.coroutine
        def wrapped(cursor):
            sql_statement = self.render_sql()
            log.debug('{!r} % {!r}'.format(*sql_statement))
            yield from cursor.execute(*sql_statement)
            return True
        if cursor:
            return (yield from wrapped(cursor))
        else:
            # No cursor supplied: acquire one from the model's registered driver.
            driver = registry.get_driver(self._args[0].__meta__['database'])
            with (yield from driver.cursor()) as cursor:
                return (yield from wrapped(cursor))
class _SingleResultQuery(_Query):
    """ Query that returns at most one hydrated model instance (or None). """

    @asyncio.coroutine
    def run(self, cursor=None):
        row = yield from super().run(fetchall=False, cursor=cursor)
        if row is None:
            return None
        # Hydrate a model instance from the row, column by column.
        model = self._args[0]()
        for idx, col in enumerate(model.__meta__['columns']):
            setattr(model, model.__meta__['attributes'][col], row[idx])
        return model
class _ManyResultQuery(_Query):
    """ Query that hydrates each returned row into a model instance. """

    @asyncio.coroutine
    def run(self, cursor=None, fetchall=True):
        def to_model(row):
            # Build one model instance from one row (None passes through).
            if row is None:
                return None
            model = self._args[0]()
            for idx, col in enumerate(model.__meta__['columns']):
                setattr(model, model.__meta__['attributes'][col], row[idx])
            return model
        def iter_models(rows):  # XXX Can't mix yield and yield from
            for row in rows:
                yield to_model(row)
        rows = yield from super().run(fetchall=fetchall, cursor=cursor)
        # Lazily convert all rows, or convert the single fetched row.
        return iter_models(rows) if fetchall else to_model(rows)
class Get(_SingleResultQuery):
    """ SELECT of a single record, returning one model or None. """

    def render_sql(self):
        renderer = registry.get(interfaces.IDialect)()
        renderer.render_get(*self._args, **self._kwargs)
        if self._child:
            # Append chained clauses (where, join, ...) to the statement.
            self._child.render_sql(renderer)
        return renderer.query, renderer.parameters


class Select(_ManyResultQuery):
    """ SELECT returning an iterator of model instances. """

    def render_sql(self):
        renderer = registry.get(interfaces.IDialect)()
        renderer.render_select(*self._args, **self._kwargs)
        if self._child:
            self._child.render_sql(renderer)
        return renderer.query, renderer.parameters


class Insert(_Query):
    """ INSERT statement; `run` refreshes the model from the returned row. """

    def render_sql(self):
        renderer = registry.get(interfaces.IDialect)()
        renderer.render_insert(*self._args, **self._kwargs)
        return renderer.query, renderer.parameters

    @asyncio.coroutine
    def run(self, cursor=None):
        row = yield from super().run(fetchall=False, cursor=cursor)
        if row is None:
            return None
        # Here args[0] is a model *instance*; write the returned column
        # values (e.g. generated primary keys) back onto it.
        model = self._args[0]
        for idx, col in enumerate(model.__meta__['columns']):
            setattr(model, col, row[idx])
        return model


class Update(Insert):
    """ UPDATE statement; inherits Insert's model-refreshing run(). """

    def render_sql(self):
        renderer = registry.get(interfaces.IDialect)()
        renderer.render_update(*self._args, **self._kwargs)
        return renderer.query, renderer.parameters


class Delete(_NoResultQuery):
    """ DELETE statement; run() returns True on success. """

    def render_sql(self):
        renderer = registry.get(interfaces.IDialect)()
        renderer.render_delete(*self._args, **self._kwargs)
        return renderer.query, renderer.parameters
class Statement:
    """ Chainable SQL clause bound to a root query.

    Attribute access proxies to further clause implementations so that
    e.g. ``query.where(...).order_by(...)`` builds a clause chain.
    """

    def __init__(self, query):
        self._args = None
        self._kwargs = None
        self._query = query
        self._child = None

    def __getattr__(self, key):
        """
        Use zope.interface for easy extensibility.

        If you want to add your own statement, register your own interface,
        with your own implementation in the aiorm registry.
        """
        # e.g. "order_by" -> interfaces.IOrderBy.
        iface = ['I'] + [word.capitalize() for word in key.split('_')]
        iface = getattr(interfaces, ''.join(iface))
        self._child = registry.get(iface)(self._query)
        return self._child

    def __call__(self, *args, **kwargs):
        # Capture the clause arguments; return self so the chain stays fluent.
        self._args = args
        self._kwargs = kwargs
        return self

    @asyncio.coroutine
    def run(self, *args, **kwargs):
        # Delegate execution to the root query.
        return (yield from self._query.run(*args, **kwargs))
@implementer(interfaces.IJoin)
class Join(Statement):
    """ An INNER JOIN clause statement. """

    def render_sql(self, renderer):
        renderer.render_join(*self._args, **self._kwargs)
        if self._child:
            self._child.render_sql(renderer)
        return renderer.query, renderer.parameters


@implementer(interfaces.ILeftJoin)
class LeftJoin(Statement):
    """ A LEFT JOIN clause statement. """

    def render_sql(self, renderer):
        renderer.render_left_join(*self._args, **self._kwargs)
        if self._child:
            self._child.render_sql(renderer)
        return renderer.query, renderer.parameters


@implementer(interfaces.IWhere)
class Where(Statement):
    """ A WHERE clause statement. """

    def render_sql(self, renderer):
        renderer.render_where(*self._args, **self._kwargs)
        if self._child:
            self._child.render_sql(renderer)
        return renderer.query, renderer.parameters


@implementer(interfaces.ILimit)
class Limit(Statement):
    """ A LIMIT clause statement. """

    def render_sql(self, renderer):
        renderer.render_limit(*self._args, **self._kwargs)
        if self._child:
            self._child.render_sql(renderer)
        return renderer.query, renderer.parameters


@implementer(interfaces.IGroupBy)
class GroupBy(Statement):
    """ A GROUP BY clause statement. """

    def render_sql(self, renderer):
        renderer.render_group_by(*self._args, **self._kwargs)
        if self._child:
            self._child.render_sql(renderer)
        return renderer.query, renderer.parameters


@implementer(interfaces.IOrderBy)
class OrderBy(Statement):
    """ An ORDER BY clause statement. """

    def render_sql(self, renderer):
        renderer.render_order_by(*self._args, **self._kwargs)
        if self._child:
            self._child.render_sql(renderer)
        return renderer.query, renderer.parameters
# Register the default clause implementations so that _Query.__getattr__ and
# Statement.__getattr__ can look them up through the aiorm registry.
registry.register(Where, interfaces.IWhere)
registry.register(Join, interfaces.IJoin)
registry.register(LeftJoin, interfaces.ILeftJoin)
registry.register(Limit, interfaces.ILimit)
registry.register(GroupBy, interfaces.IGroupBy)
registry.register(OrderBy, interfaces.IOrderBy)
| {
"repo_name": "mardiros/aiorm",
"path": "aiorm/orm/query/statements.py",
"copies": "1",
"size": "7562",
"license": "bsd-3-clause",
"hash": 5992269424044634000,
"line_mean": 28.6549019608,
"line_max": 76,
"alpha_frac": 0.617825972,
"autogenerated": false,
"ratio": 4.141292442497262,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00033272459310521594,
"num_lines": 255
} |
""" Abstracts the capturing and interfacing of applications """
import os
import re
import time
import pyperclip
import platform
import subprocess
from .RegionMatching import Region
from .SettingsDebug import Debug
if platform.system() == "Windows":
from .PlatformManagerWindows import PlatformManagerWindows
PlatformManager = PlatformManagerWindows() # No other input managers built yet
elif platform.system() == "Darwin":
from .PlatformManagerDarwin import PlatformManagerDarwin
PlatformManager = PlatformManagerDarwin()
else:
# Avoid throwing an error if it's just being imported for documentation purposes
if not os.environ.get('READTHEDOCS') == 'True':
raise NotImplementedError("Lackey is currently only compatible with Windows and OSX.")
# Python 3 compatibility
try:
basestring
except NameError:
basestring = str
class App(object):
""" Allows apps to be selected by title, PID, or by starting an
application directly. Can address individual windows tied to an
app.
For more information, see `Sikuli's App documentation <http://sikulix-2014.readthedocs.io/en/latest/appclass.html#App>`_.
"""
def __init__(self, identifier=None):
self._pid = None
self._search = identifier
self._title = ""
self._exec = ""
self._params = ""
self._process = None
self._devnull = None
self._defaultScanRate = 0.1
self.proc = None
# Replace class methods with instance methods
self.focus = self._focus_instance
self.close = self._close_instance
self.open = self._open_instance
# Process `identifier`
if isinstance(identifier, int):
# `identifier` is a PID
Debug.log(3, "Creating App by PID ({})".format(identifier))
self._pid = identifier
elif isinstance(identifier, basestring):
# `identifier` is either part of a window title
# or a command line to execute. If it starts with a "+",
# launch it immediately. Otherwise, store it until open() is called.
Debug.log(3, "Creating App by string ({})".format(identifier))
launchNow = False
if identifier.startswith("+"):
# Should launch immediately - strip the `+` sign and continue
launchNow = True
identifier = identifier[1:]
# Check if `identifier` is an executable commmand
# Possible formats:
# Case 1: notepad.exe C:\sample.txt
# Case 2: "C:\Program Files\someprogram.exe" -flag
# Extract hypothetical executable name
if identifier.startswith('"'):
executable = identifier[1:].split('"')[0]
params = identifier[len(executable)+2:].split(" ") if len(identifier) > len(executable) + 2 else []
else:
executable = identifier.split(" ")[0]
params = identifier[len(executable)+1:].split(" ") if len(identifier) > len(executable) + 1 else []
# Check if hypothetical executable exists
if self._which(executable) is not None:
# Found the referenced executable
self._exec = executable
self._params = params
# If the command was keyed to execute immediately, do so.
if launchNow:
self.open()
else:
# No executable found - treat as a title instead. Try to capture window.
self._title = identifier
self.open()
else:
self._pid = -1 # Unrecognized identifier, setting to empty app
self._pid = self.getPID() # Confirm PID is an active process (sets to -1 otherwise)
def _which(self, program):
""" Private method to check if an executable exists
Shamelessly stolen from http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
"""
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
@classmethod
def pause(cls, waitTime):
time.sleep(waitTime)
@classmethod
def focus(cls, appName):
""" Searches for exact text, case insensitive, anywhere in the window title.
Brings the matching window to the foreground.
As a class method, accessible as `App.focus(appName)`. As an instance method,
accessible as `App(appName).focus()`.
"""
app = cls(appName)
return app.focus()
    def _focus_instance(self):
        """ In instances, the ``focus()`` classmethod is replaced with this instance method. """
        if self._title:
            Debug.log(3, "Focusing app with title like ({})".format(self._title))
            PlatformManager.focusWindow(PlatformManager.getWindowByTitle(re.escape(self._title)))
            # No live process matched the title: try to (re)capture/launch it.
            if self.getPID() == -1:
                self.open()
        elif self._pid and self._pid != -1:
            Debug.log(3, "Focusing app with pid ({})".format(self._pid))
            PlatformManager.focusWindow(PlatformManager.getWindowByPID(self._pid))
        return self
    @classmethod
    def close(cls, appName):
        """ Closes the process associated with the specified app.

        As a class method, accessible as `App.close(appName)`.
        As an instance method, accessible as `App(appName).close()`.
        """
        return cls(appName).close()
    def _close_instance(self):
        """ In instances, the ``close()`` classmethod is replaced with this instance method. """
        if self._process:
            # We launched this process ourselves: terminate it and release devnull.
            self._process.terminate()
            self._devnull.close()
        elif self.getPID() != -1:
            # Attached to an existing process: ask the platform layer to kill it.
            PlatformManager.killProcess(self.getPID())
@classmethod
def open(self, executable):
""" Runs the specified command and returns an App linked to the generated PID.
As a class method, accessible as `App.open(executable_path)`.
As an instance method, accessible as `App(executable_path).open()`.
"""
return App(executable).open()
    def _open_instance(self, waitTime=0):
        """ In instances, the ``open()`` classmethod is replaced with this instance method.

        Optionally sleeps `waitTime` seconds after launching/capturing.
        """
        if self._exec != "":
            # Open from an executable + parameters
            self._devnull = open(os.devnull, 'w')
            self._process = subprocess.Popen([self._exec] + self._params, shell=False, stderr=self._devnull, stdout=self._devnull)
            self._pid = self._process.pid
        elif self._title != "":
            # Capture an existing window that matches self._title
            self._pid = PlatformManager.getWindowPID(
                PlatformManager.getWindowByTitle(
                    re.escape(self._title)))
        time.sleep(waitTime)
        return self
    @classmethod
    def focusedWindow(cls):
        """ Returns a Region corresponding to whatever window is in the foreground """
        # Query the platform layer for the foreground window's bounding box.
        x, y, w, h = PlatformManager.getWindowRect(PlatformManager.getForegroundWindow())
        return Region(x, y, w, h)
def getWindow(self):
""" Returns the title of the main window of the currently open app.
Returns an empty string if no match could be found.
"""
if self.getPID() != -1:
if not self.hasWindow():
return ""
return PlatformManager.getWindowTitle(PlatformManager.getWindowByPID(self.getPID()))
else:
return ""
def getName(self):
    """Return the short name of the app as shown in the process list."""
    pid = self.getPID()
    return PlatformManager.getProcessName(pid)
def getPID(self):
    """Return the PID of the associated app.

    Returns -1 if no app is associated or the app is no longer running.
    """
    if self._pid is None:
        return -1
    if not PlatformManager.isPIDValid(self._pid):
        # Cached PID has gone stale; remember that by pinning it to -1.
        self._pid = -1
    return self._pid
def hasWindow(self):
    """Return True if the process has a window associated, False otherwise."""
    window = PlatformManager.getWindowByPID(self.getPID())
    return window is not None
def waitForWindow(self, seconds=5):
    """Poll (every 0.5s) until the app's window appears or the timeout elapses.

    Returns the window Region, or None if no window showed up in time.
    """
    deadline = time.time() + seconds
    while True:
        region = self.window()
        if region is not None or time.time() > deadline:
            return region
        time.sleep(0.5)
def window(self, windowNum=0):
    """Return the Region of the specified window of the app.

    Defaults to the first window found for the corresponding PID.
    Returns None if no PID is associated or the process has no window.
    """
    if self._pid == -1 or not self.hasWindow():
        return None
    handle = PlatformManager.getWindowByPID(self._pid, windowNum)
    x, y, w, h = PlatformManager.getWindowRect(handle)
    return Region(x, y, w, h).clipRegionToScreen()
def setUsing(self, params):
    """Set the command-line parameters used when launching the app.

    ``params`` is a single string; it is split on single spaces (not on
    arbitrary whitespace) into the argument list passed to the process.
    """
    self._params = params.split(" ")
def __repr__(self):
    """Debug representation: ``[pid:process_name (window_title)] search_text``."""
    return "[{}:{} ({})] {}".format(
        self._pid, self.getName(), self.getWindow(), self._search)
def isRunning(self, waitTime=0):
    """Return True if the app is running, polling for up to ``waitTime`` seconds.

    If no PID is associated yet, repeatedly tries to adopt the PID of a
    window whose title matches this instance's title.  At least one check
    is performed even when ``waitTime`` is 0.
    """
    waitUntil = time.time() + waitTime
    while True:
        if self.getPID() > 0:
            return True
        else:
            # No valid PID yet: try to find a window with the stored title
            # (escaped so regex metacharacters match literally).
            self._pid = PlatformManager.getWindowPID(PlatformManager.getWindowByTitle(re.escape(self._title)))
        # Check if we've waited long enough
        if time.time() > waitUntil:
            break
        else:
            # Sleep one scan interval between polls.
            time.sleep(self._defaultScanRate)
    return self.getPID() > 0
def isValid(self):
    """True if the executable path exists on disk or the app is running."""
    if os.path.isfile(self._exec):
        return True
    return self.getPID() > 0
@classmethod
def getClipboard(cls):
    """Return the current text contents of the system clipboard.

    Accessible as a classmethod: ``App.getClipboard()``.
    """
    return pyperclip.paste()
@classmethod
def setClipboard(cls, contents):
    """Replace the system clipboard contents with ``contents``.

    Accessible as a classmethod: ``App.setClipboard(text)``.
    """
    return pyperclip.copy(contents)
| {
"repo_name": "glitchassassin/lackey",
"path": "lackey/App.py",
"copies": "1",
"size": "10479",
"license": "mit",
"hash": 1650966455382736100,
"line_mean": 38.5433962264,
"line_max": 170,
"alpha_frac": 0.5995801126,
"autogenerated": false,
"ratio": 4.432741116751269,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003252873644777717,
"num_lines": 265
} |
""" Abstract Storage. """
from __future__ import print_function
from os import path as op, walk
class BaseStorage():
    """ Base class for storages. """

    def __init__(self, collect, verbose=False):
        # ``collect`` is the Flask-Collect extension instance; it provides
        # .app, .blueprints and .static_url.  ``verbose`` toggles log() output.
        self.verbose = verbose
        self.collect = collect

    def __iter__(self):
        """ Walk static folders, yielding (blueprint, full path, output path).

        :return generator: Walk files
        """
        # The app itself is walked first, then every registered blueprint.
        for bp in [self.collect.app] + list(self.collect.blueprints.values()):
            if bp.has_static_folder and op.isdir(bp.static_folder):
                for root, _, files in walk(bp.static_folder):
                    for f in files:
                        fpath = op.join(root, f)
                        opath = op.relpath(fpath, bp.static_folder.rstrip('/'))
                        # If the blueprint serves its static files under a
                        # sub-path of the global static URL, prefix the
                        # output path with that sub-path so the collected
                        # directory layout mirrors the URL layout.
                        if bp.static_url_path and self.collect.static_url and \
                                bp.static_url_path.startswith(
                                    op.join(self.collect.static_url, '')):  # noqa
                            opath = op.join(
                                op.relpath(
                                    bp.static_url_path,
                                    self.collect.static_url), opath)
                        yield bp, fpath, opath

    def log(self, msg):
        """ Log message. """
        # Only prints when verbose mode was requested at construction time.
        if self.verbose:
            print(msg)
| {
"repo_name": "jirikuncar/Flask-Collect",
"path": "flask_collect/storage/base.py",
"copies": "1",
"size": "1374",
"license": "bsd-3-clause",
"hash": 7436117648085603000,
"line_mean": 34.2307692308,
"line_max": 81,
"alpha_frac": 0.481077147,
"autogenerated": false,
"ratio": 4.549668874172186,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00031269543464665416,
"num_lines": 39
} |
"""Abstract syntax for edn.
Roughly speaking, every edn element gets its own terml symbol. Thus, an edn
stream consisting of a vector of two strings, e.g.::
["foo" "bar"]
Will be mapped to::
Vector((String(u'foo'), String(u'bar')))
Beyond that::
#foo 42 <=> TaggedValue(Symbol('foo'), 42)
:my/keyword <=> Keyword(Symbol('keyword', 'my'))
my/symbol <=> Symbol('symbol', 'my')
"""
from functools import partial
import os
from parsley import makeGrammar
from terml.nodes import coerceToTerm, termMaker as t
from ._parsley import iterGrammar, parseGrammar
Character = t.Character
ExactFloat = t.ExactFloat
Keyword = t.Keyword
List = t.List
Map = t.Map
Nil = t.Nil()
Set = t.Set
String = t.String
Symbol = t.Symbol
TaggedValue = t.TaggedValue
Vector = t.Vector
def Float(value, exact):
    """Return an ExactFloat term for exact literals, else a plain Python float."""
    return ExactFloat(value) if exact else float(value)
# Load and compile the edn grammar that ships next to this module.
_edn_grammar_file = os.path.join(os.path.dirname(__file__), 'edn.parsley')
# Read via a context manager so the file handle is closed promptly instead
# of leaking until garbage collection.
with open(_edn_grammar_file) as _grammar_source:
    _edn_grammar_definition = _grammar_source.read()

# Names made available to the grammar rules.
_edn_bindings = {
    'Character': Character,
    'Float': Float,
    'String': String,
    'Symbol': Symbol,
    'Keyword': Keyword,
    'Vector': Vector,
    'TaggedValue': TaggedValue,
    'Map': Map,
    'Nil': Nil,
    'Set': Set,
    'List': List,
}

_parsed_edn = parseGrammar(_edn_grammar_definition, 'edn')
edn = makeGrammar(_edn_grammar_definition, _edn_bindings, name='edn')
def parse(string):
    """Parse a single edn element.

    Returns an abstract representation of a single edn element.
    """
    grammar = edn(string)
    return grammar.edn()
def parse_stream(stream):
    """Parse a stream of edn elements from a file-like object.

    Yields the abstract syntax for each edn element in *stream*, lazily,
    via the pre-parsed grammar.

    :param stream: A file-like object to read edn elements from.
    """
    return iterGrammar(_parsed_edn, _edn_bindings, 'edn', stream)
def _wrap(start, end, *middle):
return ''.join([start] + list(middle) + [end])
class _Builder(object):
    """Term builder that renders abstract edn terms back into edn text.

    Instances are handed to ``coerceToTerm(...).build()``; terml calls the
    ``leafTag`` / ``leafData`` / ``term`` hooks while traversing the term
    tree, and each ``_dump_*`` method renders one edn element type.

    NOTE(review): this class uses Python 2 types (``long``, ``unicode``)
    and byte-wise string iteration -- it predates Python 3.
    """

    # (python type(s), render function) pairs, tried in order by leafData.
    PRIMITIVES = (
        ((int, float), str),
        (long, lambda x: str(x) + 'N'),
        (unicode, unicode),
        (str, str),
    )

    def _dump_true(self):
        return 'true'

    def _dump_false(self):
        return 'false'

    def _dump_Nil(self):
        return 'nil'

    def _dump_Character(self, obj):
        return '\\' + obj

    def _dump_ExactFloat(self, obj):
        # Exact (decimal) floats carry an 'M' suffix in edn.
        return '%sM' % (obj,)

    def _dump_Keyword(self, obj):
        return ':' + obj

    def _dump_Symbol(self, name, prefix=None):
        # Namespaced symbols render as prefix/name.
        if prefix:
            return '%s/%s' % (prefix, name)
        else:
            return name

    def _dump_TaggedValue(self, tag, value):
        return '#%s %s' % (tag, value)

    def _dump_String(self, obj):
        # Quote the string, escaping edn special characters byte-by-byte
        # over the UTF-8 encoding of the text.
        quote = '"'
        escape = {
            '"': r'\"',
            '\\': r'\\',
            '\n': r'\n',
            '\r': r'\r',
            '\t': r'\t',
            '\b': r'\b',
            '\f': r'\f',
        }
        output = [quote]
        encoded = obj.encode('utf8')
        for byte in encoded:
            escaped = escape.get(byte, byte)
            output.append(escaped)
        output.append(quote)
        return ''.join(output)

    # Collection types only differ by their delimiters.
    _dump_List = partial(_wrap, '(', ')')
    _dump_Vector = partial(_wrap, '[', ']')
    _dump_Set = partial(_wrap, '#{', '}')
    _dump_Map = partial(_wrap, '{', '}')

    def _merge_elements(self, *elements):
        # Sibling elements are separated by single spaces.
        return ' '.join(elements)

    def leafTag(self, tag, span):
        # Dispatch a term tag to its matching _dump_* renderer.
        if tag.name == '.tuple.':
            return self._merge_elements
        return getattr(self, '_dump_%s' % (tag.name,))

    def leafData(self, data, span=None):
        for base_type, dump_rule in self.PRIMITIVES:
            if isinstance(data, base_type):
                return lambda *args: dump_rule(data)
        raise ValueError("Cannot encode %r" % (data,))

    def term(self, f, built_terms):
        return f(*built_terms)
def unparse(obj):
    """Turn an abstract edn element into a string.

    Returns a valid edn string representing *obj*.
    """
    return coerceToTerm(obj).build(_Builder())
def unparse_stream(input_elements, output_stream):
    """Write abstract edn elements out as edn to a file-like object.

    Elements will be separated by UNIX newlines. This may change in future
    versions.
    """
    # The separator is encoded: the output stream receives UTF-8 bytes
    # (Python 2 era code).
    separator = u'\n'.encode('utf8')
    # One builder instance is reused for every element.
    builder = _Builder()
    for element in input_elements:
        output_stream.write(coerceToTerm(element).build(builder))
        output_stream.write(separator)
| {
"repo_name": "dreid/edn",
"path": "edn/_ast.py",
"copies": "1",
"size": "4515",
"license": "mit",
"hash": 8023433229935271000,
"line_mean": 23.1443850267,
"line_max": 76,
"alpha_frac": 0.5858250277,
"autogenerated": false,
"ratio": 3.4571209800918834,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4542946007791883,
"avg_score": null,
"num_lines": null
} |
# Abstract Syntax Tree (AST) for Latex (Mixed) Linear Progamming formulation (MLP)
# Abstract Syntax Tree (AST) for Latex (Mixed) Linear Programming formulation (MLP)
class LinearProgram:
    """Root node in the AST of a (mixed) linear program."""

    def __init__(self, objectives, constraints, declarations=None):
        """Store the objectives, constraints and optional declarations.

        :param objectives: Objectives
        :param constraints: Constraints
        """
        self.objectives = objectives
        self.constraints = constraints
        self.declarations = declarations

    def __str__(self):
        """Human-readable dump of this linear program."""
        parts = ["\nLP:\n", str(self.objectives), "\n"]
        if self.constraints:
            parts.append(str(self.constraints))
            parts.append("\n")
        return "".join(parts)

    def setupEnvironment(self, codeSetup):
        # Visitor hook: delegate environment setup to the codeSetup visitor.
        codeSetup.setupEnvironment(self)

    def prepare(self, codePrepare):
        # Visitor hook: delegate preprocessing to the codePrepare visitor.
        codePrepare.prepare(self)

    def generateCode(self, codeGenerator):
        """Generate the code in MiniZinc for this Linear Program."""
        return codeGenerator.generateCode(self)
| {
"repo_name": "rafaellc28/Latex2MiniZinc",
"path": "latex2minizinc/LinearProgram.py",
"copies": "1",
"size": "1111",
"license": "mit",
"hash": -1023045377291000200,
"line_mean": 26.775,
"line_max": 82,
"alpha_frac": 0.5805580558,
"autogenerated": false,
"ratio": 4.57201646090535,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.565257451670535,
"avg_score": null,
"num_lines": null
} |
"""Abstract syntax tree for songs.
Basic structure:
* A song consists of verses, choruses and maybe comments.
* Verses and choruses consist of lines and maybe comments.
Global information like titles and chords
are properties of the top-level Song object.
"""
# TODO: Rename to song_ast?
class Line(object):
    """A single line of lyrics (with chords) in a song."""

    def __init__(self, line):
        self._line = line

    def write_out(self, pdf_writer):
        """Emit this line through the PDF writer."""
        pdf_writer.addLine(self._line)

    def __repr__(self):
        return repr(self._line)
class Comment(object):
    """A free-form comment attached to a song or section."""

    def __init__(self, comment):
        self._comment = comment

    def write_out(self, pdf_writer):
        """Emit this comment through the PDF writer."""
        pdf_writer.addComment(self._comment)

    def __repr__(self):
        return "/* %s */" % self._comment
class ContainerNode(object):
    """Base class for song sections that hold a sequence of child nodes."""

    def __init__(self, children):
        self._children = children

    def __repr__(self):
        inner = " / ".join(repr(child) for child in self._children)
        return "<%s: %s>" % (type(self).__name__, inner)
class Verse(ContainerNode):
    """A plain verse; its children are written out sequentially."""

    def write_out(self, pdf_writer):
        # An empty line separates this section from the previous one.
        # TODO: This is a terrible way to separate sections.
        pdf_writer.addLine([])
        for node in self._children:
            node.write_out(pdf_writer)
class Chorus(ContainerNode):
    """A chorus; its children are written inside the writer's chorus styling."""

    def write_out(self, pdf_writer):
        # An empty line separates this section from the previous one.
        # TODO: This is a terrible way to separate sections.
        pdf_writer.addLine([])
        with pdf_writer.chorusSection():
            for node in self._children:
                node.write_out(pdf_writer)
class Song(object):
    """Top-level song: child sections plus title, subtitle and chord shapes."""

    def __init__(self, children, title='', subtitle='', chords=None):
        self._children = children
        self._title = title
        self._subtitle = subtitle
        # Use a None sentinel instead of a mutable default argument: with
        # ``chords={}`` every Song created without chords shared one dict.
        self._chords = {} if chords is None else chords

    def write_out(self, pdf_writer):
        """Write the whole song (chords, title, sections) to the PDF writer."""
        for name, frets in self._chords.items():
            pdf_writer._chords[name] = frets
        pdf_writer.setTitle(self._title, self._subtitle)
        pdf_writer.startLyrics()
        for child in self._children:
            child.write_out(pdf_writer)
        pdf_writer.finish()

    def __repr__(self):
        return "[%s (%s): %r]" % (self._title, self._subtitle, self._children)
| {
"repo_name": "gnoack/ukechord",
"path": "song.py",
"copies": "1",
"size": "1973",
"license": "apache-2.0",
"hash": 4582273945225376000,
"line_mean": 24.2948717949,
"line_max": 84,
"alpha_frac": 0.6523061328,
"autogenerated": false,
"ratio": 3.4735915492957745,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4625897682095775,
"avg_score": null,
"num_lines": null
} |
"""Abstract tensor product."""
from __future__ import print_function, division
from sympy import Expr, Add, Mul, Matrix, Pow, sympify
from sympy.core.compatibility import range
from sympy.core.trace import Tr
from sympy.printing.pretty.stringpict import prettyForm
from sympy.physics.quantum.qexpr import QuantumError
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.commutator import Commutator
from sympy.physics.quantum.anticommutator import AntiCommutator
from sympy.physics.quantum.state import Ket, Bra
from sympy.physics.quantum.matrixutils import (
numpy_ndarray,
scipy_sparse_matrix,
matrix_tensor_product
)
__all__ = [
'TensorProduct',
'tensor_product_simp'
]
#-----------------------------------------------------------------------------
# Tensor product
#-----------------------------------------------------------------------------
_combined_printing = False
def combined_tensor_printing(combined):
    """Set flag controlling whether tensor products of states should be
    printed as a combined bra/ket or as an explicit tensor product of different
    bra/kets. This is a global setting for all TensorProduct class instances.

    Parameters
    ----------
    combined : bool
        When true, tensor product states are combined into one ket/bra, and
        when false explicit tensor product notation is used between each
        ket/bra.
    """
    global _combined_printing
    _combined_printing = combined
class TensorProduct(Expr):
    """The tensor product of two or more arguments.

    For matrices, this uses ``matrix_tensor_product`` to compute the Kronecker
    or tensor product matrix. For other objects a symbolic ``TensorProduct``
    instance is returned. The tensor product is a non-commutative
    multiplication that is used primarily with operators and states in quantum
    mechanics.

    Currently, the tensor product distinguishes between commutative and non-
    commutative arguments.  Commutative arguments are assumed to be scalars and
    are pulled out in front of the ``TensorProduct``. Non-commutative arguments
    remain in the resulting ``TensorProduct``.

    Parameters
    ==========

    args : tuple
        A sequence of the objects to take the tensor product of.

    Examples
    ========

    Start with a simple tensor product of sympy matrices::

        >>> from sympy import I, Matrix, symbols
        >>> from sympy.physics.quantum import TensorProduct

        >>> m1 = Matrix([[1,2],[3,4]])
        >>> m2 = Matrix([[1,0],[0,1]])
        >>> TensorProduct(m1, m2)
        Matrix([
        [1, 0, 2, 0],
        [0, 1, 0, 2],
        [3, 0, 4, 0],
        [0, 3, 0, 4]])
        >>> TensorProduct(m2, m1)
        Matrix([
        [1, 2, 0, 0],
        [3, 4, 0, 0],
        [0, 0, 1, 2],
        [0, 0, 3, 4]])

    We can also construct tensor products of non-commutative symbols:

        >>> from sympy import Symbol
        >>> A = Symbol('A',commutative=False)
        >>> B = Symbol('B',commutative=False)
        >>> tp = TensorProduct(A, B)
        >>> tp
        AxB

    We can take the dagger of a tensor product (note the order does NOT reverse
    like the dagger of a normal product):

        >>> from sympy.physics.quantum import Dagger
        >>> Dagger(tp)
        Dagger(A)xDagger(B)

    Expand can be used to distribute a tensor product across addition:

        >>> C = Symbol('C',commutative=False)
        >>> tp = TensorProduct(A+B,C)
        >>> tp
        (A + B)xC
        >>> tp.expand(tensorproduct=True)
        AxC + BxC
    """
    is_commutative = False

    def __new__(cls, *args):
        # Explicit matrices are multiplied out immediately; only symbolic
        # arguments produce an unevaluated TensorProduct instance.
        if isinstance(args[0], (Matrix, numpy_ndarray, scipy_sparse_matrix)):
            return matrix_tensor_product(*args)
        c_part, new_args = cls.flatten(sympify(args))
        c_part = Mul(*c_part)
        if len(new_args) == 0:
            return c_part
        elif len(new_args) == 1:
            return c_part * new_args[0]
        else:
            tp = Expr.__new__(cls, *new_args)
            return c_part * tp

    @classmethod
    def flatten(cls, args):
        """Split each argument into commutative (scalar) and non-commutative
        parts; scalars are collected so __new__ can pull them out front."""
        # TODO: disallow nested TensorProducts.
        c_part = []
        nc_parts = []
        for arg in args:
            cp, ncp = arg.args_cnc()
            c_part.extend(list(cp))
            nc_parts.append(Mul._from_args(ncp))
        return c_part, nc_parts

    def _eval_adjoint(self):
        # Dagger distributes over the factors without reversing their order.
        return TensorProduct(*[Dagger(i) for i in self.args])

    def _eval_rewrite(self, pattern, rule, **hints):
        sargs = self.args
        terms = [t._eval_rewrite(pattern, rule, **hints) for t in sargs]
        return TensorProduct(*terms).expand(tensorproduct=True)

    def _sympystr(self, printer, *args):
        # Plain-string form: factors joined by 'x', parenthesized as needed.
        from sympy.printing.str import sstr
        length = len(self.args)
        s = ''
        for i in range(length):
            if isinstance(self.args[i], (Add, Pow, Mul)):
                s = s + '('
            s = s + sstr(self.args[i])
            if isinstance(self.args[i], (Add, Pow, Mul)):
                s = s + ')'
            if i != length - 1:
                s = s + 'x'
        return s

    def _pretty(self, printer, *args):
        # Combined mode: an all-Ket (or all-Bra) product is rendered as a
        # single bracket holding comma-separated labels.
        if (_combined_printing and
                (all([isinstance(arg, Ket) for arg in self.args]) or
                 all([isinstance(arg, Bra) for arg in self.args]))):

            length = len(self.args)
            pform = printer._print('', *args)
            for i in range(length):
                next_pform = printer._print('', *args)
                length_i = len(self.args[i].args)
                for j in range(length_i):
                    part_pform = printer._print(self.args[i].args[j], *args)
                    next_pform = prettyForm(*next_pform.right(part_pform))
                    if j != length_i - 1:
                        next_pform = prettyForm(*next_pform.right(', '))

                if len(self.args[i].args) > 1:
                    next_pform = prettyForm(
                        *next_pform.parens(left='{', right='}'))
                pform = prettyForm(*pform.right(next_pform))
                if i != length - 1:
                    pform = prettyForm(*pform.right(',' + ' '))

            pform = prettyForm(*pform.left(self.args[0].lbracket))
            pform = prettyForm(*pform.right(self.args[0].rbracket))
            return pform

        # Default mode: factors joined by a circled-times (or 'x' in ASCII).
        length = len(self.args)
        pform = printer._print('', *args)
        for i in range(length):
            next_pform = printer._print(self.args[i], *args)
            if isinstance(self.args[i], (Add, Mul)):
                next_pform = prettyForm(
                    *next_pform.parens(left='(', right=')')
                )
            pform = prettyForm(*pform.right(next_pform))
            if i != length - 1:
                if printer._use_unicode:
                    pform = prettyForm(*pform.right(u'\N{N-ARY CIRCLED TIMES OPERATOR}' + u' '))
                else:
                    pform = prettyForm(*pform.right('x' + ' '))
        return pform

    def _latex(self, printer, *args):
        # Combined mode mirrors _pretty: one bracket, comma-separated labels.
        if (_combined_printing and
                (all([isinstance(arg, Ket) for arg in self.args]) or
                 all([isinstance(arg, Bra) for arg in self.args]))):

            def _label_wrap(label, nlabels):
                return label if nlabels == 1 else r"\left\{%s\right\}" % label

            s = r", ".join([_label_wrap(arg._print_label_latex(printer, *args),
                                        len(arg.args)) for arg in self.args])

            return r"{%s%s%s}" % (self.args[0].lbracket_latex, s,
                                  self.args[0].rbracket_latex)

        length = len(self.args)
        s = ''
        for i in range(length):
            if isinstance(self.args[i], (Add, Mul)):
                s = s + '\\left('
            # The extra {} brackets are needed to get matplotlib's latex
            # rendered to render this properly.
            s = s + '{' + printer._print(self.args[i], *args) + '}'
            if isinstance(self.args[i], (Add, Mul)):
                s = s + '\\right)'
            if i != length - 1:
                s = s + '\\otimes '
        return s

    def doit(self, **hints):
        return TensorProduct(*[item.doit(**hints) for item in self.args])

    def _eval_expand_tensorproduct(self, **hints):
        """Distribute TensorProducts across addition."""
        args = self.args
        add_args = []
        # Distribute over the FIRST Add argument only; the recursive call on
        # each resulting product handles any remaining Add arguments.
        # (Removed an unused local ``stop = False`` from the original.)
        for i in range(len(args)):
            if isinstance(args[i], Add):
                for aa in args[i].args:
                    tp = TensorProduct(*args[:i] + (aa,) + args[i + 1:])
                    if isinstance(tp, TensorProduct):
                        tp = tp._eval_expand_tensorproduct()
                    add_args.append(tp)
                break

        if add_args:
            return Add(*add_args)
        else:
            return self

    def _eval_trace(self, **kwargs):
        # With no indices, trace every factor; otherwise trace only the
        # factors whose position appears in ``indices``.
        indices = kwargs.get('indices', None)
        exp = tensor_product_simp(self)

        if indices is None or len(indices) == 0:
            return Mul(*[Tr(arg).doit() for arg in exp.args])
        else:
            return Mul(*[Tr(value).doit() if idx in indices else value
                         for idx, value in enumerate(exp.args)])
def tensor_product_simp_Mul(e):
    """Simplify a Mul with TensorProducts.

    Currently the main use of this is to simplify a ``Mul`` of ``TensorProduct``s
    to a ``TensorProduct`` of ``Muls``. It currently only works for relatively
    simple cases where the initial ``Mul`` only has scalars and raw
    ``TensorProduct``s, not ``Add``, ``Pow``, ``Commutator``s of
    ``TensorProduct``s.

    Parameters
    ==========

    e : Expr
        A ``Mul`` of ``TensorProduct``s to be simplified.

    Returns
    =======

    e : Expr
        A ``TensorProduct`` of ``Mul``s.

    Examples
    ========

    This is an example of the type of simplification that this function
    performs::

        >>> from sympy.physics.quantum.tensorproduct import \
                tensor_product_simp_Mul, TensorProduct
        >>> from sympy import Symbol
        >>> A = Symbol('A',commutative=False)
        >>> B = Symbol('B',commutative=False)
        >>> C = Symbol('C',commutative=False)
        >>> D = Symbol('D',commutative=False)
        >>> e = TensorProduct(A,B)*TensorProduct(C,D)
        >>> e
        AxB*CxD
        >>> tensor_product_simp_Mul(e)
        (A*C)x(B*D)

    """
    # TODO: This won't work with Muls that have other composites of
    # TensorProducts, like an Add, Pow, Commutator, etc.
    # TODO: This only works for the equivalent of single Qbit gates.
    if not isinstance(e, Mul):
        return e
    c_part, nc_part = e.args_cnc()
    n_nc = len(nc_part)
    if n_nc == 0 or n_nc == 1:
        # Zero or one non-commutative factor: nothing to combine.
        return e
    elif e.has(TensorProduct):
        current = nc_part[0]
        if not isinstance(current, TensorProduct):
            raise TypeError('TensorProduct expected, got: %r' % current)
        n_terms = len(current.args)
        new_args = list(current.args)
        for next in nc_part[1:]:  # NOTE(review): ``next`` shadows the builtin
            # TODO: check the hilbert spaces of next and current here.
            if isinstance(next, TensorProduct):
                if n_terms != len(next.args):
                    raise QuantumError(
                        'TensorProducts of different lengths: %r and %r' %
                        (current, next)
                    )
                # Combine factor-wise: (A x B)*(C x D) -> (A*C) x (B*D).
                for i in range(len(new_args)):
                    new_args[i] = new_args[i] * next.args[i]
            else:
                # this won't quite work as we don't want next in the
                # TensorProduct
                for i in range(len(new_args)):
                    new_args[i] = new_args[i] * next
            current = next
        return Mul(*c_part) * TensorProduct(*new_args)
    else:
        return e
def tensor_product_simp(e, **hints):
    """Try to simplify and combine TensorProducts.

    In general this will try to pull expressions inside of ``TensorProducts``.
    It currently only works for relatively simple cases where the products have
    only scalars, raw ``TensorProducts``, not ``Add``, ``Pow``, ``Commutators``
    of ``TensorProducts``. It is best to see what it does by showing examples.

    Examples
    ========

        >>> from sympy.physics.quantum import tensor_product_simp
        >>> from sympy.physics.quantum import TensorProduct
        >>> from sympy import Symbol
        >>> A = Symbol('A',commutative=False)
        >>> B = Symbol('B',commutative=False)
        >>> C = Symbol('C',commutative=False)
        >>> D = Symbol('D',commutative=False)

    First see what happens to products of tensor products:

        >>> e = TensorProduct(A,B)*TensorProduct(C,D)
        >>> e
        AxB*CxD
        >>> tensor_product_simp(e)
        (A*C)x(B*D)

    This is the core logic of this function, and it works inside, powers, sums,
    commutators and anticommutators as well:

        >>> tensor_product_simp(e**2)
        (A*C)x(B*D)**2

    """
    # Recurse structurally; the real combining work happens for Mul.
    if isinstance(e, Add):
        return Add(*(tensor_product_simp(term) for term in e.args))
    if isinstance(e, Pow):
        return tensor_product_simp(e.base) ** e.exp
    if isinstance(e, Mul):
        return tensor_product_simp_Mul(e)
    if isinstance(e, Commutator):
        return Commutator(*(tensor_product_simp(arg) for arg in e.args))
    if isinstance(e, AntiCommutator):
        return AntiCommutator(*(tensor_product_simp(arg) for arg in e.args))
    return e
| {
"repo_name": "madan96/sympy",
"path": "sympy/physics/quantum/tensorproduct.py",
"copies": "23",
"size": "13565",
"license": "bsd-3-clause",
"hash": -5662125056116942000,
"line_mean": 33.5165394402,
"line_max": 96,
"alpha_frac": 0.5525248802,
"autogenerated": false,
"ratio": 3.9559638378536017,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00009065072276518575,
"num_lines": 393
} |
"""Abstract tensor product."""
from __future__ import print_function, division
from sympy import Expr, Add, Mul, Matrix, Pow, sympify
from sympy.core.compatibility import u
from sympy.core.trace import Tr
from sympy.printing.pretty.stringpict import prettyForm
from sympy.physics.quantum.matrixutils import (
numpy_ndarray,
scipy_sparse_matrix,
matrix_tensor_product
)
from sympsi.qexpr import QuantumError
from sympsi.dagger import Dagger
from sympsi.commutator import Commutator
from sympsi.anticommutator import AntiCommutator
from sympsi.state import Ket, Bra
__all__ = [
'TensorProduct',
'tensor_product_simp'
]
#-----------------------------------------------------------------------------
# Tensor product
#-----------------------------------------------------------------------------
_combined_printing = False
def combined_tensor_printing(combined):
    """Set flag controlling whether tensor products of states should be
    printed as a combined bra/ket or as an explicit tensor product of different
    bra/kets. This is a global setting for all TensorProduct class instances.

    Parameters
    ----------
    combined : bool
        When true, tensor product states are combined into one ket/bra, and
        when false explicit tensor product notation is used between each
        ket/bra.
    """
    global _combined_printing
    _combined_printing = combined
class TensorProduct(Expr):
    """The tensor product of two or more arguments.

    For matrices, this uses ``matrix_tensor_product`` to compute the Kronecker
    or tensor product matrix. For other objects a symbolic ``TensorProduct``
    instance is returned. The tensor product is a non-commutative
    multiplication that is used primarily with operators and states in quantum
    mechanics.

    Currently, the tensor product distinguishes between commutative and non-
    commutative arguments.  Commutative arguments are assumed to be scalars and
    are pulled out in front of the ``TensorProduct``. Non-commutative arguments
    remain in the resulting ``TensorProduct``.

    Parameters
    ==========

    args : tuple
        A sequence of the objects to take the tensor product of.

    Examples
    ========

    Start with a simple tensor product of sympy matrices::

        >>> from sympy import I, Matrix, symbols
        >>> from sympsi import TensorProduct

        >>> m1 = Matrix([[1,2],[3,4]])
        >>> m2 = Matrix([[1,0],[0,1]])
        >>> TensorProduct(m1, m2)
        Matrix([
        [1, 0, 2, 0],
        [0, 1, 0, 2],
        [3, 0, 4, 0],
        [0, 3, 0, 4]])
        >>> TensorProduct(m2, m1)
        Matrix([
        [1, 2, 0, 0],
        [3, 4, 0, 0],
        [0, 0, 1, 2],
        [0, 0, 3, 4]])

    We can also construct tensor products of non-commutative symbols:

        >>> from sympy import Symbol
        >>> A = Symbol('A',commutative=False)
        >>> B = Symbol('B',commutative=False)
        >>> tp = TensorProduct(A, B)
        >>> tp
        AxB

    We can take the dagger of a tensor product (note the order does NOT reverse
    like the dagger of a normal product):

        >>> from sympsi import Dagger
        >>> Dagger(tp)
        Dagger(A)xDagger(B)

    Expand can be used to distribute a tensor product across addition:

        >>> C = Symbol('C',commutative=False)
        >>> tp = TensorProduct(A+B,C)
        >>> tp
        (A + B)xC
        >>> tp.expand(tensorproduct=True)
        AxC + BxC
    """
    is_commutative = False

    def __new__(cls, *args):
        # Explicit matrices are multiplied out immediately; only symbolic
        # arguments produce an unevaluated TensorProduct instance.
        if isinstance(args[0], (Matrix, numpy_ndarray, scipy_sparse_matrix)):
            return matrix_tensor_product(*args)
        c_part, new_args = cls.flatten(sympify(args))
        c_part = Mul(*c_part)
        if len(new_args) == 0:
            return c_part
        elif len(new_args) == 1:
            return c_part * new_args[0]
        else:
            tp = Expr.__new__(cls, *new_args)
            return c_part * tp

    @classmethod
    def flatten(cls, args):
        """Split each argument into commutative (scalar) and non-commutative
        parts; scalars are collected so __new__ can pull them out front."""
        # TODO: disallow nested TensorProducts.
        c_part = []
        nc_parts = []
        for arg in args:
            cp, ncp = arg.args_cnc()
            c_part.extend(list(cp))
            nc_parts.append(Mul._from_args(ncp))
        return c_part, nc_parts

    def _eval_adjoint(self):
        # Dagger distributes over the factors without reversing their order.
        return TensorProduct(*[Dagger(i) for i in self.args])

    def _eval_rewrite(self, pattern, rule, **hints):
        sargs = self.args
        terms = [t._eval_rewrite(pattern, rule, **hints) for t in sargs]
        return TensorProduct(*terms).expand(tensorproduct=True)

    def _sympystr(self, printer, *args):
        # Plain-string form: factors joined by 'x', parenthesized as needed.
        from sympy.printing.str import sstr
        length = len(self.args)
        s = ''
        for i in range(length):
            if isinstance(self.args[i], (Add, Pow, Mul)):
                s = s + '('
            s = s + sstr(self.args[i])
            if isinstance(self.args[i], (Add, Pow, Mul)):
                s = s + ')'
            if i != length - 1:
                s = s + 'x'
        return s

    def _pretty(self, printer, *args):
        # Combined mode: an all-Ket (or all-Bra) product is rendered as a
        # single bracket holding comma-separated labels.
        if (_combined_printing and
                (all([isinstance(arg, Ket) for arg in self.args]) or
                 all([isinstance(arg, Bra) for arg in self.args]))):

            length = len(self.args)
            pform = printer._print('', *args)
            for i in range(length):
                next_pform = printer._print('', *args)
                length_i = len(self.args[i].args)
                for j in range(length_i):
                    part_pform = printer._print(self.args[i].args[j], *args)
                    next_pform = prettyForm(*next_pform.right(part_pform))
                    if j != length_i - 1:
                        next_pform = prettyForm(*next_pform.right(', '))

                if len(self.args[i].args) > 1:
                    next_pform = prettyForm(
                        *next_pform.parens(left='{', right='}'))
                pform = prettyForm(*pform.right(next_pform))
                if i != length - 1:
                    pform = prettyForm(*pform.right(',' + ' '))

            pform = prettyForm(*pform.left(self.args[0].lbracket))
            pform = prettyForm(*pform.right(self.args[0].rbracket))
            return pform

        # Default mode: factors joined by a circled-times (or 'x' in ASCII).
        length = len(self.args)
        pform = printer._print('', *args)
        for i in range(length):
            next_pform = printer._print(self.args[i], *args)
            if isinstance(self.args[i], (Add, Mul)):
                next_pform = prettyForm(
                    *next_pform.parens(left='(', right=')')
                )
            pform = prettyForm(*pform.right(next_pform))
            if i != length - 1:
                if printer._use_unicode:
                    pform = prettyForm(*pform.right(u('\u2a02') + u(' ')))
                else:
                    pform = prettyForm(*pform.right('x' + ' '))
        return pform

    def _latex(self, printer, *args):
        # Combined mode mirrors _pretty: one bracket, comma-separated labels.
        if (_combined_printing and
                (all([isinstance(arg, Ket) for arg in self.args]) or
                 all([isinstance(arg, Bra) for arg in self.args]))):

            def _label_wrap(label, nlabels):
                return label if nlabels == 1 else r"\left\{%s\right\}" % label

            s = r", ".join([_label_wrap(arg._print_label_latex(printer, *args),
                                        len(arg.args)) for arg in self.args])

            return r"{%s%s%s}" % (self.args[0].lbracket_latex, s,
                                  self.args[0].rbracket_latex)

        length = len(self.args)
        s = ''
        for i in range(length):
            if isinstance(self.args[i], (Add, Mul)):
                s = s + '\\left('
            # The extra {} brackets are needed to get matplotlib's latex
            # rendered to render this properly.
            s = s + '{' + printer._print(self.args[i], *args) + '}'
            if isinstance(self.args[i], (Add, Mul)):
                s = s + '\\right)'
            if i != length - 1:
                s = s + '\\otimes '
        return s

    def doit(self, **hints):
        return TensorProduct(*[item.doit(**hints) for item in self.args])

    def _eval_expand_tensorproduct(self, **hints):
        """Distribute TensorProducts across addition."""
        args = self.args
        add_args = []
        # Distribute over the FIRST Add argument only; the recursive call on
        # each resulting product handles any remaining Add arguments.
        # (Removed an unused local ``stop = False`` from the original.)
        for i in range(len(args)):
            if isinstance(args[i], Add):
                for aa in args[i].args:
                    tp = TensorProduct(*args[:i] + (aa,) + args[i + 1:])
                    if isinstance(tp, TensorProduct):
                        tp = tp._eval_expand_tensorproduct()
                    add_args.append(tp)
                break

        if add_args:
            return Add(*add_args)
        else:
            return self

    def _eval_trace(self, **kwargs):
        # With no indices, trace every factor; otherwise trace only the
        # factors whose position appears in ``indices``.
        indices = kwargs.get('indices', None)
        exp = tensor_product_simp(self)

        if indices is None or len(indices) == 0:
            return Mul(*[Tr(arg).doit() for arg in exp.args])
        else:
            return Mul(*[Tr(value).doit() if idx in indices else value
                         for idx, value in enumerate(exp.args)])
def tensor_product_simp_Mul(e):
    """Simplify a Mul with TensorProducts.

    Currently the main use of this is to simplify a ``Mul`` of ``TensorProduct``s
    to a ``TensorProduct`` of ``Muls``. It currently only works for relatively
    simple cases where the initial ``Mul`` only has scalars and raw
    ``TensorProduct``s, not ``Add``, ``Pow``, ``Commutator``s of
    ``TensorProduct``s.

    Parameters
    ==========

    e : Expr
        A ``Mul`` of ``TensorProduct``s to be simplified.

    Returns
    =======

    e : Expr
        A ``TensorProduct`` of ``Mul``s.

    Examples
    ========

    This is an example of the type of simplification that this function
    performs::

        >>> from sympsi.tensorproduct import \
                tensor_product_simp_Mul, TensorProduct
        >>> from sympy import Symbol
        >>> A = Symbol('A',commutative=False)
        >>> B = Symbol('B',commutative=False)
        >>> C = Symbol('C',commutative=False)
        >>> D = Symbol('D',commutative=False)
        >>> e = TensorProduct(A,B)*TensorProduct(C,D)
        >>> e
        AxB*CxD
        >>> tensor_product_simp_Mul(e)
        (A*C)x(B*D)

    """
    # TODO: This won't work with Muls that have other composites of
    # TensorProducts, like an Add, Pow, Commutator, etc.
    # TODO: This only works for the equivalent of single Qbit gates.
    if not isinstance(e, Mul):
        return e
    c_part, nc_part = e.args_cnc()
    n_nc = len(nc_part)
    if n_nc == 0 or n_nc == 1:
        # Zero or one non-commutative factor: nothing to combine.
        return e
    elif e.has(TensorProduct):
        current = nc_part[0]
        if not isinstance(current, TensorProduct):
            raise TypeError('TensorProduct expected, got: %r' % current)
        n_terms = len(current.args)
        new_args = list(current.args)
        for next in nc_part[1:]:  # NOTE(review): ``next`` shadows the builtin
            # TODO: check the hilbert spaces of next and current here.
            if isinstance(next, TensorProduct):
                if n_terms != len(next.args):
                    raise QuantumError(
                        'TensorProducts of different lengths: %r and %r' %
                        (current, next)
                    )
                # Combine factor-wise: (A x B)*(C x D) -> (A*C) x (B*D).
                for i in range(len(new_args)):
                    new_args[i] = new_args[i] * next.args[i]
            else:
                # this won't quite work as we don't want next in the
                # TensorProduct
                for i in range(len(new_args)):
                    new_args[i] = new_args[i] * next
            current = next
        return Mul(*c_part) * TensorProduct(*new_args)
    else:
        return e
def tensor_product_simp(e, **hints):
    """Try to simplify and combine TensorProducts.

    In general this will try to pull expressions inside of ``TensorProducts``.
    Only fairly simple cases are handled: products built from scalars and raw
    ``TensorProducts``, not ``Add``, ``Pow``, ``Commutators`` of
    ``TensorProducts``. It is best to see what it does by showing examples.

    Examples
    ========

    >>> from sympsi import tensor_product_simp
    >>> from sympsi import TensorProduct
    >>> from sympy import Symbol
    >>> A = Symbol('A',commutative=False)
    >>> B = Symbol('B',commutative=False)
    >>> C = Symbol('C',commutative=False)
    >>> D = Symbol('D',commutative=False)

    First see what happens to products of tensor products:

    >>> e = TensorProduct(A,B)*TensorProduct(C,D)
    >>> e
    AxB*CxD
    >>> tensor_product_simp(e)
    (A*C)x(B*D)

    This is the core logic of this function, and it works inside, powers, sums,
    commutators and anticommutators as well:

    >>> tensor_product_simp(e**2)
    (A*C)x(B*D)**2
    """
    # Powers and plain products get bespoke treatment; the remaining
    # container types are simply rebuilt from their simplified arguments.
    if isinstance(e, Pow):
        return tensor_product_simp(e.base) ** e.exp
    if isinstance(e, Mul):
        return tensor_product_simp_Mul(e)
    for container in (Add, Commutator, AntiCommutator):
        if isinstance(e, container):
            return container(*[tensor_product_simp(arg) for arg in e.args])
    return e
| {
"repo_name": "sympsi/sympsi",
"path": "sympsi/tensorproduct.py",
"copies": "1",
"size": "13390",
"license": "bsd-3-clause",
"hash": -4105018808006211000,
"line_mean": 32.9847715736,
"line_max": 79,
"alpha_frac": 0.5482449589,
"autogenerated": false,
"ratio": 3.935920047031158,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9983843731112678,
"avg_score": 0.00006425496369594551,
"num_lines": 394
} |
"""Abstract tensor product."""
from __future__ import print_function, division
from sympy import Expr, Add, Mul, Matrix, Pow, sympify
from sympy.core.compatibility import u, range
from sympy.core.trace import Tr
from sympy.printing.pretty.stringpict import prettyForm
from sympy.physics.quantum.qexpr import QuantumError
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.commutator import Commutator
from sympy.physics.quantum.anticommutator import AntiCommutator
from sympy.physics.quantum.state import Ket, Bra
from sympy.physics.quantum.matrixutils import (
numpy_ndarray,
scipy_sparse_matrix,
matrix_tensor_product
)
__all__ = [
'TensorProduct',
'tensor_product_simp'
]
#-----------------------------------------------------------------------------
# Tensor product
#-----------------------------------------------------------------------------
# Module-level switch read by TensorProduct's printing methods; toggled via
# combined_tensor_printing() below. When True, tensor products of all-Ket or
# all-Bra arguments print as a single combined bra/ket.
_combined_printing = False
def combined_tensor_printing(combined):
    """Set flag controlling whether tensor products of states should be
    printed as a combined bra/ket or as an explicit tensor product of different
    bra/kets. This is a global setting for all TensorProduct class instances.

    Parameters
    ==========
    combined : bool
        When true, tensor product states are combined into one ket/bra, and
        when false explicit tensor product notation is used between each
        ket/bra.
    """
    global _combined_printing
    _combined_printing = combined
class TensorProduct(Expr):
    """The tensor product of two or more arguments.
    For matrices, this uses ``matrix_tensor_product`` to compute the Kronecker
    or tensor product matrix. For other objects a symbolic ``TensorProduct``
    instance is returned. The tensor product is a non-commutative
    multiplication that is used primarily with operators and states in quantum
    mechanics.
    Currently, the tensor product distinguishes between commutative and non-
    commutative arguments. Commutative arguments are assumed to be scalars and
    are pulled out in front of the ``TensorProduct``. Non-commutative arguments
    remain in the resulting ``TensorProduct``.
    Parameters
    ==========
    args : tuple
        A sequence of the objects to take the tensor product of.
    Examples
    ========
    Start with a simple tensor product of sympy matrices::
        >>> from sympy import I, Matrix, symbols
        >>> from sympy.physics.quantum import TensorProduct
        >>> m1 = Matrix([[1,2],[3,4]])
        >>> m2 = Matrix([[1,0],[0,1]])
        >>> TensorProduct(m1, m2)
        Matrix([
        [1, 0, 2, 0],
        [0, 1, 0, 2],
        [3, 0, 4, 0],
        [0, 3, 0, 4]])
        >>> TensorProduct(m2, m1)
        Matrix([
        [1, 2, 0, 0],
        [3, 4, 0, 0],
        [0, 0, 1, 2],
        [0, 0, 3, 4]])
    We can also construct tensor products of non-commutative symbols:
        >>> from sympy import Symbol
        >>> A = Symbol('A',commutative=False)
        >>> B = Symbol('B',commutative=False)
        >>> tp = TensorProduct(A, B)
        >>> tp
        AxB
    We can take the dagger of a tensor product (note the order does NOT reverse
    like the dagger of a normal product):
        >>> from sympy.physics.quantum import Dagger
        >>> Dagger(tp)
        Dagger(A)xDagger(B)
    Expand can be used to distribute a tensor product across addition:
        >>> C = Symbol('C',commutative=False)
        >>> tp = TensorProduct(A+B,C)
        >>> tp
        (A + B)xC
        >>> tp.expand(tensorproduct=True)
        AxC + BxC
    """
    # The product as a whole is non-commutative; scalar factors are pulled
    # out in __new__/flatten rather than relying on commutativity here.
    is_commutative = False

    def __new__(cls, *args):
        # Concrete matrices short-circuit to an explicit Kronecker product.
        if isinstance(args[0], (Matrix, numpy_ndarray, scipy_sparse_matrix)):
            return matrix_tensor_product(*args)
        # Split each argument into commutative (scalar) and non-commutative
        # parts; scalars multiply the whole product from the front.
        c_part, new_args = cls.flatten(sympify(args))
        c_part = Mul(*c_part)
        if len(new_args) == 0:
            # Purely scalar input: no TensorProduct is constructed.
            return c_part
        elif len(new_args) == 1:
            # A one-factor tensor product collapses to plain multiplication.
            return c_part * new_args[0]
        else:
            tp = Expr.__new__(cls, *new_args)
            return c_part * tp

    @classmethod
    def flatten(cls, args):
        # Returns (list of commutative scalars pulled out of all arguments,
        # list of per-argument non-commutative Mul remainders).
        # TODO: disallow nested TensorProducts.
        c_part = []
        nc_parts = []
        for arg in args:
            cp, ncp = arg.args_cnc()
            c_part.extend(list(cp))
            nc_parts.append(Mul._from_args(ncp))
        return c_part, nc_parts

    def _eval_adjoint(self):
        # Dagger distributes over the factors WITHOUT reversing their order
        # (unlike the dagger of an ordinary product).
        return TensorProduct(*[Dagger(i) for i in self.args])

    def _eval_rewrite(self, pattern, rule, **hints):
        # Rewrite each factor, then redistribute the product over addition.
        sargs = self.args
        terms = [t._eval_rewrite(pattern, rule, **hints) for t in sargs]
        return TensorProduct(*terms).expand(tensorproduct=True)

    def _sympystr(self, printer, *args):
        # Plain-text form: factors joined by 'x', compound factors in parens.
        from sympy.printing.str import sstr
        length = len(self.args)
        s = ''
        for i in range(length):
            if isinstance(self.args[i], (Add, Pow, Mul)):
                s = s + '('
            s = s + sstr(self.args[i])
            if isinstance(self.args[i], (Add, Pow, Mul)):
                s = s + ')'
            if i != length - 1:
                s = s + 'x'
        return s

    def _pretty(self, printer, *args):
        # Combined bra/ket printing: when the global flag is on and every
        # factor is a Ket (or every factor a Bra), print one bracket holding
        # all labels instead of an explicit tensor product.
        if (_combined_printing and
                (all([isinstance(arg, Ket) for arg in self.args]) or
                 all([isinstance(arg, Bra) for arg in self.args]))):
            length = len(self.args)
            pform = printer._print('', *args)
            for i in range(length):
                next_pform = printer._print('', *args)
                length_i = len(self.args[i].args)
                for j in range(length_i):
                    part_pform = printer._print(self.args[i].args[j], *args)
                    next_pform = prettyForm(*next_pform.right(part_pform))
                    if j != length_i - 1:
                        next_pform = prettyForm(*next_pform.right(', '))
                # Multi-label states get braces around their label group.
                if len(self.args[i].args) > 1:
                    next_pform = prettyForm(
                        *next_pform.parens(left='{', right='}'))
                pform = prettyForm(*pform.right(next_pform))
                if i != length - 1:
                    pform = prettyForm(*pform.right(',' + ' '))
            # Reuse the bracket style of the first state for the whole group.
            pform = prettyForm(*pform.left(self.args[0].lbracket))
            pform = prettyForm(*pform.right(self.args[0].rbracket))
            return pform
        # Default pretty form: factors separated by the n-ary circled times
        # operator (unicode) or a plain 'x' (ascii).
        length = len(self.args)
        pform = printer._print('', *args)
        for i in range(length):
            next_pform = printer._print(self.args[i], *args)
            if isinstance(self.args[i], (Add, Mul)):
                next_pform = prettyForm(
                    *next_pform.parens(left='(', right=')')
                )
            pform = prettyForm(*pform.right(next_pform))
            if i != length - 1:
                if printer._use_unicode:
                    pform = prettyForm(*pform.right(u('\N{N-ARY CIRCLED TIMES OPERATOR}') + u(' ')))
                else:
                    pform = prettyForm(*pform.right('x' + ' '))
        return pform

    def _latex(self, printer, *args):
        # Combined bra/ket LaTeX form, mirroring the _pretty special case.
        if (_combined_printing and
                (all([isinstance(arg, Ket) for arg in self.args]) or
                 all([isinstance(arg, Bra) for arg in self.args]))):

            def _label_wrap(label, nlabels):
                return label if nlabels == 1 else r"\left\{%s\right\}" % label

            s = r", ".join([_label_wrap(arg._print_label_latex(printer, *args),
                                        len(arg.args)) for arg in self.args])
            return r"{%s%s%s}" % (self.args[0].lbracket_latex, s,
                                  self.args[0].rbracket_latex)
        # Default LaTeX form: factors joined by \otimes.
        length = len(self.args)
        s = ''
        for i in range(length):
            if isinstance(self.args[i], (Add, Mul)):
                s = s + '\\left('
            # The extra {} brackets are needed to get matplotlib's latex
            # rendered to render this properly.
            s = s + '{' + printer._print(self.args[i], *args) + '}'
            if isinstance(self.args[i], (Add, Mul)):
                s = s + '\\right)'
            if i != length - 1:
                s = s + '\\otimes '
        return s

    def doit(self, **hints):
        # Evaluate each factor and rebuild the product.
        return TensorProduct(*[item.doit(**hints) for item in self.args])

    def _eval_expand_tensorproduct(self, **hints):
        """Distribute TensorProducts across addition."""
        args = self.args
        add_args = []
        stop = False  # NOTE(review): never used here — leftover from an older version.
        for i in range(len(args)):
            if isinstance(args[i], Add):
                # Distribute over the first Add found; recursion handles the
                # remaining Adds in the newly built products.
                for aa in args[i].args:
                    tp = TensorProduct(*args[:i] + (aa,) + args[i + 1:])
                    if isinstance(tp, TensorProduct):
                        tp = tp._eval_expand_tensorproduct()
                    add_args.append(tp)
                break
        if add_args:
            return Add(*add_args)
        else:
            return self

    def _eval_trace(self, **kwargs):
        # Trace over all factors, or only over the factor positions named in
        # the optional ``indices`` keyword (other factors pass through).
        indices = kwargs.get('indices', None)
        exp = tensor_product_simp(self)
        if indices is None or len(indices) == 0:
            return Mul(*[Tr(arg).doit() for arg in exp.args])
        else:
            return Mul(*[Tr(value).doit() if idx in indices else value
                         for idx, value in enumerate(exp.args)])
def tensor_product_simp_Mul(e):
    """Simplify a Mul with TensorProducts.

    Currently the main use of this is to simplify a ``Mul`` of
    ``TensorProduct``s to a ``TensorProduct`` of ``Mul``s. It only works for
    relatively simple cases where the initial ``Mul`` only has scalars and raw
    ``TensorProduct``s, not ``Add``, ``Pow``, ``Commutator``s of
    ``TensorProduct``s.

    Parameters
    ==========
    e : Expr
        A ``Mul`` of ``TensorProduct``s to be simplified.

    Returns
    =======
    e : Expr
        A ``TensorProduct`` of ``Mul``s.

    Examples
    ========
    This is an example of the type of simplification that this function
    performs::

        >>> from sympy.physics.quantum.tensorproduct import \
                tensor_product_simp_Mul, TensorProduct
        >>> from sympy import Symbol
        >>> A = Symbol('A',commutative=False)
        >>> B = Symbol('B',commutative=False)
        >>> C = Symbol('C',commutative=False)
        >>> D = Symbol('D',commutative=False)
        >>> e = TensorProduct(A,B)*TensorProduct(C,D)
        >>> e
        AxB*CxD
        >>> tensor_product_simp_Mul(e)
        (A*C)x(B*D)
    """
    # TODO: This won't work with Muls that have other composites of
    # TensorProducts, like an Add, Pow, Commutator, etc.
    # TODO: This only works for the equivalent of single Qbit gates.
    if not isinstance(e, Mul):
        return e
    c_part, nc_part = e.args_cnc()
    # With fewer than two non-commutative factors there is nothing to merge.
    if len(nc_part) < 2:
        return e
    if not e.has(TensorProduct):
        return e
    current = nc_part[0]
    if not isinstance(current, TensorProduct):
        raise TypeError('TensorProduct expected, got: %r' % current)
    n_terms = len(current.args)
    new_args = list(current.args)
    # ``factor`` replaces the original loop variable ``next``, which shadowed
    # the builtin of the same name.
    for factor in nc_part[1:]:
        # TODO: check the hilbert spaces of factor and current here.
        if isinstance(factor, TensorProduct):
            if n_terms != len(factor.args):
                raise QuantumError(
                    'TensorProducts of different lengths: %r and %r' %
                    (current, factor)
                )
            # Merge slot-wise: (A x B) * (C x D) -> (A*C) x (B*D).
            for i in range(n_terms):
                new_args[i] = new_args[i] * factor.args[i]
        else:
            # this won't quite work as we don't want the bare factor inside
            # the TensorProduct (known limitation, kept as in the original)
            for i in range(n_terms):
                new_args[i] = new_args[i] * factor
        current = factor
    return Mul(*c_part) * TensorProduct(*new_args)
def tensor_product_simp(e, **hints):
    """Try to simplify and combine TensorProducts.

    In general this will try to pull expressions inside of ``TensorProducts``.
    Only fairly simple cases are handled: products built from scalars and raw
    ``TensorProducts``, not ``Add``, ``Pow``, ``Commutators`` of
    ``TensorProducts``. It is best to see what it does by showing examples.

    Examples
    ========

    >>> from sympy.physics.quantum import tensor_product_simp
    >>> from sympy.physics.quantum import TensorProduct
    >>> from sympy import Symbol
    >>> A = Symbol('A',commutative=False)
    >>> B = Symbol('B',commutative=False)
    >>> C = Symbol('C',commutative=False)
    >>> D = Symbol('D',commutative=False)

    First see what happens to products of tensor products:

    >>> e = TensorProduct(A,B)*TensorProduct(C,D)
    >>> e
    AxB*CxD
    >>> tensor_product_simp(e)
    (A*C)x(B*D)

    This is the core logic of this function, and it works inside, powers, sums,
    commutators and anticommutators as well:

    >>> tensor_product_simp(e**2)
    (A*C)x(B*D)**2
    """
    # Powers and plain products get bespoke treatment; the remaining
    # container types are simply rebuilt from their simplified arguments.
    if isinstance(e, Pow):
        return tensor_product_simp(e.base) ** e.exp
    if isinstance(e, Mul):
        return tensor_product_simp_Mul(e)
    for container in (Add, Commutator, AntiCommutator):
        if isinstance(e, container):
            return container(*[tensor_product_simp(arg) for arg in e.args])
    return e
| {
"repo_name": "vipulroxx/sympy",
"path": "sympy/physics/quantum/tensorproduct.py",
"copies": "64",
"size": "13572",
"license": "bsd-3-clause",
"hash": 5962044668408874000,
"line_mean": 33.534351145,
"line_max": 100,
"alpha_frac": 0.5523135868,
"autogenerated": false,
"ratio": 3.9545454545454546,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00008961182136186369,
"num_lines": 393
} |
"""Abstract tensor product."""
from sympy import Expr, Add, Mul, Matrix, Pow, sympify
from sympy.printing.pretty.stringpict import prettyForm
from sympy.physics.quantum.qexpr import QuantumError
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.commutator import Commutator
from sympy.physics.quantum.anticommutator import AntiCommutator
from sympy.physics.quantum.matrixutils import (
numpy_ndarray,
scipy_sparse_matrix,
matrix_tensor_product
)
__all__ = [
'TensorProduct',
'tensor_product_simp'
]
#-----------------------------------------------------------------------------
# Tensor product
#-----------------------------------------------------------------------------
class TensorProduct(Expr):
    """The tensor product of two or more arguments.
    For matrices, this uses ``matrix_tensor_product`` to compute the Kronecker
    or tensor product matrix. For other objects a symbolic ``TensorProduct``
    instance is returned. The tensor product is a non-commutative
    multiplication that is used primarily with operators and states in quantum
    mechanics.
    Currently, the tensor product distinguishes between commutative and non-
    commutative arguments. Commutative arguments are assumed to be scalars and
    are pulled out in front of the ``TensorProduct``. Non-commutative arguments
    remain in the resulting ``TensorProduct``.
    Parameters
    ==========
    args : tuple
        A sequence of the objects to take the tensor product of.
    Examples
    ========
    Start with a simple tensor product of sympy matrices::
        >>> from sympy import I, Matrix, symbols
        >>> from sympy.physics.quantum import TensorProduct
        >>> m1 = Matrix([[1,2],[3,4]])
        >>> m2 = Matrix([[1,0],[0,1]])
        >>> TensorProduct(m1, m2)
        [1, 0, 2, 0]
        [0, 1, 0, 2]
        [3, 0, 4, 0]
        [0, 3, 0, 4]
        >>> TensorProduct(m2, m1)
        [1, 2, 0, 0]
        [3, 4, 0, 0]
        [0, 0, 1, 2]
        [0, 0, 3, 4]
    We can also construct tensor products of non-commutative symbols:
        >>> from sympy import Symbol
        >>> A = Symbol('A',commutative=False)
        >>> B = Symbol('B',commutative=False)
        >>> tp = TensorProduct(A, B)
        >>> tp
        AxB
    We can take the dagger of a tensor product (note the order does NOT reverse
    like the dagger of a normal product):
        >>> from sympy.physics.quantum import Dagger
        >>> Dagger(tp)
        Dagger(A)xDagger(B)
    Expand can be used to distribute a tensor product across addition:
        >>> C = Symbol('C',commutative=False)
        >>> tp = TensorProduct(A+B,C)
        >>> tp
        (A + B)xC
        >>> tp.expand(tensorproduct=True)
        AxC + BxC
    """
    def __new__(cls, *args, **assumptions):
        # Concrete matrices short-circuit to an explicit Kronecker product.
        if isinstance(args[0], (Matrix, numpy_ndarray, scipy_sparse_matrix)):
            return matrix_tensor_product(*args)
        # Split each argument into commutative (scalar) and non-commutative
        # parts; scalars multiply the whole product from the front.
        c_part, new_args = cls.flatten(sympify(args))
        c_part = Mul(*c_part)
        if len(new_args) == 0:
            # Purely scalar input: no TensorProduct is constructed.
            return c_part
        elif len(new_args) == 1:
            # A one-factor tensor product collapses to plain multiplication.
            return c_part*new_args[0]
        else:
            # Mark the instance non-commutative via the assumptions dict
            # (older sympy API).
            tp = Expr.__new__(cls, *new_args, **{'commutative': False})
            return c_part*tp

    @classmethod
    def flatten(cls, args):
        # Returns (list of commutative scalars pulled out of all arguments,
        # list of per-argument non-commutative Mul remainders).
        # TODO: disallow nested TensorProducts.
        c_part = []
        nc_parts = []
        for arg in args:
            cp, ncp = arg.args_cnc()
            c_part.extend(list(cp))
            nc_parts.append(Mul._from_args(ncp))
        return c_part, nc_parts

    def _eval_dagger(self):
        # Dagger distributes over the factors WITHOUT reversing their order.
        return TensorProduct(*[Dagger(i) for i in self.args])

    def _eval_rewrite(self, pattern, rule, **hints):
        # Rewrite each factor, then redistribute the product over addition.
        sargs = self.args
        terms = [ t._eval_rewrite(pattern, rule, **hints) for t in sargs]
        return TensorProduct(*terms).expand(tensorproduct=True)

    def _sympystr(self, printer, *args):
        # Plain-text form: factors joined by 'x', compound factors in parens.
        from sympy.printing.str import sstr
        length = len(self.args)
        s = ''
        for i in range(length):
            if isinstance(self.args[i], (Add, Pow, Mul)):
                s = s + '('
            s = s + sstr(self.args[i])
            if isinstance(self.args[i], (Add, Pow, Mul)):
                s = s + ')'
            if i != length-1:
                s = s + 'x'
        return s

    def _pretty(self, printer, *args):
        # Pretty form: factors separated by the circled-times operator.
        length = len(self.args)
        pform = printer._print('', *args)
        for i in range(length):
            next_pform = printer._print(self.args[i], *args)
            if isinstance(self.args[i], (Add, Mul)):
                next_pform = prettyForm(
                    *next_pform.parens(left='(', right=')')
                )
            pform = prettyForm(*pform.right(next_pform))
            if i != length-1:
                pform = prettyForm(*pform.right(u'\u2a02' + u' '))
        return pform

    def _latex(self, printer, *args):
        # LaTeX form: factors joined by \otimes.
        length = len(self.args)
        s = ''
        for i in range(length):
            if isinstance(self.args[i], (Add, Mul)):
                s = s + '\\left('
            # The extra {} brackets are needed to get matplotlib's latex
            # rendered to render this properly.
            s = s + '{' + printer._print(self.args[i], *args) + '}'
            if isinstance(self.args[i], (Add, Mul)):
                s = s + '\\right)'
            if i != length-1:
                s = s + '\\otimes '
        return s

    def doit(self, **hints):
        # Evaluate each factor and rebuild the product.
        return TensorProduct(*[item.doit(**hints) for item in self.args])

    def _eval_expand_tensorproduct(self, **hints):
        """Distribute TensorProducts across addition."""
        args = self.args
        add_args = []
        stop = False
        for i in range(len(args)):
            if isinstance(args[i], Add):
                # Distribute over the first Add found; the recursive
                # .expand() on each new product handles remaining Adds.
                for aa in args[i].args:
                    add_args.append(
                        TensorProduct(
                            *args[:i]+(aa,)+args[i+1:]
                        ).expand(**hints)
                    )
                stop = True
            if stop: break
        if add_args:
            return Add(*add_args).expand(**hints)
        else:
            return self

    def expand(self, **hints):
        # Expand inside each factor first, then apply Expr.expand to the
        # rebuilt product (which triggers _eval_expand_tensorproduct).
        tp = TensorProduct(*[sympify(item).expand(**hints) for item in self.args])
        return Expr.expand(tp, **hints)
def tensor_product_simp_Mul(e):
    """Simplify a Mul with TensorProducts.

    Currently the main use of this is to simplify a ``Mul`` of
    ``TensorProduct``s to a ``TensorProduct`` of ``Mul``s. It only works for
    relatively simple cases where the initial ``Mul`` only has scalars and raw
    ``TensorProduct``s, not ``Add``, ``Pow``, ``Commutator``s of
    ``TensorProduct``s.

    Parameters
    ==========
    e : Expr
        A ``Mul`` of ``TensorProduct``s to be simplified.

    Returns
    =======
    e : Expr
        A ``TensorProduct`` of ``Mul``s.

    Examples
    ========
    This is an example of the type of simplification that this function
    performs::

        >>> from sympy.physics.quantum.tensorproduct import tensor_product_simp_Mul, TensorProduct
        >>> from sympy import Symbol
        >>> A = Symbol('A',commutative=False)
        >>> B = Symbol('B',commutative=False)
        >>> C = Symbol('C',commutative=False)
        >>> D = Symbol('D',commutative=False)
        >>> e = TensorProduct(A,B)*TensorProduct(C,D)
        >>> e
        AxB*CxD
        >>> tensor_product_simp_Mul(e)
        (A*C)x(B*D)
    """
    # TODO: This won't work with Muls that have other composites of
    # TensorProducts, like an Add, Pow, Commutator, etc.
    # TODO: This only works for the equivalent of single Qbit gates.
    if not isinstance(e, Mul):
        return e
    c_part, nc_part = e.args_cnc()
    # With fewer than two non-commutative factors there is nothing to merge.
    if len(nc_part) < 2:
        return e
    if not e.has(TensorProduct):
        return e
    current = nc_part[0]
    if not isinstance(current, TensorProduct):
        raise TypeError('TensorProduct expected, got: %r' % current)
    n_terms = len(current.args)
    new_args = list(current.args)
    # ``factor`` replaces the original loop variable ``next``, which shadowed
    # the builtin of the same name.
    for factor in nc_part[1:]:
        # TODO: check the hilbert spaces of factor and current here.
        if isinstance(factor, TensorProduct):
            if n_terms != len(factor.args):
                raise QuantumError(
                    'TensorProducts of different lengths: %r and %r' %
                    (current, factor)
                )
            # Merge slot-wise: (A x B) * (C x D) -> (A*C) x (B*D).
            for i in range(n_terms):
                new_args[i] = new_args[i] * factor.args[i]
        else:
            # this won't quite work as we don't want the bare factor inside
            # the TensorProduct (known limitation, kept as in the original)
            for i in range(n_terms):
                new_args[i] = new_args[i] * factor
        current = factor
    return Mul(*c_part) * TensorProduct(*new_args)
def tensor_product_simp(e, **hints):
    """Try to simplify and combine TensorProducts.

    In general this will try to pull expressions inside of ``TensorProducts``.
    Only fairly simple cases are handled: products built from scalars and raw
    ``TensorProducts``, not ``Add``, ``Pow``, ``Commutators`` of
    ``TensorProducts``. It is best to see what it does by showing examples.

    Examples
    ========

    >>> from sympy.physics.quantum import tensor_product_simp
    >>> from sympy.physics.quantum import TensorProduct
    >>> from sympy import Symbol
    >>> A = Symbol('A',commutative=False)
    >>> B = Symbol('B',commutative=False)
    >>> C = Symbol('C',commutative=False)
    >>> D = Symbol('D',commutative=False)

    First see what happens to products of tensor products:

    >>> e = TensorProduct(A,B)*TensorProduct(C,D)
    >>> e
    AxB*CxD
    >>> tensor_product_simp(e)
    (A*C)x(B*D)

    This is the core logic of this function, and it works inside, powers, sums,
    commutators and anticommutators as well:

    >>> tensor_product_simp(e**2)
    (A*C)x(B*D)**2
    """
    # Powers and plain products get bespoke treatment; the remaining
    # container types are simply rebuilt from their simplified arguments.
    if isinstance(e, Pow):
        return tensor_product_simp(e.base) ** e.exp
    if isinstance(e, Mul):
        return tensor_product_simp_Mul(e)
    for container in (Add, Commutator, AntiCommutator):
        if isinstance(e, container):
            return container(*[tensor_product_simp(arg) for arg in e.args])
    return e
| {
"repo_name": "ichuang/sympy",
"path": "sympy/physics/quantum/tensorproduct.py",
"copies": "1",
"size": "10569",
"license": "bsd-3-clause",
"hash": -4380075969612379000,
"line_mean": 32.3406940063,
"line_max": 98,
"alpha_frac": 0.5568171066,
"autogenerated": false,
"ratio": 3.9217068645640074,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4978523971164007,
"avg_score": null,
"num_lines": null
} |
"""Abstract tensor product."""
from sympy import Expr, Add, Mul, Matrix, Pow, sympify
from sympy.printing.pretty.stringpict import prettyForm
from sympy.physics.quantum.qexpr import QuantumError, split_commutative_parts
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.commutator import Commutator
from sympy.physics.quantum.anticommutator import AntiCommutator
from sympy.physics.quantum.matrixutils import (
numpy_ndarray,
scipy_sparse_matrix,
matrix_tensor_product
)
__all__ = [
'TensorProduct',
'tensor_product_simp'
]
#-----------------------------------------------------------------------------
# Tensor product
#-----------------------------------------------------------------------------
class TensorProduct(Expr):
    """The tensor product of two or more arguments.
    For matrices, this uses ``matrix_tensor_product`` to compute the
    Kronecker or tensor product matrix. For other objects a symbolic
    ``TensorProduct`` instance is returned. The tensor product is a
    non-commutative multiplication that is used primarily with operators
    and states in quantum mechanics.
    Currently, the tensor product distinguishes between commutative and non-
    commutative arguments. Commutative arguments are assumed to be scalars
    and are pulled out in front of the ``TensorProduct``. Non-commutative
    arguments remain in the resulting ``TensorProduct``.
    Parameters
    ==========
    args : tuple
        A sequence of the objects to take the tensor product of.
    Examples
    ========
    Start with a simple tensor product of sympy matrices::
        >>> from sympy import I, Matrix, symbols
        >>> from sympy.physics.quantum import TensorProduct
        >>> m1 = Matrix([[1,2],[3,4]])
        >>> m2 = Matrix([[1,0],[0,1]])
        >>> TensorProduct(m1, m2)
        [1, 0, 2, 0]
        [0, 1, 0, 2]
        [3, 0, 4, 0]
        [0, 3, 0, 4]
        >>> TensorProduct(m2, m1)
        [1, 2, 0, 0]
        [3, 4, 0, 0]
        [0, 0, 1, 2]
        [0, 0, 3, 4]
    We can also construct tensor products of non-commutative symbols::
        >>> from sympy import Symbol
        >>> A = Symbol('A',commutative=False)
        >>> B = Symbol('B',commutative=False)
        >>> tp = TensorProduct(A, B)
        >>> tp
        AxB
    We can take the dagger of a tensor product (note the order does NOT
    reverse like the dagger of a normal product)::
        >>> from sympy.physics.quantum import Dagger
        >>> Dagger(tp)
        Dagger(A)xDagger(B)
    Expand can be used to distribute a tensor product across addition::
        >>> C = Symbol('C',commutative=False)
        >>> tp = TensorProduct(A+B,C)
        >>> tp
        (A + B)xC
        >>> tp.expand(tensorproduct=True)
        AxC + BxC
    """
    def __new__(cls, *args, **assumptions):
        # Concrete matrices short-circuit to an explicit Kronecker product.
        if isinstance(args[0], (Matrix, numpy_ndarray, scipy_sparse_matrix)):
            return matrix_tensor_product(*args)
        # NOTE(review): unlike later versions, args is NOT sympified here;
        # flatten sympifies scalars individually below.
        c_part, new_args = cls.flatten(args)
        c_part = Mul(*c_part)
        if len(new_args) == 0:
            # Purely scalar input: no TensorProduct is constructed.
            return c_part
        elif len(new_args) == 1:
            # A one-factor tensor product collapses to plain multiplication.
            return c_part*new_args[0]
        else:
            # Mark the instance non-commutative via the assumptions dict
            # (older sympy API).
            tp = Expr.__new__(cls, *new_args, **{'commutative': False})
            return c_part*tp

    @classmethod
    def flatten(cls, args):
        # Returns (list of commutative scalars pulled out of all arguments,
        # list of per-argument non-commutative remainders).
        # TODO: disallow nested TensorProducts.
        c_part = []
        nc_parts = []
        for arg in args:
            if isinstance(arg, Mul):
                cp, ncp = split_commutative_parts(arg)
                ncp = Mul(*ncp)
            else:
                # Non-Mul arguments are wholly commutative or wholly not;
                # a commutative argument contributes the neutral factor 1.
                if sympify(arg).is_commutative:
                    cp = [arg]; ncp = 1
                else:
                    cp = []; ncp = arg
            c_part.extend(cp)
            nc_parts.append(ncp)
        return c_part, nc_parts

    def _eval_dagger(self):
        # Dagger distributes over the factors WITHOUT reversing their order.
        return TensorProduct(*[Dagger(i) for i in self.args])

    def _eval_rewrite(self, pattern, rule, **hints):
        # Rewrite each factor, then redistribute the product over addition.
        sargs = self.args
        terms = [ t._eval_rewrite(pattern, rule, **hints) for t in sargs]
        return TensorProduct(*terms).expand(tensorproduct=True)

    def _sympystr(self, printer, *args):
        # Plain-text form: factors joined by 'x', compound factors in parens.
        from sympy.printing.str import sstr
        length = len(self.args)
        s = ''
        for i in range(length):
            if isinstance(self.args[i], (Add, Pow, Mul)):
                s = s + '('
            s = s + sstr(self.args[i])
            if isinstance(self.args[i], (Add, Pow, Mul)):
                s = s + ')'
            if i != length-1:
                s = s + 'x'
        return s

    def _pretty(self, printer, *args):
        # Pretty form: factors separated by the circled-times operator.
        length = len(self.args)
        pform = printer._print('', *args)
        for i in range(length):
            next_pform = printer._print(self.args[i], *args)
            if isinstance(self.args[i], (Add, Mul)):
                next_pform = prettyForm(
                    *next_pform.parens(left='(', right=')')
                )
            pform = prettyForm(*pform.right(next_pform))
            if i != length-1:
                pform = prettyForm(*pform.right(u'\u2a02' + u' '))
        return pform

    def _latex(self, printer, *args):
        # LaTeX form: factors joined by \otimes.
        length = len(self.args)
        s = ''
        for i in range(length):
            if isinstance(self.args[i], (Add, Mul)):
                s = s + '\\left('
            # The extra {} brackets are needed to get matplotlib's latex
            # rendered to render this properly.
            s = s + '{' + printer._print(self.args[i], *args) + '}'
            if isinstance(self.args[i], (Add, Mul)):
                s = s + '\\right)'
            if i != length-1:
                s = s + '\\otimes '
        return s

    def doit(self, **hints):
        # Evaluate each factor and rebuild the product.
        return TensorProduct(*[item.doit(**hints) for item in self.args])

    def _eval_expand_tensorproduct(self, **hints):
        """Distribute TensorProducts across addition."""
        args = self.args
        add_args = []
        stop = False
        for i in range(len(args)):
            if isinstance(args[i], Add):
                # Distribute over the first Add found; the recursive
                # .expand() on each new product handles remaining Adds.
                for aa in args[i].args:
                    add_args.append(
                        TensorProduct(
                            *args[:i]+(aa,)+args[i+1:]
                        ).expand(**hints)
                    )
                stop = True
            if stop: break
        if add_args:
            return Add(*add_args).expand(**hints)
        else:
            return self

    def expand(self, **hints):
        # Expand inside each factor first, then apply Expr.expand to the
        # rebuilt product (which triggers _eval_expand_tensorproduct).
        tp = TensorProduct(*[sympify(item).expand(**hints) for item in self.args])
        return Expr.expand(tp, **hints)
def tensor_product_simp_Mul(e):
    """Simplify a Mul with TensorProducts.

    Currently the main use of this is to simplify a ``Mul`` of
    ``TensorProduct``s to a ``TensorProduct`` of ``Mul``s. It only works for
    relatively simple cases where the initial ``Mul`` only has scalars and
    raw ``TensorProduct``s, not ``Add``, ``Pow``, ``Commutator``s of
    ``TensorProduct``s.

    Parameters
    ==========
    e : Expr
        A ``Mul`` of ``TensorProduct``s to be simplified.

    Returns
    =======
    e : Expr
        A ``TensorProduct`` of ``Mul``s.

    Examples
    ========
    This is an example of the type of simplification that this function
    performs::

        >>> from sympy.physics.quantum.tensorproduct import tensor_product_simp_Mul, TensorProduct
        >>> from sympy import Symbol
        >>> A = Symbol('A',commutative=False)
        >>> B = Symbol('B',commutative=False)
        >>> C = Symbol('C',commutative=False)
        >>> D = Symbol('D',commutative=False)
        >>> e = TensorProduct(A,B)*TensorProduct(C,D)
        >>> e
        AxB*CxD
        >>> tensor_product_simp_Mul(e)
        (A*C)x(B*D)
    """
    # TODO: This won't work with Muls that have other composites of
    # TensorProducts, like an Add, Pow, Commutator, etc.
    # TODO: This only works for the equivalent of single Qbit gates.
    if not isinstance(e, Mul):
        return e
    c_part, nc_part = split_commutative_parts(e)
    # With fewer than two non-commutative factors there is nothing to merge.
    if len(nc_part) < 2:
        return e
    if not e.has(TensorProduct):
        return e
    current = nc_part[0]
    if not isinstance(current, TensorProduct):
        raise TypeError('TensorProduct expected, got: %r' % current)
    n_terms = len(current.args)
    new_args = list(current.args)
    # ``factor`` replaces the original loop variable ``next``, which shadowed
    # the builtin of the same name.
    for factor in nc_part[1:]:
        # TODO: check the hilbert spaces of factor and current here.
        if isinstance(factor, TensorProduct):
            if n_terms != len(factor.args):
                raise QuantumError(
                    'TensorProducts of different lengths: %r and %r' %
                    (current, factor)
                )
            # Merge slot-wise: (A x B) * (C x D) -> (A*C) x (B*D).
            for i in range(n_terms):
                new_args[i] = new_args[i] * factor.args[i]
        else:
            # this won't quite work as we don't want the bare factor inside
            # the TensorProduct (known limitation, kept as in the original)
            for i in range(n_terms):
                new_args[i] = new_args[i] * factor
        current = factor
    return Mul(*c_part) * TensorProduct(*new_args)
def tensor_product_simp(e, **hints):
    """Try to simplify and combine TensorProducts.

    In general this will try to pull expressions inside of ``TensorProducts``.
    Only fairly simple cases are handled: products built from scalars and raw
    ``TensorProduct``s, not ``Add``, ``Pow``, ``Commutator``s of
    ``TensorProduct``s. It is best to see what it does by showing examples.

    Examples
    ========

    >>> from sympy.physics.quantum import tensor_product_simp
    >>> from sympy.physics.quantum import TensorProduct
    >>> from sympy import Symbol
    >>> A = Symbol('A',commutative=False)
    >>> B = Symbol('B',commutative=False)
    >>> C = Symbol('C',commutative=False)
    >>> D = Symbol('D',commutative=False)

    First see what happens to products of tensor products::

    >>> e = TensorProduct(A,B)*TensorProduct(C,D)
    >>> e
    AxB*CxD
    >>> tensor_product_simp(e)
    (A*C)x(B*D)

    This is the core logic of this function, and it works inside, powers,
    sums, commutators and anticommutators as well::

    >>> tensor_product_simp(e**2)
    (A*C)x(B*D)**2
    """
    # Powers and plain products get bespoke treatment; the remaining
    # container types are simply rebuilt from their simplified arguments.
    if isinstance(e, Pow):
        return tensor_product_simp(e.base) ** e.exp
    if isinstance(e, Mul):
        return tensor_product_simp_Mul(e)
    for container in (Add, Commutator, AntiCommutator):
        if isinstance(e, container):
            return container(*[tensor_product_simp(arg) for arg in e.args])
    return e
| {
"repo_name": "Cuuuurzel/KiPyCalc",
"path": "sympy_old/physics/quantum/tensorproduct.py",
"copies": "2",
"size": "10892",
"license": "mit",
"hash": -7167864914925618000,
"line_mean": 32.9314641745,
"line_max": 98,
"alpha_frac": 0.5498531032,
"autogenerated": false,
"ratio": 3.957848837209302,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5507701940409302,
"avg_score": null,
"num_lines": null
} |
"""Abstract TestCase for all resources related tests."""
# pylint: disable=attribute-defined-outside-init
# pylint: disable=too-many-public-methods,invalid-name
from __future__ import absolute_import
import time
from threading import current_thread
from django.test.testcases import TransactionTestCase
from rotest.core.result.result import Result
from rotest.management.base_resource import BaseResource
from rotest.core.result.handlers.db_handler import DBHandler
from rotest.management.models.ut_models import (DemoResourceData,
DemoComplexResourceData)
class BaseResourceManagementTest(TransactionTestCase):
    """Abstract TestCase for resources related tests.

    Deriving from ``TransactionTestCase`` (rather than the plain ``TestCase``)
    allows several threads to touch the DB concurrently, which tests that use
    the resource manager (directly or through a Case) require. Such tests are
    expected to start the resource manager server in a separate thread during
    setUp and stop it during tearDown.
    """
    @staticmethod
    def create_result(main_test):
        """Create and start a DB-backed result object for the given test.

        Args:
            main_test(TestSuite / TestCase): the test to be ran.

        Returns:
            Result. a new initiated result object.
        """
        db_result = Result(outputs=[DBHandler.NAME], main_test=main_test)
        db_result.startTestRun()
        return db_result
class ThreadedResource(BaseResource):
    """UT resource whose initialization happens on a separate thread."""
    DATA_CLASS = DemoResourceData
    EXCEPTION_MESSAGE = "Intentional Error in initialization"
    RAISE_EXCEPTION = False
    THREADS = []

    def validate(self):
        """Mock validate - record our thread, then block until a peer arrives."""
        self.THREADS.append(current_thread().ident)
        # Busy-wait (with a short sleep) until a second thread registers.
        while not len(self.THREADS) > 1:
            time.sleep(0.1)

    def initialize(self):
        """Mock initialize - fail on demand via the RAISE_EXCEPTION flag."""
        if not self.RAISE_EXCEPTION:
            return
        raise RuntimeError(self.EXCEPTION_MESSAGE)
class ThreadedParent(BaseResource):
    """Fake complex resource class, used in multithreaded resource tests.

    Attributes:
        demo1 (ThreadedResource): sub resource pointer.
        demo2 (ThreadedResource): sub resource pointer.
    """
    DATA_CLASS = DemoComplexResourceData
    PARALLEL_INITIALIZATION = True

    def create_sub_resources(self):
        """Build the two threaded sub-resources and return them as a mapping."""
        sub_resources = {
            'demo1': ThreadedResource(data=self.data.demo1),
            'demo2': ThreadedResource(data=self.data.demo2),
        }
        self.demo1 = sub_resources['demo1']
        self.demo2 = sub_resources['demo2']
        return sub_resources
| {
"repo_name": "gregoil/rotest",
"path": "tests/management/resource_base_test.py",
"copies": "1",
"size": "2749",
"license": "mit",
"hash": -4591869192664641500,
"line_mean": 35.1710526316,
"line_max": 76,
"alpha_frac": 0.6984357948,
"autogenerated": false,
"ratio": 4.419614147909968,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 76
} |
"""Abstract tests for container implementations."""
# This file is part of geneparse.
#
# The MIT License (MIT)
#
# Copyright (c) 2017 Pharmacogenomics Centre
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import random
import unittest
from pkg_resources import resource_filename
import numpy as np
from pybgen.tests.truths import truths
from ..readers import bgen
from ..core import Variant
from .generic_tests import TestContainer
# Path to the BGEN fixture shipped inside pybgen's test data package.
BGEN_FILE = resource_filename(
    "pybgen.tests",
    os.path.join("data", "example.8bits.bgen"),
)
class TestBGEN(TestContainer, unittest.TestCase):
    """Concrete container tests for the BGEN reader.

    Expected values come from pybgen's own truth tables for the bundled
    example.8bits.bgen file.
    """
    @classmethod
    def setUpClass(cls):
        """Load the truth table once and derive the expected Variant objects."""
        # NOTE: a lambda stored on the class becomes a bound method, so the
        # unused ``x`` parameter absorbs ``self`` when called as
        # ``self.reader_f()``.
        cls.reader_f = lambda x: bgen.BGENReader(BGEN_FILE)
        # Using truths from pybgen
        cls.truth = truths["dosage"]["example.8bits.truths.txt.bz2"]
        # The expected variant object
        cls.expected_variants = {
            name: Variant(
                name=name,
                chrom=int(info["variant"].chrom),
                pos=info["variant"].pos,
                alleles=[info["variant"].a1, info["variant"].a2],
            ) for name, info in cls.truth["variants"].items()
        }
    def test_get_samples(self):
        """The reader reports the sample list from the truth table."""
        with self.reader_f() as f:
            self.assertEqual(
                list(self.truth["samples"]), f.get_samples()
            )
    def test_iter_variants(self):
        """Test that all variants are iterated over"""
        seen = set()
        with self.reader_f() as f:
            for v in f.iter_variants():
                seen.add(v.name)
                self.assertEqual(v, self.expected_variants[v.name])
        self.assertEqual(seen, set(self.expected_variants.keys()))
    def test_iter_genotypes(self):
        """Test that the genotypes are read correctly"""
        seen = set()
        with self.reader_f() as f:
            for g in f.iter_genotypes():
                # Checking the variant
                variant = g.variant
                self.assertEqual(variant, self.expected_variants[variant.name])
                seen.add(variant.name)
                # Checking the genotypes
                expected = self.truth["variants"][variant.name]
                self.assertEqual(g.reference, expected["variant"].a1)
                self.assertEqual(g.coded, expected["variant"].a2)
                np.testing.assert_array_almost_equal(
                    g.genotypes, expected["data"],
                )
        self.assertEqual(seen, set(self.expected_variants.keys()))
    @unittest.skip("Not implemented")
    def test_multiallelic_identifier(self):
        """Test that the multiallelic flag gets set when iterating"""
        pass
    def test_get_biallelic_variant(self):
        """Test simplest possible case of variant accession."""
        random_variant = random.choice(list(self.expected_variants.values()))
        v = self.expected_variants[random_variant.name]
        with self.reader_f() as f:
            # Getting the results
            results = f.get_variant_genotypes(v)
            if v.name in {"RSID_10", "RSID_100"}:
                # Those have the same location and alleles
                self.assertEqual(2, len(results))
            else:
                # The remaining variants are unique
                self.assertEqual(1, len(results))
            for g in results:
                # Checking the variant is the same
                self.assertEqual(g.variant, random_variant)
                # Checking the genotypes
                expected = self.truth["variants"][g.variant.name]
                self.assertEqual(g.reference, expected["variant"].a1)
                self.assertEqual(g.coded, expected["variant"].a2)
                np.testing.assert_array_almost_equal(
                    g.genotypes, expected["data"],
                )
    def test_get_all_biallelic_variant(self):
        """Test simplest possible case of variant accession."""
        # Same as test_get_biallelic_variant, but over every variant.
        for random_variant in self.expected_variants.values():
            v = self.expected_variants[random_variant.name]
            with self.reader_f() as f:
                results = f.get_variant_genotypes(v)
                if v.name in {"RSID_10", "RSID_100"}:
                    self.assertEqual(2, len(results))
                else:
                    self.assertEqual(1, len(results))
                for g in results:
                    # Checking the variant is the same
                    self.assertEqual(g.variant, random_variant)
                    # Checking the genotypes
                    expected = self.truth["variants"][g.variant.name]
                    self.assertEqual(g.reference, expected["variant"].a1)
                    self.assertEqual(g.coded, expected["variant"].a2)
                    np.testing.assert_array_almost_equal(
                        g.genotypes, expected["data"],
                        err_msg="Difference for {}/{}".format(
                            g.variant.name, random_variant.name,
                        ),
                    )
    def test_get_na_biallelic_variant(self):
        """Test asking for an unavailable biallelic variant."""
        v = random.choice(list(self.expected_variants.values())).copy()
        v.alleles = v._encode_alleles(["NO", "ALLELE"])
        with self.reader_f() as f:
            g = f.get_variant_genotypes(v)
            self.assertEqual([], g)
    @unittest.skip("Not implemented")
    def test_get_multiallelic_variant_by_locus(self):
        """Test getting a multiallelic variant using a locus."""
        pass
    @unittest.skip("Not implemented")
    def test_get_multiallelic_variant_by_specific(self):
        """Find a biallelic variant at a multiallelic locus."""
        pass
    @unittest.skip("Not implemented")
    def test_get_multiallelic_variant_by_unavailable(self):
        """Test asking for an unavailable biallelic variant at a multiallelic
        locus.
        """
        pass
    @unittest.skip("Not implemented")
    def test_get_multiallelic_variant_by_multiallelic(self):
        """Test asking for a multiallelic variant."""
        pass
    def test_get_variant_in_region(self):
        """Test getting a variant by region."""
        seen = set()
        with self.reader_f() as f:
            for g in f.get_variants_in_region("1", 67000, 70999):
                # Checking the variant
                self.assertEqual(
                    g.variant, self.expected_variants[g.variant.name],
                )
                # Checking the genotypes
                expected = self.truth["variants"][g.variant.name]
                self.assertEqual(g.reference, expected["variant"].a1)
                self.assertEqual(g.coded, expected["variant"].a2)
                np.testing.assert_array_almost_equal(
                    g.genotypes, expected["data"],
                )
                seen.add(g.variant.name)
        # Recompute the expected member set from the truth table (the
        # region bounds here are inclusive on both ends).
        expected = set()
        for name in self.truth["variant_set"]:
            v = self.truth["variants"][name]["variant"]
            if v.pos >= 67000 and v.pos <= 70999:
                expected.add(name)
        self.assertEqual(seen, expected)
    def test_get_variant_in_empty_region(self):
        """Test getting an empty region."""
        with self.reader_f() as f:
            g = list(f.get_variants_in_region("2", 46521000, 46521005))
            self.assertEqual([], g)
    def test_get_variant_by_name(self):
        """Test getting a variant by name."""
        random_variant = random.choice(list(self.expected_variants.values()))
        with self.reader_f() as f:
            g = f.get_variant_by_name(random_variant.name)
            self.assertEqual(len(g), 1)
            g = g.pop()
            # Checking the variant
            self.assertEqual(g.variant, random_variant)
            # Checking the genotypes
            expected = self.truth["variants"][random_variant.name]
            self.assertEqual(g.reference, expected["variant"].a1)
            self.assertEqual(g.coded, expected["variant"].a2)
            np.testing.assert_array_almost_equal(
                g.genotypes, expected["data"],
            )
    def test_iter_variants_by_names(self):
        """Tests getting the variations."""
        # Getting 10 random variants
        random_variants = random.sample(
            list(self.expected_variants.values()), 10,
        )
        # Generating a map of variants
        variant_map = {v.name: v for v in random_variants}
        # Reading the file
        seen_variants = set()
        with self.reader_f() as f:
            names = [v.name for v in random_variants]
            for g in f.iter_variants_by_names(names):
                # Getting the original variant
                ori_variant = variant_map[g.variant.name]
                # Checking the variant
                self.assertEqual(g.variant, ori_variant)
                # Checking the genotypes
                expected = self.truth["variants"][g.variant.name]
                self.assertEqual(g.reference, expected["variant"].a1)
                self.assertEqual(g.coded, expected["variant"].a2)
                np.testing.assert_array_almost_equal(
                    g.genotypes, expected["data"],
                )
                seen_variants.add(g.variant.name)
        self.assertEqual(set(variant_map.keys()), seen_variants)
    def test_get_variant_by_name_invalid(self):
        """Test getting an invalid variant by name."""
        with self.reader_f() as f:
            g = f.get_variant_by_name("invalid_variant_name")
            self.assertEqual(len(g), 0)
    @unittest.skip("Not implemented")
    def test_get_multiallelic_variant_by_name(self):
        """Find a biallelic variant at a multiallelic locus by name."""
        pass
class TestBGENParallel(TestBGEN, unittest.TestCase):
    """Runs the TestBGEN suite against a multi-process BGEN reader."""
    @classmethod
    def setUpClass(cls):
        """Reuse the single-process setup, swapping in a 2-CPU reader.

        The truth table and expected variants are identical to
        TestBGEN's, so instead of duplicating that setup we call it via
        super() and only replace the reader factory (cpus=2).
        """
        super().setUpClass()
        # Stored on the class, the lambda becomes a bound method: ``x``
        # absorbs ``self`` when called as ``self.reader_f()``.
        cls.reader_f = lambda x: bgen.BGENReader(BGEN_FILE, cpus=2)
| {
"repo_name": "pgxcentre/geneparse",
"path": "geneparse/tests/test_bgen.py",
"copies": "1",
"size": "11429",
"license": "mit",
"hash": 6143405496058140000,
"line_mean": 36.2280130293,
"line_max": 79,
"alpha_frac": 0.5850030624,
"autogenerated": false,
"ratio": 4.137943519188993,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 307
} |
"""Abstract Transport class."""
__all__ = (
'BaseTransport', 'ReadTransport', 'WriteTransport',
'Transport', 'DatagramTransport', 'SubprocessTransport',
)
class BaseTransport:
    """Behaviour shared by every transport flavour."""

    def __init__(self, extra=None):
        self._extra = {} if extra is None else extra

    def get_extra_info(self, name, default=None):
        """Look up an optional piece of transport metadata."""
        return self._extra.get(name, default)

    def is_closing(self):
        """Tell whether the transport is closed or on its way there."""
        raise NotImplementedError

    def close(self):
        """Flush pending data asynchronously, then shut the transport.

        No further data is received.  Once the buffer drains, the
        protocol's connection_lost() is (eventually) invoked with None
        as its argument.
        """
        raise NotImplementedError

    def set_protocol(self, protocol):
        """Attach a different protocol instance."""
        raise NotImplementedError

    def get_protocol(self):
        """Give back the protocol currently attached."""
        raise NotImplementedError
class ReadTransport(BaseTransport):
    """Read-only transport interface."""

    def is_reading(self):
        """Tell whether data is currently being received."""
        raise NotImplementedError

    def pause_reading(self):
        """Stop feeding the protocol.

        data_received() will not be invoked again until
        resume_reading() is called.
        """
        raise NotImplementedError

    def resume_reading(self):
        """Start feeding the protocol again.

        Incoming data once more reaches the protocol's data_received().
        """
        raise NotImplementedError
class WriteTransport(BaseTransport):
    """Write-only transport interface."""

    def set_write_buffer_limits(self, high=None, low=None):
        """Configure the flow-control water marks for writing.

        The two thresholds decide when the protocol's pause_writing()
        and resume_writing() methods get called.  When both are given,
        the low-water limit must not exceed the high-water one, and
        neither may be negative.

        Defaults are implementation specific.  Supplying only high makes
        low default to an implementation-specific value no larger than
        high.  A high of zero forces low to zero as well, so
        pause_writing() fires whenever the buffer is non-empty; a low of
        zero means resume_writing() fires only once the buffer fully
        drains.  Zero for either limit is usually sub-optimal, since it
        shrinks the window for overlapping I/O with computation.
        """
        raise NotImplementedError

    def get_write_buffer_size(self):
        """Report how many bytes currently sit in the write buffer."""
        raise NotImplementedError

    def write(self, data):
        """Queue some bytes for transmission.

        Never blocks; the bytes are buffered and flushed out
        asynchronously.
        """
        raise NotImplementedError

    def writelines(self, list_of_data):
        """Write every chunk from an iterable of bytes objects.

        This default implementation joins the chunks into one bytes
        object and hands the result to a single write() call.
        """
        self.write(b''.join(list_of_data))

    def write_eof(self):
        """Flush buffered data, then close only the writing side.

        (Comparable to typing ^D into a UNIX program reading stdin.)
        Data may still be received.
        """
        raise NotImplementedError

    def can_write_eof(self):
        """Return whether this transport supports write_eof()."""
        raise NotImplementedError

    def abort(self):
        """Tear the transport down immediately.

        Buffered data is discarded and nothing more is received; the
        protocol's connection_lost() eventually runs with None.
        """
        raise NotImplementedError
class Transport(ReadTransport, WriteTransport):
    """Interface for a transport that both reads and writes.

    Several implementations may exist, but users normally do not write
    their own; the platform supplies transports built according to its
    own best practices.

    A transport is never instantiated directly.  A utility function
    (e.g. EventLoop.create_connection() or EventLoop.create_server())
    receives a protocol factory plus whatever else it needs,
    asynchronously builds the transport/protocol pair, and wires them
    together by calling the protocol's connection_made() with the
    transport as its argument.

    Every method here raises NotImplemented except writelines(), which
    concatenates its chunks and issues one write() call.
    """
class DatagramTransport(BaseTransport):
    """Interface for UDP (datagram) transports."""

    def sendto(self, data, addr=None):
        """Queue a datagram for asynchronous delivery.

        Never blocks; the data is buffered and sent out later.  addr is
        the destination socket address; when it is None, the target
        address supplied at transport creation time is used.
        """
        raise NotImplementedError

    def abort(self):
        """Tear the transport down immediately.

        Buffered data is discarded and nothing more is received; the
        protocol's connection_lost() eventually runs with None.
        """
        raise NotImplementedError
class SubprocessTransport(BaseTransport):
    """Interface for transports wrapping a child process."""

    def get_pid(self):
        """Return the id of the child process."""
        raise NotImplementedError

    def get_returncode(self):
        """Return the child's exit code.

        See also
        http://docs.python.org/3/library/subprocess#subprocess.Popen.returncode
        """
        raise NotImplementedError

    def get_pipe_transport(self, fd):
        """Return the transport attached to pipe number fd."""
        raise NotImplementedError

    def send_signal(self, signal):
        """Deliver a signal to the child process.

        See also:
        docs.python.org/3/library/subprocess#subprocess.Popen.send_signal
        """
        raise NotImplementedError

    def terminate(self):
        """Ask the child process to stop; alias for close().

        On Posix OSs this sends SIGTERM; on Windows the Win32 API
        function TerminateProcess() is used to stop the child.

        See also:
        http://docs.python.org/3/library/subprocess#subprocess.Popen.terminate
        """
        raise NotImplementedError

    def kill(self):
        """Forcefully end the child process.

        On Posix OSs this sends SIGKILL; on Windows kill() is just an
        alias for terminate().

        See also:
        http://docs.python.org/3/library/subprocess#subprocess.Popen.kill
        """
        raise NotImplementedError
class _FlowControlMixin(Transport):
    """Write flow-control machinery, packaged as a mix-in.

    Subclasses must implement get_write_buffer_size(), invoke
    _maybe_pause_protocol() after the buffer grows and
    _maybe_resume_protocol() after it shrinks, and may override
    set_write_buffer_limits() (for instance to pick other defaults).

    Subclass constructors must call super().__init__(extra), which in
    turn runs set_write_buffer_limits().

    Users may call set_write_buffer_limits() and
    get_write_buffer_size(); their protocol's pause_writing() and
    resume_writing() may get invoked.
    """

    def __init__(self, extra=None, loop=None):
        super().__init__(extra)
        assert loop is not None
        self._loop = loop
        self._protocol_paused = False
        self._set_write_buffer_limits()

    def _maybe_pause_protocol(self):
        # Only pause once the buffer climbs above the high-water mark,
        # and only if we are not already paused.
        if self.get_write_buffer_size() <= self._high_water:
            return
        if self._protocol_paused:
            return
        self._protocol_paused = True
        try:
            self._protocol.pause_writing()
        except Exception as exc:
            # A misbehaving protocol must not kill the event loop.
            self._loop.call_exception_handler({
                'message': 'protocol.pause_writing() failed',
                'exception': exc,
                'transport': self,
                'protocol': self._protocol,
            })

    def _maybe_resume_protocol(self):
        # Resume only when paused and the buffer has drained to (or
        # below) the low-water mark.
        if not self._protocol_paused:
            return
        if self.get_write_buffer_size() > self._low_water:
            return
        self._protocol_paused = False
        try:
            self._protocol.resume_writing()
        except Exception as exc:
            self._loop.call_exception_handler({
                'message': 'protocol.resume_writing() failed',
                'exception': exc,
                'transport': self,
                'protocol': self._protocol,
            })

    def get_write_buffer_limits(self):
        """Return the (low, high) water-mark pair."""
        return (self._low_water, self._high_water)

    def _set_write_buffer_limits(self, high=None, low=None):
        # Fill in the implementation defaults: 64 KiB high water when
        # nothing is given, otherwise derive one bound from the other.
        if high is None:
            high = 64 * 1024 if low is None else 4 * low
        if low is None:
            low = high // 4
        if not high >= low >= 0:
            raise ValueError(
                f'high ({high!r}) must be >= low ({low!r}) must be >= 0')
        self._high_water = high
        self._low_water = low

    def set_write_buffer_limits(self, high=None, low=None):
        self._set_write_buffer_limits(high=high, low=low)
        self._maybe_pause_protocol()

    def get_write_buffer_size(self):
        raise NotImplementedError
| {
"repo_name": "huguesv/PTVS",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/asyncio/transports.py",
"copies": "11",
"size": "10122",
"license": "apache-2.0",
"hash": 7722921255069662000,
"line_mean": 31.5466237942,
"line_max": 79,
"alpha_frac": 0.6256668643,
"autogenerated": false,
"ratio": 4.712290502793296,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""Abstract Transport class."""
__all__ = ['ReadTransport', 'WriteTransport', 'Transport']
class BaseTransport:
    """ABC shared by every transport kind."""

    def __init__(self, extra=None):
        self._extra = {} if extra is None else extra

    def get_extra_info(self, name, default=None):
        """Fetch optional transport metadata by key."""
        return self._extra.get(name, default)

    def close(self):
        """Flush pending data asynchronously, then shut the transport.

        No further data is received.  Once the buffer drains, the
        protocol's connection_lost() is (eventually) invoked with None
        as its argument.
        """
        raise NotImplementedError
class ReadTransport(BaseTransport):
    """ABC for transports that only receive."""

    def pause_reading(self):
        """Stop feeding the protocol.

        data_received() will not be invoked again until
        resume_reading() is called.
        """
        raise NotImplementedError

    def resume_reading(self):
        """Start feeding the protocol again.

        Incoming data once more reaches the protocol's data_received().
        """
        raise NotImplementedError
class WriteTransport(BaseTransport):
    """ABC for transports that only send."""

    def set_write_buffer_limits(self, high=None, low=None):
        """Configure the flow-control water marks for writing.

        The two thresholds decide when the protocol's pause_writing()
        and resume_writing() methods get called.  When both are given,
        the low-water limit must not exceed the high-water one, and
        neither may be negative.

        Defaults are implementation specific.  Supplying only high makes
        low default to an implementation-specific value no larger than
        high.  A high of zero forces low to zero as well, so
        pause_writing() fires whenever the buffer is non-empty; a low of
        zero means resume_writing() fires only once the buffer fully
        drains.  Zero for either limit is usually sub-optimal, since it
        shrinks the window for overlapping I/O with computation.
        """
        raise NotImplementedError

    def get_write_buffer_size(self):
        """Report how many bytes currently sit in the write buffer."""
        raise NotImplementedError

    def write(self, data):
        """Queue some bytes for transmission.

        Never blocks; the bytes are buffered and flushed out
        asynchronously.
        """
        raise NotImplementedError

    def writelines(self, list_of_data):
        """Write every chunk from an iterable of bytes objects.

        This default implementation simply issues one write() call per
        item of the list/iterable.
        """
        for chunk in list_of_data:
            self.write(chunk)

    def write_eof(self):
        """Flush buffered data, then close only the writing side.

        (Comparable to typing ^D into a UNIX program reading stdin.)
        Data may still be received.
        """
        raise NotImplementedError

    def can_write_eof(self):
        """Return whether this transport supports write_eof()."""
        raise NotImplementedError

    def abort(self):
        """Tear the transport down immediately.

        Buffered data is discarded and nothing more is received; the
        protocol's connection_lost() eventually runs with None.
        """
        raise NotImplementedError
class Transport(ReadTransport, WriteTransport):
    """ABC for a transport that both reads and writes.

    Several implementations may exist, but users normally do not write
    their own; the platform supplies transports built according to its
    own best practices.

    A transport is never instantiated directly.  A utility function
    (e.g. EventLoop.create_connection() or EventLoop.create_server())
    receives a protocol factory plus whatever else it needs,
    asynchronously builds the transport/protocol pair, and wires them
    together by calling the protocol's connection_made() with the
    transport as its argument.

    Every method here raises NotImplemented except writelines(), which
    calls write() in a loop.
    """
class DatagramTransport(BaseTransport):
    """ABC for UDP (datagram) transports."""

    def sendto(self, data, addr=None):
        """Queue a datagram for asynchronous delivery.

        Never blocks; the data is buffered and sent out later.  addr is
        the destination socket address; when it is None, the target
        address supplied at transport creation time is used.
        """
        raise NotImplementedError

    def abort(self):
        """Tear the transport down immediately.

        Buffered data is discarded and nothing more is received; the
        protocol's connection_lost() eventually runs with None.
        """
        raise NotImplementedError
class SubprocessTransport(BaseTransport):
    """ABC for transports wrapping a child process."""

    def get_pid(self):
        """Return the id of the child process."""
        raise NotImplementedError

    def get_returncode(self):
        """Return the child's exit code.

        See also
        http://docs.python.org/3/library/subprocess#subprocess.Popen.returncode
        """
        raise NotImplementedError

    def get_pipe_transport(self, fd):
        """Return the transport attached to pipe number fd."""
        raise NotImplementedError

    def send_signal(self, signal):
        """Deliver a signal to the child process.

        See also:
        docs.python.org/3/library/subprocess#subprocess.Popen.send_signal
        """
        raise NotImplementedError

    def terminate(self):
        """Ask the child process to stop; alias for close().

        On Posix OSs this sends SIGTERM; on Windows the Win32 API
        function TerminateProcess() is used to stop the child.

        See also:
        http://docs.python.org/3/library/subprocess#subprocess.Popen.terminate
        """
        raise NotImplementedError

    def kill(self):
        """Forcefully end the child process.

        On Posix OSs this sends SIGKILL; on Windows kill() is just an
        alias for terminate().

        See also:
        http://docs.python.org/3/library/subprocess#subprocess.Popen.kill
        """
        raise NotImplementedError
| {
"repo_name": "mikhtonyuk/rxpython",
"path": "asyncio/transports.py",
"copies": "1",
"size": "6767",
"license": "mit",
"hash": -3519612496910996000,
"line_mean": 31.0710900474,
"line_max": 79,
"alpha_frac": 0.6544997783,
"autogenerated": false,
"ratio": 4.854375896700144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 211
} |
"""Abstract Transport class."""
from ActualVim.lib.asyncio_inc import compat
__all__ = ['BaseTransport', 'ReadTransport', 'WriteTransport',
'Transport', 'DatagramTransport', 'SubprocessTransport',
]
class BaseTransport:
    """Root of the transport class hierarchy."""

    def __init__(self, extra=None):
        self._extra = extra if extra is not None else {}

    def get_extra_info(self, name, default=None):
        """Return a piece of optional transport information, if present."""
        return self._extra.get(name, default)

    def is_closing(self):
        """Report whether the transport is closing or already closed."""
        raise NotImplementedError

    def close(self):
        """Flush buffered data asynchronously and close the transport.

        No more data is received.  After the buffer empties, the
        protocol's connection_lost() method is (eventually) called with
        None as its argument.
        """
        raise NotImplementedError

    def set_protocol(self, protocol):
        """Replace the attached protocol with a new one."""
        raise NotImplementedError

    def get_protocol(self):
        """Return the protocol currently in use."""
        raise NotImplementedError
class ReadTransport(BaseTransport):
    """Interface for transports that only receive."""

    def pause_reading(self):
        """Stop feeding the protocol.

        data_received() will not be invoked again until
        resume_reading() is called.
        """
        raise NotImplementedError

    def resume_reading(self):
        """Start feeding the protocol again.

        Incoming data once more reaches the protocol's data_received().
        """
        raise NotImplementedError
class WriteTransport(BaseTransport):
    """Interface for transports that only send."""

    def set_write_buffer_limits(self, high=None, low=None):
        """Configure the flow-control water marks for writing.

        The two thresholds decide when the protocol's pause_writing()
        and resume_writing() methods get called.  When both are given,
        the low-water limit must not exceed the high-water one, and
        neither may be negative.

        Defaults are implementation specific.  Supplying only high makes
        low default to an implementation-specific value no larger than
        high.  A high of zero forces low to zero as well, so
        pause_writing() fires whenever the buffer is non-empty; a low of
        zero means resume_writing() fires only once the buffer fully
        drains.  Zero for either limit is usually sub-optimal, since it
        shrinks the window for overlapping I/O with computation.
        """
        raise NotImplementedError

    def get_write_buffer_size(self):
        """Report how many bytes currently sit in the write buffer."""
        raise NotImplementedError

    def write(self, data):
        """Queue some bytes for transmission.

        Never blocks; the bytes are buffered and flushed out
        asynchronously.
        """
        raise NotImplementedError

    def writelines(self, list_of_data):
        """Write every chunk from an iterable of bytes objects.

        This default implementation flattens the chunks into one bytes
        object and hands the result to a single write() call.
        """
        self.write(compat.flatten_list_bytes(list_of_data))

    def write_eof(self):
        """Flush buffered data, then close only the writing side.

        (Comparable to typing ^D into a UNIX program reading stdin.)
        Data may still be received.
        """
        raise NotImplementedError

    def can_write_eof(self):
        """Return whether this transport supports write_eof()."""
        raise NotImplementedError

    def abort(self):
        """Tear the transport down immediately.

        Buffered data is discarded and nothing more is received; the
        protocol's connection_lost() eventually runs with None.
        """
        raise NotImplementedError
class Transport(ReadTransport, WriteTransport):
    """Interface for a transport that both reads and writes.

    Several implementations may exist, but users normally do not write
    their own; the platform supplies transports built according to its
    own best practices.

    A transport is never instantiated directly.  A utility function
    (e.g. EventLoop.create_connection() or EventLoop.create_server())
    receives a protocol factory plus whatever else it needs,
    asynchronously builds the transport/protocol pair, and wires them
    together by calling the protocol's connection_made() with the
    transport as its argument.

    Every method here raises NotImplemented except writelines(), which
    hands the concatenated chunks to a single write() call.
    """
class DatagramTransport(BaseTransport):
    """Interface for datagram (UDP) style transports."""

    def sendto(self, data, addr=None):
        """Queue a datagram for asynchronous delivery.

        Never blocks; the data is buffered and sent out later.  addr is
        the destination socket address; when it is None, the target
        address supplied at transport creation time is used.
        """
        raise NotImplementedError

    def abort(self):
        """Tear the transport down immediately.

        Buffered data is discarded and nothing more is received; the
        protocol's connection_lost() eventually runs with None.
        """
        raise NotImplementedError
class SubprocessTransport(BaseTransport):
    """Interface for transports that wrap a child process."""

    def get_pid(self):
        """Return the id of the child process."""
        raise NotImplementedError

    def get_returncode(self):
        """Return the child's exit code.

        See also
        http://docs.python.org/3/library/subprocess#subprocess.Popen.returncode
        """
        raise NotImplementedError

    def get_pipe_transport(self, fd):
        """Return the transport attached to pipe number fd."""
        raise NotImplementedError

    def send_signal(self, signal):
        """Deliver a signal to the child process.

        See also:
        docs.python.org/3/library/subprocess#subprocess.Popen.send_signal
        """
        raise NotImplementedError

    def terminate(self):
        """Ask the child process to stop; alias for close().

        On Posix OSs this sends SIGTERM; on Windows the Win32 API
        function TerminateProcess() is used to stop the child.

        See also:
        http://docs.python.org/3/library/subprocess#subprocess.Popen.terminate
        """
        raise NotImplementedError

    def kill(self):
        """Forcefully end the child process.

        On Posix OSs this sends SIGKILL; on Windows kill() is just an
        alias for terminate().

        See also:
        http://docs.python.org/3/library/subprocess#subprocess.Popen.kill
        """
        raise NotImplementedError
class _FlowControlMixin(Transport):
    """Write flow-control logic shared by concrete transports.

    Subclasses implement get_write_buffer_size() and call
    _maybe_pause_protocol() whenever the write buffer grows and
    _maybe_resume_protocol() whenever it shrinks.  They may override
    set_write_buffer_limits() (e.g. to pick different defaults).

    Subclass constructors must call super().__init__(extra), which
    installs the default water marks via _set_write_buffer_limits().
    """

    def __init__(self, extra=None, loop=None):
        super().__init__(extra)
        assert loop is not None
        self._loop = loop
        # True while the protocol has been asked to pause writing.
        # NOTE(review): self._protocol is expected to be set by the
        # concrete transport subclass -- it is never assigned here.
        self._protocol_paused = False
        self._set_write_buffer_limits()

    def _maybe_pause_protocol(self):
        """Pause the protocol once the buffer exceeds the high-water mark."""
        if self.get_write_buffer_size() <= self._high_water:
            return
        if self._protocol_paused:
            return
        self._protocol_paused = True
        try:
            self._protocol.pause_writing()
        except Exception as err:
            # A failing protocol callback must not break the transport;
            # route the error through the loop's exception handler.
            self._loop.call_exception_handler({
                'message': 'protocol.pause_writing() failed',
                'exception': err,
                'transport': self,
                'protocol': self._protocol,
            })

    def _maybe_resume_protocol(self):
        """Resume the protocol once the buffer drains to the low-water mark."""
        if not self._protocol_paused:
            return
        if self.get_write_buffer_size() > self._low_water:
            return
        self._protocol_paused = False
        try:
            self._protocol.resume_writing()
        except Exception as err:
            self._loop.call_exception_handler({
                'message': 'protocol.resume_writing() failed',
                'exception': err,
                'transport': self,
                'protocol': self._protocol,
            })

    def get_write_buffer_limits(self):
        """Return the (low, high) water marks currently in effect."""
        return (self._low_water, self._high_water)

    def _set_write_buffer_limits(self, high=None, low=None):
        # Defaults: high = 64 KiB (or 4*low when only low is given),
        # low = high // 4.  Both must satisfy 0 <= low <= high.
        if high is None:
            high = 64*1024 if low is None else 4*low
        if low is None:
            low = high // 4
        if not high >= low >= 0:
            raise ValueError('high (%r) must be >= low (%r) must be >= 0' %
                             (high, low))
        self._high_water = high
        self._low_water = low

    def set_write_buffer_limits(self, high=None, low=None):
        """Set water marks and pause immediately if already over high."""
        self._set_write_buffer_limits(high=high, low=low)
        self._maybe_pause_protocol()

    def get_write_buffer_size(self):
        # Subclass responsibility.
        raise NotImplementedError
| {
"repo_name": "lunixbochs/actualvim",
"path": "lib/asyncio/transports.py",
"copies": "1",
"size": "10084",
"license": "mit",
"hash": 6707273321003111000,
"line_mean": 31.954248366,
"line_max": 79,
"alpha_frac": 0.6259420865,
"autogenerated": false,
"ratio": 4.7033582089552235,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000052709255745308875,
"num_lines": 306
} |
"""Abstract Transport class."""
from asyncio import compat
__all__ = ['BaseTransport', 'ReadTransport', 'WriteTransport',
'Transport', 'DatagramTransport', 'SubprocessTransport',
]
class BaseTransport:
    """Base class for transports."""

    def __init__(self, extra=None):
        # Normalize the optional extra-info mapping to a dict so that
        # get_extra_info() lookups never fail.
        self._extra = {} if extra is None else extra

    def get_extra_info(self, name, default=None):
        """Get optional transport information."""
        try:
            return self._extra[name]
        except KeyError:
            return default

    def close(self):
        """Close the transport.

        Buffered data is flushed asynchronously; no more data will be
        received.  Once the flush completes, the protocol's
        connection_lost() method is (eventually) called with None.
        """
        raise NotImplementedError
class ReadTransport(BaseTransport):
    """Interface for read-only transports."""
    # Abstract interface: concrete transports override both methods;
    # the stubs only raise.
    def pause_reading(self):
        """Pause the receiving end.
        No data will be passed to the protocol's data_received()
        method until resume_reading() is called.
        """
        raise NotImplementedError
    def resume_reading(self):
        """Resume the receiving end.
        Data received will once again be passed to the protocol's
        data_received() method.
        """
        raise NotImplementedError
class WriteTransport(BaseTransport):
    """Interface for write-only transports."""

    def set_write_buffer_limits(self, high=None, low=None):
        """Configure the high- and low-water marks for write flow control.

        The protocol's pause_writing() is called once buffered data
        exceeds the high-water mark, and resume_writing() once it drops
        to the low-water mark.  If both are given they must satisfy
        0 <= low <= high.  Defaults are implementation-specific; when
        only high is given, low defaults to an implementation-specific
        value not above it.  A high of zero pauses on any buffered
        data; a low of zero resumes only on an empty buffer.  Zero
        limits are generally sub-optimal because they reduce the
        overlap of I/O and computation.
        """
        raise NotImplementedError

    def get_write_buffer_size(self):
        """Return the current size of the write buffer."""
        raise NotImplementedError

    def write(self, data):
        """Buffer *data* and arrange for it to be sent asynchronously.

        Never blocks.
        """
        raise NotImplementedError

    def writelines(self, list_of_data):
        """Write an iterable of data bytes to the transport.

        The chunks are flattened into one bytes object which is handed
        to a single write() call.
        """
        self.write(compat.flatten_list_bytes(list_of_data))

    def write_eof(self):
        """Flush buffered data, then close the writing end.

        (Comparable to typing ^D into a UNIX program reading stdin.)
        Data may still be received.
        """
        raise NotImplementedError

    def can_write_eof(self):
        """Return True if this transport supports write_eof(), False if not."""
        raise NotImplementedError

    def abort(self):
        """Close the transport immediately, discarding buffered data.

        No more data will be received; the protocol's
        connection_lost() method will (eventually) be called with None.
        """
        raise NotImplementedError
class Transport(ReadTransport, WriteTransport):
    """Interface representing a bidirectional transport.
    There may be several implementations, but typically, the user does
    not implement new transports; rather, the platform provides some
    useful transports that are implemented using the platform's best
    practices.
    The user never instantiates a transport directly; they call a
    utility function, passing it a protocol factory and other
    information necessary to create the transport and protocol. (E.g.
    EventLoop.create_connection() or EventLoop.create_server().)
    The utility function will asynchronously create a transport and a
    protocol and hook them up by calling the protocol's
    connection_made() method, passing it the transport.
    The implementation here raises NotImplementedError for every method
    except writelines(), which flattens its argument and calls write()
    once on the result.
    """
class DatagramTransport(BaseTransport):
    """Interface for datagram (UDP) transports."""

    def sendto(self, data, addr=None):
        """Queue *data* for asynchronous delivery to *addr*.

        Does not block: the data is buffered and flushed later.  When
        *addr* is None, the target address given at transport creation
        time is used.
        """
        raise NotImplementedError

    def abort(self):
        """Close the transport immediately, discarding buffered data.

        No more data will be received; the protocol's
        connection_lost() method will (eventually) be called with None.
        """
        raise NotImplementedError
class SubprocessTransport(BaseTransport):
    """Interface for transports that wrap a spawned subprocess."""
    # Abstract interface: event-loop subprocess transports override all
    # of these; the stubs only raise.
    def get_pid(self):
        """Get subprocess id."""
        raise NotImplementedError
    def get_returncode(self):
        """Get subprocess returncode.
        See also
        http://docs.python.org/3/library/subprocess#subprocess.Popen.returncode
        """
        raise NotImplementedError
    def get_pipe_transport(self, fd):
        """Get transport for pipe with number fd."""
        raise NotImplementedError
    def send_signal(self, signal):
        """Send signal to subprocess.
        See also:
        docs.python.org/3/library/subprocess#subprocess.Popen.send_signal
        """
        raise NotImplementedError
    def terminate(self):
        """Stop the subprocess.
        Alias for close() method.
        On Posix OSs the method sends SIGTERM to the subprocess.
        On Windows the Win32 API function TerminateProcess()
        is called to stop the subprocess.
        See also:
        http://docs.python.org/3/library/subprocess#subprocess.Popen.terminate
        """
        raise NotImplementedError
    def kill(self):
        """Kill the subprocess.
        On Posix OSs the function sends SIGKILL to the subprocess.
        On Windows kill() is an alias for terminate().
        See also:
        http://docs.python.org/3/library/subprocess#subprocess.Popen.kill
        """
        raise NotImplementedError
class _FlowControlMixin(Transport):
    """All the logic for (write) flow control in a mix-in base class.

    The subclass must implement get_write_buffer_size().  It must call
    _maybe_pause_protocol() whenever the write buffer size increases,
    and _maybe_resume_protocol() whenever it decreases.  It may also
    override set_write_buffer_limits() (e.g. to specify different
    defaults).

    The subclass constructor must call super().__init__(extra), which
    installs the default water marks via _set_write_buffer_limits().

    The user may call set_write_buffer_limits() and
    get_write_buffer_size(), and their protocol's pause_writing() and
    resume_writing() may be called.
    """

    def __init__(self, extra=None, loop=None):
        super().__init__(extra)
        assert loop is not None
        self._loop = loop
        # True while the protocol has been asked to pause writing.
        # NOTE(review): self._protocol is expected to be set by the
        # concrete transport subclass -- it is never assigned here.
        self._protocol_paused = False
        self._set_write_buffer_limits()

    def _maybe_pause_protocol(self):
        """Pause the protocol once the buffer exceeds the high-water mark."""
        size = self.get_write_buffer_size()
        if size <= self._high_water:
            return
        if not self._protocol_paused:
            self._protocol_paused = True
            try:
                self._protocol.pause_writing()
            except (SystemExit, KeyboardInterrupt):
                # Process-level exits must never be swallowed here
                # (matches current CPython asyncio behaviour).
                raise
            except BaseException as exc:
                # A misbehaving protocol callback must not break the
                # transport; report it via the loop's exception handler.
                self._loop.call_exception_handler({
                    'message': 'protocol.pause_writing() failed',
                    'exception': exc,
                    'transport': self,
                    'protocol': self._protocol,
                })

    def _maybe_resume_protocol(self):
        """Resume the protocol once the buffer drains to the low-water mark."""
        if (self._protocol_paused and
                self.get_write_buffer_size() <= self._low_water):
            self._protocol_paused = False
            try:
                self._protocol.resume_writing()
            except (SystemExit, KeyboardInterrupt):
                raise
            except BaseException as exc:
                self._loop.call_exception_handler({
                    'message': 'protocol.resume_writing() failed',
                    'exception': exc,
                    'transport': self,
                    'protocol': self._protocol,
                })

    def get_write_buffer_limits(self):
        """Return the (low, high) water marks currently in effect."""
        return (self._low_water, self._high_water)

    def _set_write_buffer_limits(self, high=None, low=None):
        # Defaults: high = 64 KiB (or 4*low when only low is given),
        # low = high // 4.  Both must satisfy 0 <= low <= high.
        if high is None:
            if low is None:
                high = 64*1024
            else:
                high = 4*low
        if low is None:
            low = high // 4
        if not high >= low >= 0:
            raise ValueError('high (%r) must be >= low (%r) must be >= 0' %
                             (high, low))
        self._high_water = high
        self._low_water = low

    def set_write_buffer_limits(self, high=None, low=None):
        """Set water marks and pause immediately if already over high."""
        self._set_write_buffer_limits(high=high, low=low)
        self._maybe_pause_protocol()

    def get_write_buffer_size(self):
        # Subclass responsibility.
        raise NotImplementedError
| {
"repo_name": "gvanrossum/asyncio",
"path": "asyncio/transports.py",
"copies": "7",
"size": "9726",
"license": "apache-2.0",
"hash": 2617741477909142500,
"line_mean": 32.0816326531,
"line_max": 79,
"alpha_frac": 0.6255397903,
"autogenerated": false,
"ratio": 4.689488910318226,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8815028700618226,
"avg_score": null,
"num_lines": null
} |
"""Abstract Transport class."""
from trollius import compat
__all__ = ['BaseTransport', 'ReadTransport', 'WriteTransport',
'Transport', 'DatagramTransport', 'SubprocessTransport',
]
class BaseTransport(object):
    """Base class for transports."""

    def __init__(self, extra=None):
        # An absent mapping becomes a fresh dict so lookups never fail.
        self._extra = extra if extra is not None else {}

    def get_extra_info(self, name, default=None):
        """Get optional transport information."""
        return self._extra.get(name, default)

    def close(self):
        """Close the transport.

        Buffered data is flushed asynchronously; no more data will be
        received.  Once the flush completes, the protocol's
        connection_lost() method is (eventually) called with None.
        """
        raise NotImplementedError
class ReadTransport(BaseTransport):
    """Interface for read-only transports."""
    # Abstract interface: concrete transports override both methods;
    # the stubs only raise.
    def pause_reading(self):
        """Pause the receiving end.
        No data will be passed to the protocol's data_received()
        method until resume_reading() is called.
        """
        raise NotImplementedError
    def resume_reading(self):
        """Resume the receiving end.
        Data received will once again be passed to the protocol's
        data_received() method.
        """
        raise NotImplementedError
class WriteTransport(BaseTransport):
    """Interface for write-only transports."""
    # Abstract interface: every method except writelines() must be
    # overridden by a concrete transport.
    def set_write_buffer_limits(self, high=None, low=None):
        """Set the high- and low-water limits for write flow control.
        These two values control when to call the protocol's
        pause_writing() and resume_writing() methods. If specified,
        the low-water limit must be less than or equal to the
        high-water limit. Neither value can be negative.
        The defaults are implementation-specific. If only the
        high-water limit is given, the low-water limit defaults to a
        implementation-specific value less than or equal to the
        high-water limit. Setting high to zero forces low to zero as
        well, and causes pause_writing() to be called whenever the
        buffer becomes non-empty. Setting low to zero causes
        resume_writing() to be called only once the buffer is empty.
        Use of zero for either limit is generally sub-optimal as it
        reduces opportunities for doing I/O and computation
        concurrently.
        """
        raise NotImplementedError
    def get_write_buffer_size(self):
        """Return the current size of the write buffer."""
        raise NotImplementedError
    def write(self, data):
        """Write some data bytes to the transport.
        This does not block; it buffers the data and arranges for it
        to be sent out asynchronously.
        """
        raise NotImplementedError
    def writelines(self, list_of_data):
        """Write a list (or any iterable) of data bytes to the transport.
        The default implementation concatenates the arguments and
        calls write() on the result.
        """
        # compat.flatten_list_bytes presumably joins the chunks into a
        # single bytes object (verify in trollius.compat), so only one
        # write() call is issued for the whole iterable.
        data = compat.flatten_list_bytes(list_of_data)
        self.write(data)
    def write_eof(self):
        """Close the write end after flushing buffered data.
        (This is like typing ^D into a UNIX program reading from stdin.)
        Data may still be received.
        """
        raise NotImplementedError
    def can_write_eof(self):
        """Return True if this transport supports write_eof(), False if not."""
        raise NotImplementedError
    def abort(self):
        """Close the transport immediately.
        Buffered data will be lost. No more data will be received.
        The protocol's connection_lost() method will (eventually) be
        called with None as its argument.
        """
        raise NotImplementedError
class Transport(ReadTransport, WriteTransport):
    """Interface representing a bidirectional transport.
    There may be several implementations, but typically, the user does
    not implement new transports; rather, the platform provides some
    useful transports that are implemented using the platform's best
    practices.
    The user never instantiates a transport directly; they call a
    utility function, passing it a protocol factory and other
    information necessary to create the transport and protocol. (E.g.
    EventLoop.create_connection() or EventLoop.create_server().)
    The utility function will asynchronously create a transport and a
    protocol and hook them up by calling the protocol's
    connection_made() method, passing it the transport.
    The implementation here raises NotImplementedError for every method
    except writelines(), which flattens its argument and calls write()
    once on the result.
    """
class DatagramTransport(BaseTransport):
    """Interface for datagram (UDP) transports."""
    # Abstract interface: concrete UDP transports created by the event
    # loop override both methods; the stubs only raise.
    def sendto(self, data, addr=None):
        """Send data to the transport.
        This does not block; it buffers the data and arranges for it
        to be sent out asynchronously.
        addr is target socket address.
        If addr is None use target address pointed on transport creation.
        """
        raise NotImplementedError
    def abort(self):
        """Close the transport immediately.
        Buffered data will be lost. No more data will be received.
        The protocol's connection_lost() method will (eventually) be
        called with None as its argument.
        """
        raise NotImplementedError
class SubprocessTransport(BaseTransport):
    """Interface for transports that wrap a spawned subprocess."""
    # Abstract interface: event-loop subprocess transports override all
    # of these; the stubs only raise.
    def get_pid(self):
        """Get subprocess id."""
        raise NotImplementedError
    def get_returncode(self):
        """Get subprocess returncode.
        See also
        http://docs.python.org/3/library/subprocess#subprocess.Popen.returncode
        """
        raise NotImplementedError
    def get_pipe_transport(self, fd):
        """Get transport for pipe with number fd."""
        raise NotImplementedError
    def send_signal(self, signal):
        """Send signal to subprocess.
        See also:
        docs.python.org/3/library/subprocess#subprocess.Popen.send_signal
        """
        raise NotImplementedError
    def terminate(self):
        """Stop the subprocess.
        Alias for close() method.
        On Posix OSs the method sends SIGTERM to the subprocess.
        On Windows the Win32 API function TerminateProcess()
        is called to stop the subprocess.
        See also:
        http://docs.python.org/3/library/subprocess#subprocess.Popen.terminate
        """
        raise NotImplementedError
    def kill(self):
        """Kill the subprocess.
        On Posix OSs the function sends SIGKILL to the subprocess.
        On Windows kill() is an alias for terminate().
        See also:
        http://docs.python.org/3/library/subprocess#subprocess.Popen.kill
        """
        raise NotImplementedError
class _FlowControlMixin(Transport):
    """All the logic for (write) flow control in a mix-in base class.
    The subclass must implement get_write_buffer_size(). It must call
    _maybe_pause_protocol() whenever the write buffer size increases,
    and _maybe_resume_protocol() whenever it decreases. It may also
    override set_write_buffer_limits() (e.g. to specify different
    defaults).
    The subclass constructor must call super(Class, self).__init__(extra). This
    will call set_write_buffer_limits().
    The user may call set_write_buffer_limits() and
    get_write_buffer_size(), and their protocol's pause_writing() and
    resume_writing() may be called.
    """
    def __init__(self, extra=None, loop=None):
        super(_FlowControlMixin, self).__init__(extra)
        assert loop is not None
        self._loop = loop
        # True while the protocol has been asked to pause writing.
        # NOTE(review): self._protocol is expected to be set by the
        # concrete transport subclass -- it is never assigned here.
        self._protocol_paused = False
        # Installs the default high/low water marks.
        self._set_write_buffer_limits()
    def _maybe_pause_protocol(self):
        # Pause only on an upward crossing of the high-water mark.
        size = self.get_write_buffer_size()
        if size <= self._high_water:
            return
        if not self._protocol_paused:
            self._protocol_paused = True
            try:
                self._protocol.pause_writing()
            except Exception as exc:
                # A failing protocol callback must not break the
                # transport; report it via the loop's exception handler.
                self._loop.call_exception_handler({
                    'message': 'protocol.pause_writing() failed',
                    'exception': exc,
                    'transport': self,
                    'protocol': self._protocol,
                })
    def _maybe_resume_protocol(self):
        # Resume only once the buffer has drained to the low-water mark.
        if (self._protocol_paused and
                self.get_write_buffer_size() <= self._low_water):
            self._protocol_paused = False
            try:
                self._protocol.resume_writing()
            except Exception as exc:
                self._loop.call_exception_handler({
                    'message': 'protocol.resume_writing() failed',
                    'exception': exc,
                    'transport': self,
                    'protocol': self._protocol,
                })
    def get_write_buffer_limits(self):
        # Returns the (low, high) water marks currently in effect.
        return (self._low_water, self._high_water)
    def _set_write_buffer_limits(self, high=None, low=None):
        # Defaults: high = 64 KiB (or 4*low when only low is given),
        # low = high // 4.  Both must satisfy 0 <= low <= high.
        if high is None:
            if low is None:
                high = 64*1024
            else:
                high = 4*low
        if low is None:
            low = high // 4
        if not high >= low >= 0:
            raise ValueError('high (%r) must be >= low (%r) must be >= 0' %
                             (high, low))
        self._high_water = high
        self._low_water = low
    def set_write_buffer_limits(self, high=None, low=None):
        # Public setter: may pause immediately if already over high.
        self._set_write_buffer_limits(high=high, low=low)
        self._maybe_pause_protocol()
    def get_write_buffer_size(self):
        # Subclass responsibility.
        raise NotImplementedError
| {
"repo_name": "haypo/trollius",
"path": "trollius/transports.py",
"copies": "1",
"size": "9769",
"license": "apache-2.0",
"hash": -8925508420162894000,
"line_mean": 32.2278911565,
"line_max": 80,
"alpha_frac": 0.6264714915,
"autogenerated": false,
"ratio": 4.676400191479177,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5802871682979177,
"avg_score": null,
"num_lines": null
} |
"""Abstract Transport class."""
import sys
from .compat import flatten_bytes
_PY34 = sys.version_info >= (3, 4)
__all__ = ['BaseTransport', 'ReadTransport', 'WriteTransport',
'Transport', 'DatagramTransport', 'SubprocessTransport',
]
class BaseTransport(object):
    """Base class for transports."""
    def __init__(self, extra=None):
        # extra: optional dict of transport-specific info exposed via
        # get_extra_info(); defaults to an empty dict.
        if extra is None:
            extra = {}
        self._extra = extra
    def get_extra_info(self, name, default=None):
        """Get optional transport information."""
        return self._extra.get(name, default)
    def close(self):
        """Close the transport.
        Buffered data will be flushed asynchronously. No more data
        will be received. After all buffered data is flushed, the
        protocol's connection_lost() method will (eventually) be called
        with None as its argument.
        """
        raise NotImplementedError
class ReadTransport(BaseTransport):
    """Interface for read-only transports."""
    # Abstract interface: concrete transports override both methods;
    # the stubs only raise.
    def pause_reading(self):
        """Pause the receiving end.
        No data will be passed to the protocol's data_received()
        method until resume_reading() is called.
        """
        raise NotImplementedError
    def resume_reading(self):
        """Resume the receiving end.
        Data received will once again be passed to the protocol's
        data_received() method.
        """
        raise NotImplementedError
class WriteTransport(BaseTransport):
    """Interface for write-only transports."""
    # Abstract interface: every method except writelines() must be
    # overridden by a concrete transport.
    def set_write_buffer_limits(self, high=None, low=None):
        """Set the high- and low-water limits for write flow control.
        These two values control when to call the protocol's
        pause_writing() and resume_writing() methods. If specified,
        the low-water limit must be less than or equal to the
        high-water limit. Neither value can be negative.
        The defaults are implementation-specific. If only the
        high-water limit is given, the low-water limit defaults to a
        implementation-specific value less than or equal to the
        high-water limit. Setting high to zero forces low to zero as
        well, and causes pause_writing() to be called whenever the
        buffer becomes non-empty. Setting low to zero causes
        resume_writing() to be called only once the buffer is empty.
        Use of zero for either limit is generally sub-optimal as it
        reduces opportunities for doing I/O and computation
        concurrently.
        """
        raise NotImplementedError
    def get_write_buffer_size(self):
        """Return the current size of the write buffer."""
        raise NotImplementedError
    def write(self, data):
        """Write some data bytes to the transport.
        This does not block; it buffers the data and arranges for it
        to be sent out asynchronously.
        """
        raise NotImplementedError
    def writelines(self, list_of_data):
        """Write a list (or any iterable) of data bytes to the transport.
        The default implementation concatenates the arguments and
        calls write() on the result.
        """
        # flatten_bytes presumably coerces each chunk (e.g. memoryview)
        # to bytes (verify in .compat); the pieces are then joined and
        # sent with a single write() call.
        data = map(flatten_bytes, list_of_data)
        self.write(b''.join(data))
    def write_eof(self):
        """Close the write end after flushing buffered data.
        (This is like typing ^D into a UNIX program reading from stdin.)
        Data may still be received.
        """
        raise NotImplementedError
    def can_write_eof(self):
        """Return True if this transport supports write_eof(), False if not."""
        raise NotImplementedError
    def abort(self):
        """Close the transport immediately.
        Buffered data will be lost. No more data will be received.
        The protocol's connection_lost() method will (eventually) be
        called with None as its argument.
        """
        raise NotImplementedError
class Transport(ReadTransport, WriteTransport):
    """Interface representing a bidirectional transport.
    There may be several implementations, but typically, the user does
    not implement new transports; rather, the platform provides some
    useful transports that are implemented using the platform's best
    practices.
    The user never instantiates a transport directly; they call a
    utility function, passing it a protocol factory and other
    information necessary to create the transport and protocol. (E.g.
    EventLoop.create_connection() or EventLoop.create_server().)
    The utility function will asynchronously create a transport and a
    protocol and hook them up by calling the protocol's
    connection_made() method, passing it the transport.
    The implementation here raises NotImplementedError for every method
    except writelines(), which flattens its argument and calls write()
    once on the result.
    """
class DatagramTransport(BaseTransport):
    """Interface for datagram (UDP) transports."""
    # Abstract interface: concrete UDP transports created by the event
    # loop override both methods; the stubs only raise.
    def sendto(self, data, addr=None):
        """Send data to the transport.
        This does not block; it buffers the data and arranges for it
        to be sent out asynchronously.
        addr is target socket address.
        If addr is None use target address pointed on transport creation.
        """
        raise NotImplementedError
    def abort(self):
        """Close the transport immediately.
        Buffered data will be lost. No more data will be received.
        The protocol's connection_lost() method will (eventually) be
        called with None as its argument.
        """
        raise NotImplementedError
class SubprocessTransport(BaseTransport):
    """Interface for transports that wrap a spawned subprocess."""
    # Abstract interface: event-loop subprocess transports override all
    # of these; the stubs only raise.
    def get_pid(self):
        """Get subprocess id."""
        raise NotImplementedError
    def get_returncode(self):
        """Get subprocess returncode.
        See also
        http://docs.python.org/3/library/subprocess#subprocess.Popen.returncode
        """
        raise NotImplementedError
    def get_pipe_transport(self, fd):
        """Get transport for pipe with number fd."""
        raise NotImplementedError
    def send_signal(self, signal):
        """Send signal to subprocess.
        See also:
        docs.python.org/3/library/subprocess#subprocess.Popen.send_signal
        """
        raise NotImplementedError
    def terminate(self):
        """Stop the subprocess.
        Alias for close() method.
        On Posix OSs the method sends SIGTERM to the subprocess.
        On Windows the Win32 API function TerminateProcess()
        is called to stop the subprocess.
        See also:
        http://docs.python.org/3/library/subprocess#subprocess.Popen.terminate
        """
        raise NotImplementedError
    def kill(self):
        """Kill the subprocess.
        On Posix OSs the function sends SIGKILL to the subprocess.
        On Windows kill() is an alias for terminate().
        See also:
        http://docs.python.org/3/library/subprocess#subprocess.Popen.kill
        """
        raise NotImplementedError
class _FlowControlMixin(Transport):
    """All the logic for (write) flow control in a mix-in base class.
    The subclass must implement get_write_buffer_size(). It must call
    _maybe_pause_protocol() whenever the write buffer size increases,
    and _maybe_resume_protocol() whenever it decreases. It may also
    override set_write_buffer_limits() (e.g. to specify different
    defaults).
    The subclass constructor must call super(Class, self).__init__(extra). This
    will call set_write_buffer_limits().
    The user may call set_write_buffer_limits() and
    get_write_buffer_size(), and their protocol's pause_writing() and
    resume_writing() may be called.
    """
    def __init__(self, extra=None, loop=None):
        super(_FlowControlMixin, self).__init__(extra)
        assert loop is not None
        self._loop = loop
        # True while the protocol has been asked to pause writing.
        # NOTE(review): self._protocol is expected to be set by the
        # concrete transport subclass -- it is never assigned here.
        self._protocol_paused = False
        # Installs the default high/low water marks.
        self._set_write_buffer_limits()
    def _maybe_pause_protocol(self):
        # Pause only on an upward crossing of the high-water mark.
        size = self.get_write_buffer_size()
        if size <= self._high_water:
            return
        if not self._protocol_paused:
            self._protocol_paused = True
            try:
                self._protocol.pause_writing()
            except Exception as exc:
                # A failing protocol callback must not break the
                # transport; report it via the loop's exception handler.
                self._loop.call_exception_handler({
                    'message': 'protocol.pause_writing() failed',
                    'exception': exc,
                    'transport': self,
                    'protocol': self._protocol,
                })
    def _maybe_resume_protocol(self):
        # Resume only once the buffer has drained to the low-water mark.
        if (self._protocol_paused and
                self.get_write_buffer_size() <= self._low_water):
            self._protocol_paused = False
            try:
                self._protocol.resume_writing()
            except Exception as exc:
                self._loop.call_exception_handler({
                    'message': 'protocol.resume_writing() failed',
                    'exception': exc,
                    'transport': self,
                    'protocol': self._protocol,
                })
    def get_write_buffer_limits(self):
        # Returns the (low, high) water marks currently in effect.
        return (self._low_water, self._high_water)
    def _set_write_buffer_limits(self, high=None, low=None):
        # Defaults: high = 64 KiB (or 4*low when only low is given),
        # low = high // 4.  Both must satisfy 0 <= low <= high.
        if high is None:
            if low is None:
                high = 64*1024
            else:
                high = 4*low
        if low is None:
            low = high // 4
        if not high >= low >= 0:
            raise ValueError('high (%r) must be >= low (%r) must be >= 0' %
                             (high, low))
        self._high_water = high
        self._low_water = low
    def set_write_buffer_limits(self, high=None, low=None):
        # Public setter: may pause immediately if already over high.
        self._set_write_buffer_limits(high=high, low=low)
        self._maybe_pause_protocol()
    def get_write_buffer_size(self):
        # Subclass responsibility.
        raise NotImplementedError
| {
"repo_name": "overcastcloud/trollius",
"path": "trollius/transports.py",
"copies": "1",
"size": "9825",
"license": "apache-2.0",
"hash": 8985140314209577000,
"line_mean": 32.0808080808,
"line_max": 80,
"alpha_frac": 0.6260559796,
"autogenerated": false,
"ratio": 4.6541923259118905,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.578024830551189,
"avg_score": null,
"num_lines": null
} |
"""Abstract Transport class."""
import sys
_PY34 = sys.version_info >= (3, 4)
__all__ = ['BaseTransport', 'ReadTransport', 'WriteTransport',
'Transport', 'DatagramTransport', 'SubprocessTransport',
]
class BaseTransport:
    """Base class for transports."""
    def __init__(self, extra=None):
        # extra: optional dict of transport-specific info exposed via
        # get_extra_info(); defaults to an empty dict.
        if extra is None:
            extra = {}
        self._extra = extra
    def get_extra_info(self, name, default=None):
        """Get optional transport information."""
        return self._extra.get(name, default)
    def close(self):
        """Close the transport.
        Buffered data will be flushed asynchronously. No more data
        will be received. After all buffered data is flushed, the
        protocol's connection_lost() method will (eventually) be called
        with None as its argument.
        """
        raise NotImplementedError
class ReadTransport(BaseTransport):
    """Interface for read-only transports."""
    # Abstract interface: concrete transports override both methods;
    # the stubs only raise.
    def pause_reading(self):
        """Pause the receiving end.
        No data will be passed to the protocol's data_received()
        method until resume_reading() is called.
        """
        raise NotImplementedError
    def resume_reading(self):
        """Resume the receiving end.
        Data received will once again be passed to the protocol's
        data_received() method.
        """
        raise NotImplementedError
class WriteTransport(BaseTransport):
    """Interface for write-only transports."""
    # Abstract interface: every method except writelines() must be
    # overridden by a concrete transport.
    def set_write_buffer_limits(self, high=None, low=None):
        """Set the high- and low-water limits for write flow control.
        These two values control when to call the protocol's
        pause_writing() and resume_writing() methods. If specified,
        the low-water limit must be less than or equal to the
        high-water limit. Neither value can be negative.
        The defaults are implementation-specific. If only the
        high-water limit is given, the low-water limit defaults to a
        implementation-specific value less than or equal to the
        high-water limit. Setting high to zero forces low to zero as
        well, and causes pause_writing() to be called whenever the
        buffer becomes non-empty. Setting low to zero causes
        resume_writing() to be called only once the buffer is empty.
        Use of zero for either limit is generally sub-optimal as it
        reduces opportunities for doing I/O and computation
        concurrently.
        """
        raise NotImplementedError
    def get_write_buffer_size(self):
        """Return the current size of the write buffer."""
        raise NotImplementedError
    def write(self, data):
        """Write some data bytes to the transport.
        This does not block; it buffers the data and arranges for it
        to be sent out asynchronously.
        """
        raise NotImplementedError
    def writelines(self, list_of_data):
        """Write a list (or any iterable) of data bytes to the transport.
        The default implementation concatenates the arguments and
        calls write() on the result.
        """
        # All chunks are concatenated and sent in a single write() call.
        if not _PY34:
            # In Python 3.3, bytes.join() doesn't handle memoryview.
            list_of_data = (
                bytes(data) if isinstance(data, memoryview) else data
                for data in list_of_data)
        self.write(b''.join(list_of_data))
    def write_eof(self):
        """Close the write end after flushing buffered data.
        (This is like typing ^D into a UNIX program reading from stdin.)
        Data may still be received.
        """
        raise NotImplementedError
    def can_write_eof(self):
        """Return True if this transport supports write_eof(), False if not."""
        raise NotImplementedError
    def abort(self):
        """Close the transport immediately.
        Buffered data will be lost. No more data will be received.
        The protocol's connection_lost() method will (eventually) be
        called with None as its argument.
        """
        raise NotImplementedError
class Transport(ReadTransport, WriteTransport):
    """Interface representing a bidirectional transport.
    There may be several implementations, but typically, the user does
    not implement new transports; rather, the platform provides some
    useful transports that are implemented using the platform's best
    practices.
    The user never instantiates a transport directly; they call a
    utility function, passing it a protocol factory and other
    information necessary to create the transport and protocol. (E.g.
    EventLoop.create_connection() or EventLoop.create_server().)
    The utility function will asynchronously create a transport and a
    protocol and hook them up by calling the protocol's
    connection_made() method, passing it the transport.
    The implementation here raises NotImplementedError for every method
    except writelines(), which flattens its argument and calls write()
    once on the result.
    """
class DatagramTransport(BaseTransport):
    """Interface for datagram (UDP) transports."""

    def sendto(self, data, addr=None):
        """Queue a datagram for asynchronous delivery.

        Never blocks; the payload is buffered and sent later.  *addr*
        is the target socket address; when None, the address supplied
        at transport creation time is used instead.
        """
        raise NotImplementedError

    def abort(self):
        """Tear the transport down immediately.

        Buffered datagrams are dropped, nothing more is received, and
        connection_lost(None) eventually reaches the protocol.
        """
        raise NotImplementedError
class SubprocessTransport(BaseTransport):

    def get_pid(self):
        """Return the OS process id of the subprocess."""
        raise NotImplementedError

    def get_returncode(self):
        """Return the subprocess exit status.

        See also
        http://docs.python.org/3/library/subprocess#subprocess.Popen.returncode
        """
        raise NotImplementedError

    def get_pipe_transport(self, fd):
        """Return the transport attached to the pipe numbered *fd*."""
        raise NotImplementedError

    def send_signal(self, signal):
        """Deliver *signal* to the subprocess.

        See also:
        docs.python.org/3/library/subprocess#subprocess.Popen.send_signal
        """
        raise NotImplementedError

    def terminate(self):
        """Ask the subprocess to stop; alias for close().

        On POSIX systems this sends SIGTERM; on Windows the Win32
        TerminateProcess() function is used.

        See also:
        http://docs.python.org/3/library/subprocess#subprocess.Popen.terminate
        """
        raise NotImplementedError

    def kill(self):
        """Forcefully stop the subprocess.

        On POSIX systems this sends SIGKILL; on Windows kill() is an
        alias for terminate().

        See also:
        http://docs.python.org/3/library/subprocess#subprocess.Popen.kill
        """
        raise NotImplementedError
class _FlowControlMixin(Transport):
    """All the logic for (write) flow control in a mix-in base class.

    The subclass must implement get_write_buffer_size().  It must call
    _maybe_pause_protocol() whenever the write buffer size increases,
    and _maybe_resume_protocol() whenever it decreases.  It may also
    override set_write_buffer_limits() (e.g. to specify different
    defaults).

    The subclass constructor must call super().__init__(extra).  This
    will call set_write_buffer_limits().

    The user may call set_write_buffer_limits() and
    get_write_buffer_size(), and their protocol's pause_writing() and
    resume_writing() may be called.
    """

    def __init__(self, extra=None, loop=None):
        super().__init__(extra)
        assert loop is not None
        self._loop = loop
        # True while pause_writing() has been delivered to the protocol
        # and resume_writing() has not yet been delivered.
        self._protocol_paused = False
        # Initializes self._high_water / self._low_water to defaults.
        self._set_write_buffer_limits()

    def _maybe_pause_protocol(self):
        """Ask the protocol to pause writing once the buffer exceeds the
        high-water mark; errors from pause_writing() go to the loop's
        exception handler instead of propagating."""
        size = self.get_write_buffer_size()
        if size <= self._high_water:
            return
        if not self._protocol_paused:
            # Flag is flipped before the callback so a failing
            # pause_writing() is not retried on every write.
            self._protocol_paused = True
            try:
                self._protocol.pause_writing()
            except Exception as exc:
                self._loop.call_exception_handler({
                    'message': 'protocol.pause_writing() failed',
                    'exception': exc,
                    'transport': self,
                    'protocol': self._protocol,
                })

    def _maybe_resume_protocol(self):
        """Ask the protocol to resume writing once the buffer has drained
        to the low-water mark (only if it was previously paused)."""
        if (self._protocol_paused and
                self.get_write_buffer_size() <= self._low_water):
            self._protocol_paused = False
            try:
                self._protocol.resume_writing()
            except Exception as exc:
                self._loop.call_exception_handler({
                    'message': 'protocol.resume_writing() failed',
                    'exception': exc,
                    'transport': self,
                    'protocol': self._protocol,
                })

    def get_write_buffer_limits(self):
        """Return the current (low, high) water marks, in bytes."""
        return (self._low_water, self._high_water)

    def _set_write_buffer_limits(self, high=None, low=None):
        """Validate and store the water marks.

        Defaults: high = 64 KiB, low = high // 4; giving only *low*
        implies high = 4 * low.  Requires high >= low >= 0.
        """
        if high is None:
            if low is None:
                high = 64*1024
            else:
                high = 4*low
        if low is None:
            low = high // 4
        if not high >= low >= 0:
            raise ValueError('high (%r) must be >= low (%r) must be >= 0' %
                             (high, low))
        self._high_water = high
        self._low_water = low

    def set_write_buffer_limits(self, high=None, low=None):
        """Public setter for the water marks; may immediately pause the
        protocol if the buffer already exceeds the new high mark."""
        self._set_write_buffer_limits(high=high, low=low)
        self._maybe_pause_protocol()

    def get_write_buffer_size(self):
        # Subclass responsibility: current size of the write buffer.
        raise NotImplementedError
| {
"repo_name": "gsb-eng/asyncio",
"path": "asyncio/transports.py",
"copies": "16",
"size": "9941",
"license": "apache-2.0",
"hash": 4151774146633109000,
"line_mean": 32.1366666667,
"line_max": 79,
"alpha_frac": 0.6223719948,
"autogenerated": false,
"ratio": 4.6540262172284645,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000053763440860215054,
"num_lines": 300
} |
""" abstract vpp object and object registry """
import abc
import six
from six import moves
@six.add_metaclass(abc.ABCMeta)
class VppObject(object):
    """ Abstract vpp object """

    @abc.abstractmethod
    def add_vpp_config(self):
        """ Add the configuration for this object to vpp. """
        pass

    @abc.abstractmethod
    def query_vpp_config(self):
        """Query the vpp configuration.

        :return: True if the object is configured"""
        pass

    @abc.abstractmethod
    def remove_vpp_config(self):
        """ Remove the configuration for this object from vpp. """
        pass

    def object_id(self):
        """ Return a unique string representing this object. """
        return "Undefined. for <%s %s>" % (self.__class__.__name__, id(self))

    def __str__(self):
        return self.object_id()

    def __repr__(self):
        return '<%s>' % self.object_id()

    def __hash__(self):
        return hash(self.object_id())

    def __eq__(self, other):
        # Identity is entirely determined by object_id().
        if not isinstance(other, self.__class__):
            return NotImplemented
        return other.object_id() == self.object_id()

    # This can be removed when python2 support is dropped.
    def __ne__(self, other):
        return not self.__eq__(other)
class VppObjectRegistry(object):
    """ Class which handles automatic configuration cleanup. """
    # Borg pattern: every instance shares this one state dict.
    _shared_state = {}

    def __init__(self):
        self.__dict__ = self._shared_state
        if not hasattr(self, "_object_registry"):
            self._object_registry = []
        if not hasattr(self, "_object_dict"):
            self._object_dict = dict()

    def register(self, obj, logger):
        """ Register an object in the registry. """
        key = obj.object_id()
        if key in self._object_dict:
            logger.debug("REG: duplicate add, ignoring (%s)" % obj)
            return
        self._object_registry.append(obj)
        self._object_dict[key] = obj
        logger.debug("REG: registering %s" % obj)

    def unregister_all(self, logger):
        """ Remove all object registrations from registry. """
        logger.debug("REG: removing all object registrations")
        self._object_registry = []
        self._object_dict = dict()

    def remove_vpp_config(self, logger):
        """
        Remove configuration (if present) from vpp and then remove all objects
        from the registry.
        """
        if not self._object_registry:
            logger.info("REG: No objects registered for auto-cleanup.")
            return
        logger.info("REG: Removing VPP configuration for registered objects")
        # Undo in reverse registration order: later objects may depend
        # on earlier ones.
        failed = []
        for obj in reversed(self._object_registry):
            if not obj.query_vpp_config():
                logger.info(
                    "REG: Skipping removal for %s, configuration not present" %
                    obj)
                continue
            logger.info("REG: Removing configuration for %s" % obj)
            obj.remove_vpp_config()
            if obj.query_vpp_config():
                failed.append(obj)
        self.unregister_all(logger)
        if failed:
            logger.error("REG: Couldn't remove configuration for object(s):")
            for obj in failed:
                logger.error(repr(obj))
            raise Exception("Couldn't remove configuration for object(s): %s" %
                            (", ".join(str(x) for x in failed)))
| {
"repo_name": "vpp-dev/vpp",
"path": "test/vpp_object.py",
"copies": "1",
"size": "3546",
"license": "apache-2.0",
"hash": -3162753444662757000,
"line_mean": 31.8333333333,
"line_max": 79,
"alpha_frac": 0.5699379583,
"autogenerated": false,
"ratio": 4.329670329670329,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 108
} |
""" abstract vpp object and object registry """
import abc
class VppObject(metaclass=abc.ABCMeta):
    """ Abstract vpp object """

    @abc.abstractmethod
    def add_vpp_config(self) -> None:
        """ Add the configuration for this object to vpp. """
        pass

    @abc.abstractmethod
    def query_vpp_config(self) -> bool:
        """Query the vpp configuration.

        :return: True if the object is configured"""
        pass

    @abc.abstractmethod
    def remove_vpp_config(self) -> None:
        """ Remove the configuration for this object from vpp. """
        pass

    def object_id(self) -> str:
        """ Return a unique string representing this object. """
        return "Undefined. for <%s %s>" % (self.__class__.__name__, id(self))

    def __str__(self) -> str:
        return self.object_id()

    def __repr__(self) -> str:
        return '<%s>' % self.object_id()

    def __hash__(self) -> int:
        return hash(self.object_id())

    def __eq__(self, other) -> bool:
        # Identity is entirely determined by object_id().
        if not isinstance(other, self.__class__):
            return NotImplemented
        return other.object_id() == self.object_id()

    # This can be removed when python2 support is dropped.
    def __ne__(self, other):
        return not self.__eq__(other)
class VppObjectRegistry:
    """ Class which handles automatic configuration cleanup. """
    # Borg pattern: every instance shares this one state dict.
    _shared_state = {}

    def __init__(self) -> None:
        self.__dict__ = self._shared_state
        if not hasattr(self, "_object_registry"):
            self._object_registry = []
        if not hasattr(self, "_object_dict"):
            self._object_dict = dict()

    def register(self, obj: "VppObject", logger) -> None:
        """ Register an object in the registry. """
        key = obj.object_id()
        if key in self._object_dict:
            logger.debug("REG: duplicate add, ignoring (%s)" % obj)
            return
        self._object_registry.append(obj)
        self._object_dict[key] = obj
        logger.debug("REG: registering %s" % obj)

    def unregister_all(self, logger) -> None:
        """ Remove all object registrations from registry. """
        logger.debug("REG: removing all object registrations")
        self._object_registry = []
        self._object_dict = dict()

    def remove_vpp_config(self, logger) -> None:
        """
        Remove configuration (if present) from vpp and then remove all objects
        from the registry.
        """
        if not self._object_registry:
            logger.info("REG: No objects registered for auto-cleanup.")
            return
        logger.info("REG: Removing VPP configuration for registered objects")
        # Undo in reverse registration order: later objects may depend
        # on earlier ones.
        failed = []
        for obj in reversed(self._object_registry):
            if not obj.query_vpp_config():
                logger.info(
                    "REG: Skipping removal for %s, configuration not present" %
                    obj)
                continue
            logger.info("REG: Removing configuration for %s" % obj)
            obj.remove_vpp_config()
            if obj.query_vpp_config():
                failed.append(obj)
        self.unregister_all(logger)
        if failed:
            logger.error("REG: Couldn't remove configuration for object(s):")
            for obj in failed:
                logger.error(repr(obj))
            raise Exception("Couldn't remove configuration for object(s): %s" %
                            (", ".join(str(x) for x in failed)))
| {
"repo_name": "chrisy/vpp",
"path": "test/vpp_object.py",
"copies": "2",
"size": "3590",
"license": "apache-2.0",
"hash": -5150089499043791000,
"line_mean": 33.5192307692,
"line_max": 79,
"alpha_frac": 0.5651810585,
"autogenerated": false,
"ratio": 4.273809523809524,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5838990582309525,
"avg_score": null,
"num_lines": null
} |
"""Abstract Workflow Engine."""
import os
import pkgutil
from importlib import import_module
from django.core.exceptions import ImproperlyConfigured
class InvalidEngineError(Exception):
    """Signal that a requested engine does not exist or is invalid."""
class BaseEngine:
    """Base class for all engines in Resolwe workflow."""

    # Concrete engines override this with their identifying name.
    name = None

    def __init__(self, manager, settings=None):
        """Construct a Resolwe engine.

        :param manager: the owning manager instance
        :param settings: optional engine configuration mapping
        """
        self.settings = settings or {}
        self.manager = manager

    def get_name(self):
        """Return the engine name."""
        return self.name
def load_engines(
    manager, class_name, base_module, engines, class_key="ENGINE", engine_type="engine"
):
    """Load and instantiate engine classes.

    :param manager: manager object forwarded to every engine constructor
    :param class_name: name of the class looked up in each engine module
    :param base_module: package under ``resolwe.flow`` holding built-in engines
    :param engines: iterable of module paths, or dicts containing *class_key*
        (the whole dict is forwarded to the engine as its settings)
    :param class_key: dict key naming the engine module
    :param engine_type: human-readable kind used in error messages
    :return: dict mapping each engine's name to its instance
    :raises ImproperlyConfigured: on malformed specs, missing or invalid
        classes, duplicated engine names, or unknown engine modules
    """
    loaded_engines = {}
    for module_name_or_dict in engines:
        # Normalize bare module-path strings into the dict form.
        if not isinstance(module_name_or_dict, dict):
            module_name_or_dict = {class_key: module_name_or_dict}
        try:
            module_name = module_name_or_dict[class_key]
            # The full spec dict doubles as the engine's settings.
            engine_settings = module_name_or_dict
        except KeyError:
            raise ImproperlyConfigured(
                "If {} specification is a dictionary, it must define {}".format(
                    engine_type, class_key
                )
            )
        try:
            engine_module = import_module(module_name)
            try:
                engine = getattr(engine_module, class_name)(
                    manager=manager, settings=engine_settings
                )
                if not isinstance(engine, BaseEngine):
                    raise ImproperlyConfigured(
                        "{} module {} class {} must extend BaseEngine".format(
                            engine_type.capitalize(), module_name, class_name
                        )
                    )
            except AttributeError:
                # getattr failed: the module defines no class_name class.
                raise ImproperlyConfigured(
                    "{} module {} is missing a {} class".format(
                        engine_type.capitalize(), module_name, class_name
                    )
                )
            if engine.get_name() in loaded_engines:
                raise ImproperlyConfigured(
                    "Duplicated {} {}".format(engine_type, engine.get_name())
                )
            loaded_engines[engine.get_name()] = engine
        except ImportError as ex:
            # The engine wasn't found. Display a helpful error message listing all possible
            # (built-in) engines.
            engine_dir = os.path.join(os.path.dirname(__file__), base_module)
            try:
                builtin_engines = [
                    name for _, name, _ in pkgutil.iter_modules([engine_dir])
                ]
            except EnvironmentError:
                builtin_engines = []
            # Only produce the hint when the module isn't one of the
            # built-ins; otherwise the ImportError came from inside it.
            if module_name not in [
                "resolwe.flow.{}.{}".format(base_module, builtin_engine)
                for builtin_engine in builtin_engines
            ]:
                engine_reprs = map(repr, sorted(builtin_engines))
                error_msg = (
                    "{} isn't an available dataflow {}.\n"
                    "Try using 'resolwe.flow.{}.XXX', where XXX is one of:\n"
                    " {}\n"
                    "Error was: {}".format(
                        module_name,
                        engine_type,
                        base_module,
                        ", ".join(engine_reprs),
                        ex,
                    )
                )
                raise ImproperlyConfigured(error_msg)
            else:
                # If there's some other error, this must be an error in Django
                raise
    return loaded_engines
| {
"repo_name": "genialis/resolwe",
"path": "resolwe/flow/engine.py",
"copies": "1",
"size": "3670",
"license": "apache-2.0",
"hash": 8600617544381650000,
"line_mean": 32.9814814815,
"line_max": 91,
"alpha_frac": 0.5046321526,
"autogenerated": false,
"ratio": 5.020519835841314,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00032017482030901245,
"num_lines": 108
} |
# Abstrys Command Line Utility library
#
# Provides classes and functions useful for writing command-line scripts.
import sys
def print_error(string):
    """Emit *string* on stderr, prefixed as an error."""
    message = "*** Error: %s\n" % (string)
    sys.stderr.write(message)
def confirm(query):
"""Asks the user a y/n question, and returns True if the user responded
with 'y'."""
answer = raw_input("%s (y/n): " % (query))
return answer.lower() == 'y'
# adapted from http://www.python.org/dev/peps/pep-0257/
def format_doc(string, extra_indent=0, line_start=0, line_end=-1):
    """Remove significant leading space from all lines and return the
    resulting string.

    :param string: the (docstring-style) text to reflow
    :param extra_indent: number of spaces to prepend to each emitted line
    :param line_start: first line index to emit (0 also emits the title line)
    :param line_end: one past the last line index, or -1 for "to the end"
    :return: the reformatted text, each line newline-terminated
    """
    if not string:
        return ''
    # Convert tabs to spaces and split into a list of lines:
    lines = string.expandtabs(4).splitlines()
    # Determine minimum indentation (first line doesn't count, and blank
    # lines don't count).  getattr keeps this working on Python 2
    # (sys.maxint) and Python 3 (sys.maxsize) alike — the original used
    # sys.maxint, which no longer exists on Python 3.
    indent = getattr(sys, 'maxint', sys.maxsize)
    for line in lines[1:]:
        stripped = line.lstrip()
        if stripped:
            indent = min(indent, len(line) - len(stripped))
    # if line_end is negative, then set it to the last line in the list.
    if line_end == -1:
        line_end = len(lines)
    # put the lines together, removing the first `indent` characters from
    # each line (except for line 0).
    result_text = ""
    if line_start == 0:
        result_text = lines[0] + '\n'
        line_start = 1
    # convert the extra indent number into spaces
    extra_indent = ' ' * extra_indent
    # add each line to the result.
    for cur_line in lines[line_start:line_end]:
        result_text += "%s%s\n" % (extra_indent, cur_line[indent:])
    return result_text
class TempMessage:
    """Display a console message that can later be erased in place.

    Typical use::

        a = TempMessage("some text")
        a.show()
        # something happens...
        a.erase()
        # continue onward!
    """
    text = ""
    erase_string = ""

    def __init__(self, text=""):
        """Create the message, optionally with initial text."""
        self.set(text)

    def set(self, text):
        """Store *text* and precompute the backspace/blank/backspace
        sequence that will rub it out."""
        self.text = text
        width = len(text)
        back = '\b' * width
        self.erase_string = "%s%s%s" % (back, ' ' * width, back)

    def show(self, text=""):
        """Print the message; a non-empty *text* replaces it first."""
        if text != "":
            self.set(text)
        sys.stdout.write(self.text)
        sys.stdout.flush()

    def erase(self):
        """Rub out the previously shown message on sys.stdout."""
        sys.stdout.write(self.erase_string)
        sys.stdout.flush()

    def __len__(self):
        """Length of the message text, in characters."""
        return len(self.text)
class TwirlingProgressIndicator:
    """A console text-based twirling progress indicator."""
    temp_message = None

    def __init__(self):
        self.temp_message = TempMessage()
        self.progress_frames = ['-', '\\', '|', '/', '-', '\\', '|']
        self.progress_string = ' %s '
        self.progress_string_len = 3
        # The first show() increments this to frame zero.
        self.cur_frame = -1

    def show(self):
        """Erase the previous frame and print the next one."""
        self.cur_frame += 1
        if self.cur_frame == len(self.progress_frames):
            self.cur_frame = 0
        self.temp_message.erase()
        frame = self.progress_frames[self.cur_frame]
        self.temp_message.show(self.progress_string % frame)

    def erase(self):
        """Remove the indicator from the console."""
        self.temp_message.erase()
class ProgressBar:
    """A console text-based bar-style progress indicator.

    ``outputs`` selects what is drawn, in order:
      * ``'bar'``        -- the bracketed fill bar
      * ``'val'``        -- a ``(current/target)`` counter
      * ``'val:units'``  -- the counter with a trailing unit label
      * ``'pct'``        -- percent complete
    """
    temp_message = None
    bar_parts = None
    bar_size = None
    cur_value = None
    target_value = None
    outputs = None

    # NOTE: the mutable defaults are read-only inside the class, so the
    # shared-default pitfall does not bite; kept for interface stability.
    def __init__(self, size=10, target=100, outputs=['bar'],
                 parts=['[', '#', ']']):
        """Create the bar.

        :param size: width of the fill area in characters
        :param target: value that represents 100% completion
        :param outputs: list of output selectors (see class docstring)
        :param parts: [open-bracket, fill-char, close-bracket]
        """
        self.temp_message = TempMessage()
        self.bar_parts = parts
        self.bar_size = int(size)
        self.target_value = target
        self.outputs = outputs
        self.cur_value = 0

    def set_target(self, value):
        """Change the value that represents 100% completion."""
        self.target_value = value

    def update(self, value):
        """Record progress without redrawing."""
        self.cur_value = value

    def show(self, value=None):
        """Redraw the bar, optionally recording a new progress value."""
        if value:
            self.cur_value = value
        output_parts = ['', '', '']
        for output_type in self.outputs:
            if output_type == 'bar':
                fill_amount = int((self.cur_value * self.bar_size) /
                                  self.target_value)
                output_parts[0] = ('%s%s%s%s ' % (
                    self.bar_parts[0],
                    self.bar_parts[1] * int(fill_amount),
                    ' ' * int(self.bar_size - fill_amount),
                    self.bar_parts[2]))
            if output_type[:3] == 'val':
                # BUG FIX: partition() tolerates a bare 'val' selector;
                # the old split(':') tuple unpack raised ValueError.
                _, _, units = output_type.partition(':')
                if units:
                    output_parts[1] = ('(%d/%d %s) ' % (self.cur_value,
                                       self.target_value, units))
                else:
                    # BUG FIX: previously referenced bare cur_value /
                    # target_value, raising NameError at runtime.
                    output_parts[1] = ('(%d/%d) ' % (self.cur_value,
                                                     self.target_value))
            if output_type == 'pct':
                output_parts[2] = '%d%%' % ((self.cur_value * 100) /
                                            self.target_value)
        self.temp_message.erase()
        self.temp_message.show('%s%s%s' % (output_parts[0], output_parts[1],
                                           output_parts[2]))

    def erase(self):
        """Remove the bar from the console."""
        self.temp_message.erase()
| {
"repo_name": "Abstrys/abstrys-toolkit",
"path": "abstrys/cmd_utils.py",
"copies": "1",
"size": "5365",
"license": "bsd-3-clause",
"hash": -7903589806643099000,
"line_mean": 30.1918604651,
"line_max": 97,
"alpha_frac": 0.5602982293,
"autogenerated": false,
"ratio": 3.728283530229326,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47885817595293256,
"avg_score": null,
"num_lines": null
} |
# abstrys text utility functions and classes.
import re
def camel2snake(text, sep=r'_'):
    """Convert *text* from CamelCase (or camelCase) into snake_case."""
    result = str(text)  # operate on a copy, never the original
    # First break lower->UPPER boundaries, then UPPER->UPPER runs.
    for boundary in (r'([a-z])([A-Z])', r'([A-Z])([A-Z])'):
        result = re.sub(boundary, (r'\g<1>' + sep + r'\g<2>'), result)
    return result.lower()
def snakeify(text, sep=r'_'):
    """Snakeify the input text: lowercase it, join its words with *sep*,
    and strip noise punctuation along the way."""
    result = sep.join(text.split()).lower()
    # drop quote-ish / emphasis characters outright
    result = re.sub('[\?\!\@\#\$\%\^\*<\'\">]', '', result)
    # commas, colons and semicolons become separators
    result = re.sub('[,:;]', sep, result)
    # a period immediately before a separator is dropped
    result = result.replace('.%s' % sep, sep)
    # separators directly after an opening bracket are dropped
    for opener in ('(', '[', '{'):
        result = result.replace(opener + sep, opener)
    # ampersands and plus signs read as the word 'and'
    result = re.sub('[&\+]', 'and', result)
    # collapse runs of dashes/underscores into a single separator
    return re.sub('[\-_]+', sep, result)
| {
"repo_name": "Abstrys/abstrys-toolkit",
"path": "abstrys/txt_utils.py",
"copies": "1",
"size": "1415",
"license": "bsd-3-clause",
"hash": -5801663547049893000,
"line_mean": 34.375,
"line_max": 75,
"alpha_frac": 0.606360424,
"autogenerated": false,
"ratio": 3.0042462845010616,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8960497557433956,
"avg_score": 0.03002183021342093,
"num_lines": 40
} |
# abs(x)
# If the argument is a complex number, its magnitude is returned.
"""
In [2]: abs(10)
Out[2]: 10
In [4]: abs(-10.4)
Out[4]: 10.4
"""
# all(iterable)
"""
In [7]: all([1, 2, 3])
Out[7]: True
In [8]: all([1, 2, True, {'a': 'a'}])
Out[8]: True
In [9]: all([1, 2, True, {'a': 'a'}, None])
Out[9]: False
"""
# any(iterable)
"""
In [13]: any([True, False])
Out[13]: True
In [14]: any({None: 'c'})
Out[14]: False
"""
# ascii(object)
"""
In [18]: ascii('编程')
Out[18]: "'\\u7f16\\u7a0b'"
In [19]: ascii('code')
Out[19]: "'code'"
"""
# bin(x)
# 将一个int变成一个二进制字符串,结果是一个合法的python表达式。
# 如果参数不是一个int,那么他应该定义一个__index__()方法来返回一个int
"""
In [21]: bin(10)
Out[21]: '0b1010'
In [22]: 0b1010
Out[22]: 10
In [28]: bin(CanBin())
Out[28]: '0b1010'
"""
"""
In [30]: class CanNotBin(object):
...: pass
...:
In [31]: bin(CanNotBin())
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-31-bbc800b33295> in <module>()
----> 1 bin(CanNotBin())
TypeError: 'CanNotBin' object cannot be interpreted as an integer
"""
"""
In [32]: class CanBin(object):
...: def __index__(self):
...: return 10
...:
In [33]: bin(CanBin())
Out[33]: '0b1010'
"""
# class bool([x])
# bool是int的子类
# bool唯一的实例就是True或者False,不能进一步被继承
"""
In [34]: bool(1)
Out[34]: True
In [36]: bool(0)
Out[36]: False
In [37]: bool()
Out[37]: False
"""
"""
In [39]: class BoolFurther(bool):
...: pass
...:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-39-685387e5d4ab> in <module>()
----> 1 class BoolFurther(bool):
2 pass
TypeError: type 'bool' is not an acceptable base type
"""
# class bytearray([source[, encoding[, errors]]])
# Without an argument, an array of size 0 is created.
"""
In [77]: bytearray()
Out[77]: bytearray(b'')
"""
# If it is a string, you must also give the encoding (and optionally, errors) parameters; bytearray() then converts the string to bytes using str.encode().
"""
In [75]: bytearray('code', encoding='utf-8')
Out[75]: bytearray(b'code')
In [76]: bytearray('代码', encoding='utf-8')
Out[76]: bytearray(b'\xe4\xbb\xa3\xe7\xa0\x81')
"""
# If it is an integer, the array will have that size and will be initialized with null bytes.
"""
In [63]: bytearray(0)
Out[63]: bytearray(b'')
In [64]: bytearray(2)
Out[64]: bytearray(b'\x00\x00')
In [65]: bytearray(12)
Out[65]: bytearray(b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
"""
# If it is an object conforming to the buffer interface, a read-only buffer of the object will be used to initialize the bytes array.
"""
In [66]: bytearray({0, 2, 12, 100, 255})
Out[66]: bytearray(b'\x00\x02d\x0c\xff')
In [68]: bytearray({256})
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-68-864ea48aadd3> in <module>()
----> 1 bytearray({256})
ValueError: byte must be in range(0, 256)
"""
# If it is an iterable, it must be an iterable of integers in the range 0 <= x < 256, which are used as the initial contents of the array.
"""
In [70]: bytearray([0, 2, 12, 100, 255])
Out[70]: bytearray(b'\x00\x02\x0cd\xff')
In [71]: bytearray([256])
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-71-803c67e950f7> in <module>()
----> 1 bytearray([256])
ValueError: byte must be in range(0, 256)
"""
# class bytes([source[, encoding[, errors]]])
# 和上面的一样,区别: bytes is an immutable version of bytearray – it has the same non-mutating methods and the same indexing and slicing behavior.
# callable(object)
# 在python3.0中移除,又在3.2加回来了
# 如果可调用,返回True,否则是False
# 即使是True,也不能保证每次都成功,但如果是False,那么必然失败
# class是可调用的,调用class返回一个实例;如果这个class有__call__,那么这个实力也是可调用的
"""
In [29]: class IsCallable(object):
...: pass
...:
In [30]: callable(IsCallable)
Out[30]: True
In [31]: callable(IsCallable())
Out[31]: False
In [32]: class AndInstanceIsCallable(object):
...: def __call__(self):
...: pass
...:
In [33]: callable(AndInstanceIsCallable)
Out[33]: True
In [34]: callable(AndInstanceIsCallable())
Out[34]: True
"""
# chr(i)
# Return the string representing a character whose Unicode code point is the integer i.
# i的参数范围是0 - 1,114,111 (0x10FFFF in base 16).
"""
In [39]: chr(97)
Out[39]: 'a'
In [36]: chr(8364)
Out[36]: '€'
In [37]: chr(1114111)
Out[37]: '\U0010ffff'
In [38]: chr(1114112)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-38-59208660f350> in <module>()
----> 1 chr(1114112)
ValueError: chr() arg not in range(0x110000)
"""
# classmethod(function)
# 是一个函数修饰器
"""
In [42]: class B:
...: @classmethod
...: def f(cls):
...: print(B.f)
...:
In [43]: B.f()
<bound method B.f of <class '__main__.B'>>
"""
# compile(source, filename, mode, flags=0, dont_inherit=False, optimize=-1)
# TODO
# class complex([real[, imag]])
# 第二个参数永远不能是string,
# 第一个参数如果是一个字符串,被解析为复数,这个时候,第二个参数不能赋值
"""
In [52]: complex('123+456j')
Out[52]: (123+456j)
In [53]: complex('123+456j', 0)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-53-46a74c5496e6> in <module>()
----> 1 complex('123+456j', 0)
TypeError: complex() can't take second arg if first is a string
"""
# 每一个参数都是numeric type(包括复数)
# 如果第二个参数省略,默认是实数+0j,如果第一个参数省略,那么返回0j
"""
In [54]: complex()
Out[54]: 0j
In [55]: complex(123)
Out[55]: (123+0j)
In [56]: complex(123,456)
Out[56]: (123+456j)
In [61]: complex(4, complex(6, 7))
Out[61]: (-3+6j)
In [62]: complex(complex(2, 3), complex(6, 7))
Out[62]: (-5+9j)
"""
# delattr(object, name)
# TODO
# class dict(**kwarg)
# class dict(mapping, **kwarg)
# class dict(iterable, **kwarg)
# (**kwarg)
"""
In [72]: dict(k1='v1', k2='v2')
Out[72]: {'k1': 'v1', 'k2': 'v2'}
In [73]: data = {
...: 'k1': 'v1',
...: 'k2': 'v2'
...: }
...: dict(**data)
...:
Out[73]: {'k1': 'v1', 'k2': 'v2'}
"""
# (mapping, **kwarg)
# TODO
# (iterable, **kwarg)
# TODO
# dir([object])
# 如果没有参数,返回current local scope(当前本地命令?)的列表
# 有参数的话,试图返回那个参数的合法属性的列表
# 对于__dir__和__dict__的一段描述,见文档
# 对于不同的types of objects,dir的行为是不一样的
"""
In [1]: import struct
In [2]: dir()
Out[2]:
['In',
'Out',
'_',
'__',
'___',
'__builtin__',
'__builtins__',
'__doc__',
'__loader__',
'__name__',
'__package__',
'__spec__',
'_dh',
'_i',
'_i1',
'_i2',
'_ih',
'_ii',
'_iii',
'_oh',
'_sh',
'exit',
'get_ipython',
'quit',
'struct']
In [3]: dir(struct)
Out[3]:
['Struct',
'__all__',
'__builtins__',
'__cached__',
'__doc__',
'__file__',
'__loader__',
'__name__',
'__package__',
'__spec__',
'_clearcache',
'calcsize',
'error',
'iter_unpack',
'pack',
'pack_into',
'unpack',
'unpack_from']
In [4]: class B(object):
...: def __dir__(self):
...: return ['run']
...:
In [5]: dir(B())
Out[5]: ['run']
"""
# divmod(a, b)
# 参数是两个不是复数的数字类型
# 如果是两个整数,那么返回商和余数
"""
In [6]: divmod(12, 5)
Out[6]: (2, 2)
In [8]: divmod(12, 4.9)
Out[8]: (2.0, 2.1999999999999993)
In [9]: divmod(12, 4)
Out[9]: (3, 0)
"""
# enumerate(iterable, start=0)
# 接受一个可迭代的参数,返回一个枚举对象
# __next__()返回一个元组,第一项是index,第二项是数据,start参数是index的开始值
# 所以list可以拿到list
"""
In [27]: list(enumerate(seasons))
Out[27]: [(0, 'Spring'), (1, 'Summer'), (2, 'Fall'), (3, 'Winter')]
In [28]: list(enumerate(seasons, start=2))
Out[28]: [(2, 'Spring'), (3, 'Summer'), (4, 'Fall'), (5, 'Winter')]
"""
# eval(expression, globals=None, locals=None)
# globals参数是可选的,如果提供,那么是一个dictionary
# locals参数是可选的,如果提供,那么可以是任何mapping object
"""
In [1]: eval('1+1')
Out[1]: 2
In [2]: eval(bin(10))
Out[2]: 10
"""
# exec(object[, globals[, locals]])
"""
In [7]: exec('print("1234567890")')
1234567890
"""
# filter(function, iterable)
"""
In [19]: a = [1, 2, -1, 0, 4, -2]
...: list(filter(lambda i: i > 0, a))
...:
Out[19]: [1, 2, 4]
"""
# class float([x])
"""
sign ::= "+" | "-"
infinity ::= "Infinity" | "inf"
nan ::= "nan"
numeric_value ::= floatnumber | infinity | nan
numeric_string ::= [sign] numeric_value
"""
"""
In [22]: float('+1.23')
Out[22]: 1.23
In [23]: float(' -12345\n')
Out[23]: -12345.0
In [24]: float('1e-003')
Out[24]: 0.001
In [25]: float('+1E6')
Out[25]: 1000000.0
In [26]: float('-Infinity')
Out[26]: -inf
"""
# 3.6中使用下划线分组可以识别了
"""
In [35]: float('11_22.99')
Out[35]: 1122.99
"""
# format(value[, format_spec])
# TODO
# class frozenset([iterable])
"""
In [38]: a = [1, 2, -1, 0, 4, -2]
...: frozenset(a)
...:
Out[38]: frozenset({-2, -1, 0, 1, 2, 4})
"""
# getattr(object, name[, default])
"""
In [44]: class A(object):
...: def a(self):
...: pass
...: getattr(A, 'a')
...:
Out[44]: <function __main__.A.a>
In [48]: getattr(A, 'b', 'default b')
Out[48]: 'default b'
"""
# globals()
# 返回当前全局符号标志表的字典,
# 返回的是被定义的地方的xx,而不是被called的xx
# hasattr(object, name)
"""
In [44]: class A(object):
...: def a(self):
...: pass
In [50]: hasattr(A, 'a')
Out[50]: True
In [51]: hasattr(A, 'b')
Out[51]: False
"""
# hash(object)
"""
In [52]: hash(A)
Out[52]: -9223363274321361268
In [56]: a = [1]
In [57]: hash(a)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-57-fe724719d9a1> in <module>()
----> 1 hash(a)
TypeError: unhashable type: 'list'
"""
# help([object])
# 交互式的函数
"""
In [60]: help(help)
Help on _Helper in module _sitebuiltins object:
class _Helper(builtins.object)
| Define the builtin 'help'.
|
| This is a wrapper around pydoc.help that provides a helpful message
| when 'help' is typed at the Python interactive prompt.
|
| Calling help() at the Python prompt starts an interactive help session.
| Calling help(thing) prints help for the python object 'thing'.
|
| Methods defined here:
|
| __call__(self, *args, **kwds)
| Call self as a function.
|
| __repr__(self)
| Return repr(self).
|
| ----------------------------------------------------------------------
| Data descriptors defined here:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
(END)
"""
# id(object)
# 返回一个object的identity,在不重叠的周期内,两个id可能相同
# input([prompt])
"""
In [74]: s = input('--> ')
--> hello world
In [75]: s
Out[75]: 'hello world'
"""
# class int(x=0)
# class int(x, base=10)
# 没有参数没有默认值0
# 如果参数是一个数字(非复数),那么将其向0靠拢截断为整数
"""
In [76]: int()
Out[76]: 0
In [78]: int(1.1)
Out[78]: 1
In [79]: int(1.8)
Out[79]: 1
"""
# 在3.6中,支持下划线作为分隔符
"""
In [81]: int(1_1.8)
Out[81]: 11
"""
# 如果第一个参数是字符串,那么将使用第二个参数作为进制数解析第一个参数
"""
In [89]: int('010', 8)
Out[89]: 8
In [92]: int('010', 0)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-92-1fdf06fb8fa4> in <module>()
----> 1 int('010', 0)
ValueError: invalid literal for int() with base 0: '010'
In [93]: int('111', 2)
Out[93]: 7
"""
# isinstance(object, classinfo)
"""
In [101]: isinstance([], list)
Out[101]: True
In [102]: isinstance([], list())
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-102-a1225406137a> in <module>()
----> 1 isinstance([], list())
TypeError: isinstance() arg 2 must be a type or tuple of types
"""
# 第二个参数可以是type object的元组或者可递归的
"""
In [104]: isinstance([], (list, str))
Out[104]: True
In [105]: isinstance({}, (list, str))
Out[105]: False
"""
# issubclass(class, classinfo)
# 判断一个第一个参数是不是第二个参数的子类
# 一个类是他自己的子类
# 第二个参数可以是type object的元组或者可递归的
"""
In [109]: class A(object):
...: pass
...: class B(A):
...: pass
In [110]: issubclass(A, object)
Out[110]: True
In [113]: issubclass(A, B)
Out[113]: False
In [112]: issubclass(A, (B, object))
Out[112]: True
"""
# iter(object[, sentinel])
# TODO
# len(s)
# The argument may be a sequence (such as a string, bytes, tuple, list, or range) or a collection (such as a dictionary, set, or frozen set).
"""
In [1]: len('aaaa')
Out[1]: 4
In [2]: len([1, 2, 3])
Out[2]: 3
In [3]: len({'a': 'a', 'b': 'b'})
Out[3]: 2
"""
# class list([iterable])
# TODO
# locals()
# TODO
# map(function, iterable, ...)
"""
In [6]: list(map(lambda n: n * 2, [1, 2, 3]))
Out[6]: [2, 4, 6]
"""
# max(iterable, *[, key, default])
# max(arg1, arg2, *args[, key])
# 如果提供一个参数,返回其最大的;如果是两个,返回那个值最大的所在的项
"""
In [7]: max([2, 4, 6])
Out[7]: 6
In [12]: max([2, 4, 6], [10, 1])
Out[12]: [10, 1]
"""
# memoryview(obj)
# TODO
# min(arg1, arg2, *args[, key])
"""
In [14]: min([1, 2, 3])
Out[14]: 1
In [15]: min([1, 2, 3], [4])
Out[15]: [1, 2, 3]
"""
# next(iterator[, default])
# TODO
# class object
# 是所有class的base。
# oct(x)
# 将一个整数变成一个八进制python合法的表达式
# 如果不是一个整数,那么__index__方法应该返回一个整数
"""
In [35]: oct(10)
Out[35]: '0o12'
In [36]: 0o12
Out[36]: 10
In [37]: class A(object):
...: def __index__(self):
...: return 10
In [39]: oct(A())
Out[39]: '0o12'
"""
# open(file, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True, opener=None)
"""
In [51]: f = open('BinaryData.py')
In [52]: f
Out[52]: <_io.TextIOWrapper name='BinaryData.py' mode='r' encoding='UTF-8'>
In [53]: f.closed
Out[53]: False
In [54]: f.close()
In [55]: f.closed
Out[55]: True
"""
"""
Character Meaning
'r' 读文件(默认)
'w' open for writing, truncating the file first
'x' open for exclusive creation, failing if the file already exists
'a' 追加文件
'b' binary mode
't' text mode (default)
'+' open a disk file for updating (reading and writing)
'U' universal newlines mode (deprecated)
"""
# TODO
# ord(c)
# 和 chr()相反
# 返回一个字符的Unicode code
"""
In [59]: ord('a')
Out[59]: 97
In [60]: ord('€')
Out[60]: 8364
In [61]: ord('\U0010ffff')
Out[61]: 1114111
"""
# pow(x, y[, z])
# pow(a, b) ---> a**b, 幂运算
# pow(a, b, c) ---> (a**b) % c ,幂运算然后取模
"""
In [66]: pow(2, 3)
Out[66]: 8
In [67]: pow(2, 3, 3)
Out[67]: 2
"""
# print(*objects, sep=' ', end='\n', file=sys.stdout, flush=False)
# print() ---> print(end='\n')
"""
In [9]: print()
In [10]: print(1, 2, 3)
1 2 3
In [11]: print(1, 2, 3, sep='==')
1==2==3
In [12]: print(1, 2, 3, end='--')
1 2 3--
"""
# class property(fget=None, fset=None, fdel=None, doc=None)
"""
In [1]: class A(object):
...: _x = None
...:
...: @property
...: def x(self):
...: return self._x
...:
...: @x.setter
...: def x(self, v):
...: self._x = v
...:
...: @x.deleter
...: def x(self):
...: del self._x
...:
...: a = A()
In [3]: a.x
In [5]: a.x = 1
In [6]: a.x
Out[6]: 1
In [7]: del a.x
In [8]: a.x
"""
# range(stop)
# range(start, stop[, step])
"""
In [11]: list(range(10))
Out[11]: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
In [12]: list(range(2, 10))
Out[12]: [2, 3, 4, 5, 6, 7, 8, 9]
In [14]: list(range(2, 10, 3))
Out[14]: [2, 5, 8]
"""
# repr(object)
"""
In [17]: class A(object):
...: def __repr__(self):
...: return 'this is repr.'
...:
...: repr(A())
Out[17]: 'this is repr.'
"""
# reversed(seq)
# 返回一个逆序的iterator
# seq必须是一个实现了 __reversed__() 的 object
# 或者支持 sequence protocol ( the __len__() method and the __getitem__() method with integer arguments starting at 0 )
# round(number[, ndigits])
# 参考资料 https://docs.python.org/3.6/tutorial/floatingpoint.html#tut-fp-issues
# class set([iterable])
# setattr(object, name, value)
"""
In [22]: class A(object):
...: pass
In [25]: setattr(A, 'a', '2')
In [26]: A.a
Out[26]: '2'
"""
# class slice(stop)
# class slice(start, stop[, step])
# See itertools.islice() for an alternate version that returns an iterator.
# sorted(iterable[, key][, reverse])
# staticmethod(function)
"""
In [28]: class A(object):
...: @staticmethod
...: def f():
...: print('A.f or A().f')
In [29]: A.f()
A.f or A().f
In [30]: A().f()
A.f or A().f
"""
# class str(object='')
# class str(object=b'', encoding='utf-8', errors='strict')
# sum(iterable[, start])
# super([type[, object-or-type]])
# tuple([iterable])
# 一个 immutable sequence type ,不可变序列类型?
# class type(object)
# class type(name, bases, dict)
# 一个参数,检测类型。isinstance() 推荐类测试 the type of an object
# With three arguments, return a new type object. This is essentially a dynamic form of the class statement
# vars([object])
# return __dict__
# Without an argument, vars() acts like locals(). Note, the locals dictionary is only useful for reads since updates to the locals dictionary are ignored.
# zip(*iterables)
"""
In [40]: list(zip('ABCD', 'xy'))
Out[40]: [('A', 'x'), ('B', 'y')]
In [43]: list(itertools.zip_longest('ABCD', 'xy'))
Out[43]: [('A', 'x'), ('B', 'y'), ('C', None), ('D', None)]
"""
# __import__(name, globals=None, locals=None, fromlist=(), level=0)
| {
"repo_name": "Chyroc/study-code",
"path": "Language/Python/BuildInFunctions.py",
"copies": "1",
"size": "19012",
"license": "mit",
"hash": 8457115140151026000,
"line_mean": 18.2836484983,
"line_max": 155,
"alpha_frac": 0.5492616521,
"autogenerated": false,
"ratio": 2.401773344416736,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3451034996516736,
"avg_score": null,
"num_lines": null
} |
# A Bug in the ADC perhaps? This is an incompletely-explored specimin.
# simple
import array
from machine import Pin
from pyb import Timer, rng, ADC, DAC, LED, Switch
from time import sleep, ticks_ms, ticks_diff
import micropython
def hv(signal, harmonic):
    """Harmonic value: mean of signal[i] * signal[(harmonic*i) % N] over the signal."""
    n = len(signal)
    total = 0
    for i, sample in enumerate(signal):
        total += sample * signal[(harmonic * i) % n]
    return total / n
class LaserBeam:
    """Laser break-beam sensor: a laser driven by an open-drain pin and a
    photodiode read through the ADC."""

    def __init__(self, laser_pinname, photodiode_pinname):
        # Open-drain output: value(0) sinks current (laser on),
        # value(1) floats the line (laser off) -- see ping().
        self.laser = Pin(laser_pinname, Pin.OUT_OD)
        self.photodiode = ADC(photodiode_pinname)
        # Minimum (light - dark) ADC delta for the beam to count as unbroken.
        self.threshold = 100

    def ping(self):
        """Pulse the laser once and return the ADC delta (light - dark)."""
        dark = self.photodiode.read()
        self.laser.value(0) # pull down to on
        light = self.photodiode.read()
        self.laser.value(1) # float to off
        return light-dark

    def interrupted(self):
        """True when the beam looks blocked: one ping below threshold,
        confirmed by ten more pings whose sum is also below threshold."""
        return self.ping() < self.threshold \
            and sum(self.ping() for i in range(10)) < 10 * self.threshold
class Mic:
    """Microphone level sensor sampled in bursts via ADC.read_timed."""

    def __init__(self, mic_pinname, timer_id=6):
        self.mic = ADC(mic_pinname)
        # Timer paces read_timed at 48 kHz.
        self.tim = Timer(timer_id, freq=48000)
        # 4800 signed 16-bit samples at 48 kHz = 100 ms of audio per level() call.
        self.samples = array.array('h', range(4800))
        self.normalized_spl = 0.0

    def level(self):
        """Capture one burst and return its mean-square level, clamped to [0, 1]."""
        samples = self.samples
        self.mic.read_timed(samples, self.tim)  # blocks while the buffer fills
        ave = sum(samples) / len(samples)
        # Mean-square about the DC average, scaled by 2278619.0 -- presumably an
        # empirical full-scale value for this hardware; TODO confirm.
        self.normalized_spl = \
            min(1.0, sum((v-ave)**2 for v in samples) / len(samples) / 2278619.0)
        return self.normalized_spl

    def excited(self):
        """True when the last-sampled level exceeds a fixed activity floor."""
        return self.level() > 0.01
class Piano:
    """Decides whether the piano is being played, using the laser beam as the
    primary sensor and the microphone as an auxiliary one."""

    def __init__(self, mic, beam):
        self.mic = mic
        self.beam = beam
        self.beam_ever_interrupted = self.mic_ever_excited = False
        self.being_played = False
        # Longest silent gap (ms) between beam interruptions still counted
        # as continuous playing.
        self.ms_internote = 30 * 1000

    def poll_beam(self):
        """Check the beam; timestamp and latch any interruption."""
        if self.beam.interrupted():
            self.beam_interrupted_t = ticks_ms()
            self.beam_ever_interrupted = True
            return True
        else:
            return False

    def poll_mic(self):
        """Check the mic; timestamp and latch any sound activity."""
        if self.mic.excited():
            self.mic_excited_t = ticks_ms()
            self.mic_ever_excited = True
            return True
        else:
            return False

    def playing(self):
        """
        Determine if the piano is being played:
        1. A beam interruption (transition from uninterrupted to interrupted)
           indicates the start of playing.
        2. It's no longer being played if the inter-note time has passed with
           no subsequent beam interruption
        """
        # BUG FIX: the original computed ticks_diff(self.beam_interrupted_t,
        # ticks_ms()), i.e. (old - now), which is negative and therefore
        # *always* below ms_internote -- so the piano looked "playing" forever
        # after the first interruption.  Elapsed time is
        # ticks_diff(ticks_ms(), start) per the MicroPython time docs.
        return self.poll_beam() \
            or self.beam_ever_interrupted \
            and ticks_diff(ticks_ms(), self.beam_interrupted_t) < self.ms_internote
class CL1:
    """Tape-deck remote-control interface.

    Command lines idle high and are pulsed low to "press" a button; status
    lines are active-low (0 means that transport mode is engaged).
    """

    def __init__(self, stop_cmd, stop_status,
                 record_cmd, rec_status,
                 play_cmd, play_status):
        # Command outputs initialized high (inactive).
        self.record_cmd = Pin(record_cmd, Pin.OUT, value=1)
        self.stop_cmd = Pin(stop_cmd, Pin.OUT, value=1)
        self.play_cmd = Pin(play_cmd, Pin.OUT, value=1)
        # Status inputs, read directly; low = active.
        self.rec_status = Pin(rec_status)
        self.stop_status = Pin(stop_status)
        self.play_status = Pin(play_status)
        # Seconds spent in each half of a command pulse.
        self.pulse_duration = 0.2

    def stopped(self):
        # Active-low status line.
        return not self.stop_status()

    def recording(self):
        return not self.rec_status()

    def playing(self):
        return not self.play_status()

    #FIXME: timeouts
    def stop(self):
        # Keep pulsing until the deck reports stopped.  The trailing check
        # re-reads the pin; it only fires if the status changed again after
        # the loop exited.
        while not self.stopped():
            self._pulse_low(self.stop_cmd)
        if not self.stopped():
            print('Hey, stop!')

    def record(self):
        while not self.recording():
            self._pulse_low(self.record_cmd)
        if not self.recording():
            print('Hey, record!')

    def play(self):
        while not self.playing():
            self._pulse_low(self.play_cmd)
        if not self.playing():
            print('Hey, play!')

    def _pulse_low(self, what):
        # Simulate a button press: drive low, hold, release, settle.
        what(0)
        sleep(self.pulse_duration)
        what(1)
        sleep(self.pulse_duration)

    def status(self):
        """Return a space-joined string of the currently-active mode names."""
        s = (self.stopped(), self.recording(), self.playing())
        names = ('stopped', 'recording', 'playing')
        return ' '.join(v[1] for v in zip(s, names) if v[0])
class Lights:
    """Mirrors system state onto the board's four LEDs."""

    def __init__(self, mic, beam, deck):
        self.mic = mic
        self.beam = beam
        self.deck = deck
        self.leds = [LED(1), LED(2), LED(3), LED(4)]

    def update(self):
        """Refresh all four LEDs from the current sensor/deck state."""
        l = self.leds
        # LED 1: deck recording; LED 2: deck playing.
        if self.deck.recording():
            l[0].on()
        else:
            l[0].off()
        if self.deck.playing():
            l[1].on()
        else:
            l[1].off()
        # LED 3: beam strength, ADC delta scaled down by 16 to an intensity.
        l[2].intensity(self.beam.ping() >> 4)
        # LED 4: last-measured mic level scaled to 0..256.
        l[3].intensity(int(256*self.mic.normalized_spl))
def main():
    """Wire up sensors, deck and lights, then loop: record while the piano is
    being played, stop when it goes quiet."""
    micropython.alloc_emergency_exception_buf(100)
    print('simp here')
    beam = LaserBeam('X1', 'X11')
    mic = Mic('X12')
    deck = CL1('X17', 'X18', 'X19', 'X20', 'X21', 'X22')
    piano = Piano(mic, beam)
    lights = Lights(mic, beam, deck)
    pushbutton = Switch()
    verbose = False
    # The helpers below are diagnostic probes kept while chasing the ADC bug
    # mentioned at the top of the file; only show() is actually called.
    def was_show(): # BAD
        lights.update()
        #if pushbutton():
        if True:
            print('laser {}, mic {}'.format(beam.interrupted(), mic.excited()), end=' ')
            print('deck %s' % deck.status(), end=' ')
            if piano.playing():
                print('Piano being played', end='')
            print()
    def show():
        lights.update()
        print('laser {}'.format(beam.interrupted()), end=' ')
        print('deck %s' % deck.status(), end=' ')
        if piano.playing():
            print('Piano being played', end='')
        print()
    def s11(): # BAD
        mic.excited()
        print('deck %s' % deck.status())
    def s12(): # ok
        mic.excited()
        sleep(0.1)
        print('deck %s' % deck.status())
    def s13(): # BAD
        mic.excited()
        sleep(0.001)
        print('deck %s' % deck.status())
    def s14(): # BAD: prints "deck" (or maybe "deck ") only
        mic.excited()
        sleep(0.01)
        print('deck %s' % deck.status())
    def s15(): # BAD
        mic.excited()
        # NOTE(review): gc is never imported in this file; this diagnostic is
        # dead code and would raise NameError if called.
        gc.collect()
        print('deck %s' % deck.status())
    def foo(): # ok
        print('deck %s' % deck.status(), end=' ')
    sleep(1) # stabilize
    while True:
        show()
        if piano.playing() and not deck.recording():
            deck.record()
            print("record")
            while piano.playing():
                show()
            deck.stop()
            print("stop")
if __name__ == '__main__':
    main()
| {
"repo_name": "pramasoul/pyboard-music-detector",
"path": "bug.py",
"copies": "1",
"size": "7426",
"license": "mit",
"hash": 2067080835494769200,
"line_mean": 28.8232931727,
"line_max": 88,
"alpha_frac": 0.5324535416,
"autogenerated": false,
"ratio": 3.5244423350735645,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9536984378660871,
"avg_score": 0.003982299602538717,
"num_lines": 249
} |
"""A building is a saved set of locations on a board and has a color and
possibly an owner. A building also includes the locations of attached
stables.
A building can only have building pieces added to it if it does not have an
owner. A stable can be added to a building at any time. When adding to a
building, a gap must be left between buildings and the well."""
from Player import *
from Location import *
import random
# Sentinel owner value distinct from None; not referenced in this module's
# visible code -- presumably used by callers to mark neutrally-owned buildings.
NEUTRAL_OWNER = ()
def make_building(color, start):
    """Create a new, unowned building of *color* whose first piece is at *start*."""
    building = {}
    building['color'] = color
    building['locations'] = [start]
    building['stables'] = []
    building['owner'] = None
    building['owner_color'] = None
    building['rooftop'] = None
    return building
def clone_building(building):
    """Return a copy of *building*: a fresh dict with fresh location/stable lists."""
    return {
        'color': building['color'],
        'locations': list(building['locations']),
        'stables': list(building['stables']),
        'owner': building['owner'],
        'owner_color': building['owner_color'],
        'rooftop': building['rooftop'],
    }
def get_building_color(building):
    """Gets the color of a building"""
    # Plain key lookup; raises KeyError if *building* is not a building dict.
    return building['color']
def get_building_locations(building):
    """Return a copy of the locations occupied by building pieces."""
    return list(building['locations'])
def attach_building_locations(building, location):
    """Attach a building piece at *location*; the building must not be claimed."""
    assert not has_owner(building)  # claimed buildings may not grow
    pieces = building['locations']
    pieces.append(location)
def get_stable_locations(building):
    """Return a copy of the locations of stables attached to the building."""
    return list(building['stables'])
def buidling_contains_location(building, location):
    """True when *location* is one of the building's piece locations."""
    # Membership test directly on the stored list (inline of the getter).
    return location in building['locations']
def get_building_and_stables(building):
    """Return the union of building-piece and stable locations as a set."""
    combined = set(building['stables'])
    combined.update(building['locations'])
    return combined
def buliding_contans_location_stables(building, location):
    """Checks if a building or its attached stables contains a location.

    (Misspelled function name kept for caller compatibility.)
    """
    # BUG FIX: the original referenced the undefined name 'locaiton', so this
    # raised NameError on every call.
    return (location in building['locations']
            or location in building['stables'])
def get_building_peice_attach(building):
    """Locations where a new building piece may attach: the building's
    orthogonal neighbours minus any attached stables."""
    stables = set(get_stable_locations(building))
    return get_building_orthogonal(building) - stables
def get_building_orthogonal(building):
    """Set of locations orthogonally adjacent to the building's pieces.

    Piece locations that are also stables are skipped as sources; locations
    occupied by pieces are excluded from the result, so attached stables can
    appear in it.
    """
    pieces = get_building_locations(building)
    stables = get_stable_locations(building)
    neighbors = set()
    for piece in pieces:
        if piece in stables:
            continue
        for orth in get_orthogonal(piece):
            if orth not in pieces:
                neighbors.add(orth)
    return neighbors
def get_building_stable_orthogonal(building):
    """Set of locations orthogonally adjacent to the building and its attached
    stables, excluding every occupied location."""
    occupied = get_building_locations(building)
    occupied.extend(get_stable_locations(building))
    neighbors = set()
    for spot in occupied:
        for orth in get_orthogonal(spot):
            if orth not in occupied:
                neighbors.add(orth)
    return neighbors
def get_building_adjacent(building):
    """Set of locations adjacent (all directions) to the building's pieces,
    excluding the pieces themselves; attached stable locations may appear."""
    pieces = get_building_locations(building)
    adjacent = set()
    for piece in pieces:
        for neighbor in get_adjacent(piece):
            if neighbor not in pieces:
                adjacent.add(neighbor)
    return adjacent
def get_building_stable_adjacent(building):
    """Set of locations adjacent to any attached part of the building
    (pieces or stables), excluding every occupied location."""
    occupied = get_building_locations(building)
    occupied.extend(get_stable_locations(building))
    adjacent = set()
    for spot in occupied:
        for neighbor in get_adjacent(spot):
            if neighbor not in occupied:
                adjacent.add(neighbor)
    return adjacent
def attach_stable_location(building, location):
    """Attach a stable at *location* (allowed even when the building is claimed)."""
    stables = building['stables']
    stables.append(location)
def get_owner_color(building):
    """Gets the color of the owner"""
    # None until assign_owner() has been called.
    return building['owner_color']
def get_rooftop_location(building):
    """Gets the location of a rooftop in a building"""
    # None until assign_owner() has been called.
    return building['rooftop']
def get_owner(building):
    """Gets the owner of a building."""
    # None while unclaimed; set by assign_owner().
    return building['owner']
def has_owner(building):
    """Checks if a building has an owner (owner is not None)."""
    # 'is not None' identity check instead of '!= None'; inlines get_owner's
    # dict lookup.  Note NEUTRAL_OWNER (an empty tuple) still counts as an owner.
    return building['owner'] is not None
def assign_owner(building, player, color, rooftop=None):
    """Sets the owner of a building. The building must not have an owner to be
    claimed.

    If *rooftop* is None or is not one of the building's piece locations, a
    random building location is chosen for the rooftop instead.
    """
    assert not has_owner(building)
    building['owner'] = player
    building['owner_color'] = color
    # 'is None' identity check instead of '== None' (same behavior; locations
    # are tuples and never compare equal to None).
    if rooftop is None or rooftop not in get_building_locations(building):
        rooftop = random.choice(get_building_locations(building))
    building['rooftop'] = rooftop
| {
"repo_name": "nicholas-maltbie/Medina",
"path": "Building.py",
"copies": "1",
"size": "5906",
"license": "mit",
"hash": 1145090060936310300,
"line_mean": 39.3006993007,
"line_max": 101,
"alpha_frac": 0.6884524213,
"autogenerated": false,
"ratio": 4.182719546742209,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0058856044012054205,
"num_lines": 143
} |
# A bunch of CONVERTERS for the Fit part of Alphabet.
import os
import math
from array import array
import optparse
import ROOT
from ROOT import *
import scipy
#### LINEAR ####
class LinearFit:
    """Linear fit y = p0 + p1*x with +/-1-sigma error-band TF1s and string
    expressions suitable for ROOT formula/cut strings."""

    def __init__(self, init_var, range_min, range_max, name, Opt):
        # init_var: starting values (p0, p1); Opt: fit option string kept for callers.
        self.Opt = Opt
        self.rm = range_min
        self.rp = range_max
        self.name = name
        self.fit = TF1("LinearFit_"+self.name, "[0]+ [1]*x",self.rm,self.rp)
        self.fit.SetParameter(0, init_var[0])
        self.fit.SetParameter(1, init_var[1])

    def Converter(self, fitter):
        """Build ErrUp/ErrDn TF1s: central value +/- the propagated 1-sigma error
        sqrt(x^2*s1^2 + 2x*cov01 + s0^2), taking the covariance from *fitter*."""
        # Parameter mapping: [2]=sigma(p0), [3]=sigma(p1), [4]=cov(p0,p1).
        self.ErrUp = TF1("LinearFitErrorUp"+self.name, "[0]+ [1]*x + sqrt((x*x*[3]*[3])+(x*2*[4])+([2]*[2]))",self.rm,self.rp)
        self.ErrUp.SetParameter(0, self.fit.GetParameter(0))
        self.ErrUp.SetParameter(1, self.fit.GetParameter(1))
        self.ErrUp.SetParameter(2, self.fit.GetParErrors()[0])
        self.ErrUp.SetParameter(3, self.fit.GetParErrors()[1])
        self.ErrUp.SetParameter(4, fitter.GetCovarianceMatrixElement(0,1))
        self.ErrDn = TF1("LinearFitErrorDn"+self.name, "[0]+ [1]*x - sqrt((x*x*[3]*[3])+(x*2*[4])+([2]*[2]))",self.rm,self.rp)
        self.ErrDn.SetParameter(0, self.fit.GetParameter(0))
        self.ErrDn.SetParameter(1, self.fit.GetParameter(1))
        self.ErrDn.SetParameter(2, self.fit.GetParErrors()[0])
        self.ErrDn.SetParameter(3, self.fit.GetParErrors()[1])
        self.ErrDn.SetParameter(4, fitter.GetCovarianceMatrixElement(0,1))

    def MakeConvFactor(self, var, center):
        """Bake the fitted central/up/down curves into formula strings in terms
        of (*var* - *center*), for use in ROOT selection strings."""
        X = var + "-" + str(center)
        self.ConvFact = "({0:2.9f} + (({2})*{1:2.9f}))".format(self.ErrUp.GetParameter(0),self.ErrUp.GetParameter(1),X)
        self.ConvFactUp = "({0:2.9f} + (({5})*{1:2.9f}) + (({5})*({5})*{3:2.9f}*{3:2.9f}+(({5})*2*{4:2.9f})+({2:2.9f}*{2:2.9f}))^0.5)".format(self.ErrUp.GetParameter(0),self.ErrUp.GetParameter(1),self.ErrUp.GetParameter(2),self.ErrUp.GetParameter(3),self.ErrUp.GetParameter(4),X)
        self.ConvFactDn = "({0:2.9f} + (({5})*{1:2.9f}) - (({5})*({5})*{3:2.9f}*{3:2.9f}+(({5})*2*{4:2.9f})+({2:2.9f}*{2:2.9f}))^0.5)".format(self.ErrUp.GetParameter(0),self.ErrUp.GetParameter(1),self.ErrUp.GetParameter(2),self.ErrUp.GetParameter(3),self.ErrUp.GetParameter(4),X)
#### QUADRATIC ####
class QuadraticFit:
    """Quadratic fit y = p0 + p1*x + p2*x^2 with +/-1-sigma error-band TF1s
    and string expressions for ROOT formula/cut strings."""

    def __init__(self, init_var, range_min, range_max, name, Opt):
        self.Opt = Opt
        self.rm = range_min
        self.rp = range_max
        self.name = name
        # Name suffixed with self.name for consistency with LinearFit/CubicFit;
        # the unsuffixed name collided in ROOT's global object registry when
        # several QuadraticFits were created.
        self.fit = TF1("QuadraticFit_"+self.name, "[0]+ [1]*x + [2]*x*x",self.rm,self.rp)
        # BUG FIX: all three seeds were init_var[0]; use the per-parameter
        # starting values as LinearFit and CubicFit do.
        self.fit.SetParameter(0, init_var[0])
        self.fit.SetParameter(1, init_var[1])
        self.fit.SetParameter(2, init_var[2])
        #self.fit.SetParLimits(2,0,20)

    def Converter(self, fitter):
        """Build ErrUp/ErrDn TF1s: central value +/- the propagated 1-sigma error
        using the parameter errors and covariances taken from *fitter*."""
        # Parameter mapping: [3..5]=sigma(p0..p2), [6]=cov01, [7]=cov02, [8]=cov12.
        self.ErrUp = TF1("QuadrarticFitErrorUp"+self.name, "[0]+ [1]*x + [2]*x*x + sqrt(([3]*[3]) + (2*x*[6]) + (x*x*[4]*[4]) + (2*x*x*[7]) + (2*x*x*x*[8]) + (x*x*x*x*[5]*[5]))",self.rm,self.rp)
        self.ErrUp.SetParameter(0, self.fit.GetParameter(0))
        self.ErrUp.SetParameter(1, self.fit.GetParameter(1))
        self.ErrUp.SetParameter(2, self.fit.GetParameter(2))
        self.ErrUp.SetParameter(3, self.fit.GetParErrors()[0])
        self.ErrUp.SetParameter(4, self.fit.GetParErrors()[1])
        self.ErrUp.SetParameter(5, self.fit.GetParErrors()[2])
        self.ErrUp.SetParameter(6, fitter.GetCovarianceMatrixElement(0,1))
        self.ErrUp.SetParameter(7, fitter.GetCovarianceMatrixElement(0,2))
        self.ErrUp.SetParameter(8, fitter.GetCovarianceMatrixElement(1,2))
        self.ErrDn = TF1("QuadrarticFitErrorDn"+self.name, "[0]+ [1]*x + [2]*x*x - sqrt(([3]*[3]) + (2*x*[6]) + (x*x*[4]*[4]) + (2*x*x*[7]) + (2*x*x*x*[8]) + (x*x*x*x*[5]*[5]))",self.rm,self.rp)
        self.ErrDn.SetParameter(0, self.fit.GetParameter(0))
        self.ErrDn.SetParameter(1, self.fit.GetParameter(1))
        self.ErrDn.SetParameter(2, self.fit.GetParameter(2))
        self.ErrDn.SetParameter(3, self.fit.GetParErrors()[0])
        self.ErrDn.SetParameter(4, self.fit.GetParErrors()[1])
        self.ErrDn.SetParameter(5, self.fit.GetParErrors()[2])
        self.ErrDn.SetParameter(6, fitter.GetCovarianceMatrixElement(0,1))
        self.ErrDn.SetParameter(7, fitter.GetCovarianceMatrixElement(0,2))
        self.ErrDn.SetParameter(8, fitter.GetCovarianceMatrixElement(1,2))

    def MakeConvFactor(self, var, center):
        """Bake the fitted central/up/down curves into formula strings in terms
        of (*var* - *center*)."""
        X = var + "-" + str(center)
        self.ConvFact = "({0:2.9f} + (({3})*{1:2.9f}) + (({3})*({3})*{2:2.9f}))".format(self.ErrUp.GetParameter(0),self.ErrUp.GetParameter(1),self.ErrUp.GetParameter(2),X)
        self.ConvFactUp = "({0:2.9f} + (({9})*{1:2.9f}) + (({9})*({9})*{2:2.9f}) + (({3:2.9f}*{3:2.9f}) + (2*({9})*{6:2.9f}) + (({9})*({9})*{4:2.9f}*{4:2.9f}) + (2*({9})*({9})*{7:2.9f}) + (2*({9})*({9})*({9})*{8:2.9f}) + (({9})*({9})*({9})*({9})*{5:2.9f}*{5:2.9f}))^0.5)".format(self.ErrUp.GetParameter(0),self.ErrUp.GetParameter(1),self.ErrUp.GetParameter(2),self.ErrUp.GetParameter(3),self.ErrUp.GetParameter(4),self.ErrUp.GetParameter(5),self.ErrUp.GetParameter(6),self.ErrUp.GetParameter(7),self.ErrUp.GetParameter(8),X)
        self.ConvFactDn = "({0:2.9f} + (({9})*{1:2.9f}) + (({9})*({9})*{2:2.9f}) - (({3:2.9f}*{3:2.9f}) + (2*({9})*{6:2.9f}) + (({9})*({9})*{4:2.9f}*{4:2.9f}) + (2*({9})*({9})*{7:2.9f}) + (2*({9})*({9})*({9})*{8:2.9f}) + (({9})*({9})*({9})*({9})*{5:2.9f}*{5:2.9f}))^0.5)".format(self.ErrUp.GetParameter(0),self.ErrUp.GetParameter(1),self.ErrUp.GetParameter(2),self.ErrUp.GetParameter(3),self.ErrUp.GetParameter(4),self.ErrUp.GetParameter(5),self.ErrUp.GetParameter(6),self.ErrUp.GetParameter(7),self.ErrUp.GetParameter(8),X)
#### CUBIC ####
class CubicFit:
    """Cubic fit y = p0 + p1*x + p2*x^2 + p3*x^3 with +/-1-sigma error-band
    TF1s and string expressions for ROOT formula/cut strings."""

    def __init__(self, init_var, range_min, range_max, name, Opt):
        self.Opt = Opt
        self.rm = range_min
        self.rp = range_max
        self.name = name
        # (TF1 name typo "CubeicFit" kept: other code may look it up by name.)
        self.fit = TF1("CubeicFit"+self.name, "[0]+ [1]*x + [2]*x^2 + [3]*x^3",self.rm,self.rp)
        self.fit.SetParameter(0, init_var[0])
        self.fit.SetParameter(1, init_var[1])
        self.fit.SetParameter(2, init_var[2])
        self.fit.SetParameter(3, init_var[3])

    def Converter(self, fitter):
        """Build ErrUp/ErrDn TF1s: central value +/- the propagated 1-sigma error
        using parameter errors [4..7] and covariances [8..13] from *fitter*."""
        #errTerm = "[4]^2 +((2*[8])*x) + (([5]^2+2*[9])*x^2) + ((2*[11])*x^3)"
        errTerm = "[4]^2+((2*[8])*x)+(([5]^2+2*[9])*x^2)+((2*[10]+2*[11])*x^3)+(([6]^2+2*[12])*x^4)+((2*[13])*x^5)+(([7]^2)*x^6)"
        self.ErrUp = TF1("CubicFitErrorUp"+self.name, "[0]+ [1]*x + [2]*x*x + [3]*x*x*x + sqrt("+errTerm+")",self.rm,self.rp)
        self.ErrUp.SetParameter(0, self.fit.GetParameter(0))
        self.ErrUp.SetParameter(1, self.fit.GetParameter(1))
        self.ErrUp.SetParameter(2, self.fit.GetParameter(2))
        self.ErrUp.SetParameter(3, self.fit.GetParameter(3))
        self.ErrUp.SetParameter(4, self.fit.GetParErrors()[0])
        self.ErrUp.SetParameter(5, self.fit.GetParErrors()[1])
        self.ErrUp.SetParameter(6, self.fit.GetParErrors()[2])
        self.ErrUp.SetParameter(7, self.fit.GetParErrors()[3])
        self.ErrUp.SetParameter(8, fitter.GetCovarianceMatrixElement(0,1))
        self.ErrUp.SetParameter(9, fitter.GetCovarianceMatrixElement(0,2))
        self.ErrUp.SetParameter(10, fitter.GetCovarianceMatrixElement(0,3))
        self.ErrUp.SetParameter(11, fitter.GetCovarianceMatrixElement(1,2))
        self.ErrUp.SetParameter(12, fitter.GetCovarianceMatrixElement(1,3))
        self.ErrUp.SetParameter(13, fitter.GetCovarianceMatrixElement(2,3))
        # BUG FIX: this TF1 was also named "CubicFitErrorUp"+name, colliding
        # with ErrUp in ROOT's object registry.
        self.ErrDn = TF1("CubicFitErrorDn"+self.name, "[0]+ [1]*x + [2]*x*x + [3]*x*x*x - sqrt("+errTerm+")",self.rm,self.rp)
        self.ErrDn.SetParameter(0, self.fit.GetParameter(0))
        self.ErrDn.SetParameter(1, self.fit.GetParameter(1))
        self.ErrDn.SetParameter(2, self.fit.GetParameter(2))
        self.ErrDn.SetParameter(3, self.fit.GetParameter(3))
        self.ErrDn.SetParameter(4, self.fit.GetParErrors()[0])
        self.ErrDn.SetParameter(5, self.fit.GetParErrors()[1])
        self.ErrDn.SetParameter(6, self.fit.GetParErrors()[2])
        self.ErrDn.SetParameter(7, self.fit.GetParErrors()[3])
        self.ErrDn.SetParameter(8, fitter.GetCovarianceMatrixElement(0,1))
        self.ErrDn.SetParameter(9, fitter.GetCovarianceMatrixElement(0,2))
        self.ErrDn.SetParameter(10, fitter.GetCovarianceMatrixElement(0,3))
        self.ErrDn.SetParameter(11, fitter.GetCovarianceMatrixElement(1,2))
        self.ErrDn.SetParameter(12, fitter.GetCovarianceMatrixElement(1,3))
        self.ErrDn.SetParameter(13, fitter.GetCovarianceMatrixElement(2,3))
        for i in [self.ErrUp, self.ErrDn]:
            i.SetLineStyle(2)

    def MakeConvFactor(self, var, center):
        """Bake the fitted curves into formula strings in terms of (*var* - *center*).

        NOTE(review): these strings reuse the *quadratic* central value and
        error expression (parameters 0..8) and ignore the cubic term and the
        extra covariances -- presumably a copy-paste from QuadraticFit; confirm
        before relying on the resulting band.
        """
        X = var + "-" + str(center)
        self.ConvFact = "({0:2.9f} + (({3})*{1:2.9f}) + (({3})*({3})*{2:2.9f}))".format(self.ErrUp.GetParameter(0),self.ErrUp.GetParameter(1),self.ErrUp.GetParameter(2),X)
        self.ConvFactUp = "({0:2.9f} + (({9})*{1:2.9f}) + (({9})*({9})*{2:2.9f}) + (({3:2.9f}*{3:2.9f}) + (2*({9})*{6:2.9f}) + (({9})*({9})*{4:2.9f}*{4:2.9f}) + (2*({9})*({9})*{7:2.9f}) + (2*({9})*({9})*({9})*{8:2.9f}) + (({9})*({9})*({9})*({9})*{5:2.9f}*{5:2.9f}))^0.5)".format(self.ErrUp.GetParameter(0),self.ErrUp.GetParameter(1),self.ErrUp.GetParameter(2),self.ErrUp.GetParameter(3),self.ErrUp.GetParameter(4),self.ErrUp.GetParameter(5),self.ErrUp.GetParameter(6),self.ErrUp.GetParameter(7),self.ErrUp.GetParameter(8),X)
        self.ConvFactDn = "({0:2.9f} + (({9})*{1:2.9f}) + (({9})*({9})*{2:2.9f}) - (({3:2.9f}*{3:2.9f}) + (2*({9})*{6:2.9f}) + (({9})*({9})*{4:2.9f}*{4:2.9f}) + (2*({9})*({9})*{7:2.9f}) + (2*({9})*({9})*({9})*{8:2.9f}) + (({9})*({9})*({9})*({9})*{5:2.9f}*{5:2.9f}))^0.5)".format(self.ErrUp.GetParameter(0),self.ErrUp.GetParameter(1),self.ErrUp.GetParameter(2),self.ErrUp.GetParameter(3),self.ErrUp.GetParameter(4),self.ErrUp.GetParameter(5),self.ErrUp.GetParameter(6),self.ErrUp.GetParameter(7),self.ErrUp.GetParameter(8),X)
#### LOGARITHMIC ####
#### EXPONENTIAL ####
#### GAUSSIAN ####
#CUSTOM =========--------------=============------------=============-------------===============
| {
"repo_name": "anovak10/plots",
"path": "DDTmethod/Converters.py",
"copies": "1",
"size": "9313",
"license": "mit",
"hash": 3396949255843328500,
"line_mean": 65.0496453901,
"line_max": 517,
"alpha_frac": 0.647696768,
"autogenerated": false,
"ratio": 2.1794991809033464,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.33271959489033465,
"avg_score": null,
"num_lines": null
} |
'''a bunch of examples of how to get a list of urls in parallel
each of them uses a different greenhouse api to retrieve a list of urls in
parallel and return a dictionary mapping urls to response bodies
'''
import greenhouse
# urllib2 obviously doesn't explicitly use greenhouse sockets, but we can
# import it with the socket module patched so it uses them anyway
urllib2 = greenhouse.patched("urllib2")
#
# simply schedule greenlets and use an event to signal the all clear
#
def _get_one(url, results, count, done_event):
    """Fetch one url into *results*; signal *done_event* after the last fetch."""
    body = urllib2.urlopen(url).read()
    results[url] = body
    if len(results) == count:
        done_event.set()  # last one done -> wake the greenlet in get_urls()
def get_urls(urls):
    """Fetch every url in *urls* concurrently; return {url: response body}."""
    results = {}
    finished = greenhouse.Event()
    total = len(urls)
    # one greenlet per url
    for url in urls:
        greenhouse.schedule(_get_one, args=(url, results, total, finished))
    finished.wait()
    return results
#
# create two Queue objects, one for sending urls to be processed, another for
# sending back the results.
#
# this is a little awkward for this specific use case, but is more like how you
# might do it if you didn't have a bounded set of inputs and want to
# continually send off jobs to be run.
#
def _queue_runner(in_q, out_q, stop):
    """Worker loop: fetch urls from *in_q* until the *stop* sentinel arrives."""
    while True:
        url = in_q.get()
        if url is stop:
            break
        body = urllib2.urlopen(url).read()
        out_q.put((url, body))
def get_urls_queue(urls, parallelism=None):
    """Fetch *urls* via worker greenlets fed through a pair of Queues.

    Returns {url: body}.  *parallelism* defaults to one worker per url.
    """
    jobs = greenhouse.Queue()
    done = greenhouse.Queue()
    sentinel = object()  # unique marker telling a worker to exit
    workers = parallelism or len(urls)
    for _ in xrange(workers):
        greenhouse.schedule(_queue_runner, args=(jobs, done, sentinel))
    for url in urls:
        jobs.put(url)
    fetched = {}
    for _ in urls:
        url, body = done.get()
        fetched[url] = body
    for _ in xrange(workers):
        jobs.put(sentinel)
    return fetched
#
# the Queue example above is basically a small reimplementation of Pools
#
def _pool_job(url):
    """Pool worker: fetch *url* and return (url, body)."""
    body = urllib2.urlopen(url).read()
    return url, body
def get_urls_pool(urls, parallelism=None):
    """Fetch *urls* with a greenhouse.Pool; return {url: body}."""
    workers = parallelism or len(urls)
    pool = greenhouse.Pool(_pool_job, workers)
    pool.start()
    for url in urls:
        pool.put(url)
    fetched = {}
    for _ in urls:
        url, body = pool.get()
        fetched[url] = body
    pool.close()
    return fetched
#
# this one returns a list of the results in an order corresponding to the
# arguments instead of a dictionary mapping them (to show off OrderedPool)
#
def _ordered_pool_job(url):
    """OrderedPool worker: fetch *url* and return its body."""
    response = urllib2.urlopen(url)
    return response.read()
def get_urls_ordered_pool(urls, parallelism=None):
    """Fetch *urls*, returning a list of bodies in the same order as *urls*."""
    workers = parallelism or len(urls)
    pool = greenhouse.OrderedPool(_ordered_pool_job, workers)
    pool.start()
    for url in urls:
        pool.put(url)
    # OrderedPool buffers out-of-order completions and yields results in the
    # order the jobs were put().
    bodies = [pool.get() for _ in urls]
    pool.close()
    return bodies
#
# one last version, showcasing a further abstraction of OrderedPool
#
def get_urls_ordered_map(urls, parallelism=None):
    """Fetch *urls* in order via greenhouse.map (OrderedPool under the hood)."""
    def fetch(url):
        return urllib2.urlopen(url).read()
    size = parallelism or len(urls)
    return greenhouse.map(fetch, urls, pool_size=size)
| {
"repo_name": "teepark/greenhouse",
"path": "examples/parallel_client.py",
"copies": "1",
"size": "3256",
"license": "bsd-3-clause",
"hash": 1437722325059562800,
"line_mean": 23.8549618321,
"line_max": 79,
"alpha_frac": 0.6652334152,
"autogenerated": false,
"ratio": 3.597790055248619,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9757941989934605,
"avg_score": 0.0010162961028025862,
"num_lines": 131
} |
"""A bunch of extractors go here"""
import os
import six
import rarfile
import tarfile
import zipfile
class ExtractorError(Exception):
    """Raised when an archive cannot be matched to or unpacked by an extractor."""
class ExtractorRegistry(type):
    """Metaclass that records every extractor class and its extensions."""

    registry = {}

    def __new__(cls, name, bases, attrs):
        created = type.__new__(cls, name, bases, attrs)
        cls.register_extractor(created)
        return created

    @classmethod
    def register_extractor(cls, new_cls):
        """Map the new class object to its declared supported_extensions."""
        cls.registry[new_cls] = new_cls.supported_extensions
class ExtractorFactory(object):
    """Instantiate the first registered extractor that supports a filename."""

    @classmethod
    def create(cls, filename, destination='.'):
        """Return an extractor instance, or raise ExtractorError if none match."""
        for candidate in ExtractorRegistry.registry:
            if candidate.supports(filename):
                return candidate(filename, destination)
        raise ExtractorError('File extension not supported.')
@six.add_metaclass(ExtractorRegistry)
class Extractor(object):
    """Base class for archive extractors; subclasses declare their extensions."""

    supported_extensions = []

    def __init__(self, filename, destination='.'):
        self.filename = filename        # path to the archive
        self.destination = destination  # directory to extract into

    @classmethod
    def supports(cls, filename):
        """Returns True if the extractor supports the given file"""
        return any(filename.endswith(ext) for ext in cls.supported_extensions)

    @staticmethod
    def strip_extension(full_path, extension):
        """Drop *extension* from the path and return only the base name."""
        return os.path.basename(full_path[:-len(extension)])

    def extract(self):
        """Subclasses must implement the actual extraction."""
        raise NotImplementedError()
class TarExtractor(Extractor):
    # Handles gzip-, bzip2-, and uncompressed tar archives.
    supported_extensions = ['.tar', '.tar.gz', '.tgz', '.tar.bz2', '.tbz']

    def extract(self):
        # Map each supported extension to the matching tarfile open mode.
        mode_map = {
            '.tar.gz': 'r:gz',
            '.tgz': 'r:gz',
            '.tar.bz2': 'r:bz2',
            '.tbz': 'r:bz2',
            '.tar': 'r:'}
        for file_extension, mode in mode_map.items():
            if self.filename.endswith(file_extension):
                with tarfile.open(self.filename, mode) as archive:
                    # NOTE(review): extractall() without member filtering is
                    # vulnerable to path traversal on untrusted archives;
                    # consider the `filter` argument (Python 3.12+).
                    archive.extractall(path=self.destination)
                return self.destination
        raise ExtractorError(
            'Failed to extract {} as tar file.'.format(self.filename))
class ZipExtractor(Extractor):
    """Extractor for plain .zip archives."""

    supported_extensions = ['.zip']

    def extract(self):
        """Unpack the zip into self.destination and return that directory."""
        archive = zipfile.ZipFile(self.filename, 'r')
        with archive:
            archive.extractall(path=self.destination)
        return self.destination
class RarExtractor(Extractor):
    """Extracting RAR file"""
    # Relies on the third-party `rarfile` package (which needs an unrar backend).
    supported_extensions = ['.rar']

    def extract(self):
        # Extract everything into the destination directory and return it.
        with rarfile.RarFile(self.filename, 'r') as archive:
            archive.extractall(path=self.destination)
        return self.destination
| {
"repo_name": "brian-bates/kai",
"path": "kai/extractors.py",
"copies": "1",
"size": "2693",
"license": "bsd-3-clause",
"hash": 1876480067077729000,
"line_mean": 27.0520833333,
"line_max": 74,
"alpha_frac": 0.6201262532,
"autogenerated": false,
"ratio": 4.3088,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5428926253199999,
"avg_score": null,
"num_lines": null
} |
"""A bunch of functions that are used by multiple threads.
"""
import pt, hashlib, re, subprocess, time, copy, networking, custom, logging, random
from json import dumps as package, loads as unpackage
from urllib import urlopen
import re
#print(json.dumps(x, indent=3, sort_keys=True)) for pretty printing
def getPublicIp():
    """Ask checkip.dyndns.com for this host's public IPv4 address."""
    page = str(urlopen('http://checkip.dyndns.com/').read())
    # page looks like: ...Current IP Address: 65.96.168.198...
    match = re.search(r'Address: (\d+\.\d+\.\d+\.\d+)', page)
    return match.group(1)
def int2hash(a):
    """Render an integer digest as a 64-char zero-padded hex string."""
    return buffer_(hex(a)[2:], 64)

def hash2int(a):
    """Parse a hex digest string back into an integer."""
    return int(str(a), 16)
def cost_0(txs, address):
    #cost of the zeroth confirmation transactions
    # Sums fee (plus amount for spend-like types) over the unconfirmed
    # transactions sent from *address*.
    spends=['spend', 'spend2wait', 'bond2spend']
    total_cost = []
    txs=filter(lambda t: address == addr(t), txs)
    # NOTE(review): `do` is not defined anywhere in this module as shown, so
    # this line raises NameError when reached — possibly `spends` (or some
    # costed-type table) was intended. Confirm before relying on this.
    txs=filter(lambda t: t['type'] in do.keys(), txs)
    for t in txs:
        total_cost.append(t['fee'])
        if t['type'] in spends:
            total_cost.append(t['amount'])
    return sum(total_cost)
def block_fee(length):
    """Flat per-block fee; total block count is all_money divided by this."""
    return 10**9
#assume it takes 0.5 seconds to process each block. If someone with 1% money does DDOS, total_blocks/200 seconds is how long they can. I am aiming for 1% of money to be able to DDOS for 1 day.
#since each block can be 10 kb, total block length is total_blocks*10kB. I am aiming for 210 GB.
#once we apply mini-blockchain proposal, then this number will shrink slowly. The total coins will decrease by a half-life ever 21 million blocks.
def fee_check(tx, txs, DB):
    """Return True when the sender can afford tx plus all pending zero-conf costs."""
    sender = addr(tx)
    needed = cost_0(txs + [tx], sender)
    account = db_get(sender, DB)
    if int(account['amount']) < needed:
        log('insufficient money')
        return False
    return True
def entropy(txs):
    # Tally jackpot-weighted votes from 'sign' transactions and return the
    # majority entropy bit (ties go to 0).
    one=0
    zero=0
    log('txs: ' +str(txs))
    for t in filter(lambda x: x['type']=='sign', txs):
        if t['entropy']==0:
            zero+=len(t['jackpots'])
        elif t['entropy']==1:
            one+=len(t['jackpots'])
        else:
            # NOTE(review): error() is not defined in this module as shown —
            # an out-of-range entropy value raises NameError here. Confirm
            # whether a crash-on-bad-data was the intent.
            error()
    if one>zero: return 1
    else: return 0
def get_(loc, thing):
    """Walk nested dicts: follow each key in *loc* (stringified) into *thing*."""
    for step in loc:
        thing = thing[str(step)]
    return thing

def set_(loc, dic, val):
    """Assign val at the nested location; the final key is NOT stringified."""
    get_(loc[:-1], dic)[loc[-1]] = val
    return dic
def adjust(pubkey, DB, f):#location shouldn't be here.
    # Load the account, let f mutate it in place, then store it back.
    acc = db_get(pubkey, DB)
    f(acc)
    db_put(pubkey, acc, DB)
def adjust_int(key, pubkey, amount, DB, add_block):
    # Add amount at the nested key when applying a block; subtract when undoing.
    amount=int(amount)
    def f(acc, amount=amount):
        if not add_block: amount=-amount
        set_(key, acc, (get_(key, acc) + amount))
    adjust(pubkey, DB, f)
def adjust_string(location, pubkey, old, new, DB, add_block):
    # Set the field to new when applying a block, back to old when undoing.
    def f(acc, old=old, new=new):
        current=get_(location, acc)  # NOTE(review): computed but unused
        if add_block:
            set_(location, acc, new)
        else: set_(location, acc, old)
    adjust(pubkey, DB, f)
def adjust_dict(location, pubkey, remove, dic, DB, add_block):
    # Merge dic into (or pop its single key from) the nested dict; the
    # remove/add_block pair is xor'd so undoing an add becomes a removal.
    def f(acc, remove=remove, dic=dic):
        current=get_(location, acc)
        if remove != add_block:# 'xor' and '!=' are the same.
            # NOTE(review): dict.items()/keys() return lists only on
            # Python 2; this concatenation and [0] indexing break on 3.
            current=dict(dic.items() + current.items())
        else:
            try:
                current.pop(dic.keys()[0])
            except:
                log('current dic: ' +str(current) + ' ' +str(dic)+' '+str(location))
        set_(location, acc, current)
    adjust(pubkey, DB, f)
def adjust_list(location, pubkey, remove, item, DB, add_block):
    # Append item (or remove it) from the nested list, xor'd as above.
    def f(acc, remove=remove, item=item):
        current=get_(location, acc)
        if remove != (add_block):# 'xor' and '!=' are the same.
            current.append(item)
        else:
            current.remove(item)
        set_(location, acc, current)
    adjust(pubkey, DB, f)
def symmetric_put(id_, dic, DB, add_block):
    # Store when applying a block, delete when rolling it back.
    if add_block: db_put(id_, dic, DB)
    else: db_delete(id_, DB)
def empty_peer():
    """Fresh bookkeeping record for a newly seen peer."""
    return {'blacklist': 0, 'lag': 40.0, 'length': 0}

def peer_split(peer):
    """Split 'host:port' into [host, port-as-int]."""
    parts = peer.split(':')
    parts[1] = int(parts[1])
    return parts

def port_grab(peer):
    """Return only the numeric port of a 'host:port' string."""
    return peer_split(peer)[1]
def add_peer(peer, current_peers=0):
    """Insert *peer* into the stored peer table unless it is already known."""
    if current_peers == 0:
        current_peers = local_get('peers')
    if peer in current_peers.keys():
        return False
    record = empty_peer()
    record['port'] = port_grab(peer)
    current_peers[peer] = record
    local_put('peers', current_peers)
def dump_out(queue):
    """Throw away everything currently sitting in *queue*."""
    while True:
        if queue.empty():
            break
        try:
            queue.get(False)
        except:
            # another consumer may have raced us to the last item
            pass
# Configure the root logger once at import time; destination comes from custom.log_file.
logging.basicConfig(filename=custom.log_file, level=logging.INFO)
def log(junk):
    # Exceptions get a traceback entry; everything else is a plain INFO line.
    # NOTE(review): logging.exception is meant to be called from inside an
    # except block — outside one the traceback is empty; confirm call sites.
    if isinstance(junk, Exception):
        logging.exception(junk)
    else:
        logging.info(str(junk))
def can_unpack(o):
    """Return True when *o* parses as JSON via unpackage (json.loads).

    Fix: the original bare `except:` swallowed every exception, including
    KeyboardInterrupt/SystemExit; json.loads only raises ValueError (bad
    JSON) or TypeError (non-string input), so catch exactly those.
    """
    try:
        unpackage(o)
        return True
    except (ValueError, TypeError):
        return False
def addr(tx):
    """Derive the multisig address implied by a transaction's pubkeys/sigs."""
    return make_address(tx['pubkeys'], len(tx['signatures']))

def sign(msg, privkey):
    """ECDSA-sign msg with privkey (thin wrapper over pt)."""
    return pt.ecdsa_sign(msg, privkey)

def verify(msg, sig, pubkey):
    """Check an ECDSA signature (thin wrapper over pt)."""
    return pt.ecdsa_verify(msg, sig, pubkey)

def privtopub(privkey):
    """Derive the public key for privkey (thin wrapper over pt)."""
    return pt.privtopub(privkey)

def hash_(x):
    """First 64 hex characters of SHA-384 of x."""
    return hashlib.sha384(x).hexdigest()[:64]

def det_hash(x):
    """Deterministically takes sha256 of dict, list, int, or string."""
    return hash_(package(x, sort_keys=True))
def POW(block):
    """Grind block[u'nonce'] until the half-hash meets the share target.

    Fix: the original evaluated `det_hash(a)` in the while-condition before
    `a` was ever assigned (it was only bound at the bottom of the loop
    body), so the first test raised NameError. The preimage is now built
    before the first comparison and rebuilt after each nonce bump.
    """
    h = det_hash(block)  # hash of the block as given (nonce not yet set)
    block[u'nonce'] = random.randint(0, 10000000000000000000000000000000000000000)
    a = {u'nonce': block['nonce'], u'halfHash': h}
    while det_hash(a) > custom.buy_shares_target:
        block[u'nonce'] += 1
        a = {u'nonce': block['nonce'], u'halfHash': h}
    return block
def make_half_way(block):
    """Return the PoW preimage: the nonce plus the hash of the block without it."""
    stripped = copy.deepcopy(block)
    del stripped['nonce']
    return {u'nonce': block['nonce'], u'halfHash': det_hash(stripped)}
def base58_encode(num):
    """Encode a hex string as base58 using a bitcoin-style alphabet.

    Returns '' for zero or negative input.

    Fix: the original used `num = num / base_count`, which is integer
    division only on Python 2; under Python 3 it produces floats and the
    loop misbehaves. divmod keeps the semantics identical for non-negative
    ints on both versions.
    """
    num = int(num, 16)
    alphabet = '123456789abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ'
    base_count = len(alphabet)
    encode = ''
    if num < 0:
        return ''
    while num >= base_count:
        num, mod = divmod(num, base_count)
        encode = alphabet[mod] + encode
    if num:
        encode = alphabet[num] + encode
    return encode
def make_address(pubkeys, n):
    """n is the number of pubkeys required to spend from this address."""
    prefix = str(len(pubkeys)) + str(n)
    digest = base58_encode(det_hash({str(n): pubkeys}))
    return prefix + digest[0:29]
def buffer_(str_to_pad, size):
    """Left-pad a string with '0' characters out to *size* (never truncates)."""
    missing = max(0, size - len(str_to_pad))
    return '0' * missing + str_to_pad
def E_check(dic, key, type_):
    """Return True when dic[key] exists and matches any entry of type_.

    type_ may be a single option or a list; entries that are types are
    matched with isinstance, anything else by equality.
    """
    options = type_ if isinstance(type_, list) else [type_]
    if key not in dic:
        return False
    value = dic[key]
    for option in options:
        if isinstance(option, type):
            if isinstance(value, option):
                return True
        elif value == option:
            return True
    return False
def is_number(s):
    """Return True when int(s) succeeds.

    Fix: the original bare `except:` also swallowed KeyboardInterrupt and
    SystemExit; int() only raises ValueError or TypeError here.
    """
    try:
        int(s)
        return True
    except (ValueError, TypeError):
        return False
def kill_processes_using_ports(ports):
    # Scan `netstat -lpn` output and kill -9 every PID bound to one of the
    # given port strings.
    popen = subprocess.Popen(['netstat', '-lpn'],
                             shell=False,
                             stdout=subprocess.PIPE)
    (data, err) = popen.communicate()
    pattern = "^tcp.*((?:{0})).* (?P<pid>[0-9]*)/.*$"
    pattern = pattern.format(')|(?:'.join(ports))
    prog = re.compile(pattern)
    for line in data.split('\n'):
        match = re.match(prog, line)
        if match:
            pid = match.group('pid')
            subprocess.Popen(['kill', '-9', pid])
def s_to_db(c):
    # Forward command dict c to the local database process over the network.
    response=networking.send_command(['localhost', custom.database_port], c)
    if (type(response)==dict and 'error' in response):
        time.sleep(0.001)
        log('s to db failed at '+str(c))
        log('s to db failed at '+str(response))
        # NOTE(review): the retry below is disabled, so the error path falls
        # through and implicitly returns None — callers must tolerate that.
        #return s_to_db(c)
    else:
        return response
# Thin RPC wrappers over s_to_db. The DB={} parameters are accepted for
# signature compatibility but never used (the mutable default is harmless
# because it is never touched).
def local_get(k): return s_to_db({'type':'local_get', 'args':[str(k)]})
def local_put(k, v): return s_to_db({'type':'local_put', 'args':[str(k), v]})
def db_get(n, DB={}): return s_to_db({'type':'get', 'args':[str(n)]})
def db_put(key, dic, DB={}):
    dic=unpackage(package(dic))#keeps it deterministic.
    return s_to_db({'type':'put', 'args':[str(key), dic]})
def db_delete(key, DB={}):
    return s_to_db({'type':'delete', 'args':[str(key)]})
def db_existence(key, DB={}): return s_to_db({'type':'existence', 'args':[str(key)]})
def db_proof(key): return s_to_db({'type':'proof', 'args':[str(key)]})
def db_verify(root, key, proof): return s_to_db({'type':'verify', 'args':[root, key, proof]})
def db_root(): return s_to_db({'type':'root', 'args':[]})
def fork_check(newblocks, DB, length, block):
    #block is most recent block in our chain
    # True when the peer's chain excludes our tip and overlaps our height —
    # i.e. we may be on a fork worth investigating.
    recent_hash = block['block_hash']#recent_hash
    if length<=0: return False
    if len(newblocks)<1:
        return False
    #log('newblocks: ' +str(newblocks))
    # NOTE(review): `map(...) + [...]` requires Python 2, where map returns
    # a list; under Python 3 this concatenation raises TypeError.
    their_hashes = map(lambda x: x['block_hash'] if x['length']>0 else 0, newblocks)+[det_hash(newblocks[-1])]
    b=(recent_hash not in their_hashes) and length>=newblocks[0]['length'] and length<newblocks[-1]['length']
    return b
if __name__ == "__main__":
    # Smoke test: grind a proof-of-work over a trivial block when run directly.
    a=POW({'a':'b'})
    print(a)
# Dead benchmark snippet preserved as a module-level string; it never executes.
'''
time_0=time.time()
for i in range(100):
    timea=time.time()
    POW({'empty':0})
    print(time.time()-timea)
print(time.time()-time_0)
'''
def relative_reward(on_block, my_address):
    #redistributes spend fees to signers based upon how big the signer_bond was
    # Returns this address's share of the previous block's fees, weighted by
    # its sign-tx amount relative to all signers.
    one_before=on_block-1
    txs=db_get(on_block)['txs']
    sign_txs=filter(lambda t: t['type']=='sign', txs)
    my_sign_tx=filter(lambda t: addr(t)==my_address, sign_txs)[0]
    amounts=map(lambda t: int(t['amount']), sign_txs)
    total_amount=sum(amounts)
    total_fee=block_reward(db_get(one_before)['txs'])
    blockmaker_fee=custom.reward_blockmaker_vs_signers(total_fee)
    fee=total_fee-blockmaker_fee
    # NOTE(review): integer division on Python 2 — amount/total_amount
    # truncates unless amounts are floats; confirm intended precision.
    return (my_sign_tx['amount']/total_amount)*fee
def winner(B, M, ran, my_address, j):#this doesn't work if we skip a block. It will say that the same people are signers whether we skip or not.
    # Lottery test: the address wins signer slot j when a deterministic hash
    # of (seed, address, slot) falls below a threshold scaled by bond B over
    # total M.
    b=hash2int('f'*64)*64*B/(200*M)
    a=hash2int(det_hash(str(ran)+str(my_address)+str([j])))
    return a<b
def entropy_bit(length):#too slow
    # Power-weighted majority vote over signer accounts' recorded entropy
    # for the block at this height (ties resolve to 1).
    block=db_get(length)
    #log('block: ' +str(block))
    txs=block['txs']
    txs=filter(lambda t: t['type']=='sign', txs)
    accs=map(lambda t: db_get(addr(t)), txs)
    log('accs: ' +str(accs))
    yea=0
    nay=0
    for acc in accs:
        if str(length) in acc['entropy']:
            a=acc['entropy'][str(length)]
            if a['vote']['entropy']==0:
                nay+=a['power']
            else:
                yea+=a['power']
    if nay>yea: return 0
    return 1
def det_random(length):
    #returns random seed to elect signers for the next block.
    # NOTE(review): len(l)/2 and custom.medium_time/2 rely on Python 2
    # integer division; under Python 3 these produce floats and fail as
    # indices/range bounds.
    def mean(l): return sorted(l)[len(l)/2]
    ran=[]#this list should include a default value maybe 0, for every skipped height.
    for i in range(custom.medium_time/2):
        a=length-custom.long_time*2-custom.medium_time-i
        if a<0:
            ran.append(a)
        else:
            ran.append(entropy_bit(a))
    out=[]
    # Fold the bit stream into chunks of up to 17 and hash the chunk medians.
    while ran!=[]:
        a=min(17, len(ran))
        l=ran[0:a]
        ran=ran[a:]
        out.append(mean(l))
    return det_hash(out)
def mint_cost(txs, gap):#returns float???
    # Signer reward minus the per-gap block fee. Note this calls
    # custom.block_fee, not the local block_fee defined above.
    a=custom.reward_blockmaker_vs_signers(block_reward(txs))
    b=custom.block_fee(gap)
    return a-b
def block_reward(txs):
    """Total fees collected from the 'spend' transactions in *txs*."""
    return sum(int(t['fee']) for t in txs if t['type'] == 'spend')
def signature_check(tx):
    """Verify that a transaction carries valid ECDSA signatures.

    Fix: this module *is* tools.py, yet the original referenced its own
    helpers via a nonexistent `tools.` prefix (tools.log, tools.det_hash),
    which raises NameError the moment any branch runs. The helpers are
    called directly now.
    """
    tx_copy = copy.deepcopy(tx)
    tx_copy.pop('signatures')  # the signed message excludes the signatures
    if len(tx['pubkeys']) == 0:
        log('pubkey error')
        return False
    if len(tx['signatures']) > len(tx['pubkeys']):
        log('sigs too long')
        return False
    msg = det_hash(tx_copy)
    # NOTE(review): sigs_match is not defined in this module as shown —
    # confirm it is provided elsewhere in the file.
    if not sigs_match(copy.deepcopy(tx['signatures']),
                      copy.deepcopy(tx['pubkeys']), msg):
        log('sigs do not match')
        return False
    return True
| {
"repo_name": "jtremback/FlyingFox",
"path": "docs/python/tools.py",
"copies": "2",
"size": "12058",
"license": "unlicense",
"hash": 6177991626753372000,
"line_mean": 37.1582278481,
"line_max": 192,
"alpha_frac": 0.6047437386,
"autogenerated": false,
"ratio": 3.2043582248206217,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4809101963420621,
"avg_score": null,
"num_lines": null
} |
""" A bunch of helper functions that, when fixed up, will return the things we
need to make this website work! These functions use the weather and twitter APIs!!!
"""
###############################################
### Problem One! ###
###############################################
def get_city_coordinates():
    """GPS coordinates for here (latitude, longitude)."""
    return 38.9095396, -77.0757553
###############################################
### Problem Two! ###
###############################################
def get_icon_size():
    """Pixel multiplier used for the weather icon on the site."""
    return 2
###############################################
### Problem Three! ###
###############################################
def choose_number_of_tweets():
    """Maximum number of tweets shown on the site at once."""
    return 3
###############################################
### Problem Four! ###
###############################################
def choose_hashtag():
    """The conference hashtag displayed on the site."""
    return "#capwic2017"
| {
"repo_name": "samanehsan/learn-git",
"path": "helpers.py",
"copies": "2",
"size": "1413",
"license": "apache-2.0",
"hash": -1229171534897610800,
"line_mean": 27.26,
"line_max": 83,
"alpha_frac": 0.423920736,
"autogenerated": false,
"ratio": 5.272388059701493,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6696308795701492,
"avg_score": null,
"num_lines": null
} |
# A bunch of nifty utility functions to use in python
import netifaces
import requests
import datetime
import json
import sys
import pymongo
import pprint
import os
import glob
import logging
import time
import math
import subprocess
import re
import ConfigParser
import socket
import shutil
import random
import code
#code.interact(local=locals())
import pdb
# pdb.set_trace()
# useful to get the function name
def get_func_name():
    # sys._getframe(1) is the *caller's* frame, so this returns the name of
    # whichever function invoked get_func_name() (CPython-specific API).
    return sys._getframe(1).f_code.co_name
def write_list_to_file(file_name, list_name):
    """Write each item of *list_name* to *file_name*, one per line.

    Fix: the original never closed the file handle; a context manager
    guarantees the file is flushed and closed even on error.
    """
    with open(file_name, 'w') as thefile:
        for item in list_name:
            thefile.write("%s\n" % item)
# sums up all the elements in a list that matches 'field'
# and returns the total
def sum_list_dict(ll, field):
    """Sum *field* across a list of dicts.

    Fix: the original ignored the *field* argument and always summed the
    hard-coded 'count' key, contradicting its own documentation. Behavior
    is unchanged for the documented field == 'count' case.
    """
    return sum(entry[field] for entry in ll)
# returns rows and cols
def get_screen_size():
    # Shell out to `stty size` and return (rows, columns) as strings.
    # NOTE(review): requires a real TTY on stdin — fails when piped.
    rows, columns = os.popen('stty size', 'r').read().split()
    return (rows, columns)
# draws a line
def drawline(cols=0):
    # With cols == 0, span the full terminal width.
    if cols == 0:
        (_, cols) = get_screen_size()
    # alternatively
    # print u"\u2501"
    # https://en.wikipedia.org/wiki/Box-drawing_character
    #print unichr(0x2501) * int(cols)
    print '-' * int(cols)
# TODO: can go in a seperate file as a module
# print bcolors.WARNING + "Warning: No active frommets remain. Continue?" + bcolors.ENDC
# print bcolors.OKBLUE + 'Test is over' + bcolors.ENDC
# print bcolors.FAIL + 'root perms needed' + bcolors.ENDC
class bcolors:
    # ANSI escape sequences for coloured terminal output; always follow a
    # colour with ENDC to reset the terminal state.
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
# Accepts an epoch and converts it into a human readable time
# date -d @1478587042
# to get a date in epoch format : date +'%s'
def epoch2date(epoch):
epoch = str(epoch)
if len(epoch) > 10:
# strip chars greater than 10
# must be ms and beyond and we cant process them
epoch = epoch[:10]
print ('converting ' + epoch)
return (time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(epoch))))
# accepts a datettime object
# if no argument is specified, will return time in epoch
def date2epoch(dt=None):
    """Epoch seconds for *dt*, or for the current moment when dt is omitted."""
    moment = datetime.datetime.now() if dt is None else dt
    return int(moment.strftime('%s'))
# accepts a datetime.timedelta
# time_delta_to_str(8640)
# '2:24:0'
def time_delta_to_str(diff):
    """Render a timedelta's seconds-part as 'H hours M mins S secs'.

    Only diff.seconds is consulted (whole days are ignored), zero
    components are omitted, and minutes/seconds keep a leading space.
    """
    hours, remainder = divmod(diff.seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    pieces = ''
    if hours:
        pieces += str(hours) + ' hours'
    if minutes:
        pieces += ' ' + str(minutes) + ' mins'
    if seconds:
        pieces += ' ' + str(seconds) + ' secs'
    return pieces
# returns date and time in the present
# eg. 2016_09_08_13
def get_date_time_human_readable():
    """Current local timestamp formatted like 2016_09_08_13_45."""
    now = datetime.datetime.now()
    return str(now.strftime('%Y_%m_%d_%H_%M'))
# hacky datetime apis follow
# import time
# import datetime
def get_epoch_by_date_str(dt_str):
    """Parse 'Mon DD YYYY HH:MM:SS' (treated as UTC) into epoch seconds."""
    parsed = datetime.datetime.strptime(dt_str, "%b %d %Y %H:%M:%S")
    delta = parsed - datetime.datetime.utcfromtimestamp(0)
    return int(delta.total_seconds())
def get_date_str_by_epoch(epoch):
    # epoch (adjust for tz) -> datetime -> strftime
    # NOTE(review): this manually re-applies the local UTC offset before
    # calling fromtimestamp (which already converts to local time), and the
    # sign/modulo arithmetic misbehaves for negative and half-hour offsets;
    # `min` also shadows the builtin and `/` is float division on Python 3.
    # Verify against known timezones before relying on this.
    add_tz = int(time.strftime("%z")) * (-1)
    hr = add_tz / 100
    min = add_tz % 100
    add_secs = (hr * 3600) + (min * 60)
    epoch += add_secs
    dt_epoch = datetime.datetime.fromtimestamp(epoch)
    dt_str = dt_epoch.strftime('%b %d %Y %H:%M:%S')
    return dt_str
# expects an array of dicts
# merges array elements with the same key
# Input :
# l = [{u'count': 8, u'user_agent': u'CHROME44'},
# {u'count': 10, u'user_agent': u'OPERA10'},
# {u'count': 5, u'user_agent': u'FIREFOX21'},
# {u'count': 9, u'user_agent': u'DOWNLOAD'},
# {u'count': 18, u'user_agent': u'UNKNOWN'},
# {u'count': 6, u'user_agent': u'SAFARI5'}]
#
# merge_list_on_key(l, 'user_agent'])
# #merges on user_agent UNKNOWN
#
# Output:
# {'SAFARI5': 6, 'CHROME44': 8, 'UNKNOWN': 18, 'OPERA10': 10, 'FIREFOX21': 5, 'DOWNLOAD': 9}
#
def merge_list_on_key(l1, key):
    """Merge dicts sharing the same *key* value by summing their 'count'.

    The first occurrence of each key value is kept (and mutated in place);
    first-appearance order is preserved.
    """
    seen = set()
    merged = []
    for entry in l1:
        if entry[key] not in seen:
            seen.add(entry[key])
            merged.append(entry)
        else:
            for kept in merged:
                if kept[key] == entry[key]:
                    kept['count'] += entry['count']
    return merged
# returns True if lists match
# else returns False
def is_list_equal(l1, l2):
    """True when the two lists hold the same set of elements (duplicates and
    order are ignored)."""
    return set(l1) == set(l2)
def sleep_sensible():
    # NOTE(review): date_str() and knob_sleep_sensible are not defined in
    # this module as shown — confirm they exist elsewhere, otherwise this
    # raises NameError at call time.
    print(date_str() + 'Sleeping for ' + str(knob_sleep_sensible) + ' seconds')
    time.sleep(knob_sleep_sensible)
    print(date_str() + 'Resuming...')
def sleep_seconds(secs = 10):
    # Sleep for *secs* seconds with progress messages (same date_str caveat).
    print(date_str() + 'Sleeping for ' + str(secs) + ' seconds')
    time.sleep(secs)
    print(date_str() + 'Resuming...')
# creates a directory if it does not exist
def ensure_dir(directory):
    # Race: the directory may appear between the existence check and
    # makedirs; same date_str caveat as above.
    if not os.path.exists(directory):
        print(date_str() + 'creating directory ' + directory)
        os.makedirs(directory)
# rounds a value to the next nearest multiple of 10.
# roundup_to_nearest_ten(1) = 10
# roundup_to_nearest_ten(10) = 10
# roundup_to_nearest_ten(11) = 20
def roundup_to_nearest_ten(x):
    """Round x up to the next multiple of 10; exact multiples are unchanged."""
    tenths = math.ceil(x / 10.0)
    return int(tenths) * 10
# rounds a value to the previous nearest multiple of 10.
# rounddown_to_nearest_ten(1) = 0
# rounddown_to_nearest_ten(9) = 0
# rounddown_to_nearest_ten(10) = 10
def rounddown_to_nearest_ten(x):
    """Round x down to the previous multiple of 10; exact multiples are unchanged."""
    tenths = math.floor(x / 10.0)
    return int(tenths) * 10
# think of it as py_time_round_later
# typically done with the end time
def py_time_round_up(tm):
    """Round *tm* forward to the next 10-minute boundary and zero the seconds.

    Times whose minute is already a multiple of 10 are unchanged (apart
    from seconds). Microseconds are left untouched, as in the original.
    """
    target_minute = math.ceil(float(tm.minute) / 10) * 10
    bumped = tm + datetime.timedelta(minutes=target_minute - tm.minute)
    return bumped.replace(second=0)
# think of it as py_time_round_earlier
# typically done with the start time
def py_time_round_down(tm):
    """Round *tm* back to the previous 10-minute boundary and zero the seconds.

    Microseconds are left untouched, as in the original.
    """
    target_minute = math.floor(float(tm.minute) / 10) * 10
    shifted = tm + datetime.timedelta(minutes=target_minute - tm.minute)
    return shifted.replace(second=0)
# checks if the program has root permissions
def is_sudo():
    """True when running with root privileges (effective uid 0)."""
    return os.getuid() == 0
# class that adds a fake section to c ofnig file
class FakeSecHead(object):
    """File-like wrapper that injects a fake '[asection]' header line.

    ConfigParser insists on at least one section; this yields the fake
    header once, then delegates every later readline to the wrapped file.
    """

    def __init__(self, fp):
        self.fp = fp
        self.sechead = '[asection]\n'

    def readline(self):
        if self.sechead is None:
            return self.fp.readline()
        header, self.sechead = self.sechead, None
        return header
# reads all the important config into a dict
def read_config_file_without_sections(file_path):
    # Parse a section-less key=value file by injecting a fake [asection]
    # header (see FakeSecHead), then return the pairs as a plain dict.
    # NOTE(review): the file handle opened here is never closed, and the
    # ConfigParser module name is Python 2 only (configparser on 3).
    cp = ConfigParser.SafeConfigParser()
    #cp.readfp(FakeSecHead(open('/home/rohit/Downloads/somefile.json')))
    cp.readfp(FakeSecHead(open(file_path)))
    #print cp.items('asection')
    #print(type(cp.items('asection')))
    # convert from a two tuple list into a dict
    dd = dict( cp.items('asection'))
    #print(dd)
    return dd
###############################################################################
# Networking utils
###############################################################################
def is_port_open_local(port):
    # True when `netstat -lpn` shows a tcp listener on *port* (Linux only).
    popen = subprocess.Popen(['netstat', '-lpn'],
                             shell=False,
                             stdout=subprocess.PIPE)
    #(data, err) = popen.communicate()
    (data, _) = popen.communicate()
    patt = '^tcp.*((?:' + str(port) + ')).* (?P<pid>[0-9]*)/.*'
    p = re.compile(patt)
    for line in data.split('\n'):
        match = re.match(p, line)
        if match:
            #pid = match.group('pid')
            #print('found pid ' + str(pid))
            return True
    return False
def find_pid_by_port(port):
    """Return the PID (as a string) listening on tcp *port*, or 0 if none.

    Parses `netstat -lpn` output (Linux only). Fix: removed a stray debug
    print of the regex pattern that leaked to stdout on every call.
    """
    popen = subprocess.Popen(['netstat', '-lpn'],
                             shell=False,
                             stdout=subprocess.PIPE)
    (data, _) = popen.communicate()
    patt = '^tcp.*((?:' + str(port) + ')).* (?P<pid>[0-9]*)/.*'
    p = re.compile(patt)
    for line in data.split('\n'):
        match = re.match(p, line)
        if match:
            return match.group('pid')
    return 0
def are_all_ports_open(host, port_dict):
    """True only when every port value in *port_dict* is open on *host*."""
    for value in port_dict.values():
        if not is_port_open(host, int(str(value))):
            return False
    return True
# checks if a port is open on a host
# host is string
# port is a number
# returns true if port is open, else returns false
# ex.
# result = is_port_open('127.0.0.1', 80)
def is_port_open(host, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((host, port))
if result == 0:
# print host + ':' + str(port) + ' is open'
return True
else:
print host + ':' + str(port) + ' is not open'
popen = subprocess.Popen(['netstat', '-tlpn'],
shell=False,
stdout=subprocess.PIPE)
#(data, err) = popen.communicate()
(data, _) = popen.communicate()
print(data)
return False
# sends n requests
# returns the total number of requests sent
def send_request_n_times(method, url, user_agent_str, repetitions = 10):
    # Fire *repetitions* HTTP requests at url with the given User-Agent,
    # discarding the responses, and return the repetition count.
    #url = "http://localhost/atm/zipcode"
    header_dict = dict()
    header_dict['User-Agent'] = user_agent_str
    iterations = int(repetitions)
    #print("Sending " + iterations + " requests")
    for _ in range(iterations):
        # response is returned and is unused
        _ = requests.request(method, url, headers=header_dict)
        #print(response.text)
    # housekeeping
    # NOTE(review): add_to_total_requests_sent is not defined in this module
    # as shown — confirm it exists elsewhere or this raises NameError.
    add_to_total_requests_sent(repetitions)
    return repetitions
def get_active_interface():
    # returns a tuple like
    # ('192.168.5.1', 'wlp3s0')
    # Only the interface name of the default IPv4 gateway is returned;
    # gw_ip is looked up but intentionally discarded.
    (gw_ip, gw_if) = netifaces.gateways()['default'][netifaces.AF_INET]
    return gw_if
# dumps a list of secondary ip addresses to a file
# returns list of interfaces
def get_interfaces():
    # Thin wrapper over netifaces.interfaces().
    if_list = netifaces.interfaces()
    return if_list
# takes an interface name like 'wlp3s0'
# and returns a list of secondary ip addresses
def get_ip_addresses(iface):
    #if_list = get_interfaces()
    ip_list = []
    if_count = len(netifaces.ifaddresses(iface)[netifaces.AF_INET])
    # NOTE(review): xrange(1, if_count - 1) skips both index 0 (the primary
    # address) and the *last* entry — confirm whether the upper bound should
    # be if_count so the final secondary address is included.
    for i in xrange(1,if_count - 1):
        tmp_addr = netifaces.ifaddresses(iface)[netifaces.AF_INET][i]['addr']
        ip_list.append(tmp_addr)
    return ip_list
# accepts a list of port numbers
# and kills the processes that are attached to them
def kill_ports(port_list):
    # Match every listed port in `netstat -tlpn` output and kill -9 the
    # owning PIDs (Linux only; port_list entries must be strings).
    popen = subprocess.Popen(['netstat', '-tlpn'],
                             shell=False,
                             stdout=subprocess.PIPE)
    #(data, err) = popen.communicate()
    (data, _) = popen.communicate()
    pattern = "^tcp.*((?:{0})).* (?P<pid>[0-9]*)/.*$"
    pattern = pattern.format(')|(?:'.join(port_list))
    prog = re.compile(pattern)
    for line in data.split('\n'):
        match = re.match(prog, line)
        if match:
            pid = match.group('pid')
            # print(date_str() + 'killing PID ' + pid)
            subprocess.Popen(['kill', '-9', pid])
            #os.kill(pid)
def dequote(s):
    """
    If a string has single or double quotes around it, remove them.
    Make sure the pair of quotes match.
    If a matching pair of quotes is not found, return the string unchanged.

    Fix: guard against strings shorter than two characters — the original
    raised IndexError on '' and stripped a lone quote character down to ''.
    """
    if len(s) >= 2 and (s[0] == s[-1]) and s.startswith(("'", '"')):
        return s[1:-1]
    return s
| {
"repo_name": "trohit/automatic-broccoli",
"path": "pyutils.py",
"copies": "1",
"size": "12498",
"license": "mit",
"hash": -1983809346630828500,
"line_mean": 28.8995215311,
"line_max": 92,
"alpha_frac": 0.5886541847,
"autogenerated": false,
"ratio": 3.291545957334738,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43802001420347375,
"avg_score": null,
"num_lines": null
} |
""" Abundances and condensation temperatures.
This contains the following datasets:
Asolar:
An ordered dictionary of abundances from `Lodders 2003, ApJ, 591,
1220 <http://adsabs.harvard.edu/abs/2003ApJ...591.1220L>`_. It
contains a value A for each element `el`, where A is defined::
A(el) = log10 n(el)/n(H) + 12
`n(el)` is the number density of atoms of that element, and `n(H)`
is the number density of hydrogen.
cond_temp:
An array of condensation temperatures for each element from the same
reference. The condensation temperature is the temperature at which
an element in a gaseous state attaches to dust grains.
It contains the values `tc` and `tc50` for each element, where `tc`
is the condensation temperature in K when condensation begins, and
`tc50` is the temperature when 50% of the element is left in a
gaseous state.
protosolar, photosphere:
Protosolar and photospheric mass fractions of H (X), He (Y) and
metals (Z) from Asplund et al. 2009ARA&A..47..481A, table 4.
"""
from __future__ import unicode_literals
import numpy as np
from .utilities import get_data_path
from astropy.io import ascii
from barak.io import readtxt
from .io import readtxt
from collections import OrderedDict
datapath = get_data_path()
# A(el) = log10 n(el)/n(H) + 12 keyed by element symbol, read from the
# bundled data tables (Lodders 2003 set, then the Cloudy 13.02 set).
Asolar = OrderedDict(
    (t['el'], t['A']) for t in
    ascii.read(datapath + 'abundances/SolarAbundance.txt'))
Asolar_c13 = OrderedDict(
    (t['el'], t['A']) for t in
    ascii.read(datapath + 'abundances/SolarAbundance_c13.02.txt'))
# Condensation temperatures (tc, tc50) per element from the same reference.
cond_temp = readtxt(datapath +
                    'abundances/CondensationTemperatures.txt',
                    readnames=1, sep='|')
# Mass fractions of H (X), He (Y) and metals (Z) below are from Asplund et
# al. 2009ARA&A..47..481A, table 4.
protosolar = dict(X=0.7381, Y=0.2485, Z=0.0134, ZonX=0.0181,
                  ref='2009ARA&A..47..481A')
photosphere = dict(X=0.7154, Y=0.2703, Z=0.0142, ZonX=0.0199,
                   ref='2009ARA&A..47..481A')
def calc_abund(X, Y, logNX, logNY, ref='Lodders03'):
    """ Find the abundance relative to solar given two elements and
    their column densities.

    Parameters
    ----------
    X, Y : str
      Element identifiers (for example 'C', 'Si', 'Mg').
    logNX : array_like, shape (N,)
      log10 of element X column density in cm^-2.
    logNY : array_like, shape (N,)
      log10 of element Y column density in cm^-2.
    ref : str or mapping (default 'Lodders03')
      Which solar abundance measurements to use as a reference. One
      of {'Lodders03', 'cloudy13.02'}, or any mapping of element
      identifier -> solar abundance A.

    Returns
    -------
    abundance_ratio : ndarray, shape (N,)
      Abundance ratio relative to solar, [X/Y].

    Notes
    -----
    The abundance ratio is defined::

      [X/Y] = log10 (n_X / n_Y) - log10 (n_Xsun / n_Ysun)

    If Y is hydrogen, then this estimates the metallicity.
    """
    if ref == 'Lodders03':
        ref = Asolar
    elif ref == 'cloudy13.02':
        ref = Asolar_c13
    elif isinstance(ref, str):
        # Fix: an unrecognised string previously fell through and was
        # indexed like a table, producing a confusing TypeError.
        raise ValueError(
            "ref must be 'Lodders03', 'cloudy13.02', or a mapping")
    return logNX - logNY - (ref[X] - ref[Y])
| {
"repo_name": "nhmc/Barak",
"path": "barak/abundances.py",
"copies": "1",
"size": "3322",
"license": "bsd-3-clause",
"hash": -913906225741362400,
"line_mean": 31.568627451,
"line_max": 74,
"alpha_frac": 0.6520168573,
"autogenerated": false,
"ratio": 3.0787766450417053,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9175402373143695,
"avg_score": 0.011078225839602088,
"num_lines": 102
} |
# abundanceToolkit.py
"""
This toolkit has several functions that help compare RNA seq runs.
"""
import csv
import os
import json
import numpy as np
####
def abundance_read(infile):
    """Read an abundance table, dropping the header row and zero-count rows.

    Returns whitespace-split rows whose final column parses as nonzero.
    """
    kept = []
    with open(infile, 'r') as handle:
        for index, line in enumerate(handle):
            if not line:
                continue
            if index > 0 and float(line.split(None)[-1]) != 0:
                kept.append(line.split(None))
    return kept
##################
import os
#
def merge_abundances(filelist, output='testOutput.txt', returnoutput=False):
    """Merge column 4 of every *.txt abundance file under *filelist* into one
    tab-separated table.

    Each input file becomes one column of *output*; the first row of each
    column is the source filename. When returnoutput is True the per-file
    columns (lists of strings) are returned as well.

    Fixes: the *output* parameter was silently ignored (a hard-coded
    'testOutput' file was written instead), and returnoutput handed back
    generators that the writing step had already exhausted.

    NOTE(review): names from os.listdir(filelist) are opened relative to
    the current working directory, not *filelist* — confirm callers chdir
    first (behavior preserved from the original).
    """
    if filelist is None:
        fils = ['output_ERR852089.fastq/abundance.txt',
                'output_ERR852099.fastq/abundance.txt']
    else:
        fils = [f for f in os.listdir(filelist) if f.split('.')[-1] == 'txt']
    o_data = []
    for afile in fils:
        column = [afile]
        with open(afile) as file_h:
            for row in csv.reader(file_h, delimiter='\t'):
                column.append(row[4])
        o_data.append(column)
    with open(output, 'w') as op_file:
        csv_writer = csv.writer(op_file, delimiter='\t')
        for row in zip(*o_data):
            csv_writer.writerow(row)
    if returnoutput:
        return o_data
###############
def parse_gname(gi):
    """Split a Kallisto target id like 'gi|559098400|ref|NM_001287053.1|'
    into (gi_number, refseq_accession)."""
    _, gi_num, _, accession = gi.split('|')[:4]
    return int(gi_num), accession
def get_genedict(refgfile='gene2refseq'):
    """
    Return the default gene dictionary.

    Parses a gene2refseq-style whitespace table (columns: taxon, gene id,
    status, RefSeq accession, transcript gi) into a mapping
    gene_id -> {'nm_nums': [accessions], 'gi_nums': [transcript ids]},
    de-duplicating both lists.
    """
    genedict = {}
    with open(refgfile, 'r') as handle:
        for line in handle:
            if not line:
                continue
            cols = line.split(None)
            gene_id = int(cols[1])
            accession = cols[3]
            transcript_id = int(cols[4])
            entry = genedict.setdefault(gene_id, {'nm_nums': [], 'gi_nums': []})
            if accession not in entry['nm_nums']:
                entry['nm_nums'].append(accession)
            if transcript_id not in entry['gi_nums']:
                entry['gi_nums'].append(transcript_id)
    return genedict
def load_abfile(abfile, countonly=True):
    """Load an abundance matrix (Kallisto-like layout).

    The first two non-empty lines are treated as headers.  Rows whose
    fourth column is zero or negative are dropped.

    :param abfile: Path to the abundance file.
    :param countonly: When True each row is ``[gi, accession, counts...]``
        (all columns after the id parsed as floats); when False it is
        ``[gi, accession, length, eff_length, counts...]``.
    :return: List of parsed rows.
    """
    rows = []
    with open(abfile, 'r') as handle:
        data_line = -1
        for raw in handle:
            if not raw:
                continue
            data_line += 1
            if data_line <= 1:
                continue  # skip the two header lines
            fields = raw.split(None)
            if float(fields[3]) <= 0:
                continue
            # Inline parse of "gi|<num>|ref|<acc>|" target ids.
            parts = fields[0].split('|')
            gi_num, accession = int(parts[1]), parts[3]
            if countonly is False:
                row = [gi_num, accession, int(fields[1]), int(fields[2])]
                row.extend(float(v) for v in fields[3:])
            else:
                row = [gi_num, accession]
                row.extend(float(v) for v in fields[1:])
            rows.append(row)
    return rows
def collapse_genes(abfile, genedict='gene2refseq', countonly=True):
    """
    Group per-transcript abundance rows by gene.

    Given an abundance file (similar to Kallisto format, except columns are:
    target id (gi|...|ref|NM_...|) length eff_length counts0 counts1 ... countsN

    :param abfile: Path to an abundance file, or an already-loaded
        row matrix as produced by load_abfile().
    :param genedict: Path to a gene2refseq table, or an already-built
        gene dictionary as produced by get_genedict().
    :param countonly: Passed through to load_abfile(); controls whether
        length/eff_length are carried along.
    """
    def add_entry(ab_dict, lin, g, countonly=True):
        # Append one transcript row `lin` to gene g's running lists.
        # NOTE(review): with countonly=True only lin[2] (the FIRST count
        # column) is ever appended, regardless of which sample column the
        # caller is processing -- confirm this is intentional.
        ab_dict[g]['gi_nums'].append(lin[0])
        ab_dict[g]['nm_nums'].append(lin[1])
        if countonly is False:
            ab_dict[g]['length'].append(lin[2])
            ab_dict[g]['eff_length'].append(lin[3])
            ab_dict[g]['count'].append(lin[4])
        else:
            ab_dict[g]['count'].append(lin[2])
        return ab_dict
    #
    # Get the list of lists (abfile matrix); paths are loaded, lists are
    # used as-is.
    if type(abfile) == str:
        lol = load_abfile(abfile, countonly)
    elif type(abfile) == list:
        lol = abfile
    # lol[i] = [gi(int), nm(str), length(int), eff_len(int), count(float)]
    if type(genedict) == str:
        genedict = get_genedict(genedict)
    elif type(genedict) == dict:
        genedict = genedict
    # One accumulator dict per sample column (columns after gi/nm).
    samples = [{} for i in range(len(lol[0])-2)]  # Log all samples
    lost = []  # In case nothing fits
    # Now, for each transcript in lol log its counts and length in a new dict
    # called ab_gene
    for l in lol:  # For each line (each transcript!)
        for i in range(2,len(l)):  # For each sample (each column except first 2)
            for g in genedict.keys():  # Scan through genes
                # If this particular transcript is in the gene dict, log it and counts
                ##print(l)
                #print(l[i])
                if l[0] in genedict[g]['gi_nums'] or l[1] in genedict[g]['nm_nums']:
                    # Log this gene, creating its slot on first sight.
                    if g not in samples[i-2].keys():
                        if countonly is False:
                            samples[i-2][g] = {'gi_nums': [], 'nm_nums': [], 'eff_length': [],
                                               'length': [], 'count': []}
                        else:
                            samples[i-2][g] = {'gi_nums': [], 'nm_nums': [], 'count': []}
                    samples[i-2] = add_entry(samples[i-2], l, g, countonly)
                else:
                    # NOTE(review): this else belongs to the inner gene loop,
                    # so EVERY non-matching gene appends l here -- `lost`
                    # collects many duplicates per transcript. Confirm intent.
                    lost.append(l)
    # Now have all genes assigned.
    # NOTE(review): np.unique() on a list of dicts tries to sort/compare the
    # dicts and raises TypeError on Python 3; returning `samples` directly
    # looks like the intended behaviour -- confirm before changing.
    return np.unique(samples)
def simple_collapse(samples, genedict='gene2refseq', show=None):
    """Collapse per-transcript stats into one small record per gene.

    :param samples: List of per-sample dicts as produced by collapse_genes:
        ``{gene_id: {'gi_nums': [...], 'nm_nums': [...], 'count': [...],
        optionally 'eff_length': [...]}}``.
    :param genedict: Unused; kept for interface compatibility.
    :param show: If None, print the collapsed result; otherwise treat it
        as a file path and dump the result there as JSON.
    :return: None.
    """
    def most_common_tx(dict_elem):
        # Pick the single most frequent GI number or accession for a gene,
        # preferring the accession only when it is strictly more frequent.
        max_gi, gi_name, max_nm, nm_name = 0, None, 0, None
        for gi in dict_elem['gi_nums']:
            if dict_elem['gi_nums'].count(gi) > max_gi:
                max_gi = dict_elem['gi_nums'].count(gi)
                gi_name = gi
        for nm in dict_elem['nm_nums']:
            if dict_elem['nm_nums'].count(nm) > max_nm:
                max_nm = dict_elem['nm_nums'].count(nm)
                nm_name = nm
        # Bug fix: was `man_nm`, an undefined name (NameError).
        if max_nm > max_gi:
            return nm_name
        return gi_name
    #
    # Collapse gene-wise stats into a single element per gene, per sample.
    sample_trunc = []
    for s in samples:
        # Bug fix: `gene_trunc` was never initialised and the per-sample
        # results were never collected into sample_trunc.
        gene_trunc = {}
        for g in s.keys():
            # Bug fix: the count list is stored under 'count' (not 'counts');
            # 'eff_length' only exists when collapse_genes ran with
            # countonly=False, so guard the max().
            gene_trunc[g] = {'counts': sum(s[g]['count']),
                             'eff_length_max': (max(s[g]['eff_length'])
                                                if s[g].get('eff_length') else None),
                             'most_common': most_common_tx(s[g])}
        sample_trunc.append(gene_trunc)
    if show is None:
        print(sample_trunc)
    else:
        # Bug fix: the output file handle was never closed.
        with open(show, 'w') as handle:
            json.dump(sample_trunc, handle)
    return
def compare_variability():
    """Placeholder for run-to-run variability comparison (not implemented)."""
    return None
| {
"repo_name": "DCGenomics/rnaseq_comparison_hackathon_v002",
"path": "collapseGene.py",
"copies": "3",
"size": "6503",
"license": "cc0-1.0",
"hash": 8421708464145993000,
"line_mean": 25.4349593496,
"line_max": 80,
"alpha_frac": 0.5752729509,
"autogenerated": false,
"ratio": 3.2145328719723185,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5289805822872319,
"avg_score": null,
"num_lines": null
} |
''' A bundle of ElasticSearch functions used throughout the pipelines
'''
import logging
import os
import types
from os.path import join, dirname
import elasticsearch
import simplejson as json
METADATA_INDEX = 'openfdametadata'
METADATA_TYPE = 'last_run'
RUN_DIR = dirname(dirname(os.path.abspath(__file__)))
INDEX_SETTINGS = join(RUN_DIR, 'schemas/indexing.json')
def clear_and_load(es, index_name, type_name, mapping_file):
    ''' Drop index_name if it exists, then (re)create it with the given
    mapping via load_mapping().
    '''
    try:
        es.indices.delete(index=index_name)
    except elasticsearch.ElasticsearchException:
        # Missing index is fine: there is simply nothing to clear.
        logging.info('%s does not exist, nothing to delete', index_name)
    else:
        logging.info('Deleting index %s', index_name)
    load_mapping(es, index_name, type_name, mapping_file)
def load_mapping(es, index_name, type_name, mapping_file_or_dict):
    ''' Create index_name (if absent) with the project-wide index settings
    and install the given mapping for type_name.

    :param es: Elasticsearch client.
    :param index_name: Target index name.
    :param type_name: Document type the mapping applies to.
    :param mapping_file_or_dict: Either a mapping dict or a path to a JSON
        file containing the mapping.
    '''
    # Improvement: isinstance(..., dict) works on both Python 2 and 3,
    # unlike types.DictType.
    if isinstance(mapping_file_or_dict, dict):
        mapping_dict = mapping_file_or_dict
    else:
        # Bug fix: the file handle was never closed.
        with open(mapping_file_or_dict, 'r') as mapping_file:
            mapping_dict = json.loads(mapping_file.read().strip())
    if es.indices.exists(index_name):
        logging.info('Index %s already exists, skipping creation.', index_name)
        return
    try:
        # Bug fix: the settings file handle was never closed.
        with open(INDEX_SETTINGS) as settings_file:
            settings_dict = json.loads(settings_file.read())
        # Ignore "index already exists" error (HTTP 400) to stay idempotent.
        es.indices.create(index=index_name,
                          body=settings_dict, ignore=400)
        es.indices.put_mapping(index=index_name,
                               doc_type=type_name,
                               body=mapping_dict)
        es.indices.clear_cache(index=index_name)
    except Exception:
        # Bug fix: was a bare `except:`, which also intercepts SystemExit and
        # KeyboardInterrupt; log real errors and re-raise.
        logging.fatal('Something has gone wrong making the mapping for %s', type_name,
                      exc_info=1)
        raise
def update_process_datetime(es, doc_id, timestamp):
    ''' Record `timestamp` as the last_update_date for the given document id.

    The document id passed in is the name of another index in the cluster.
    '''
    # Make sure the metadata index and its date mapping exist first.
    mapping = {
        'last_run': {
            'properties': {
                'last_update_date': {
                    'type': 'date',
                    'format': 'dateOptionalTime'
                }
            }
        }
    }
    load_mapping(es, METADATA_INDEX, METADATA_TYPE, mapping)
    es.index(index=METADATA_INDEX,
             doc_type=METADATA_TYPE,
             id=doc_id,
             body={'last_update_date': timestamp})
| {
"repo_name": "HiTechIronMan/openfda",
"path": "openfda/elasticsearch_requests.py",
"copies": "1",
"size": "2227",
"license": "cc0-1.0",
"hash": 5301331187861956000,
"line_mean": 29.5068493151,
"line_max": 82,
"alpha_frac": 0.652896273,
"autogenerated": false,
"ratio": 3.5746388443017656,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9659605042497805,
"avg_score": 0.013586014960792033,
"num_lines": 73
} |
"""ab URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
    # Django admin site.
    url(r'^admin/', include(admin.site.urls)),
    # Override allauth's signup view with a plain template (signup-no.html),
    # effectively disabling self-service signup.
    url(r'^accounts/signup/$', TemplateView.as_view(template_name="signup-no.html"), name='signup'),
    # Remaining account flows (login, logout, ...) come from django-allauth.
    url(r'^accounts/', include('allauth.urls')),
    # Project apps, each mounted under its own URL namespace;
    # 'baza' is mounted at the site root.
    url(r'^', include('baza.urls', namespace='baza')),
    url(r'^grupy/', include('grupa.urls', namespace='grupa')),
    url(r'^raport/', include('raporty.urls', namespace='raporty')),
    url(r'^zadania/', include('zadania.urls', namespace='zadania')),
]
| {
"repo_name": "szymanskirafal/ab",
"path": "ab/urls.py",
"copies": "1",
"size": "1210",
"license": "mit",
"hash": 1874570066930117400,
"line_mean": 33.5714285714,
"line_max": 100,
"alpha_frac": 0.6809917355,
"autogenerated": false,
"ratio": 3.3333333333333335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45143250688333336,
"avg_score": null,
"num_lines": null
} |
"""AbzuGames URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
    # Django admin site.
    url(r'^admin/', include(admin.site.urls)),
    # The blog app handles everything else at the site root.
    url(r'^', include('blog.urls', namespace = "app_blog")),
]
# Serve static and media files through Django itself during development
# only (the static() helper is a no-op unless DEBUG is on).
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| {
"repo_name": "PachaTechnology/Abzugames",
"path": "AbzuGames/urls.py",
"copies": "2",
"size": "1080",
"license": "apache-2.0",
"hash": 7877839921741770000,
"line_mean": 37.5714285714,
"line_max": 82,
"alpha_frac": 0.7138888889,
"autogenerated": false,
"ratio": 3.495145631067961,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5209034519967961,
"avg_score": null,
"num_lines": null
} |
# AC2DC.py
#
# This is a demo to show the power of the audio hardware under the
# influence of standard text mode Python.
#
# (C)2010, B.Walker, G0LCU. Now issued as Public Domain; you may do
# with this code as you please.
#
# Tested on PCLinuxOS 2009 and Debian 6.0.0 using Python 2.5.2, 2.6.6
# and 2.7.2; (it may well work on Python versions earlier than the
# above but it is untested).
#
# The device "/dev/dsp" is needed for this to work so you might have
# to install "oss-compat" from your distribution's repository...
#
# A very simple voltage doubler and passive filter TEST CIRCUIT ONLY...
# Best viewed in pure text mode.
# (Connect DC OUT & GND to a DC coupled oscilloscope to see it working.)
#
# Headset O/P. C1. |\|D2.
# O--------o--||--o-------o---| +---o-------o-------O +VE DC OUT.
# | | | |/|+ | |
# O < | + < | <
# | > --+-- > | + >
# | * R1. < / \ D1. < R2. === C2. < R3.
# | > +---+ > | >
# | < | < | <
# | | | | | |
# +--------o------o-------o---------o---o---o-------O -VE.
# |
# Parts List. ---+--- GND.
# ----------- ///////
# C1 = 1.0 uF, 50V.
# C2 = 10 uF, electrolytic, 10V.
# R1 = 47 KilOhms, (* this can be ommitted).
# R2 = 1 MegOhm.
# R3 = 100 KilOhms.
# D1, D2 = OA90 or any similar germanium diode.
# 3.2 mm stereo jack plug for headset socket.
# Coaxial connecting cable.
# Sundries as required, stripboard, etc.
import os
# The running code...
def main():
    """Interactive loop: ask the user for an output level (0-255) and play
    a two-sample waveform of that amplitude to /dev/dsp for roughly ten
    seconds, repeating until an empty reply quits. (Python 2 / OSS only.)"""
    # Set globals, my choice... ;o)
    global waveform
    global value
    global count
    # Choose startup values...
    waveform=chr(0)+chr(0)
    value="(C)2010, B.Walker, G0LCU."
    count=0
    while 1:
        # Use the Linux system clear-screen command.
        os.system("clear")
        # A simple user screen...
        print "\nA DEMO variable AC to DC Generator using the sound card in Linux.\n"
        print "(C)2010, B.Walker, G0LCU; now issued as Public Domain...\n"
        value=raw_input("Input any integer from 0 to 255, [RETURN/ENTER] to Quit:- ")
        # Don't allow any errors...
        # An empty reply exits the program.
        if value=="": break
        # More than three characters is clamped straight to the maximum.
        if len(value)>=4: value="255"
        count=0
        # Any non-digit character (outside ASCII '0'..'9') also forces 255.
        while count<=(len(value)-1):
            if value[count]>=chr(48) and value[count]<=chr(57): count=count+1
            else: value="255"
        if int(value)>=255: value="255"
        if int(value)<=0: value="0"
        # Create a symetrical triangle waveform with an amplitude of "value".
        # NOTE(review): the repeated two-byte pair (0, value) is really a
        # square wave at half the DSP sample rate, not a triangle -- the
        # external RC filter presumably smooths it; confirm the comment.
        print "\nOutput level value is "+value+"..."
        waveform=chr(0)+chr(int(value))
        # Generate this signal for about 10 seconds for this DEMO.
        if int(value)>=1:
            count=0
            # /dev/dsp (OSS) typically defaults to 8 kHz, 8-bit, mono, so
            # ~40001 two-byte writes last roughly ten seconds.
            audio=open("/dev/dsp", "wb")
            while count<=40000:
                audio.write(waveform)
                count=count+1
            audio.close()
main()
# End of AC2DC.py program.
# Enjoy finding simple solutions to often very difficult problems... ;o)
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/577924_Now_Something_COMPLETELY_Different_Using_Text/recipe-577924.py",
"copies": "1",
"size": "2947",
"license": "mit",
"hash": -8052637446363649000,
"line_mean": 34.5060240964,
"line_max": 79,
"alpha_frac": 0.5564981337,
"autogenerated": false,
"ratio": 2.897738446411013,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3954236580111013,
"avg_score": null,
"num_lines": null
} |
"""A C4.5 Decision Tree learner for classification problems"""
import math
def increment_counter(dictionary, key):
    """Bump the counter stored under *key* in *dictionary*, starting the
    counter at 1 for keys that have not been seen before."""
    dictionary[key] = dictionary.get(key, 0) + 1
def add_to_list(dictionary, key, value):
    """Append *value* to the list stored under *key* in *dictionary*,
    creating the list on first use."""
    dictionary.setdefault(key, []).append(value)
def partitions_class_attribute(data_points, attr_index):
    """Group data points by the value of the class attribute at *attr_index*.

    :param data_points: List of tuples representing the data points.
    :param attr_index: Index of the (class-type) attribute to group by.
    :return: Dict mapping each attribute value to its list of points."""
    partitions = {}
    for point in data_points:
        partitions.setdefault(point[attr_index], []).append(point)
    return partitions
def partition_sizes_class_attribute(data_points, attribute_index):
    """Count how many data points fall into each partition induced by the
    class-type attribute at *attribute_index*.

    :param data_points: The data points which ought to be partitioned.
    :param attribute_index: Index of the class-type attribute.
    :return: Dict mapping each attribute label to the number of points
        carrying that label."""
    sizes = {}
    for point in data_points:
        label = point[attribute_index]
        sizes[label] = sizes.get(label, 0) + 1
    return sizes
def information(data_points):
    """Average amount of information (entropy, in bits) of the data points.

    :param data_points: List of tuples; the last tuple component is the
        output label.
    :return: Entropy of the label distribution (0.0 for empty input)."""
    total = len(data_points)
    # Label frequencies (inlined partition count over the last component).
    label_counts = {}
    for point in data_points:
        label_counts[point[-1]] = label_counts.get(point[-1], 0) + 1
    entropy = 0.0
    for count in label_counts.values():
        p = count / total
        entropy -= p * math.log2(p)
    return entropy
def information_partitioned(point_count, partitioned_data_points):
    """Average amount of information of the data after partitioning.

    :param point_count: Total number of data points across all partitions.
    :param partitioned_data_points: Dict mapping partition keys to lists
        of data-point tuples (last tuple component = output label).
    :return: Size-weighted mean entropy of the partitions."""
    weighted = 0.0
    for partition in partitioned_data_points.values():
        size = len(partition)
        # Entropy of this partition (inlined label-frequency count).
        label_counts = {}
        for point in partition:
            label_counts[point[-1]] = label_counts.get(point[-1], 0) + 1
        entropy = 0.0
        for count in label_counts.values():
            p = count / size
            entropy -= p * math.log2(p)
        weighted += size / point_count * entropy
    return weighted
def split(sorted_data_points, attr_index, split_value):
    """Cut a list sorted on attribute *attr_index* into two sublists:
    tuples with value <= *split_value* and tuples with value > *split_value*.

    :param sorted_data_points: Data points sorted by the given attribute.
    :param attr_index: Index of the attribute defining the order.
    :param split_value: Attribute value at which to cut.
    :return: ``[left, right]`` — the right list is empty when no element
        exceeds *split_value*."""
    cut = len(sorted_data_points)
    for pos, point in enumerate(sorted_data_points):
        if point[attr_index] > split_value:
            cut = pos
            break
    return [sorted_data_points[:cut], sorted_data_points[cut:]]
def cont_attr_best_split(data_points, attr_index):
    """Finds the split value for partitioning the data points using
    the given attribute such that the information gain is maximized.
    Searches the partition with minimum information as its minimum maximizes
    information gain (gain = information unpartit. - information partit.).

    Side effect: sorts *data_points* in place by the chosen attribute.

    :param data_points: List of tuples representing the data points.
    Last tuple component corresponds to output label.
    :param attr_index: Index of the attribute for which the best split
    value is requested.
    :return: The minimum average information when using the optimal split
    value along with the optimal split value."""
    data_points.sort(key=lambda tup: tup[attr_index])
    # Incrementally grow the left sublist as the candidate split value rises;
    # the points are sorted, so each pass only moves a prefix of `right`.
    left_sublist = []
    right_sublist = data_points
    unique_attr_values = list(set([i[attr_index] for i in data_points]))
    unique_attr_values.sort()
    min_info = None
    best_value = None
    for value in unique_attr_values:
        for index in range(len(right_sublist)):
            if right_sublist[index][attr_index] > value:
                left_sublist.extend(right_sublist[:index])
                right_sublist = right_sublist[index:]
                break
        # NOTE(review): for the largest unique value nothing is > value, so
        # the inner loop never moves the boundary and the PREVIOUS partition
        # is re-scored under this value -- harmless for the minimum but
        # confirm it is intended.
        partitioned_data_points = {"l": left_sublist, "r": right_sublist}
        info = information_partitioned(len(data_points), partitioned_data_points)
        if min_info is None or info < min_info:
            min_info = info
            best_value = value
    return min_info, best_value
def cont_attr_gain(data_points, attr_index, info):
    """Information gain from partitioning on a continuous attribute.

    :param data_points: List of tuples (last component = output label).
    :param attr_index: Index of the continuous attribute to split on.
    :param info: Entropy of the unpartitioned data points.
    :return: Tuple of (gain, best split value producing that gain)."""
    partition_info, split_value = cont_attr_best_split(data_points, attr_index)
    return info - partition_info, split_value
def class_attr_gain(data_points, attr_index, info):
    """Information gain from partitioning on a class-type attribute.

    :param data_points: List of tuples (last component = output label).
    :param attr_index: Index of the class attribute to partition on.
    :param info: Entropy of the unpartitioned data points.
    :return: info minus the size-weighted entropy of the partitions."""
    partitions = partitions_class_attribute(data_points, attr_index)
    return info - information_partitioned(len(data_points), partitions)
def gain(data_points, attr_index):
    """Information gain obtained by partitioning *data_points* on the
    attribute at *attr_index*.

    Dispatches to the class-attribute computation for int-valued
    attributes and to the continuous one otherwise.

    :return: Gain in bits; 0.0 for an empty point list."""
    if not data_points:
        return 0.0
    base_info = information(data_points)
    # type(...) is int deliberately excludes bool subclasses and floats.
    if type(data_points[0][attr_index]) is int:
        return class_attr_gain(data_points, attr_index, base_info)
    return cont_attr_gain(data_points, attr_index, base_info)[0]
def best_split_attribute(data_points):
    """Pick the attribute whose partitioning of *data_points* yields the
    highest information gain.

    :param data_points: List of tuples (last component = output label).
    :return: (attribute index, gain); defaults to (0, 0.0) when no
        attribute achieves a strictly positive gain."""
    best_index, best_gain = 0, 0.0
    for candidate in range(len(data_points[0]) - 1):
        candidate_gain = gain(data_points, candidate)
        if candidate_gain > best_gain:
            best_index, best_gain = candidate, candidate_gain
    return best_index, best_gain
class ClassificationTree:
    """A C4.5 decision tree for classification problems."""

    def __init__(self):
        self.root = None

    def build(self, data_points, depth_constraint=None, gain_threshold=0.01):
        """Train the tree on *data_points* (last tuple entry = label).

        :param data_points: List of tuples representing the data points.
        :param depth_constraint: Maximum tree depth; 0 yields a single
            test node, None disables the limit entirely.
        :param gain_threshold: Minimum information gain a test must reach
            before it is added to a branch."""
        self.root = Node.suitable_node(data_points, 0, depth_constraint,
                                       gain_threshold)

    def evaluate(self, input):
        """Predict the output label for *input* (a tuple without the
        label component). Must only be called after build().

        :return: Output label predicted by the tree."""
        return self.root.evaluate(input)

    def __str__(self):
        return self.root.description_string(0)
class Node:
    """A node of the decision tree."""
    def __init__(self, attr_index=None):
        # successors: child nodes; attr_index: attribute this node tests
        # (None for plain/leaf nodes).
        self.successors = []
        self.attr_index = attr_index
    @staticmethod
    def suitable_node(data_points, depth, depth_constraint, gain_threshold):
        """Constructs a suitable node for the given data points. If a
        further test results in sufficient information gain and does not
        exceed the depth constraint, an inner node with the corresponding
        test (continuous or class attribute) is created. Otherwise, a
        Leaf with a suitable output label is created.
        :param data_points: List of tuples representing the data points.
        Last tuple component must corresponds to output label.
        :param depth: Depth of the node to be constructed.
        :param depth_constraint: Specifies the maximum depth allowed
        for the decision tree.
        :param gain_threshold: Minimum information gain a test/ inner node
        must achieve to be added to the tree. Otherwise, no further test
        is added to the current branch.
        :return: Suitable node."""
        output_class_sizes = partition_sizes_class_attribute(data_points, -1)
        output_classes = len(output_class_sizes)
        if output_classes == 1: # Recursion ends: the partition is pure.
            return Leaf(data_points[0][-1])
        else: # Recursion continues (with exception)
            # NOTE: the local name `gain` shadows the module-level gain()
            # inside this branch.
            best_split_attr, gain = best_split_attribute(data_points)
            if gain > gain_threshold and (depth_constraint is None or
                                          depth <= depth_constraint):
                # int-typed attribute values mean a class attribute test;
                # everything else is treated as continuous.
                if type(data_points[0][best_split_attr]) is int:
                    return ClassNode(data_points, best_split_attr, depth,
                                     depth_constraint, gain_threshold)
                else:
                    return ContNode(data_points, best_split_attr, depth,
                                    depth_constraint, gain_threshold)
            else: # Gain too small, Recursion ends
                # Fall back to the majority label of the remaining points.
                value = max(output_class_sizes.items(),
                            key=lambda item: item[1])[0]
                return Leaf(value)
class ContNode(Node):
    """A node of the decision tree which performs a test on a continuous
    attribute (input <= split_value goes left, otherwise right)."""
    def __init__(self, data_points, attr_index, depth, depth_constraint,
                 gain_threshold):
        """Initializes a node for a continuous attribute test using the
        given data points.
        :param data_points: List of tuples representing the data points.
        Last tuple component must corresponds to output label.
        :param attr_index: Index of the attribute used for the test.
        Must be a continuous attribute.
        :param depth: Depth of the node to be constructed.
        :param depth_constraint: Specifies the maximum depth allowed
        for the decision tree.
        :param gain_threshold: Minimum information gain a test/ inner node
        must achieve to be added to the tree. Otherwise, no further test
        is added to the current branch."""
        Node.__init__(self, attr_index)
        # cont_attr_best_split also sorts data_points in place, which is
        # what the subsequent split() relies on.
        self.split_value = cont_attr_best_split(data_points, attr_index)[1]
        # split() returns [<= split_value, > split_value]; the successor
        # order (0 = left/<=, 1 = right/>) is fixed by this.
        partitioned_data_points = split(data_points, attr_index,
                                        self.split_value)
        self.__create_successors(partitioned_data_points, depth + 1,
                                 depth_constraint, gain_threshold)
    def __create_successors(self, partitioned_data_points, depth,
                            depth_constraint, gain_threshold):
        """Adds suitable successor nodes to the current node.
        :param partitioned_data_points: List of two partitions, each a
        list of tuples representing the data points.
        :param depth: Depth of the successor nodes to be constructed.
        :param depth_constraint: Specifies the maximum depth allowed
        for the decision tree.
        :param gain_threshold: Minimum information gain a test/ inner node
        must achieve to be added to the tree. Otherwise, no further test
        is added to the current branch."""
        for partition in partitioned_data_points:
            successor = Node.suitable_node(partition, depth, depth_constraint,
                                           gain_threshold)
            self.successors.append(successor)
    def evaluate(self, input):
        """Evaluates the subtree rooted in the current node on the given
        input by performing the test on the input and passes the input
        on to its successors.
        :param input: Input point. Should not contain the last component
        which corresponds to the output label.
        :return: Output label predicted by this subtree."""
        if input[self.attr_index] <= self.split_value:
            return self.successors[0].evaluate(input)
        else:
            return self.successors[1].evaluate(input)
    def description_string(self, depth):
        """String representation of the subtree rooted in the current node.
        :param depth: Depth of the current node (controls indentation).
        :return: String representation."""
        descr = ""
        indent = "\t" * depth
        descr += indent + "If x[" + str(self.attr_index) + "] <= " + \
                 str(self.split_value) + ":\n"
        descr += self.successors[0].description_string(depth + 1)
        descr += indent + "If x[" + str(self.attr_index) + "] > " + \
                 str(self.split_value) + ":\n"
        descr += self.successors[1].description_string(depth + 1)
        return descr
class ClassNode(Node):
    """A node of the decision tree which performs a test on a class
    attribute (one successor per attribute value seen during training)."""
    def __init__(self, data_points, attr_index, depth, depth_constraint,
                 gain_threshold):
        """Initializes a node for a class attribute test using the
        given data points.
        :param data_points: List of tuples representing the data points.
        Last tuple component must corresponds to output label.
        :param attr_index: Index of the attribute used for the test. Must
        be a class attribute.
        :param depth: Depth of the node to be constructed.
        :param depth_constraint: Specifies the maximum depth allowed
        for the decision tree.
        :param gain_threshold: Minimum information gain a test/ inner node
        must achieve to be added to the tree. Otherwise, no further test
        is added to the current branch."""
        Node.__init__(self, attr_index)
        partitioned_data_points = partitions_class_attribute(data_points,
                                                             attr_index)
        # keys[i] is the attribute value routed to successors[i].
        self.keys = list(partitioned_data_points.keys())
        self.__create_successors(partitioned_data_points, depth + 1,
                                 depth_constraint, gain_threshold)
    def __create_successors(self, partitioned_data_points, depth,
                            depth_constraint, gain_threshold):
        """Adds suitable successor nodes to the current node.
        :param partitioned_data_points: Dictionary of partitions where each
        partition contains a list of tuples representing the data points.
        :param depth: Depth of the successor nodes to be constructed.
        :param depth_constraint: Specifies the maximum depth allowed
        for the decision tree.
        :param gain_threshold: Minimum information gain a test/ inner node
        must achieve to be added to the tree. Otherwise, no further test
        is added to the current branch."""
        for key in self.keys:
            partition = partitioned_data_points[key]
            successor = Node.suitable_node(partition, depth, depth_constraint,
                                           gain_threshold)
            self.successors.append(successor)
    def evaluate(self, input):
        """Evaluates the subtree rooted in the current node on the given
        input by performing the test on the input and passes the input
        on to its successors.
        :param input: Input point. Should not contain the last component
        which corresponds to the output label.
        :return: Output label predicted by this subtree."""
        # Attribute values never seen during training fall back to the
        # first successor (succ_index stays 0).
        succ_index = 0
        if input[self.attr_index] in self.keys:
            succ_index = self.keys.index(input[self.attr_index])
        return self.successors[succ_index].evaluate(input)
    def description_string(self, depth):
        """String representation of the subtree rooted in the current node.
        :param depth: Depth of the current node (controls indentation).
        :return: String representation."""
        descr = ""
        indent = "\t" * depth
        for index, key in enumerate(self.keys):
            descr += indent + "If x[" + str(self.attr_index) + "] == " + \
                     str(key) + ":\n"
            descr += self.successors[index].description_string(depth + 1)
        return descr
class Leaf(Node):
    """Terminal node of the decision tree: stores the output label that is
    returned for any input whose evaluation ends up here."""

    def __init__(self, value):
        Node.__init__(self)
        self.value = value

    def evaluate(self, input):
        """Return the stored output label; *input* is ignored.

        :param input: Input point (unused at a leaf).
        :return: The leaf's output label."""
        return self.value

    def description_string(self, depth):
        """One indented line showing the leaf's label.

        :param depth: Depth of the leaf (controls indentation).
        :return: String representation."""
        return "{}{}\n".format("\t" * depth, self.value)
if __name__ == '__main__':
    # Tiny smoke test: train on seven hand-made points (last tuple entry is
    # the class label), print the learned tree, then classify one input.
    tree = ClassificationTree()
    data = [(0, 1, 0, 0.25, 0), (0, 1, 1, 0.2, 0), (0, 2, 0, 0.5, 1),
            (0, 1, 0, 0.55, 1), (0, 2, 0, 0.52, 1), (0, 1, 1, 0.88, 2),
            (1, 1, 0, 0.95, 2)]
    tree.build(data)
    print(tree)
    print(tree.evaluate((0, 1, 1, 0.9)))
| {
"repo_name": "FelixOpolka/Statistical-Learning-Algorithms",
"path": "decision tree/DecisionTree.py",
"copies": "1",
"size": "20040",
"license": "mit",
"hash": 4157810443159869000,
"line_mean": 45.2817551963,
"line_max": 89,
"alpha_frac": 0.6579341317,
"autogenerated": false,
"ratio": 4.42481784058291,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.558275197228291,
"avg_score": null,
"num_lines": null
} |
"""A cache for immutable values (functions, constants, and classes).
These are immutable for a given interpreter, but may possibly change
if we run multiple Interpreters in sequence. This logic allows the
JIT-compiled machine code to quickly check if we're in the common
case of seeing the same value as previously.
"""
from rpython.rlib import jit
class ImmutCell(object):
    """One cached definition (a constant, function, or class value).

    constant_value is the value seen the first time the name was defined
    and is declared JIT-immutable; currently_declared tracks what the
    current interpreter run has declared (None after reset() un-declares
    it), so re-running with the same definitions hits the constant path."""
    _immutable_fields_ = ['constant_value', 'is_builtin']
    # Sentinel; replaced by GlobalImmutCache.create_class for class cells.
    _class_key = (None, None, None)
    def __init__(self, constant_value, is_builtin=False):
        assert constant_value is not None
        self.constant_value = constant_value
        self.currently_declared = constant_value
        self.constant_value_is_currently_declared = True
        self.is_builtin = is_builtin
    def get_current_value(self):
        # Builtins and re-declared constants take the JIT-visible
        # constant-folded path; otherwise fall back to the live value.
        if self.is_builtin or self.constant_value_is_currently_declared:
            return self.constant_value # constant-folded
        else:
            return self.currently_declared
# Opaque version token: GlobalImmutCache allocates a fresh instance whenever
# a cell is added, invalidating the @jit.elidable_promote'd get_cell results.
class GlobalImmutCacheVersion(object): pass
class GlobalImmutCache(object):
    """Cache of ImmutCell objects keyed by (optionally lowercased) name.

    The `version` object is swapped whenever a cell is added so that the
    JIT-elidable get_cell() lookups can be safely constant-folded."""
    _immutable_fields_ = ['version?']
    def __init__(self, space, initdict={}, force_lowcase=True):
        # Note: the mutable default `initdict={}` is safe here because it
        # is only iterated, never mutated.
        self.space = space
        self.all_cells = {}
        self.force_lowcase = force_lowcase
        for key, value in initdict.items():
            self.set_builtin(key, value)
        self.version = GlobalImmutCacheVersion()
    def set_builtin(self, name, value):
        # Builtins are permanent: reset() leaves them declared.
        self.set_cell(name, ImmutCell(value, is_builtin=True))
    def reset(self):
        # un-declare every non-builtin value
        # Note: we don't need to reset the cache version here, as we're not
        # deleting cell objects, merely emptying their contents.
        for cell in self.all_cells.itervalues():
            if not cell.is_builtin:
                cell.currently_declared = None
                cell.constant_value_is_currently_declared = False
    @jit.elidable_promote()
    def get_cell(self, name, version):
        # `version` is only used as a promotion key so the JIT can cache
        # the lookup result per cache version.
        if self.force_lowcase:
            name = name.lower()
        try:
            return self.all_cells[name]
        except KeyError:
            return None
    def set_cell(self, name, newcell):
        if self.force_lowcase:
            name = name.lower()
        assert name not in self.all_cells
        self.all_cells[name] = newcell
        # New cell -> invalidate any constant-folded get_cell results.
        self.version = GlobalImmutCacheVersion()
    def has_definition(self, name):
        cell = self.get_cell(name, self.version)
        if cell is None:
            return False
        return cell.currently_declared is not None
    def locate(self, name):
        # Return the currently visible value for `name`, or None.
        cell = self.get_cell(name, self.version)
        if cell is None:
            return None
        return cell.get_current_value()
    def declare_new(self, name, value):
        # Declare `name` in the current run, reusing an existing cell so
        # the fast path survives when the value matches the original.
        assert value is not None
        cell = self.get_cell(name, self.version)
        if cell is None:
            cell = ImmutCell(value)
            self.set_cell(name, cell)
        else:
            assert cell.currently_declared is None
            assert not cell.constant_value_is_currently_declared
            cell.currently_declared = value
            cell.constant_value_is_currently_declared = (
                value is cell.constant_value)
        return cell
    def create_class(self, interp, name, decl, key):
        "Special case for classes"
        cell = self.get_cell(name, self.version)
        if cell is not None:
            # Same class key as before: reuse the cached class object
            # instead of building a new one.
            if cell._class_key == key:
                cell.currently_declared = cell.constant_value
                cell.constant_value_is_currently_declared = True
                decl.redefine_old_class(interp, cell.constant_value)
                return
        kls = decl.define_new_class(interp)
        cell = self.declare_new(name, kls)
        decl._immut_cell = cell
        if cell.constant_value_is_currently_declared:
            cell._class_key = key
| {
"repo_name": "hippyvm/hippyvm",
"path": "hippy/immut_cache.py",
"copies": "2",
"size": "3884",
"license": "mit",
"hash": 5445372691081891000,
"line_mean": 33.990990991,
"line_max": 75,
"alpha_frac": 0.6207518023,
"autogenerated": false,
"ratio": 4.012396694214876,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5633148496514876,
"avg_score": null,
"num_lines": null
} |
"""A cache for storing small matrices in multiple formats."""
from __future__ import print_function, division
from sympy import Matrix, I, Pow, Rational, exp, pi
from sympy.physics.quantum.matrixutils import (
to_sympy, to_numpy, to_scipy_sparse
)
class MatrixCache(object):
    """A cache for small matrices in different formats.

    Matrices supplied as ``sympy.Matrix`` instances are converted up front
    to ``numpy`` and ``scipy.sparse`` equivalents and stored under
    ``(name, format)`` keys for later retrieval.
    """

    def __init__(self, dtype='complex'):
        self._cache = {}
        self.dtype = dtype

    def cache_matrix(self, name, m):
        """Cache a matrix under *name* in every available format.

        Formats whose backing library is not installed are skipped
        silently (their converter raises ImportError).
        """
        for converter in (self._sympy_matrix,
                          self._numpy_matrix,
                          self._scipy_sparse_matrix):
            try:
                converter(name, m)
            except ImportError:
                pass

    def get_matrix(self, name, format):
        """Return the cached matrix for (*name*, *format*).

        Raises NotImplementedError if no such entry exists.
        """
        entry = self._cache.get((name, format))
        if entry is None:
            raise NotImplementedError(
                'Matrix with name %s and format %s is not available.' %
                (name, format)
            )
        return entry

    def _store_matrix(self, name, format, m):
        self._cache[(name, format)] = m

    def _sympy_matrix(self, name, m):
        self._store_matrix(name, 'sympy', to_sympy(m))

    def _numpy_matrix(self, name, m):
        self._store_matrix(name, 'numpy', to_numpy(m, dtype=self.dtype))

    def _scipy_sparse_matrix(self, name, m):
        # TODO: explore different sparse formats. But sparse.kron will use
        # coo in most cases, so we use that here.
        self._store_matrix(name, 'scipy.sparse',
                           to_scipy_sparse(m, dtype=self.dtype))
# 1/sqrt(2), kept unevaluated so it prints symbolically.
sqrt2_inv = Pow(2, Rational(-1, 2), evaluate=False)
# Save the common matrices that we will need
matrix_cache = MatrixCache()
matrix_cache.cache_matrix('eye2', Matrix([[1, 0], [0, 1]]))
# Single-qubit projectors / ladder operators.
matrix_cache.cache_matrix('op11', Matrix([[0, 0], [0, 1]]))  # |1><1|
matrix_cache.cache_matrix('op00', Matrix([[1, 0], [0, 0]]))  # |0><0|
matrix_cache.cache_matrix('op10', Matrix([[0, 0], [1, 0]]))  # |1><0|
matrix_cache.cache_matrix('op01', Matrix([[0, 1], [0, 0]]))  # |0><1|
# Pauli matrices and common phase gates.
matrix_cache.cache_matrix('X', Matrix([[0, 1], [1, 0]]))
matrix_cache.cache_matrix('Y', Matrix([[0, -I], [I, 0]]))
matrix_cache.cache_matrix('Z', Matrix([[1, 0], [0, -1]]))
matrix_cache.cache_matrix('S', Matrix([[1, 0], [0, I]]))
matrix_cache.cache_matrix('T', Matrix([[1, 0], [0, exp(I*pi/4)]]))
# Hadamard, with and without the 1/sqrt(2) normalization.
matrix_cache.cache_matrix('H', sqrt2_inv*Matrix([[1, 1], [1, -1]]))
matrix_cache.cache_matrix('Hsqrt2', Matrix([[1, 1], [1, -1]]))
matrix_cache.cache_matrix(
    'SWAP', Matrix([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]))
matrix_cache.cache_matrix('ZX', sqrt2_inv*Matrix([[1, 1], [1, -1]]))
matrix_cache.cache_matrix('ZY', Matrix([[I, 0], [0, -I]]))
| {
"repo_name": "Vishluck/sympy",
"path": "sympy/physics/quantum/matrixcache.py",
"copies": "124",
"size": "3519",
"license": "bsd-3-clause",
"hash": 6685193551654197000,
"line_mean": 33.5,
"line_max": 77,
"alpha_frac": 0.5745950554,
"autogenerated": false,
"ratio": 3.3836538461538463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A cache for storing small matrices in multiple formats."""
from sympy import Matrix, I, Pow, Rational, exp, pi
from sympy.physics.quantum.matrixutils import (
to_sympy, to_numpy, to_scipy_sparse
)
class MatrixCache(object):
    """A cache for small matrices in different formats.

    Takes matrices in the standard ``sympy.Matrix`` format and stores
    converted ``numpy`` / ``scipy.sparse`` copies alongside them, keyed
    by ``(name, format)``.
    """

    def __init__(self, dtype='complex'):
        self._cache = {}
        self.dtype = dtype

    def cache_matrix(self, name, m):
        """Cache *m* under *name* in every format whose library is installed.

        Unavailable backends (converter raises ImportError) are skipped.
        """
        for store in [self._sympy_matrix,
                      self._numpy_matrix,
                      self._scipy_sparse_matrix]:
            try:
                store(name, m)
            except ImportError:
                pass

    def get_matrix(self, name, format):
        """Return the cached matrix for (*name*, *format*) or raise
        NotImplementedError when it was never cached."""
        found = self._cache.get((name, format))
        if found is not None:
            return found
        raise NotImplementedError(
            'Matrix with name %s and format %s is not available.' %
            (name, format)
        )

    def _store_matrix(self, name, format, m):
        self._cache[(name, format)] = m

    def _sympy_matrix(self, name, m):
        self._store_matrix(name, 'sympy', to_sympy(m))

    def _numpy_matrix(self, name, m):
        converted = to_numpy(m, dtype=self.dtype)
        self._store_matrix(name, 'numpy', converted)

    def _scipy_sparse_matrix(self, name, m):
        # TODO: explore different sparse formats. But sparse.kron will use
        # coo in most cases, so we use that here.
        converted = to_scipy_sparse(m, dtype=self.dtype)
        self._store_matrix(name, 'scipy.sparse', converted)
# 1/sqrt(2), kept unevaluated so it prints symbolically.
sqrt2_inv = Pow(2, Rational(-1,2), evaluate=False)
# Save the common matrices that we will need
matrix_cache = MatrixCache()
matrix_cache.cache_matrix('eye2', Matrix([[1,0],[0,1]]))
# Single-qubit projectors / ladder operators.
matrix_cache.cache_matrix('op11', Matrix([[0,0],[0,1]])) # |1><1|
matrix_cache.cache_matrix('op00', Matrix([[1,0],[0,0]])) # |0><0|
matrix_cache.cache_matrix('op10', Matrix([[0,0],[1,0]])) # |1><0|
matrix_cache.cache_matrix('op01', Matrix([[0,1],[0,0]])) # |0><1|
# Pauli matrices and common phase gates.
matrix_cache.cache_matrix('X', Matrix([[0, 1], [1, 0]]))
matrix_cache.cache_matrix('Y', Matrix([[0, -I], [I, 0]]))
matrix_cache.cache_matrix('Z', Matrix([[1, 0], [0, -1]]))
matrix_cache.cache_matrix('S', Matrix([[1, 0], [0, I]]))
matrix_cache.cache_matrix('T', Matrix([[1, 0], [0, exp(I*pi/4)]]))
# Hadamard, with and without the 1/sqrt(2) normalization.
matrix_cache.cache_matrix('H', sqrt2_inv*Matrix([[1, 1], [1, -1]]))
matrix_cache.cache_matrix('Hsqrt2', Matrix([[1, 1], [1, -1]]))
matrix_cache.cache_matrix('SWAP',Matrix([[1,0,0,0],[0,0,1,0],[0,1,0,0],[0,0,0,1]]))
matrix_cache.cache_matrix('ZX', sqrt2_inv*Matrix([[1,1],[1,-1]]))
matrix_cache.cache_matrix('ZY', Matrix([[I,0],[0,-I]]))
| {
"repo_name": "flacjacket/sympy",
"path": "sympy/physics/quantum/matrixcache.py",
"copies": "2",
"size": "3425",
"license": "bsd-3-clause",
"hash": 6827747135500982000,
"line_mean": 33.25,
"line_max": 83,
"alpha_frac": 0.5795620438,
"autogenerated": false,
"ratio": 3.387734915924827,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9932666940646027,
"avg_score": 0.006926003815760176,
"num_lines": 100
} |
"""A cache for thumbnailed images."""
from __future__ import unicode_literals
import collections
import sys
import os.path
try:
from io import BytesIO as StringIO
except Exception as e:
from cStringIO import StringIO
from PIL import Image
from django.core.files.base import File
from optimizations.assetcache import default_asset_cache, Asset, AdaptiveAsset
from optimizations.propertycache import cached_property
class Size(collections.namedtuple("SizeBase", ("width", "height",))):
    """A (width, height) pair describing image dimensions."""

    def __new__(cls, width, height):
        """Create a new Size; non-None dimensions are coerced to int."""
        coerced = tuple(
            int(value) if value is not None else None
            for value in (width, height)
        )
        return tuple.__new__(cls, coerced)

    @property
    def aspect(self):
        """The width/height aspect ratio, as a float."""
        return float(self.width) / float(self.height)

    def intersect(self, size):
        """Return the elementwise minimum of this size and *size*."""
        return Size(min(self.width, size.width),
                    min(self.height, size.height))

    def constrain(self, reference):
        """Return this size shrunk so it matches *reference*'s aspect ratio
        while fitting inside the current bounds."""
        ratio = reference.aspect
        return Size(
            min(round(self.height * ratio), self.width),
            min(round(self.width / ratio), self.height),
        )

    def scale(self, x_scale, y_scale):
        """Return a new Size with width and height scaled independently."""
        return Size(float(self.width) * x_scale,
                    float(self.height) * y_scale)
# Size adjustment callbacks. These are used to determine the display and data size of the thumbnail.
def _replace_null(value, fallback):
"""Replaces a null value with a fallback."""
if value is None:
return fallback
return value
def _size(reference, size):
    """Return the requested *size*, filling any None dimension from
    *reference*; the reference aspect ratio is ignored."""
    width = _replace_null(size.width, reference.width)
    height = _replace_null(size.height, reference.height)
    return Size(width, height)
def _size_proportional(reference, size):
    """Adjust the requested *size* so it keeps *reference*'s aspect ratio.

    When both dimensions are None the reference size is used as-is.
    """
    if size.width is None and size.height is None:
        return _size(reference, size)
    # Treat a missing dimension as unbounded, then shrink to the
    # reference aspect ratio.
    bounded = Size(
        _replace_null(size.width, sys.maxsize),
        _replace_null(size.height, sys.maxsize),
    )
    return bounded.constrain(reference)
# Resize callbacks. These are used to actually resize the image data.
def _resize(image, image_size, thumbnail_display_size, thumbnail_image_size):
    """
    Resizes the image to exactly match the desired data size, ignoring aspect
    ratio.
    """
    # ANTIALIAS is PIL's highest-quality downscaling filter.
    return image.resize(thumbnail_image_size, Image.ANTIALIAS)
def _resize_cropped(image, image_size, thumbnail_display_size, thumbnail_image_size):
    """
    Resizes the image to fit the desired size, preserving aspect ratio by
    cropping, if required.
    """
    # Resize with nice filter.
    image_aspect = image_size.aspect
    if image_aspect > thumbnail_image_size.aspect:
        # Too wide: match the target height, then crop excess width.
        pre_cropped_size = Size(thumbnail_image_size.height * image_aspect, thumbnail_image_size.height)
    else:
        # Too tall: match the target width, then crop excess height.
        pre_cropped_size = Size(thumbnail_image_size.width, thumbnail_image_size.width / image_aspect)
    # Crop, taking an equal amount from both sides so the result is centered.
    image = image.resize(pre_cropped_size, Image.ANTIALIAS)
    source_x = int((pre_cropped_size.width - thumbnail_image_size.width) / 2)
    source_y = int((pre_cropped_size.height - thumbnail_image_size.height) / 2)
    return image.crop((
        source_x,
        source_y,
        source_x + thumbnail_image_size.width,
        source_y + thumbnail_image_size.height,
    ))
# Methods of generating thumbnails.
PROPORTIONAL = "proportional"
RESIZE = "resize"
CROP = "crop"
# A resize strategy: how to compute the displayed size, the stored pixel
# size, the pixel transform itself, and a stable key used in asset hashing.
ResizeMethod = collections.namedtuple("ResizeMethod", ("get_display_size", "get_data_size", "do_resize", "hash_key",))
_methods = {
    PROPORTIONAL: ResizeMethod(_size_proportional, _size, _resize, "resize"),
    RESIZE: ResizeMethod(_size, _size, _resize, "resize"),
    CROP: ResizeMethod(_size, _size_proportional, _resize_cropped, "crop"),
}
class ThumbnailError(Exception):
    """Something went wrong with thumbnail generation (usually wraps a PIL error)."""
class ThumbnailAsset(Asset):
    """An asset representing a thumbnailed file.

    Wraps a source image asset plus the requested dimensions and resize
    method; the actual pixel work happens in save().
    """
    def __init__(self, asset, width, height, method):
        """Initializes the asset.

        asset: the source image asset.
        width/height: requested dimensions; either may be None (keep original).
        method: a ResizeMethod tuple (see _methods).
        """
        self._asset = asset
        self._width = width
        self._height = height
        self._method = method
    def open(self):
        """Returns an open File for this asset."""
        return self._asset.open()
    def get_name(self):
        """Returns the name of this asset."""
        return self._asset.get_name()
    def get_url(self):
        """Returns the frontend URL of this asset."""
        return self._asset.get_url()
    def get_path(self):
        """Returns the filesystem path of this asset."""
        return self._asset.get_path()
    def get_id_params(self):
        """Returns the params which should be used to generate the id."""
        params = super(ThumbnailAsset, self).get_id_params()
        # None is encoded as -1 so the id stays stable (old-style
        # "cond and a or b" conditional expressions).
        params["width"] = self._width is None and -1 or self._width
        params["height"] = self._height is None and -1 or self._height
        params["method"] = self._method.hash_key
        return params
    @cached_property
    def _image_data_and_size(self):
        """Returns the (PIL image, Size) pair for the source asset, memoized."""
        image_data = open_image(self._asset)
        return image_data, Size(*image_data.size)
    def get_save_meta(self):
        """Returns the meta parameters to associate with the asset in the asset cache."""
        method = self._method
        requested_size = Size(self._width, self._height)
        _, original_size = self._image_data_and_size
        # Calculate the final width and height of the thumbnail.
        display_size = method.get_display_size(original_size, requested_size)
        return {
            "size": display_size
        }
    def save(self, storage, name, meta):
        """Saves this asset to the given storage.

        Resizes the source image to the size recorded in *meta* and writes
        it to *storage* under *name*; falls back to a plain copy when no
        resize is needed.  Raises ThumbnailError on PIL failures.
        """
        method = self._method
        # Calculate sizes.
        display_size = meta["size"]
        image_data, original_size = self._image_data_and_size
        data_size = method.get_data_size(display_size, display_size.intersect(original_size))
        # Check whether we need to make a thumbnail.
        if data_size == original_size:
            # Already the right size: delegate to the base class, which copies.
            super(ThumbnailAsset, self).save(storage, name, meta)
        else:
            # Use efficient image loading (e.g. JPEG draft mode decodes smaller).
            image_data.draft(None, data_size)
            # Resize the image data.
            try:
                image_data = method.do_resize(image_data, original_size, display_size, data_size)
            except Exception as ex:  # HACK: PIL raises all sorts of Exceptions :(
                raise ThumbnailError(str(ex))
            # Parse the image format from the file extension; default to PNG.
            _, extension = os.path.splitext(name)
            format = extension.lstrip(".").upper().replace("JPG", "JPEG") or "PNG"
            # If we're saving to PNG, make sure we're not in CMYK.
            if image_data.mode == "CMYK" and format == "PNG":
                image_data = image_data.convert("RGB")
            # If the storage has a path, then save it efficiently.
            try:
                thumbnail_path = storage.path(name)
            except NotImplementedError:
                # No path for the storage, so save it in a memory buffer.
                buffer = StringIO()
                try:
                    image_data.save(buffer, format)
                except Exception as ex:  # HACK: PIL raises all sorts of Exceptions :(
                    raise ThumbnailError(str(ex))
                # Measure the buffer so File.size is correct, then store it.
                buffer.seek(0, os.SEEK_END)
                buffer_length = buffer.tell()
                buffer.seek(0)
                file = File(buffer)
                file.size = buffer_length
                storage.save(name, file)
            else:
                # We can do an efficient streaming save straight to disk.
                try:
                    os.makedirs(os.path.dirname(thumbnail_path))
                except OSError:
                    # Directory already exists (or cannot be created; the
                    # subsequent save will surface a real failure).
                    pass
                try:
                    image_data.save(thumbnail_path, format)
                except Exception as ex:  # HACK: PIL raises all sorts of Exceptions :(
                    try:
                        raise ThumbnailError(str(ex))
                    finally:
                        # Remove an incomplete file, if present.
                        # NOTE(review): bare except — deliberately best-effort cleanup.
                        try:
                            os.unlink(thumbnail_path)
                        except:
                            pass
def open_image(asset):
    """Opens the image represented by the given asset."""
    try:
        path = asset.get_path()
    except NotImplementedError:
        # Storage without filesystem paths: load from an in-memory buffer.
        return Image.open(StringIO(asset.get_contents()))
    return Image.open(path)
class Thumbnail(object):
    """A generated thumbnail."""

    def __init__(self, asset_cache, asset):
        """Initializes the thumbnail."""
        self._asset_cache = asset_cache
        self._asset = asset
        self.name = asset.get_name()

    @cached_property
    def _asset_name_and_meta(self):
        # Resolved lazily and memoized: hits the asset cache exactly once.
        return self._asset_cache.get_name_and_meta(self._asset)

    @property
    def width(self):
        """The width of the thumbnail, in pixels."""
        _, meta = self._asset_name_and_meta
        return meta["size"][0]

    @property
    def height(self):
        """The height of the thumbnail, in pixels."""
        _, meta = self._asset_name_and_meta
        return meta["size"][1]

    @property
    def url(self):
        """The URL of the thumbnail."""
        name, _ = self._asset_name_and_meta
        return self._asset_cache._storage.url(name)

    @property
    def path(self):
        """The filesystem path of the thumbnail."""
        name, _ = self._asset_name_and_meta
        return self._asset_cache._storage.path(name)
class ThumbnailCache(object):
    """A cache of thumbnailed images."""

    def __init__(self, asset_cache=default_asset_cache):
        """Initializes the thumbnail cache."""
        self._asset_cache = asset_cache

    def get_thumbnail(self, asset, width=None, height=None, method=PROPORTIONAL):
        """
        Returns a thumbnail of the given size.

        Either or both of width and height may be None, in which case the
        image's original size will be used.
        """
        # Resolve the method name to its ResizeMethod strategy.
        resolved = _methods.get(method)
        if resolved is None:
            raise ValueError("{method} is not a valid thumbnail method. Should be one of {methods}.".format(
                method = method,
                methods = ", ".join(_methods.keys())
            ))
        # Adapt the asset and wrap it in a lazily-generated thumbnail.
        adapted = AdaptiveAsset(asset)
        return Thumbnail(self._asset_cache,
                         ThumbnailAsset(adapted, width, height, resolved))
# The default thumbnail cache.
default_thumbnail_cache = ThumbnailCache()
| {
"repo_name": "etianen/django-optimizations",
"path": "src/optimizations/thumbnailcache.py",
"copies": "1",
"size": "11169",
"license": "bsd-3-clause",
"hash": -1075283016190675100,
"line_mean": 33.3661538462,
"line_max": 118,
"alpha_frac": 0.6034559943,
"autogenerated": false,
"ratio": 4.119881962375508,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5223337956675508,
"avg_score": null,
"num_lines": null
} |
"""A cache of javascipt files, optionally compressed."""
from __future__ import unicode_literals
from contextlib import closing
import os.path
import re
import subprocess
try:
from django.utils.six.moves.urllib.parse import urlparse
except ImportError:
from six.moves.urllib.parse import urlparse
from django.conf import settings
from django.core.files.base import ContentFile
from django.utils.encoding import force_bytes
import optimizations
from optimizations.assetcache import default_asset_cache, GroupedAsset, AdaptiveAsset
from optimizations.assetcompiler import AssetCompilerPluginBase, default_asset_compiler
class StylesheetError(Exception):
    """Something went wrong with stylesheet compilation."""

    def __init__(self, message, detail_message):
        """Store the short *message* plus the compiler's *detail_message*."""
        self.detail_message = detail_message
        super(StylesheetError, self).__init__(message)
# Patterns that locate URL references in stylesheet source, in order of
# specificity: url('...'), url("..."), url(...), then the same three
# quoting styles for @import.  Group 1 captures the URL itself.
RE_URLS = (
    re.compile("url\('([^']+)'\)", re.IGNORECASE),
    re.compile("url\(\"([^\"]+)\"\)", re.IGNORECASE),
    re.compile("url\(([^\)]+)\)", re.IGNORECASE),
    re.compile("@import\s*\('([^']+)'\)", re.IGNORECASE),
    re.compile("@import\s*\(\"([^\"]+)\"\)", re.IGNORECASE),
    re.compile("@import\s*\(([^\)]+)\)", re.IGNORECASE),
)
class StylesheetAsset(GroupedAsset):
    """An asset that represents one or more stylesheet files."""

    # Stylesheets are concatenated with a newline between them.
    join_str = "\n"

    def __init__(self, assets, compile):
        """Initializes the asset.

        assets: the sub-assets to concatenate.
        compile: whether to compress the result with the YUI compressor.
        """
        super(StylesheetAsset, self).__init__(assets)
        self._compile = compile

    def get_id_params(self):
        """Returns the params which should be used to generate the id."""
        params = super(StylesheetAsset, self).get_id_params()
        params["compile"] = self._compile
        return params

    def save(self, storage, name, meta):
        """Saves this asset to the given storage.

        Rewrites every url(...) / @import(...) reference so relative URLs
        resolve against their source stylesheet and static URLs point into
        the asset cache, then (optionally) compresses the result.
        Raises StylesheetError if the compressor fails.
        """
        # BUG FIX: the module-level import binds the ``urlparse`` *function*
        # (``from ...urllib.parse import urlparse``), but the code below
        # uses it as a *module* (urljoin/urlparse/urlunparse), which raised
        # AttributeError at runtime.  Import the module locally under the
        # same name, mirroring the file's py2/py3 import fallback.
        try:
            from django.utils.six.moves.urllib import parse as urlparse
        except ImportError:
            from six.moves.urllib import parse as urlparse
        file_parts = []
        # Compile the assets.
        for asset in self._assets:
            # Load the asset source.
            with closing(asset.open()) as handle:
                source = handle.read().decode("utf-8")
            # Get the asset URL, used to resolve relative references.
            host_url = asset.get_url()
            for re_url in RE_URLS:
                def do_url_replacement(match):
                    url = match.group(1).strip()
                    # Resolve relative URLs.
                    url = urlparse.urljoin(host_url, url)
                    # Strip off query and fragment.
                    url_parts = urlparse.urlparse(url)
                    # Route static urls through the asset cache.
                    if url.startswith(settings.STATIC_URL):
                        simple_url = urlparse.urlunparse(url_parts[:3] + ("", "", "",))
                        static_url = default_asset_cache.get_url(simple_url[len(settings.STATIC_URL):], force_save=True)
                        url = urlparse.urlunparse(urlparse.urlparse(static_url)[:3] + url_parts[3:])
                    # NOTE(review): @import matches are also rewritten to
                    # url(...) form — preserved from the original behavior.
                    return "url({url})".format(
                        url = url,
                    )
                source = re_url.sub(do_url_replacement, source)
            file_parts.append(source.encode("utf-8"))
        # Consolidate the content.
        contents = force_bytes(self.join_str).join(file_parts)
        if self._compile:
            # Compress the content with the bundled YUI compressor.
            compressor_path = os.path.join(os.path.abspath(os.path.dirname(optimizations.__file__)), "resources", "yuicompressor.jar")
            process = subprocess.Popen(
                ("java", "-jar", compressor_path, "--type", "css", "--charset", "utf-8", "-v"),
                stdin = subprocess.PIPE,
                stdout = subprocess.PIPE,
                stderr = subprocess.PIPE,
            )
            contents, stderrdata = process.communicate(contents)
            # Check it all worked.
            if process.returncode != 0:
                raise StylesheetError("Error while compiling stylesheets.", stderrdata)
        # Write the output.
        storage.save(name, ContentFile(contents))
class StylesheetCache(object):
    """A cache of stylesheet files."""

    def __init__(self, asset_cache=default_asset_cache):
        """Initializes the stylesheet cache."""
        self._asset_cache = asset_cache

    def get_urls(self, assets, compile=True, force_save=None):
        """Returns a sequence of style URLs for the given assets.

        With force_save (default: outside DEBUG) the assets are grouped
        into a single compiled stylesheet; otherwise each asset gets its
        own URL.
        """
        if force_save is None:
            force_save = not settings.DEBUG
        if not force_save:
            return [self._asset_cache.get_url(asset) for asset in assets]
        if not assets:
            return []
        grouped = StylesheetAsset(list(map(AdaptiveAsset, assets)), compile)
        return [self._asset_cache.get_url(grouped, force_save=True)]
# The default stylesheet cache.
default_stylesheet_cache = StylesheetCache()
# Asset compiler plugin.
class StylesheetAssetCompilerPlugin(AssetCompilerPluginBase):
    """An asset compiler plugin for stylesheet files."""

    asset_type = "stylesheet"

    def __init__(self, stylesheet_cache=default_stylesheet_cache):
        """Initializes the stylesheet asset compiler plugin."""
        self._stylesheet_cache = stylesheet_cache

    def compile_assets(self, assets):
        """Compiles (and caches) the given stylesheet assets."""
        self._stylesheet_cache.get_urls(assets, force_save=True)
| {
"repo_name": "etianen/django-optimizations",
"path": "src/optimizations/stylesheetcache.py",
"copies": "1",
"size": "5414",
"license": "bsd-3-clause",
"hash": -3155842452696273400,
"line_mean": 36.0821917808,
"line_max": 134,
"alpha_frac": 0.6067602512,
"autogenerated": false,
"ratio": 4.164615384615384,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004095992540715137,
"num_lines": 146
} |
"""A cache of javascipt files, optionally compressed."""
from __future__ import unicode_literals
from django.conf import settings
from django.core.files.base import ContentFile
from optimizations.assetcache import default_asset_cache, GroupedAsset, AdaptiveAsset
from optimizations.assetcompiler import default_asset_compiler, AssetCompilerPluginBase
from optimizations.javascriptcompiler import default_javascript_compiler
class JavascriptAsset(GroupedAsset):
    """An asset that represents one or more javascript files."""

    # Scripts are concatenated with a statement separator between them.
    join_str = ";"

    def __init__(self, assets, compile, rescope):
        """Initializes the asset.

        compile: run the result through the javascript compiler.
        rescope: wrap the joined source in an IIFE over ``window``.
        """
        super(JavascriptAsset, self).__init__(assets)
        self._compile = compile
        self._rescope = rescope

    def get_id_params(self):
        """Returns the params which should be used to generate the id."""
        params = super(JavascriptAsset, self).get_id_params()
        params.update(compile=self._compile, rescope=self._rescope)
        return params

    def save(self, storage, name, meta):
        """Saves this asset to the given storage."""
        if not self._compile:
            # No compilation requested: just store the joined source.
            super(JavascriptAsset, self).save(storage, name, meta)
            return
        contents = self.get_contents()
        if self._rescope:
            contents = "(function(window){%s}(window));" % contents
        compiled = default_javascript_compiler.compile(contents, force_compile=True)
        # Write the output.
        storage.save(name, ContentFile(compiled))
class JavascriptCache(object):
    """A cache of javascript files."""

    def __init__(self, asset_cache=default_asset_cache):
        """Initializes the javascript cache."""
        self._asset_cache = asset_cache

    def get_urls(self, assets, compile=True, rescope=False, force_save=None):
        """Returns a sequence of script URLs for the given assets.

        With force_save (default: outside DEBUG) the assets are grouped
        into one compiled script; otherwise each asset gets its own URL.
        """
        if force_save is None:
            force_save = not settings.DEBUG
        if not force_save:
            return [self._asset_cache.get_url(asset) for asset in assets]
        if not assets:
            return []
        grouped = JavascriptAsset(list(map(AdaptiveAsset, assets)), compile, rescope)
        return [self._asset_cache.get_url(grouped, force_save=True)]
# The default javascript cache.
default_javascript_cache = JavascriptCache()
# Asset compiler plugin.
class JavascriptAssetCompilerPlugin(AssetCompilerPluginBase):
    """An asset compiler plugin for javascript files."""

    asset_type = "javascript"

    def __init__(self, javascript_cache=default_javascript_cache):
        """Initializes the javascript asset compiler plugin."""
        self._javascript_cache = javascript_cache

    def compile_assets(self, assets):
        """Compiles (and caches) the given javascript assets."""
        self._javascript_cache.get_urls(assets, force_save=True)
default_asset_compiler.register_plugin("js", JavascriptAssetCompilerPlugin()) | {
"repo_name": "etianen/django-optimizations",
"path": "src/optimizations/javascriptcache.py",
"copies": "1",
"size": "3107",
"license": "bsd-3-clause",
"hash": -5335608348478830000,
"line_mean": 35.5647058824,
"line_max": 136,
"alpha_frac": 0.6462825877,
"autogenerated": false,
"ratio": 4.496382054992764,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02974935980290253,
"num_lines": 85
} |
# A caching engine using S3, requires an AWS account
# David Arthur, 2009
# http://cloudcached.com
import boto
from cPickle import loads, dumps
import datetime,hashlib
class Client:
    "Client interface to CloudCache"

    # Type flags stored in the S3 object metadata so that values can be
    # restored to their original Python type on retrieval.
    _STR = 0
    _INT = 1
    _LONG = 2
    _COMPLEX = 4
    _FLOAT = 5
    _PICKLE = 6

    def __init__(self, aws_access_key_id, aws_secret_access_key):
        "Create S3 connections"
        self.__s3_conn = boto.connect_s3(aws_access_key_id, aws_secret_access_key)
        self.__bucket = self.__s3_conn.get_bucket("cloudcache")

    def _dumpObject(self, v):
        "Convert the requested object to a string and determine its type flag"
        flag = Client._STR
        if isinstance(v, basestring):
            pass
        elif isinstance(v, int):
            flag = Client._INT
            v = str(v)
        elif isinstance(v, long):
            flag = Client._LONG
            v = str(v)
        elif isinstance(v, complex):
            flag = Client._COMPLEX
            v = str(v)
        elif isinstance(v, float):
            flag = Client._FLOAT
            v = str(v)
        else:
            # Anything else is pickled with the highest protocol.
            flag = Client._PICKLE
            v = dumps(v, -1)
        return v, flag

    def _loadObject(self, v, flag):
        "Given a string and a type flag, convert the object back into its original type"
        if flag == Client._STR:
            pass
        elif flag == Client._INT:
            v = int(v)
        elif flag == Client._LONG:
            v = long(v)
        elif flag == Client._COMPLEX:
            v = complex(v)
        elif flag == Client._FLOAT:
            v = float(v)
        else:
            v = loads(v)
        return v

    def get(self, k):
        "Retrieve an object given its key; None when missing or expired"
        s3_key = self.__bucket.get_key(k)
        if not s3_key:
            return None
        now = datetime.datetime.today().isoformat()
        # Lazily expired entries are treated as missing (ISO-8601
        # timestamps compare correctly as strings).
        if s3_key.metadata.has_key('expires') and s3_key.metadata['expires'] <= now:
            return None
        v = s3_key.get_contents_as_string()
        flag = int(s3_key.metadata['type'])
        v = self._loadObject(v, flag)
        return v

    def put(self, key, value, time_to_expire=3600, replace=False):
        "Store an object under a key with an expiry (seconds); returns success"
        value, flag = self._dumpObject(value)
        s3_key = self.__bucket.new_key(key)
        if s3_key.exists() and replace is False:
            return True
        now = datetime.datetime.today()
        expires = (now + datetime.timedelta(seconds=time_to_expire)).isoformat()
        s3_key.set_metadata('expires', expires)
        s3_key.set_metadata('type', str(flag))
        s3_key.set_contents_from_string(value, replace=True)
        # Verify the upload against the locally computed MD5.
        if s3_key.md5 == hashlib.md5(value).hexdigest():
            return True
        else:
            return False

    def update(self, key, value, time_to_expire=3600):
        "Replace an existing object given its key, or create one if it doesn't exist"
        return self.put(key, value, time_to_expire, True)

    def delete(self, key):
        "Delete an object given its key; None when the key does not exist"
        # BUG FIX: previously looked up the undefined name ``k`` (NameError
        # at runtime); now uses the ``key`` parameter.
        s3_key = self.__bucket.get_key(key)
        if not s3_key:
            return None
        s3_key.delete()
        return True
class cached(object):
    "Decorator to cache function calls. How fun!"

    def __init__(self, expires, aws_access_key_id=None, aws_secret_access_key=None):
        """Create the caching decorator.

        expires: cache TTL in seconds.
        aws_access_key_id / aws_secret_access_key: AWS credentials.

        BUG FIX: the original read ``self.aws_access_key_id`` /
        ``self.aws_secret_access_key``, which were never assigned anywhere
        and raised AttributeError.  The credentials are now explicit
        keyword parameters; when omitted, they fall back to class
        attributes of the same name (set ``cached.aws_access_key_id = ...``
        before instantiation), preserving the apparently intended usage.
        """
        if aws_access_key_id is None:
            aws_access_key_id = getattr(self, 'aws_access_key_id', None)
        if aws_secret_access_key is None:
            aws_secret_access_key = getattr(self, 'aws_secret_access_key', None)
        self.client = Client(aws_access_key_id, aws_secret_access_key)
        self.expires = expires

    def __call__(self, fn):
        def func(*args, **kwargs):
            # Build a deterministic cache key from the function name and
            # the string form of its arguments.
            key_parts = [fn.__name__]
            for a in args:
                key_parts.append(str(a))
            for k, v in kwargs.items():
                key_parts.append('%s->%s' % (k, v))
            key = hashlib.md5(":".join(key_parts)).hexdigest()
            value = self.client.get(key)
            if not value:
                # Miss (or cached falsy value): recompute and store.
                value = fn(*args, **kwargs)
                self.client.put(key, value, time_to_expire=self.expires, replace=True)
            return value
        return func
| {
"repo_name": "mumrah/cloudcache",
"path": "cloudcache/__init__.py",
"copies": "1",
"size": "4035",
"license": "mit",
"hash": 769447558758554400,
"line_mean": 32.347107438,
"line_max": 85,
"alpha_frac": 0.5491945477,
"autogenerated": false,
"ratio": 3.75,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47991945477,
"avg_score": null,
"num_lines": null
} |
"""acad_search URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from acads import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # Upload form / handler and static pages.
    url(r'^upload/',views.upload,name="upload"),
    url(r'^$',views.home),
    url(r'^success/',views.success),
    # Data endpoints: subject list for a department, and files for a
    # department/year pair.
    url(r'^get_sub_list/(?P<dept>\D+)/$',views.get_sub_list),
    url(r'^files/(?P<dept>\D+)/(?P<year>\w+)/$',views.get_files),
]+static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
# Branded titles for the Django admin site.
admin.site.site_title= "</acad-search> Admin"
admin.site.site_header="</acad-search> Admin"
| {
"repo_name": "Vipul999ujawane/acad-search",
"path": "acad_search/urls.py",
"copies": "1",
"size": "1270",
"license": "mit",
"hash": 2513077968553827300,
"line_mean": 39.9677419355,
"line_max": 79,
"alpha_frac": 0.6952755906,
"autogenerated": false,
"ratio": 3.2564102564102564,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9416217708518437,
"avg_score": 0.0070936276983637435,
"num_lines": 31
} |
"""A calculator based on the OpenKIM project."""
from ase.calculators.calculator import Calculator
from asap3 import _asap
from asap3.Internal.ListOfElements import ListOfElements
from asap3.Internal.BuiltinPotentials import get_cell_heights, smallest_pbc_direction
from ase.data import atomic_numbers, chemical_symbols
import numpy as np
from copy import copy
# Re-export the C-extension info object at module level.
OpenKIMinfo = _asap.OpenKIMinfo

# Template for the auto-generated KIM descriptor file.  The %(...)s slots
# are filled with the supported species, the neighbor-list conventions and
# the output quantities the model provides.
kim_file_template = """#
# Autogenerated KIM specification file.
#
# Generated by the Asap module OpenKIMcalculator.
#
KIM_API_Version := 1.6.0
# UNITS
Unit_length := A
Unit_energy := eV
Unit_charge := e
Unit_temperature := K
Unit_time := ps
SUPPORTED_ATOM/PARTICLES_TYPES:
# Symbol/name Type code
%(symbols)s
CONVENTIONS:
# Name Type
ZeroBasedLists flag
%(neighboraccess)s
%(neighbormethods)s
MODEL_INPUT:
# Name Type Unit Shape Requirements
numberOfParticles integer none []
numberOfSpecies integer none []
particleSpecies integer none [numberOfParticles]
coordinates double length [numberOfParticles,3]
get_neigh method none []
neighObject pointer none []
numberContributingParticles integer none []
boxSideLengths double length [3]
MODEL_OUTPUT:
# Name Type Unit Shape Requirements
destroy method none []
compute method none []
cutoff double length []
%(energy)s
%(forces)s
%(particleEnergy)s
%(virial)s
%(particleVirial)s
"""
# Line templates used when filling in the %(...)s slots above.
element_template = "%-2s spec %i\n"
output_template = "%-27s double %-19s [%s]"
nb_template = "%-27s flag\n"
class OpenKIMcalculator(_asap.OpenKIMcalculator):
"""A calculator interfacing to the OpenKIM models.
Parameters:
name: The long name of the KIM Model.
Optional parameters:
atoms: If set, set_atoms is called immediately. Default: no atoms set.
allowed: List of OpenKIM neighbor list keywords, only these will be considered when
matching the model. Default: ALl neighbor list modes are allowed.
allow_mi_opbc: If set to False, minimum-image orthogonal periodic boundary condition
methods are not considered when mathcing the neighbor lists. Useful
if the cell is expected to become skewed during the simulation.
access: Set to "iter" or "loca" to restrict the neigbor list access method to
iterator mode or locator mode, respectively. Default: not restricted.
stress: Set to False to refrain from calculate the global virial, even if the model
supports it. Default: True (calculate global virial / stress).
stresses: As above, but for atomic virials / stresses.
verbose: Set to True to print additional info during neigborlist matching.
"""
def __init__(self, name, atoms=None, allowed=None, allow_mi_opbc=True, access=None,
stress=True, stresses=True, verbose=False):
_asap.OpenKIMcalculator.__init__(self)
self.name = name
self.atoms = None
self.allowed = allowed
self.allow_mi_opbc = allow_mi_opbc
self.access = access
self.support_stress = stress
self.support_stresses = stresses
self.verbose = verbose
if atoms is not None:
self.set_atoms(atoms)
def set_atoms(self, atoms, *args):
"""Set the atoms belonging to this calculator.
The function set_atoms(atoms) is defined in C++, and just calls this
Python function. This ensures that this Python function is called
regardless of whether set_atoms() is called from C++ or Python.
"""
self.kim_spec = self.get_kim_spec(atoms)
#print self.kim_spec
self._initialize(self.kim_spec, self.name)
# Inform the calculator about which quantities should be allocated and calculated.
for k,v in self.supported_calculations.iteritems():
self.please_allocate(k, v)
# Inform the calculator about the translation from atomic numbers to particle types.
supported_elements = [atomic_numbers[sym] for sym in self.get_supported_types()]
self.z_to_typecode = {}
elements = ListOfElements(atoms)
for e in elements:
if e not in supported_elements:
raise RuntimeError("The OpenKIM model '%s' does not support element Z=%i (%s)."
% (self.name, e, chemical_symbols[e]))
code = self.get_type_code(chemical_symbols[e])
if code < 0:
raise ValueError("Negative KIM type codes not supported - what do they mean?")
self.z_to_typecode[e] = code
if self.verbose:
print "Translation: Z = %i -> id = %i" % (e, code)
self.set_translation(self.z_to_typecode)
# Find out which neighborlist type we got. Check sanity and if ImageAtoms should be used.
nblistmethod = self.get_NBC_method()
pbc = atoms.get_pbc()
cutoff = self.get_cutoff()
if self.verbose:
print "Neighbor list method: %s; cutoff: %.3f A; pbc: %s" % (nblistmethod, cutoff, pbc)
if nblistmethod == 'CLUSTER':
if not np.array_equal(pbc, (False, False, False)):
raise RuntimeError("OpenKIM chose CLUSTER, but PBC are "+str(pbc))
elif nblistmethod == 'NEIGH_RVEC_H' or nblistmethod == 'NEIGH_RVEC_F':
if smallest_pbc_direction(atoms) < 3 * cutoff:
if self.verbose:
print "Disabling minimum image convention for", nblistmethod
self._use_imageatoms()
elif nblistmethod == "NEIGH_PURE_H" or nblistmethod == "NEIGH_PURE_F":
if self.verbose:
print "Activating Image atoms for", nblistmethod
self._use_imageatoms()
elif nblistmethod == "MI_OPBC_H" or nblistmethod == "MI_OPBC_F":
if not np.array_equal(pbc, (True, True, True)):
raise RuntimeError("OpenKIM chose %s, but PBC are %s" (nblistmethod, str(pbc)))
if smallest_pbc_direction(atoms) < 2 * cutoff:
raise RuntimeError("OpenKIM chose %s, but system is too small." % (nblistmethod,))
_asap.OpenKIMcalculator.set_atoms(self, atoms, *args)
def get_kim_spec(self, atoms):
"""Return the KIM specification string.
Side effects:
self.supported_calculations (dictionary) will indicate which quantities
can be calculated by the Model.
self.z_to_typecode (dictionary) gives the translation from the
atomic numbers to the type codes of the model.
"""
kim_variables = {'symbols': '', 'neighbormethods': ''}
elements = ListOfElements(atoms)
info = OpenKIMinfo(self.name)
supported_elements = [atomic_numbers[sym] for sym in info.get_supported_types()]
self.z_to_typecode = {}
for e in elements:
if e not in supported_elements:
raise RuntimeError("The OpenKIM model '%s' does not support element Z=%i (%s)."
% (self.name, e, chemical_symbols[e]))
kim_variables['symbols'] += element_template % (chemical_symbols[e], e)
test_methods = ('energy', 'particleEnergy', 'forces', 'virial', 'particleVirial')
self.supported_calculations = {}
for method in test_methods:
self.supported_calculations[method] = info.get_API_index(method) > 0
if not self.support_stress:
self.supported_calculations['virial'] = False
if not self.support_stresses:
self.supported_calculations['particleVirial'] = False
if self.supported_calculations['energy']:
kim_variables['energy'] = output_template % ('energy', 'energy', '')
else:
kim_variables['energy'] = ''
if self.supported_calculations['particleEnergy']:
kim_variables['particleEnergy'] = output_template % ('particleEnergy', 'energy', 'numberOfParticles')
else:
kim_variables['particleEnergy'] = ''
if self.supported_calculations['forces']:
kim_variables['forces'] = output_template % ('forces', 'force', 'numberOfParticles,3')
else:
kim_variables['forces'] = ''
if self.support_stress and self.supported_calculations['virial']:
kim_variables['virial'] = output_template % ('virial', 'energy', '6')
else:
kim_variables['virial'] = ''
if self.support_stress and self.supported_calculations['particleVirial']:
kim_variables['particleVirial'] = output_template % ('particleVirial', 'energy', 'numberOfParticles,6')
else:
kim_variables['particleVirial'] = ''
nb_methods = self.find_neighbor_methods(atoms)
for nbm in nb_methods:
kim_variables['neighbormethods'] += nb_template % (nbm,)
if self.access == None:
kim_variables['neighboraccess'] = "Neigh_IterAccess flag\nNeigh_LocaAccess flag"
elif self.access.lower() == 'iter':
if self.verbose:
print "Using Neigh_IterAccess only"
kim_variables['neighboraccess'] = "Neigh_IterAccess flag"
elif self.access.lower() == 'loca':
if self.verbose:
print "Using Neigh_LocaAccess only"
kim_variables['neighboraccess'] = "Neigh_LocaAccess flag"
else:
raise ValueError("Illegal value for neighborlist access method: " + str(self.access))
return kim_file_template % kim_variables
def find_neighbor_methods(self, atoms):
"""Find which neighbor methods are relevant for this atom type
Parameters:
atoms:
The atoms object - its cell and PBC are examined.
allowed (optional, default=None):
The list of neighborlist methods being considered, in prioritized order.
The default (None) means all methods, in the order NEIGH_RVEC_H,
NEIGH_PURE_H, NEIGH_RVEC_F, NEIGH_PURE_F, MI_OPBC_H, MI_OPBC_F,
CLUSTER.
allow_mi_opbc (optional, default=True):
If set to False, the methods MI_OPBC_H/F will not be considered. Normally,
they are considered if there are full periodic boundary conditions and the
unit cell is orthorhombic. However, the unit cell may change during a
simulation, in those cases preventing these boundary conditions may be
a good idea.
"""
if not self.allowed:
allowed = ["NEIGH_RVEC_H", "NEIGH_PURE_H", "NEIGH_RVEC_F",
"NEIGH_PURE_F", "MI_OPBC_H", "MI_OPBC_F",
"CLUSTER"]
elif isinstance(self.allowed, str):
allowed = [self.allowed,]
else:
allowed = copy(self.allowed) # Will be modified
remove = []
uc = atoms.get_cell()
diagonal = True
for i in range(3):
for j in range(3):
if i != j and np.abs(uc[i,j]) > 1e-15:
diagonal = False
if not (self.allow_mi_opbc and atoms.get_pbc().all() and diagonal):
# Minimum Image Orthogonal Periodic Boundary Conditions
# are not allowed
remove.extend(["MI_OPBC_H", "MI_OPBC_F"])
if atoms.get_pbc().any():
# Cluster method is not allowed
remove.append("CLUSTER")
for rem in remove:
if rem in allowed:
allowed.remove(rem)
if self.verbose:
print "Allowed PBC:", allowed
return allowed
if __name__ == '__main__':
    # Smoke test: build a small Cu crystal and evaluate energy, forces and
    # stress through a standard EMT OpenKIM model.
    from ase.lattice.cubic import FaceCenteredCubic
    atoms = FaceCenteredCubic(size=(10,10,10), symbol='Cu')
    print "Creating calculator"
    pot = OpenKIMcalculator('EMT_Asap_Standard_AlAgAuCuNiPdPt__MO_118428466217_000')
    print "Setting atoms"
    atoms.set_calculator(pot)
    print "Calculating energy"
    print atoms.get_potential_energy()
    print atoms.get_forces()[10:]
    print atoms.get_stress()
| {
"repo_name": "auag92/n2dm",
"path": "Asap-3.8.4/Python/asap3/Internal/OpenKIMcalculator.py",
"copies": "1",
"size": "12767",
"license": "mit",
"hash": 7320069747889538000,
"line_mean": 43.6433566434,
"line_max": 115,
"alpha_frac": 0.5863554476,
"autogenerated": false,
"ratio": 4.02363693665301,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.510999238425301,
"avg_score": null,
"num_lines": null
} |
'''A Calculator Implemented With A Top-Down, Recursive-Descent Parser'''
# Author: Erez Shinan, Dec 2012
import re, collections
from operator import add,sub,mul,div
import numpy as np
import nltk_helper
# A lexical token: its category name (NUM, ADD, MUL, LPAR, RPAR) and raw text.
Token = collections.namedtuple('Token', ['name', 'value'])
# A parse-tree node: the rule name and the list of matched children.
RuleMatch = collections.namedtuple('RuleMatch', ['name', 'matched'])

# Maps operator/parenthesis characters to token categories; any other
# lexeme is classified as NUM by the tokenizer.
token_map = {'+':'ADD', '-':'ADD', '*':'MUL', '/':'MUL', '(':'LPAR', ')':'RPAR'}
# Grammar productions for the recursive-descent parser; alternatives are
# tried in order, so longer expansions come first.
rule_map = {
    'add' : ['mul ADD add', 'mul'],
    'mul' : ['atom MUL mul', 'atom'],
    'atom': ['NUM', 'LPAR add RPAR', 'neg'],
    'neg' : ['ADD atom'],
}
# Rules whose right-recursive parse trees get flattened afterwards to
# restore left associativity.
fix_assoc_rules = 'add', 'mul'

# Maps operator characters to binary functions.  NOTE(review): 'div' is
# Python 2 integer-style division from the operator module.
bin_calc_map = {'*':mul, '/':div, '+':add, '-':sub}
class word2vec_calc():
    '''
    Expression parser for word2vec.

    Parses arithmetic expressions with a top-down recursive-descent
    parser and evaluates them, replacing string operands with word
    vectors from the supplied model before applying the operators.
    '''
    def __init__(self, word2vec_model,):
        '''
        word2vec_model: gensim model
        '''
        self.model = word2vec_model
        # Maps parse-tree node names to evaluation functions; binary
        # rules share calc_binary, leaves are converted with float().
        self.calc_map = {
            'NUM' : float,
            # 'atom' is either a bare value or LPAR expr RPAR; pick the
            # single child or the middle of the three.
            'atom': lambda x: x[len(x)!=1],
            # Unary sign: negate when the operator token is '-'.
            'neg' : lambda (op,num): (num,-num)[op=='-'],
            'mul' : self.calc_binary,
            'add' : self.calc_binary,
        }
    def calc_binary(self, x):
        # Fold a flat [operand, op, operand, op, ...] list left to right,
        # collapsing the first three entries each pass.
        while len(x) > 1:
            # replace object with vector of corresponding word
            if type(x[0]) is str:
                x[0] = self.model[nltk_helper.clean_nltk(x[0])]
            if type(x[2]) is str:
                x[2] = self.model[nltk_helper.clean_nltk(x[2])]
            x[:3] = [ bin_calc_map[x[1]](x[0], x[2])]
        return x[0]
    def match(self, rule_name, tokens):
        """Try to match rule_name at the head of tokens.

        Returns (match, remaining_tokens), or (None, None) on failure.
        """
        if tokens and rule_name == tokens[0].name:      # Match a token?
            return tokens[0], tokens[1:]
        for expansion in rule_map.get(rule_name, ()):   # Match a rule?
            remaining_tokens = tokens
            matched_subrules = []
            for subrule in expansion.split():
                matched, remaining_tokens = self.match(subrule, remaining_tokens)
                if not matched:
                    break   # no such luck. next expansion!
                matched_subrules.append(matched)
            else:
                # All subrules of this expansion matched.
                return RuleMatch(rule_name, matched_subrules), remaining_tokens
        return None, None   # match not found
    def _recurse_tree(self, tree, func):
        # Apply func to every child of a rule node; a token node just
        # yields its value.
        return map(func, tree.matched) if tree.name in rule_map else tree[1]
    def flatten_right_associativity(self, tree):
        """Rewrite right-recursive add/mul chains into flat child lists."""
        new = self._recurse_tree(tree, self.flatten_right_associativity)
        if tree.name in fix_assoc_rules and len(new)==3 and new[2].name==tree.name:
            new[-1:] = new[-1].matched
        return RuleMatch(tree.name, new)
    def evaluate(self, tree):
        """Recursively evaluate a parse tree into a value/vector."""
        solutions = self._recurse_tree(tree, self.evaluate)
        return self.calc_map.get(tree.name, lambda x:x)(solutions)
    def calc(self, expr):
        """Tokenize, parse and evaluate the expression string.

        NOTE(review): the tokenizer regex only matches numbers and the
        operator characters; word operands presumably arrive already
        embedded or via another path - confirm with callers.
        """
        split_expr = re.findall('[\d.]+|[%s]' % ''.join(token_map), expr)
        tokens = [Token(token_map.get(x, 'NUM'), x) for x in split_expr]
        tree = self.match('add', tokens)[0]
        tree = self.flatten_right_associativity( tree )
        return self.evaluate(tree)
# print( calc(raw_input('> ')) ) | {
"repo_name": "erccarls/vectorsearch",
"path": "vectorsearch/calculator.py",
"copies": "1",
"size": "3152",
"license": "apache-2.0",
"hash": 3688270940453472000,
"line_mean": 35.6627906977,
"line_max": 83,
"alpha_frac": 0.5494923858,
"autogenerated": false,
"ratio": 3.4260869565217393,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44755793423217394,
"avg_score": null,
"num_lines": null
} |
"""A Calendar to display editorial content."""
# FIXME: this module is entirely commented out and appears to be unused.
# TODO: confirm no callers depend on it, then delete this file.
from calendar import HTMLCalendar
from datetime import date
from itertools import groupby
from django.utils.html import conditional_escape as esc
# class EditorialCalendar(HTMLCalendar):
# """Create a month calendar view of content."""
#
# def __init__(self, content):
# super(EditorialCalendar, self).__init__()
# self.content = self.group_by_day(content)
#
# def formattime(self, day, time):
# #TODO complete time
# pass
#
# def formatday(self, day, weekday):
# if day !=0:
# cssclass = self.cssclasses[weekday]
# if date.today() == date(self.year, self.month, day):
# cssclass += 'today'
# if day in self.content:
# cssclass += 'filled'
# body = ['<ul>']
# for facet in self.content[day]:
# body.append('<li>')
# body.append('<a href="%s">' % facet.get_absolute_url())
# body.append(esc(facet.title))
# body.append('</a></li>')
# body.append('</ul>')
# return self.day_cell(cssclass, '%d %s' % (day, ''.join(body)))
# return self.day_cell(cssclass, day)
# return self.day_cell('noday', ' ')
#
# def formatmonth(self, year, month):
# self.year, self.month = year, month
# return super (EditorialCalendar, self).formatmonth(year, month)
#
# def group_by_day(self, workouts):
# field = lambda content: content.due_edit.day
# return dict(
# [(day, list(items)) for day, items in groupby(content, field)]
# )
#
# def day_cell(self, cssclass, body):
# return '<td class="%s">%s</td>' % (cssclass, body)
| {
"repo_name": "ProjectFacet/facet",
"path": "project/editorial/editorialcalendar.py",
"copies": "1",
"size": "1958",
"license": "mit",
"hash": 489347101347664060,
"line_mean": 34.9433962264,
"line_max": 80,
"alpha_frac": 0.5352400409,
"autogenerated": false,
"ratio": 3.3934142114384747,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4428654252338475,
"avg_score": null,
"num_lines": null
} |
# A callable is anything that can be called.
# EXAMPLE 1:
# ==============================================================================
class Foo:
    # Defining __call__ makes instances of Foo callable like a function.
    def __call__(self):
        print 'called'

foo = Foo()
foo()  # invokes Foo.__call__, printing 'called'
# EXAMPLE 2:
# ==============================================================================
# Have you ever wandered if we could make an object callable? Yes,
# I mean just use the object name as if you were calling function! Intersted?
class Add:
    # Stores two numbers at construction; calling the instance returns
    # their sum.
    def __init__(self, num1, num2):
        self.num1 = num1
        self.num2 = num2
        print "Sum of", self.num1,"and",self.num2, "is:"
    def __call__(self):
        return (self.num1 + self.num2)

add = Add(1, 2)
print add()  # prints 3 via Add.__call__
# EXAMPLE 3:
# ==============================================================================
class Cached:
    # Memoizing decorator: wraps a function and caches its results,
    # keyed by the tuple of positional arguments, so repeated calls
    # with the same arguments skip the wrapped function entirely.
    def __init__(self, function):
        self.function = function
        self.cache = {}
    def __call__(self, *args):
        if args in self.cache:
            return self.cache[args]
        result = self.function(*args)
        self.cache[args] = result
        return result
@Cached
def ack(x, y):
    # Ackermann-style doubly recursive function; the Cached decorator
    # memoizes intermediate results, which makes the recursion feasible.
    return ack(x-1, ack(x, y-1)) if x*y else (x + y + 1)

print ack(2, 4)
# EXAMPLE 4:
# ==============================================================================
# A callable is an object allows you to use round parenthesis ( )
# and eventually pass some parameters, just like functions.
# Every time you define a function python creates a callable object.
# In example, you could define the function func in these ways (it's the same):
class A(object):
    # An instance of A behaves like a function once __call__ is defined.
    def __call__(self, *args):
        print 'Hello'

func = A()

# or ... an ordinary function definition is equivalent for the caller:
def func(*args):
    print 'Hello'
# You could use this method instead of methods like doit or run,
# I think it's just more clear to see obj() than obj.doit() | {
"repo_name": "rolandovillca/python_basic_concepts",
"path": "others/callable.py",
"copies": "4",
"size": "1819",
"license": "mit",
"hash": 7041688077134034000,
"line_mean": 26.5757575758,
"line_max": 80,
"alpha_frac": 0.5057724024,
"autogenerated": false,
"ratio": 3.9287257019438444,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6434498104343844,
"avg_score": null,
"num_lines": null
} |
"""A callback manager that is copyable/picklable."""
import collections
class Transaction(object):
    """A context manager to prevent new events from firing.

    While active, the manager queues emitted events; when the outermost
    transaction exits, the queued events are flushed.  Nested
    transactions are supported by saving and restoring the queueing
    state that was in effect when each transaction was created.
    """

    def __init__(self, manager):
        self.manager = manager
        # Remember the state at creation time so nested transactions
        # restore it instead of unconditionally turning queueing off.
        self._original_state = self.manager.is_queueing()

    def __enter__(self):
        self.manager.set_queueing(True)

    def __exit__(self, type, value, traceback):
        self.manager.set_queueing(self._original_state)
        if not self.manager.is_queueing():
            # BUG FIX: the manager's flush method is named
            # flush_pending(); the original called the nonexistent
            # flush_events(), raising AttributeError on exit.
            self.manager.flush_pending()
class CallbackManager(object):
    """A simple callback dispatcher.

    Uses references to the listening object instead of to the object's
    method in order to make it possible to deepcopy the handlers.

    This is done in order to be able to make copies of the game state for
    undo and for running simulations.
    """

    def __init__(self):
        # Maps event class -> set of listener objects.
        self.handlers = collections.defaultdict(set)
        # While True, emitted events are queued instead of dispatched.
        self._queue_events = False
        self._pending_events = []

    def is_queueing(self):
        """Return True if events are currently being queued."""
        return self._queue_events

    def set_queueing(self, val):
        """Enable or disable queueing of emitted events."""
        self._queue_events = val

    def transaction(self):
        """Return a context manager that queues events until it exits."""
        return Transaction(self)

    def connect(self, event_type, listener):
        """Add a new listener for the given event class."""
        self.handlers[event_type].add(listener)

    def disconnect(self, event_type, listener):
        """Remove a listener."""
        # discard() is a no-op when the listener is not registered.
        self.handlers[event_type].discard(listener)

    def emit(self, event, sender):
        """Send the given event to all listeners, or queue it."""
        if self._queue_events:
            self._pending_events.append((event, sender))
        else:
            self._emit(event, sender)

    def flush_pending(self):
        """Dispatch all queued events and clear the queue.

        BUG FIX: the original never cleared _pending_events, so every
        subsequent flush re-delivered all previously queued events and
        the list grew without bound.  Swapping the list out first also
        ensures events queued by handlers during the flush are kept for
        the next flush rather than lost or delivered twice.
        """
        pending, self._pending_events = self._pending_events, []
        for event, sender in pending:
            self._emit(event, sender)

    def _emit(self, event, sender):
        # Iterate over a copy so listeners may connect/disconnect during
        # dispatch without mutating the set being iterated.
        for listener in list(self.handlers[event.__class__]):
            getattr(listener, event.callback)(sender, event)

    def get(self, event_type):
        """Return all handlers for the event type."""
        return self.handlers[event_type]
| {
"repo_name": "mrroach/CentralServer",
"path": "csrv/model/callback_manager.py",
"copies": "1",
"size": "2020",
"license": "apache-2.0",
"hash": 2343953004957653500,
"line_mean": 27.0555555556,
"line_max": 71,
"alpha_frac": 0.6831683168,
"autogenerated": false,
"ratio": 3.8996138996139,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5082782216413899,
"avg_score": null,
"num_lines": null
} |
"""A CallTip window class for Tkinter/IDLE.
After ToolTip.py, which uses ideas gleaned from PySol
Used by the CallTips IDLE extension.
"""
from Tkinter import Toplevel, Label, LEFT, SOLID, TclError
# Virtual event plus key/focus sequences that hide the calltip immediately.
HIDE_VIRTUAL_EVENT_NAME = "<<calltipwindow-hide>>"
HIDE_SEQUENCES = ("<Key-Escape>", "<FocusOut>")
# Virtual event plus sequences that trigger a "should we hide?" check.
CHECKHIDE_VIRTUAL_EVENT_NAME = "<<calltipwindow-checkhide>>"
CHECKHIDE_SEQUENCES = ("<KeyRelease>", "<ButtonRelease>")
CHECKHIDE_TIME = 100 # milliseconds between periodic hide checks
# Name of the text mark placed at the closing paren; moving the insertion
# cursor past it hides the tip.
MARK_RIGHT = "calltipwindowregion_right"
class CallTip:
    """Borderless Toplevel that shows a calltip next to the insertion cursor.

    Tracks the opening/closing parenthesis positions and hides itself when
    the cursor leaves the call region or on Escape/focus-out.
    """

    def __init__(self, widget):
        # widget is the Text widget the tip is attached to.
        self.widget = widget
        self.tipwindow = self.label = None
        # Line/column of the opening parenthesis.
        self.parenline = self.parencol = None
        self.lastline = None
        self.hideid = self.checkhideid = None
        self.checkhide_after_id = None

    def position_window(self):
        """Check if needs to reposition the window, and if so - do it."""
        curline = int(self.widget.index("insert").split('.')[0])
        if curline == self.lastline:
            # Still on the same line: position is unchanged.
            return
        self.lastline = curline
        self.widget.see("insert")
        if curline == self.parenline:
            box = self.widget.bbox("%d.%d" % (self.parenline,
                                              self.parencol))
        else:
            box = self.widget.bbox("%d.0" % curline)
        if not box:
            # bbox() returns None when the position is not visible.
            box = list(self.widget.bbox("insert"))
            # align to left of window
            box[0] = 0
            box[2] = 0
        x = box[0] + self.widget.winfo_rootx() + 2
        y = box[1] + box[3] + self.widget.winfo_rooty()
        self.tipwindow.wm_geometry("+%d+%d" % (x, y))

    def showtip(self, text, parenleft, parenright):
        """Show the calltip, bind events which will close it and reposition it.
        """
        # Only called in CallTips, where lines are truncated
        self.text = text
        if self.tipwindow or not self.text:
            return

        self.widget.mark_set(MARK_RIGHT, parenright)
        self.parenline, self.parencol = map(
            int, self.widget.index(parenleft).split("."))

        self.tipwindow = tw = Toplevel(self.widget)
        self.position_window()
        # remove border on calltip window
        tw.wm_overrideredirect(1)
        try:
            # This command is only needed and available on Tk >= 8.4.0 for OSX
            # Without it, call tips intrude on the typing process by grabbing
            # the focus.
            tw.tk.call("::tk::unsupported::MacWindowStyle", "style", tw._w,
                       "help", "noActivates")
        except TclError:
            pass
        self.label = Label(tw, text=self.text, justify=LEFT,
                           background="#ffffe0", relief=SOLID, borderwidth=1,
                           font = self.widget['font'])
        self.label.pack()

        # Bind the periodic/explicit hide checks on the text widget.
        self.checkhideid = self.widget.bind(CHECKHIDE_VIRTUAL_EVENT_NAME,
                                            self.checkhide_event)
        for seq in CHECKHIDE_SEQUENCES:
            self.widget.event_add(CHECKHIDE_VIRTUAL_EVENT_NAME, seq)
        self.widget.after(CHECKHIDE_TIME, self.checkhide_event)
        self.hideid = self.widget.bind(HIDE_VIRTUAL_EVENT_NAME,
                                       self.hide_event)
        for seq in HIDE_SEQUENCES:
            self.widget.event_add(HIDE_VIRTUAL_EVENT_NAME, seq)

    def checkhide_event(self, event=None):
        """Hide the tip if the cursor left the call region; else reposition."""
        if not self.tipwindow:
            # If the event was triggered by the same event that unbinded
            # this function, the function will be called nevertheless,
            # so do nothing in this case.
            return
        curline, curcol = map(int, self.widget.index("insert").split('.'))
        if curline < self.parenline or \
           (curline == self.parenline and curcol <= self.parencol) or \
           self.widget.compare("insert", ">", MARK_RIGHT):
            # Cursor is before the open paren or after the close paren.
            self.hidetip()
        else:
            self.position_window()
            # Re-arm the periodic check, cancelling any pending one.
            if self.checkhide_after_id is not None:
                self.widget.after_cancel(self.checkhide_after_id)
            self.checkhide_after_id = \
                self.widget.after(CHECKHIDE_TIME, self.checkhide_event)

    def hide_event(self, event):
        """Handler for the explicit hide virtual event."""
        if not self.tipwindow:
            # See the explanation in checkhide_event.
            return
        self.hidetip()

    def hidetip(self):
        """Tear down the tip window and remove all bindings and marks."""
        if not self.tipwindow:
            return

        for seq in CHECKHIDE_SEQUENCES:
            self.widget.event_delete(CHECKHIDE_VIRTUAL_EVENT_NAME, seq)
        self.widget.unbind(CHECKHIDE_VIRTUAL_EVENT_NAME, self.checkhideid)
        self.checkhideid = None
        for seq in HIDE_SEQUENCES:
            self.widget.event_delete(HIDE_VIRTUAL_EVENT_NAME, seq)
        self.widget.unbind(HIDE_VIRTUAL_EVENT_NAME, self.hideid)
        self.hideid = None

        self.label.destroy()
        self.label = None
        self.tipwindow.destroy()
        self.tipwindow = None

        self.widget.mark_unset(MARK_RIGHT)
        self.parenline = self.parencol = self.lastline = None

    def is_active(self):
        """Return True while a tip window is being displayed."""
        return bool(self.tipwindow)
def _calltip_window(parent):  # htest #
    # Manual ("human") test: typing '(' shows a tip, ')' hides it.
    from Tkinter import Toplevel, Text, LEFT, BOTH

    top = Toplevel(parent)
    top.title("Test calltips")
    top.geometry("200x100+%d+%d" % (parent.winfo_rootx() + 200,
                  parent.winfo_rooty() + 150))
    text = Text(top)
    text.pack(side=LEFT, fill=BOTH, expand=1)
    text.insert("insert", "string.split")
    top.update()
    calltip = CallTip(text)

    def calltip_show(event):
        calltip.showtip("(s=Hello world)", "insert", "end")
    def calltip_hide(event):
        calltip.hidetip()
    text.event_add("<<calltip-show>>", "(")
    text.event_add("<<calltip-hide>>", ")")
    text.bind("<<calltip-show>>", calltip_show)
    text.bind("<<calltip-hide>>", calltip_hide)
    text.focus_set()
if __name__=='__main__':
    # Run the manual test harness from idlelib.
    from idlelib.idle_test.htest import run
    run(_calltip_window)
| {
"repo_name": "gameduell/duell",
"path": "bin/win/python2.7.9/Lib/idlelib/CallTipWindow.py",
"copies": "25",
"size": "5968",
"license": "bsd-2-clause",
"hash": -311513745418954240,
"line_mean": 36.3,
"line_max": 79,
"alpha_frac": 0.5872989276,
"autogenerated": false,
"ratio": 3.656862745098039,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A CallTip window class for Tkinter/IDLE.
After tooltip.py, which uses ideas gleaned from PySol
Used by the calltips IDLE extension.
"""
from tkinter import Toplevel, Label, LEFT, SOLID, TclError
# Virtual event plus key/focus sequences that hide the calltip immediately.
HIDE_VIRTUAL_EVENT_NAME = "<<calltipwindow-hide>>"
HIDE_SEQUENCES = ("<Key-Escape>", "<FocusOut>")
# Virtual event plus sequences that trigger a "should we hide?" check.
CHECKHIDE_VIRTUAL_EVENT_NAME = "<<calltipwindow-checkhide>>"
CHECKHIDE_SEQUENCES = ("<KeyRelease>", "<ButtonRelease>")
CHECKHIDE_TIME = 100 # milliseconds between periodic hide checks
# Name of the text mark placed at the closing paren; moving the insertion
# cursor past it hides the tip.
MARK_RIGHT = "calltipwindowregion_right"
class CallTip:
    """Borderless Toplevel that shows a calltip next to the insertion cursor.

    Tracks the opening/closing parenthesis positions and hides itself when
    the cursor leaves the call region or on Escape/focus-out.
    """

    def __init__(self, widget):
        # widget is the Text widget the tip is attached to.
        self.widget = widget
        self.tipwindow = self.label = None
        # Line/column of the opening parenthesis.
        self.parenline = self.parencol = None
        self.lastline = None
        self.hideid = self.checkhideid = None
        self.checkhide_after_id = None

    def position_window(self):
        """Check if needs to reposition the window, and if so - do it."""
        curline = int(self.widget.index("insert").split('.')[0])
        if curline == self.lastline:
            # Still on the same line: position is unchanged.
            return
        self.lastline = curline
        self.widget.see("insert")
        if curline == self.parenline:
            box = self.widget.bbox("%d.%d" % (self.parenline,
                                              self.parencol))
        else:
            box = self.widget.bbox("%d.0" % curline)
        if not box:
            # bbox() returns None when the position is not visible.
            box = list(self.widget.bbox("insert"))
            # align to left of window
            box[0] = 0
            box[2] = 0
        x = box[0] + self.widget.winfo_rootx() + 2
        y = box[1] + box[3] + self.widget.winfo_rooty()
        self.tipwindow.wm_geometry("+%d+%d" % (x, y))

    def showtip(self, text, parenleft, parenright):
        """Show the calltip, bind events which will close it and reposition it.
        """
        # Only called in CallTips, where lines are truncated
        self.text = text
        if self.tipwindow or not self.text:
            return

        self.widget.mark_set(MARK_RIGHT, parenright)
        self.parenline, self.parencol = map(
            int, self.widget.index(parenleft).split("."))

        self.tipwindow = tw = Toplevel(self.widget)
        self.position_window()
        # remove border on calltip window
        tw.wm_overrideredirect(1)
        try:
            # This command is only needed and available on Tk >= 8.4.0 for OSX
            # Without it, call tips intrude on the typing process by grabbing
            # the focus.
            tw.tk.call("::tk::unsupported::MacWindowStyle", "style", tw._w,
                       "help", "noActivates")
        except TclError:
            pass
        self.label = Label(tw, text=self.text, justify=LEFT,
                           background="#ffffe0", relief=SOLID, borderwidth=1,
                           font = self.widget['font'])
        self.label.pack()
        tw.lift()  # work around bug in Tk 8.5.18+ (issue #24570)

        # Bind the periodic/explicit hide checks on the text widget.
        self.checkhideid = self.widget.bind(CHECKHIDE_VIRTUAL_EVENT_NAME,
                                            self.checkhide_event)
        for seq in CHECKHIDE_SEQUENCES:
            self.widget.event_add(CHECKHIDE_VIRTUAL_EVENT_NAME, seq)
        self.widget.after(CHECKHIDE_TIME, self.checkhide_event)
        self.hideid = self.widget.bind(HIDE_VIRTUAL_EVENT_NAME,
                                       self.hide_event)
        for seq in HIDE_SEQUENCES:
            self.widget.event_add(HIDE_VIRTUAL_EVENT_NAME, seq)

    def checkhide_event(self, event=None):
        """Hide the tip if the cursor left the call region; else reposition."""
        if not self.tipwindow:
            # If the event was triggered by the same event that unbinded
            # this function, the function will be called nevertheless,
            # so do nothing in this case.
            return None
        curline, curcol = map(int, self.widget.index("insert").split('.'))
        if curline < self.parenline or \
           (curline == self.parenline and curcol <= self.parencol) or \
           self.widget.compare("insert", ">", MARK_RIGHT):
            # Cursor is before the open paren or after the close paren.
            self.hidetip()
            return "break"
        else:
            self.position_window()
            # Re-arm the periodic check, cancelling any pending one.
            if self.checkhide_after_id is not None:
                self.widget.after_cancel(self.checkhide_after_id)
            self.checkhide_after_id = \
                self.widget.after(CHECKHIDE_TIME, self.checkhide_event)
            return None

    def hide_event(self, event):
        """Handler for the explicit hide virtual event."""
        if not self.tipwindow:
            # See the explanation in checkhide_event.
            return None
        self.hidetip()
        return "break"

    def hidetip(self):
        """Tear down the tip window and remove all bindings and marks."""
        if not self.tipwindow:
            return

        for seq in CHECKHIDE_SEQUENCES:
            self.widget.event_delete(CHECKHIDE_VIRTUAL_EVENT_NAME, seq)
        self.widget.unbind(CHECKHIDE_VIRTUAL_EVENT_NAME, self.checkhideid)
        self.checkhideid = None
        for seq in HIDE_SEQUENCES:
            self.widget.event_delete(HIDE_VIRTUAL_EVENT_NAME, seq)
        self.widget.unbind(HIDE_VIRTUAL_EVENT_NAME, self.hideid)
        self.hideid = None

        self.label.destroy()
        self.label = None
        self.tipwindow.destroy()
        self.tipwindow = None

        self.widget.mark_unset(MARK_RIGHT)
        self.parenline = self.parencol = self.lastline = None

    def is_active(self):
        """Return True while a tip window is being displayed."""
        return bool(self.tipwindow)
def _calltip_window(parent):  # htest #
    # Manual ("human") test: typing '(' shows a tip, ')' hides it.
    from tkinter import Toplevel, Text, LEFT, BOTH

    top = Toplevel(parent)
    top.title("Test calltips")
    x, y = map(int, parent.geometry().split('+')[1:])
    top.geometry("200x100+%d+%d" % (x + 250, y + 175))
    text = Text(top)
    text.pack(side=LEFT, fill=BOTH, expand=1)
    text.insert("insert", "string.split")
    top.update()
    calltip = CallTip(text)

    def calltip_show(event):
        calltip.showtip("(s=Hello world)", "insert", "end")
    def calltip_hide(event):
        calltip.hidetip()
    text.event_add("<<calltip-show>>", "(")
    text.event_add("<<calltip-hide>>", ")")
    text.bind("<<calltip-show>>", calltip_show)
    text.bind("<<calltip-hide>>", calltip_hide)
    text.focus_set()
if __name__=='__main__':
    # Run the manual test harness from idlelib.
    from idlelib.idle_test.htest import run
    run(_calltip_window)
| {
"repo_name": "Microsoft/PTVS",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/idlelib/calltip_w.py",
"copies": "3",
"size": "6117",
"license": "apache-2.0",
"hash": 7641397037113488000,
"line_mean": 36.2987804878,
"line_max": 79,
"alpha_frac": 0.5854176884,
"autogenerated": false,
"ratio": 3.6584928229665072,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5743910511366507,
"avg_score": null,
"num_lines": null
} |
"""A CallTip window class for Tkinter/IDLE.
After ToolTip.py, which uses ideas gleaned from PySol
Used by the CallTips IDLE extension.
"""
from Tkinter import *
class CallTip:
    """Minimal calltip: a borderless Toplevel placed under the cursor.

    Older variant with no hide-on-cursor-move tracking; the caller is
    responsible for invoking hidetip().
    """

    def __init__(self, widget):
        # widget is the Text widget the tip is attached to.
        self.widget = widget
        self.tipwindow = None
        self.id = None
        self.x = self.y = 0

    def showtip(self, text):
        " Display text in calltip window"
        # truncate overly long calltip
        if len(text) >= 79:
            text = text[:75] + ' ...'
        self.text = text
        if self.tipwindow or not self.text:
            return
        self.widget.see("insert")
        # Place the tip just below the insertion cursor, in screen coords.
        x, y, cx, cy = self.widget.bbox("insert")
        x = x + self.widget.winfo_rootx() + 2
        y = y + cy + self.widget.winfo_rooty()
        self.tipwindow = tw = Toplevel(self.widget)
        # XXX 12 Dec 2002 KBK The following command has two effects: It removes
        #     the calltip window border (good) but also causes (at least on
        #     Linux) the calltip to show as a top level window, burning through
        #     any other window dragged over it.  Also, shows on all viewports!
        tw.wm_overrideredirect(1)
        tw.wm_geometry("+%d+%d" % (x, y))
        try:
            # This command is only needed and available on Tk >= 8.4.0 for OSX
            # Without it, call tips intrude on the typing process by grabbing
            # the focus.
            tw.tk.call("::tk::unsupported::MacWindowStyle", "style", tw._w,
                       "help", "noActivates")
        except TclError:
            pass
        label = Label(tw, text=self.text, justify=LEFT,
                      background="#ffffe0", relief=SOLID, borderwidth=1,
                      font = self.widget['font'])
        label.pack()

    def hidetip(self):
        """Destroy the tip window if one is showing."""
        tw = self.tipwindow
        self.tipwindow = None
        if tw:
            tw.destroy()
###############################
#
# Test Code
#
class container:    # Conceptually an editor_window
    """Test fixture: a Text widget wired so '(' shows and ')' hides a tip."""
    def __init__(self):
        root = Tk()
        text = self.text = Text(root)
        text.pack(side=LEFT, fill=BOTH, expand=1)
        text.insert("insert", "string.split")
        root.update()
        self.calltip = CallTip(text)

        # Map the paren keystrokes onto virtual show/hide events.
        text.event_add("<<calltip-show>>", "(")
        text.event_add("<<calltip-hide>>", ")")
        text.bind("<<calltip-show>>", self.calltip_show)
        text.bind("<<calltip-hide>>", self.calltip_hide)

        text.focus_set()
        # Blocks until the window is closed.
        root.mainloop()

    def calltip_show(self, event):
        self.calltip.showtip("Hello world")

    def calltip_hide(self, event):
        self.calltip.hidetip()
def main():
    # Test code: construct the fixture, which runs the Tk mainloop.
    c=container()

if __name__=='__main__':
    main()
| {
"repo_name": "MalloyPower/parsing-python",
"path": "front-end/testsuite-python-lib/Python-2.4/Lib/idlelib/CallTipWindow.py",
"copies": "11",
"size": "2716",
"license": "mit",
"hash": -6321301134527625000,
"line_mean": 29.8636363636,
"line_max": 79,
"alpha_frac": 0.5530191458,
"autogenerated": false,
"ratio": 3.660377358490566,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9713396504290567,
"avg_score": null,
"num_lines": null
} |
"""A CallTip window class for Tkinter/IDLE.
After ToolTip.py, which uses ideas gleaned from PySol
Used by the CallTips IDLE extension.
"""
from tkinter import *
# Virtual event plus key/focus sequences that hide the calltip immediately.
HIDE_VIRTUAL_EVENT_NAME = "<<calltipwindow-hide>>"
HIDE_SEQUENCES = ("<Key-Escape>", "<FocusOut>")
# Virtual event plus sequences that trigger a "should we hide?" check.
CHECKHIDE_VIRTUAL_EVENT_NAME = "<<calltipwindow-checkhide>>"
CHECKHIDE_SEQUENCES = ("<KeyRelease>", "<ButtonRelease>")
CHECKHIDE_TIME = 100 # milliseconds between periodic hide checks
# Name of the text mark placed at the closing paren; moving the insertion
# cursor past it hides the tip.
MARK_RIGHT = "calltipwindowregion_right"
class CallTip:
    """Floating tip window showing a call signature next to a Text widget.

    The tip is anchored at the opening parenthesis and hides itself when
    the insertion cursor leaves the argument region, or on Escape/FocusOut.
    """
    def __init__(self, widget):
        # widget is the Text widget the calltip is attached to.
        self.widget = widget
        self.tipwindow = self.label = None
        # Line/column of the opening parenthesis the tip is anchored to.
        self.parenline = self.parencol = None
        self.lastline = None
        # Tk binding ids, kept so the bindings can be removed in hidetip().
        self.hideid = self.checkhideid = None
    def position_window(self):
        """Check if needs to reposition the window, and if so - do it."""
        curline = int(self.widget.index("insert").split('.')[0])
        if curline == self.lastline:
            return
        self.lastline = curline
        self.widget.see("insert")
        if curline == self.parenline:
            # Same line as the paren: hang the tip off the paren itself.
            box = self.widget.bbox("%d.%d" % (self.parenline,
                                              self.parencol))
        else:
            box = self.widget.bbox("%d.0" % curline)
        if not box:
            # bbox() returns None when the position is not visible; fall
            # back to the insertion cursor and pin the tip to the left edge.
            box = list(self.widget.bbox("insert"))
            # align to left of window
            box[0] = 0
            box[2] = 0
        x = box[0] + self.widget.winfo_rootx() + 2
        y = box[1] + box[3] + self.widget.winfo_rooty()
        self.tipwindow.wm_geometry("+%d+%d" % (x, y))
    def showtip(self, text, parenleft, parenright):
        """Show the calltip, bind events which will close it and reposition it.
        """
        # truncate overly long calltip
        if len(text) >= 79:
            textlines = text.splitlines()
            for i, line in enumerate(textlines):
                if len(line) > 79:
                    textlines[i] = line[:75] + ' ...'
            text = '\n'.join(textlines)
        self.text = text
        if self.tipwindow or not self.text:
            return
        # Mark the region's right edge so checkhide_event() can tell when
        # the cursor has moved past the closing parenthesis.
        self.widget.mark_set(MARK_RIGHT, parenright)
        self.parenline, self.parencol = map(
            int, self.widget.index(parenleft).split("."))
        self.tipwindow = tw = Toplevel(self.widget)
        self.position_window()
        # remove border on calltip window
        tw.wm_overrideredirect(1)
        try:
            # This command is only needed and available on Tk >= 8.4.0 for OSX
            # Without it, call tips intrude on the typing process by grabbing
            # the focus.
            tw.tk.call("::tk::unsupported::MacWindowStyle", "style", tw._w,
                       "help", "noActivates")
        except TclError:
            pass
        self.label = Label(tw, text=self.text, justify=LEFT,
                           background="#ffffe0", relief=SOLID, borderwidth=1,
                           font = self.widget['font'])
        self.label.pack()
        # Re-check the hide condition on every keystroke/click, and also on
        # a timer so the tip follows scrolling and external edits.
        self.checkhideid = self.widget.bind(CHECKHIDE_VIRTUAL_EVENT_NAME,
                                            self.checkhide_event)
        for seq in CHECKHIDE_SEQUENCES:
            self.widget.event_add(CHECKHIDE_VIRTUAL_EVENT_NAME, seq)
        self.widget.after(CHECKHIDE_TIME, self.checkhide_event)
        self.hideid = self.widget.bind(HIDE_VIRTUAL_EVENT_NAME,
                                       self.hide_event)
        for seq in HIDE_SEQUENCES:
            self.widget.event_add(HIDE_VIRTUAL_EVENT_NAME, seq)
    def checkhide_event(self, event=None):
        # Hide the tip if the cursor left the paren region; otherwise keep
        # it positioned and schedule another timed check.
        if not self.tipwindow:
            # If the event was triggered by the same event that unbinded
            # this function, the function will be called nevertheless,
            # so do nothing in this case.
            return
        curline, curcol = map(int, self.widget.index("insert").split('.'))
        if curline < self.parenline or \
           (curline == self.parenline and curcol <= self.parencol) or \
           self.widget.compare("insert", ">", MARK_RIGHT):
            self.hidetip()
        else:
            self.position_window()
            self.widget.after(CHECKHIDE_TIME, self.checkhide_event)
    def hide_event(self, event):
        # Explicit hide request (Escape pressed or focus lost).
        if not self.tipwindow:
            # See the explanation in checkhide_event.
            return
        self.hidetip()
    def hidetip(self):
        # Tear down in reverse order of showtip(): remove virtual events
        # and bindings first, then destroy widgets, then clear state.
        if not self.tipwindow:
            return
        for seq in CHECKHIDE_SEQUENCES:
            self.widget.event_delete(CHECKHIDE_VIRTUAL_EVENT_NAME, seq)
        self.widget.unbind(CHECKHIDE_VIRTUAL_EVENT_NAME, self.checkhideid)
        self.checkhideid = None
        for seq in HIDE_SEQUENCES:
            self.widget.event_delete(HIDE_VIRTUAL_EVENT_NAME, seq)
        self.widget.unbind(HIDE_VIRTUAL_EVENT_NAME, self.hideid)
        self.hideid = None
        self.label.destroy()
        self.label = None
        self.tipwindow.destroy()
        self.tipwindow = None
        self.widget.mark_unset(MARK_RIGHT)
        self.parenline = self.parencol = self.lastline = None
    def is_active(self):
        # True while a tip window is currently displayed.
        return bool(self.tipwindow)
###############################
#
# Test Code
#
class container: # Conceptually an editor_window
    """Minimal manual-test harness: a Tk Text widget wired to a CallTip.

    Typing "(" shows the tip and ")" hides it.
    """
    def __init__(self):
        root = Tk()
        text = self.text = Text(root)
        text.pack(side=LEFT, fill=BOTH, expand=1)
        text.insert("insert", "string.split")
        root.update()
        self.calltip = CallTip(text)
        # Map paren keystrokes to the virtual events handled below.
        text.event_add("<<calltip-show>>", "(")
        text.event_add("<<calltip-hide>>", ")")
        text.bind("<<calltip-show>>", self.calltip_show)
        text.bind("<<calltip-hide>>", self.calltip_hide)
        text.focus_set()
        root.mainloop()
    def calltip_show(self, event):
        # showtip() requires the paren region bounds; the previous one-arg
        # call raised TypeError, so the tip never appeared in this test.
        self.calltip.showtip("Hello world", "insert", "end")
    def calltip_hide(self, event):
        self.calltip.hidetip()
def main():
    """Manual test entry point: open the calltip demo window."""
    container()

if __name__ == '__main__':
    main()
| {
"repo_name": "denisff/python-for-android",
"path": "python3-alpha/python3-src/Lib/idlelib/CallTipWindow.py",
"copies": "49",
"size": "5924",
"license": "apache-2.0",
"hash": -3896513594138543000,
"line_mean": 33.6432748538,
"line_max": 79,
"alpha_frac": 0.5697164078,
"autogenerated": false,
"ratio": 3.721105527638191,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A CallTip window class for Tkinter/IDLE.
After ToolTip.py, which uses ideas gleaned from PySol
Used by the CallTips IDLE extension.
"""
from Tkinter import *
# Virtual event (and the key sequences mapped to it) that hides the tip.
HIDE_VIRTUAL_EVENT_NAME = "<<calltipwindow-hide>>"
HIDE_SEQUENCES = ("<Key-Escape>", "<FocusOut>")
# Virtual event that triggers a re-check of whether the tip should hide.
CHECKHIDE_VIRTUAL_EVENT_NAME = "<<calltipwindow-checkhide>>"
CHECKHIDE_SEQUENCES = ("<KeyRelease>", "<ButtonRelease>")
CHECKHIDE_TIME = 100 # milliseconds between automatic hide checks
# Text mark placed at the region's right edge (the closing parenthesis).
MARK_RIGHT = "calltipwindowregion_right"
class CallTip:
    """Floating tip window showing a call signature next to a Text widget.

    The tip is anchored at the opening parenthesis and hides itself when
    the insertion cursor leaves the argument region, or on Escape/FocusOut.
    """
    def __init__(self, widget):
        # widget is the Text widget the calltip is attached to.
        self.widget = widget
        self.tipwindow = self.label = None
        # Line/column of the opening parenthesis the tip is anchored to.
        self.parenline = self.parencol = None
        self.lastline = None
        # Tk binding ids, kept so the bindings can be removed in hidetip().
        self.hideid = self.checkhideid = None
    def position_window(self):
        """Check if needs to reposition the window, and if so - do it."""
        curline = int(self.widget.index("insert").split('.')[0])
        if curline == self.lastline:
            return
        self.lastline = curline
        self.widget.see("insert")
        if curline == self.parenline:
            # Same line as the paren: hang the tip off the paren itself.
            box = self.widget.bbox("%d.%d" % (self.parenline,
                                              self.parencol))
        else:
            box = self.widget.bbox("%d.0" % curline)
        if not box:
            # bbox() returns None when the position is not visible; fall
            # back to the insertion cursor and pin the tip to the left edge.
            box = list(self.widget.bbox("insert"))
            # align to left of window
            box[0] = 0
            box[2] = 0
        x = box[0] + self.widget.winfo_rootx() + 2
        y = box[1] + box[3] + self.widget.winfo_rooty()
        self.tipwindow.wm_geometry("+%d+%d" % (x, y))
    def showtip(self, text, parenleft, parenright):
        """Show the calltip, bind events which will close it and reposition it.
        """
        # truncate overly long calltip
        if len(text) >= 79:
            textlines = text.splitlines()
            for i, line in enumerate(textlines):
                if len(line) > 79:
                    textlines[i] = line[:75] + ' ...'
            text = '\n'.join(textlines)
        self.text = text
        if self.tipwindow or not self.text:
            return
        # Mark the region's right edge so checkhide_event() can tell when
        # the cursor has moved past the closing parenthesis.
        self.widget.mark_set(MARK_RIGHT, parenright)
        self.parenline, self.parencol = map(
            int, self.widget.index(parenleft).split("."))
        self.tipwindow = tw = Toplevel(self.widget)
        self.position_window()
        # remove border on calltip window
        tw.wm_overrideredirect(1)
        try:
            # This command is only needed and available on Tk >= 8.4.0 for OSX
            # Without it, call tips intrude on the typing process by grabbing
            # the focus.
            tw.tk.call("::tk::unsupported::MacWindowStyle", "style", tw._w,
                       "help", "noActivates")
        except TclError:
            pass
        self.label = Label(tw, text=self.text, justify=LEFT,
                           background="#ffffe0", relief=SOLID, borderwidth=1,
                           font = self.widget['font'])
        self.label.pack()
        # Re-check the hide condition on every keystroke/click, and also on
        # a timer so the tip follows scrolling and external edits.
        self.checkhideid = self.widget.bind(CHECKHIDE_VIRTUAL_EVENT_NAME,
                                            self.checkhide_event)
        for seq in CHECKHIDE_SEQUENCES:
            self.widget.event_add(CHECKHIDE_VIRTUAL_EVENT_NAME, seq)
        self.widget.after(CHECKHIDE_TIME, self.checkhide_event)
        self.hideid = self.widget.bind(HIDE_VIRTUAL_EVENT_NAME,
                                       self.hide_event)
        for seq in HIDE_SEQUENCES:
            self.widget.event_add(HIDE_VIRTUAL_EVENT_NAME, seq)
    def checkhide_event(self, event=None):
        # Hide the tip if the cursor left the paren region; otherwise keep
        # it positioned and schedule another timed check.
        if not self.tipwindow:
            # If the event was triggered by the same event that unbinded
            # this function, the function will be called nevertheless,
            # so do nothing in this case.
            return
        curline, curcol = map(int, self.widget.index("insert").split('.'))
        if curline < self.parenline or \
           (curline == self.parenline and curcol <= self.parencol) or \
           self.widget.compare("insert", ">", MARK_RIGHT):
            self.hidetip()
        else:
            self.position_window()
            self.widget.after(CHECKHIDE_TIME, self.checkhide_event)
    def hide_event(self, event):
        # Explicit hide request (Escape pressed or focus lost).
        if not self.tipwindow:
            # See the explanation in checkhide_event.
            return
        self.hidetip()
    def hidetip(self):
        # Tear down in reverse order of showtip(): remove virtual events
        # and bindings first, then destroy widgets, then clear state.
        if not self.tipwindow:
            return
        for seq in CHECKHIDE_SEQUENCES:
            self.widget.event_delete(CHECKHIDE_VIRTUAL_EVENT_NAME, seq)
        self.widget.unbind(CHECKHIDE_VIRTUAL_EVENT_NAME, self.checkhideid)
        self.checkhideid = None
        for seq in HIDE_SEQUENCES:
            self.widget.event_delete(HIDE_VIRTUAL_EVENT_NAME, seq)
        self.widget.unbind(HIDE_VIRTUAL_EVENT_NAME, self.hideid)
        self.hideid = None
        self.label.destroy()
        self.label = None
        self.tipwindow.destroy()
        self.tipwindow = None
        self.widget.mark_unset(MARK_RIGHT)
        self.parenline = self.parencol = self.lastline = None
    def is_active(self):
        # True while a tip window is currently displayed.
        return bool(self.tipwindow)
###############################
#
# Test Code
#
class container: # Conceptually an editor_window
    """Minimal manual-test harness: a Tk Text widget wired to a CallTip.

    Typing "(" shows the tip and ")" hides it.
    """
    def __init__(self):
        root = Tk()
        text = self.text = Text(root)
        text.pack(side=LEFT, fill=BOTH, expand=1)
        text.insert("insert", "string.split")
        root.update()
        self.calltip = CallTip(text)
        # Map paren keystrokes to the virtual events handled below.
        text.event_add("<<calltip-show>>", "(")
        text.event_add("<<calltip-hide>>", ")")
        text.bind("<<calltip-show>>", self.calltip_show)
        text.bind("<<calltip-hide>>", self.calltip_hide)
        text.focus_set()
        root.mainloop()
    def calltip_show(self, event):
        # showtip() requires the paren region bounds; the previous one-arg
        # call raised TypeError, so the tip never appeared in this test.
        self.calltip.showtip("Hello world", "insert", "end")
    def calltip_hide(self, event):
        self.calltip.hidetip()
def main():
    """Manual test entry point: open the calltip demo window."""
    container()

if __name__ == '__main__':
    main()
| {
"repo_name": "patrioticcow/MessagesForSkype",
"path": "packages/win32/bundle/MessagesForSkype/modules/python/1.3.1-beta/Lib/idlelib/CallTipWindow.py",
"copies": "96",
"size": "5924",
"license": "mit",
"hash": -5301654483153786000,
"line_mean": 33.6432748538,
"line_max": 79,
"alpha_frac": 0.5697164078,
"autogenerated": false,
"ratio": 3.721105527638191,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A CallTip window class for Tkinter/IDLE.
After ToolTip.py, which uses ideas gleaned from PySol
Used by the CallTips IDLE extension.
"""
from tkinter import *
# Virtual event (and the key sequences mapped to it) that hides the tip.
HIDE_VIRTUAL_EVENT_NAME = "<<calltipwindow-hide>>"
HIDE_SEQUENCES = ("<Key-Escape>", "<FocusOut>")
# Virtual event that triggers a re-check of whether the tip should hide.
CHECKHIDE_VIRTUAL_EVENT_NAME = "<<calltipwindow-checkhide>>"
CHECKHIDE_SEQUENCES = ("<KeyRelease>", "<ButtonRelease>")
CHECKHIDE_TIME = 100 # milliseconds between automatic hide checks
# Text mark placed at the region's right edge (the closing parenthesis).
MARK_RIGHT = "calltipwindowregion_right"
class CallTip:
    """Floating tip window showing a call signature next to a Text widget.

    The tip is anchored at the opening parenthesis and hides itself when
    the insertion cursor leaves the argument region, or on Escape/FocusOut.
    """
    def __init__(self, widget):
        # widget is the Text widget the calltip is attached to.
        self.widget = widget
        self.tipwindow = self.label = None
        # Line/column of the opening parenthesis the tip is anchored to.
        self.parenline = self.parencol = None
        self.lastline = None
        # Tk binding ids, kept so the bindings can be removed in hidetip().
        self.hideid = self.checkhideid = None
        # Pending after() callback id, so repeated checks can be coalesced.
        self.checkhide_after_id = None
    def position_window(self):
        """Check if needs to reposition the window, and if so - do it."""
        curline = int(self.widget.index("insert").split('.')[0])
        if curline == self.lastline:
            return
        self.lastline = curline
        self.widget.see("insert")
        if curline == self.parenline:
            # Same line as the paren: hang the tip off the paren itself.
            box = self.widget.bbox("%d.%d" % (self.parenline,
                                              self.parencol))
        else:
            box = self.widget.bbox("%d.0" % curline)
        if not box:
            # bbox() returns None when the position is not visible; fall
            # back to the insertion cursor and pin the tip to the left edge.
            box = list(self.widget.bbox("insert"))
            # align to left of window
            box[0] = 0
            box[2] = 0
        x = box[0] + self.widget.winfo_rootx() + 2
        y = box[1] + box[3] + self.widget.winfo_rooty()
        self.tipwindow.wm_geometry("+%d+%d" % (x, y))
    def showtip(self, text, parenleft, parenright):
        """Show the calltip, bind events which will close it and reposition it.
        """
        # truncate overly long calltip
        if len(text) >= 79:
            textlines = text.splitlines()
            for i, line in enumerate(textlines):
                if len(line) > 79:
                    textlines[i] = line[:75] + ' ...'
            text = '\n'.join(textlines)
        self.text = text
        if self.tipwindow or not self.text:
            return
        # Mark the region's right edge so checkhide_event() can tell when
        # the cursor has moved past the closing parenthesis.
        self.widget.mark_set(MARK_RIGHT, parenright)
        self.parenline, self.parencol = map(
            int, self.widget.index(parenleft).split("."))
        self.tipwindow = tw = Toplevel(self.widget)
        self.position_window()
        # remove border on calltip window
        tw.wm_overrideredirect(1)
        try:
            # This command is only needed and available on Tk >= 8.4.0 for OSX
            # Without it, call tips intrude on the typing process by grabbing
            # the focus.
            tw.tk.call("::tk::unsupported::MacWindowStyle", "style", tw._w,
                       "help", "noActivates")
        except TclError:
            pass
        self.label = Label(tw, text=self.text, justify=LEFT,
                           background="#ffffe0", relief=SOLID, borderwidth=1,
                           font = self.widget['font'])
        self.label.pack()
        # Re-check the hide condition on every keystroke/click, and also on
        # a timer so the tip follows scrolling and external edits.
        self.checkhideid = self.widget.bind(CHECKHIDE_VIRTUAL_EVENT_NAME,
                                            self.checkhide_event)
        for seq in CHECKHIDE_SEQUENCES:
            self.widget.event_add(CHECKHIDE_VIRTUAL_EVENT_NAME, seq)
        self.widget.after(CHECKHIDE_TIME, self.checkhide_event)
        self.hideid = self.widget.bind(HIDE_VIRTUAL_EVENT_NAME,
                                       self.hide_event)
        for seq in HIDE_SEQUENCES:
            self.widget.event_add(HIDE_VIRTUAL_EVENT_NAME, seq)
    def checkhide_event(self, event=None):
        # Hide the tip if the cursor left the paren region; otherwise keep
        # it positioned and schedule another timed check, cancelling any
        # previously scheduled one so checks don't pile up.
        if not self.tipwindow:
            # If the event was triggered by the same event that unbinded
            # this function, the function will be called nevertheless,
            # so do nothing in this case.
            return
        curline, curcol = map(int, self.widget.index("insert").split('.'))
        if curline < self.parenline or \
           (curline == self.parenline and curcol <= self.parencol) or \
           self.widget.compare("insert", ">", MARK_RIGHT):
            self.hidetip()
        else:
            self.position_window()
            if self.checkhide_after_id is not None:
                self.widget.after_cancel(self.checkhide_after_id)
            self.checkhide_after_id = \
                self.widget.after(CHECKHIDE_TIME, self.checkhide_event)
    def hide_event(self, event):
        # Explicit hide request (Escape pressed or focus lost).
        if not self.tipwindow:
            # See the explanation in checkhide_event.
            return
        self.hidetip()
    def hidetip(self):
        # Tear down in reverse order of showtip(): remove virtual events
        # and bindings first, then destroy widgets, then clear state.
        if not self.tipwindow:
            return
        for seq in CHECKHIDE_SEQUENCES:
            self.widget.event_delete(CHECKHIDE_VIRTUAL_EVENT_NAME, seq)
        self.widget.unbind(CHECKHIDE_VIRTUAL_EVENT_NAME, self.checkhideid)
        self.checkhideid = None
        for seq in HIDE_SEQUENCES:
            self.widget.event_delete(HIDE_VIRTUAL_EVENT_NAME, seq)
        self.widget.unbind(HIDE_VIRTUAL_EVENT_NAME, self.hideid)
        self.hideid = None
        self.label.destroy()
        self.label = None
        self.tipwindow.destroy()
        self.tipwindow = None
        self.widget.mark_unset(MARK_RIGHT)
        self.parenline = self.parencol = self.lastline = None
    def is_active(self):
        # True while a tip window is currently displayed.
        return bool(self.tipwindow)
###############################
#
# Test Code
#
class container: # Conceptually an editor_window
    """Minimal manual-test harness: a Tk Text widget wired to a CallTip.

    Typing "(" shows the tip and ")" hides it.
    """
    def __init__(self):
        root = Tk()
        text = self.text = Text(root)
        text.pack(side=LEFT, fill=BOTH, expand=1)
        text.insert("insert", "string.split")
        root.update()
        self.calltip = CallTip(text)
        # Map paren keystrokes to the virtual events handled below.
        text.event_add("<<calltip-show>>", "(")
        text.event_add("<<calltip-hide>>", ")")
        text.bind("<<calltip-show>>", self.calltip_show)
        text.bind("<<calltip-hide>>", self.calltip_hide)
        text.focus_set()
        root.mainloop()
    def calltip_show(self, event):
        # showtip() requires the paren region bounds; the previous one-arg
        # call raised TypeError, so the tip never appeared in this test.
        self.calltip.showtip("Hello world", "insert", "end")
    def calltip_hide(self, event):
        self.calltip.hidetip()
def main():
    """Manual test entry point: open the calltip demo window."""
    container()

if __name__ == '__main__':
    main()
| {
"repo_name": "MalloyPower/parsing-python",
"path": "front-end/testsuite-python-lib/Python-3.3.0/Lib/idlelib/CallTipWindow.py",
"copies": "6",
"size": "6125",
"license": "mit",
"hash": -587619604425742300,
"line_mean": 34,
"line_max": 79,
"alpha_frac": 0.5699591837,
"autogenerated": false,
"ratio": 3.721142162818955,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7291101346518954,
"avg_score": null,
"num_lines": null
} |
"""A CallTip window class for Tkinter/IDLE.
After ToolTip.py, which uses ideas gleaned from PySol
Used by the CallTips IDLE extension.
"""
from Tkinter import Toplevel, Label, LEFT, SOLID, TclError
# Virtual event (and the key sequences mapped to it) that hides the tip.
HIDE_VIRTUAL_EVENT_NAME = "<<calltipwindow-hide>>"
HIDE_SEQUENCES = ("<Key-Escape>", "<FocusOut>")
# Virtual event that triggers a re-check of whether the tip should hide.
CHECKHIDE_VIRTUAL_EVENT_NAME = "<<calltipwindow-checkhide>>"
CHECKHIDE_SEQUENCES = ("<KeyRelease>", "<ButtonRelease>")
CHECKHIDE_TIME = 100 # milliseconds between automatic hide checks
# Text mark placed at the region's right edge (the closing parenthesis).
MARK_RIGHT = "calltipwindowregion_right"
class CallTip:
    """Floating tip window showing a call signature next to a Text widget.

    The tip is anchored at the opening parenthesis and hides itself when
    the insertion cursor leaves the argument region, or on Escape/FocusOut.
    """
    def __init__(self, widget):
        # widget is the Text widget the calltip is attached to.
        self.widget = widget
        self.tipwindow = self.label = None
        # Line/column of the opening parenthesis the tip is anchored to.
        self.parenline = self.parencol = None
        self.lastline = None
        # Tk binding ids, kept so the bindings can be removed in hidetip().
        self.hideid = self.checkhideid = None
        # Pending after() callback id, so repeated checks can be coalesced.
        self.checkhide_after_id = None
    def position_window(self):
        """Check if needs to reposition the window, and if so - do it."""
        curline = int(self.widget.index("insert").split('.')[0])
        if curline == self.lastline:
            return
        self.lastline = curline
        self.widget.see("insert")
        if curline == self.parenline:
            # Same line as the paren: hang the tip off the paren itself.
            box = self.widget.bbox("%d.%d" % (self.parenline,
                                              self.parencol))
        else:
            box = self.widget.bbox("%d.0" % curline)
        if not box:
            # bbox() returns None when the position is not visible; fall
            # back to the insertion cursor and pin the tip to the left edge.
            box = list(self.widget.bbox("insert"))
            # align to left of window
            box[0] = 0
            box[2] = 0
        x = box[0] + self.widget.winfo_rootx() + 2
        y = box[1] + box[3] + self.widget.winfo_rooty()
        self.tipwindow.wm_geometry("+%d+%d" % (x, y))
    def showtip(self, text, parenleft, parenright):
        """Show the calltip, bind events which will close it and reposition it.
        """
        # Only called in CallTips, where lines are truncated
        self.text = text
        if self.tipwindow or not self.text:
            return
        # Mark the region's right edge so checkhide_event() can tell when
        # the cursor has moved past the closing parenthesis.
        self.widget.mark_set(MARK_RIGHT, parenright)
        self.parenline, self.parencol = map(
            int, self.widget.index(parenleft).split("."))
        self.tipwindow = tw = Toplevel(self.widget)
        self.position_window()
        # remove border on calltip window
        tw.wm_overrideredirect(1)
        try:
            # This command is only needed and available on Tk >= 8.4.0 for OSX
            # Without it, call tips intrude on the typing process by grabbing
            # the focus.
            tw.tk.call("::tk::unsupported::MacWindowStyle", "style", tw._w,
                       "help", "noActivates")
        except TclError:
            pass
        self.label = Label(tw, text=self.text, justify=LEFT,
                           background="#ffffe0", relief=SOLID, borderwidth=1,
                           font = self.widget['font'])
        self.label.pack()
        tw.lift()  # work around bug in Tk 8.5.18+ (issue #24570)
        # Re-check the hide condition on every keystroke/click, and also on
        # a timer so the tip follows scrolling and external edits.
        self.checkhideid = self.widget.bind(CHECKHIDE_VIRTUAL_EVENT_NAME,
                                            self.checkhide_event)
        for seq in CHECKHIDE_SEQUENCES:
            self.widget.event_add(CHECKHIDE_VIRTUAL_EVENT_NAME, seq)
        self.widget.after(CHECKHIDE_TIME, self.checkhide_event)
        self.hideid = self.widget.bind(HIDE_VIRTUAL_EVENT_NAME,
                                       self.hide_event)
        for seq in HIDE_SEQUENCES:
            self.widget.event_add(HIDE_VIRTUAL_EVENT_NAME, seq)
    def checkhide_event(self, event=None):
        # Hide the tip if the cursor left the paren region; otherwise keep
        # it positioned and schedule another timed check, cancelling any
        # previously scheduled one so checks don't pile up.
        if not self.tipwindow:
            # If the event was triggered by the same event that unbinded
            # this function, the function will be called nevertheless,
            # so do nothing in this case.
            return
        curline, curcol = map(int, self.widget.index("insert").split('.'))
        if curline < self.parenline or \
           (curline == self.parenline and curcol <= self.parencol) or \
           self.widget.compare("insert", ">", MARK_RIGHT):
            self.hidetip()
        else:
            self.position_window()
            if self.checkhide_after_id is not None:
                self.widget.after_cancel(self.checkhide_after_id)
            self.checkhide_after_id = \
                self.widget.after(CHECKHIDE_TIME, self.checkhide_event)
    def hide_event(self, event):
        # Explicit hide request (Escape pressed or focus lost).
        if not self.tipwindow:
            # See the explanation in checkhide_event.
            return
        self.hidetip()
    def hidetip(self):
        # Tear down in reverse order of showtip(): remove virtual events
        # and bindings first, then destroy widgets, then clear state.
        if not self.tipwindow:
            return
        for seq in CHECKHIDE_SEQUENCES:
            self.widget.event_delete(CHECKHIDE_VIRTUAL_EVENT_NAME, seq)
        self.widget.unbind(CHECKHIDE_VIRTUAL_EVENT_NAME, self.checkhideid)
        self.checkhideid = None
        for seq in HIDE_SEQUENCES:
            self.widget.event_delete(HIDE_VIRTUAL_EVENT_NAME, seq)
        self.widget.unbind(HIDE_VIRTUAL_EVENT_NAME, self.hideid)
        self.hideid = None
        self.label.destroy()
        self.label = None
        self.tipwindow.destroy()
        self.tipwindow = None
        self.widget.mark_unset(MARK_RIGHT)
        self.parenline = self.parencol = self.lastline = None
    def is_active(self):
        # True while a tip window is currently displayed.
        return bool(self.tipwindow)
def _calltip_window(parent):  # htest #
    # Manual test (run via idlelib.idle_test.htest): typing "(" in the Text
    # widget pops up a calltip; ")" dismisses it.
    from Tkinter import Toplevel, Text, LEFT, BOTH
    top = Toplevel(parent)
    top.title("Test calltips")
    top.geometry("200x100+%d+%d" % (parent.winfo_rootx() + 200,
                  parent.winfo_rooty() + 150))
    text = Text(top)
    text.pack(side=LEFT, fill=BOTH, expand=1)
    text.insert("insert", "string.split")
    top.update()
    calltip = CallTip(text)
    def calltip_show(event):
        calltip.showtip("(s=Hello world)", "insert", "end")
    def calltip_hide(event):
        calltip.hidetip()
    # Map paren keystrokes to the virtual events handled above.
    text.event_add("<<calltip-show>>", "(")
    text.event_add("<<calltip-hide>>", ")")
    text.bind("<<calltip-show>>", calltip_show)
    text.bind("<<calltip-hide>>", calltip_hide)
    text.focus_set()
if __name__=='__main__':
    from idlelib.idle_test.htest import run
    run(_calltip_window)
| {
"repo_name": "Jeff-Tian/mybnb",
"path": "Python27/Lib/idlelib/CallTipWindow.py",
"copies": "1",
"size": "6195",
"license": "apache-2.0",
"hash": 6354900288663753000,
"line_mean": 36.4782608696,
"line_max": 79,
"alpha_frac": 0.5717514124,
"autogenerated": false,
"ratio": 3.7118034751348112,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4783554887534811,
"avg_score": null,
"num_lines": null
} |
"""A CallTip window class for Tkinter/IDLE.
After ToolTip.py, which uses ideas gleaned from PySol
Used by the CallTips IDLE extension.
"""
from Tkinter import *
HIDE_VIRTUAL_EVENT_NAME = "<<calltipwindow-hide>>"
HIDE_SEQUENCES = ("<Key-Escape>", "<FocusOut>")
CHECKHIDE_VIRTUAL_EVENT_NAME = "<<calltipwindow-checkhide>>"
CHECKHIDE_SEQUENCES = ("<KeyRelease>", "<ButtonRelease>")
CHECKHIDE_TIME = 100 # miliseconds
MARK_RIGHT = "calltipwindowregion_right"
class CallTip:
def __init__(self, widget):
self.widget = widget
self.tipwindow = self.label = None
self.parenline = self.parencol = None
self.lastline = None
self.hideid = self.checkhideid = None
def position_window(self):
"""Check if needs to reposition the window, and if so - do it."""
curline = int(self.widget.index("insert").split('.')[0])
if curline == self.lastline:
return
self.lastline = curline
self.widget.see("insert")
if curline == self.parenline:
box = self.widget.bbox("%d.%d" % (self.parenline,
self.parencol))
else:
box = self.widget.bbox("%d.0" % curline)
if not box:
box = list(self.widget.bbox("insert"))
# align to left of window
box[0] = 0
box[2] = 0
x = box[0] + self.widget.winfo_rootx() + 2
y = box[1] + box[3] + self.widget.winfo_rooty()
self.tipwindow.wm_geometry("+%d+%d" % (x, y))
def showtip(self, text, parenleft, parenright):
"""Show the calltip, bind events which will close it and reposition it.
"""
# truncate overly long calltip
if len(text) >= 79:
textlines = text.splitlines()
for i, line in enumerate(textlines):
if len(line) > 79:
textlines[i] = line[:75] + ' ...'
text = '\n'.join(textlines)
self.text = text
if self.tipwindow or not self.text:
return
self.widget.mark_set(MARK_RIGHT, parenright)
self.parenline, self.parencol = map(
int, self.widget.index(parenleft).split("."))
self.tipwindow = tw = Toplevel(self.widget)
self.position_window()
# remove border on calltip window
tw.wm_overrideredirect(1)
try:
# This command is only needed and available on Tk >= 8.4.0 for OSX
# Without it, call tips intrude on the typing process by grabbing
# the focus.
tw.tk.call("::tk::unsupported::MacWindowStyle", "style", tw._w,
"help", "noActivates")
except TclError:
pass
self.label = Label(tw, text=self.text, justify=LEFT,
background="#ffffe0", relief=SOLID, borderwidth=1,
font = self.widget['font'])
self.label.pack()
self.checkhideid = self.widget.bind(CHECKHIDE_VIRTUAL_EVENT_NAME,
self.checkhide_event)
for seq in CHECKHIDE_SEQUENCES:
self.widget.event_add(CHECKHIDE_VIRTUAL_EVENT_NAME, seq)
self.widget.after(CHECKHIDE_TIME, self.checkhide_event)
self.hideid = self.widget.bind(HIDE_VIRTUAL_EVENT_NAME,
self.hide_event)
for seq in HIDE_SEQUENCES:
self.widget.event_add(HIDE_VIRTUAL_EVENT_NAME, seq)
def checkhide_event(self, event=None):
if not self.tipwindow:
# If the event was triggered by the same event that unbinded
# this function, the function will be called nevertheless,
# so do nothing in this case.
return
curline, curcol = map(int, self.widget.index("insert").split('.'))
if curline < self.parenline or \
(curline == self.parenline and curcol <= self.parencol) or \
self.widget.compare("insert", ">", MARK_RIGHT):
self.hidetip()
else:
self.position_window()
self.widget.after(CHECKHIDE_TIME, self.checkhide_event)
def hide_event(self, event):
if not self.tipwindow:
# See the explanation in checkhide_event.
return
self.hidetip()
def hidetip(self):
if not self.tipwindow:
return
for seq in CHECKHIDE_SEQUENCES:
self.widget.event_delete(CHECKHIDE_VIRTUAL_EVENT_NAME, seq)
self.widget.unbind(CHECKHIDE_VIRTUAL_EVENT_NAME, self.checkhideid)
self.checkhideid = None
for seq in HIDE_SEQUENCES:
self.widget.event_delete(HIDE_VIRTUAL_EVENT_NAME, seq)
self.widget.unbind(HIDE_VIRTUAL_EVENT_NAME, self.hideid)
self.hideid = None
self.label.destroy()
self.label = None
self.tipwindow.destroy()
self.tipwindow = None
self.widget.mark_unset(MARK_RIGHT)
self.parenline = self.parencol = self.lastline = None
def is_active(self):
return bool(self.tipwindow)
###############################
#
# Test Code
#
class container: # Conceptually an editor_window
def __init__(self):
root = Tk()
text = self.text = Text(root)
text.pack(side=LEFT, fill=BOTH, expand=1)
text.insert("insert", "string.split")
root.update()
self.calltip = CallTip(text)
text.event_add("<<calltip-show>>", "(")
text.event_add("<<calltip-hide>>", ")")
text.bind("<<calltip-show>>", self.calltip_show)
text.bind("<<calltip-hide>>", self.calltip_hide)
text.focus_set()
root.mainloop()
def calltip_show(self, event):
self.calltip.showtip("Hello world")
def calltip_hide(self, event):
self.calltip.hidetip()
def main():
# Test code
c=container()
if __name__=='__main__':
main()
| {
"repo_name": "ericlink/adms-server",
"path": "playframework-dist/1.1-src/python/Lib/idlelib/CallTipWindow.py",
"copies": "7",
"size": "6095",
"license": "mit",
"hash": 3236687461239166000,
"line_mean": 33.6432748538,
"line_max": 79,
"alpha_frac": 0.5537325677,
"autogenerated": false,
"ratio": 3.7880671224362956,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7841799690136295,
"avg_score": null,
"num_lines": null
} |
# A call tracer decorator for both functions and methods
def tracer(func):  # Use function, not class with __call__
    """Decorator that counts and announces every call to *func*.

    Implemented as a closure rather than a class with ``__call__`` so that
    decorated *methods* still receive the real instance as ``self`` (a
    class-based decorator would receive the decorator instance instead).

    Returns the wrapper, which prints ``call N to <name>`` and then
    delegates to *func*, returning its result unchanged.
    """
    from functools import wraps  # local import: file has no import section
    calls = 0
    @wraps(func)  # preserve func's __name__/__doc__ on the wrapper
    def onCall(*args, **kwargs):
        nonlocal calls  # 2.X alternative: onCall.calls += 1
        calls += 1
        print('call %s to %s' % (calls, func.__name__))
        return func(*args, **kwargs)
    return onCall
if __name__ == '__main__':
    # Self-test: exercise the decorator on plain functions and on methods.
    # Applies to simple functions
    @tracer
    def spam(a, b, c): # spam = tracer(spam)
        print(a + b + c) # onCall remembers spam
    @tracer
    def eggs(N):
        return 2 ** N
    spam(1, 2, 3) # Runs onCall(1, 2, 3)
    spam(a=4, b=5, c=6)
    print(eggs(32))
    # Applies to class method functions too!
    class Person:
        def __init__(self, name, pay):
            self.name = name
            self.pay = pay
        @tracer
        def giveRaise(self, percent): # giveRaise = tracer(giveRaise)
            self.pay *= (1.0 + percent) # onCall remembers giveRaise
        @tracer
        def lastName(self): # lastName = tracer(lastName)
            return self.name.split()[-1]
    print('methods...')
    bob = Person('Bob Smith', 50000)
    sue = Person('Sue Jones', 100000)
    print(bob.name, sue.name)
    sue.giveRaise(.10) # Runs onCall(sue, .10)
    print(int(sue.pay))
    print(bob.lastName(), sue.lastName()) # Runs onCall(bob), lastName in scopes
| {
"repo_name": "dreadrel/UWF_2014_spring_COP3990C-2507",
"path": "notebooks/scripts/book_code/code/calltracer.py",
"copies": "1",
"size": "1602",
"license": "apache-2.0",
"hash": -6866787486013216000,
"line_mean": 32.375,
"line_max": 83,
"alpha_frac": 0.5056179775,
"autogenerated": false,
"ratio": 3.7169373549883993,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47225553324883995,
"avg_score": null,
"num_lines": null
} |
"""A camera device."""
import time
from pathlib import Path
from sb_vision import Camera as VisionCamera
from sb_vision import Token, Vision
from .devices_base import Board
class Camera(Board):
    """A video4linux camera board that can take vision snapshots on demand."""

    # udev match keys used by the board-discovery machinery.
    lookup_keys = {
        'subsystem': 'video4linux',
    }

    # Distance model name and capture resolution passed to sb_vision.
    DISTANCE_MODEL = 'c270'
    IMAGE_SIZE = (1280, 720)

    def __init__(self, node, camera=None):
        """Wrap udev *node*; *camera* may be injected (e.g. for tests)."""
        super().__init__(node)
        self.camera = camera

    @classmethod
    def name(cls, node):
        # Get device name, e.g. "video0" from "/dev/video0".
        return Path(node['DEVNAME']).stem

    def start(self):
        """Open the capture device (unless one was injected) and reset status."""
        if not self.camera:
            self.camera = VisionCamera(
                int(self.node['MINOR']),
                self.IMAGE_SIZE,
                self.DISTANCE_MODEL,
            )
        self.vision = Vision(self.camera)
        self._status = {
            'snapshot_timestamp': None,
            'markers': [],
        }

    def _update_status(self, markers):
        # Record the markers along with the wall-clock time of the snapshot.
        self._status = {
            'snapshot_timestamp': time.time(),
            'markers': markers,
        }

    @staticmethod
    def _serialise_marker(marker: Token):
        """Return a JSON-serialisable dict describing *marker*.

        Copies the marker's ``__dict__`` before replacing the array-valued
        fields with plain lists; the previous code assigned into
        ``marker.__dict__`` directly and so mutated the Token itself.
        """
        d = dict(marker.__dict__)
        d['homography_matrix'] = marker.homography_matrix.tolist()
        d['cartesian'] = marker.cartesian.tolist()
        return d

    def status(self):
        """Return the most recent snapshot status."""
        return self._status

    def command(self, cmd):
        """Run user-provided command."""
        if cmd.get('see', False):
            self._update_status(markers=[
                self._serialise_marker(x)
                for x in self.vision.snapshot()
            ])
            # rely on the status being sent back to the requesting connection
            # by the ``BoardRunner``.
| {
"repo_name": "sourcebots/robotd",
"path": "robotd/camera.py",
"copies": "1",
"size": "1711",
"license": "mit",
"hash": -7538196464957318000,
"line_mean": 23.7971014493,
"line_max": 77,
"alpha_frac": 0.5429573349,
"autogenerated": false,
"ratio": 4.112980769230769,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5155938104130768,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.